============
-
Reviewed the progress of week 121.
-
vfs_caches_init()
- start_kernel 1 ~/kernel/iamroot/linux-stable/init/main.c
- vfs_caches_init 925 ~/kernel/iamroot/linux-stable/init/main.c
- files_init 3485 ~/kernel/iamroot/linux-stable/fs/dcache.c
asmlinkage void __init start_kernel(void)
{
char * command_line;
extern const struct kernel_param __start___param[], __stop___param[];
// used for ATAG/DTB information
...
buffer_init();
// creates the kmem_cache allocator used by struct buffer_head and initializes max_buffer_heads
key_init(); // null function
security_init(); // null function
dbg_late_init(); // null function
// totalram_pages: total number of pages freed to the page allocator
vfs_caches_init(totalram_pages);
- call: start_kernel()->vfs_caches_init()
// ARM10C 20151003
// totalram_pages: total number of pages freed to the page allocator
void __init vfs_caches_init(unsigned long mempages)
{
unsigned long reserve;
/* Base hash sizes on available memory, with a reserve equal to
150% of current kernel size */
// NOTE:
// the exact values of mempages and nr_free_pages() are not known here,
// so the computed value of reserve is denoted as XXX
// mempages: total number of freed pages, nr_free_pages(): number of currently free pages
reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
// reserve: XXX
// mempages: total number of freed pages, reserve: XXX
mempages -= reserve;
// mempages: total number of freed pages - XXX
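// Worked example (hypothetical numbers, since the real values are unknown here):
// if mempages = 0x20000 (512 MB of 4 KB pages) and nr_free_pages() = 0x1E000,
// then reserve = min(0x2000 * 3/2, 0x1FFFF) = 0x3000,
// and the caches below are sized from mempages - 0x3000 = 0x1D000 pages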
// PATH_MAX: 4096
// SLAB_HWCACHE_ALIGN: 0x00002000UL, SLAB_PANIC: 0x00040000UL
// kmem_cache_create("names_cache", 4096, 0, 0x42000, NULL): kmem_cache#6
names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
// names_cachep: kmem_cache#6
dcache_init();
// What dcache_init did:
//
// created a kmem_cache for struct dentry
// dentry_cache: kmem_cache#5
inode_init();
// What inode_init did:
//
// created a kmem_cache for struct inode
// inode_cachep: kmem_cache#4
// mempages: total number of freed pages - XXX
files_init(mempages);
// What files_init did:
//
// filp_cachep: kmem_cache#3
// files_stat.max_files: (total number of freed pages - XXX) * 4 / 10
// sysctl_nr_open_max: 0x3FFFFFE0
//
// (&(&(&(&nr_files)->lock)->wait_lock)->rlock)->raw_lock: { { 0 } }
// (&(&(&(&nr_files)->lock)->wait_lock)->rlock)->magic: 0xdead4ead
// (&(&(&(&nr_files)->lock)->wait_lock)->rlock)->owner: 0xffffffff
// (&(&(&(&nr_files)->lock)->wait_lock)->rlock)->owner_cpu: 0xffffffff
// (&(&nr_files)->list)->next: &(&nr_files)->list
// (&(&nr_files)->list)->prev: &(&nr_files)->list
// (&nr_files)->count: 0
// (&nr_files)->counters: address of 4 bytes allocated from kmem_cache#26-o0
// linked &(&nr_files)->list onto the list head &percpu_counters
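// (the factor 4 / 10 in max_files comes from files_init() budgeting roughly one file per 1 kB:
//  PAGE_SIZE / 1024 = 4 per 4 kB page, and at most ~10% of memory for files)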
mnt_init();
- start_kernel()->vfs_caches_init()
- mnt_init()
- allocates mnt_cache
// ARM10C 20151024
void __init mnt_init(void)
{
unsigned u;
int err;
// sizeof(struct mount): 152 bytes, SLAB_HWCACHE_ALIGN: 0x00002000UL, SLAB_PANIC: 0x00040000UL
// kmem_cache_create("mnt_cache", 152, 0, 0x42000, NULL): kmem_cache#2
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
// mnt_cache: kmem_cache#2
// sizeof(struct hlist_head): 4 bytes, mhash_entries: 0
// alloc_large_system_hash("Mount-cache", 4, 0, 19, 0, &m_hash_shift, &m_hash_mask, 0, 0): 16kB만큼 할당받은 메모리 주소
mount_hashtable = alloc_large_system_hash("Mount-cache",
sizeof(struct hlist_head),
mhash_entries, 19,
0,
&m_hash_shift, &m_hash_mask, 0, 0);
// mount_hashtable: address of 16 kB of allocated memory
// sizeof(struct hlist_head): 4 bytes, mphash_entries: 0
// alloc_large_system_hash("Mountpoint-cache", 4, 0, 19, 0, &m_hash_shift, &m_hash_mask, 0, 0): 16kB만큼 할당받은 메모리 주소
mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
sizeof(struct hlist_head),
mphash_entries, 19,
0,
&mp_hash_shift, &mp_hash_mask, 0, 0);
// mountpoint_hashtable: address of 16 kB of allocated memory
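// (16 kB / sizeof(struct hlist_head) = 16384 / 4 = 4096 buckets per table,
//  which is why m_hash_mask / mp_hash_mask are 0xFFF and the loops below run to 4095)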
// 2015/10/24: stopped here
// 2015/10/31: resumed
// mount_hashtable: address of 16 kB of allocated memory, mountpoint_hashtable: address of 16 kB of allocated memory
if (!mount_hashtable || !mountpoint_hashtable)
panic("Failed to allocate mount hash table\n");
// m_hash_mask: 0xFFF
for (u = 0; u <= m_hash_mask; u++)
// u: 0
INIT_HLIST_HEAD(&mount_hashtable[u]);
// What INIT_HLIST_HEAD did:
// ((&mount_hashtable[0])->first = NULL)
// the loop runs for u = 1..4095
// mp_hash_mask: 0xFFF
for (u = 0; u <= mp_hash_mask; u++)
// u: 0
INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
// What INIT_HLIST_HEAD did:
// ((&mountpoint_hashtable[0])->first = NULL)
// the loop runs for u = 1..4095
err = sysfs_init();
- list.h::INIT_HLIST_HEAD()
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
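The effect is easiest to see in isolation. Below is a minimal userspace sketch (the hlist types are simplified copies of the kernel ones, and the 0xFFF mask mirrors the value noted above): initializing every bucket just sets first to NULL, i.e. an empty chain.

```c
#include <stdio.h>
#include <stdlib.h>

/* simplified copies of the kernel's hlist types, for illustration only */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

int main(void)
{
	unsigned mask = 0xFFF;                      /* 4096 buckets, as in mnt_init() */
	struct hlist_head *table = malloc((mask + 1) * sizeof(*table));

	for (unsigned u = 0; u <= mask; u++)
		INIT_HLIST_HEAD(&table[u]);         /* each bucket starts as an empty chain */

	printf("bucket 0 empty: %d\n", table[0].first == NULL);
	free(table);
	return 0;
}
```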
- start_kernel()->vfs_caches_init()
- mnt_init()
- sysfs_init()
// ARM10C 20151031
int __init sysfs_init(void)
{
// ENOMEM: 12
int err = -ENOMEM;
// err: -12
// sizeof(struct sysfs_dirent): 64 bytes
// kmem_cache_create("sysfs_dir_cache", 64, 0, 0, NULL): kmem_cache#1
sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
sizeof(struct sysfs_dirent),
0, 0, NULL);
// sysfs_dir_cachep: kmem_cache#1
// sysfs_dir_cachep: kmem_cache#1
if (!sysfs_dir_cachep)
goto out;
// sysfs_inode_init(): 0
err = sysfs_inode_init();
// err: 0
- What sysfs_inode_init did:
- (&sysfs_backing_dev_info)->dev: NULL
- (&sysfs_backing_dev_info)->min_ratio: 0
- (&sysfs_backing_dev_info)->max_ratio: 100
- (&sysfs_backing_dev_info)->max_prop_frac: 0x400
- spinlock initialized using &(&sysfs_backing_dev_info)->wb_lock
- (&(&sysfs_backing_dev_info)->bdi_list)->next: &(&sysfs_backing_dev_info)->bdi_list
- (&(&sysfs_backing_dev_info)->bdi_list)->prev: &(&sysfs_backing_dev_info)->bdi_list
- (&(&sysfs_backing_dev_info)->work_list)->next: &(&sysfs_backing_dev_info)->work_list
- (&(&sysfs_backing_dev_info)->work_list)->prev: &(&sysfs_backing_dev_info)->work_list
- members of (&sysfs_backing_dev_info)->wb initialized to 0
- (&(&sysfs_backing_dev_info)->wb)->bdi: &sysfs_backing_dev_info
- (&(&sysfs_backing_dev_info)->wb)->last_old_flush: XXX
- (&(&(&sysfs_backing_dev_info)->wb)->b_dirty)->next: &(&(&sysfs_backing_dev_info)->wb)->b_dirty
- (&(&(&sysfs_backing_dev_info)->wb)->b_dirty)->prev: &(&(&sysfs_backing_dev_info)->wb)->b_dirty
- (&(&(&sysfs_backing_dev_info)->wb)->b_io)->next: &(&(&sysfs_backing_dev_info)->wb)->b_io
- (&(&(&sysfs_backing_dev_info)->wb)->b_io)->prev: &(&(&sysfs_backing_dev_info)->wb)->b_io
- (&(&(&sysfs_backing_dev_info)->wb)->b_more_io)->next: &(&(&sysfs_backing_dev_info)->wb)->b_more_io
- (&(&(&sysfs_backing_dev_info)->wb)->b_more_io)->prev: &(&(&sysfs_backing_dev_info)->wb)->b_more_io
- spinlock initialized using &(&(&sysfs_backing_dev_info)->wb)->list_lock
- (&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->work)->data: { 0xFFFFFFE0 }
- (&(&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->work)->entry)->next: &(&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->work)->entry
- (&(&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->work)->entry)->prev: &(&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->work)->entry
- (&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->work)->func: bdi_writeback_workfn
- (&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->timer)->entry.next: NULL
- (&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->timer)->base: [pcp0] tvec_bases | 0x2
- (&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->timer)->slack: -1
- (&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->timer)->function = (delayed_work_timer_fn);
- (&(&(&(&sysfs_backing_dev_info)->wb)->dwork)->timer)->data = ((unsigned long)(&(&(&sysfs_backing_dev_info)->wb)->dwork));
- (&(&(&(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->lock)->wait_lock)->rlock)->raw_lock: { { 0 } }
- (&(&(&(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->lock)->wait_lock)->rlock)->magic: 0xdead4ead
- (&(&(&(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->lock)->wait_lock)->rlock)->owner: 0xffffffff
- (&(&(&(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->lock)->wait_lock)->rlock)->owner_cpu: 0xffffffff
- (&(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->list)->next: &(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->list
- (&(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->list)->prev: &(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->list
- (&(&sysfs_backing_dev_info)->bdi_stat[0...3])->count: 0
- (&(&sysfs_backing_dev_info)->bdi_stat[0...3])->counters: address of 4 bytes allocated from kmem_cache#26-o0
- linked &(&(&sysfs_backing_dev_info)->bdi_stat[0...3])->list onto the list head &percpu_counters
- (&sysfs_backing_dev_info)->dirty_exceeded: 0
- (&sysfs_backing_dev_info)->bw_time_stamp: XXX
- (&sysfs_backing_dev_info)->written_stamp: 0
- (&sysfs_backing_dev_info)->balanced_dirty_ratelimit: 0x6400
- (&sysfs_backing_dev_info)->dirty_ratelimit: 0x6400
- (&sysfs_backing_dev_info)->write_bandwidth: 0x6400
- (&sysfs_backing_dev_info)->avg_write_bandwidth: 0x6400
- (&(&(&(&(&(&sysfs_backing_dev_info)->completions)->events)->lock)->wait_lock)->rlock)->raw_lock: { { 0 } }
- (&(&(&(&(&(&sysfs_backing_dev_info)->completions)->events)->lock)->wait_lock)->rlock)->magic: 0xdead4ead
- (&(&(&(&(&(&sysfs_backing_dev_info)->completions)->events)->lock)->wait_lock)->rlock)->owner: 0xffffffff
- (&(&(&(&(&(&sysfs_backing_dev_info)->completions)->events)->lock)->wait_lock)->rlock)->owner_cpu: 0xffffffff
- (&(&(&(&sysfs_backing_dev_info)->completions)->events)->list)->next: &(&(&(&sysfs_backing_dev_info)->completions)->events)->list
- (&(&(&(&sysfs_backing_dev_info)->completions)->events)->list)->prev: &(&(&(&sysfs_backing_dev_info)->completions)->events)->list
- (&(&(&sysfs_backing_dev_info)->completions)->events)->count: 0
- (&(&(&sysfs_backing_dev_info)->completions)->events)->counters: address of 4 bytes allocated from kmem_cache#26-o0
- linked &(&(&(&sysfs_backing_dev_info)->completions)->events)->list onto the list head &percpu_counters
- (&(&sysfs_backing_dev_info)->completions)->period: 0
- spinlock initialized using &(&(&sysfs_backing_dev_info)->completions)->lock
- fs/sysfs/mount.c::sysfs_init()
// ARM10C 20151031
int __init sysfs_init(void)
{
// ENOMEM: 12
int err = -ENOMEM;
// err: -12
// sizeof(struct sysfs_dirent): 64 bytes
// kmem_cache_create("sysfs_dir_cache", 64, 0, 0, NULL): kmem_cache#1
sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
sizeof(struct sysfs_dirent),
0, 0, NULL);
// sysfs_dir_cachep: kmem_cache#1
// sysfs_dir_cachep: kmem_cache#1
if (!sysfs_dir_cachep)
goto out;
// sysfs_inode_init(): 0
err = sysfs_inode_init();
// err: 0
// err: 0
if (err)
goto out_err;
// register_filesystem(&sysfs_fs_type): 0
err = register_filesystem(&sysfs_fs_type);
// err: 0
// What register_filesystem did:
// file_systems: &sysfs_fs_type
// err: 0
if (!err) {
sysfs_mnt = kern_mount(&sysfs_fs_type);
- kern_mount(&sysfs_fs_type)
// ARM10C 20151031
// &sysfs_fs_type
#define kern_mount(type) kern_mount_data(type, NULL)
- call:
// ARM10C 20151031
// &sysfs_fs_type, NULL
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
struct vfsmount *mnt;
// type: &sysfs_fs_type, MS_KERNMOUNT: 0x400000, type->name: (&sysfs_fs_type)->name: "sysfs", data: NULL
mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
// ARM10C 20151031
// type: &sysfs_fs_type, MS_KERNMOUNT: 0x400000, type->name: (&sysfs_fs_type)->name: "sysfs", data: NULL
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
struct mount *mnt;
struct dentry *root;
// type: &sysfs_fs_type
if (!type)
return ERR_PTR(-ENODEV);
// name: "sysfs"
mnt = alloc_vfsmnt(name);
- alloc_vfsmnt()
// ARM10C 20151031
// name: "sysfs"
static struct mount *alloc_vfsmnt(const char *name)
{
// mnt_cache: kmem_cache#2, GFP_KERNEL: 0xD0
// kmem_cache_zalloc(kmem_cache#2, 0xD0): kmem_cache#2-oX
struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
// mnt: kmem_cache#2-oX
// mnt: kmem_cache#2-oX
if (mnt) {
int err;
// mnt: kmem_cache#2-oX
err = mnt_alloc_id(mnt);
- mnt_alloc_id()
// ARM10C 20151031
// mnt: kmem_cache#2-oX
static int mnt_alloc_id(struct mount *mnt)
{
int res;
retry:
// GFP_KERNEL: 0xD0
ida_pre_get(&mnt_id_ida, GFP_KERNEL);
// What ida_pre_get did:
// allocated 8 struct idr_layer objects, kmem_cache#21-o0...7, from idr_layer_cache
// (kmem_cache#21-o0...7)->ary[0]: NULL
// (&(&mnt_id_ida)->idr)->id_free: kmem_cache#21-o7
// (&(&mnt_id_ida)->idr)->id_free_cnt: 7
//
// allocated a struct ida_bitmap object, kmem_cache#27-oX
// (&mnt_id_ida)->free_bitmap: kmem_cache#27-oX
spin_lock(&mnt_id_lock);
// What spin_lock did:
// acquired the spinlock &mnt_id_lock
// mnt_id_start: 0, &mnt->mnt_id: &(kmem_cache#2-oX)->mnt_id
res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id);
// ARM10C 20151031
// &mnt_id_ida, mnt_id_start: 0, &mnt->mnt_id: &(kmem_cache#2-oX)->mnt_id
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
// MAX_IDR_LEVEL: 4
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct ida_bitmap *bitmap;
unsigned long flags;
// starting_id: 0, IDA_BITMAP_BITS: 1344
int idr_id = starting_id / IDA_BITMAP_BITS;
// idr_id: 0
// starting_id: 0, IDA_BITMAP_BITS: 1344
int offset = starting_id % IDA_BITMAP_BITS;
// offset: 0
int t, id;
restart:
/* get vacant slot */
// &ida->idr: &(&mnt_id_ida)->idr, idr_id: 0
t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
// ARM10C 20151031
// &ida->idr: &(&mnt_id_ida)->idr, idr_id: 0, pa, 0, &ida->idr: &(&mnt_id_ida)->idr
static int idr_get_empty_slot(struct idr *idp, int starting_id,
struct idr_layer **pa, gfp_t gfp_mask,
struct idr *layer_idr)
{
struct idr_layer *p, *new;
int layers, v, id;
unsigned long flags;
// starting_id: 0
id = starting_id;
// id: 0
build_up:
// idp->top: (&(&mnt_id_ida)->idr)->top: NULL
p = idp->top;
// p: NULL
// idp->layers: (&(&mnt_id_ida)->idr)->layers: 0
layers = idp->layers;
// layers: 0
// p: NULL
if (unlikely(!p)) {
// gfp_mask: 0, layer_idr: &(&mnt_id_ida)->idr
// idr_layer_alloc(0, &(&mnt_id_ida)->idr): kmem_cache#21-o7, p: kmem_cache#21-o7
if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
return -ENOMEM;
// What idr_layer_alloc did:
// (&(&mnt_id_ida)->idr)->id_free: NULL
// (&(&mnt_id_ida)->idr)->id_free_cnt: 6
// (kmem_cache#21-o7)->ary[0]: NULL
// p->layer: (kmem_cache#21-o7)->layer
p->layer = 0;
// p->layer: (kmem_cache#21-o7)->layer: 0
// layers: 0
layers = 1;
// layers: 1
}
// 2015/10/31: stopped here
// 2015/11/07: resumed
/*
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
// id: 0, layers: 1, idr_max(1): 255
while (id > idr_max(layers)) {
- idr_max()
// ARM10C 20151107
// layers: 1
static int idr_max(int layers)
{
// layers: 1, IDR_BITS: 8, MAX_IDR_SHIFT: 31, min_t(int, 8, 31): 8
int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
// bits: 8
// ARM10C 20151107
#define min_t(type, x, y) ({ \
type __min1 = (x); \
type __min2 = (y); \
__min1 < __min2 ? __min1: __min2; })
- idr_max()
// ARM10C 20151107
// layers: 1
static int idr_max(int layers)
{
// layers: 1, IDR_BITS: 8, MAX_IDR_SHIFT: 31, min_t(int, 8, 31): 8
int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
// bits: 8
// bits: 8
return (1 << bits) - 1;
// return 255
}
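Plugging in the other possible layer counts makes the limit clearer. Below is a small userspace sketch, with IDR_BITS and MAX_IDR_SHIFT taken from the values noted above:

```c
#include <stdio.h>

#define IDR_BITS       8
#define MAX_IDR_SHIFT  31   /* ids are non-negative ints */

static int idr_max(int layers)
{
	int bits = layers * IDR_BITS;

	if (bits > MAX_IDR_SHIFT)
		bits = MAX_IDR_SHIFT;
	return (int)((1u << bits) - 1);
}

int main(void)
{
	/* layers 1..4 -> 255, 65535, 16777215, 2147483647 */
	for (int layers = 1; layers <= 4; layers++)
		printf("idr_max(%d) = %d\n", layers, idr_max(layers));
	return 0;
}
```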
- return idr_get_empty_slot()
// ARM10C 20151031
// &ida->idr: &(&mnt_id_ida)->idr, idr_id: 0, pa, 0, &ida->idr: &(&mnt_id_ida)->idr
static int idr_get_empty_slot(struct idr *idp, int starting_id,
struct idr_layer **pa, gfp_t gfp_mask,
struct idr *layer_idr)
{
struct idr_layer *p, *new;
int layers, v, id;
unsigned long flags;
// starting_id: 0
id = starting_id;
// id: 0
build_up:
// idp->top: (&(&mnt_id_ida)->idr)->top: NULL
p = idp->top;
// p: NULL
// idp->layers: (&(&mnt_id_ida)->idr)->layers: 0
layers = idp->layers;
// layers: 0
// p: NULL
if (unlikely(!p)) {
// gfp_mask: 0, layer_idr: &(&mnt_id_ida)->idr
// idr_layer_alloc(0, &(&mnt_id_ida)->idr): kmem_cache#21-o7, p: kmem_cache#21-o7
if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
return -ENOMEM;
// What idr_layer_alloc did:
// (&(&mnt_id_ida)->idr)->id_free: NULL
// (&(&mnt_id_ida)->idr)->id_free_cnt: 6
// (kmem_cache#21-o7)->ary[0]: NULL
// p->layer: (kmem_cache#21-o7)->layer
p->layer = 0;
// p->layer: (kmem_cache#21-o7)->layer: 0
// layers: 0
layers = 1;
// layers: 1
}
// 2015/10/31: stopped here
/*
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
// id: 0, layers: 1, idr_max(1): 255
while (id > idr_max(layers)) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
* then we grow the tree by moving the top node
* upwards.
*/
p->layer++;
WARN_ON_ONCE(p->prefix);
continue;
}
if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
*/
spin_lock_irqsave(&idp->lock, flags);
for (new = p; p && p != idp->top; new = p) {
p = p->ary[0];
new->ary[0] = NULL;
new->count = 0;
bitmap_clear(new->bitmap, 0, IDR_SIZE);
__move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
return -ENOMEM;
}
new->ary[0] = p;
new->count = 1;
new->layer = layers-1;
new->prefix = id & idr_layer_prefix_mask(new->layer);
if (bitmap_full(p->bitmap, IDR_SIZE))
__set_bit(0, new->bitmap);
p = new;
}
// idp->top: (&(&mnt_id_ida)->idr)->top, p: kmem_cache#21-o7
// __rcu_assign_pointer((&(&mnt_id_ida)->idr)->top, kmem_cache#21-o7, __rcu):
// do {
// smp_wmb();
// ((&(&mnt_id_ida)->idr)->top) = (typeof(*kmem_cache#21-o7) __force space *)(kmem_cache#21-o7);
// } while (0)
rcu_assign_pointer(idp->top, p);
// ((&(&mnt_id_ida)->idr)->top): (typeof(*kmem_cache#21-o7) __force space *)(kmem_cache#21-o7)
- call rcu_assign_pointer()
// ARM10C 20151107
// idp->top: (&(&mnt_id_ida)->idr)->top, p: kmem_cache#21-o7
#define rcu_assign_pointer(p, v) \
__rcu_assign_pointer((p), (v), __rcu)
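In this tree the macro expands to an smp_wmb() followed by a plain store, as the expanded comment above shows. As a rough userspace analogy of the same "initialize fully, then publish" ordering (not the kernel implementation; the struct and names are made up for illustration), C11 release/acquire gives the same guarantee:

```c
#include <stdatomic.h>
#include <stdio.h>

struct idr_layer_like { int layer; };            /* stand-in for struct idr_layer */

static _Atomic(struct idr_layer_like *) top;     /* plays the role of idp->top */

int main(void)
{
	static struct idr_layer_like first;

	first.layer = 0;                         /* initialize the object fully... */
	/* ...then publish it; the release store mirrors the smp_wmb() + store
	 * sequence of __rcu_assign_pointer() */
	atomic_store_explicit(&top, &first, memory_order_release);

	struct idr_layer_like *p = atomic_load_explicit(&top, memory_order_acquire);
	printf("published layer: %d\n", p->layer);
	return 0;
}
```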
- return idr_get_empty_slot()
// ARM10C 20151031
// &ida->idr: &(&mnt_id_ida)->idr, idr_id: 0, pa, 0, &ida->idr: &(&mnt_id_ida)->idr
static int idr_get_empty_slot(struct idr *idp, int starting_id,
struct idr_layer **pa, gfp_t gfp_mask,
struct idr *layer_idr)
{
struct idr_layer *p, *new;
int layers, v, id;
unsigned long flags;
// starting_id: 0
id = starting_id;
// id: 0
build_up:
// idp->top: (&(&mnt_id_ida)->idr)->top: NULL
p = idp->top;
// p: NULL
...
// idp->top: (&(&mnt_id_ida)->idr)->top, p: kmem_cache#21-o7
// __rcu_assign_pointer((&(&mnt_id_ida)->idr)->top, kmem_cache#21-o7, __rcu):
// do {
// smp_wmb();
// ((&(&mnt_id_ida)->idr)->top) = (typeof(*kmem_cache#21-o7) __force space *)(kmem_cache#21-o7);
// } while (0)
rcu_assign_pointer(idp->top, p);
// ((&(&mnt_id_ida)->idr)->top): (typeof(*kmem_cache#21-o7) __force space *)(kmem_cache#21-o7)
// idp->layers: (&(&mnt_id_ida)->idr)->layers, layers: 1
idp->layers = layers;
// idp->layers: (&(&mnt_id_ida)->idr)->layers: 1
// idp: &(&mnt_id_ida)->idr, id: 0, pa, gfp_mask: 0, layer_idr: &(&mnt_id_ida)->idr
v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
- call sub_alloc()
- struct idr_layer
// sizeof(struct idr_layer): 1076 bytes
struct idr_layer {
int prefix; /* the ID prefix of this idr_layer */
// IDR_SIZE: 0x100
DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
// IDR_BITS: 8
struct idr_layer __rcu *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */
int layer; /* distance from leaf */
struct rcu_head rcu_head;
};
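The "sizeof(struct idr_layer): 1076 bytes" figure can be reproduced with a quick back-of-the-envelope sketch, assuming the 32-bit ARM layout used throughout these notes (4-byte pointers and longs, struct rcu_head holding two pointers):

```c
#include <stdio.h>

#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)            /* 256 child slots per layer */

int main(void)
{
	/* per-field sizes on 32-bit ARM (assumption of this sketch) */
	unsigned prefix   = 4;              /* int prefix                     */
	unsigned bitmap   = IDR_SIZE / 8;   /* 256-bit bitmap  = 32 bytes     */
	unsigned ary      = IDR_SIZE * 4;   /* 256 pointers    = 1024 bytes   */
	unsigned count    = 4;              /* int count                      */
	unsigned layer    = 4;              /* int layer                      */
	unsigned rcu_head = 8;              /* struct rcu_head = two pointers */

	printf("approx sizeof(struct idr_layer) = %u bytes\n",
	       prefix + bitmap + ary + count + layer + rcu_head);   /* 1076 */
	return 0;
}
```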
// ARM10C 20151107
// idp: &(&mnt_id_ida)->idr, id: 0, pa, gfp_mask: 0, layer_idr: &(&mnt_id_ida)->idr
static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
gfp_t gfp_mask, struct idr *layer_idr)
{
int n, m, sh;
struct idr_layer *p, *new;
int l, id, oid;
// *starting_id: 0
id = *starting_id;
// id: 0
restart:
// idp->top: (&(&mnt_id_ida)->idr)->top: kmem_cache#21-o7 (struct idr_layer)
p = idp->top;
// p: kmem_cache#21-o7 (struct idr_layer)
// idp->layers: (&(&mnt_id_ida)->idr)->layers: 1
l = idp->layers;
// l: 1
// l: 1
pa[l--] = NULL;
// pa[1]: NULL, l: 0
while (1) {
/*
* We run around this while until we reach the leaf node...
*/
// id: 0, IDR_BITS: 8, l: 0, IDR_MASK: 0xFF
n = (id >> (IDR_BITS*l)) & IDR_MASK;
// n: 0
// p->bitmap: (kmem_cache#21-o7)->bitmap (struct idr_layer), IDR_SIZE: 0x100, n: 0
// find_next_zero_bit((kmem_cache#21-o7)->bitmap (struct idr_layer), 0x100, 0): 0x100
m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
// m: 0x100
// m: 0x100, IDR_SIZE: 0x100
if (m == IDR_SIZE) {
/* no space available go back to previous layer. */
// l: 0
l++;
// l: 1
// id: 0
oid = id;
// oid: 0
// id: 0, IDR_BITS: 8, l: 1
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
// id: 0x100
/* if already at the top layer, we need to grow */
// id: 0x100, idp->layers: (&(&mnt_id_ida)->idr)->layers: 1, IDR_BITS: 8
if (id >= 1 << (idp->layers * IDR_BITS)) {
*starting_id = id;
return -EAGAIN;
}
p = pa[l];
BUG_ON(!p);
/* If we need to go up one layer, continue the
* loop; otherwise, restart from the top.
*/
sh = IDR_BITS * (l + 1);
if (oid >> sh == id >> sh)
continue;
else
goto restart;
}
if (m != n) {
sh = IDR_BITS*l;
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_IDR_BIT) || (id < 0))
return -ENOSPC;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
new = idr_layer_alloc(gfp_mask, layer_idr);
if (!new)
return -ENOMEM;
new->layer = l-1;
new->prefix = id & idr_layer_prefix_mask(new->layer);
rcu_assign_pointer(p->ary[m], new);
p->count++;
}
pa[l--] = p;
p = p->ary[m];
}
pa[l] = p;
return id;
}
- return id: 0
- return id: 0
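How sub_alloc() picks a slot at each level can also be looked at in isolation: at depth l, the next IDR_BITS-wide slice of id selects the array index. A small sketch with an arbitrary 3-layer id:

```c
#include <stdio.h>

#define IDR_BITS 8
#define IDR_MASK ((1 << IDR_BITS) - 1)

int main(void)
{
	int id = 0x1A2B3;                       /* arbitrary example id */

	/* with 3 layers, slices are taken from the most significant level down */
	for (int l = 2; l >= 0; l--) {
		int n = (id >> (IDR_BITS * l)) & IDR_MASK;
		printf("layer %d -> slot %d (0x%02X)\n", l, n, n);
	}
	return 0;
}
```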
// ARM10C 20151031
// &ida->idr: &(&mnt_id_ida)->idr, idr_id: 0, pa, 0, &ida->idr: &(&mnt_id_ida)->idr
static int idr_get_empty_slot(struct idr *idp, int starting_id,
struct idr_layer **pa, gfp_t gfp_mask,
struct idr *layer_idr)
{
struct idr_layer *p, *new;
int layers, v, id;
unsigned long flags;
// starting_id: 0
id = starting_id;
// id: 0
build_up:
// idp->top: (&(&mnt_id_ida)->idr)->top: NULL
p = idp->top;
// p: NULL
// idp->layers: (&(&mnt_id_ida)->idr)->layers: 0
layers = idp->layers;
// layers: 0
// p: NULL
if (unlikely(!p)) {
// gfp_mask: 0, layer_idr: &(&mnt_id_ida)->idr
// idr_layer_alloc(0, &(&mnt_id_ida)->idr): kmem_cache#21-o7, p: kmem_cache#21-o7
if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
return -ENOMEM;
// What idr_layer_alloc did:
// (&(&mnt_id_ida)->idr)->id_free: NULL
// (&(&mnt_id_ida)->idr)->id_free_cnt: 6
// (kmem_cache#21-o7)->ary[0]: NULL
// p->layer: (kmem_cache#21-o7)->layer
p->layer = 0;
// p->layer: (kmem_cache#21-o7)->layer: 0
// layers: 0
layers = 1;
// layers: 1
}
// 2015/10/31: stopped here
// 2015/11/07: resumed
/*
* Add a new layer to the top of the tree if the requested
* id is larger than the currently allocated space.
*/
// id: 0, layers: 1, idr_max(1): 255
while (id > idr_max(layers)) {
layers++;
if (!p->count) {
/* special case: if the tree is currently empty,
* then we grow the tree by moving the top node
* upwards.
*/
p->layer++;
WARN_ON_ONCE(p->prefix);
continue;
}
if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
*/
spin_lock_irqsave(&idp->lock, flags);
for (new = p; p && p != idp->top; new = p) {
p = p->ary[0];
new->ary[0] = NULL;
new->count = 0;
bitmap_clear(new->bitmap, 0, IDR_SIZE);
__move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
return -ENOMEM;
}
new->ary[0] = p;
new->count = 1;
new->layer = layers-1;
new->prefix = id & idr_layer_prefix_mask(new->layer);
if (bitmap_full(p->bitmap, IDR_SIZE))
__set_bit(0, new->bitmap);
p = new;
}
// idp->top: (&(&mnt_id_ida)->idr)->top, p: kmem_cache#21-o7
// __rcu_assign_pointer((&(&mnt_id_ida)->idr)->top, kmem_cache#21-o7, __rcu):
// do {
// smp_wmb();
// ((&(&mnt_id_ida)->idr)->top) = (typeof(*kmem_cache#21-o7) __force space *)(kmem_cache#21-o7);
// } while (0)
rcu_assign_pointer(idp->top, p);
// ((&(&mnt_id_ida)->idr)->top): (typeof(*kmem_cache#21-o7) __force space *)(kmem_cache#21-o7)
// idp->layers: (&(&mnt_id_ida)->idr)->layers, layers: 1
idp->layers = layers;
// idp->layers: (&(&mnt_id_ida)->idr)->layers: 1
// idp: &(&mnt_id_ida)->idr, id: 0, pa, gfp_mask: 0, layer_idr: &(&mnt_id_ida)->idr
v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
if (v == -EAGAIN)
goto build_up;
return(v);
}
- return ida_get_new_above()
// ARM10C 20151031
// &mnt_id_ida, mnt_id_start: 0, &mnt->mnt_id: &(kmem_cache#2-oX)->mnt_id
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
// MAX_IDR_LEVEL: 4
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct ida_bitmap *bitmap;
unsigned long flags;
// starting_id: 0, IDA_BITMAP_BITS: 1344
int idr_id = starting_id / IDA_BITMAP_BITS;
// idr_id: 0
// starting_id: 0, IDA_BITMAP_BITS: 1344
int offset = starting_id % IDA_BITMAP_BITS;
// offset: 0
int t, id;
restart:
/* get vacant slot */
// &ida->idr: &(&mnt_id_ida)->idr, idr_id: 0
t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
if (t < 0)
return t == -ENOMEM ? -EAGAIN : t;
if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
if (t != idr_id)
offset = 0;
idr_id = t;
/* if bitmap isn't there, create a new one */
bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
if (!bitmap) {
spin_lock_irqsave(&ida->idr.lock, flags);
bitmap = ida->free_bitmap;
ida->free_bitmap = NULL;
spin_unlock_irqrestore(&ida->idr.lock, flags);
if (!bitmap)
return -EAGAIN;
memset(bitmap, 0, sizeof(struct ida_bitmap));
rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
(void *)bitmap);
pa[0]->count++;
}
/* lookup for empty slot */
t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
if (t == IDA_BITMAP_BITS) {
/* no empty slot after offset, continue to the next chunk */
idr_id++;
offset = 0;
goto restart;
}
id = idr_id * IDA_BITMAP_BITS + t;
if (id >= MAX_IDR_BIT)
return -ENOSPC;
__set_bit(t, bitmap->bitmap);
-
IDA_BITMAP_BITS: 42 * 4 * 8 = 1344
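Taking the 1344 value above as given, the chunk/offset split done by ida_get_new_above() (and the final id it reconstructs) can be checked with a small sketch using an arbitrary starting_id:

```c
#include <stdio.h>

#define IDA_BITMAP_BITS 1344   /* value from the note above (42 longs * 4 bytes * 8 bits) */

int main(void)
{
	int starting_id = 1500;                        /* arbitrary example */
	int idr_id  = starting_id / IDA_BITMAP_BITS;   /* which ida_bitmap chunk: 1   */
	int offset  = starting_id % IDA_BITMAP_BITS;   /* bit offset inside it: 156   */
	int t       = offset;                          /* pretend the first free bit is right there */
	int id      = idr_id * IDA_BITMAP_BITS + t;    /* reconstructed id: 1500      */

	printf("idr_id=%d offset=%d -> id=%d\n", idr_id, offset, id);
	return 0;
}
```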
-
__set_bit()
// ARM10C 20140118
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
// set the bit selected by nr to 1
*p |= mask;
}
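The same word/bit split can be written out in userspace; BIT_MASK and BIT_WORD below follow the kernel definitions, with BITS_PER_LONG fixed to 32 as an assumption for 32-bit ARM:

```c
#include <stdio.h>

#define BITS_PER_LONG 32                       /* 32-bit ARM assumption */
#define BIT_MASK(nr)  (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

static void set_bit_sketch(int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = addr + BIT_WORD(nr);

	*p |= mask;                            /* non-atomic, like __set_bit() */
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };    /* 64 bits */

	set_bit_sketch(0, bitmap);             /* word 0, bit 0 */
	set_bit_sketch(37, bitmap);            /* word 1, bit 5 */
	printf("word0=0x%08lx word1=0x%08lx\n", bitmap[0], bitmap[1]);
	return 0;
}
```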
// ARM10C 20151031
// &mnt_id_ida, mnt_id_start: 0, &mnt->mnt_id: &(kmem_cache#2-oX)->mnt_id
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
// MAX_IDR_LEVEL: 4
struct idr_layer *pa[MAX_IDR_LEVEL + 1];
struct ida_bitmap *bitmap;
unsigned long flags;
// starting_id: 0, IDA_BITMAP_BITS: 1344
int idr_id = starting_id / IDA_BITMAP_BITS;
// idr_id: 0
// starting_id: 0, IDA_BITMAP_BITS: 1344
int offset = starting_id % IDA_BITMAP_BITS;
// offset: 0
int t, id;
restart:
/* get vacant slot */
// &ida->idr: &(&mnt_id_ida)->idr, idr_id: 0
t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
if (t < 0)
return t == -ENOMEM ? -EAGAIN : t;
if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
return -ENOSPC;
if (t != idr_id)
offset = 0;
idr_id = t;
/* if bitmap isn't there, create a new one */
bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
if (!bitmap) {
spin_lock_irqsave(&ida->idr.lock, flags);
bitmap = ida->free_bitmap;
ida->free_bitmap = NULL;
spin_unlock_irqrestore(&ida->idr.lock, flags);
if (!bitmap)
return -EAGAIN;
memset(bitmap, 0, sizeof(struct ida_bitmap));
rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
(void *)bitmap);
pa[0]->count++;
}
/* lookup for empty slot */
t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
if (t == IDA_BITMAP_BITS) {
/* no empty slot after offset, continue to the next chunk */
idr_id++;
offset = 0;
goto restart;
}
id = idr_id * IDA_BITMAP_BITS + t;
if (id >= MAX_IDR_BIT)
return -ENOSPC;
__set_bit(t, bitmap->bitmap);
if (++bitmap->nr_busy == IDA_BITMAP_BITS)
idr_mark_full(pa, idr_id);
*p_id = id;
/* Each leaf node can handle nearly a thousand slots and the
* whole idea of ida is to have small memory foot print.
* Throw away extra resources one by one after each successful
* allocation.
*/
if (ida->idr.id_free_cnt || ida->free_bitmap) {
struct idr_layer *p = get_from_free_list(&ida->idr);
if (p)
kmem_cache_free(idr_layer_cache, p);
- 1st log
97a9fed..67b58f9 master -> origin/master
Updating 97a9fed..67b58f9
Fast-forward
arch/arm/include/asm/bitops.h | 2 ++
arch/arm/lib/findbit.S | 2 ++
include/linux/idr.h | 7 ++++++-
include/linux/kernel.h | 1 +
include/linux/rcupdate.h | 8 ++++++++
lib/idr.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
6 files changed, 72 insertions(+), 1 deletion(-)