From 498293bfbfcd1358114c74aa426d4afe774a7d05 Mon Sep 17 00:00:00 2001
From: Didrik Lundberg
Date: Thu, 6 Aug 2015 10:30:29 +0200
Subject: [PATCH] Functions which use curr_vm now make a function call to
 determine which core they are running on instead of referring to a shared
 curr_vm

---
 core/hw/cpu/arm/arm_common/arm_pt.S        |  20 +-
 core/hw/cpu/arm/arm_common/start.c         |  15 +-
 core/hw/ld/virt-hyper.ld                   |  28 +-
 core/hypervisor/dmmu.c                     | 708 +++++++++++----------
 core/hypervisor/dmmu.h                     |   4 +-
 core/hypervisor/handlers.c                 | 188 +++---
 core/hypervisor/hypercalls/hyp_cpu.c       |  16 +-
 core/hypervisor/hypercalls/hyp_dmmu.c      |  51 +-
 core/hypervisor/hypercalls/hyp_interrupt.c |  37 +-
 core/hypervisor/hypercalls/hyp_mmu.c       |  44 +-
 core/hypervisor/hypercalls/hyp_rpc.c       |  36 +-
 core/hypervisor/hypercalls/hypercalls.c    |  57 +-
 core/hypervisor/init_slave.c               |  36 +-
 core/hypervisor/linux/linux_init.c         |  37 +-
 14 files changed, 690 insertions(+), 587 deletions(-)

diff --git a/core/hw/cpu/arm/arm_common/arm_pt.S b/core/hw/cpu/arm/arm_common/arm_pt.S
index 0e4d52c..554a791 100755
--- a/core/hw/cpu/arm/arm_common/arm_pt.S
+++ b/core/hw/cpu/arm/arm_common/arm_pt.S
@@ -1,9 +1,8 @@
 /*
  * ARM pagetable functions including the initial setup
  */
-	.global arm_setup_initial_pt
-	.global arm_setup_initial_slave_pt
+	.global arm_setup_initial_pt_slave
 	.global arm_reset_initial_pt

 	.code 32
@@ -52,7 +51,11 @@ arm_setup_initial_pt_slave:
 	 * later). */

-	mov r0, =(__hyper_pt_start_slave__ + HAL_OFFSET)
+	/*mov r0, = #(__hyper_pt_start_slave__ + HAL_OFFSET)*/
+	ldr r0, =__hyper_pt_start_slave__
+	ldr r1, =HAL_OFFSET
+	add r0, r0, r1
+
 	mov r1, #0
 	add r2, r0, #0x4000
@@ -68,7 +71,10 @@ arm_setup_initial_pt_slave:

 arm_reset_initial_pt:
-	ldr r4, = (__hyper_pt_start__ + HAL_OFFSET)
+	ldr r4, =__hyper_pt_start__
+	ldr r1, =HAL_OFFSET
+	add r4, r4, r1
+
 	mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
 	bx lr
@@ -78,7 +84,11 @@ arm_setup_initial_pt:
 	 */

-	mov r0, =(__hyper_pt_start__ + HAL_OFFSET)
+	/* mov r0, = #(__hyper_pt_start__ + HAL_OFFSET) */
+	ldr r0, =__hyper_pt_start__
+	ldr r1, =HAL_OFFSET
+	add r0, r0, r1
+
 	mov r1, #0
 	add r2, r0, #0x4000

diff --git a/core/hw/cpu/arm/arm_common/start.c b/core/hw/cpu/arm/arm_common/start.c
index a21a2a8..1acbe7a 100644
--- a/core/hw/cpu/arm/arm_common/start.c
+++ b/core/hw/cpu/arm/arm_common/start.c
@@ -2,16 +2,17 @@
 #include "guest_blob.h"
 #include 

-extern virtual_machine *curr_vm;
+extern virtual_machine* get_curr_vm();

 void start(){
-	uint32_t r3 = curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[3];
-	uint32_t r4 = curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[4];
-	uint32_t r5 = curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[5];
-	uint32_t r6 = curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[6];
-	addr_t start = curr_vm->config->firmware->vstart + curr_vm->config->guest_entry_offset;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t r3 = _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[3];
+	uint32_t r4 = _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[4];
+	uint32_t r5 = _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[5];
+	uint32_t r6 = _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[6];
+	addr_t start = _curr_vm->config->firmware->vstart + _curr_vm->config->guest_entry_offset;
 #ifdef LINUX
-	start = curr_vm->config->firmware->pstart + curr_vm->config->guest_entry_offset;
+	start = _curr_vm->config->firmware->pstart + _curr_vm->config->guest_entry_offset;
 #endif
 	printf("Branching to address: %x\n", start);

diff --git a/core/hw/ld/virt-hyper.ld b/core/hw/ld/virt-hyper.ld
index b29d8b1..1fe824a 100644
--- 
a/core/hw/ld/virt-hyper.ld +++ b/core/hw/ld/virt-hyper.ld @@ -71,12 +71,7 @@ SECTIONS /* Heap: up to 104 KiB */ __hyper_heap_start__ = .; . = 0xf0100000 - 1024 * (64 + 32 + 8); - __hyper_heap_end__ = .; - - /* Primary stack (for core 0) */ - __hyper_stack_bottom__ = .; - . += 1024 * 8; - __hyper_stack_top__ = .; + __hyper_heap_end__ = .; /* Guest (running on core 0) */ __hyper_guest_start__ = .; @@ -89,25 +84,30 @@ SECTIONS . += 1024 * 64; __hyper_pt_end__ = .; - /* Secondary stack (for core 1) + /* Primary stack (for core 0) TODO: Moved this to here, from between Heap and Guest above. */ + __hyper_stack_bottom__ = .; + . += 1024 * 8; + __hyper_stack_top__ = .; + + /* Secondary stack (for core 1) */ __hyper_stack_bottom_core_1__ = .; . += 1024 * 8; - __hyper_stack_top_core_1__ = .; */ + __hyper_stack_top_core_1__ = .; - /* Tertiary stack (for core 2) + /* Tertiary stack (for core 2) */ __hyper_stack_bottom_core_2__ = .; . += 1024 * 8; - __hyper_stack_top_core_2__ = .; + __hyper_stack_top_core_2__ = .; - /* Quaternary stack (for core 3) + /* Quaternary stack (for core 3) */ __hyper_stack_bottom_core_3__ = .; . += 1024 * 8; - __hyper_stack_top_core_3__ = .; */ + __hyper_stack_top_core_3__ = .; - /* Secondary (slave) PT memory (only needed to run guests on different cores) + /* Secondary (slave) PT memory (only needed to run guests on different cores) */ __hyper_pt_start_slave__ = .; . += 1024 * 64; - __hyper_pt_end_slave__ = .; */ + __hyper_pt_end_slave__ = .; __hyper_end__ = .; diff --git a/core/hypervisor/dmmu.c b/core/hypervisor/dmmu.c index 3fe1699..8450eb2 100644 --- a/core/hypervisor/dmmu.c +++ b/core/hypervisor/dmmu.c @@ -6,7 +6,6 @@ // DEBUG FLAGS #define DEBUG_DMMU_MMU_LEVEL 1 -extern virtual_machine *curr_vm; extern uint32_t *flpt_va; #if 1 @@ -78,38 +77,47 @@ void dmmu_init() } BOOL guest_pa_range_checker(pa, size) { - // TODO: we are not managing the spatial isolation with the TRUSTED MODE - uint32_t guest_start_pa = curr_vm->config->firmware->pstart; - /*Added 1MB to range check, Last +1MB after guest physical address is reserved for L1PT*/ - uint32_t guest_end_pa = curr_vm->config->firmware->pstart + curr_vm->config->firmware->psize + SECTION_SIZE; - if (!((pa >= (guest_start_pa)) && (pa + size <= guest_end_pa))) - return FALSE; - return TRUE; + //TODO: we are not managing the spatial isolation with the TRUSTED MODE + virtual_machine* _curr_vm = get_curr_vm(); + uint32_t guest_start_pa = _curr_vm->config->firmware->pstart; + /*Added 1 MiB to range check, Last +1 MiB after guest physical address is reserved for L1PT*/ + uint32_t guest_end_pa = _curr_vm->config->firmware->pstart + _curr_vm->config->firmware->psize + SECTION_SIZE; + + if (!((pa >= (guest_start_pa)) && (pa + size <= guest_end_pa))){ + return FALSE; + } + + return TRUE; } BOOL guest_inside_always_cached_region(pa, size) { - uint32_t guest_pt_start_pa = curr_vm->config->firmware->pstart + curr_vm->config->always_cached_offset; - uint32_t guest_pt_end_pa = guest_pt_start_pa + curr_vm->config->always_cached_size; - if (!((pa >= (guest_pt_start_pa)) && (pa + size <= guest_pt_end_pa))) - return FALSE; + virtual_machine* _curr_vm = get_curr_vm(); + uint32_t guest_pt_start_pa = _curr_vm->config->firmware->pstart + _curr_vm->config->always_cached_offset; + uint32_t guest_pt_end_pa = guest_pt_start_pa + _curr_vm->config->always_cached_size; + + if (!((pa >= (guest_pt_start_pa)) && (pa + size <= guest_pt_end_pa))){ + return FALSE; + } - return TRUE; + return TRUE; } BOOL guest_intersect_always_cached_region(pa, size) { - 
uint32_t guest_pt_start_pa = curr_vm->config->firmware->pstart + curr_vm->config->always_cached_offset; - uint32_t guest_pt_end_pa = guest_pt_start_pa + curr_vm->config->always_cached_size; - if ((guest_pt_start_pa <= pa) && (guest_pt_end_pa > pa)){ - return TRUE; - } - if ((guest_pt_start_pa <= pa+size) && (guest_pt_end_pa >= pa+size)){ - return TRUE; - } - if ((guest_pt_start_pa >= pa) && (guest_pt_end_pa <= pa+size)){ - return TRUE; - } + virtual_machine* _curr_vm = get_curr_vm(); + uint32_t guest_pt_start_pa = _curr_vm->config->firmware->pstart + _curr_vm->config->always_cached_offset; + uint32_t guest_pt_end_pa = guest_pt_start_pa + _curr_vm->config->always_cached_size; + + if ((guest_pt_start_pa <= pa) && (guest_pt_end_pa > pa)){ + return TRUE; + } + if ((guest_pt_start_pa <= pa+size) && (guest_pt_end_pa >= pa+size)){ + return TRUE; + } + if ((guest_pt_start_pa >= pa) && (guest_pt_end_pa <= pa+size)){ + return TRUE; + } - return FALSE; + return FALSE; } /* ------------------------------------------------------------------- @@ -117,315 +125,316 @@ BOOL guest_intersect_always_cached_region(pa, size) { -------------------------------------------------------------------*/ uint32_t l1PT_checker(uint32_t l1_desc) { - l1_pt_t *pt = (l1_pt_t *) (&l1_desc) ; - dmmu_entry_t *bft_entry_pt = get_bft_entry_by_block_idx(PT_PA_TO_PH_BLOCK(pt->addr)); - - uint32_t err_flag = SUCCESS_MMU; + l1_pt_t *pt = (l1_pt_t *) (&l1_desc) ; + dmmu_entry_t *bft_entry_pt = get_bft_entry_by_block_idx(PT_PA_TO_PH_BLOCK(pt->addr)); + + uint32_t err_flag = SUCCESS_MMU; + + if ((pt->addr & 0b10) == 2){ + err_flag = ERR_MMU_L2_BASE_OUT_OF_RANGE; + } else if (bft_entry_pt->type != PAGE_INFO_TYPE_L2PT) { + err_flag = ERR_MMU_IS_NOT_L2_PT; + } else if (bft_entry_pt->refcnt >= (MAX_30BIT - 4096)) { + err_flag = ERR_MMU_REF_OVERFLOW; + } else if (pt->pxn) { + err_flag = ERR_MMU_AP_UNSUPPORTED; + } else { + return SUCCESS_MMU; + } - if ((pt->addr & 0b10) == 2){ - err_flag = ERR_MMU_L2_BASE_OUT_OF_RANGE; - } else if (bft_entry_pt->type != PAGE_INFO_TYPE_L2PT) { + #if DEBUG_DMMU_MMU_LEVEL > 2 + printf("l1PT_checker failed: %x %d\n", l1_desc, err_flag); + #endif - err_flag = ERR_MMU_IS_NOT_L2_PT; - } - else if (bft_entry_pt->refcnt >= (MAX_30BIT - 4096)) { - err_flag = ERR_MMU_REF_OVERFLOW; - } - else if (pt->pxn) { - err_flag = ERR_MMU_AP_UNSUPPORTED; - } - else { - return SUCCESS_MMU; - } -#if DEBUG_DMMU_MMU_LEVEL > 2 - printf("l1PT_checker failed: %x %d\n", l1_desc, err_flag); -#endif - return err_flag; + return err_flag; } uint32_t l1Sec_checker(uint32_t l1_desc, addr_t l1_base_pa_add) { - uint32_t ap; - uint32_t err_flag = SUCCESS_MMU; // to be set when one of the pages in the section is not a data page - uint32_t sec_idx; + uint32_t ap; + uint32_t err_flag = SUCCESS_MMU; // to be set when one of the pages in the section is not a data page + uint32_t sec_idx; - l1_sec_t *sec = (l1_sec_t *) (&l1_desc) ; - ap = GET_L1_AP(sec); + l1_sec_t *sec = (l1_sec_t *) (&l1_desc) ; + ap = GET_L1_AP(sec); - // Cacheability attribute check -#ifdef CHECK_PAGETABLES_CACHEABILITY - if (guest_intersect_always_cached_region(START_PA_OF_SECTION(sec), SECTION_SIZE)) { - if (sec->c != 1) - return ERR_MMU_NOT_CACHEABLE; - } -#endif + // Cacheability attribute check + #ifdef CHECK_PAGETABLES_CACHEABILITY + if (guest_intersect_always_cached_region(START_PA_OF_SECTION(sec), SECTION_SIZE)) { + if (sec->c != 1){ + return ERR_MMU_NOT_CACHEABLE; + } + } + #endif - if(sec->secIndic == 1) // l1_desc is a super section descriptor - err_flag = 
ERR_MMU_SUPERSECTION; - // TODO: (ap != 1) condition need to be added to proof of API - else if((ap != 1) && (ap != 2) && (ap != 3)) - err_flag = ERR_MMU_AP_UNSUPPORTED; - // TODO: Check also that the guest can not read into the hypervisor memory - // TODO: in general we need also to prevent that it can read from the trusted component, thus identifying a more fine grade control - // e.g. using domain - // TODO: e.g. if you can read in user mode and the domain is the guest user domain or kernel domain then the pa must be in the guest memory - else if (ap == 3) { - uint32_t max_kernel_ac = (curr_vm->config->guest_modes[HC_GM_KERNEL]->domain_ac | curr_vm->config->guest_modes[HC_GM_TASK]->domain_ac); - uint32_t page_domain_mask = (0b11 << (2 * sec->dom)); - uint32_t kernel_ac = max_kernel_ac & page_domain_mask; - if (kernel_ac != 0) { - if (!guest_pa_range_checker(START_PA_OF_SECTION(sec), SECTION_SIZE)) - err_flag = ERR_MMU_OUT_OF_RANGE_PA; - } + if(sec->secIndic == 1){ // l1_desc is a super section descriptor + err_flag = ERR_MMU_SUPERSECTION; - for(sec_idx = 0; sec_idx < 256; sec_idx++) - { - uint32_t ph_block_in_sec = PA_TO_PH_BLOCK(START_PA_OF_SECTION(sec)) | (sec_idx); // Address of a page in the section - dmmu_entry_t *bft_entry_in_sec = get_bft_entry_by_block_idx(ph_block_in_sec); - - if(bft_entry_in_sec->type != PAGE_INFO_TYPE_DATA) - { - err_flag = ERR_MMU_PH_BLOCK_NOT_WRITABLE; - } - // if one of the L1 page table's pages is in the section - if( ((((uint32_t)ph_block_in_sec) << 12) & L1_BASE_MASK) == l1_base_pa_add ) - { - err_flag = ERR_MMU_NEW_L1_NOW_WRITABLE; - } - if(bft_entry_in_sec->refcnt >= (MAX_30BIT - 4096)) - { - err_flag = ERR_MMU_REF_OVERFLOW; - } - } - } - if(err_flag != SUCCESS_MMU) { -#if DEBUG_DMMU_MMU_LEVEL > 2 + //TODO: (ap != 1) condition need to be added to proof of API + } else if((ap != 1) && (ap != 2) && (ap != 3)){ + err_flag = ERR_MMU_AP_UNSUPPORTED; + + // TODO: Check also that the guest can not read into the hypervisor memory + // TODO: in general we need also to prevent that it can read from the trusted component, thus identifying a more fine grade control + // e.g. using domain + // TODO: e.g. 
if you can read in user mode and the domain is the guest user domain or kernel domain then the pa must be in the guest memory + } else if (ap == 3) { + virtual_machine* _curr_vm = get_curr_vm(); + uint32_t max_kernel_ac = (_curr_vm->config->guest_modes[HC_GM_KERNEL]->domain_ac | _curr_vm->config->guest_modes[HC_GM_TASK]->domain_ac); + uint32_t page_domain_mask = (0b11 << (2 * sec->dom)); + uint32_t kernel_ac = max_kernel_ac & page_domain_mask; + if (kernel_ac != 0) { + if (!guest_pa_range_checker(START_PA_OF_SECTION(sec), SECTION_SIZE)){ + err_flag = ERR_MMU_OUT_OF_RANGE_PA; + } + } - printf("l1Sec_checker failed: %x %x %d\n", l1_desc, l1_base_pa_add, err_flag); -#endif - return err_flag; - } + for (sec_idx = 0; sec_idx < 256; sec_idx++){ + uint32_t ph_block_in_sec = PA_TO_PH_BLOCK(START_PA_OF_SECTION(sec)) | (sec_idx); // Address of a page in the section + dmmu_entry_t *bft_entry_in_sec = get_bft_entry_by_block_idx(ph_block_in_sec); + if(bft_entry_in_sec->type != PAGE_INFO_TYPE_DATA){ + err_flag = ERR_MMU_PH_BLOCK_NOT_WRITABLE; + } + // if one of the L1 page table's pages is in the section + if( ((((uint32_t)ph_block_in_sec) << 12) & L1_BASE_MASK) == l1_base_pa_add ){ + err_flag = ERR_MMU_NEW_L1_NOW_WRITABLE; + } + if(bft_entry_in_sec->refcnt >= (MAX_30BIT - 4096)){ + err_flag = ERR_MMU_REF_OVERFLOW; + } + } + } - return err_flag; -} + if(err_flag != SUCCESS_MMU) { + #if DEBUG_DMMU_MMU_LEVEL > 2 + printf("l1Sec_checker failed: %x %x %d\n", l1_desc, l1_base_pa_add, err_flag); + #endif + return err_flag; + } -uint32_t l1Desc_validityChecker_dispatcher(uint32_t l1_type, uint32_t l1_desc, addr_t pgd) -{ - if(l1_type == 0) - return SUCCESS_MMU; - if (l1_type == 1) - return l1PT_checker(l1_desc); - if (l1_type == 2 ) - return l1Sec_checker(l1_desc, pgd); - return ERR_MMU_SUPERSECTION; + return err_flag; } -void create_L1_refs_update(addr_t l1_base_pa_add) -{ - int l1_idx, sec_idx; - for(l1_idx = 0; l1_idx < 4096; l1_idx++) - { - uint32_t l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned - uint32_t l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, curr_vm->config); - uint32_t l1_desc = *((uint32_t *) l1_desc_va_add); - uint32_t l1_type = l1_desc & DESC_TYPE_MASK; - if(l1_type == 1) - { - l1_pt_t *pt = (l1_pt_t *) (&l1_desc) ; - dmmu_entry_t *bft_entry_pt = get_bft_entry_by_block_idx(PT_PA_TO_PH_BLOCK(pt->addr)); - bft_entry_pt->refcnt += 1; +uint32_t l1Desc_validityChecker_dispatcher(uint32_t l1_type, uint32_t l1_desc, addr_t pgd){ + if(l1_type == 0){ + return SUCCESS_MMU; } - else if(l1_type == 2) - { - l1_sec_t *sec = (l1_sec_t *) (&l1_desc) ; - uint32_t ap = GET_L1_AP(sec); - if(ap == 3) - { - for(sec_idx = 0; sec_idx < 256; sec_idx++) - { - uint32_t ph_block = PA_TO_PH_BLOCK(START_PA_OF_SECTION(sec)) | (sec_idx); - dmmu_entry_t *bft_entry = get_bft_entry_by_block_idx(ph_block); - bft_entry->refcnt += 1; - } - } + if (l1_type == 1){ + return l1PT_checker(l1_desc); } - } + if (l1_type == 2 ){ + return l1Sec_checker(l1_desc, pgd); + } + return ERR_MMU_SUPERSECTION; } -#define DEBUG_PG_CONTENT 1 -int dmmu_create_L1_pt(addr_t l1_base_pa_add) -{ - uint32_t l1_idx, pt_idx; - uint32_t l1_desc; - uint32_t l1_desc_va_add; - uint32_t l1_desc_pa_add; - uint32_t l1_type; - uint32_t ap; - uint32_t ph_block; - int i; - - /*Check that the guest does not override the physical addresses outside its range*/ - // TODO, where we take the guest assigned physical memory? 
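+	//Example with hypothetical numbers: for a guest with pstart = 0x81000000
+	//and psize = 0x07000000, guest_pa_range_checker(l1_base_pa_add, 4*PAGE_SIZE)
+	//accepts l1_base_pa_add >= 0x81000000 with l1_base_pa_add + 16 KiB
+	//<= 0x88100000, since the guest is allotted [pstart, pstart + psize)
+	//plus one extra SECTION_SIZE (1 MiB) reserved for L1 page tables.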
- if (!guest_pa_range_checker(l1_base_pa_add, 4*PAGE_SIZE)) - return ERR_MMU_OUT_OF_RANGE_PA; -#ifdef CHECK_PAGETABLES_CACHEABILITY - if (!guest_inside_always_cached_region(l1_base_pa_add, 4*PAGE_SIZE)) - return ERR_MMU_OUT_OF_CACHEABLE_RANGE; -#endif +void create_L1_refs_update(addr_t l1_base_pa_add){ + int l1_idx, sec_idx; + virtual_machine* _curr_vm = get_curr_vm(); + for(l1_idx = 0; l1_idx < 4096; l1_idx++){ + uint32_t l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned + uint32_t l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, _curr_vm->config); + uint32_t l1_desc = *((uint32_t *) l1_desc_va_add); + uint32_t l1_type = l1_desc & DESC_TYPE_MASK; + if(l1_type == 1){ + l1_pt_t *pt = (l1_pt_t *) (&l1_desc); + dmmu_entry_t *bft_entry_pt = get_bft_entry_by_block_idx(PT_PA_TO_PH_BLOCK(pt->addr)); + bft_entry_pt->refcnt += 1; + } else if(l1_type == 2) { + l1_sec_t *sec = (l1_sec_t *) (&l1_desc); + uint32_t ap = GET_L1_AP(sec); + if(ap == 3){ + for(sec_idx = 0; sec_idx < 256; sec_idx++){ + uint32_t ph_block = PA_TO_PH_BLOCK(START_PA_OF_SECTION(sec)) | (sec_idx); + dmmu_entry_t *bft_entry = get_bft_entry_by_block_idx(ph_block); + bft_entry->refcnt += 1; + } + } + } + } +} - /* 16 KiB aligned ? */ - if (l1_base_pa_add != (l1_base_pa_add & 0xFFFFC000)) - return ERR_MMU_L1_BASE_IS_NOT_16KB_ALIGNED; +#define DEBUG_PG_CONTENT 1 +int dmmu_create_L1_pt(addr_t l1_base_pa_add){ + uint32_t l1_idx, pt_idx; + uint32_t l1_desc; + uint32_t l1_desc_va_add; + uint32_t l1_desc_pa_add; + uint32_t l1_type; + uint32_t ap; + uint32_t ph_block; + int i; + + virtual_machine* _curr_vm = get_curr_vm(); + + /*Check that the guest does not override the physical addresses outside its range*/ + // TODO, where we take the guest assigned physical memory? + if (!guest_pa_range_checker(l1_base_pa_add, 4*PAGE_SIZE)){ + return ERR_MMU_OUT_OF_RANGE_PA; + } - ph_block = PA_TO_PH_BLOCK(l1_base_pa_add); + #ifdef CHECK_PAGETABLES_CACHEABILITY + if (!guest_inside_always_cached_region(l1_base_pa_add, 4*PAGE_SIZE)){ + return ERR_MMU_OUT_OF_CACHEABLE_RANGE; + } + #endif - if(get_bft_entry_by_block_idx(ph_block)->type == PAGE_INFO_TYPE_L1PT && - get_bft_entry_by_block_idx(ph_block+1)->type == PAGE_INFO_TYPE_L1PT && - get_bft_entry_by_block_idx(ph_block+2)->type == PAGE_INFO_TYPE_L1PT && - get_bft_entry_by_block_idx(ph_block+3)->type == PAGE_INFO_TYPE_L1PT) { - return ERR_MMU_ALREADY_L1_PT; - } + /* 16 KiB aligned ? 
*/ + if (l1_base_pa_add != (l1_base_pa_add & 0xFFFFC000)){ + return ERR_MMU_L1_BASE_IS_NOT_16KB_ALIGNED; + } - /* try to allocate a PT in physical address */ + ph_block = PA_TO_PH_BLOCK(l1_base_pa_add); - if(get_bft_entry_by_block_idx(ph_block)->type != PAGE_INFO_TYPE_DATA || - get_bft_entry_by_block_idx(ph_block+1)->type != PAGE_INFO_TYPE_DATA || - get_bft_entry_by_block_idx(ph_block+2)->type != PAGE_INFO_TYPE_DATA || - get_bft_entry_by_block_idx(ph_block+3)->type != PAGE_INFO_TYPE_DATA) - return ERR_MMU_PT_REGION; + if(get_bft_entry_by_block_idx(ph_block)->type == PAGE_INFO_TYPE_L1PT && + get_bft_entry_by_block_idx(ph_block+1)->type == PAGE_INFO_TYPE_L1PT && + get_bft_entry_by_block_idx(ph_block+2)->type == PAGE_INFO_TYPE_L1PT && + get_bft_entry_by_block_idx(ph_block+3)->type == PAGE_INFO_TYPE_L1PT) { + return ERR_MMU_ALREADY_L1_PT; + } - if(get_bft_entry_by_block_idx(ph_block)->refcnt != 0 || - get_bft_entry_by_block_idx(ph_block+1)->refcnt != 0 || - get_bft_entry_by_block_idx(ph_block+2)->refcnt != 0 || - get_bft_entry_by_block_idx(ph_block+3)->refcnt != 0) - return ERR_MMU_REFERENCED; + /* try to allocate a PT in physical address */ + if(get_bft_entry_by_block_idx(ph_block)->type != PAGE_INFO_TYPE_DATA || + get_bft_entry_by_block_idx(ph_block+1)->type != PAGE_INFO_TYPE_DATA || + get_bft_entry_by_block_idx(ph_block+2)->type != PAGE_INFO_TYPE_DATA || + get_bft_entry_by_block_idx(ph_block+3)->type != PAGE_INFO_TYPE_DATA){ + return ERR_MMU_PT_REGION; + } + if(get_bft_entry_by_block_idx(ph_block)->refcnt != 0 || + get_bft_entry_by_block_idx(ph_block+1)->refcnt != 0 || + get_bft_entry_by_block_idx(ph_block+2)->refcnt != 0 || + get_bft_entry_by_block_idx(ph_block+3)->refcnt != 0){ + return ERR_MMU_REFERENCED; + } - // copies the reserved virtual addresses from the master page table - // each virtual page non-unmapped in the master page table is considered reserved - for (l1_idx = 0; l1_idx < 4096; l1_idx++) { - l1_desc = *(flpt_va + l1_idx); - if (L1_TYPE(l1_desc) != UNMAPPED_ENTRY) { - l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned - l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, curr_vm->config); - *((uint32_t *) l1_desc_va_add) = l1_desc; - } - } + //Copies the reserved virtual addresses from the master page table. 
+ //Each virtual page non-unmapped in the master page table is considered reserved + for (l1_idx = 0; l1_idx < 4096; l1_idx++) { + l1_desc = *(flpt_va + l1_idx); + if (L1_TYPE(l1_desc) != UNMAPPED_ENTRY) { + l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned + l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, _curr_vm->config); + *((uint32_t *) l1_desc_va_add) = l1_desc; + } + } - uint32_t sanity_checker = SUCCESS_MMU; - for(l1_idx = 0; l1_idx < 4096; l1_idx++) - { - l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned - l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, curr_vm->config); - l1_desc = *((uint32_t *) l1_desc_va_add); - l1_type = l1_desc & DESC_TYPE_MASK; + uint32_t sanity_checker = SUCCESS_MMU; + for(l1_idx = 0; l1_idx < 4096; l1_idx++){ + l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned + l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, _curr_vm->config); + l1_desc = *((uint32_t *) l1_desc_va_add); + l1_type = l1_desc & DESC_TYPE_MASK; -#if DEBUG_DMMU_MMU_LEVEL > 3 - if(l1_desc != 0x0) - printf("pg %x %x \n", l1_idx, l1_desc); -#endif + #if DEBUG_DMMU_MMU_LEVEL > 3 + if(l1_desc != 0x0){ + printf("pg %x %x \n", l1_idx, l1_desc); + } + #endif - uint32_t current_check = (l1Desc_validityChecker_dispatcher(l1_type, l1_desc, l1_base_pa_add)); + uint32_t current_check = (l1Desc_validityChecker_dispatcher(l1_type, l1_desc, l1_base_pa_add)); - if(current_check != SUCCESS_MMU){ -#if DEBUG_DMMU_MMU_LEVEL > 1 - printf("L1Create: failed to validate the entry %d: %d \n", l1_idx, current_check); -#endif - if (sanity_checker == SUCCESS_MMU) - sanity_checker = current_check; - } + if(current_check != SUCCESS_MMU){ + #if DEBUG_DMMU_MMU_LEVEL > 1 + printf("L1Create: failed to validate the entry %d: %d \n", l1_idx, current_check); + #endif + if (sanity_checker == SUCCESS_MMU){ + sanity_checker = current_check; + } + } } - if(sanity_checker != SUCCESS_MMU) - return sanity_checker; + if(sanity_checker != SUCCESS_MMU){ + return sanity_checker; + } - create_L1_refs_update(l1_base_pa_add); - get_bft_entry_by_block_idx(ph_block)->type = PAGE_INFO_TYPE_L1PT; - get_bft_entry_by_block_idx(ph_block+1)->type = PAGE_INFO_TYPE_L1PT; - get_bft_entry_by_block_idx(ph_block+2)->type = PAGE_INFO_TYPE_L1PT; - get_bft_entry_by_block_idx(ph_block+3)->type = PAGE_INFO_TYPE_L1PT; + create_L1_refs_update(l1_base_pa_add); + get_bft_entry_by_block_idx(ph_block)->type = PAGE_INFO_TYPE_L1PT; + get_bft_entry_by_block_idx(ph_block+1)->type = PAGE_INFO_TYPE_L1PT; + get_bft_entry_by_block_idx(ph_block+2)->type = PAGE_INFO_TYPE_L1PT; + get_bft_entry_by_block_idx(ph_block+3)->type = PAGE_INFO_TYPE_L1PT; - return SUCCESS_MMU; + return SUCCESS_MMU; } /* ------------------------------------------------------------------- * Freeing a given L1 page table * ------------------------------------------------------------------- */ -int dmmu_unmap_L1_pt(addr_t l1_base_pa_add) -{ - uint32_t l1_idx, pt_idx, sec_idx; - uint32_t l1_desc; - uint32_t l1_desc_va_add; - uint32_t l1_desc_pa_add; - uint32_t l1_type; - uint32_t ap; - uint32_t ph_block; - addr_t curr_l1_base_pa_add; - int i; - - // checking to see - - /*Check that the guest does not override the physical addresses outside its range*/ - // TODO, where we take the guest assigned physical memory? - if (!guest_pa_range_checker(l1_base_pa_add, 4*PAGE_SIZE)) - return ERR_MMU_OUT_OF_RANGE_PA; - - /* 16 KiB aligned ? 
*/ - if (l1_base_pa_add != (l1_base_pa_add & 0xFFFFC000)) - return ERR_MMU_L1_BASE_IS_NOT_16KB_ALIGNED; +int dmmu_unmap_L1_pt(addr_t l1_base_pa_add){ + uint32_t l1_idx, pt_idx, sec_idx; + uint32_t l1_desc; + uint32_t l1_desc_va_add; + uint32_t l1_desc_pa_add; + uint32_t l1_type; + uint32_t ap; + uint32_t ph_block; + addr_t curr_l1_base_pa_add; + int i; + + // checking to see + + /*Check that the guest does not override the physical addresses outside its range*/ + // TODO, where we take the guest assigned physical memory? + if (!guest_pa_range_checker(l1_base_pa_add, 4*PAGE_SIZE)){ + return ERR_MMU_OUT_OF_RANGE_PA; + } + /* 16 KiB aligned ? */ + if (l1_base_pa_add != (l1_base_pa_add & 0xFFFFC000)){ + return ERR_MMU_L1_BASE_IS_NOT_16KB_ALIGNED; + } + if(get_bft_entry_by_block_idx(ph_block)->type != PAGE_INFO_TYPE_L1PT || + get_bft_entry_by_block_idx(ph_block+1)->type != PAGE_INFO_TYPE_L1PT || + get_bft_entry_by_block_idx(ph_block+2)->type != PAGE_INFO_TYPE_L1PT || + get_bft_entry_by_block_idx(ph_block+3)->type != PAGE_INFO_TYPE_L1PT) { + return ERR_MMU_IS_NOT_L1_PT; + } - if(get_bft_entry_by_block_idx(ph_block)->type != PAGE_INFO_TYPE_L1PT || - get_bft_entry_by_block_idx(ph_block+1)->type != PAGE_INFO_TYPE_L1PT || - get_bft_entry_by_block_idx(ph_block+2)->type != PAGE_INFO_TYPE_L1PT || - get_bft_entry_by_block_idx(ph_block+3)->type != PAGE_INFO_TYPE_L1PT) { - return ERR_MMU_IS_NOT_L1_PT; - } + // You can not free the current L1 + COP_READ(COP_SYSTEM, COP_SYSTEM_TRANSLATION_TABLE0, (uint32_t)curr_l1_base_pa_add); + if ((curr_l1_base_pa_add & 0xFFFFC000) == (l1_base_pa_add & 0xFFFFC000)){ + return ERR_MMU_FREE_ACTIVE_L1; + } - // You can not free the current L1 - COP_READ(COP_SYSTEM, COP_SYSTEM_TRANSLATION_TABLE0, (uint32_t)curr_l1_base_pa_add); - if ((curr_l1_base_pa_add & 0xFFFFC000) == (l1_base_pa_add & 0xFFFFC000)) - return ERR_MMU_FREE_ACTIVE_L1; + ph_block = PA_TO_PH_BLOCK(l1_base_pa_add); - ph_block = PA_TO_PH_BLOCK(l1_base_pa_add); + virtual_machine* _curr_vm = get_curr_vm(); - //unmap_L1_pt_ref_update - for(l1_idx = 0; l1_idx < 4096; l1_idx++) - { - uint32_t l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned - uint32_t l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, curr_vm->config); - uint32_t l1_desc = *((uint32_t *) l1_desc_va_add); - uint32_t l1_type = l1_desc & DESC_TYPE_MASK; - if(l1_type == 0) - continue; - if(l1_type == 1) - { - l1_pt_t *pt = (l1_pt_t *) (&l1_desc) ; - dmmu_entry_t *bft_entry_pt = get_bft_entry_by_block_idx(PT_PA_TO_PH_BLOCK(pt->addr)); - bft_entry_pt->refcnt -= 1; - } - if(l1_type == 2) - { - l1_sec_t *sec = (l1_sec_t *) (&l1_desc) ; - uint32_t ap = GET_L1_AP(sec); - if(ap == 3) - { - for(sec_idx = 0; sec_idx < 256; sec_idx++) - { - uint32_t ph_block = PA_TO_PH_BLOCK(START_PA_OF_SECTION(sec)) | (sec_idx); - dmmu_entry_t *bft_entry = get_bft_entry_by_block_idx(ph_block); - bft_entry->refcnt -= 1; + //unmap_L1_pt_ref_update + for(l1_idx = 0; l1_idx < 4096; l1_idx++){ + uint32_t l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned + uint32_t l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, _curr_vm->config); + uint32_t l1_desc = *((uint32_t *) l1_desc_va_add); + uint32_t l1_type = l1_desc & DESC_TYPE_MASK; + if(l1_type == 0){ + continue; + } + //TODO: Why not use "else if" here and below? 
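+		//(l1_type holds a single value per descriptor, so the tests below
+		//are mutually exclusive and an "else if" chain would behave
+		//identically.)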
+ if(l1_type == 1){ + l1_pt_t *pt = (l1_pt_t *) (&l1_desc) ; + dmmu_entry_t *bft_entry_pt = get_bft_entry_by_block_idx(PT_PA_TO_PH_BLOCK(pt->addr)); + bft_entry_pt->refcnt -= 1; + } + if(l1_type == 2){ + l1_sec_t *sec = (l1_sec_t *) (&l1_desc); + uint32_t ap = GET_L1_AP(sec); + if(ap == 3){ + for(sec_idx = 0; sec_idx < 256; sec_idx++){ + uint32_t ph_block = PA_TO_PH_BLOCK(START_PA_OF_SECTION(sec)) | (sec_idx); + dmmu_entry_t *bft_entry = get_bft_entry_by_block_idx(ph_block); + bft_entry->refcnt -= 1; + } + } } - } } - } - //unmap_L1_pt_pgtype_update - get_bft_entry_by_block_idx(ph_block)->type = PAGE_INFO_TYPE_DATA; - get_bft_entry_by_block_idx(ph_block+1)->type = PAGE_INFO_TYPE_DATA; - get_bft_entry_by_block_idx(ph_block+2)->type = PAGE_INFO_TYPE_DATA; - get_bft_entry_by_block_idx(ph_block+3)->type = PAGE_INFO_TYPE_DATA; - return 0; + //Unmap_L1_pt_pgtype_update + get_bft_entry_by_block_idx(ph_block)->type = PAGE_INFO_TYPE_DATA; + get_bft_entry_by_block_idx(ph_block+1)->type = PAGE_INFO_TYPE_DATA; + get_bft_entry_by_block_idx(ph_block+2)->type = PAGE_INFO_TYPE_DATA; + get_bft_entry_by_block_idx(ph_block+3)->type = PAGE_INFO_TYPE_DATA; + + return 0; } /* ------------------------------------------------------------------- @@ -442,6 +451,8 @@ uint32_t dmmu_map_L1_section(addr_t va, addr_t sec_base_add, uint32_t attrs) uint32_t ap; int sec_idx; + + /*Check that the guest does not override the virtual addresses used by the hypervisor */ // user the master page table to discover if the va is reserved // WARNING: we can currently reserve only blocks of 1MB and non single blocks @@ -456,10 +467,12 @@ uint32_t dmmu_map_L1_section(addr_t va, addr_t sec_base_add, uint32_t attrs) if (!guest_pa_range_checker(sec_base_add, SECTION_SIZE)) return ERR_MMU_OUT_OF_RANGE_PA; + virtual_machine* _curr_vm = get_curr_vm(); + COP_READ(COP_SYSTEM, COP_SYSTEM_TRANSLATION_TABLE0, (uint32_t)l1_base_add); l1_idx = VA_TO_L1_IDX(va); l1_desc_pa_add = L1_IDX_TO_PA(l1_base_add, l1_idx); - l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, (curr_vm->config)); + l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, (_curr_vm->config)); l1_desc = *((uint32_t *) l1_desc_va_add); if (L1_TYPE(l1_desc) != UNMAPPED_ENTRY) @@ -514,6 +527,8 @@ int dmmu_l1_pt_map(addr_t va, addr_t l2_base_pa_add, uint32_t attrs) uint32_t l1_desc; uint32_t page_desc; + + // user the master page table to discover if the va is reserved // WARNING: we can currently reserve only blocks of 1MB and non single blocks l1_idx = VA_TO_L1_IDX(va); @@ -531,10 +546,12 @@ int dmmu_l1_pt_map(addr_t va, addr_t l2_base_pa_add, uint32_t attrs) if(bft_entry->type != PAGE_INFO_TYPE_L2PT) return ERR_MMU_IS_NOT_L2_PT; + virtual_machine* _curr_vm = get_curr_vm(); + COP_READ(COP_SYSTEM, COP_SYSTEM_TRANSLATION_TABLE0, (uint32_t)l1_base_add); l1_idx = VA_TO_L1_IDX(va); l1_desc_pa_add = L1_IDX_TO_PA(l1_base_add, l1_idx); - l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, (curr_vm->config)); + l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, (_curr_vm->config)); l1_desc = *((uint32_t *) l1_desc_va_add); if(L1_DESC_PXN(attrs)) @@ -570,6 +587,8 @@ uint32_t dmmu_unmap_L1_pageTable_entry (addr_t va) uint32_t l1_desc; uint32_t l1_type; + + // user the master page table to discover if the va is reserved // WARNING: we can currently reserve only blocks of 1MB and non single blocks l1_idx = VA_TO_L1_IDX(va); @@ -578,10 +597,12 @@ uint32_t dmmu_unmap_L1_pageTable_entry (addr_t va) return ERR_MMU_RESERVED_VA; } + virtual_machine* _curr_vm = get_curr_vm(); + COP_READ(COP_SYSTEM, 
COP_SYSTEM_TRANSLATION_TABLE0, (uint32_t)l1_base_add); l1_idx = VA_TO_L1_IDX(va); l1_desc_pa_add = L1_IDX_TO_PA(l1_base_add, l1_idx); - l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, curr_vm->config);//PA_PT_ADD_VA(l1_desc_pa_add); + l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, _curr_vm->config);//PA_PT_ADD_VA(l1_desc_pa_add); l1_desc = *((uint32_t *) l1_desc_va_add); #if DEBUG_DMMU_MMU_LEVEL > 2 @@ -681,10 +702,13 @@ void create_L2_refs_update(addr_t l2_base_pa_add) uint32_t l2_desc_pa_add; uint32_t l2_desc_va_add; int l2_idx; + + virtual_machine* _curr_vm = get_curr_vm(); + for(l2_idx = 0; l2_idx < 512; l2_idx++) { l2_desc_pa_add = L2_DESC_PA(l2_base_pa_add, l2_idx); // base address is 4KB aligned - l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, curr_vm->config); + l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, _curr_vm->config); uint32_t l2_desc = *((uint32_t *) l2_desc_va_add); uint32_t l2_type = l2_desc & DESC_TYPE_MASK; l2_small_t *pg_desc = (l2_small_t *) (&l2_desc) ; @@ -700,12 +724,10 @@ void create_L2_refs_update(addr_t l2_base_pa_add) } } -void create_L2_pgtype_update(uint32_t l2_base_pa_add) -{ - - uint32_t ph_block = PA_TO_PH_BLOCK(l2_base_pa_add); - dmmu_entry_t *bft_entry = get_bft_entry_by_block_idx(ph_block); - bft_entry->type = PAGE_INFO_TYPE_L2PT; +void create_L2_pgtype_update(uint32_t l2_base_pa_add){ + uint32_t ph_block = PA_TO_PH_BLOCK(l2_base_pa_add); + dmmu_entry_t *bft_entry = get_bft_entry_by_block_idx(ph_block); + bft_entry->type = PAGE_INFO_TYPE_L2PT; } @@ -718,7 +740,9 @@ uint32_t dmmu_create_L2_pt(addr_t l2_base_pa_add) uint32_t l2_desc; uint32_t l2_type; uint32_t l2_idx; - uint32_t l2_base_va_add = mmu_guest_pa_to_va(l2_base_pa_add, curr_vm->config); + + virtual_machine* _curr_vm = get_curr_vm(); + uint32_t l2_base_va_add = mmu_guest_pa_to_va(l2_base_pa_add, _curr_vm->config); /*Check that the guest does not override the physical addresses outside its range*/ // TODO, where we take the guest assigned physical memory? 
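(Note on the helper used throughout these functions: mmu_guest_pa_to_va(pa, config) turns a guest physical address into a virtual address the hypervisor can dereference, e.g. to read the guest's page-table entries. A minimal sketch of the translation, assuming the same linear guest mapping that the LINUX_PA/LINUX_VA macros in hyp_dmmu.c further down encode; the parameter's type name and the body are illustrative, not the actual implementation:

	addr_t mmu_guest_pa_to_va(addr_t pa, guest_config_t* config){
		//Linear mapping: the guest PA range [pstart, pstart + psize) is
		//visible to the hypervisor starting at vstart.
		return pa - (addr_t)config->firmware->pstart + (addr_t)config->firmware->vstart;
	}
)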
@@ -750,7 +774,7 @@ uint32_t dmmu_create_L2_pt(addr_t l2_base_pa_add) for(l2_idx = 0; l2_idx < 512; l2_idx++) { l2_desc_pa_add = L2_DESC_PA(l2_base_pa_add, l2_idx); // base address is 4KB aligned - l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, curr_vm->config); + l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, _curr_vm->config); l2_desc = *((uint32_t *) l2_desc_va_add); l2_type = l2_desc & DESC_TYPE_MASK; uint32_t current_check = (l2Desc_validityChecker_dispatcher(l2_type, l2_desc, l2_base_pa_add)); @@ -799,11 +823,13 @@ int dmmu_unmap_L2_pt(addr_t l2_base_pa_add) if(bft_entry->refcnt > 0) return ERR_MMU_REFERENCE_L2; + virtual_machine* _curr_vm = get_curr_vm(); + //updating the entries of L2 for(l2_idx = 0; l2_idx < 512; l2_idx++) { l2_desc_pa_add = L2_DESC_PA(l2_base_pa_add, l2_idx); // base address is 4KB aligned - l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, curr_vm->config); + l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, _curr_vm->config); l2_desc = *((uint32_t *) l2_desc_va_add); l2_small_t *pg_desc = (l2_small_t *) (&l2_desc) ; dmmu_entry_t *bft_entry_pg = get_bft_entry_by_block_idx(PA_TO_PH_BLOCK(START_PA_OF_SPT(pg_desc))); @@ -842,8 +868,10 @@ int dmmu_l2_map_entry(addr_t l2_base_pa_add, uint32_t l2_idx, addr_t page_pa_add if (!guest_pa_range_checker(page_pa_add, PAGE_SIZE)) return ERR_MMU_OUT_OF_RANGE_PA; + virtual_machine* _curr_vm = get_curr_vm(); + l2_desc_pa_add = L2_IDX_TO_PA(l2_base_pa_add, l2_idx); - l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, (curr_vm->config)); + l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, (_curr_vm->config)); // Finding the corresponding entry for the page_pa_add and l2_base_pa_add in BFT @@ -900,8 +928,10 @@ int dmmu_l2_unmap_entry(addr_t l2_base_pa_add, uint32_t l2_idx) if(bft_entry->type != PAGE_INFO_TYPE_L2PT) return ERR_MMU_IS_NOT_L2_PT; + virtual_machine* _curr_vm = get_curr_vm(); + l2_desc_pa_add = L2_IDX_TO_PA(l2_base_pa_add, l2_idx); - l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, (curr_vm->config)); + l2_desc_va_add = mmu_guest_pa_to_va(l2_desc_pa_add, (_curr_vm->config)); l2_desc = *((uint32_t *) l2_desc_va_add); l2_type = l2_desc & DESC_TYPE_MASK; @@ -945,11 +975,14 @@ int dmmu_switch_mm(addr_t l1_base_pa_add) if(get_bft_entry_by_block_idx(ph_block)->type != PAGE_INFO_TYPE_L1PT) return ERR_MMU_IS_NOT_L1_PT; #if DEBUG_DMMU_MMU_LEVEL > 3 + + virtual_machine* _curr_vm = get_curr_vm(); + uint32_t l1_idx; for(l1_idx = 0; l1_idx < 4096; l1_idx++) { uint32_t l1_desc_pa_add = L1_IDX_TO_PA(l1_base_pa_add, l1_idx); // base address is 16KB aligned - uint32_t l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, curr_vm->config); + uint32_t l1_desc_va_add = mmu_guest_pa_to_va(l1_desc_pa_add, _curr_vm->config); uint32_t l1_desc = *((uint32_t *) l1_desc_va_add); if(l1_desc != 0x0) printf("pg %x %x \n", l1_idx, l1_desc); @@ -972,40 +1005,39 @@ enum dmmu_command { CMD_MAP_L1_SECTION, CMD_UNMAP_L1_PT_ENTRY, CMD_CREATE_L2_PT, CMD_MAP_L1_PT, CMD_MAP_L2_ENTRY, CMD_UNMAP_L2_ENTRY, CMD_FREE_L2, CMD_CREATE_L1_PT, CMD_SWITCH_ACTIVE_L1, CMD_FREE_L1 }; -int dmmu_handler(uint32_t p03, uint32_t p1, uint32_t p2) -{ - uint32_t p0 = p03 & 0xF; - uint32_t p3 = p03 >> 4; - -#if DEBUG_DMMU_MMU_LEVEL > 1 - printf("dmmu_handler: DMMU %x %x %x\n", p1, p2, p3); -#endif - - switch(p0) { - case CMD_CREATE_L1_PT: - return dmmu_create_L1_pt(p1); - case CMD_FREE_L1: - return dmmu_unmap_L1_pt(p1); - case CMD_MAP_L1_SECTION: - return dmmu_map_L1_section(p1,p2,p3); - case CMD_MAP_L1_PT: - return dmmu_l1_pt_map(p1, p2, p3); - case 
CMD_UNMAP_L1_PT_ENTRY:
-		return dmmu_unmap_L1_pageTable_entry(p1);
-	case CMD_CREATE_L2_PT:
-		return dmmu_create_L2_pt(p1);
-	case CMD_FREE_L2:
-		return dmmu_unmap_L2_pt(p1);
-	case CMD_MAP_L2_ENTRY:
-		p3 = p03 & 0xFFFFFFF0;
-		uint32_t idx = p2 >> 20;
-		uint32_t attrs = p2 & 0xFFF;
-		return dmmu_l2_map_entry(p1, idx, p3, attrs);
-	case CMD_UNMAP_L2_ENTRY:
-		return dmmu_l2_unmap_entry(p1, p2);
-	case CMD_SWITCH_ACTIVE_L1:
-		return dmmu_switch_mm(p1);
-	default:
-		return ERR_MMU_UNIMPLEMENTED;
-	}
+int dmmu_handler(uint32_t p03, uint32_t p1, uint32_t p2){
+	uint32_t p0 = p03 & 0xF;
+	uint32_t p3 = p03 >> 4;
+
+	#if DEBUG_DMMU_MMU_LEVEL > 1
+	printf("dmmu_handler: DMMU %x %x %x\n", p1, p2, p3);
+	#endif
+
+	switch(p0) {
+		case CMD_CREATE_L1_PT:
+			return dmmu_create_L1_pt(p1);
+		case CMD_FREE_L1:
+			return dmmu_unmap_L1_pt(p1);
+		case CMD_MAP_L1_SECTION:
+			return dmmu_map_L1_section(p1,p2,p3);
+		case CMD_MAP_L1_PT:
+			return dmmu_l1_pt_map(p1, p2, p3);
+		case CMD_UNMAP_L1_PT_ENTRY:
+			return dmmu_unmap_L1_pageTable_entry(p1);
+		case CMD_CREATE_L2_PT:
+			return dmmu_create_L2_pt(p1);
+		case CMD_FREE_L2:
+			return dmmu_unmap_L2_pt(p1);
+		case CMD_MAP_L2_ENTRY:
+			p3 = p03 & 0xFFFFFFF0;
+			uint32_t idx = p2 >> 20;
+			uint32_t attrs = p2 & 0xFFF;
+			return dmmu_l2_map_entry(p1, idx, p3, attrs);
+		case CMD_UNMAP_L2_ENTRY:
+			return dmmu_l2_unmap_entry(p1, p2);
+		case CMD_SWITCH_ACTIVE_L1:
+			return dmmu_switch_mm(p1);
+		default:
+			return ERR_MMU_UNIMPLEMENTED;
+	}
 }
diff --git a/core/hypervisor/dmmu.h b/core/hypervisor/dmmu.h
index d17ad8b..c0686cf 100644
--- a/core/hypervisor/dmmu.h
+++ b/core/hypervisor/dmmu.h
@@ -17,8 +17,8 @@
 // To check if all the page tables are allocated form the region that is always chackable or not
 //#define DEBUG_DMMU_CACHEABILITY_CHECKERS
 #define CHECK_PAGETABLES_CACHEABILITY
-#define PG_ADDR_LOWER_BOUND curr_vm->config->firmware->pstart + 0x6800000
-#define PG_ADDR_UPPER_BOUND curr_vm->config->firmware->pstart + 0x6A00000
+#define PG_ADDR_LOWER_BOUND _curr_vm->config->firmware->pstart + 0x6800000
+#define PG_ADDR_UPPER_BOUND _curr_vm->config->firmware->pstart + 0x6A00000

 /* BFT entry type */
 enum dmmu_entry_type {
diff --git a/core/hypervisor/handlers.c b/core/hypervisor/handlers.c
index 84a45d5..ce395c2 100755
--- a/core/hypervisor/handlers.c
+++ b/core/hypervisor/handlers.c
@@ -8,15 +8,50 @@
 extern void hypercall_dyn_set_pmd(addr_t *pmd, uint32_t desc);
 extern void hypercall_dyn_set_pte(addr_t *l2pt_linux_entry_va, uint32_t linux_pte, uint32_t phys_pte);
 extern int dmmu_handler(uint32_t p03, uint32_t p1, uint32_t p2);
+//TODO: Fix this...
 extern virtual_machine *curr_vm;
+extern int __hyper_stack_bottom__;
+
+//This function returns a pointer to the VM currently running on this core,
+//getting the ID of the processor from the stack pointer (good because it is
+//platform-independent).
+virtual_machine* get_curr_vm(){
+	//TODO: See how this turns out in assembly.
+	//TODO: This will be used a lot, so it should be maximally efficient.
+	//1. Get value of stack pointer.
+	register uint32_t stack_pointer __asm("sp"); //The actual register?
+	uint32_t temp_stack_pointer = stack_pointer; //TODO: Do this on one line?
+
+	//2. Subtract the address of __hyper_stack_bottom__ from the stack pointer
+	//(all stacks are adjacent, so we get the placement of the address relative
+	//to the start of all stacks).
+	temp_stack_pointer = temp_stack_pointer - (uint32_t)&__hyper_stack_bottom__;
+
+	//3. XOR with masks of the different stacks until you get 0.
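+	//Worked example (hypothetical numbers): if core 1 runs with sp at
+	//__hyper_stack_bottom__ + 0x2A90, then temp_stack_pointer now holds
+	//0x2A90; clearing its low 13 bits below leaves 0x2000 = (1 << 13),
+	//which XORed with the mask of the VM whose id is 1 gives 0.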
+	//The size of one stack is 8*1024 bytes -> 10 0000 0000 0000 in binary.
+	//So, we want to clear the first 13 bits.
+	//TODO: Note that 0x1FFF is hard-coded from the stack size. We can get
+	//the mask (0x1FFF in this case) by subtracting 1 from the stack size, if
+	//the stack size is a power of two. In general (when stack size is not power
+	//of two), doing this is a harder problem.
+	temp_stack_pointer = temp_stack_pointer & ~0x1FFF;
+	//TODO: Where do we decide how many VMs we have? Until then:
+	while((temp_stack_pointer ^ (curr_vm->id << 13)) != 0){
+		curr_vm = curr_vm->next;
+	}
+
+	//4. Return curr_vm, which is a pointer to the current machine on this
+	//particular core.
+	return curr_vm; //TODO: Entirely cricket?
+}

 #define USE_DMMU

-void swi_handler(uint32_t param0, uint32_t param1, uint32_t param2, uint32_t hypercall_number)
-{
+void swi_handler(uint32_t param0, uint32_t param1, uint32_t param2, uint32_t hypercall_number){
+	virtual_machine* _curr_vm = get_curr_vm();
 	/*TODO Added check that controls if it comes from user space, makes it pretty inefficient, remake later*/
 	/*Testing RPC from user space, remove later*/
-	if(curr_vm->current_guest_mode == HC_GM_TASK){
+	if(_curr_vm->current_guest_mode == HC_GM_TASK){
 		if(hypercall_number == 1020){
 			//ALLOWED RPC OPERATION
 			hypercall_rpc(param0, (uint32_t *)param1);
@@ -27,20 +62,20 @@ void swi_handler(uint32_t param0, uint32_t param1, uint32_t param2, uint32_t hyp
 			return;
 		}
 	}
-	if(curr_vm->current_guest_mode == HC_GM_TASK){
+	if(_curr_vm->current_guest_mode == HC_GM_TASK){

-		// debug("\tUser process made system call:\t\t\t %x\n", curr_vm->mode_states[HC_GM_TASK].ctx.reg[7] );
+		// debug("\tUser process made system call:\t\t\t %x\n", _curr_vm->mode_states[HC_GM_TASK].ctx.reg[7] );
 		change_guest_mode(HC_GM_KERNEL);
 		/* TODO: The current way of saving context by the hypervisor is very inefficient,
 		 * can be improved alot with some hacking and shortcuts (for the FUTURE)*/
-		curr_vm->current_mode_state->ctx.sp -= (72 + 8) ; //FRAME_SIZE (18 registers to be saved) + 2 swi args
+		_curr_vm->current_mode_state->ctx.sp -= (72 + 8) ; //FRAME_SIZE (18 registers to be saved) + 2 swi args
 		uint32_t *context, *sp, i;

-		context = &curr_vm->mode_states[HC_GM_TASK].ctx.reg[0];
-		sp =(uint32_t *) curr_vm->mode_states[HC_GM_KERNEL].ctx.sp;
+		context = &_curr_vm->mode_states[HC_GM_TASK].ctx.reg[0];
+		sp =(uint32_t *) _curr_vm->mode_states[HC_GM_KERNEL].ctx.sp;

-		*sp++ = curr_vm->mode_states[HC_GM_TASK].ctx.reg[4];
-		*sp++ = curr_vm->mode_states[HC_GM_TASK].ctx.reg[5];
+		*sp++ = _curr_vm->mode_states[HC_GM_TASK].ctx.reg[4];
+		*sp++ = _curr_vm->mode_states[HC_GM_TASK].ctx.reg[5];

 		/* Saves 16 ARM registers (all ARM registers in context except program
 		 * status register). */
@@ -50,46 +85,51 @@ void swi_handler(uint32_t param0, uint32_t param1, uint32_t param2, uint32_t hyp
 			i--;
 		}

-		*sp = curr_vm->mode_states[HC_GM_TASK].ctx.reg[0]; //OLD_R0
+		*sp = _curr_vm->mode_states[HC_GM_TASK].ctx.reg[0]; //OLD_R0
 		//update CR for alignment fault
 		//Enable IRQ
-		curr_vm->current_mode_state->ctx.psr &= ~(IRQ_MASK);
+		_curr_vm->current_mode_state->ctx.psr &= ~(IRQ_MASK);

-		curr_vm->current_mode_state->ctx.lr = curr_vm->exception_vector[V_RET_FAST_SYSCALL];//curr_vm->handlers.syscall.ret_fast_syscall;
+		_curr_vm->current_mode_state->ctx.lr = _curr_vm->exception_vector[V_RET_FAST_SYSCALL];//_curr_vm->handlers.syscall.ret_fast_syscall;

 		//copy task context to kernel context. 
syscall supports 6 arguments /*system call nr in r7*/ - curr_vm->current_mode_state->ctx.reg[7] = curr_vm->mode_states[HC_GM_TASK].ctx.reg[7]; + _curr_vm->current_mode_state->ctx.reg[7] = _curr_vm->mode_states[HC_GM_TASK].ctx.reg[7]; - if(curr_vm->mode_states[HC_GM_TASK].ctx.reg[7] < curr_vm->guest_info.nr_syscalls ){ + if(_curr_vm->mode_states[HC_GM_TASK].ctx.reg[7] < _curr_vm->guest_info.nr_syscalls ){ /*Regular system call, restore params*/ for(i =0;i <= 5; i++) - curr_vm->current_mode_state->ctx.reg[i] = curr_vm->mode_states[HC_GM_TASK].ctx.reg[i]; + _curr_vm->current_mode_state->ctx.reg[i] = _curr_vm->mode_states[HC_GM_TASK].ctx.reg[i]; /*Set PC to systemcall function*/ - curr_vm->current_mode_state->ctx.pc = *( (uint32_t *) (curr_vm->exception_vector[V_SWI] + (curr_vm->current_mode_state->ctx.reg[7] << 2))); + _curr_vm->current_mode_state->ctx.pc = *( (uint32_t *) (_curr_vm->exception_vector[V_SWI] + (_curr_vm->current_mode_state->ctx.reg[7] << 2))); } else{ //TODO Have not added check that its a valid private arm syscall, done anyways inside arm_syscall - //if(curr_vm->current_mode_state->ctx.reg[7] >= 0xF0000){ //NR_SYSCALL_BASE + //if(_curr_vm->current_mode_state->ctx.reg[7] >= 0xF0000){ //NR_SYSCALL_BASE /*Arm private system call*/ - curr_vm->current_mode_state->ctx.reg[0] = curr_vm->mode_states[HC_GM_TASK].ctx.reg[7]; - curr_vm->current_mode_state->ctx.reg[1] = curr_vm->mode_states[HC_GM_KERNEL].ctx.sp + 8; //Adjust sp with S_OFF, contains regs - curr_vm->current_mode_state->ctx.pc = curr_vm->exception_vector[V_ARM_SYSCALL]; + _curr_vm->current_mode_state->ctx.reg[0] = _curr_vm->mode_states[HC_GM_TASK].ctx.reg[7]; + _curr_vm->current_mode_state->ctx.reg[1] = _curr_vm->mode_states[HC_GM_KERNEL].ctx.sp + 8; //Adjust sp with S_OFF, contains regs + _curr_vm->current_mode_state->ctx.pc = _curr_vm->exception_vector[V_ARM_SYSCALL]; } } - else if(curr_vm->current_guest_mode != HC_GM_TASK){ + else if(_curr_vm->current_guest_mode != HC_GM_TASK){ //printf("\tHypercall number: %d (%x, %x) called\n", hypercall_number, param0, param1); uint32_t res; switch(hypercall_number){ /* TEMP: DMMU TEST */ case 666: - //res = dmmu_handler(param0, param1, param2, curr_vm->current_mode_state->ctx.reg[3]); + //res = dmmu_handler(param0, param1, param2, _curr_vm->current_mode_state->ctx.reg[3]); res = dmmu_handler(param0, param1, param2); - curr_vm->current_mode_state->ctx.reg[0] = res; + _curr_vm->current_mode_state->ctx.reg[0] = res; + + //Note: The below four rows must be here in order that the + //hypervisor has correct cache behaviour. This was revealed + //when testing on the RPi2 hardware (this error did not show up + //when testing on the Beagleboard simulator). 
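+			//(What the calls below do: isb() is an instruction
+			//synchronization barrier, mem_mmu_tlb_invalidate_all(TRUE, TRUE)
+			//flushes stale translations from the TLBs, and
+			//CacheDataCleanInvalidateAll() writes back and invalidates the
+			//data cache.)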
isb(); mem_mmu_tlb_invalidate_all(TRUE, TRUE); CacheDataCleanInvalidateAll(); @@ -227,12 +267,12 @@ void swi_handler(uint32_t param0, uint32_t param1, uint32_t param2, uint32_t hyp } } -return_value prefetch_abort_handler(uint32_t addr, uint32_t status, uint32_t unused) -{ - if(addr >= 0xc0000000) - printf("Prefetch abort:%x Status:%x, u=%x \n", addr, status, unused); - - uint32_t interrupted_mode = curr_vm->current_guest_mode; +return_value prefetch_abort_handler(uint32_t addr, uint32_t status, uint32_t unused){ + virtual_machine* _curr_vm = get_curr_vm(); + if(addr >= 0xc0000000){ + printf("Prefetch abort: %x Status: %x, u= %x \n", addr, status, unused); + } + uint32_t interrupted_mode = _curr_vm->current_guest_mode; /*Need to be in virtual kernel mode to access data abort handler*/ change_guest_mode(HC_GM_KERNEL); @@ -240,10 +280,10 @@ return_value prefetch_abort_handler(uint32_t addr, uint32_t status, uint32_t unu /*Set uregs, Linux kernel ususally sets these up in exception vector * which we have to handle now*/ - curr_vm->mode_states[HC_GM_KERNEL].ctx.sp -= (72) ; //FRAME_SIZE (18 registers to be saved) + _curr_vm->mode_states[HC_GM_KERNEL].ctx.sp -= (72) ; //FRAME_SIZE (18 registers to be saved) - uint32_t *sp = (uint32_t *)curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; - uint32_t *context = curr_vm->mode_states[interrupted_mode].ctx.reg; + uint32_t *sp = (uint32_t *)_curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; + uint32_t *context = _curr_vm->mode_states[interrupted_mode].ctx.reg; uint32_t i; for(i = 0; i < 17; i++){ @@ -252,41 +292,41 @@ return_value prefetch_abort_handler(uint32_t addr, uint32_t status, uint32_t unu *sp = 0xFFFFFFFF; //ORIG_R0 /*Prepare args for prefetchabort handler*/ - curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[0] = addr; - curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[1] = status; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[0] = addr; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[1] = status; /*Linux saves the user registers in the stack*/ - curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[2] = (uint32_t)curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[2] = (uint32_t)_curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; - if(!(curr_vm->mode_states[HC_GM_KERNEL].ctx.psr & 0xF)){ //coming from svc - curr_vm->mode_states[HC_GM_KERNEL].ctx.psr |= IRQ_MASK; //TODO DISABLE IRQnot neccessarily, check this + if(!(_curr_vm->mode_states[HC_GM_KERNEL].ctx.psr & 0xF)){ //coming from svc + _curr_vm->mode_states[HC_GM_KERNEL].ctx.psr |= IRQ_MASK; //TODO DISABLE IRQnot neccessarily, check this } else{ - curr_vm->mode_states[HC_GM_KERNEL].ctx.psr &= ~(IRQ_MASK); //ENABLE IRQ coming from usr + _curr_vm->mode_states[HC_GM_KERNEL].ctx.psr &= ~(IRQ_MASK); //ENABLE IRQ coming from usr } /*Prepare pc for handler and lr to return from handler*/ - curr_vm->mode_states[HC_GM_KERNEL].ctx.pc = curr_vm->exception_vector[V_PREFETCH_ABORT];//(uint32_t)curr_vm->handlers.pabort; - curr_vm->mode_states[HC_GM_KERNEL].ctx.lr = curr_vm->exception_vector[V_RET_FROM_EXCEPTION];//(uint32_t)curr_vm->handlers.ret_from_exception; - //printf("Kernel PC:%x LR:%x \n",curr_vm->mode_states[HC_GM_KERNEL].ctx.pc, curr_vm->mode_states[HC_GM_KERNEL].ctx.lr); + _curr_vm->mode_states[HC_GM_KERNEL].ctx.pc = _curr_vm->exception_vector[V_PREFETCH_ABORT];//(uint32_t)_curr_vm->handlers.pabort; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.lr = _curr_vm->exception_vector[V_RET_FROM_EXCEPTION];//(uint32_t)_curr_vm->handlers.ret_from_exception; + //printf("Kernel PC:%x LR:%x 
\n",_curr_vm->mode_states[HC_GM_KERNEL].ctx.pc, _curr_vm->mode_states[HC_GM_KERNEL].ctx.lr); return RV_OK; } -return_value data_abort_handler(uint32_t addr, uint32_t status, uint32_t unused) -{ - if(addr >= 0xc0000000) - printf("Data abort:%x Status:%x, u=%x \n", addr, status, unused); - - uint32_t interrupted_mode = curr_vm->current_guest_mode; +return_value data_abort_handler(uint32_t addr, uint32_t status, uint32_t unused){ + virtual_machine* _curr_vm = get_curr_vm(); + if(addr >= 0xc0000000){ + printf("Data abort: %x Status: %x, u= %x \n", addr, status, unused); + } + uint32_t interrupted_mode = _curr_vm->current_guest_mode; /*Must be in virtual kernel mode to access kernel handlers*/ change_guest_mode(HC_GM_KERNEL); - curr_vm->mode_states[HC_GM_KERNEL].ctx.sp -= (72) ; //FRAME_SIZE (18 registers to be saved) + _curr_vm->mode_states[HC_GM_KERNEL].ctx.sp -= (72) ; //FRAME_SIZE (18 registers to be saved) /*Set uregs, Linux kernel ususally sets these up in exception vector * which we have to handle now*/ - uint32_t *sp = (uint32_t *)curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; - uint32_t *context = curr_vm->mode_states[interrupted_mode].ctx.reg; + uint32_t *sp = (uint32_t *)_curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; + uint32_t *context = _curr_vm->mode_states[interrupted_mode].ctx.reg; uint32_t i; for(i = 0; i < 17; i++){ @@ -300,55 +340,55 @@ return_value data_abort_handler(uint32_t addr, uint32_t status, uint32_t unused) //Context saved in sp /*Prepare args for dataabort handler*/ - curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[0] = addr; - curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[1] = status; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[0] = addr; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[1] = status; /*Linux saves the user registers in the stack*/ - curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[2] = (uint32_t)curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[2] = (uint32_t)_curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; - if(!(curr_vm->mode_states[HC_GM_KERNEL].ctx.psr & 0xF)){ //coming from svc - curr_vm->mode_states[HC_GM_KERNEL].ctx.psr |= IRQ_MASK; //TODO DISABLE IRQnot neccessarily, check this + if(!(_curr_vm->mode_states[HC_GM_KERNEL].ctx.psr & 0xF)){ //coming from svc + _curr_vm->mode_states[HC_GM_KERNEL].ctx.psr |= IRQ_MASK; //TODO DISABLE IRQnot neccessarily, check this // } else{ - curr_vm->mode_states[HC_GM_KERNEL].ctx.psr &= ~(IRQ_MASK); //ENABLE IRQ coming from usr + _curr_vm->mode_states[HC_GM_KERNEL].ctx.psr &= ~(IRQ_MASK); //ENABLE IRQ coming from usr } /*Prepare pc for handler and lr to return from handler*/ - curr_vm->mode_states[HC_GM_KERNEL].ctx.pc = curr_vm->exception_vector[V_DATA_ABORT];//(uint32_t)curr_vm->handlers.dabort; - curr_vm->mode_states[HC_GM_KERNEL].ctx.lr = curr_vm->exception_vector[V_RET_FROM_EXCEPTION];//(uint32_t)curr_vm->handlers.ret_from_exception; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.pc = _curr_vm->exception_vector[V_DATA_ABORT];//(uint32_t)_curr_vm->handlers.dabort; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.lr = _curr_vm->exception_vector[V_RET_FROM_EXCEPTION];//(uint32_t)_curr_vm->handlers.ret_from_exception; - //printf("Kernel PC:%x LR:%x \n",curr_vm->mode_states[HC_GM_KERNEL].ctx.pc, curr_vm->mode_states[HC_GM_KERNEL].ctx.lr); + //printf("Kernel PC:%x LR:%x \n",_curr_vm->mode_states[HC_GM_KERNEL].ctx.pc, _curr_vm->mode_states[HC_GM_KERNEL].ctx.lr); return RV_OK; } -return_value irq_handler(uint32_t irq, uint32_t r1, uint32_t r2 ) -{ -// printf("IRQ handler called %x:%x:%x\n", irq, r1, r2); 
+return_value irq_handler(uint32_t irq, uint32_t r1, uint32_t r2 ){ + virtual_machine* _curr_vm = get_curr_vm(); + //printf("IRQ handler called %x:%x:%x\n", irq, r1, r2); /*Interrupt inside interrupt mode (i.e soft interrupt) */ - if(curr_vm->current_guest_mode == HC_GM_INTERRUPT){ - curr_vm->current_mode_state->ctx.psr |= IRQ_MASK; + if(_curr_vm->current_guest_mode == HC_GM_INTERRUPT){ + _curr_vm->current_mode_state->ctx.psr |= IRQ_MASK; /*We dont handle reentrant IRQ... yet, let the current interrupt finish*/ //Bypass it for now - //Should redirect to usr_exit -> (uint32_t)curr_vm->handlers.ret_from_exception; -// curr_vm->current_mode_state->ctx.pc = (uint32_t)curr_vm->handlers.tick; + //Should redirect to usr_exit -> (uint32_t)_curr_vm->handlers.ret_from_exception; +// _curr_vm->current_mode_state->ctx.pc = (uint32_t)_curr_vm->handlers.tick; return RV_OK; } - curr_vm->interrupted_mode = curr_vm->current_guest_mode; + _curr_vm->interrupted_mode = _curr_vm->current_guest_mode; change_guest_mode(HC_GM_INTERRUPT); - curr_vm->current_mode_state->ctx.reg[0] = irq; - curr_vm->current_mode_state->ctx.pc = curr_vm->exception_vector[V_IRQ];//(uint32_t)curr_vm->handlers.irq; - curr_vm->current_mode_state->ctx.psr |= IRQ_MASK; - //curr_vm->current_mode_state->ctx.sp = curr_vm->config->interrupt_config.sp; - curr_vm->current_mode_state->ctx.sp = curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; + _curr_vm->current_mode_state->ctx.reg[0] = irq; + _curr_vm->current_mode_state->ctx.pc = _curr_vm->exception_vector[V_IRQ];//(uint32_t)_curr_vm->handlers.irq; + _curr_vm->current_mode_state->ctx.psr |= IRQ_MASK; + //_curr_vm->current_mode_state->ctx.sp = _curr_vm->config->interrupt_config.sp; + _curr_vm->current_mode_state->ctx.sp = _curr_vm->mode_states[HC_GM_KERNEL].ctx.sp; return RV_OK; } /*These are not handled yet*/ return_value undef_handler(uint32_t instr, uint32_t unused, uint32_t addr) { - printf("Undefined abort. Address:%x Instruction:%x \n", addr, instr); + printf("Undefined abort. 
Address: %x Instruction: %x \n", addr, instr); while(1); return RV_OK; } diff --git a/core/hypervisor/hypercalls/hyp_cpu.c b/core/hypervisor/hypercalls/hyp_cpu.c index 60b348e..927cad8 100644 --- a/core/hypervisor/hypercalls/hyp_cpu.c +++ b/core/hypervisor/hypercalls/hyp_cpu.c @@ -1,30 +1,30 @@ #include "hw.h" #include "hyper.h" -extern virtual_machine *curr_vm; +extern virtual_machine* get_curr_vm(); /*CPU and CO-PROCESSOR operations*/ #if 0 -void hypercall_set_cpu_cr(uint32_t cpu_cr) -{ - if(curr_vm->current_guest_mode != HC_GM_KERNEL) +void hypercall_set_cpu_cr(uint32_t cpu_cr){ + virtual_machine* _curr_vm = get_curr_vm(); + if(_curr_vm->current_guest_mode != HC_GM_KERNEL) hyper_panic("User mode not allowed to access system control register\n"); COP_WRITE(COP_SYSTEM,COP_SYSTEM_CONTROL,cpu_cr); } -void hypercall_get_cpu_cr() -{ - if(curr_vm->current_guest_mode != HC_GM_KERNEL) +void hypercall_get_cpu_cr(){ + virtual_machine* _curr_vm = get_curr_vm(); + if(_curr_vm->current_guest_mode != HC_GM_KERNEL) hyper_panic("User mode not allowed to set system control register\n"); uint32_t cpu_cr; COP_READ(COP_SYSTEM,COP_SYSTEM_CONTROL,cpu_cr); /*Return result in r0*/ - curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[0] = cpu_cr; + _curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[0] = cpu_cr; } #endif diff --git a/core/hypervisor/hypercalls/hyp_dmmu.c b/core/hypervisor/hypercalls/hyp_dmmu.c index ad0bccd..aa35221 100644 --- a/core/hypervisor/hypercalls/hyp_dmmu.c +++ b/core/hypervisor/hypercalls/hyp_dmmu.c @@ -3,16 +3,16 @@ #include "dmmu.h" #include "mmu.h" -extern virtual_machine *curr_vm; +extern virtual_machine* get_curr_vm(); #if 0 #define DEBUG_MMU #endif /*Get physical address from Linux virtual address*/ -#define LINUX_PA(va) ((va) - (addr_t)(curr_vm->config->firmware->vstart) + (addr_t)(curr_vm->config->firmware->pstart)) +#define LINUX_PA(va) ((va) - (addr_t)(_curr_vm->config->firmware->vstart) + (addr_t)(_curr_vm->config->firmware->pstart)) /*Get virtual address from Linux physical address*/ -#define LINUX_VA(pa) ((pa) - (addr_t)(curr_vm->config->firmware->pstart) + (addr_t)(curr_vm->config->firmware->vstart)) +#define LINUX_VA(pa) ((pa) - (addr_t)(_curr_vm->config->firmware->pstart) + (addr_t)(_curr_vm->config->firmware->vstart)) addr_t linux_pt_get_empty_l2(); @@ -34,14 +34,14 @@ void hypercall_dyn_switch_mm(addr_t table_base, uint32_t context_id) /* Free Page table, Make it RW again */ -void hypercall_dyn_free_pgd(addr_t *pgd_va) -{ +void hypercall_dyn_free_pgd(addr_t *pgd_va){ + virtual_machine* _curr_vm = get_curr_vm(); #ifdef DEBUG_MMU printf("\n\t\t\tHypercall FREE PGD\n\t\t pgd:%x ", pgd_va); #endif uint32_t i, clean_va; - uint32_t page_offset = curr_vm->guest_info.page_offset; + uint32_t page_offset = _curr_vm->guest_info.page_offset; /*First get the physical address of the lvl 2 page by * looking at the index of the pgd location. 
diff --git a/core/hypervisor/hypercalls/hyp_dmmu.c b/core/hypervisor/hypercalls/hyp_dmmu.c
index ad0bccd..aa35221 100644
--- a/core/hypervisor/hypercalls/hyp_dmmu.c
+++ b/core/hypervisor/hypercalls/hyp_dmmu.c
@@ -3,16 +3,16 @@
 #include "dmmu.h"
 #include "mmu.h"
 
-extern virtual_machine *curr_vm;
+extern virtual_machine* get_curr_vm();
 
 #if 0
 #define DEBUG_MMU
 #endif
 
 /*Get physical address from Linux virtual address*/
-#define LINUX_PA(va) ((va) - (addr_t)(curr_vm->config->firmware->vstart) + (addr_t)(curr_vm->config->firmware->pstart))
+#define LINUX_PA(va) ((va) - (addr_t)(_curr_vm->config->firmware->vstart) + (addr_t)(_curr_vm->config->firmware->pstart))
 /*Get virtual address from Linux physical address*/
-#define LINUX_VA(pa) ((pa) - (addr_t)(curr_vm->config->firmware->pstart) + (addr_t)(curr_vm->config->firmware->vstart))
+#define LINUX_VA(pa) ((pa) - (addr_t)(_curr_vm->config->firmware->pstart) + (addr_t)(_curr_vm->config->firmware->vstart))
 
 addr_t linux_pt_get_empty_l2();
 
@@ -34,14 +34,14 @@ void hypercall_dyn_switch_mm(addr_t table_base, uint32_t context_id)
 
 /* Free page table, make it RW again */
-void hypercall_dyn_free_pgd(addr_t *pgd_va)
-{
+void hypercall_dyn_free_pgd(addr_t *pgd_va){
+	virtual_machine* _curr_vm = get_curr_vm();
 #ifdef DEBUG_MMU
 	printf("\n\t\t\tHypercall FREE PGD\n\t\t pgd:%x ", pgd_va);
 #endif
 	uint32_t i, clean_va;
-	uint32_t page_offset = curr_vm->guest_info.page_offset;
+	uint32_t page_offset = _curr_vm->guest_info.page_offset;
 
 	/*First get the physical address of the lvl 2 page by
 	 * looking at the index of the pgd location. Then set
@@ -49,7 +49,7 @@ void hypercall_dyn_free_pgd(addr_t *pgd_va)
 	addr_t *master_pgd_va;
 	/*Get master page table*/
-	master_pgd_va = (addr_t *)(curr_vm->config->pa_initial_l1_offset + page_offset);
+	master_pgd_va = (addr_t *)(_curr_vm->config->pa_initial_l1_offset + page_offset);
 	addr_t *l1_pt_entry_for_desc = (addr_t *)&master_pgd_va[(addr_t)pgd_va >> MMU_L1_SECTION_SHIFT];
 	uint32_t l1_desc_entry = *l1_pt_entry_for_desc;
 
@@ -61,7 +61,7 @@ void hypercall_dyn_free_pgd(addr_t *pgd_va)
 
 	uint32_t l2_entry_idx = (((uint32_t)pgd_va << 12) >> 24) + table2_idx;
 
-	uint32_t *l2_page_entry = (addr_t *)(mmu_guest_pa_to_va(table2_pa & L2_BASE_MASK, (curr_vm->config)));
+	uint32_t *l2_page_entry = (addr_t *)(mmu_guest_pa_to_va(table2_pa & L2_BASE_MASK, (_curr_vm->config)));
 	uint32_t page_pa = MMU_L2_SMALL_ADDR(l2_page_entry[l2_entry_idx]);
 	uint32_t attrs = MMU_L2_TYPE_SMALL;
 
@@ -98,8 +98,8 @@ void hypercall_dyn_free_pgd(addr_t *pgd_va)
 
 /*New pages for processes; copies kernel space from the master page table
 *and cleans the cache; sets these pages read-only for user */
-void hypercall_dyn_new_pgd(addr_t *pgd_va)
-{
+void hypercall_dyn_new_pgd(addr_t *pgd_va){
+	virtual_machine* _curr_vm = get_curr_vm();
 #ifdef DEBUG_MMU
 	printf("\n\t\t\tHypercall new PGD\n\t\t pgd:%x ", pgd_va);
 #endif
@@ -109,11 +109,11 @@ void hypercall_dyn_new_pgd(addr_t *pgd_va)
 	uint32_t i, end, table2_idx ;
 	addr_t *master_pgd_va;
-	addr_t phys_start = curr_vm->config->firmware->pstart;
-	addr_t page_offset = curr_vm->guest_info.page_offset;
+	addr_t phys_start = _curr_vm->config->firmware->pstart;
+	addr_t page_offset = _curr_vm->guest_info.page_offset;
 	addr_t linux_va;
 	/*Get master page table*/
-	master_pgd_va = (addr_t *)(curr_vm->config->pa_initial_l1_offset + page_offset);
+	master_pgd_va = (addr_t *)(_curr_vm->config->pa_initial_l1_offset + page_offset);
 	addr_t *l1_pt_entry_for_desc = (addr_t *)&master_pgd_va[(addr_t)pgd_va >> MMU_L1_SECTION_SHIFT];
 	uint32_t l1_desc_entry = *l1_pt_entry_for_desc;
 
@@ -177,7 +177,7 @@ void hypercall_dyn_new_pgd(addr_t *pgd_va)
 
 	uint32_t l2_entry_idx = (((uint32_t)pgd_va << 12) >> 24) + table2_idx;
 
-	uint32_t *l2_page_entry = (addr_t *)(mmu_guest_pa_to_va(table2_pa & L2_BASE_MASK, (curr_vm->config)));
+	uint32_t *l2_page_entry = (addr_t *)(mmu_guest_pa_to_va(table2_pa & L2_BASE_MASK, (_curr_vm->config)));
 	uint32_t page_pa = MMU_L2_SMALL_ADDR(l2_page_entry[l2_entry_idx]);
 	addr_t clean_va;
 
@@ -221,8 +221,7 @@ void hypercall_dyn_new_pgd(addr_t *pgd_va)
 
 /*In ARM Linux, pmd refers to pgd, the ARM L1 page table.
 *Linux maps 2 pmds at a time */
-void hypercall_dyn_set_pmd(addr_t *pmd, uint32_t desc)
-{
+void hypercall_dyn_set_pmd(addr_t *pmd, uint32_t desc){
 #ifdef DEBUG_MMU
 	printf("\n\t\t\tHypercall set PMD\n\t\t pmd:%x val:%x ", pmd, desc);
 #endif
@@ -231,8 +230,9 @@ void hypercall_dyn_set_pmd(addr_t *pmd, uint32_t desc)
 	addr_t curr_pgd_pa, *pgd_va, attrs;
 	uint32_t l1_pt_idx_for_desc, l1_desc_entry, phys_start;
-	phys_start = curr_vm->config->firmware->pstart;
-	addr_t page_offset = curr_vm->guest_info.page_offset;
+	virtual_machine* _curr_vm = get_curr_vm();
+	phys_start = _curr_vm->config->firmware->pstart;
+	addr_t page_offset = _curr_vm->guest_info.page_offset;
 	uint32_t page_offset_idx = (page_offset >> MMU_L1_SECTION_SHIFT) * 4;
 
 	/*Page attributes*/
@@ -245,7 +245,7 @@ void hypercall_dyn_set_pmd(addr_t *pmd, uint32_t desc)
 	/*Get current page table*/
 	COP_READ(COP_SYSTEM, COP_SYSTEM_TRANSLATION_TABLE0, (uint32_t)curr_pgd_pa);
-	addr_t master_pgd_va = (curr_vm->config->pa_initial_l1_offset + page_offset);
+	addr_t master_pgd_va = (_curr_vm->config->pa_initial_l1_offset + page_offset);
 
 	/*Switch to the page table that we want to modify if we are not in it*/
 	if((LINUX_PA((addr_t)pmd & L1_BASE_MASK)) != (curr_pgd_pa)){
@@ -345,7 +345,7 @@ void hypercall_dyn_set_pmd(addr_t *pmd, uint32_t desc)
 	addr_t desc_va_idx = MMU_L1_SECTION_IDX((addr_t)desc_va);
 
 	addr_t l2pt_pa = MMU_L1_PT_ADDR(pgd_va[desc_va_idx]);
-	addr_t *l2pt_va = (addr_t *)(mmu_guest_pa_to_va(l2pt_pa, (curr_vm->config)));
+	addr_t *l2pt_va = (addr_t *)(mmu_guest_pa_to_va(l2pt_pa, (_curr_vm->config)));
 	uint32_t l2_idx = ((uint32_t)l1_entry << 12) >> 24;
 	uint32_t l2entry_desc = l2pt_va[l2_idx];
 
@@ -441,14 +441,15 @@ void hypercall_dyn_set_pmd(addr_t *pmd, uint32_t desc)
 
 /*va is the virtual address of the page table entry for Linux pages;
 *the physical pages are located 0x800 below */
-void hypercall_dyn_set_pte(addr_t *l2pt_linux_entry_va, uint32_t linux_pte, uint32_t phys_pte)
-{
+void hypercall_dyn_set_pte(addr_t *l2pt_linux_entry_va, uint32_t linux_pte, uint32_t phys_pte){
 #ifdef DEBUG_MMU
 	printf("\n\t\t\tHypercall set PTE\n\t\t va:%x linux_pte:%x phys_pte:%x ", l2pt_linux_entry_va, phys_pte, linux_pte);
 #endif
-	addr_t phys_start = curr_vm->config->firmware->pstart;
-	uint32_t page_offset = curr_vm->guest_info.page_offset;
-	uint32_t guest_size = curr_vm->config->firmware->psize;
+
+	virtual_machine* _curr_vm = get_curr_vm();
+	addr_t phys_start = _curr_vm->config->firmware->pstart;
+	uint32_t page_offset = _curr_vm->guest_info.page_offset;
+	uint32_t guest_size = _curr_vm->config->firmware->psize;
 	uint32_t *l2pt_hw_entry_va = (addr_t *)((addr_t ) l2pt_linux_entry_va - 0x800);
 	addr_t l2pt_hw_entry_pa = ((addr_t)l2pt_hw_entry_va - page_offset + phys_start );
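The LINUX_PA/LINUX_VA macros above are plain linear translations between the guest's kernel virtual window and its physical load address. A self-contained illustration, with assumed values for vstart and pstart (the real ones come from the firmware config):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t vstart = 0xc0000000;	/* assumed kernel virtual start */
	uint32_t pstart = 0x80000000;	/* assumed guest physical start */

	/* LINUX_PA(va) = va - vstart + pstart: a constant offset. */
	uint32_t va = 0xc0123000;
	uint32_t pa = va - vstart + pstart;
	assert(pa == 0x80123000);

	/* LINUX_VA(pa) is the exact inverse. */
	assert(pa - pstart + vstart == va);
	return 0;
}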
diff --git a/core/hypervisor/hypercalls/hyp_interrupt.c b/core/hypervisor/hypercalls/hyp_interrupt.c
index b10369f..59c9d9d 100644
--- a/core/hypervisor/hypercalls/hyp_interrupt.c
+++ b/core/hypervisor/hypercalls/hyp_interrupt.c
@@ -1,6 +1,5 @@
 #include "hw.h"
 #include "hyper.h"
-extern virtual_machine *curr_vm;
 
 /*Interrupt operations*/
 
@@ -8,15 +7,17 @@ extern virtual_machine *curr_vm;
 *restore operation will restore the flags to the interrupt mask */
 void hypercall_interrupt_set(uint32_t interrupt, uint32_t op)
 {
+
+	virtual_machine* _curr_vm = get_curr_vm();
 	interrupt &= (ARM_IRQ_MASK | ARM_FIQ_MASK);
 	if(op==1) /*Enable*/
-		curr_vm->current_mode_state->ctx.psr &= ~(interrupt);
+		_curr_vm->current_mode_state->ctx.psr &= ~(interrupt);
 	else if(op==0) /*Disable*/
-		curr_vm->current_mode_state->ctx.psr |= interrupt;
+		_curr_vm->current_mode_state->ctx.psr |= interrupt;
 	else if(op ==2){ /*Restore: restores the flags according to param0*/
-		curr_vm->current_mode_state->ctx.psr &= ~(ARM_IRQ_MASK | ARM_FIQ_MASK) ;
-		curr_vm->current_mode_state->ctx.psr |= interrupt;
+		_curr_vm->current_mode_state->ctx.psr &= ~(ARM_IRQ_MASK | ARM_FIQ_MASK) ;
+		_curr_vm->current_mode_state->ctx.psr |= interrupt;
 	}
 	else
 		hyper_panic("Unknown interrupt operation", 1);
@@ -26,40 +27,42 @@ void hypercall_interrupt_set(uint32_t interrupt, uint32_t op)
 void hypercall_irq_save(uint32_t *param)
 {
 	uint32_t cpsr;
-
+	virtual_machine* _curr_vm = get_curr_vm();
 	/*Read CPSR from guest context*/
-	cpsr = curr_vm->current_mode_state->ctx.psr;
+	cpsr = _curr_vm->current_mode_state->ctx.psr;
 	/*Return value in reg0*/
-	curr_vm->current_mode_state->ctx.reg[0] = cpsr;
+	_curr_vm->current_mode_state->ctx.reg[0] = cpsr;
 	/*Disable IRQ*/
 	cpsr |= ARM_IRQ_MASK;
-	curr_vm->current_mode_state->ctx.psr = cpsr;
+	_curr_vm->current_mode_state->ctx.psr = cpsr;
 }
 
 void hypercall_irq_restore(uint32_t flag)
 {
+	virtual_machine* _curr_vm = get_curr_vm();
 	/*Only let guest restore IRQ, FIQ flags not mode*/
 	flag &= (ARM_IRQ_MASK | ARM_FIQ_MASK);
-	curr_vm->current_mode_state->ctx.psr &= ~(ARM_IRQ_MASK | ARM_FIQ_MASK) ;
-	curr_vm->current_mode_state->ctx.psr |= flag;
+	_curr_vm->current_mode_state->ctx.psr &= ~(ARM_IRQ_MASK | ARM_FIQ_MASK) ;
+	_curr_vm->current_mode_state->ctx.psr |= flag;
 }
 #endif
 
-void hypercall_end_interrupt () {
-	if (curr_vm->current_guest_mode != HC_GM_INTERRUPT) {
+void hypercall_end_interrupt(){
+	virtual_machine* _curr_vm = get_curr_vm();
+	if (_curr_vm->current_guest_mode != HC_GM_INTERRUPT) {
 		hyper_panic("Guest tried to end interrupt but not in interrupt mode.", 1);
 	}
-	if (curr_vm->interrupted_mode >= HC_NGUESTMODES) {
+	if (_curr_vm->interrupted_mode >= HC_NGUESTMODES) {
 		hyper_panic("Invalid interrupted mode value.", 2);
 	}
-	if (curr_vm->interrupted_mode == curr_vm->current_guest_mode) {
+	if (_curr_vm->interrupted_mode == _curr_vm->current_guest_mode) {
 		//hyper_panic("Interrupt mode interrupted itself??", 3);
 		printf("An interrupt inside the interrupt happened!\n");
 	}
 
-	change_guest_mode(curr_vm->interrupted_mode);
-	curr_vm->interrupted_mode = MODE_NONE;
+	change_guest_mode(_curr_vm->interrupted_mode);
+	_curr_vm->interrupted_mode = MODE_NONE;
 }
diff --git a/core/hypervisor/hypercalls/hyp_mmu.c b/core/hypervisor/hypercalls/hyp_mmu.c
index 250f8e9..30f02e6 100644
--- a/core/hypervisor/hypercalls/hyp_mmu.c
+++ b/core/hypervisor/hypercalls/hyp_mmu.c
@@ -3,7 +3,7 @@
 #include "hyper.h"
 #include "hyp_cache.h"
 
-extern virtual_machine *curr_vm;
+extern virtual_machine* get_curr_vm();
 extern uint32_t *flpt_va;
 extern uint32_t *slpt_va;
 
@@ -94,8 +94,9 @@ uint32_t hypercall_map_l1_section(addr_t va, addr_t sec_base_add, uint32_t attrs
 		return ERR_HYP_RESERVED_VA;
 
 	/*Check that the guest does not map physical addresses outside its range*/
-	uint32_t guest_start_pa = curr_vm->guest_info.phys_offset;
-	uint32_t guest_size = curr_vm->guest_info.guest_size;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t guest_start_pa = _curr_vm->guest_info.phys_offset;
+	uint32_t guest_size = _curr_vm->guest_info.guest_size;
 	printf("gadds %x %x\n", guest_start_pa, guest_size);
 	if(!(sec_base_add >= (guest_start_pa) && sec_base_add < (guest_start_pa + guest_size )))
 		return ERR_HYP_OUT_OF_RANGE_PA;
 
@@ -161,9 +162,9 @@ void hypercall_create_section(addr_t va, addr_t pa, uint32_t page_attr)
 #ifdef DEBUG_MMU
 	printf("\n\t\t\tHypercall create section\n\t\t va:%x pa:%x page_attr:%x ", va, pa, page_attr);
 #endif
-
-	uint32_t PHYS_OFFSET = curr_vm->guest_info.phys_offset;
-	uint32_t guest_size = curr_vm->guest_info.guest_size;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t PHYS_OFFSET = _curr_vm->guest_info.phys_offset;
+	uint32_t guest_size = _curr_vm->guest_info.guest_size;
 
 	/*Set domain in Section page for user */
 	page_attr &= ~MMU_L1_DOMAIN_MASK;
@@ -194,7 +195,7 @@ void hypercall_create_section(addr_t va, addr_t pa, uint32_t page_attr)
 
 }
 
-#define LINUX_VA(x) ((((x - curr_vm->guest_info.phys_offset) << 4) >> 24) + 0xc00)
+#define LINUX_VA(x) ((((x - _curr_vm->guest_info.phys_offset) << 4) >> 24) + 0xc00)
 
 /*Switch page table
 * TODO Add list of allowed page tables*/
@@ -207,7 +208,8 @@ void hypercall_switch_mm(addr_t table_base, uint32_t context_id)
 	uint32_t *l2_pt, lvl2_idx;
 	uint32_t pgd_va;
 	uint32_t pgd_size = 0x4000;
-	uint32_t PAGE_OFFSET = curr_vm->guest_info.page_offset;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t PAGE_OFFSET = _curr_vm->guest_info.page_offset;
 
 	/*First translate the physical address to linux virtual*/
 	pgd_va = LINUX_VA(table_base);
@@ -247,7 +249,8 @@ void hypercall_free_pgd(addr_t *pgd)
 #endif
//	printf("\n\tLinux kernel Free PGD: %x\n", pgd);
 	uint32_t pgd_size = 0x4000;
-	uint32_t PAGE_OFFSET = curr_vm->guest_info.page_offset;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t PAGE_OFFSET = _curr_vm->guest_info.page_offset;
 
 	/*Check page address*/
 	if( (uint32_t)pgd < PAGE_OFFSET || (uint32_t)pgd > (uint32_t)(HAL_VIRT_START - pgd_size) )
@@ -273,8 +276,8 @@ void hypercall_free_pgd(addr_t *pgd)
 
 	for(i=lvl2_idx; i < lvl2_idx + 4; i++){
 		l2_pt[i] |= (1 << 4 | 1 << 5); /*RW */
-		clean_va = (MMU_L2_SMALL_ADDR(l2_pt[i])) + curr_vm->guest_info.page_offset
-			-curr_vm->guest_info.phys_offset;
+		clean_va = (MMU_L2_SMALL_ADDR(l2_pt[i])) + _curr_vm->guest_info.page_offset
+			-_curr_vm->guest_info.phys_offset;
 		COP_WRITE(COP_SYSTEM, COP_DCACHE_INVALIDATE_MVA, &l2_pt[i]);
 		dsb();
 
@@ -297,8 +300,9 @@ void hypercall_new_pgd(addr_t *pgd)
 #endif
 	uint32_t slpt_pa, lvl2_idx, i;
 	uint32_t *l2_pt, clean;
-	uint32_t PAGE_OFFSET = curr_vm->guest_info.page_offset;
-	uint32_t PHYS_OFFSET = curr_vm->guest_info.phys_offset;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t PAGE_OFFSET = _curr_vm->guest_info.page_offset;
+	uint32_t PHYS_OFFSET = _curr_vm->guest_info.phys_offset;
 	uint32_t pgd_size = 0x4000;
 
 	/*Check page address*/
@@ -374,9 +378,10 @@ void hypercall_set_pmd(addr_t *pmd, uint32_t val)
 	printf("\n\t\t\tHypercall set PMD\n\t\t pmd:%x val:%x ", pmd, val);
 #endif
 	uint32_t offset, *l1_pt, slpt_pa, sect_idx;
-	uint32_t PAGE_OFFSET = curr_vm->guest_info.page_offset;
-	uint32_t PHYS_OFFSET = curr_vm->guest_info.phys_offset;
-	uint32_t guest_size = curr_vm->guest_info.guest_size;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t PAGE_OFFSET = _curr_vm->guest_info.page_offset;
+	uint32_t PHYS_OFFSET = _curr_vm->guest_info.phys_offset;
+	uint32_t guest_size = _curr_vm->guest_info.guest_size;
 
 	/*Security Checks*/
 	uint32_t pa = MMU_L1_SECTION_ADDR(val);
@@ -487,9 +492,10 @@ void hypercall_set_pte(addr_t *va, uint32_t linux_pte, uint32_t phys_pte)
 	printf("\n\t\t\tHypercall set PTE\n\t\t va:%x linux_pte:%x phys_pte:%x ", va, phys_pte, linux_pte);
 #endif
 	uint32_t *phys_va = (uint32_t *)((uint32_t)va - 0x800);
-	uint32_t PAGE_OFFSET = curr_vm->guest_info.page_offset;
-	uint32_t PHYS_OFFSET = curr_vm->guest_info.phys_offset;
-	uint32_t guest_size = curr_vm->guest_info.guest_size;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t PAGE_OFFSET = _curr_vm->guest_info.page_offset;
+	uint32_t PHYS_OFFSET = _curr_vm->guest_info.phys_offset;
+	uint32_t guest_size = _curr_vm->guest_info.guest_size;
 
 	/*Security Checks*/
 	uint32_t pa = MMU_L2_SMALL_ADDR(phys_pte);
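Note that LINUX_VA(x) in hyp_mmu.c above, unlike the macro of the same name in hyp_dmmu.c, works in 1 MiB section numbers: the << 4 / >> 24 pair masks the section index to 8 bits (implicitly assuming the guest occupies at most 256 MiB), and the + 0xc00 rebases it to the 0xc0000000 linear map. A worked example with an assumed phys_offset:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t phys_offset = 0x80000000;	/* assumed guest physical start */
	uint32_t table_base  = 0x80700000;	/* a pgd 7 MiB into guest RAM */

	/* (((x - phys_offset) << 4) >> 24) + 0xc00, as in the macro above */
	uint32_t va_section = (((table_base - phys_offset) << 4) >> 24) + 0xc00;
	assert(va_section == 0xc07);	/* the 1 MiB section of VA 0xc0700000 */
	return 0;
}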
diff --git a/core/hypervisor/hypercalls/hyp_rpc.c b/core/hypervisor/hypercalls/hyp_rpc.c
index e904f09..90e454d 100644
--- a/core/hypervisor/hypercalls/hyp_rpc.c
+++ b/core/hypervisor/hypercalls/hyp_rpc.c
@@ -2,34 +2,34 @@
 #include "hyper_config.h"
 #include "hyper.h"
 
-extern virtual_machine *curr_vm;
+extern virtual_machine* get_curr_vm();
 
 void hypercall_rpc(uint32_t rpc_op, void *arg){
 	const hc_rpc_handler *handler;
-
-	handler = curr_vm->config->rpc_handlers;
+	virtual_machine* _curr_vm = get_curr_vm();
+	handler = _curr_vm->config->rpc_handlers;
 	uint32_t handling_mode = handler->mode;
 
-	if(curr_vm->current_mode_state->rpc_to != MODE_NONE )
+	if(_curr_vm->current_mode_state->rpc_to != MODE_NONE )
 		hyper_panic("Guest trying to start RPC while being in one", 1);
 
-	if(curr_vm->current_guest_mode == handling_mode)
+	if(_curr_vm->current_guest_mode == handling_mode)
 		hyper_panic("Guest trying to send RPC to itself", 1);
 
-	if(curr_vm->mode_states[handling_mode].rpc_for != MODE_NONE)
+	if(_curr_vm->mode_states[handling_mode].rpc_for != MODE_NONE)
 		hyper_panic("Guest trying to send RPC to a mode that is already handling an RPC", 1);
 
 	if(rpc_op <=4){
-		curr_vm->current_mode_state->rpc_to = handling_mode;
-		curr_vm->mode_states[handling_mode].rpc_for = curr_vm->current_guest_mode;
+		_curr_vm->current_mode_state->rpc_to = handling_mode;
+		_curr_vm->mode_states[handling_mode].rpc_for = _curr_vm->current_guest_mode;
 		change_guest_mode(HC_GM_TRUSTED);
-		curr_vm->current_mode_state->ctx.reg[0] = rpc_op;
-		curr_vm->current_mode_state->ctx.reg[1] = (uint32_t)arg;
-		curr_vm->current_mode_state->ctx.pc = handler->entry_point;
-		curr_vm->current_mode_state->ctx.psr = 0xD0; /*USR mode, IRQ off*/
+		_curr_vm->current_mode_state->ctx.reg[0] = rpc_op;
+		_curr_vm->current_mode_state->ctx.reg[1] = (uint32_t)arg;
+		_curr_vm->current_mode_state->ctx.pc = handler->entry_point;
+		_curr_vm->current_mode_state->ctx.psr = 0xD0; /*USR mode, IRQ off*/
 	}
 	else{
 		hyper_panic("Disallowed RPC operation\n",1);
@@ -39,19 +39,19 @@ void hypercall_rpc(uint32_t rpc_op, void *arg){
 }
 
 void hypercall_end_rpc(){
-
-	uint32_t calling_mode = curr_vm->current_mode_state->rpc_for;
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t calling_mode = _curr_vm->current_mode_state->rpc_for;
 
 	if(calling_mode == MODE_NONE )
 		hyper_panic("Guest ended RPC without being in one", 1);
 
-	if(curr_vm->mode_states[calling_mode].rpc_to != curr_vm->current_guest_mode )
+	if(_curr_vm->mode_states[calling_mode].rpc_to != _curr_vm->current_guest_mode )
 		hyper_panic("Guest trying to end RPC but caller did not start one", 1);
 
-	calling_mode = curr_vm->current_mode_state->rpc_for;
+	calling_mode = _curr_vm->current_mode_state->rpc_for;
 
-	curr_vm->current_mode_state->rpc_for = MODE_NONE;
-	curr_vm->mode_states[calling_mode].rpc_to = MODE_NONE;
+	_curr_vm->current_mode_state->rpc_for = MODE_NONE;
+	_curr_vm->mode_states[calling_mode].rpc_to = MODE_NONE;
 
 	change_guest_mode(calling_mode);
 
diff --git a/core/hypervisor/hypercalls/hypercalls.c b/core/hypervisor/hypercalls/hypercalls.c
index 8ed80d8..8cb0449 100644
--- a/core/hypervisor/hypercalls/hypercalls.c
+++ b/core/hypervisor/hypercalls/hypercalls.c
@@ -1,7 +1,7 @@
 #include "hw.h"
 #include "hyper.h"
 
-extern virtual_machine *curr_vm;
+extern virtual_machine* get_curr_vm();
 extern uint32_t *flpt_va;
 extern uint32_t *slpt_va;
 
@@ -11,17 +11,19 @@ void change_guest_mode (uint32_t mode)
 	if(mode >= HC_NGUESTMODES)
 		hyper_panic("Trying to switch to unknown guest mode", 1);
 	uint32_t domac;
-	curr_vm->current_mode_state = &curr_vm->mode_states[mode];
-	cpu_context_current_set(&(curr_vm->current_mode_state->ctx));
-	curr_vm->current_guest_mode = mode;
-	domac = curr_vm->current_mode_state->mode_config->domain_ac;
+	virtual_machine* _curr_vm = get_curr_vm();
+	_curr_vm->current_mode_state = &_curr_vm->mode_states[mode];
+	cpu_context_current_set(&(_curr_vm->current_mode_state->ctx));
+	_curr_vm->current_guest_mode = mode;
+	domac = _curr_vm->current_mode_state->mode_config->domain_ac;
 	COP_WRITE(COP_SYSTEM, COP_SYSTEM_DOMAIN, domac);
 }
 
 /*
 void hypercall_register_handler(uint32_t handler)
 {
+	virtual_machine* _curr_vm = get_curr_vm();
 	printf("Registering guest tick handler: %x \n", handler);
-	curr_vm->guest_tick_handler = handler;
+	_curr_vm->guest_tick_handler = handler;
 }
 */
 
@@ -47,13 +49,14 @@ void hypercall_guest_init(boot_info *info)
 	info->cpu_mmf= mmf;
 	info->cpu_cr = cr;
 
-	curr_vm->guest_info.nr_syscalls = (uint32_t)info->guest.nr_syscalls;
-	curr_vm->guest_info.page_offset = info->guest.page_offset;
-	curr_vm->guest_info.phys_offset = info->guest.phys_offset;
-	curr_vm->guest_info.vmalloc_end = info->guest.vmalloc_end;
-	curr_vm->guest_info.guest_size = info->guest.guest_size;
+	virtual_machine* _curr_vm = get_curr_vm();
+	_curr_vm->guest_info.nr_syscalls = (uint32_t)info->guest.nr_syscalls;
+	_curr_vm->guest_info.page_offset = info->guest.page_offset;
+	_curr_vm->guest_info.phys_offset = info->guest.phys_offset;
+	_curr_vm->guest_info.vmalloc_end = info->guest.vmalloc_end;
+	_curr_vm->guest_info.guest_size = info->guest.guest_size;
 
-	curr_vm->exception_vector = (uint32_t *)info->guest.page_offset;
+	_curr_vm->exception_vector = (uint32_t *)info->guest.page_offset;
 
 #ifdef LINUX
 	//clear_linux_mappings();
@@ -70,7 +73,8 @@ void hypercall_restore_regs(uint32_t *regs)
 	uint32_t *context;
 	uint32_t i = 16;
 
-	context = &curr_vm->current_mode_state->ctx.reg[0];
+	virtual_machine* _curr_vm = get_curr_vm();
+	context = &_curr_vm->current_mode_state->ctx.reg[0];
 
 	while(i > 0){
 		*context++ = *regs++;
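change_guest_mode() above installs the per-mode domain_ac value in the ARM Domain Access Control Register through COP_WRITE(COP_SYSTEM, COP_SYSTEM_DOMAIN, ...). The DACR packs 2 bits per domain (0b00 no access, 0b01 client, 0b11 manager). A small sketch of how such a value can be composed; the domain numbers are illustrative, and the real per-mode values come from mode_config->domain_ac:

#include <stdint.h>

#define DACR_NO_ACCESS	0u	/* accesses fault */
#define DACR_CLIENT	1u	/* accesses checked against page permission bits */
#define DACR_MANAGER	3u	/* accesses never checked */

/* Set the 2-bit access field of one domain in a DACR image. */
static inline uint32_t dacr_set(uint32_t dacr, unsigned domain, uint32_t ac)
{
	dacr &= ~(3u << (2 * domain));		/* clear the domain's field */
	return dacr | (ac << (2 * domain));	/* install the new access value */
}

/* e.g. a kernel-mode value: client access to domain 0, nothing else. */
static uint32_t example_domac(void)
{
	return dacr_set(0u, 0, DACR_CLIENT);	/* == 0x1 */
}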
@@ -92,7 +96,8 @@ void hypercall_restore_linux_regs(uint32_t return_value, BOOL syscall)
 	if(syscall)
 		offset = 8;
 
-	sp = (uint32_t *)curr_vm->mode_states[HC_GM_KERNEL].ctx.sp;
+	virtual_machine* _curr_vm = get_curr_vm();
+	sp = (uint32_t *)_curr_vm->mode_states[HC_GM_KERNEL].ctx.sp;
 	uint32_t size = sizeof(uint32_t)*17; //17 registers to be restored from pointer
 
 	if(((uint32_t)sp < 0xC0000000) || ((uint32_t)sp > (uint32_t)(HAL_VIRT_START - size)))
@@ -113,7 +118,7 @@ void hypercall_restore_linux_regs(uint32_t return_value, BOOL syscall)
 
 	if(kernel_space){
 		//debug("Switching to KERNEL mode!\n");
-		context = &curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[0];
+		context = &_curr_vm->mode_states[HC_GM_KERNEL].ctx.reg[0];
 		i = 13;
 
 		/*Restore register r0-r12, reuse sp and lr
@@ -127,11 +132,11 @@ void hypercall_restore_linux_regs(uint32_t return_value, BOOL syscall)
 
 		/*Code originally ran in SVC mode; here we only make sure it
 		 * can run in virtual kernel mode*/
-		curr_vm->mode_states[HC_GM_KERNEL].ctx.psr = 0xFFFFFFF0 & mode; //force user mode //TODO: check current mode instead; may not be valid after the first time
-		curr_vm->mode_states[HC_GM_KERNEL].ctx.pc = stack_pc;
+		_curr_vm->mode_states[HC_GM_KERNEL].ctx.psr = 0xFFFFFFF0 & mode; //force user mode //TODO: check current mode instead; may not be valid after the first time
+		_curr_vm->mode_states[HC_GM_KERNEL].ctx.pc = stack_pc;
 
 		/*Adjust kernel stack pointer*/
-		curr_vm->mode_states[HC_GM_KERNEL].ctx.sp += (18*4); // Frame size
+		_curr_vm->mode_states[HC_GM_KERNEL].ctx.sp += (18*4); // Frame size
 
 		change_guest_mode(HC_GM_KERNEL);
 	}
@@ -139,13 +144,13 @@ void hypercall_restore_linux_regs(uint32_t return_value, BOOL syscall)
 	else if (!(kernel_space)){
 		//debug("Switching to USER mode!\n");
 		if(syscall){ //this means: skip r0
-			curr_vm->mode_states[HC_GM_TASK].ctx.reg[0] = return_value;
-			context = &curr_vm->mode_states[HC_GM_TASK].ctx.reg[1];
+			_curr_vm->mode_states[HC_GM_TASK].ctx.reg[0] = return_value;
+			context = &_curr_vm->mode_states[HC_GM_TASK].ctx.reg[1];
 			i = 15; //saves r1-pc
 			sp += 3; //adjust sp to skip arg 4, 5 and r0
 		}
 		else{
-			context = (uint32_t *)&curr_vm->mode_states[HC_GM_TASK].ctx;
+			context = (uint32_t *)&_curr_vm->mode_states[HC_GM_TASK].ctx;
 			i = 16; //saves r0-pc
 		}
 
@@ -156,10 +161,10 @@ void hypercall_restore_linux_regs(uint32_t return_value, BOOL syscall)
 			i--;
 		}
 
-		curr_vm->mode_states[HC_GM_TASK].ctx.psr = 0xFFFFFFF0 & mode; // Make sure of user mode
+		_curr_vm->mode_states[HC_GM_TASK].ctx.psr = 0xFFFFFFF0 & mode; // Make sure of user mode
 
 		/*Adjust kernel stack pointer*/
-		curr_vm->mode_states[HC_GM_KERNEL].ctx.sp += (18*4 + offset); // Frame size + offset (2 swi args)
+		_curr_vm->mode_states[HC_GM_KERNEL].ctx.sp += (18*4 + offset); // Frame size + offset (2 swi args)
 
 		change_guest_mode(HC_GM_TASK);
 	}
 	else
@@ -174,9 +179,9 @@
 void terminate(int number){
 	while(1); //get stuck here
 }
 
-void hypercall_num_error (uint32_t hypercall_num)
-{
-	uint32_t addr = (curr_vm->current_mode_state->ctx.pc -4);
+void hypercall_num_error (uint32_t hypercall_num){
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t addr = (_curr_vm->current_mode_state->ctx.pc -4);
 
 	printf ("Unknown hypercall %d originated at 0x%x, aborting", hypercall_num, addr);
 	terminate(1);
diff --git a/core/hypervisor/init_slave.c b/core/hypervisor/init_slave.c
index c18b702..89fbc89 100644
--- a/core/hypervisor/init_slave.c
+++ b/core/hypervisor/init_slave.c
@@ -1,5 +1,3 @@
-//Goes inside core/hypervisor
-
 #include 
 #include "hyper.h"
 #include "guest_blob.h"
@@ -10,14 +8,14 @@
 extern int __hyper_pt_start__;
 extern uint32_t l2_index_p;
 
-/*Pointers to start of first and second level page tables.
- *Defined in linker script. */
-uint32_t *flpt_va = (uint32_t *)(&__hyper_pt_start__);
-uint32_t *slpt_va = (uint32_t *)((uint32_t)&__hyper_pt_start__ + 0x4000); //16 KiB Page offset
-
 extern memory_layout_entry * memory_padr_layout;
 
+extern hc_config minimal_config;
+
+extern uint32_t *flpt_va;
+extern uint32_t *slpt_va; //16 KiB Page offset
 
 //We will need one virtual machine for each core.
+//TODO: Why not use an array?
 virtual_machine vm_0;
 virtual_machine vm_1;
 virtual_machine vm_2;
@@ -89,7 +87,7 @@ void guests_init_multicore(){
 	addr_t guest_pstart = curr_vm->config->firmware->pstart;
 	addr_t guest_psize = curr_vm->config->firmware->psize;
 
-	/* KTH CHANGES 
+	/* KTH CHANGES
 	 * The hypervisor must always be able to read from/write to the guest page
 	 * tables. For now, the guest page tables can be written into the guest
 	 * memory anywhere. In the future we probably need more master page tables,
@@ -111,15 +109,15 @@ void guests_init_multicore(){
 	//on both sides of the row below. Is this a bug or pedagogic code in some way?
 	va_offset + SECTION_SIZE <= guest_psize + SECTION_SIZE; /* +1 MiB at end for L1PT */
 	va_offset += SECTION_SIZE){
-	    uint32_t offset, pmd;
-	    uint32_t va = vm_0.config->reserved_va_for_pt_access_start + va_offset;
-	    uint32_t pa = guest_pstart + va_offset;
-	    pt_create_section(flpt_va, va, pa, MLT_HYPER_RAM);
-
-	    /* Invalidate the newly created entries. */
-	    offset = ((va >> MMU_L1_SECTION_SHIFT)*4);
-	    pmd = (uint32_t *)((uint32_t)flpt_va + offset);
-	    COP_WRITE(COP_SYSTEM, COP_DCACHE_INVALIDATE_MVA, pmd);
+		uint32_t offset, pmd;
+		uint32_t va = vm_0.config->reserved_va_for_pt_access_start + va_offset;
+		uint32_t pa = guest_pstart + va_offset;
+		pt_create_section(flpt_va, va, pa, MLT_HYPER_RAM);
+
+		/* Invalidate the newly created entries. */
+		offset = ((va >> MMU_L1_SECTION_SHIFT)*4);
+		pmd = (uint32_t *)((uint32_t)flpt_va + offset);
+		COP_WRITE(COP_SYSTEM, COP_DCACHE_INVALIDATE_MVA, pmd);
 	}
 
 	memory_commit();
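The TODO above in init_slave.c ("Why not use an array?") could be resolved by folding vm_0..vm_3 into one per-core table. A possible sketch, assuming a get_core_id() helper exists (hypothetical; it could be the MPIDR read shown earlier):

#include "hyper.h"	/* assumed to provide virtual_machine and uint32_t */

#define NUM_CORES 4

extern uint32_t get_core_id(void);	/* hypothetical MPIDR-based helper */

virtual_machine vms[NUM_CORES];		/* would replace vm_0 .. vm_3 */

virtual_machine* get_curr_vm()
{
	return &vms[get_core_id() % NUM_CORES];
}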
@@ -132,8 +130,8 @@ void guests_init_multicore(){
 	//to start from different numbers depending on the index of the guest.
 	dmmu_entry_t * bft = (dmmu_entry_t *) DMMU_BFT_BASE_VA;
 	for (i=0; i*4096<0x8000; i++) {
-	    bft[PA_TO_PH_BLOCK((uint32_t)GET_PHYS(slpt_va) + i*4096)].type = PAGE_INFO_TYPE_L2PT;
-	    bft[PA_TO_PH_BLOCK((uint32_t)GET_PHYS(slpt_va) + i*4096)].refcnt = 1;
+		bft[PA_TO_PH_BLOCK((uint32_t)GET_PHYS(slpt_va) + i*4096)].type = PAGE_INFO_TYPE_L2PT;
+		bft[PA_TO_PH_BLOCK((uint32_t)GET_PHYS(slpt_va) + i*4096)].refcnt = 1;
 	}
 
 	/* At this point we are finished initializing the master page table, and can
diff --git a/core/hypervisor/linux/linux_init.c b/core/hypervisor/linux/linux_init.c
index d6a1317..395a620 100644
--- a/core/hypervisor/linux/linux_init.c
+++ b/core/hypervisor/linux/linux_init.c
@@ -25,10 +25,12 @@ const unsigned long syscall_restart_code[2] = {
 };
 
 void clear_linux_mappings()
 {
-	uint32_t PAGE_OFFSET = curr_vm->guest_info.page_offset;
-	uint32_t VMALLOC_END = curr_vm->guest_info.vmalloc_end;
-	uint32_t guest_size = curr_vm->guest_info.guest_size;
-	uint32_t MODULES_VADDR = (curr_vm->guest_info.page_offset - 16*1024*1024);
+	virtual_machine* _curr_vm = get_curr_vm();
+
+	uint32_t PAGE_OFFSET = _curr_vm->guest_info.page_offset;
+	uint32_t VMALLOC_END = _curr_vm->guest_info.vmalloc_end;
+	uint32_t guest_size = _curr_vm->guest_info.guest_size;
+	uint32_t MODULES_VADDR = (_curr_vm->guest_info.page_offset - 16*1024*1024);
 
 	uint32_t address;
 	uint32_t offset = 0;
@@ -71,11 +73,13 @@ void clear_linux_mappings()
 //return value.
 void dmmu_clear_linux_mappings()
 {
-	addr_t guest_vstart = curr_vm->config->firmware->vstart;
-	addr_t guest_psize = curr_vm->config->firmware->psize;
+	virtual_machine* _curr_vm = get_curr_vm();
+
+	addr_t guest_vstart = _curr_vm->config->firmware->vstart;
+	addr_t guest_psize = _curr_vm->config->firmware->psize;
 
 	uint32_t address;
-	uint32_t VMALLOC_END = curr_vm->guest_info.vmalloc_end;
+	uint32_t VMALLOC_END = _curr_vm->guest_info.vmalloc_end;
 
 	/*
 	 * Clear out all the mappings below the kernel image. Maps
@@ -139,8 +143,10 @@
 uint32_t linux_l2_index_p = 0;
 
 addr_t linux_pt_get_empty_l2()
 {
-	uint32_t pa_l2_pt = curr_vm->config->firmware->pstart + curr_vm->config->pa_initial_l2_offset;
-	uint32_t va_l2_pt = mmu_guest_pa_to_va(pa_l2_pt, curr_vm->config);
+
+	virtual_machine* _curr_vm = get_curr_vm();
+	uint32_t pa_l2_pt = _curr_vm->config->firmware->pstart + _curr_vm->config->pa_initial_l2_offset;
+	uint32_t va_l2_pt = mmu_guest_pa_to_va(pa_l2_pt, _curr_vm->config);
 
 	if((linux_l2_index_p * 0x400) > SECTION_SIZE){ // Set max size of L2 pages
 		printf("No more space for more L2s\n");
 		while(1); //hang
@@ -165,9 +171,10 @@ void linux_init_dmmu()
 	uint32_t error;
 	uint32_t sect_attrs, sect_attrs_ro, small_attrs, small_attrs_ro, page_attrs,table2_idx, i;
 	addr_t table2_pa;
-	addr_t guest_vstart = curr_vm->config->firmware->vstart;
-	addr_t guest_pstart = curr_vm->config->firmware->pstart;
-	addr_t guest_psize = curr_vm->config->firmware->psize;
+	virtual_machine* _curr_vm = get_curr_vm();
+	addr_t guest_vstart = _curr_vm->config->firmware->vstart;
+	addr_t guest_pstart = _curr_vm->config->firmware->pstart;
+	addr_t guest_psize = _curr_vm->config->firmware->psize;
 	/*Linux specific mapping*/
 	/*Section page with user RW in kernel domain with Cache and Buffer*/
 	sect_attrs = MMU_L1_TYPE_SECTION;
@@ -208,10 +215,10 @@ void linux_init_dmmu()
 		dmmu_map_L1_section(guest_vstart+offset, guest_pstart+offset, sect_attrs);
 	}
 
-	addr_t reserved_l2_pts_pa = curr_vm->config->pa_initial_l2_offset + guest_pstart;
+	addr_t reserved_l2_pts_pa = _curr_vm->config->pa_initial_l2_offset + guest_pstart;
 
 	/*Set the whole 1 MiB reserved address region in Linux as L2_pt*/
-	addr_t reserved_l2_pts_va = mmu_guest_pa_to_va(reserved_l2_pts_pa, curr_vm->config);
+	addr_t reserved_l2_pts_va = mmu_guest_pa_to_va(reserved_l2_pts_pa, _curr_vm->config);
 
 	/*Memset the reserved L2 pages to 0;
 	 *there is a lot of garbage occupying the L2 page addresses on real HW
@@ -247,7 +254,7 @@ void linux_init_dmmu()
 	uint32_t end = table2_idx + 0x100;
 	uint32_t page_pa;
 	for(i = table2_idx, page_pa = offset; i < end ; i++, page_pa+=0x1000){
-		if(!(curr_vm->config->pa_initial_l2_offset <= page_pa && page_pa <= ( curr_vm->config->pa_initial_l2_offset|0x0000F000)))
+		if(!(_curr_vm->config->pa_initial_l2_offset <= page_pa && page_pa <= (_curr_vm->config->pa_initial_l2_offset|0x0000F000)))
 			dmmu_l2_map_entry(table2_pa, i, page_pa + guest_pstart, small_attrs);
 		else
 			dmmu_l2_map_entry(table2_pa, i, page_pa + guest_pstart, small_attrs_ro);
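linux_pt_get_empty_l2() above hands out 1 KiB L2 tables from a single reserved 1 MiB section, so it can serve at most SECTION_SIZE / 0x400 = 1024 tables. One hedged observation, since the allocation counter's updates are only partly visible in these hunks: the guard uses '>', which, if linux_l2_index_p counts handed-out tables from zero, would still accept a table starting exactly at SECTION_SIZE; '>=' looks safer. A minimal model of the allocator's arithmetic:

#include <stdint.h>
#include <stdio.h>

#define SECTION_SIZE	0x100000	/* 1 MiB, as in the patch */
#define L2_TABLE_SIZE	0x400		/* 1 KiB per ARM L2 page table */

static uint32_t l2_index;		/* stand-in for linux_l2_index_p */

/* Return the byte offset of the next free L2 table, or -1 when full. */
static int32_t next_l2_offset(void)
{
	uint32_t off = l2_index * L2_TABLE_SIZE;
	if (off >= SECTION_SIZE)
		return -1;		/* reserved 1 MiB region exhausted */
	l2_index++;
	return (int32_t)off;
}

int main(void)
{
	printf("capacity: %d tables\n", SECTION_SIZE / L2_TABLE_SIZE);	/* 1024 */
	printf("first offset: %d\n", (int)next_l2_offset());		/* 0 */
	return 0;
}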