diff options
author | Paolo Bonzini <pbonzini@redhat.com> | 2014-05-27 14:44:49 +0200 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2014-06-05 16:10:34 +0200 |
commit | e7e898a76aa00e2238b119ed2910442b1c3cacdd (patch) | |
tree | 26bc240a507389cd0360333c3706cf60cd942ad5 /target-i386 | |
parent | e8f6d00c30ed88910d0d985f4b2bf41654172ceb (diff) | |
download | hqemu-e7e898a76aa00e2238b119ed2910442b1c3cacdd.zip hqemu-e7e898a76aa00e2238b119ed2910442b1c3cacdd.tar.gz |
target-i386: simplify pte/vaddr calculation
They can be moved to after the dirty bit processing, and unified between
CR0.PG=1 and CR0.PG=0.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'target-i386')
-rw-r--r-- | target-i386/helper.c | 15 |
1 files changed, 7 insertions, 8 deletions
diff --git a/target-i386/helper.c b/target-i386/helper.c index c52eb5a..153a91b 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -527,7 +527,7 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, hwaddr paddr; uint64_t rsvd_mask = PG_HI_RSVD_MASK; uint32_t page_offset; - target_ulong vaddr, virt_addr; + target_ulong vaddr; is_user = mmu_idx == MMU_USER_IDX; #if defined(DEBUG_MMU) @@ -544,7 +544,6 @@ int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, pte = (uint32_t)pte; } #endif - virt_addr = addr & TARGET_PAGE_MASK; prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; page_size = 4096; goto do_mapping; @@ -748,9 +747,6 @@ do_check_protect: } stl_phys_notdirty(cs->as, pte_addr, pte); } - /* align to page_size */ - pte &= ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff); - virt_addr = addr & ~(page_size - 1); /* the page can be put in the TLB */ prot = PAGE_READ; @@ -771,11 +767,14 @@ do_check_protect: do_mapping: pte = pte & env->a20_mask; + /* align to page_size */ + pte &= PG_ADDRESS_MASK & ~(page_size - 1); + /* Even if 4MB pages, we map only one 4KB page in the cache to avoid filling it too fast */ - page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1); - paddr = (pte & TARGET_PAGE_MASK) + page_offset; - vaddr = virt_addr + page_offset; + vaddr = addr & TARGET_PAGE_MASK; + page_offset = vaddr & (page_size - 1); + paddr = pte + page_offset; tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size); return 0; |