Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/boot/dts/apm/apm-storm.dtsi |  4
-rw-r--r-- | arch/arm64/include/asm/kvm_arm.h       |  5
-rw-r--r-- | arch/arm64/include/asm/kvm_mmu.h       | 48
-rw-r--r-- | arch/arm64/include/asm/proc-fns.h      |  6
-rw-r--r-- | arch/arm64/include/asm/tlb.h           |  3
-rw-r--r-- | arch/arm64/include/asm/tlbflush.h      | 13
-rw-r--r-- | arch/arm64/kernel/efi.c                | 15
-rw-r--r-- | arch/arm64/kernel/head.S               |  2
-rw-r--r-- | arch/arm64/kernel/process.c            |  8
-rw-r--r-- | arch/arm64/mm/dma-mapping.c            | 12
10 files changed, 64 insertions, 52 deletions
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index f1ad9c2..a857794 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -622,7 +622,7 @@
 		};
 
 		sgenet0: ethernet@1f210000 {
-			compatible = "apm,xgene-enet";
+			compatible = "apm,xgene1-sgenet";
 			status = "disabled";
 			reg = <0x0 0x1f210000 0x0 0xd100>,
 			      <0x0 0x1f200000 0x0 0Xc300>,
@@ -636,7 +636,7 @@
 		};
 
 		xgenet: ethernet@1f610000 {
-			compatible = "apm,xgene-enet";
+			compatible = "apm,xgene1-xgenet";
 			status = "disabled";
 			reg = <0x0 0x1f610000 0x0 0xd100>,
 			      <0x0 0x1f600000 0x0 0Xc300>,
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 94674eb..54bb4ba 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -129,6 +129,9 @@
  * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
  * not known to exist and will break with this configuration.
  *
+ * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
+ * (see hyp-init.S).
+ *
  * Note that when using 4K pages, we concatenate two first level page tables
  * together.
  *
@@ -138,7 +141,6 @@
 #ifdef CONFIG_ARM64_64K_PAGES
 /*
  * Stage2 translation configuration:
- * 40bits output (PS = 2)
  * 40bits input (T0SZ = 24)
  * 64kB pages (TG0 = 1)
  * 2 level page tables (SL = 1)
@@ -150,7 +152,6 @@
 #else
 /*
  * Stage2 translation configuration:
- * 40bits output (PS = 2)
  * 40bits input (T0SZ = 24)
  * 4kB pages (TG0 = 0)
  * 3 level page tables (SL = 1)
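The two kvm_arm.h comment hunks above belong together: the hard-coded "40bits output (PS = 2)" lines are dropped because, as the new comment says, VTCR_EL2.PS is now filled in at boot from ID_AA64MMFR0_EL1.PARange. Below is a minimal C sketch of that fixup, assuming PS occupies VTCR_EL2 bits [18:16]; the real code is assembly in hyp-init.S, and the helper name here is hypothetical.

/* Illustrative only: the actual fixup is assembly in hyp-init.S. */
#define VTCR_EL2_PS_SHIFT	16
#define VTCR_EL2_PS_MASK	(7UL << VTCR_EL2_PS_SHIFT)

static inline u64 vtcr_fixup_ps(u64 vtcr)
{
	u64 parange;

	/* ID_AA64MMFR0_EL1.PARange sits in the low bits of the register */
	asm("mrs %0, id_aa64mmfr0_el1" : "=r" (parange));
	parange &= 7;

	/* Replace the build-time PS value with the CPU's real PARange */
	return (vtcr & ~VTCR_EL2_PS_MASK) | (parange << VTCR_EL2_PS_SHIFT);
}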
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6458b53..bbfb600 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
 #define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
+#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
 /*
  * If we are concatenating first level stage-2 page tables, we would have less
  * than or equal to 16 pointers in the fake PGD, because that's what the
@@ -171,43 +173,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define KVM_PREALLOC_LEVEL	(0)
 #endif
 
-/**
- * kvm_prealloc_hwpgd - allocate inital table for VTTBR
- * @kvm:	The KVM struct pointer for the VM.
- * @pgd:	The kernel pseudo pgd
- *
- * When the kernel uses more levels of page tables than the guest, we allocate
- * a fake PGD and pre-populate it to point to the next-level page table, which
- * will be the real initial page table pointed to by the VTTBR.
- *
- * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
- * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we
- * allocate 2 consecutive PUD pages.
- */
-static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
-{
-	unsigned int i;
-	unsigned long hwpgd;
-
-	if (KVM_PREALLOC_LEVEL == 0)
-		return 0;
-
-	hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
-	if (!hwpgd)
-		return -ENOMEM;
-
-	for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-		if (KVM_PREALLOC_LEVEL == 1)
-			pgd_populate(NULL, pgd + i,
-				     (pud_t *)hwpgd + i * PTRS_PER_PUD);
-		else if (KVM_PREALLOC_LEVEL == 2)
-			pud_populate(NULL, pud_offset(pgd, 0) + i,
-				     (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-	}
-
-	return 0;
-}
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
 	pgd_t *pgd = kvm->arch.pgd;
@@ -224,12 +189,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
 	return pmd_offset(pud, 0);
 }
 
-static inline void kvm_free_hwpgd(struct kvm *kvm)
+static inline unsigned int kvm_get_hwpgd_size(void)
 {
-	if (KVM_PREALLOC_LEVEL > 0) {
-		unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
-		free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
-	}
+	if (KVM_PREALLOC_LEVEL > 0)
+		return PTRS_PER_S2_PGD * PAGE_SIZE;
+	return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
 static inline bool kvm_page_empty(void *ptr)
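With kvm_prealloc_hwpgd() removed and kvm_free_hwpgd() turned into the kvm_get_hwpgd_size() query, allocating and freeing the hardware stage-2 PGD can move into the shared arm/arm64 KVM code. A sketch of what that caller side might look like, assuming helpers of this shape in arch/arm/kvm/mmu.c (which is outside this diff):

/* Sketch of the common-code callers; not part of this diff. */
static void *kvm_alloc_hwpgd(void)
{
	/* One size now covers both the folded and unfolded cases */
	return alloc_pages_exact(kvm_get_hwpgd_size(),
				 GFP_KERNEL | __GFP_ZERO);
}

static void kvm_free_hwpgd(void *hwpgd)
{
	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
}

alloc_pages_exact() grabs a power-of-two block before trimming the tail, so the alignment needed for concatenated first-level tables should still hold.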
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 9a8fd84..941c375 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -39,7 +39,11 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
 #include <asm/memory.h>
 
-#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
+#define cpu_switch_mm(pgd,mm)				\
+do {							\
+	BUG_ON(pgd == swapper_pg_dir);			\
+	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
+} while (0)
 
 #define cpu_get_pgd()					\
 ({							\
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index c028fe3..53d9c35 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -48,6 +48,7 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	pgtable_page_dtor(pte);
 	tlb_remove_entry(tlb, pte);
 }
@@ -56,6 +57,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
@@ -64,6 +66,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
+	__flush_tlb_pgtable(tlb->mm, addr);
 	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 4abe9b9..c3bb05b 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -144,6 +144,19 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 }
 
 /*
+ * Used to invalidate the TLB (walk caches) corresponding to intermediate page
+ * table levels (pgd/pud/pmd).
+ */
+static inline void __flush_tlb_pgtable(struct mm_struct *mm,
+				       unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+
+	dsb(ishst);
+	asm("tlbi	vae1is, %0" : : "r" (addr));
+	dsb(ish);
+}
+/*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
 static inline void update_mmu_cache(struct vm_area_struct *vma,
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index b42c7b4..ab21e0d 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	cpu_switch_mm(mm->pgd, mm);
+	if (mm == &init_mm)
+		cpu_set_reserved_ttbr0();
+	else
+		cpu_switch_mm(mm->pgd, mm);
+
 	flush_tlb_all();
 	if (icache_is_aivivt())
 		__flush_icache_all();
@@ -354,3 +358,12 @@ void efi_virtmap_unload(void)
 	efi_set_pgd(current->active_mm);
 	preempt_enable();
 }
+
+/*
+ * UpdateCapsule() depends on the system being shutdown via
+ * ResetSystem().
+ */
+bool efi_poweroff_required(void)
+{
+	return efi_enabled(EFI_RUNTIME_SERVICES);
+}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 8ce88e0..07f9305 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -585,8 +585,8 @@ ENDPROC(set_cpu_boot_mode_flag)
  * zeroing of .bss would clobber it.
  */
 	.pushsection	.data..cacheline_aligned
-ENTRY(__boot_cpu_mode)
 	.align	L1_CACHE_SHIFT
+ENTRY(__boot_cpu_mode)
 	.long	BOOT_CPU_MODE_EL2
 	.long	0
 	.popsection
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index fde9923..c6b1f3b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -21,6 +21,7 @@
 #include <stdarg.h>
 
 #include <linux/compat.h>
+#include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -150,6 +151,13 @@ void machine_restart(char *cmd)
 	local_irq_disable();
 	smp_send_stop();
 
+	/*
+	 * UpdateCapsule() depends on the system being reset via
+	 * ResetSystem().
+	 */
+	if (efi_enabled(EFI_RUNTIME_SERVICES))
+		efi_reboot(reboot_mode, NULL);
+
 	/* Now call the architecture specific reboot code. */
 	if (arm_pm_restart)
 		arm_pm_restart(reboot_mode, cmd);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 58e0c2b..ef7d112 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
+		if (flags & __GFP_ZERO)
+			memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA;
 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
+		void *addr;
 
 		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 			return NULL;
 
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return page_address(page);
+		addr = page_address(page);
+		if (flags & __GFP_ZERO)
+			memset(addr, 0, size);
+		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
 	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page);
+		void *addr = __alloc_from_pool(size, &page, flags);
 
 		if (addr)
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));
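The dma-mapping.c hunks above fix __GFP_ZERO being silently ignored on the atomic-pool and CMA paths (the swiotlb fallback already returns zeroed memory). A hypothetical caller that relies on the flag; every name here is made up for illustration:

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: the descriptor ring must start zeroed */
static void *foo_alloc_ring(struct device *dev, size_t size,
			    dma_addr_t *dma)
{
	/* With the fix above, this is zeroed on every allocation path */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL | __GFP_ZERO);
}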