Diffstat (limited to 'arch/x86/mm/mem_encrypt.c')
-rw-r--r--	arch/x86/mm/mem_encrypt.c	121
1 file changed, 74 insertions(+), 47 deletions(-)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index ff08dc463634..35487305d8af 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -20,6 +20,7 @@
 #include <linux/bitops.h>
 #include <linux/dma-mapping.h>
 #include <linux/virtio_config.h>
+#include <linux/cc_platform.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -143,7 +144,7 @@ void __init sme_unmap_bootdata(char *real_mode_data)
 	struct boot_params *boot_data;
 	unsigned long cmdline_paddr;
 
-	if (!sme_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		return;
 
 	/* Get the command line address before unmapping the real_mode_data */
@@ -163,7 +164,7 @@ void __init sme_map_bootdata(char *real_mode_data)
 	struct boot_params *boot_data;
 	unsigned long cmdline_paddr;
 
-	if (!sme_active())
+	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
 		return;
 
 	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);
@@ -193,7 +194,7 @@ void __init sme_early_init(void)
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
 
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		swiotlb_force = SWIOTLB_FORCE;
 }
 
@@ -202,7 +203,7 @@ void __init sev_setup_arch(void)
 	phys_addr_t total_mem = memblock_phys_mem_size();
 	unsigned long size;
 
-	if (!sev_active())
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return;
 
 	/*
@@ -228,28 +229,75 @@ void __init sev_setup_arch(void)
 	swiotlb_adjust_size(size);
 }
 
-static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
 {
-	pgprot_t old_prot, new_prot;
-	unsigned long pfn, pa, size;
-	pte_t new_pte;
+	unsigned long pfn = 0;
+	pgprot_t prot;
 
 	switch (level) {
 	case PG_LEVEL_4K:
 		pfn = pte_pfn(*kpte);
-		old_prot = pte_pgprot(*kpte);
+		prot = pte_pgprot(*kpte);
 		break;
 	case PG_LEVEL_2M:
 		pfn = pmd_pfn(*(pmd_t *)kpte);
-		old_prot = pmd_pgprot(*(pmd_t *)kpte);
+		prot = pmd_pgprot(*(pmd_t *)kpte);
 		break;
 	case PG_LEVEL_1G:
 		pfn = pud_pfn(*(pud_t *)kpte);
-		old_prot = pud_pgprot(*(pud_t *)kpte);
+		prot = pud_pgprot(*(pud_t *)kpte);
 		break;
 	default:
-		return;
+		WARN_ONCE(1, "Invalid level for kpte\n");
+		return 0;
+	}
+
+	if (ret_prot)
+		*ret_prot = prot;
+
+	return pfn;
+}
+
+void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
+{
+#ifdef CONFIG_PARAVIRT
+	unsigned long sz = npages << PAGE_SHIFT;
+	unsigned long vaddr_end = vaddr + sz;
+
+	while (vaddr < vaddr_end) {
+		int psize, pmask, level;
+		unsigned long pfn;
+		pte_t *kpte;
+
+		kpte = lookup_address(vaddr, &level);
+		if (!kpte || pte_none(*kpte)) {
+			WARN_ONCE(1, "kpte lookup for vaddr\n");
+			return;
+		}
+
+		pfn = pg_level_to_pfn(level, kpte, NULL);
+		if (!pfn)
+			continue;
+
+		psize = page_level_size(level);
+		pmask = page_level_mask(level);
+
+		notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);
+
+		vaddr = (vaddr & pmask) + psize;
 	}
+#endif
+}
+
+static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+{
+	pgprot_t old_prot, new_prot;
+	unsigned long pfn, pa, size;
+	pte_t new_pte;
+
+	pfn = pg_level_to_pfn(level, kpte, &old_prot);
+	if (!pfn)
+		return;
 
 	new_prot = old_prot;
 	if (enc)
@@ -285,12 +333,13 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 static int __init early_set_memory_enc_dec(unsigned long vaddr,
 					   unsigned long size, bool enc)
 {
-	unsigned long vaddr_end, vaddr_next;
+	unsigned long vaddr_end, vaddr_next, start;
 	unsigned long psize, pmask;
 	int split_page_size_mask;
 	int level, ret;
 	pte_t *kpte;
 
+	start = vaddr;
 	vaddr_next = vaddr;
 	vaddr_end = vaddr + size;
 
@@ -345,6 +394,7 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
 
 	ret = 0;
 
+	notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
 out:
 	__flush_tlb_all();
 	return ret;
@@ -360,33 +410,9 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
 	return early_set_memory_enc_dec(vaddr, size, true);
 }
 
-/*
- * SME and SEV are very similar but they are not the same, so there are
- * times that the kernel will need to distinguish between SME and SEV. The
- * sme_active() and sev_active() functions are used for this. When a
- * distinction isn't needed, the mem_encrypt_active() function can be used.
- *
- * The trampoline code is a good example for this requirement. Before
- * paging is activated, SME will access all memory as decrypted, but SEV
- * will access all memory as encrypted. So, when APs are being brought
- * up under SME the trampoline area cannot be encrypted, whereas under SEV
- * the trampoline area must be encrypted.
- */
-bool sev_active(void)
-{
-	return sev_status & MSR_AMD64_SEV_ENABLED;
-}
-
-bool sme_active(void)
-{
-	return sme_me_mask && !sev_active();
-}
-EXPORT_SYMBOL_GPL(sev_active);
-
-/* Needs to be called from non-instrumentable code */
-bool noinstr sev_es_active(void)
+void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 {
-	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
+	notify_range_enc_status_changed(vaddr, npages, enc);
 }
 
 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
@@ -395,7 +421,7 @@ bool force_dma_unencrypted(struct device *dev)
 	/*
 	 * For SEV, all DMA must be to unencrypted addresses.
 	 */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		return true;
 
 	/*
@@ -403,7 +429,7 @@ bool force_dma_unencrypted(struct device *dev)
 	 * device does not support DMA to addresses that include the
 	 * encryption mask.
 	 */
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
 		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
 						dev->bus_dma_limit);
@@ -428,7 +454,7 @@ void __init mem_encrypt_free_decrypted_mem(void)
 	 * The unused memory range was mapped decrypted, change the encryption
 	 * attribute from decrypted to encrypted before freeing it.
 	 */
-	if (mem_encrypt_active()) {
+	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
 		r = set_memory_encrypted(vaddr, npages);
 		if (r) {
 			pr_warn("failed to free unused decrypted pages\n");
@@ -444,7 +470,7 @@ static void print_mem_encrypt_feature_info(void)
 	pr_info("AMD Memory Encryption Features active:");
 
 	/* Secure Memory Encryption */
-	if (sme_active()) {
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
 		/*
 		 * SME is mutually exclusive with any of the SEV
 		 * features below.
@@ -454,11 +480,11 @@ static void print_mem_encrypt_feature_info(void)
 	}
 
 	/* Secure Encrypted Virtualization */
-	if (sev_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
 		pr_cont(" SEV");
 
 	/* Encrypted Register State */
-	if (sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		pr_cont(" SEV-ES");
 
 	pr_cont("\n");
@@ -477,7 +503,8 @@ void __init mem_encrypt_init(void)
 	 * With SEV, we need to unroll the rep string I/O instructions,
 	 * but SEV-ES supports them through the #VC handler.
 	 */
-	if (sev_active() && !sev_es_active())
+	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+	    !cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		static_branch_enable(&sev_enable_key);
 
 	print_mem_encrypt_feature_info();
@@ -485,6 +512,6 @@ void __init mem_encrypt_init(void)
 
 int arch_has_restricted_virtio_memory_access(void)
 {
-	return sev_active();
+	return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
 }
 EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
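
For context on the conversion above: cc_platform_has() attribute checks replace the one-off predicates this patch deletes. The following stand-alone C sketch (not kernel code) models how each attribute might map onto the old helpers; the attribute logic is inferred from the removed sev_active()/sme_active()/sev_es_active() bodies in this very diff, while sme_me_mask, sev_status, and the MSR bit positions are user-space stand-ins for the kernel globals.

/*
 * Stand-alone sketch, not kernel code: maps the cc_platform_has()
 * attributes onto the predicates removed by this patch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum cc_attr {
	CC_ATTR_MEM_ENCRYPT,		/* any memory encryption (SME or SEV) */
	CC_ATTR_HOST_MEM_ENCRYPT,	/* host-side encryption: SME */
	CC_ATTR_GUEST_MEM_ENCRYPT,	/* guest-side encryption: SEV */
	CC_ATTR_GUEST_STATE_ENCRYPT,	/* encrypted register state: SEV-ES */
};

/* Bit positions as in the kernel's MSR_AMD64_SEV definitions. */
#define MSR_AMD64_SEV_ENABLED		(1ULL << 0)
#define MSR_AMD64_SEV_ES_ENABLED	(1ULL << 1)

static uint64_t sme_me_mask;	/* stand-in: non-zero once the C-bit mask is set */
static uint64_t sev_status;	/* stand-in: snapshot of MSR_AMD64_SEV */

static bool cc_platform_has(enum cc_attr attr)
{
	switch (attr) {
	case CC_ATTR_MEM_ENCRYPT:
		/* old mem_encrypt_active(): any encryption enabled */
		return sme_me_mask != 0;
	case CC_ATTR_HOST_MEM_ENCRYPT:
		/* old sme_active(): encryption enabled but not SEV */
		return sme_me_mask && !(sev_status & MSR_AMD64_SEV_ENABLED);
	case CC_ATTR_GUEST_MEM_ENCRYPT:
		/* old sev_active() */
		return sev_status & MSR_AMD64_SEV_ENABLED;
	case CC_ATTR_GUEST_STATE_ENCRYPT:
		/* old sev_es_active() */
		return sev_status & MSR_AMD64_SEV_ES_ENABLED;
	}
	return false;
}

int main(void)
{
	/* Pretend we are an SEV-ES guest: C-bit mask and both MSR bits set. */
	sme_me_mask = 1ULL << 47;
	sev_status  = MSR_AMD64_SEV_ENABLED | MSR_AMD64_SEV_ES_ENABLED;

	printf("HOST_MEM_ENCRYPT  (was sme_active):    %d\n",
	       cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
	printf("GUEST_MEM_ENCRYPT (was sev_active):    %d\n",
	       cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT));
	printf("GUEST_STATE_ENCRYPT (was sev_es_active): %d\n",
	       cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT));
	return 0;
}

Run as an SEV-ES guest snapshot, the sketch reports 0/1/1: SME and SEV stay distinguishable where their behavior differs (the removed trampoline comment's point), while callers that only care whether any encryption is active can use CC_ATTR_MEM_ENCRYPT.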
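The new notify_range_enc_status_changed() walks the range one mapping at a time: it looks up the mapping level for vaddr, issues one notification sized to the whole 4K/2M/1G mapping, then advances with vaddr = (vaddr & pmask) + psize. A toy user-space model of that walk is below; the fake mapping_size() stands in for lookup_address()/page_level_size(), so everything here is illustrative rather than kernel code.

/* Toy model of the walk in notify_range_enc_status_changed(). */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SIZE	(1UL << 21)	/* 2M mapping */

/* Stand-in for lookup_address()+page_level_size(): pretend [2M, 4M)
 * is one 2M mapping and everything else is 4K pages. */
static unsigned long mapping_size(unsigned long vaddr)
{
	return (vaddr >= PMD_SIZE && vaddr < 2 * PMD_SIZE) ? PMD_SIZE : PAGE_SIZE;
}

static void notify_range(unsigned long vaddr, int npages, int enc)
{
	unsigned long vaddr_end = vaddr + ((unsigned long)npages << PAGE_SHIFT);

	while (vaddr < vaddr_end) {
		unsigned long psize = mapping_size(vaddr);
		unsigned long pmask = ~(psize - 1);
		unsigned long pfn = (vaddr & pmask) >> PAGE_SHIFT;

		/* one notification per mapping, sized to the whole mapping */
		printf("notify pfn=0x%lx npages=%lu enc=%d\n",
		       pfn, psize >> PAGE_SHIFT, enc);

		/* round down to the mapping start, then skip past it */
		vaddr = (vaddr & pmask) + psize;
	}
}

int main(void)
{
	/* a 4-page range that starts two pages below the 2M mapping */
	notify_range(PMD_SIZE - 2 * PAGE_SIZE, 4, 1);
	return 0;
}

This prints two single-page notifications followed by one 512-page notification covering the entire 2M mapping, even though only part of that mapping lies inside the requested range: the notification granularity is the mapping, not the caller's byte range, exactly as in the patch's loop.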