Message ID: 20231230161954.569267-8-michael.roth@amd.com
State: New
Series: Add AMD Secure Nested Paging (SEV-SNP) Initialization Support
On 1/10/24 05:13, Borislav Petkov wrote:
> On Sat, Dec 30, 2023 at 10:19:35AM -0600, Michael Roth wrote:
>> +	while (pfn_current < pfn_end) {
>> +		e = __snp_lookup_rmpentry(pfn_current, &level);
>> +		if (IS_ERR(e)) {
>> +			pfn_current++;
>> +			continue;
>> +		}
>> +
>> +		e_data = (u64 *)e;
>> +		if (e_data[0] || e_data[1]) {
>> +			pr_info("No assigned RMP entry for PFN 0x%llx, but the 2MB region contains populated RMP entries, e.g.: PFN 0x%llx: [high=0x%016llx low=0x%016llx]\n",
>> +				pfn, pfn_current, e_data[1], e_data[0]);
>> +			return;
>> +		}
>> +		pfn_current++;
>> +	}
>> +
>> +	pr_info("No populated RMP entries in the 2MB region containing PFN 0x%llx\n",
>> +		pfn);
>> +}
>
> Ok, I went and reworked this, see below.
>
> Yes, I think it is important - at least in the beginning - to dump the
> whole 2M PFN region for debugging purposes. If that output starts
> becoming too unwieldy and overflowing terminals or log files, we'd
> shorten it or put it behind a debug option or so.
>
> Thx.
>
> ---
> diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
> index a8cf33b7da71..259a1dd655a7 100644
> --- a/arch/x86/virt/svm/sev.c
> +++ b/arch/x86/virt/svm/sev.c
> +	pr_info("PFN 0x%llx unassigned, dumping the whole 2M PFN region: [0x%llx - 0x%llx]\n",
> +		pfn, pfn_i, pfn_end);

How about saying "... dumping all non-zero entries in the whole ..." and then
removing the print below that prints the PFN and "..."

> +
> +	while (pfn_i < pfn_end) {
> +		e = __snp_lookup_rmpentry(pfn_i, &level);
>  		if (IS_ERR(e)) {
> -			pfn_current++;
> +			pr_err("Error %ld reading RMP entry for PFN 0x%llx\n",
> +			       PTR_ERR(e), pfn_i);
> +			pfn_i++;
>  			continue;
>  		}
>
> -		e_data = (u64 *)e;
> -		if (e_data[0] || e_data[1]) {
> -			pr_info("No assigned RMP entry for PFN 0x%llx, but the 2MB region contains populated RMP entries, e.g.: PFN 0x%llx: [high=0x%016llx low=0x%016llx]\n",
> -				pfn, pfn_current, e_data[1], e_data[0]);
> -			return;
> -		}
> -		pfn_current++;
> -	}
> +		if (e->lo || e->hi)
> +			pr_info("PFN: 0x%llx, [0x%016llx - 0x%016llx]\n", pfn_i, e->lo, e->hi);
> +		else
> +			pr_info("PFN: 0x%llx ...\n", pfn_i);

Remove this one. That should cut down on excess output since you are really
only concerned with non-zero RMP entries when the input PFN RMP entry is not
assigned.

Thanks,
Tom

>
> -	pr_info("No populated RMP entries in the 2MB region containing PFN 0x%llx\n",
> -		pfn);
> +		pfn_i++;
> +	}
>  }
>
>  void snp_dump_hva_rmpentry(unsigned long hva)
> @@ -339,4 +343,3 @@ void snp_dump_hva_rmpentry(unsigned long hva)
>
>  	dump_rmpentry(pte_pfn(*pte));
>  }
> -EXPORT_SYMBOL_GPL(snp_dump_hva_rmpentry);
>
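[Editor's sketch: with Tom's suggestion applied, the reworked dump loop would look roughly as follows. This is illustrative only; the pfn_i naming and the rmpentry lo/hi fields are taken from the quoted rework above, not from any final upstream version.]

	/*
	 * Walk the 2MB-aligned PFN range and report only the non-zero RMP
	 * entries, since those are the interesting ones when the input PFN's
	 * own RMP entry is not assigned.
	 */
	while (pfn_i < pfn_end) {
		e = __snp_lookup_rmpentry(pfn_i, &level);
		if (IS_ERR(e)) {
			pr_err("Error %ld reading RMP entry for PFN 0x%llx\n",
			       PTR_ERR(e), pfn_i);
			pfn_i++;
			continue;
		}

		/* No "else" branch: all-zero entries are simply skipped. */
		if (e->lo || e->hi)
			pr_info("PFN: 0x%llx, [0x%016llx - 0x%016llx]\n",
				pfn_i, e->lo, e->hi);

		pfn_i++;
	}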
On Wed, Jan 10, 2024 at 09:51:04AM -0600, Tom Lendacky wrote:
> I'm only suggesting getting rid of the else that prints "..." when the entry
> is all zeroes. Printing the non-zero entries would still occur.

Sure, one should be able to infer that the missing entries are null. :-)
On Wed, Jan 10, 2024 at 10:18:37PM +0200, Jarkko Sakkinen wrote:
> > > +	if (!pte) {
> > > +		pr_info("Can't dump RMP entry for HVA %lx: no PTE/PFN found\n", hva);
>			~~~~~~~
> is this correct log level?

No, and I caught a couple of those already but missed this one, thanks.

Mike, please make sure all your error prints are pr_err.

Thx.
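[Editor's sketch: the requested log-level change would make error paths like the one quoted above look roughly like this; shown only to illustrate pr_err() vs. pr_info().]

	if (!pte) {
		/* Error path: use pr_err() rather than pr_info() */
		pr_err("Can't dump RMP entry for HVA %lx: no PTE/PFN found\n", hva);
		return;
	}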
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 01ce61b283a3..2c53e3de0b71 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -247,9 +247,11 @@ static inline u64 sev_get_status(void) { return 0; }
 #ifdef CONFIG_KVM_AMD_SEV
 bool snp_probe_rmptable_info(void);
 int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
+void snp_dump_hva_rmpentry(unsigned long address);
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
+static inline void snp_dump_hva_rmpentry(unsigned long address) {}
 #endif
 
 #endif
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 49fdfbf4e518..7c9ced8911e9 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -266,3 +266,80 @@ int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
+
+/*
+ * Dump the raw RMP entry for a particular PFN. These bits are documented
+ * in the PPR for a particular CPU model and provide useful information
+ * about how a particular PFN is being utilized by the kernel/firmware at
+ * the time certain unexpected events occur, such as RMP faults.
+ */
+static void dump_rmpentry(u64 pfn)
+{
+	u64 pfn_current, pfn_end;
+	struct rmpentry *e;
+	u64 *e_data;
+	int level;
+
+	e = __snp_lookup_rmpentry(pfn, &level);
+	if (IS_ERR(e)) {
+		pr_info("Failed to read RMP entry for PFN 0x%llx, error %ld\n",
+			pfn, PTR_ERR(e));
+		return;
+	}
+
+	e_data = (u64 *)e;
+	if (e->assigned) {
+		pr_info("RMP entry for PFN 0x%llx: [high=0x%016llx low=0x%016llx]\n",
+			pfn, e_data[1], e_data[0]);
+		return;
+	}
+
+	/*
+	 * If the RMP entry for a particular PFN is not in an assigned state,
+	 * then it is sometimes useful to get an idea of whether or not any RMP
+	 * entries for other PFNs within the same 2MB region are assigned, since
+	 * those too can affect the ability to access a particular PFN in
+	 * certain situations, such as when the PFN is being accessed via a 2MB
+	 * mapping in the host page table.
+	 */
+	pfn_current = ALIGN(pfn, PTRS_PER_PMD);
+	pfn_end = pfn_current + PTRS_PER_PMD;
+
+	while (pfn_current < pfn_end) {
+		e = __snp_lookup_rmpentry(pfn_current, &level);
+		if (IS_ERR(e)) {
+			pfn_current++;
+			continue;
+		}
+
+		e_data = (u64 *)e;
+		if (e_data[0] || e_data[1]) {
+			pr_info("No assigned RMP entry for PFN 0x%llx, but the 2MB region contains populated RMP entries, e.g.: PFN 0x%llx: [high=0x%016llx low=0x%016llx]\n",
+				pfn, pfn_current, e_data[1], e_data[0]);
+			return;
+		}
+		pfn_current++;
+	}
+
+	pr_info("No populated RMP entries in the 2MB region containing PFN 0x%llx\n",
+		pfn);
+}
+
+void snp_dump_hva_rmpentry(unsigned long hva)
+{
+	unsigned int level;
+	pgd_t *pgd;
+	pte_t *pte;
+
+	pgd = __va(read_cr3_pa());
+	pgd += pgd_index(hva);
+	pte = lookup_address_in_pgd(pgd, hva, &level);
+
+	if (!pte) {
+		pr_info("Can't dump RMP entry for HVA %lx: no PTE/PFN found\n", hva);
+		return;
+	}
+
+	dump_rmpentry(pte_pfn(*pte));
+}
+EXPORT_SYMBOL_GPL(snp_dump_hva_rmpentry);
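[Editor's sketch: the exported helper above is meant to be called from code that observes unexpected RMP-related faults for a host virtual address. A hypothetical call site might look like the following; the surrounding fault-reporting function and its parameters are illustrative only and are not part of this patch.]

	#include <asm/sev.h>

	/*
	 * Hypothetical debugging call site: when an unexpected RMP fault is
	 * seen for a host virtual address, log it and dump the corresponding
	 * RMP entry (and, if unassigned, its surrounding 2MB region).
	 */
	static void report_unexpected_rmp_fault(unsigned long hva, unsigned long error_code)
	{
		pr_err("Unexpected RMP fault at HVA 0x%lx, error code 0x%lx\n",
		       hva, error_code);
		snp_dump_hva_rmpentry(hva);
	}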