
[PATCH v8 20/32] UefiCpuPkg: Define ConfidentialComputingGuestAttr

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

While initializing APs, the MpInitLib may need to know whether the
guest is running with active AMD SEV or Intel TDX memory encryption.

Add a new ConfidentialComputingGuestAttr PCD that can be used to query
the memory encryption attribute.
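
As a rough, hypothetical sketch (not part of this patch; the actual
MpInitLib usage arrives later in this series), a consumer could query the
new PCD and compare it against the attribute values defined below:

  CONFIDENTIAL_COMPUTING_GUEST_ATTR  Attr;

  //
  // Read the dynamic PCD to learn which memory encryption technology,
  // if any, is active in this guest.
  //
  Attr = (CONFIDENTIAL_COMPUTING_GUEST_ATTR)PcdGet64 (PcdConfidentialComputingGuestAttr);
  if (Attr == CCAttrAmdSevSnp) {
    //
    // Take the SEV-SNP specific AP bring-up path (placeholder).
    //
  } else if (Attr == CCAttrIntelTdx) {
    //
    // Take the TDX specific AP bring-up path (placeholder).
    //
  }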

Cc: Michael Roth <michael.roth@amd.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Suggested-by: Jiewen Yao <jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
UefiCpuPkg/UefiCpuPkg.dec | 4 +++
.../Include/ConfidentialComputingGuestAttr.h | 25 +++++++++++++++++++
2 files changed, 29 insertions(+)
create mode 100644 UefiCpuPkg/Include/ConfidentialComputingGuestAttr.h

diff --git a/UefiCpuPkg/UefiCpuPkg.dec b/UefiCpuPkg/UefiCpuPkg.dec
index 62acb291f309..9dbaa407c399 100644
--- a/UefiCpuPkg/UefiCpuPkg.dec
+++ b/UefiCpuPkg/UefiCpuPkg.dec
@@ -396,5 +396,9 @@ [PcdsDynamic, PcdsDynamicEx]
# @Prompt SEV-ES Status
gUefiCpuPkgTokenSpaceGuid.PcdSevEsIsEnabled|FALSE|BOOLEAN|0x60000016

+ ## This dynamic PCD indicates the memory encryption attribute of the guest.
+ # @Prompt Memory encryption attribute
+ gUefiCpuPkgTokenSpaceGuid.PcdConfidentialComputingGuestAttr|0|UINT64|0x60000017
+
[UserExtensions.TianoCore."ExtraFiles"]
UefiCpuPkgExtra.uni
diff --git a/UefiCpuPkg/Include/ConfidentialComputingGuestAttr.h b/UefiCpuPkg/Include/ConfidentialComputingGuestAttr.h
new file mode 100644
index 000000000000..495b0df0ac33
--- /dev/null
+++ b/UefiCpuPkg/Include/ConfidentialComputingGuestAttr.h
@@ -0,0 +1,25 @@
+/** @file
+Definitions for Confidential Computing Attribute
+
+Copyright (c) 2021 AMD Inc. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef CONFIDENTIAL_COMPUTING_GUEST_ATTR_H_
+#define CONFIDENTIAL_COMPUTING_GUEST_ATTR_H_
+
+typedef enum {
+ /* The guest is running with memory encryption disabled. */
+ CCAttrNotEncrypted = 0,
+
+ /* The guest is running with AMD SEV memory encryption enabled. */
+ CCAttrAmdSev = 0x100,
+ CCAttrAmdSevEs = 0x101,
+ CCAttrAmdSevSnp = 0x102,
+
+ /* The guest is running with Intel TDX memory encryption enabled. */
+ CCAttrIntelTdx = 0x200,
+} CONFIDENTIAL_COMPUTING_GUEST_ATTR;
+
+#endif
--
2.25.1


[PATCH v8 19/32] OvmfPkg/PlatformPei: validate the system RAM when SNP is active

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

When SEV-SNP is active, a memory region mapped encrypted in the page
table must be validated before access. There are two approaches that
can be taken to validate the system RAM detected during the PEI phase:

1) Validate on-demand
OR
2) Validate before access

On-demand
=========
If memory is not validated before access, it will cause a #VC
exception with the page-not-validated error code. The VC exception
handler can perform the validation steps.

The pages that have been validated will need to be tracked to avoid
double-validation scenarios. The ranges of memory that have not been
validated will need to be communicated to the OS through the
recently introduced unaccepted memory type
https://github.com/microsoft/mu_basecore/pull/66, so that the OS can
validate those ranges before using them.

Validate before access
======================
Since the PEI phase detects all the available system RAM, use the
MemEncryptSevSnpPreValidateSystemRam() function to pre-validate the
detected system RAM in the PEI phase.

For now, choose option 2 (validate before access) due to the dependency
and complexity of the on-demand validation.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/PlatformPei/AmdSev.c | 42 ++++++++++++++++++++++++++++++++++++
1 file changed, 42 insertions(+)

diff --git a/OvmfPkg/PlatformPei/AmdSev.c b/OvmfPkg/PlatformPei/AmdSev.c
index de876fdb478e..391e7bbb7dbd 100644
--- a/OvmfPkg/PlatformPei/AmdSev.c
+++ b/OvmfPkg/PlatformPei/AmdSev.c
@@ -23,6 +23,40 @@

#include "Platform.h"

+/**
+ Initialize SEV-SNP support if running as an SEV-SNP guest.
+
+**/
+STATIC
+VOID
+AmdSevSnpInitialize (
+ VOID
+ )
+{
+ EFI_PEI_HOB_POINTERS Hob;
+ EFI_HOB_RESOURCE_DESCRIPTOR *ResourceHob;
+
+ if (!MemEncryptSevSnpIsEnabled ()) {
+ return;
+ }
+
+ //
+ // Iterate through the system RAM and validate it.
+ //
+ for (Hob.Raw = GetHobList (); !END_OF_HOB_LIST (Hob); Hob.Raw = GET_NEXT_HOB (Hob)) {
+ if (Hob.Raw != NULL && GET_HOB_TYPE (Hob) == EFI_HOB_TYPE_RESOURCE_DESCRIPTOR) {
+ ResourceHob = Hob.ResourceDescriptor;
+
+ if (ResourceHob->ResourceType == EFI_RESOURCE_SYSTEM_MEMORY) {
+ MemEncryptSevSnpPreValidateSystemRam (
+ ResourceHob->PhysicalStart,
+ EFI_SIZE_TO_PAGES ((UINTN) ResourceHob->ResourceLength)
+ );
+ }
+ }
+ }
+}
+
/**
Handle an SEV-SNP/GHCB protocol check failure.

@@ -240,6 +274,14 @@ AmdSevInitialize (
return;
}

+ //
+ // Check and perform SEV-SNP initialization if required. This needs to be
+ // done before the GHCB page is made shared in AmdSevEsInitialize(), because
+ // the system RAM must be validated before it is made shared.
+ // The AmdSevSnpInitialize() validates the system RAM.
+ //
+ AmdSevSnpInitialize ();
+
//
// Set Memory Encryption Mask PCD
//
--
2.25.1


[PATCH v8 18/32] OvmfPkg/SecMain: validate the memory used for decompressing Fv

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

The VMM launch sequence should have pre-validated all the data pages used
in the Reset vector. That range does not cover the data pages used during
the SEC phase (mainly the PEI and DXE firmware volume decompression memory).

When SEV-SNP is active, memory must be pre-validated before it is accessed.
Add support to pre-validate the memory range from PcdOvmfSecValidatedStart
to PcdOvmfSecValidatedEnd. This should be sufficient to enter the PEI
phase.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/OvmfPkg.dec | 4 ++++
.../PeiMemEncryptSevLib.inf | 2 ++
OvmfPkg/Sec/SecMain.inf | 3 +++
OvmfPkg/Sec/AmdSev.h | 23 +++++++++++++++++++
.../X64/PeiSnpSystemRamValidate.c | 5 ++++
OvmfPkg/Sec/AmdSev.c | 22 +++++++++++++++++-
OvmfPkg/Sec/SecMain.c | 5 ++++
OvmfPkg/FvmainCompactScratchEnd.fdf.inc | 5 ++++
8 files changed, 68 insertions(+), 1 deletion(-)

diff --git a/OvmfPkg/OvmfPkg.dec b/OvmfPkg/OvmfPkg.dec
index efa0de6d0600..052784c18864 100644
--- a/OvmfPkg/OvmfPkg.dec
+++ b/OvmfPkg/OvmfPkg.dec
@@ -354,6 +354,10 @@ [PcdsFixedAtBuild]
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase|0|UINT32|0x54
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize|0|UINT32|0x55

+ ## The range of memory that is validated by the SEC phase.
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedStart|0|UINT32|0x56
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedEnd|0|UINT32|0x57
+
[PcdsDynamic, PcdsDynamicEx]
gUefiOvmfPkgTokenSpaceGuid.PcdEmuVariableEvent|0|UINT64|2
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfFlashVariablesEnable|FALSE|BOOLEAN|0x10
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
index 1cc9dd6691a2..291eef39ca8d 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
@@ -78,3 +78,5 @@ [FixedPcd]
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfLockBoxStorageSize
gEfiMdePkgTokenSpaceGuid.PcdGuidedExtractHandlerTableAddress
gUefiOvmfPkgTokenSpaceGuid.PcdGuidedExtractHandlerTableSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedEnd
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedStart
diff --git a/OvmfPkg/Sec/SecMain.inf b/OvmfPkg/Sec/SecMain.inf
index 9523a8ea6c8f..282e60a2764f 100644
--- a/OvmfPkg/Sec/SecMain.inf
+++ b/OvmfPkg/Sec/SecMain.inf
@@ -51,6 +51,7 @@ [LibraryClasses]
PeCoffExtraActionLib
ExtractGuidedSectionLib
LocalApicLib
+ MemEncryptSevLib
CpuExceptionHandlerLib

[Ppis]
@@ -73,6 +74,8 @@ [Pcd]
gEfiMdeModulePkgTokenSpaceGuid.PcdInitValueInTempStack
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfConfidentialComputingWorkAreaHeader
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfWorkAreaBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedStart
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedEnd

[FeaturePcd]
gUefiOvmfPkgTokenSpaceGuid.PcdSmmSmramRequire
diff --git a/OvmfPkg/Sec/AmdSev.h b/OvmfPkg/Sec/AmdSev.h
index adad96d23189..411bbedae4cf 100644
--- a/OvmfPkg/Sec/AmdSev.h
+++ b/OvmfPkg/Sec/AmdSev.h
@@ -69,4 +69,27 @@ SevEsIsEnabled (
VOID
);

+/**
+ Validate System RAM used for decompressing the PEI and DXE firmware volumes
+ when SEV-SNP is active. The PCDs SecValidatedStart and SecValidatedEnd are
+ set in OvmfPkg/FvmainCompactScratchEnd.fdf.inc.
+
+**/
+VOID
+SecValidateSystemRam (
+ VOID
+ );
+
+/**
+ Determine if SEV-SNP is active.
+
+ @retval TRUE SEV-SNP is enabled
+ @retval FALSE SEV-SNP is not enabled
+
+**/
+BOOLEAN
+SevSnpIsEnabled (
+ VOID
+ );
+
#endif
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
index a0803e1255dc..f8b44fe040b6 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
@@ -65,6 +65,11 @@ STATIC SNP_PRE_VALIDATED_RANGE mPreValidatedRange[] = {
FixedPcdGet32 (PcdOvmfSecPeiTempRamBase),
FixedPcdGet32 (PcdOvmfSecPeiTempRamBase) + FixedPcdGet32 (PcdOvmfSecPeiTempRamSize),
},
+ // The below range is pre-validated by the Sec/SecMain.c
+ {
+ FixedPcdGet32 (PcdOvmfSecValidatedStart),
+ FixedPcdGet32 (PcdOvmfSecValidatedEnd)
+ },
};

STATIC
diff --git a/OvmfPkg/Sec/AmdSev.c b/OvmfPkg/Sec/AmdSev.c
index 9dd42b195785..aef27d9727b2 100644
--- a/OvmfPkg/Sec/AmdSev.c
+++ b/OvmfPkg/Sec/AmdSev.c
@@ -55,7 +55,6 @@ SevEsProtocolFailure (
@retval FALSE SEV-SNP is not enabled

**/
-STATIC
BOOLEAN
SevSnpIsEnabled (
VOID
@@ -277,3 +276,24 @@ SevEsIsEnabled (

return (SevEsWorkArea->SevEsEnabled != 0);
}
+
+/**
+ Validate System RAM used for decompressing the PEI and DXE firmware volumes
+ when SEV-SNP is active. The PCDs SecValidatedStart and SecValidatedEnd are
+ set in OvmfPkg/FvmainCompactScratchEnd.fdf.inc.
+
+**/
+VOID
+SecValidateSystemRam (
+ VOID
+ )
+{
+ PHYSICAL_ADDRESS Start, End;
+
+ if (IsSevGuest () && SevSnpIsEnabled ()) {
+ Start = (EFI_PHYSICAL_ADDRESS) PcdGet32 (PcdOvmfSecValidatedStart);
+ End = (EFI_PHYSICAL_ADDRESS) PcdGet32 (PcdOvmfSecValidatedEnd);
+
+ MemEncryptSevSnpPreValidateSystemRam (Start, EFI_SIZE_TO_PAGES (End - Start));
+ }
+}
diff --git a/OvmfPkg/Sec/SecMain.c b/OvmfPkg/Sec/SecMain.c
index 406e3a25d0cd..b173ff976073 100644
--- a/OvmfPkg/Sec/SecMain.c
+++ b/OvmfPkg/Sec/SecMain.c
@@ -847,6 +847,11 @@ SecCoreStartupWithStack (
SecCoreData.BootFirmwareVolumeBase = BootFv;
SecCoreData.BootFirmwareVolumeSize = (UINTN) BootFv->FvLength;

+ //
+ // Validate the System RAM used in the SEC Phase
+ //
+ SecValidateSystemRam ();
+
//
// Make sure the 8259 is masked before initializing the Debug Agent and the debug timer is enabled
//
diff --git a/OvmfPkg/FvmainCompactScratchEnd.fdf.inc b/OvmfPkg/FvmainCompactScratchEnd.fdf.inc
index 46f52583297c..d8d45fc9aa6d 100644
--- a/OvmfPkg/FvmainCompactScratchEnd.fdf.inc
+++ b/OvmfPkg/FvmainCompactScratchEnd.fdf.inc
@@ -63,3 +63,8 @@
DEFINE DECOMP_SCRATCH_BASE = (($(DECOMP_SCRATCH_BASE_UNALIGNED) + $(DECOMP_SCRATCH_BASE_ALIGNMENT)) & $(DECOMP_SCRATCH_BASE_MASK))

SET gUefiOvmfPkgTokenSpaceGuid.PcdOvmfDecompressionScratchEnd = $(DECOMP_SCRATCH_BASE) + $(DECOMP_SCRATCH_SIZE)
+
+#
+# The range of pages that should be pre-validated during the SEC phase when SEV-SNP is active in the guest VM.
+SET gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedStart = $(MEMFD_BASE_ADDRESS) + gUefiOvmfPkgTokenSpaceGuid.PcdOvmfPeiMemFvBase
+SET gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecValidatedEnd = $(DECOMP_SCRATCH_BASE) + $(DECOMP_SCRATCH_SIZE)
--
2.25.1


[PATCH v8 16/32] OvmfPkg/BaseMemEncryptSevLib: skip the pre-validated system RAM

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

The MemEncryptSevSnpPreValidateSystemRam() function is used for
pre-validating the system RAM. As the boot progresses, each phase validates
a fixed region of RAM. In the PEI phase, PlatformPei detects all the
available RAM and calls the function to pre-validate the detected system
RAM.

While validating the system RAM in the PEI phase, we must skip previously
validated system RAM to avoid double validation.
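
As a purely illustrative example of the skip logic (the addresses here are
made up): if PlatformPei asks to validate [0x00000000, 0x01000000) and the
pre-validated list already contains the SEC temporary RAM at
[0x00810000, 0x00820000), the loop below validates [0x00000000, 0x00810000),
skips ahead to 0x00820000, and then validates [0x00820000, 0x01000000), so
only the not-yet-validated pieces are passed to InternalSetPageState().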

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
.../PeiMemEncryptSevLib.inf | 20 ++++
.../X64/PeiSnpSystemRamValidate.c | 102 +++++++++++++++++-
2 files changed, 121 insertions(+), 1 deletion(-)

diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
index 0402e49a1028..1cc9dd6691a2 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
@@ -57,4 +57,24 @@ [FeaturePcd]
gUefiOvmfPkgTokenSpaceGuid.PcdSmmSmramRequire

[FixedPcd]
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize
gUefiCpuPkgTokenSpaceGuid.PcdSevEsWorkAreaBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbPageTableBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbPageTableSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPageTablesBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPageTablesSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPeiTempRamBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPeiTempRamSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfWorkAreaBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfWorkAreaSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfLockBoxStorageBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfLockBoxStorageSize
+ gEfiMdePkgTokenSpaceGuid.PcdGuidedExtractHandlerTableAddress
+ gUefiOvmfPkgTokenSpaceGuid.PcdGuidedExtractHandlerTableSize
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
index 64aab7f45b6d..eae7a31773a4 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
@@ -14,6 +14,81 @@

#include "SnpPageStateChange.h"

+typedef struct {
+ UINT64 StartAddress;
+ UINT64 EndAddress;
+} SNP_PRE_VALIDATED_RANGE;
+
+STATIC SNP_PRE_VALIDATED_RANGE mPreValidatedRange[] = {
+ // The address ranges below were part of the OVMF metadata, and the ranges
+ // should be pre-validated by the hypervisor.
+ {
+ FixedPcdGet32 (PcdOvmfSecPageTablesBase),
+ FixedPcdGet32 (PcdOvmfSecPageTablesBase) + FixedPcdGet32 (PcdOvmfSecPageTablesSize),
+ },
+ {
+ FixedPcdGet32 (PcdOvmfLockBoxStorageBase),
+ FixedPcdGet32 (PcdOvmfLockBoxStorageBase) + FixedPcdGet32 (PcdOvmfLockBoxStorageSize),
+ },
+ {
+ FixedPcdGet64 (PcdGuidedExtractHandlerTableAddress),
+ FixedPcdGet64 (PcdGuidedExtractHandlerTableAddress) + FixedPcdGet32 (PcdGuidedExtractHandlerTableSize)
+ },
+ {
+ FixedPcdGet32 (PcdOvmfSecGhcbPageTableBase),
+ FixedPcdGet32 (PcdOvmfSecGhcbPageTableBase) + FixedPcdGet32 (PcdOvmfSecGhcbPageTableSize),
+ },
+ {
+ FixedPcdGet32 (PcdOvmfSecGhcbBase),
+ FixedPcdGet32 (PcdOvmfSecGhcbBase) + FixedPcdGet32 (PcdOvmfSecGhcbSize),
+ },
+ {
+ FixedPcdGet32 (PcdOvmfWorkAreaBase),
+ FixedPcdGet32 (PcdOvmfWorkAreaBase) + FixedPcdGet32 (PcdOvmfWorkAreaSize),
+ },
+ {
+ FixedPcdGet32 (PcdOvmfSecGhcbBackupBase),
+ FixedPcdGet32 (PcdOvmfSecGhcbBackupBase) + FixedPcdGet32 (PcdOvmfSecGhcbBackupSize),
+ },
+ {
+ FixedPcdGet32 (PcdOvmfSnpSecretsBase),
+ FixedPcdGet32 (PcdOvmfSnpSecretsBase) + FixedPcdGet32 (PcdOvmfSnpSecretsSize),
+ },
+ {
+ FixedPcdGet32 (PcdOvmfCpuidBase),
+ FixedPcdGet32 (PcdOvmfCpuidBase) + FixedPcdGet32 (PcdOvmfCpuidSize),
+ },
+ {
+ FixedPcdGet32 (PcdOvmfSecPeiTempRamBase),
+ FixedPcdGet32 (PcdOvmfSecPeiTempRamBase) + FixedPcdGet32 (PcdOvmfSecPeiTempRamSize),
+ },
+};
+
+STATIC
+BOOLEAN
+DetectPreValidatedOverLap (
+ IN PHYSICAL_ADDRESS StartAddress,
+ IN PHYSICAL_ADDRESS EndAddress,
+ OUT SNP_PRE_VALIDATED_RANGE *OverlapRange
+ )
+{
+ UINTN i;
+
+ //
+ // Check if the specified address range exists in the pre-validated array.
+ //
+ for (i = 0; i < ARRAY_SIZE (mPreValidatedRange); i++) {
+ if ((mPreValidatedRange[i].StartAddress < EndAddress) &&
+ (StartAddress < mPreValidatedRange[i].EndAddress)) {
+ OverlapRange->StartAddress = mPreValidatedRange[i].StartAddress;
+ OverlapRange->EndAddress = mPreValidatedRange[i].EndAddress;
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
/**
Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.

@@ -28,9 +103,34 @@ MemEncryptSevSnpPreValidateSystemRam (
IN UINTN NumPages
)
{
+ PHYSICAL_ADDRESS EndAddress;
+ SNP_PRE_VALIDATED_RANGE OverlapRange;
+
if (!MemEncryptSevSnpIsEnabled ()) {
return;
}

- InternalSetPageState (BaseAddress, NumPages, SevSnpPagePrivate, TRUE);
+ EndAddress = BaseAddress + EFI_PAGES_TO_SIZE (NumPages);
+
+ while (BaseAddress < EndAddress) {
+ //
+ // Check if the range overlaps with the pre-validated ranges.
+ //
+ if (DetectPreValidatedOverLap (BaseAddress, EndAddress, &OverlapRange)) {
+ // Validate the non-overlap regions.
+ if (BaseAddress < OverlapRange.StartAddress) {
+ NumPages = EFI_SIZE_TO_PAGES (OverlapRange.StartAddress - BaseAddress);
+
+ InternalSetPageState (BaseAddress, NumPages, SevSnpPagePrivate, TRUE);
+ }
+
+ BaseAddress = OverlapRange.EndAddress;
+ continue;
+ }
+
+ // Validate the remaining pages.
+ NumPages = EFI_SIZE_TO_PAGES (EndAddress - BaseAddress);
+ InternalSetPageState (BaseAddress, NumPages, SevSnpPagePrivate, TRUE);
+ BaseAddress = EndAddress;
+ }
}
--
2.25.1


[PATCH v8 17/32] OvmfPkg/MemEncryptSevLib: add support to validate > 4GB memory in PEI phase

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

The initial page table built during the SEC phase is used by
MemEncryptSevSnpPreValidateSystemRam() for the system RAM validation. The
page validation process requires the PVALIDATE instruction; the
instruction accepts a virtual address of the memory region that needs
to be validated. If the hardware encounters a page table walk failure (due
to a not-present page) then it raises a #GP.

The initial page table built in the SEC phase addresses memory up to 4GB.
Add an internal function to extend the page table to cover regions above
4GB. The function builds 1GB page-table entries for accesses above 4GB.
This makes it possible to run the PVALIDATE instruction on virtual
addresses above 4GB in the PEI phase.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
.../BaseMemEncryptSevLib/X64/VirtualMemory.h | 19 +++
.../X64/PeiDxeVirtualMemory.c | 115 ++++++++++++++++++
.../X64/PeiSnpSystemRamValidate.c | 22 ++++
3 files changed, 156 insertions(+)

diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h
index 21bbbd1c4f9c..aefef68c30c0 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h
@@ -143,4 +143,23 @@ InternalMemEncryptSevClearMmioPageEncMask (
IN PHYSICAL_ADDRESS PhysicalAddress,
IN UINTN Length
);
+
+/**
+ Create 1GB identity mapping for the specified virtual address range.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] VirtualAddress Virtual address
+ @param[in] Length Length of virtual address range
+
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+
+**/
+RETURN_STATUS
+EFIAPI
+InternalMemEncryptSevCreateIdentityMap1G (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN UINTN Length
+ );
#endif
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiDxeVirtualMemory.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiDxeVirtualMemory.c
index c696745f9d26..f146f6d61cc5 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiDxeVirtualMemory.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiDxeVirtualMemory.c
@@ -536,6 +536,121 @@ EnableReadOnlyPageWriteProtect (
AsmWriteCr0 (AsmReadCr0() | BIT16);
}

+RETURN_STATUS
+EFIAPI
+InternalMemEncryptSevCreateIdentityMap1G (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN UINTN Length
+ )
+{
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
+ PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
+ UINT64 PgTableMask;
+ UINT64 AddressEncMask;
+ BOOLEAN IsWpEnabled;
+ RETURN_STATUS Status;
+
+ //
+ // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
+ //
+ PageMapLevel4Entry = NULL;
+
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ Cr3BaseAddress,
+ PhysicalAddress,
+ (UINT64)Length
+ ));
+
+ if (Length == 0) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ //
+ // Check if we have a valid memory encryption mask
+ //
+ AddressEncMask = InternalGetMemEncryptionAddressMask ();
+ if (!AddressEncMask) {
+ return RETURN_ACCESS_DENIED;
+ }
+
+ PgTableMask = AddressEncMask | EFI_PAGE_MASK;
+
+
+ //
+ // Make sure that the page table is changeable.
+ //
+ IsWpEnabled = IsReadOnlyPageWriteProtected ();
+ if (IsWpEnabled) {
+ DisableReadOnlyPageWriteProtect ();
+ }
+
+ Status = EFI_SUCCESS;
+
+ while (Length)
+ {
+ //
+ // If Cr3BaseAddress is not specified then read the current CR3
+ //
+ if (Cr3BaseAddress == 0) {
+ Cr3BaseAddress = AsmReadCr3();
+ }
+
+ PageMapLevel4Entry = (VOID*) (Cr3BaseAddress & ~PgTableMask);
+ PageMapLevel4Entry += PML4_OFFSET(PhysicalAddress);
+ if (!PageMapLevel4Entry->Bits.Present) {
+ DEBUG ((
+ DEBUG_ERROR,
+ "%a:%a: bad PML4 for Physical=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ PhysicalAddress
+ ));
+ Status = RETURN_NO_MAPPING;
+ goto Done;
+ }
+
+ PageDirectory1GEntry = (VOID *)(
+ (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
+ 12) & ~PgTableMask
+ );
+ PageDirectory1GEntry += PDP_OFFSET(PhysicalAddress);
+ if (!PageDirectory1GEntry->Bits.Present) {
+ PageDirectory1GEntry->Bits.Present = 1;
+ PageDirectory1GEntry->Bits.MustBe1 = 1;
+ PageDirectory1GEntry->Bits.MustBeZero = 0;
+ PageDirectory1GEntry->Bits.ReadWrite = 1;
+ PageDirectory1GEntry->Uint64 |= (UINT64)PhysicalAddress | AddressEncMask;
+ }
+
+ if (Length <= BIT30) {
+ Length = 0;
+ } else {
+ Length -= BIT30;
+ }
+
+ PhysicalAddress += BIT30;
+ }
+
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb();
+
+Done:
+ //
+ // Restore page table write protection, if any.
+ //
+ if (IsWpEnabled) {
+ EnableReadOnlyPageWriteProtect ();
+ }
+
+ return Status;
+}

/**
This function either sets or clears memory encryption bit for the memory
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
index eae7a31773a4..a0803e1255dc 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
@@ -10,9 +10,12 @@

#include <Uefi/UefiBaseType.h>
#include <Library/BaseLib.h>
+#include <Library/PcdLib.h>
+#include <Library/DebugLib.h>
#include <Library/MemEncryptSevLib.h>

#include "SnpPageStateChange.h"
+#include "VirtualMemory.h"

typedef struct {
UINT64 StartAddress;
@@ -105,6 +108,7 @@ MemEncryptSevSnpPreValidateSystemRam (
{
PHYSICAL_ADDRESS EndAddress;
SNP_PRE_VALIDATED_RANGE OverlapRange;
+ EFI_STATUS Status;

if (!MemEncryptSevSnpIsEnabled ()) {
return;
@@ -112,6 +116,24 @@ MemEncryptSevSnpPreValidateSystemRam (

EndAddress = BaseAddress + EFI_PAGES_TO_SIZE (NumPages);

+ //
+ // The page table used in PEI can address up to 4GB of memory. If we are asked
+ // to validate a range above 4GB, then create an identity mapping so that the
+ // PVALIDATE instruction can execute correctly. If the page table entry is not
+ // present then PVALIDATE will raise a #GP.
+ //
+ if (BaseAddress >= SIZE_4GB) {
+ Status = InternalMemEncryptSevCreateIdentityMap1G (
+ 0,
+ BaseAddress,
+ EFI_PAGES_TO_SIZE (NumPages)
+ );
+ if (EFI_ERROR (Status)) {
+ ASSERT (FALSE);
+ CpuDeadLoop ();
+ }
+ }
+
while (BaseAddress < EndAddress) {
//
// Check if the range overlaps with the pre-validated ranges.
--
2.25.1


[PATCH v8 14/32] OvmfPkg/MemEncryptSevLib: add support to validate system RAM

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

Many of the integrity guarantees of SEV-SNP are enforced through the
Reverse Map Table (RMP). Each RMP entry contains the GPA at which a
particular page of DRAM should be mapped. The guest can request the
hypervisor to add pages to the RMP table via the Page State Change VMGEXIT
defined in the GHCB specification, sections 2.5.1 and 4.1.6. Inside each RMP
entry is a Validated flag; this flag is automatically cleared to 0 by the
CPU hardware when a new RMP entry is created for a guest. Each VM page
can be either validated or invalidated, as indicated by the Validated
flag in the RMP entry. Memory access to a private page that is not
validated generates a #VC. A VM can use the PVALIDATE instruction to
validate the private page before using it.

During guest creation, the boot ROM memory is pre-validated by the
AMD SEV firmware. The MemEncryptSevSnpPreValidateSystemRam() function can be
called during the SEC and PEI phases to validate the detected system RAM.

One of the fields in the Page State Change NAE event is the RMP page size.
The page size input parameter indicates whether a 4KB or 2MB page should
be used while adding the RMP entry. During the validation, when possible,
MemEncryptSevSnpPreValidateSystemRam() will use the 2MB entry. A
hypervisor backing the memory may choose to use a different page size
in the RMP entry. In that case, the PVALIDATE instruction returns
SIZEMISMATCH. If a SIZEMISMATCH is detected, then validate all 512 4KB
pages constituting the 2MB region.
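
As a condensed, illustrative sketch of that fallback (it mirrors the
PvalidateRange() logic added by this patch; AsmPvalidate() and the
constants are the ones used in the diff below):

  Ret = AsmPvalidate (PvalidatePageSize2MB, TRUE, Address);
  if (Ret == PVALIDATE_RET_SIZE_MISMATCH) {
    //
    // The hypervisor backed this region with 4K RMP entries; validate
    // each of the 512 4K pages making up the 2MB region individually.
    //
    for (i = 0; i < PAGES_PER_LARGE_ENTRY; i++) {
      Ret = AsmPvalidate (PvalidatePageSize4K, TRUE, Address);
      if (Ret != 0) {
        break;
      }
      Address += EFI_PAGE_SIZE;
    }
  }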

Upon completion, the PVALIDATE instruction sets rFLAGS.CF to 0 if the
instruction changed the RMP entry and to 1 if it did not change the RMP
entry. rFLAGS.CF will be 1 only when the memory region was already
validated. We should not double-validate memory, as that could lead to a
security compromise. If double validation is detected, terminate the boot.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/OvmfPkgIa32.dsc | 1 +
OvmfPkg/OvmfPkgIa32X64.dsc | 1 +
.../DxeMemEncryptSevLib.inf | 3 +
.../PeiMemEncryptSevLib.inf | 3 +
.../SecMemEncryptSevLib.inf | 3 +
OvmfPkg/Include/Library/MemEncryptSevLib.h | 14 +
.../X64/SnpPageStateChange.h | 31 ++
.../Ia32/MemEncryptSevLib.c | 17 +
.../X64/DxeSnpSystemRamValidate.c | 40 +++
.../X64/PeiSnpSystemRamValidate.c | 36 +++
.../X64/SecSnpSystemRamValidate.c | 36 +++
.../X64/SnpPageStateChangeInternal.c | 295 ++++++++++++++++++
12 files changed, 480 insertions(+)
create mode 100644 OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h
create mode 100644 OvmfPkg/Library/BaseMemEncryptSevLib/X64/DxeSnpSystemRamValidate.c
create mode 100644 OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
create mode 100644 OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c
create mode 100644 OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c

diff --git a/OvmfPkg/OvmfPkgIa32.dsc b/OvmfPkg/OvmfPkgIa32.dsc
index d1d92c97bae3..626dbc15786a 100644
--- a/OvmfPkg/OvmfPkgIa32.dsc
+++ b/OvmfPkg/OvmfPkgIa32.dsc
@@ -266,6 +266,7 @@ [LibraryClasses.common.SEC]
!else
CpuExceptionHandlerLib|UefiCpuPkg/Library/CpuExceptionHandlerLib/SecPeiCpuExceptionHandlerLib.inf
!endif
+ MemEncryptSevLib|OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf

[LibraryClasses.common.PEI_CORE]
HobLib|MdePkg/Library/PeiHobLib/PeiHobLib.inf
diff --git a/OvmfPkg/OvmfPkgIa32X64.dsc b/OvmfPkg/OvmfPkgIa32X64.dsc
index f42abc041c0c..58be97eb0605 100644
--- a/OvmfPkg/OvmfPkgIa32X64.dsc
+++ b/OvmfPkg/OvmfPkgIa32X64.dsc
@@ -270,6 +270,7 @@ [LibraryClasses.common.SEC]
!else
CpuExceptionHandlerLib|UefiCpuPkg/Library/CpuExceptionHandlerLib/SecPeiCpuExceptionHandlerLib.inf
!endif
+ MemEncryptSevLib|OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf

[LibraryClasses.common.PEI_CORE]
HobLib|MdePkg/Library/PeiHobLib/PeiHobLib.inf
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf b/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf
index f2e162d68076..f613bb314f5f 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf
@@ -34,8 +34,10 @@ [Sources]
PeiDxeMemEncryptSevLibInternal.c

[Sources.X64]
+ X64/DxeSnpSystemRamValidate.c
X64/MemEncryptSevLib.c
X64/PeiDxeVirtualMemory.c
+ X64/SnpPageStateChangeInternal.c
X64/VirtualMemory.c
X64/VirtualMemory.h

@@ -49,6 +51,7 @@ [LibraryClasses]
DebugLib
MemoryAllocationLib
PcdLib
+ VmgExitLib

[FeaturePcd]
gUefiOvmfPkgTokenSpaceGuid.PcdSmmSmramRequire
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
index 03a78c32df28..0402e49a1028 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLib.inf
@@ -36,6 +36,8 @@ [Sources]
[Sources.X64]
X64/MemEncryptSevLib.c
X64/PeiDxeVirtualMemory.c
+ X64/PeiSnpSystemRamValidate.c
+ X64/SnpPageStateChangeInternal.c
X64/VirtualMemory.c
X64/VirtualMemory.h

@@ -49,6 +51,7 @@ [LibraryClasses]
DebugLib
MemoryAllocationLib
PcdLib
+ VmgExitLib

[FeaturePcd]
gUefiOvmfPkgTokenSpaceGuid.PcdSmmSmramRequire
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf b/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf
index 279c38bfbc2c..939af0a91ea4 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLib.inf
@@ -35,6 +35,8 @@ [Sources]
[Sources.X64]
X64/MemEncryptSevLib.c
X64/SecVirtualMemory.c
+ X64/SecSnpSystemRamValidate.c
+ X64/SnpPageStateChangeInternal.c
X64/VirtualMemory.c
X64/VirtualMemory.h

@@ -46,6 +48,7 @@ [LibraryClasses]
CpuLib
DebugLib
PcdLib
+ VmgExitLib

[FixedPcd]
gUefiCpuPkgTokenSpaceGuid.PcdSevEsWorkAreaBase
diff --git a/OvmfPkg/Include/Library/MemEncryptSevLib.h b/OvmfPkg/Include/Library/MemEncryptSevLib.h
index 796de62ec2f8..f708a0cdaa72 100644
--- a/OvmfPkg/Include/Library/MemEncryptSevLib.h
+++ b/OvmfPkg/Include/Library/MemEncryptSevLib.h
@@ -215,4 +215,18 @@ MemEncryptSevClearMmioPageEncMask (
IN UINTN NumPages
);

+/**
+ Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+ @param[in] BaseAddress Base address
+ @param[in] NumPages Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages
+ );
+
#endif // _MEM_ENCRYPT_SEV_LIB_H_
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h
new file mode 100644
index 000000000000..8bbdf06468b9
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h
@@ -0,0 +1,31 @@
+/** @file
+
+ SEV-SNP Page Validation functions.
+
+ Copyright (c) 2021 AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef SNP_PAGE_STATE_INTERNAL_H_
+#define SNP_PAGE_STATE_INTERNAL_H_
+
+//
+// SEV-SNP Page states
+//
+typedef enum {
+ SevSnpPagePrivate,
+ SevSnpPageShared,
+
+} SEV_SNP_PAGE_STATE;
+
+VOID
+InternalSetPageState (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages,
+ IN SEV_SNP_PAGE_STATE State,
+ IN BOOLEAN UseLargeEntry
+ );
+
+#endif
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c b/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c
index be260e0d1014..df5e4d61513d 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c
@@ -136,3 +136,20 @@ MemEncryptSevClearMmioPageEncMask (
//
return RETURN_UNSUPPORTED;
}
+
+/**
+ Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+ @param[in] BaseAddress Base address
+ @param[in] NumPages Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages
+ )
+{
+ ASSERT (FALSE);
+}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/DxeSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/DxeSnpSystemRamValidate.c
new file mode 100644
index 000000000000..ad8d8b388dc8
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/DxeSnpSystemRamValidate.c
@@ -0,0 +1,40 @@
+/** @file
+
+ SEV-SNP Page Validation functions.
+
+ Copyright (c) 2021 AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi/UefiBaseType.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemEncryptSevLib.h>
+
+#include "SnpPageStateChange.h"
+
+/**
+ Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+ @param[in] BaseAddress Base address
+ @param[in] NumPages Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages
+ )
+{
+ if (!MemEncryptSevSnpIsEnabled ()) {
+ return;
+ }
+
+ //
+ // All the pre-validation must be completed in the PEI phase.
+ //
+ ASSERT (FALSE);
+}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
new file mode 100644
index 000000000000..64aab7f45b6d
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/PeiSnpSystemRamValidate.c
@@ -0,0 +1,36 @@
+/** @file
+
+ SEV-SNP Page Validation functions.
+
+ Copyright (c) 2021 AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi/UefiBaseType.h>
+#include <Library/BaseLib.h>
+#include <Library/MemEncryptSevLib.h>
+
+#include "SnpPageStateChange.h"
+
+/**
+ Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+ @param[in] BaseAddress Base address
+ @param[in] NumPages Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages
+ )
+{
+ if (!MemEncryptSevSnpIsEnabled ()) {
+ return;
+ }
+
+ InternalSetPageState (BaseAddress, NumPages, SevSnpPagePrivate, TRUE);
+}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c
new file mode 100644
index 000000000000..64aab7f45b6d
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c
@@ -0,0 +1,36 @@
+/** @file
+
+ SEV-SNP Page Validation functions.
+
+ Copyright (c) 2021 AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi/UefiBaseType.h>
+#include <Library/BaseLib.h>
+#include <Library/MemEncryptSevLib.h>
+
+#include "SnpPageStateChange.h"
+
+/**
+ Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.
+
+ @param[in] BaseAddress Base address
+ @param[in] NumPages Number of pages starting from the base address
+
+**/
+VOID
+EFIAPI
+MemEncryptSevSnpPreValidateSystemRam (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages
+ )
+{
+ if (!MemEncryptSevSnpIsEnabled ()) {
+ return;
+ }
+
+ InternalSetPageState (BaseAddress, NumPages, SevSnpPagePrivate, TRUE);
+}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
new file mode 100644
index 000000000000..506df12d4e51
--- /dev/null
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
@@ -0,0 +1,295 @@
+/** @file
+
+ SEV-SNP Page Validation functions.
+
+ Copyright (c) 2021 AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi/UefiBaseType.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Library/DebugLib.h>
+#include <Library/VmgExitLib.h>
+
+#include <Register/Amd/Ghcb.h>
+#include <Register/Amd/Msr.h>
+
+#include "SnpPageStateChange.h"
+
+#define IS_ALIGNED(x, y) ((((x) & ((y) - 1)) == 0))
+#define PAGES_PER_LARGE_ENTRY 512
+
+STATIC
+UINTN
+MemoryStateToGhcbOp (
+ IN SEV_SNP_PAGE_STATE State
+ )
+{
+ UINTN Cmd;
+
+ switch (State) {
+ case SevSnpPageShared: Cmd = SNP_PAGE_STATE_SHARED; break;
+ case SevSnpPagePrivate: Cmd = SNP_PAGE_STATE_PRIVATE; break;
+ default: ASSERT(0);
+ }
+
+ return Cmd;
+}
+
+STATIC
+VOID
+SnpPageStateFailureTerminate (
+ VOID
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+
+ //
+ // Use the GHCB MSR Protocol to request termination by the hypervisor
+ //
+ Msr.GhcbPhysicalAddress = 0;
+ Msr.GhcbTerminate.Function = GHCB_INFO_TERMINATE_REQUEST;
+ Msr.GhcbTerminate.ReasonCodeSet = GHCB_TERMINATE_GHCB;
+ Msr.GhcbTerminate.ReasonCode = GHCB_TERMINATE_GHCB_GENERAL;
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+ AsmVmgExit ();
+
+ ASSERT (FALSE);
+ CpuDeadLoop ();
+}
+
+/**
+ This function issues the PVALIDATE instruction to validate or invalidate the memory
+ range specified. If PVALIDATE returns a size mismatch then it retries the validation
+ with a smaller page size.
+
+ */
+STATIC
+VOID
+PvalidateRange (
+ IN SNP_PAGE_STATE_CHANGE_INFO *Info,
+ IN UINTN StartIndex,
+ IN UINTN EndIndex,
+ IN BOOLEAN Validate
+ )
+{
+ UINTN Address, RmpPageSize, Ret, i;
+
+ for (; StartIndex <= EndIndex; StartIndex++) {
+ //
+ // Get the address and the page size from the Info.
+ //
+ Address = Info->Entry[StartIndex].GuestFrameNumber << EFI_PAGE_SHIFT;
+ RmpPageSize = Info->Entry[StartIndex].PageSize;
+
+ Ret = AsmPvalidate (RmpPageSize, Validate, Address);
+
+ //
+ // If we fail to validate due to a size mismatch then try with the
+ // smaller page size. This scenario will occur if the backing page in
+ // the RMP entry is 4K and we are validating it as 2MB.
+ //
+ if ((Ret == PVALIDATE_RET_SIZE_MISMATCH) && (RmpPageSize == PvalidatePageSize2MB)) {
+ for (i = 0; i < PAGES_PER_LARGE_ENTRY; i++) {
+ Ret = AsmPvalidate (PvalidatePageSize4K, Validate, Address);
+ if (Ret) {
+ break;
+ }
+
+ Address = Address + EFI_PAGE_SIZE;
+ }
+ }
+
+ //
+ // If validation failed then do not continue.
+ //
+ if (Ret) {
+ DEBUG ((
+ DEBUG_ERROR, "%a:%a: Failed to %a address 0x%Lx Error code %d\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ Validate ? "Validate" : "Invalidate",
+ Address,
+ Ret
+ ));
+ SnpPageStateFailureTerminate ();
+ }
+ }
+}
+
+STATIC
+EFI_PHYSICAL_ADDRESS
+BuildPageStateBuffer (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN EFI_PHYSICAL_ADDRESS EndAddress,
+ IN SEV_SNP_PAGE_STATE State,
+ IN BOOLEAN UseLargeEntry,
+ IN SNP_PAGE_STATE_CHANGE_INFO *Info
+ )
+{
+ EFI_PHYSICAL_ADDRESS NextAddress;
+ UINTN i, RmpPageSize;
+
+ // Clear the page state structure
+ SetMem (Info, sizeof (*Info), 0);
+
+ i = 0;
+ NextAddress = EndAddress;
+
+ //
+ // Populate the page state entry structure
+ //
+ while ((BaseAddress < EndAddress) && (i < SNP_PAGE_STATE_MAX_ENTRY)) {
+ //
+ // Is this a 2MB aligned page? Check if we can use the Large RMP entry.
+ //
+ if (UseLargeEntry && IS_ALIGNED (BaseAddress, SIZE_2MB) &&
+ ((EndAddress - BaseAddress) >= SIZE_2MB)) {
+ RmpPageSize = PvalidatePageSize2MB;
+ NextAddress = BaseAddress + SIZE_2MB;
+ } else {
+ RmpPageSize = PvalidatePageSize4K;
+ NextAddress = BaseAddress + EFI_PAGE_SIZE;
+ }
+
+ Info->Entry[i].GuestFrameNumber = BaseAddress >> EFI_PAGE_SHIFT;
+ Info->Entry[i].PageSize = RmpPageSize;
+ Info->Entry[i].Operation = MemoryStateToGhcbOp (State);
+ Info->Entry[i].CurrentPage = 0;
+ Info->Header.EndEntry = i;
+
+ BaseAddress = NextAddress;
+ i++;
+ }
+
+ return NextAddress;
+}
+
+STATIC
+VOID
+PageStateChangeVmgExit (
+ IN GHCB *Ghcb,
+ IN SNP_PAGE_STATE_CHANGE_INFO *Info
+ )
+{
+ EFI_STATUS Status;
+
+ //
+ // As per the GHCB specification, the hypervisor can resume the guest before
+ // processing all the entries. Check whether all the entries are processed.
+ //
+ // The strategy here is to wait for the hypervisor to change the page
+ // state in the RMP table before the guest accesses the memory pages. If the
+ // page state change was not successful, then a later memory access will
+ // result in a crash.
+ //
+ while (Info->Header.CurrentEntry <= Info->Header.EndEntry) {
+ Ghcb->SaveArea.SwScratch = (UINT64) Ghcb->SharedBuffer;
+ VmgSetOffsetValid (Ghcb, GhcbSwScratch);
+
+ Status = VmgExit (Ghcb, SVM_EXIT_SNP_PAGE_STATE_CHANGE, 0, 0);
+
+ //
+ // The Page State Change VMGEXIT can pass the failure through the
+ // ExitInfo2. Let's check both the return value and ExitInfo2.
+ //
+ if ((Status != 0) || (Ghcb->SaveArea.SwExitInfo2)) {
+ SnpPageStateFailureTerminate ();
+ }
+ }
+}
+
+/**
+ The function is used to set the page state when SEV-SNP is active. The page state
+ transition consists of changing the page ownership in the RMP table, and using the
+ PVALIDATE instruction to update the Validated bit in the RMP table.
+
+ When UseLargeEntry is set to TRUE, the function will try to use the large RMP
+ entry (wherever possible).
+ */
+VOID
+InternalSetPageState (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages,
+ IN SEV_SNP_PAGE_STATE State,
+ IN BOOLEAN UseLargeEntry
+ )
+{
+ GHCB *Ghcb;
+ EFI_PHYSICAL_ADDRESS NextAddress, EndAddress;
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+ BOOLEAN InterruptState;
+ SNP_PAGE_STATE_CHANGE_INFO *Info;
+
+ Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+ Ghcb = Msr.Ghcb;
+
+ EndAddress = BaseAddress + EFI_PAGES_TO_SIZE (NumPages);
+
+ DEBUG ((
+ DEBUG_VERBOSE, "%a:%a Address 0x%Lx - 0x%Lx State = %a LargeEntry = %d\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ BaseAddress,
+ EndAddress,
+ State == SevSnpPageShared ? "Shared" : "Private",
+ UseLargeEntry
+ ));
+
+ while (BaseAddress < EndAddress) {
+ UINTN CurrentEntry, EndEntry;
+
+ //
+ // Initialize the GHCB
+ //
+ VmgInit (Ghcb, &InterruptState);
+
+ //
+ // Build the page state structure
+ //
+ Info = (SNP_PAGE_STATE_CHANGE_INFO *) Ghcb->SharedBuffer;
+ NextAddress = BuildPageStateBuffer (BaseAddress,
+ EndAddress,
+ State,
+ UseLargeEntry,
+ Info
+ );
+
+ //
+ // Save the current and end entry from the page state structure. We need
+ // it later.
+ //
+ CurrentEntry = Info->Header.CurrentEntry;
+ EndEntry = Info->Header.EndEntry;
+
+ //
+ // If the caller requested to change the page state to shared then
+ // invalidate the pages before making the page shared in the RMP table.
+ //
+ if (State == SevSnpPageShared) {
+ PvalidateRange (Info, CurrentEntry, EndEntry, FALSE);
+ }
+
+ //
+ // Invoke the page state change VMGEXIT.
+ //
+ PageStateChangeVmgExit (Ghcb, Info);
+
+ //
+ // If the caller requested to change the page state to private then
+ // validate the pages after it has been added in the RMP table.
+ //
+ if (State == SevSnpPagePrivate) {
+ PvalidateRange (Info, CurrentEntry, EndEntry, TRUE);
+ }
+
+ VmgDone (Ghcb, InterruptState);
+
+ BaseAddress = NextAddress;
+ }
+}
--
2.25.1


[PATCH v8 13/32] OvmfPkg/AmdSevDxe: do not use extended PCI config space

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

Commit 85b8eac59b8c5bd9c7eb9afdb64357ce1aa2e803 added support to ensure
that MMIO is only performed against unencrypted memory. If MMIO
is performed against encrypted memory, a #GP is raised.

The AmdSevDxe driver uses the functions provided by MemEncryptSevLib to
clear the memory encryption mask from the page table. If
MemEncryptSevLib is extended to include VmgExitLib then the dependency
chain will look like this:

OvmfPkg/AmdSevDxe/AmdSevDxe.inf
-----> MemEncryptSevLib class
-----> "OvmfPkg/BaseMemEncryptSevLib/DxeMemEncryptSevLib.inf" instance
-----> VmgExitLib class
-----> "OvmfPkg/VmgExitLib" instance
-----> LocalApicLib class
-----> "UefiCpuPkg/BaseXApicX2ApicLib/BaseXApicX2ApicLib.inf" instance
-----> TimerLib class
-----> "OvmfPkg/AcpiTimerLib/DxeAcpiTimerLib.inf" instance
-----> PciLib class
-----> "OvmfPkg/DxePciLibI440FxQ35/DxePciLibI440FxQ35.inf" instance
-----> PciExpressLib class
-----> "MdePkg/BasePciExpressLib/BasePciExpressLib.inf" instance

The LocalApicLib provides a constructor that gets called before the
AmdSevDxe can clear the memory encryption mask from the MMIO regions.

When running under the Q35 machine type, the call chain looks like this:

AcpiTimerLibConstructor () [AcpiTimerLib]
PciRead32 () [DxePciLibI440FxQ35]
PciExpressRead32 () [PciExpressLib]

PciExpressRead32 () reads the MMIO region. The MMIO regions are not
yet mapped unencrypted, so the check introduced in commit
85b8eac59b8c5bd9c7eb9afdb64357ce1aa2e803 raises a #GP.

The AmdSevDxe driver does not require access to the extended PCI
config space. Accessing the normal PCI config space via IO ports should be
sufficient. Use a module-scope override to make AmdSevDxe use
BasePciLibCf8 instead of BasePciExpressLib so that PciRead32 () uses the
IO ports instead of the extended config space.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Suggested-by: Laszlo Ersek <lersek@redhat.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/AmdSev/AmdSevX64.dsc | 5 ++++-
OvmfPkg/Bhyve/BhyveX64.dsc | 5 ++++-
OvmfPkg/OvmfPkgIa32X64.dsc | 5 ++++-
OvmfPkg/OvmfPkgX64.dsc | 5 ++++-
OvmfPkg/OvmfXen.dsc | 5 ++++-
5 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/OvmfPkg/AmdSev/AmdSevX64.dsc b/OvmfPkg/AmdSev/AmdSevX64.dsc
index e6cd10b75922..a8ea08a10a99 100644
--- a/OvmfPkg/AmdSev/AmdSevX64.dsc
+++ b/OvmfPkg/AmdSev/AmdSevX64.dsc
@@ -812,7 +812,10 @@ [Components]
!endif

OvmfPkg/PlatformDxe/Platform.inf
- OvmfPkg/AmdSevDxe/AmdSevDxe.inf
+ OvmfPkg/AmdSevDxe/AmdSevDxe.inf {
+ <LibraryClasses>
+ PciLib|MdePkg/Library/BasePciLibCf8/BasePciLibCf8.inf
+ }
OvmfPkg/IoMmuDxe/IoMmuDxe.inf

#
diff --git a/OvmfPkg/Bhyve/BhyveX64.dsc b/OvmfPkg/Bhyve/BhyveX64.dsc
index d8fe607d1cf7..f45634996247 100644
--- a/OvmfPkg/Bhyve/BhyveX64.dsc
+++ b/OvmfPkg/Bhyve/BhyveX64.dsc
@@ -790,7 +790,10 @@ [Components]
!endif

OvmfPkg/PlatformDxe/Platform.inf
- OvmfPkg/AmdSevDxe/AmdSevDxe.inf
+ OvmfPkg/AmdSevDxe/AmdSevDxe.inf {
+ <LibraryClasses>
+ PciLib|MdePkg/Library/BasePciLibCf8/BasePciLibCf8.inf
+ }
OvmfPkg/IoMmuDxe/IoMmuDxe.inf


diff --git a/OvmfPkg/OvmfPkgIa32X64.dsc b/OvmfPkg/OvmfPkgIa32X64.dsc
index a467ab7090fb..f42abc041c0c 100644
--- a/OvmfPkg/OvmfPkgIa32X64.dsc
+++ b/OvmfPkg/OvmfPkgIa32X64.dsc
@@ -965,7 +965,10 @@ [Components.X64]
!endif

OvmfPkg/PlatformDxe/Platform.inf
- OvmfPkg/AmdSevDxe/AmdSevDxe.inf
+ OvmfPkg/AmdSevDxe/AmdSevDxe.inf {
+ <LibraryClasses>
+ PciLib|MdePkg/Library/BasePciLibCf8/BasePciLibCf8.inf
+ }
OvmfPkg/IoMmuDxe/IoMmuDxe.inf

!if $(SMM_REQUIRE) == TRUE
diff --git a/OvmfPkg/OvmfPkgX64.dsc b/OvmfPkg/OvmfPkgX64.dsc
index e56b83d95e09..19cc5d4122e2 100644
--- a/OvmfPkg/OvmfPkgX64.dsc
+++ b/OvmfPkg/OvmfPkgX64.dsc
@@ -963,7 +963,10 @@ [Components]
!endif

OvmfPkg/PlatformDxe/Platform.inf
- OvmfPkg/AmdSevDxe/AmdSevDxe.inf
+ OvmfPkg/AmdSevDxe/AmdSevDxe.inf {
+ <LibraryClasses>
+ PciLib|MdePkg/Library/BasePciLibCf8/BasePciLibCf8.inf
+ }
OvmfPkg/IoMmuDxe/IoMmuDxe.inf

!if $(SMM_REQUIRE) == TRUE
diff --git a/OvmfPkg/OvmfXen.dsc b/OvmfPkg/OvmfXen.dsc
index a31519e356b7..383cb03d2a14 100644
--- a/OvmfPkg/OvmfXen.dsc
+++ b/OvmfPkg/OvmfXen.dsc
@@ -729,7 +729,10 @@ [Components]
}

OvmfPkg/PlatformDxe/Platform.inf
- OvmfPkg/AmdSevDxe/AmdSevDxe.inf
+ OvmfPkg/AmdSevDxe/AmdSevDxe.inf {
+ <LibraryClasses>
+ PciLib|MdePkg/Library/BasePciLibCf8/BasePciLibCf8.inf
+ }
OvmfPkg/IoMmuDxe/IoMmuDxe.inf

#
--
2.25.1


[PATCH v8 15/32] OvmfPkg/MemEncryptSevLib: add function to check the VMPL0

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

The Virtual Machine Privilege Level (VMPL) feature in the SEV-SNP
architecture allows a guest VM to divide its address space into four
levels. The levels can be used to provide hardware-isolated
abstraction layers within a VM. VMPL0 is the highest privilege level, and
VMPL3 is the least privileged. Certain operations must be done by
VMPL0 software, such as:

* Validate or invalidate memory range (PVALIDATE instruction)
* Allocate VMSA page (RMPADJUST instruction when VMSA=1)

The initial SEV-SNP support assumes that the guest is running at VMPL0.
Let's add a function in MemEncryptSevLib that can be used to check
whether the guest is booted under VMPL0.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
.../X64/SnpPageStateChange.h | 5 ++
.../X64/SecSnpSystemRamValidate.c | 46 +++++++++++++++++++
.../X64/SnpPageStateChangeInternal.c | 1 -
3 files changed, 51 insertions(+), 1 deletion(-)

diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h
index 8bbdf06468b9..cc1318075523 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChange.h
@@ -28,4 +28,9 @@ InternalSetPageState (
IN BOOLEAN UseLargeEntry
);

+VOID
+SnpPageStateFailureTerminate (
+ VOID
+ );
+
#endif
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c
index 64aab7f45b6d..3394094a65e5 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SecSnpSystemRamValidate.c
@@ -14,6 +14,43 @@

#include "SnpPageStateChange.h"

+//
+// The variable used for the VMPL check.
+//
+STATIC UINT8 gVmpl0Data[4096];
+
+/**
+ The function checks whether SEV-SNP guest is booted under VMPL0.
+
+ @retval TRUE The guest is booted under VMPL0
+ @retval FALSE The guest is not booted under VMPL0
+ **/
+STATIC
+BOOLEAN
+SevSnpIsVmpl0 (
+ VOID
+ )
+{
+ UINT64 Rdx;
+ EFI_STATUS Status;
+
+ //
+ // There is no straightforward way to query the current VMPL level.
+ // The simplest method is to use the RMPADJUST instruction to change
+ // a page permission to a VMPL level-1, and if the guest kernel is
+ // launched at a level <= 1, then RMPADJUST instruction will return
+ // an error.
+ //
+ Rdx = 1;
+
+ Status = AsmRmpAdjust ((UINT64) gVmpl0Data, 0, Rdx);
+ if (EFI_ERROR (Status)) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
/**
Pre-validate the system RAM when SEV-SNP is enabled in the guest VM.

@@ -32,5 +69,14 @@ MemEncryptSevSnpPreValidateSystemRam (
return;
}

+ //
+ // The page state change uses the PVALIDATE instruction. The instruction
+ // can be run at VMPL-0 only. If this is not a VMPL-0 guest then terminate
+ // the boot.
+ //
+ if (!SevSnpIsVmpl0 ()) {
+ SnpPageStateFailureTerminate ();
+ }
+
InternalSetPageState (BaseAddress, NumPages, SevSnpPagePrivate, TRUE);
}
diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
index 506df12d4e51..653151d4a422 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/X64/SnpPageStateChangeInternal.c
@@ -40,7 +40,6 @@ MemoryStateToGhcbOp (
return Cmd;
}

-STATIC
VOID
SnpPageStateFailureTerminate (
VOID
--
2.25.1


[PATCH v8 12/32] OvmfPkg/PlatformPei: register GHCB gpa for the SEV-SNP guest

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

An SEV-SNP guest requires that the GHCB GPA be registered before use.
See the GHCB specification, section 2.3.2, for more details.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/PlatformPei/AmdSev.c | 91 ++++++++++++++++++++++++++++++++++++
1 file changed, 91 insertions(+)

diff --git a/OvmfPkg/PlatformPei/AmdSev.c b/OvmfPkg/PlatformPei/AmdSev.c
index a8bf610022ba..de876fdb478e 100644
--- a/OvmfPkg/PlatformPei/AmdSev.c
+++ b/OvmfPkg/PlatformPei/AmdSev.c
@@ -19,9 +19,93 @@
#include <PiPei.h>
#include <Register/Amd/Msr.h>
#include <Register/Intel/SmramSaveStateMap.h>
+#include <Library/VmgExitLib.h>

#include "Platform.h"

+/**
+ Handle an SEV-SNP/GHCB protocol check failure.
+
+ Notify the hypervisor using the VMGEXIT instruction that the SEV-SNP guest
+ wishes to be terminated.
+
+ @param[in] ReasonCode Reason code to provide to the hypervisor for the
+ termination request.
+
+**/
+STATIC
+VOID
+SevEsProtocolFailure (
+ IN UINT8 ReasonCode
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+
+ //
+ // Use the GHCB MSR Protocol to request termination by the hypervisor
+ //
+ Msr.GhcbPhysicalAddress = 0;
+ Msr.GhcbTerminate.Function = GHCB_INFO_TERMINATE_REQUEST;
+ Msr.GhcbTerminate.ReasonCodeSet = GHCB_TERMINATE_GHCB;
+ Msr.GhcbTerminate.ReasonCode = ReasonCode;
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+ AsmVmgExit ();
+
+ ASSERT (FALSE);
+ CpuDeadLoop ();
+}
+
+/**
+
+ This function can be used to register the GHCB GPA.
+
+ @param[in] Address The physical address to be registered.
+
+**/
+STATIC
+VOID
+GhcbRegister (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+ MSR_SEV_ES_GHCB_REGISTER CurrentMsr;
+ EFI_PHYSICAL_ADDRESS GuestFrameNumber;
+
+ GuestFrameNumber = Address >> EFI_PAGE_SHIFT;
+
+ //
+ // Save the current MSR Value
+ //
+ CurrentMsr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+ //
+ // Use the GHCB MSR Protocol to request to register the GPA.
+ //
+ Msr.GhcbPhysicalAddress = 0;
+ Msr.GhcbGpaRegister.Function = GHCB_INFO_GHCB_GPA_REGISTER_REQUEST;
+ Msr.GhcbGpaRegister.GuestFrameNumber = GuestFrameNumber;
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+ AsmVmgExit ();
+
+ Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+ //
+ // If hypervisor responded with a different GPA than requested then fail.
+ //
+ if ((Msr.GhcbGpaRegister.Function != GHCB_INFO_GHCB_GPA_REGISTER_RESPONSE) ||
+ (Msr.GhcbGpaRegister.GuestFrameNumber != GuestFrameNumber)) {
+ SevEsProtocolFailure (GHCB_TERMINATE_GHCB_GENERAL);
+ }
+
+ //
+ // Restore the MSR
+ //
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, CurrentMsr.GhcbPhysicalAddress);
+}
+
/**

Initialize SEV-ES support if running as an SEV-ES guest.
@@ -109,6 +193,13 @@ AmdSevEsInitialize (
"SEV-ES is enabled, %lu GHCB backup pages allocated starting at 0x%p\n",
(UINT64)GhcbBackupPageCount, GhcbBackupBase));

+ //
+ // An SEV-SNP guest requires that the GHCB GPA be registered before it is used.
+ //
+ if (MemEncryptSevSnpIsEnabled ()) {
+ GhcbRegister (GhcbBasePa);
+ }
+
AsmWriteMsr64 (MSR_SEV_ES_GHCB, GhcbBasePa);

//
--
2.25.1


[PATCH v8 11/32] OvmfPkg/VmgExitLib: use SEV-SNP-validated CPUID values

Brijesh Singh
 

From: Michael Roth <michael.roth@amd.com>

SEV-SNP firmware allows a special guest page to be populated with
guest CPUID values so that they can be validated against supported
host features before being loaded into encrypted guest memory to be
used instead of hypervisor-provided values [1].

Add handling for this in the CPUID #VC handler and use it whenever
SEV-SNP is enabled. To do so, existing CPUID handling via VmgExit is
moved to a helper, GetCpuidHyp(), and a new helper that uses the CPUID
page to do the lookup, GetCpuidFw(), is used instead when SNP is
enabled. For cases where SNP CPUID lookups still rely on fetching
specific CPUID fields from hypervisor, GetCpuidHyp() is used there as
well.

[1]: SEV SNP Firmware ABI Specification, Rev. 0.8, 8.13.2.6
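
One part of the table-based handling that benefits from a worked example is
the XSAVE area size for leaf 0xD: GetCpuidFw() recomputes it from the
per-component sub-leaves in the CPUID page rather than trusting a single
hypervisor-supplied value. A hedged sketch with typical, illustrative
numbers (576-byte legacy-plus-header area, a 256-byte AVX component at
offset 576):

  //
  // Worked example only; the real logic lives in GetCpuidXSaveSize () in
  // the diff below. Assume XCR0 = 0x7 (x87 | SSE | AVX).
  //
  UINT32  Base      = 512 + 64;                        // legacy region + XSAVE header
  UINT32  AvxSize   = 256;                             // CPUID (0xD, 2).EAX
  UINT32  AvxOffset = 576;                             // CPUID (0xD, 2).EBX
  UINT32  Standard  = MAX (Base, AvxOffset + AvxSize); // 0xD.0 EBX: max (offset + size) = 832
  UINT32  Compacted = Base + AvxSize;                  // 0xD.1 EBX: base + sum (sizes)  = 832

The standard-format size is the maximum of (offset + size) over the enabled
state components, while the compacted-format size is the base area plus the
sum of the enabled component sizes; if an enabled XCR0/XSS bit has no
matching table entry, the lookup is treated as an error.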

Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/Library/VmgExitLib/SecVmgExitLib.inf | 2 +
OvmfPkg/Library/VmgExitLib/VmgExitLib.inf | 3 +
OvmfPkg/Library/VmgExitLib/VmgExitVcHandler.c | 444 ++++++++++++++++--
3 files changed, 419 insertions(+), 30 deletions(-)

diff --git a/OvmfPkg/Library/VmgExitLib/SecVmgExitLib.inf b/OvmfPkg/Library/VmgExitLib/SecVmgExitLib.inf
index e6f6ea7972fd..78207fa0f9c9 100644
--- a/OvmfPkg/Library/VmgExitLib/SecVmgExitLib.inf
+++ b/OvmfPkg/Library/VmgExitLib/SecVmgExitLib.inf
@@ -42,4 +42,6 @@ [LibraryClasses]
[FixedPcd]
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupBase
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize

diff --git a/OvmfPkg/Library/VmgExitLib/VmgExitLib.inf b/OvmfPkg/Library/VmgExitLib/VmgExitLib.inf
index c66c68726cdb..7963670e7d30 100644
--- a/OvmfPkg/Library/VmgExitLib/VmgExitLib.inf
+++ b/OvmfPkg/Library/VmgExitLib/VmgExitLib.inf
@@ -38,3 +38,6 @@ [LibraryClasses]
LocalApicLib
MemEncryptSevLib

+[Pcd]
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize
diff --git a/OvmfPkg/Library/VmgExitLib/VmgExitVcHandler.c b/OvmfPkg/Library/VmgExitLib/VmgExitVcHandler.c
index 41b0c8cc5312..cab4e2b230d0 100644
--- a/OvmfPkg/Library/VmgExitLib/VmgExitVcHandler.c
+++ b/OvmfPkg/Library/VmgExitLib/VmgExitVcHandler.c
@@ -17,6 +17,7 @@
#include <IndustryStandard/InstructionParsing.h>

#include "VmgExitVcHandler.h"
+//#include <Library/MemEncryptSevLib.h>

//
// Instruction execution mode definition
@@ -130,6 +131,32 @@ UINT64
SEV_ES_INSTRUCTION_DATA *InstructionData
);

+//
+// SEV-SNP Cpuid table entry/function
+//
+typedef PACKED struct {
+ UINT32 EaxIn;
+ UINT32 EcxIn;
+ UINT64 Unused;
+ UINT64 Unused2;
+ UINT32 Eax;
+ UINT32 Ebx;
+ UINT32 Ecx;
+ UINT32 Edx;
+ UINT64 Reserved;
+} SEV_SNP_CPUID_FUNCTION;
+
+//
+// SEV-SNP Cpuid page format
+//
+typedef PACKED struct {
+ UINT32 Count;
+ UINT32 Reserved1;
+ UINT64 Reserved2;
+ SEV_SNP_CPUID_FUNCTION function[0];
+} SEV_SNP_CPUID_INFO;
+
+
/**
Return a pointer to the contents of the specified register.

@@ -1496,58 +1523,415 @@ InvdExit (
}

/**
- Handle a CPUID event.
+ Fetch CPUID leaf/function via hypervisor/VMGEXIT.

- Use the VMGEXIT instruction to handle a CPUID event.
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in] EaxIn EAX input for cpuid instruction
+ @param[in] EcxIn ECX input for cpuid instruction
+ @param[in] XCr0 XCR0 at time of cpuid instruction
+ @param[in, out] Eax Pointer to store leaf's EAX value
+ @param[in, out] Ebx Pointer to store leaf's EBX value
+ @param[in, out] Ecx Pointer to store leaf's ECX value
+ @param[in, out] Edx Pointer to store leaf's EDX value
+ @param[in, out] Status Pointer to store status from VMGEXIT (always 0
+ unless return value indicates failure)
+ @param[in, out] UnsupportedExit Pointer to store indication of unsupported
+ VMGEXIT (always false unless return value
+ indicates failure)

- @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
- Block
- @param[in, out] Regs x64 processor context
- @param[in] InstructionData Instruction parsing context
-
- @retval 0 Event handled successfully
- @return New exception value to propagate
+ @retval TRUE CPUID leaf fetched successfully.
+ @retval FALSE An error occurred while fetching the CPUID leaf. Callers
+ should check Status and UnsupportedExit and handle them
+ accordingly if they indicate a more precise
+ error condition.

**/
STATIC
-UINT64
-CpuidExit (
+BOOLEAN
+GetCpuidHyp (
IN OUT GHCB *Ghcb,
- IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
- IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ IN UINT32 EaxIn,
+ IN UINT32 EcxIn,
+ IN UINT64 XCr0,
+ IN OUT UINT32 *Eax,
+ IN OUT UINT32 *Ebx,
+ IN OUT UINT32 *Ecx,
+ IN OUT UINT32 *Edx,
+ IN OUT UINT64 *Status,
+ IN OUT BOOLEAN *UnsupportedExit
)
{
- UINT64 Status;
-
- Ghcb->SaveArea.Rax = Regs->Rax;
+ *UnsupportedExit = FALSE;
+ Ghcb->SaveArea.Rax = EaxIn;
VmgSetOffsetValid (Ghcb, GhcbRax);
- Ghcb->SaveArea.Rcx = Regs->Rcx;
+ Ghcb->SaveArea.Rcx = EcxIn;
VmgSetOffsetValid (Ghcb, GhcbRcx);
- if (Regs->Rax == CPUID_EXTENDED_STATE) {
- IA32_CR4 Cr4;
-
- Cr4.UintN = AsmReadCr4 ();
- Ghcb->SaveArea.XCr0 = (Cr4.Bits.OSXSAVE == 1) ? AsmXGetBv (0) : 1;
+ if (EaxIn == CPUID_EXTENDED_STATE) {
+ Ghcb->SaveArea.XCr0 = XCr0;
VmgSetOffsetValid (Ghcb, GhcbXCr0);
}

- Status = VmgExit (Ghcb, SVM_EXIT_CPUID, 0, 0);
- if (Status != 0) {
- return Status;
+ *Status = VmgExit (Ghcb, SVM_EXIT_CPUID, 0, 0);
+ if (*Status != 0) {
+ return FALSE;
}

if (!VmgIsOffsetValid (Ghcb, GhcbRax) ||
!VmgIsOffsetValid (Ghcb, GhcbRbx) ||
!VmgIsOffsetValid (Ghcb, GhcbRcx) ||
!VmgIsOffsetValid (Ghcb, GhcbRdx)) {
- return UnsupportedExit (Ghcb, Regs, InstructionData);
+ *UnsupportedExit = TRUE;
+ return FALSE;
}
- Regs->Rax = Ghcb->SaveArea.Rax;
- Regs->Rbx = Ghcb->SaveArea.Rbx;
- Regs->Rcx = Ghcb->SaveArea.Rcx;
- Regs->Rdx = Ghcb->SaveArea.Rdx;
+
+ if (Eax) {
+ *Eax = Ghcb->SaveArea.Rax;
+ }
+ if (Ebx) {
+ *Ebx = Ghcb->SaveArea.Rbx;
+ }
+ if (Ecx) {
+ *Ecx = Ghcb->SaveArea.Rcx;
+ }
+ if (Edx) {
+ *Edx = Ghcb->SaveArea.Rdx;
+ }
+
+ return TRUE;
+}
+
+/**
+ Check if SEV-SNP enabled.
+
+ @retval TRUE SEV-SNP is enabled.
+ @retval FALSE SEV-SNP is disabled.
+
+**/
+STATIC
+BOOLEAN
+SnpEnabled (VOID)
+{
+ MSR_SEV_STATUS_REGISTER Msr;
+
+ Msr.Uint32 = AsmReadMsr32(MSR_SEV_STATUS);
+
+ return !!Msr.Bits.SevSnpBit;
+}
+
+/**
+ Calculate the total XSAVE area size for enabled XSAVE areas
+
+ @param[in] XFeaturesEnabled Bit-mask of enabled XSAVE features/areas as
+ indicated by XCR0/MSR_IA32_XSS bits
+ @param[in] XSaveBaseSize Base/legacy XSAVE area size (e.g. when
+ XCR0 is 1)
+ @param[in, out] XSaveSize Pointer to storage for calculated XSAVE area
+ size
+ @param[in] Compacted Whether or not the calculation is for the
+ normal XSAVE area size (leaf 0xD,0x0,EBX) or
+ compacted XSAVE area size (leaf 0xD,0x1,EBX)
+
+
+ @retval TRUE XSAVE size calculation was successful.
+ @retval FALSE XSAVE size calculation was unsuccessful.
+**/
+STATIC
+BOOLEAN
+GetCpuidXSaveSize (
+ IN UINT64 XFeaturesEnabled,
+ IN UINT32 XSaveBaseSize,
+ IN OUT UINT32 *XSaveSize,
+ IN BOOLEAN Compacted
+ )
+{
+ SEV_SNP_CPUID_INFO *CpuidInfo;
+ UINT64 XFeaturesFound = 0;
+ UINT32 Idx;
+
+ *XSaveSize = XSaveBaseSize;
+ CpuidInfo = (SEV_SNP_CPUID_INFO *)(UINT64)PcdGet32(PcdOvmfCpuidBase);
+
+ for (Idx = 0; Idx < CpuidInfo->Count; Idx++) {
+ SEV_SNP_CPUID_FUNCTION *CpuidFn = &CpuidInfo->function[Idx];
+
+ /*
+ * Only the per-component sub-leaves (ECX_IN = 2..63) carry the XSAVE
+ * state component size (EAX) and offset (EBX) needed here.
+ */
+ if (!(CpuidFn->EaxIn == 0xD &&
+ (CpuidFn->EcxIn >= 2 && CpuidFn->EcxIn < 64))) {
+ continue;
+ }
+
+ if ((XFeaturesFound & (1ULL << CpuidFn->EcxIn)) ||
+ !(XFeaturesEnabled & (1ULL << CpuidFn->EcxIn))) {
+ continue;
+ }
+
+ XFeaturesFound |= (1ULL << CpuidFn->EcxIn);
+ if (Compacted) {
+ *XSaveSize += CpuidFn->Eax;
+ } else {
+ *XSaveSize = MAX(*XSaveSize, CpuidFn->Eax + CpuidFn->Ebx);
+ }
+ }
+
+ /*
+ * Either the guest set unsupported XCR0/XSS bits, or the corresponding
+ * entries in the CPUID table were not present. This is an invalid state.
+ */
+ if (XFeaturesFound != (XFeaturesEnabled & ~3ULL)) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/**
+ Check if a CPUID leaf/function is indexed via ECX sub-leaf/sub-function
+
+ @param[in] EaxIn EAX input for cpuid instruction
+
+ @retval FALSE cpuid leaf/function is not indexed by ECX input
+ @retval TRUE cpuid leaf/function is indexed by ECX input
+
+**/
+STATIC
+BOOLEAN
+IsFunctionIndexed (
+ IN UINT32 EaxIn
+ )
+{
+ switch (EaxIn) {
+ case CPUID_CACHE_PARAMS:
+ case CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS:
+ case CPUID_EXTENDED_TOPOLOGY:
+ case CPUID_EXTENDED_STATE:
+ case CPUID_INTEL_RDT_MONITORING:
+ case CPUID_INTEL_RDT_ALLOCATION:
+ case CPUID_INTEL_SGX:
+ case CPUID_INTEL_PROCESSOR_TRACE:
+ case CPUID_DETERMINISTIC_ADDRESS_TRANSLATION_PARAMETERS:
+ case CPUID_V2_EXTENDED_TOPOLOGY:
+ case 0x8000001D: /* Cache Topology Information */
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/**
+ Fetch CPUID leaf/function via SEV-SNP CPUID table.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in] EaxIn EAX input for cpuid instruction
+ @param[in] EcxIn ECX input for cpuid instruction
+ @param[in] XCr0 XCR0 at time of cpuid instruction
+ @param[in, out] Eax Pointer to store leaf's EAX value
+ @param[in, out] Ebx Pointer to store leaf's EBX value
+ @param[in, out] Ecx Pointer to store leaf's ECX value
+ @param[in, out] Edx Pointer to store leaf's EDX value
+ @param[in, out] Status Pointer to store status from VMGEXIT (always 0
+ unless return value indicates failure)
+ @param[in, out] Unsupported Pointer to store indication of unsupported
+ VMGEXIT (always false unless return value
+ indicates failure)
+
+ @retval TRUE CPUID leaf fetched successfully.
+ @retval FALSE An error occurred while fetching the CPUID leaf. Callers
+ should check Status and Unsupported and handle them
+ accordingly if they indicate a more precise
+ error condition.
+
+**/
+STATIC
+BOOLEAN
+GetCpuidFw (
+ IN OUT GHCB *Ghcb,
+ IN UINT32 EaxIn,
+ IN UINT32 EcxIn,
+ IN UINT64 XCr0,
+ IN OUT UINT32 *Eax,
+ IN OUT UINT32 *Ebx,
+ IN OUT UINT32 *Ecx,
+ IN OUT UINT32 *Edx,
+ IN OUT UINT64 *Status,
+ IN OUT BOOLEAN *Unsupported
+ )
+{
+ SEV_SNP_CPUID_INFO *CpuidInfo;
+ BOOLEAN Found;
+ UINT32 Idx;
+
+ CpuidInfo = (SEV_SNP_CPUID_INFO *)(UINT64)PcdGet32(PcdOvmfCpuidBase);
+ Found = FALSE;
+
+ for (Idx = 0; Idx < CpuidInfo->Count; Idx++) {
+ SEV_SNP_CPUID_FUNCTION *CpuidFn = &CpuidInfo->function[Idx];
+
+ if (CpuidFn->EaxIn != EaxIn) {
+ continue;
+ }
+
+ if (IsFunctionIndexed(CpuidFn->EaxIn) && CpuidFn->EcxIn != EcxIn) {
+ continue;
+ }
+
+ *Eax = CpuidFn->Eax;
+ *Ebx = CpuidFn->Ebx;
+ *Ecx = CpuidFn->Ecx;
+ *Edx = CpuidFn->Edx;
+
+ Found = TRUE;
+ break;
+ }
+
+ if (!Found) {
+ *Eax = *Ebx = *Ecx = *Edx = 0;
+ goto Out;
+ }
+
+ if (EaxIn == CPUID_VERSION_INFO) {
+ IA32_CR4 Cr4;
+ UINT32 Ebx2;
+ UINT32 Edx2;
+
+ if (!GetCpuidHyp (Ghcb, EaxIn, EcxIn, XCr0, NULL, &Ebx2, NULL, &Edx2,
+ Status, Unsupported)) {
+ return FALSE;
+ }
+
+ /* initial APIC ID */
+ *Ebx = (*Ebx & 0x00FFFFFF) | (Ebx2 & 0xFF000000);
+ /* APIC enabled bit */
+ *Edx = (*Edx & ~BIT9) | (Edx2 & BIT9);
+ /* OSXSAVE enabled bit */
+ Cr4.UintN = AsmReadCr4 ();
+ *Ecx = (Cr4.Bits.OSXSAVE) ? (*Ecx | BIT27)
+ : (*Ecx & ~BIT27);
+ } else if (EaxIn == CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
+ IA32_CR4 Cr4;
+
+ Cr4.UintN = AsmReadCr4 ();
+ /* OSPKE enabled bit */
+ *Ecx = (Cr4.Bits.PKE) ? (*Ecx | BIT4) : (*Ecx & ~BIT4);
+ } else if (EaxIn == CPUID_EXTENDED_TOPOLOGY) {
+ if (!GetCpuidHyp (Ghcb, EaxIn, EcxIn, XCr0, NULL, NULL, NULL, Edx,
+ Status, Unsupported)) {
+ return FALSE;
+ }
+ } else if (EaxIn == CPUID_EXTENDED_STATE && (EcxIn == 0 || EcxIn == 1)) {
+ MSR_IA32_XSS_REGISTER XssMsr;
+ BOOLEAN Compacted;
+ UINT32 XSaveSize;
+
+ XssMsr.Uint64 = 0;
+ if (EcxIn == 1) {
+ /*
+ * The PPR and APM aren't clear on what size should be encoded in
+ * 0xD:0x1:EBX when compaction is not enabled by either XSAVEC or
+ * XSAVES, as those feature bits are generally fixed to 1 on real CPUs. Report
+ * this undefined case as an error.
+ */
+ if (!(*Eax & (BIT3 | BIT1))) { /* (XSAVES | XSAVEC) */
+ return FALSE;
+ }
+
+ Compacted = TRUE;
+ XssMsr.Uint64 = AsmReadMsr64 (MSR_IA32_XSS);
+ }
+
+ if (!GetCpuidXSaveSize (XCr0 | XssMsr.Uint64, *Ebx, &XSaveSize,
+ Compacted)) {
+ return FALSE;
+ }
+
+ *Ebx = XSaveSize;
+ } else if (EaxIn == 0x8000001E) {
+ UINT32 Ebx2;
+ UINT32 Ecx2;
+
+ /* extended APIC ID */
+ if (!GetCpuidHyp (Ghcb, EaxIn, EcxIn, XCr0, Eax, &Ebx2, &Ecx2, NULL,
+ Status, Unsupported)) {
+ return FALSE;
+ }
+ /* compute ID */
+ *Ebx = (*Ebx & 0xFFFFFF00) | (Ebx2 & 0x000000FF);
+ /* node ID */
+ *Ecx = (*Ecx & 0xFFFFFF00) | (Ecx2 & 0x000000FF);
+ }
+
+Out:
+ *Status = 0;
+ *Unsupported = FALSE;
+ return TRUE;
+}
+
+/**
+ Handle a CPUID event.
+
+ Use VMGEXIT instruction or CPUID table to handle a CPUID event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+CpuidExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ BOOLEAN Unsupported;
+ UINT64 Status;
+ UINT32 EaxIn;
+ UINT32 EcxIn;
+ UINT64 XCr0;
+ UINT32 Eax;
+ UINT32 Ebx;
+ UINT32 Ecx;
+ UINT32 Edx;
+
+ EaxIn = Regs->Rax;
+ EcxIn = Regs->Rcx;
+
+ XCr0 = 0;   // Only consulted when EaxIn == CPUID_EXTENDED_STATE; avoid passing it uninitialized.
+ if (EaxIn == CPUID_EXTENDED_STATE) {
+ IA32_CR4 Cr4;
+
+ Cr4.UintN = AsmReadCr4 ();
+ XCr0 = (Cr4.Bits.OSXSAVE == 1) ? AsmXGetBv (0) : 1;
+ }
+
+ if (SnpEnabled ()) {
+ if (!GetCpuidFw (Ghcb, EaxIn, EcxIn, XCr0, &Eax, &Ebx, &Ecx, &Edx,
+ &Status, &Unsupported)) {
+ goto CpuidFail;
+ }
+ } else {
+ if (!GetCpuidHyp (Ghcb, EaxIn, EcxIn, XCr0, &Eax, &Ebx, &Ecx, &Edx,
+ &Status, &Unsupported)) {
+ goto CpuidFail;
+ }
+ }
+
+ Regs->Rax = Eax;
+ Regs->Rbx = Ebx;
+ Regs->Rcx = Ecx;
+ Regs->Rdx = Edx;

return 0;
+
+CpuidFail:
+ if (Unsupported) {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ return Status;
}

/**
--
2.25.1


[PATCH v8 10/32] OvmfPkg/SecMain: register GHCB gpa for the SEV-SNP guest

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

An SEV-SNP guest requires that the GHCB GPA be registered with the
hypervisor before it is used. See the GHCB specification, section 2.3.2,
for more details.
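
Condensed, the SEC-phase ordering added by this patch is sketched below;
the function names are the ones introduced in the diff, and every failure
path ends in a termination request.

  //
  // Sketch of the SEC-phase SEV-SNP setup order (illustrative; see
  // SevEsProtocolCheck () in the diff for the real code).
  //
  if (SevSnpIsEnabled ()) {                      // SEV_STATUS MSR, bit 2
    if (!HypervisorSnpFeatureCheck ()) {         // GHCB MSR feature query
      SevEsProtocolFailure (GHCB_TERMINATE_GHCB_PROTOCOL);
    }

    //
    // Registration must happen before the GHCB page is used for any
    // VMGEXIT that relies on the shared GHCB.
    //
    SevSnpGhcbRegister (FixedPcdGet32 (PcdOvmfSecGhcbBase));
  }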

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/Sec/AmdSev.c | 118 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 118 insertions(+)

diff --git a/OvmfPkg/Sec/AmdSev.c b/OvmfPkg/Sec/AmdSev.c
index 7f74e8bfe88e..9dd42b195785 100644
--- a/OvmfPkg/Sec/AmdSev.c
+++ b/OvmfPkg/Sec/AmdSev.c
@@ -48,6 +48,103 @@ SevEsProtocolFailure (
CpuDeadLoop ();
}

+/**
+ Determine if SEV-SNP is active.
+
+ @retval TRUE SEV-SNP is enabled
+ @retval FALSE SEV-SNP is not enabled
+
+**/
+STATIC
+BOOLEAN
+SevSnpIsEnabled (
+ VOID
+ )
+{
+ MSR_SEV_STATUS_REGISTER Msr;
+
+ //
+ // Read the SEV_STATUS MSR to determine whether SEV-SNP is active.
+ //
+ Msr.Uint32 = AsmReadMsr32 (MSR_SEV_STATUS);
+
+ //
+ // Check MSR_0xC0010131 Bit 2 (Sev-Snp Enabled)
+ //
+ if (Msr.Bits.SevSnpBit) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/**
+ Register the GHCB GPA
+
+*/
+STATIC
+VOID
+SevSnpGhcbRegister (
+ UINTN Address
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+ EFI_PHYSICAL_ADDRESS GuestFrameNumber;
+
+ GuestFrameNumber = Address >> EFI_PAGE_SHIFT;
+
+ //
+ // Use the GHCB MSR Protocol to request to register the GPA.
+ //
+ Msr.GhcbPhysicalAddress = 0;
+ Msr.GhcbGpaRegister.Function = GHCB_INFO_GHCB_GPA_REGISTER_REQUEST;
+ Msr.GhcbGpaRegister.GuestFrameNumber = GuestFrameNumber;
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+ AsmVmgExit ();
+
+ Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+ //
+ // If hypervisor responded with a different GPA than requested then fail.
+ //
+ if ((Msr.GhcbGpaRegister.Function != GHCB_INFO_GHCB_GPA_REGISTER_RESPONSE) ||
+ (Msr.GhcbGpaRegister.GuestFrameNumber != GuestFrameNumber)) {
+ SevEsProtocolFailure (GHCB_TERMINATE_GHCB_GENERAL);
+ }
+}
+
+/**
+ Verify that Hypervisor supports the SNP feature.
+
+ */
+STATIC
+BOOLEAN
+HypervisorSnpFeatureCheck (
+ VOID
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+
+ //
+ // Use the GHCB MSR Protocol to query the hypervisor capabilities
+ //
+ Msr.GhcbPhysicalAddress = 0;
+ Msr.GhcbHypervisorFeatures.Function = GHCB_HYPERVISOR_FEATURES_REQUEST;
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+ AsmVmgExit ();
+
+ Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+ if ((Msr.GhcbHypervisorFeatures.Function != GHCB_HYPERVISOR_FEATURES_RESPONSE) ||
+ (!(Msr.GhcbHypervisorFeatures.Features & GHCB_HV_FEATURES_SNP))) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
/**
Validate the SEV-ES/GHCB protocol level.

@@ -88,6 +185,27 @@ SevEsProtocolCheck (
SevEsProtocolFailure (GHCB_TERMINATE_GHCB_PROTOCOL);
}

+ //
+ // We cannot use MemEncryptSevSnpIsEnabled () here because
+ // ProcessLibraryConstructorList () has not been called yet.
+ //
+ if (SevSnpIsEnabled ()) {
+ //
+ // Check if hypervisor supports the SNP feature
+ //
+ if (!HypervisorSnpFeatureCheck ()) {
+ SevEsProtocolFailure (GHCB_TERMINATE_GHCB_PROTOCOL);
+ }
+
+ //
+ // Unlike an SEV-ES guest, an SEV-SNP guest requires that the GHCB GPA be
+ // registered with the hypervisor before use. This is done using the new
+ // VMGEXIT defined in GHCB version 2. Register the GPA before it is used.
+ //
+ SevSnpGhcbRegister (FixedPcdGet32 (PcdOvmfSecGhcbBase));
+ }
+
//
// SEV-ES protocol checking succeeded, set the initial GHCB address
//
--
2.25.1


[PATCH v8 09/32] OvmfPkg/MemEncryptSevLib: add MemEncryptSevSnpEnabled()

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

Create a function that can be used to determine whether the VM is running
as an SEV-SNP guest.
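
A minimal usage sketch: callers simply guard SNP-only work with the new
helper. The GhcbRegister() call is only an example of such work, as done
elsewhere in this series.

  //
  // Example usage: run SEV-SNP specific setup only when SNP is active.
  //
  if (MemEncryptSevSnpIsEnabled ()) {
    GhcbRegister (GhcbBasePa);
  }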

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/Include/Library/MemEncryptSevLib.h | 12 +++++++++
.../DxeMemEncryptSevLibInternal.c | 27 +++++++++++++++++++
.../PeiMemEncryptSevLibInternal.c | 27 +++++++++++++++++++
.../SecMemEncryptSevLibInternal.c | 19 +++++++++++++
4 files changed, 85 insertions(+)

diff --git a/OvmfPkg/Include/Library/MemEncryptSevLib.h b/OvmfPkg/Include/Library/MemEncryptSevLib.h
index adc490e466ec..796de62ec2f8 100644
--- a/OvmfPkg/Include/Library/MemEncryptSevLib.h
+++ b/OvmfPkg/Include/Library/MemEncryptSevLib.h
@@ -47,6 +47,18 @@ typedef enum {
MemEncryptSevAddressRangeError,
} MEM_ENCRYPT_SEV_ADDRESS_RANGE_STATE;

+/**
+ Returns a boolean to indicate whether SEV-SNP is enabled
+
+ @retval TRUE SEV-SNP is enabled
+ @retval FALSE SEV-SNP is not enabled
+**/
+BOOLEAN
+EFIAPI
+MemEncryptSevSnpIsEnabled (
+ VOID
+ );
+
/**
Returns a boolean to indicate whether SEV-ES is enabled.

diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLibInternal.c b/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLibInternal.c
index 2816f859a0c4..057129723824 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLibInternal.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/DxeMemEncryptSevLibInternal.c
@@ -19,6 +19,7 @@

STATIC BOOLEAN mSevStatus = FALSE;
STATIC BOOLEAN mSevEsStatus = FALSE;
+STATIC BOOLEAN mSevSnpStatus = FALSE;
STATIC BOOLEAN mSevStatusChecked = FALSE;

STATIC UINT64 mSevEncryptionMask = 0;
@@ -82,11 +83,37 @@ InternalMemEncryptSevStatus (
if (Msr.Bits.SevEsBit) {
mSevEsStatus = TRUE;
}
+
+ //
+ // Check MSR_0xC0010131 Bit 2 (Sev-Snp Enabled)
+ //
+ if (Msr.Bits.SevSnpBit) {
+ mSevSnpStatus = TRUE;
+ }
}

mSevStatusChecked = TRUE;
}

+/**
+ Returns a boolean to indicate whether SEV-SNP is enabled.
+
+ @retval TRUE SEV-SNP is enabled
+ @retval FALSE SEV-SNP is not enabled
+**/
+BOOLEAN
+EFIAPI
+MemEncryptSevSnpIsEnabled (
+ VOID
+ )
+{
+ if (!mSevStatusChecked) {
+ InternalMemEncryptSevStatus ();
+ }
+
+ return mSevSnpStatus;
+}
+
/**
Returns a boolean to indicate whether SEV-ES is enabled.

diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLibInternal.c b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLibInternal.c
index e2fd109d120f..b561f211f577 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLibInternal.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/PeiMemEncryptSevLibInternal.c
@@ -19,6 +19,7 @@

STATIC BOOLEAN mSevStatus = FALSE;
STATIC BOOLEAN mSevEsStatus = FALSE;
+STATIC BOOLEAN mSevSnpStatus = FALSE;
STATIC BOOLEAN mSevStatusChecked = FALSE;

STATIC UINT64 mSevEncryptionMask = 0;
@@ -82,11 +83,37 @@ InternalMemEncryptSevStatus (
if (Msr.Bits.SevEsBit) {
mSevEsStatus = TRUE;
}
+
+ //
+ // Check MSR_0xC0010131 Bit 2 (Sev-Snp Enabled)
+ //
+ if (Msr.Bits.SevSnpBit) {
+ mSevSnpStatus = TRUE;
+ }
}

mSevStatusChecked = TRUE;
}

+/**
+ Returns a boolean to indicate whether SEV-SNP is enabled.
+
+ @retval TRUE SEV-SNP is enabled
+ @retval FALSE SEV-SNP is not enabled
+**/
+BOOLEAN
+EFIAPI
+MemEncryptSevSnpIsEnabled (
+ VOID
+ )
+{
+ if (!mSevStatusChecked) {
+ InternalMemEncryptSevStatus ();
+ }
+
+ return mSevSnpStatus;
+}
+
/**
Returns a boolean to indicate whether SEV-ES is enabled.

diff --git a/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLibInternal.c b/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLibInternal.c
index 56d8f3f3183f..69852779e2ff 100644
--- a/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLibInternal.c
+++ b/OvmfPkg/Library/BaseMemEncryptSevLib/SecMemEncryptSevLibInternal.c
@@ -62,6 +62,25 @@ InternalMemEncryptSevStatus (
return ReadSevMsr ? AsmReadMsr32 (MSR_SEV_STATUS) : 0;
}

+/**
+ Returns a boolean to indicate whether SEV-SNP is enabled.
+
+ @retval TRUE SEV-SNP is enabled
+ @retval FALSE SEV-SNP is not enabled
+**/
+BOOLEAN
+EFIAPI
+MemEncryptSevSnpIsEnabled (
+ VOID
+ )
+{
+ MSR_SEV_STATUS_REGISTER Msr;
+
+ Msr.Uint32 = InternalMemEncryptSevStatus ();
+
+ return Msr.Bits.SevSnpBit ? TRUE : FALSE;
+}
+
/**
Returns a boolean to indicate whether SEV-ES is enabled.

--
2.25.1


[PATCH v8 08/32] OvmfPkg/ResetVector: use SEV-SNP-validated CPUID values

Brijesh Singh
 

From: Michael Roth <michael.roth@amd.com>

CPUID instructions are issued during early boot to do things like probe
for SEV support. Currently these are handled by a minimal #VC handler
that uses the MSR-based GHCB protocol to fetch the CPUID values from
the hypervisor. When SEV-SNP is enabled, use the firmware-validated
CPUID values from the CPUID page instead [1].

[1]: SEV SNP Firmware ABI Specification, Rev. 0.8, 8.13.2.6
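
The assembly below indexes the CPUID page with raw byte offsets
(SNP_CPUID_ENTRY_SZ and friends). For orientation, they correspond to the
48-byte entry layout used by the C #VC handler elsewhere in this series;
the C rendering below is a sketch, with the offsets spelled out in comments
and illustrative type names.

  //
  // Sketch of the CPUID page layout the #VC handler walks. Offsets match
  // the SNP_CPUID_* defines added in this patch.
  //
  typedef struct {
    UINT32  EaxIn;      // offset  0  (SNP_CPUID_ENTRY_EAX_IN)
    UINT32  EcxIn;      // offset  4  (SNP_CPUID_ENTRY_ECX_IN)
    UINT64  Unused;     // offset  8  (not consulted by this handler)
    UINT64  Unused2;    // offset 16
    UINT32  Eax;        // offset 24  (SNP_CPUID_ENTRY_EAX)
    UINT32  Ebx;        // offset 28  (SNP_CPUID_ENTRY_EBX)
    UINT32  Ecx;        // offset 32  (SNP_CPUID_ENTRY_ECX)
    UINT32  Edx;        // offset 36  (SNP_CPUID_ENTRY_EDX)
    UINT64  Reserved;   // offset 40; entry size 48 (SNP_CPUID_ENTRY_SZ)
  } CPUID_PAGE_ENTRY_SKETCH;

  typedef struct {
    UINT32                   Count;       // offset  0  (SNP_CPUID_COUNT)
    UINT32                   Reserved1;
    UINT64                   Reserved2;
    CPUID_PAGE_ENTRY_SKETCH  Entry[];     // offset 16  (SNP_CPUID_ENTRY)
  } CPUID_PAGE_SKETCH;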

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/ResetVector/Ia32/AmdSev.asm | 80 +++++++++++++++++++++++++++--
1 file changed, 75 insertions(+), 5 deletions(-)

diff --git a/OvmfPkg/ResetVector/Ia32/AmdSev.asm b/OvmfPkg/ResetVector/Ia32/AmdSev.asm
index 48d9178168b0..1f827da3b929 100644
--- a/OvmfPkg/ResetVector/Ia32/AmdSev.asm
+++ b/OvmfPkg/ResetVector/Ia32/AmdSev.asm
@@ -34,6 +34,18 @@ BITS 32
%define GHCB_CPUID_REGISTER_SHIFT 30
%define CPUID_INSN_LEN 2

+; #VC handler offsets/sizes for accessing SNP CPUID page
+;
+%define SNP_CPUID_ENTRY_SZ 48
+%define SNP_CPUID_COUNT 0
+%define SNP_CPUID_ENTRY 16
+%define SNP_CPUID_ENTRY_EAX_IN 0
+%define SNP_CPUID_ENTRY_ECX_IN 4
+%define SNP_CPUID_ENTRY_EAX 24
+%define SNP_CPUID_ENTRY_EBX 28
+%define SNP_CPUID_ENTRY_ECX 32
+%define SNP_CPUID_ENTRY_EDX 36
+

%define SEV_GHCB_MSR 0xc0010130
%define SEV_STATUS_MSR 0xc0010131
@@ -335,11 +347,61 @@ SevEsIdtNotCpuid:
TerminateVmgExit TERM_VC_NOT_CPUID
iret

- ;
- ; Total stack usage for the #VC handler is 44 bytes:
- ; - 12 bytes for the exception IRET (after popping error code)
- ; - 32 bytes for the local variables.
- ;
+; Use the SNP CPUID page to handle the cpuid lookup
+;
+; Modified: EAX, EBX, ECX, EDX
+;
+; Relies on the stack setup/usage in #VC handler:
+;
+; On entry,
+; [esp + VC_CPUID_FUNCTION] contains EAX input to cpuid instruction
+;
+; On return, stores corresponding results of CPUID lookup in:
+; [esp + VC_CPUID_RESULT_EAX]
+; [esp + VC_CPUID_RESULT_EBX]
+; [esp + VC_CPUID_RESULT_ECX]
+; [esp + VC_CPUID_RESULT_EDX]
+;
+SnpCpuidLookup:
+ mov eax, [esp + VC_CPUID_FUNCTION]
+ mov ebx, [CPUID_BASE + SNP_CPUID_COUNT]
+ mov ecx, CPUID_BASE + SNP_CPUID_ENTRY
+ ; Zero these out now so we can simply return if lookup fails
+ mov dword[esp + VC_CPUID_RESULT_EAX], 0
+ mov dword[esp + VC_CPUID_RESULT_EBX], 0
+ mov dword[esp + VC_CPUID_RESULT_ECX], 0
+ mov dword[esp + VC_CPUID_RESULT_EDX], 0
+
+SnpCpuidCheckEntry:
+ cmp ebx, 0
+ je VmmDoneSnpCpuid
+ cmp dword[ecx + SNP_CPUID_ENTRY_EAX_IN], eax
+ jne SnpCpuidCheckEntryNext
+ ; As with the SEV-ES handler, we assume the requested CPUID sub-leaf/index is 0
+ cmp dword[ecx + SNP_CPUID_ENTRY_ECX_IN], 0
+ je SnpCpuidEntryFound
+
+SnpCpuidCheckEntryNext:
+ dec ebx
+ add ecx, SNP_CPUID_ENTRY_SZ
+ jmp SnpCpuidCheckEntry
+
+SnpCpuidEntryFound:
+ mov eax, [ecx + SNP_CPUID_ENTRY_EAX]
+ mov [esp + VC_CPUID_RESULT_EAX], eax
+ mov eax, [ecx + SNP_CPUID_ENTRY_EBX]
+ mov [esp + VC_CPUID_RESULT_EBX], eax
+ mov eax, [ecx + SNP_CPUID_ENTRY_EDX]
+ mov [esp + VC_CPUID_RESULT_EDX], eax
+ mov eax, [ecx + SNP_CPUID_ENTRY_ECX]
+ mov [esp + VC_CPUID_RESULT_ECX], eax
+ jmp VmmDoneSnpCpuid
+
+;
+; Total stack usage for the #VC handler is 44 bytes:
+; - 12 bytes for the exception IRET (after popping error code)
+; - 32 bytes for the local variables.
+;
SevEsIdtVmmComm:
;
; If we're here, then we are an SEV-ES guest and this
@@ -367,6 +429,13 @@ SevEsIdtVmmComm:
; Save the CPUID function being requested
mov [esp + VC_CPUID_FUNCTION], eax

+ ; If SEV-SNP is enabled, use the CPUID page to handle the CPUID
+ ; instruction.
+ mov ecx, SEV_STATUS_MSR
+ rdmsr
+ bt eax, 2
+ jc SnpCpuidLookup
+
; The GHCB CPUID protocol uses the following mapping to request
; a specific register:
; 0 => EAX, 1 => EBX, 2 => ECX, 3 => EDX
@@ -424,6 +493,7 @@ VmmDone:
mov ecx, SEV_GHCB_MSR
wrmsr

+VmmDoneSnpCpuid:
mov eax, [esp + VC_CPUID_RESULT_EAX]
mov ebx, [esp + VC_CPUID_RESULT_EBX]
mov ecx, [esp + VC_CPUID_RESULT_ECX]
--
2.25.1


[PATCH v8 07/32] OvmfPkg/ResetVector: pre-validate the data pages used in SEC phase

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

An SEV-SNP guest requires that private memory (i.e., pages mapped as
encrypted) be validated before it is accessed.

The validation process consists of the following sequence:

1) Set the memory encryption attribute in the page table (aka C-bit).
Note: If the processor is in non-PAE mode, then all the memory accesses
are considered private.
2) Add the memory range as private in the RMP table. This can be performed
using the Page State Change VMGEXIT defined in the GHCB specification.
3) Use the PVALIDATE instruction to set the Validated Bit in the RMP table.

During guest creation, the VMM encrypts the OVMF_CODE.fd using the
LAUNCH_UPDATE_DATA command provided by the SEV-SNP firmware. In addition to
encrypting the content, the command also validates the memory region.
This allows us to execute the code without going through the validation
sequence.

During execution, the reset vector needs to access some data pages
(such as the page tables, the SevESWorkarea, and the SEC stack). These
data pages are accessed as private memory, but they are not part of the
OVMF_CODE.fd, so they were not validated during guest creation.

There are two approaches we can take to validate the data pages before
the access:

a) Enhance the OVMF reset vector code to validate the pages as described
above (i.e., perform steps 2 and 3).
OR
b) Validate the pages during the guest creation time. The SEV firmware
provides a command which can be used by the VMM to validate the pages
without affecting the measurement of the launch.

Approach #b seems much simpler; it does not require any changes to the
OVMF reset vector code.

Update the OVMF metadata with the list of regions that must be
pre-validated by the VMM before boot.
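
For orientation, the VMM-side processing is expected to look roughly like
the sketch below: walk the metadata sections and pre-validate every region
of type OVMF_SECTION_TYPE_SNP_SEC_MEM at launch time.
SnpLaunchUpdatePreValidate() is an invented placeholder for whatever
launch-update interface the VMM uses; the 12-byte section layout mirrors
the entries emitted by the assembly in this patch.

  //
  // Hypothetical VMM-side sketch; SnpLaunchUpdatePreValidate () is a
  // placeholder, not a real API.
  //
  typedef struct {
    UINT32  Base;     // guest-physical base of the region
    UINT32  Size;     // region size in bytes
    UINT32  Type;     // OVMF_SECTION_TYPE_* (SNP_SEC_MEM is 0x201 here)
  } OVMF_METADATA_SECTION;

  VOID
  PreValidateOvmfRegions (
    IN OVMF_METADATA_SECTION  *Sections,
    IN UINT32                 Count
    )
  {
    UINT32  Idx;

    for (Idx = 0; Idx < Count; Idx++) {
      if (Sections[Idx].Type == OVMF_SECTION_TYPE_SNP_SEC_MEM) {
        SnpLaunchUpdatePreValidate (Sections[Idx].Base, Sections[Idx].Size);
      }
    }
  }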

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/ResetVector/ResetVector.inf | 7 +++
OvmfPkg/ResetVector/ResetVector.nasmb | 29 +++++++++++++
OvmfPkg/ResetVector/X64/OvmfMetadata.asm | 54 ++++++++++++++++++++++++
3 files changed, 90 insertions(+)

diff --git a/OvmfPkg/ResetVector/ResetVector.inf b/OvmfPkg/ResetVector/ResetVector.inf
index 4cb81a3233f0..af3cf610a2cd 100644
--- a/OvmfPkg/ResetVector/ResetVector.inf
+++ b/OvmfPkg/ResetVector/ResetVector.inf
@@ -37,6 +37,8 @@ [Pcd]
gUefiCpuPkgTokenSpaceGuid.PcdSevEsWorkAreaBase
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBase
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupSize
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbPageTableBase
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbPageTableSize
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPageTablesBase
@@ -44,6 +46,11 @@ [Pcd]
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPeiTempRamBase
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPeiTempRamSize
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfWorkAreaBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfWorkAreaSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfLockBoxStorageBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfLockBoxStorageSize
+ gEfiMdePkgTokenSpaceGuid.PcdGuidedExtractHandlerTableAddress
+ gUefiOvmfPkgTokenSpaceGuid.PcdGuidedExtractHandlerTableSize

[FixedPcd]
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase
diff --git a/OvmfPkg/ResetVector/ResetVector.nasmb b/OvmfPkg/ResetVector/ResetVector.nasmb
index 84cb5ae81b66..dc4dde2ff9d2 100644
--- a/OvmfPkg/ResetVector/ResetVector.nasmb
+++ b/OvmfPkg/ResetVector/ResetVector.nasmb
@@ -70,6 +70,7 @@
%define PT_ADDR(Offset) (FixedPcdGet32 (PcdOvmfSecPageTablesBase) + (Offset))

%define GHCB_PT_ADDR (FixedPcdGet32 (PcdOvmfSecGhcbPageTableBase))
+ %define GHCB_PT_SIZE (FixedPcdGet32 (PcdOvmfSecGhcbPageTableSize))
%define GHCB_BASE (FixedPcdGet32 (PcdOvmfSecGhcbBase))
%define GHCB_SIZE (FixedPcdGet32 (PcdOvmfSecGhcbSize))
%define WORK_AREA_GUEST_TYPE (FixedPcdGet32 (PcdOvmfWorkAreaBase))
@@ -81,6 +82,34 @@
%define SEV_SNP_SECRETS_SIZE (FixedPcdGet32 (PcdOvmfSnpSecretsSize))
%define CPUID_BASE (FixedPcdGet32 (PcdOvmfCpuidBase))
%define CPUID_SIZE (FixedPcdGet32 (PcdOvmfCpuidSize))
+ %define SEC_PAGE_TABLE_BASE (FixedPcdGet32 (PcdOvmfSecPageTablesBase))
+ %define SEC_PAGE_TABLE_SIZE (FixedPcdGet32 (PcdOvmfSecPageTablesSize))
+ %define LOCK_BOX_STORAGE_BASE (FixedPcdGet32 (PcdOvmfLockBoxStorageBase))
+ %define LOCK_BOX_STORAGE_SIZE (FixedPcdGet32 (PcdOvmfLockBoxStorageSize))
+ ;
+ ; The PcdGuidedExtractHandlerTableAddress is a 64-bit PCD. FixedPcdGet64() returns
+ ; a number suffixed with 'ULL' (e.g. 0x1111ULL), and NASM does not accept a constant
+ ; ending with anything other than 'h'. So, instead of using FixedPcdGet64(), calculate
+ ; the base address of the GuidedExtractHandlerTable here.
+ ;
+ %define GUID_EXTRACT_HANDLER_TABLE_BASE (LOCK_BOX_STORAGE_BASE + LOCK_BOX_STORAGE_SIZE)
+ %define GUID_EXTRACT_HANDLER_TABLE_SIZE (FixedPcdGet32 (PcdGuidedExtractHandlerTableSize))
+ %define WORK_AREA_BASE (FixedPcdGet32 (PcdOvmfWorkAreaBase))
+ %define WORK_AREA_SIZE (FixedPcdGet32 (PcdOvmfWorkAreaSize))
+ %define SEC_PEI_TEMP_RAM_BASE (FixedPcdGet32 (PcdOvmfSecPeiTempRamBase))
+ %define SEC_PEI_TEMP_RAM_SIZE (FixedPcdGet32 (PcdOvmfSecPeiTempRamSize))
+
+ ;
+ ; The PcdOvmfSecGhcbBase reserves two GHCB pages. The first page is used
+ ; as the GHCB shared page and the second is used for bookkeeping to support
+ ; nested GHCBs in the SEC phase. The bookkeeping page is mapped private.
+ ; The VMM does not need to validate the shared page, but it does need to
+ ; validate the bookkeeping page.
+ ;
+ %define GHCB_SEC_BACKUP_BASE (FixedPcdGet32 (PcdOvmfSecGhcbBackupBase))
+ %define GHCB_SEC_BACKUP_SIZE (FixedPcdGet32 (PcdOvmfSecGhcbBackupSize))
+ %define GHCB_BOOKKEEPING_BASE (GHCB_BASE + 0x1000)
+ %define GHCB_BOOKKEEPING_SIZE (GHCB_SIZE - 0x1000)

%include "Ia32/Flat32ToFlat64.asm"
%include "Ia32/AmdSev.asm"
diff --git a/OvmfPkg/ResetVector/X64/OvmfMetadata.asm b/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
index 3f4b017104fc..8df170e8e649 100644
--- a/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
+++ b/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
@@ -35,6 +35,12 @@ BITS 64
; AMD SEV-SNP specific sections
%define OVMF_SECTION_TYPE_SNP_SECRETS 0x200

+; The section must be validated by the VMM before boot. The section is used
+; for the GHCB memory (the equivalent region serves as a mailbox in a TDX
+; guest). This special section is similar to OVMF_SECTION_TYPE_SEC_MEM, but
+; it is applicable to the SEV guest only.
+%define OVMF_SECTION_TYPE_SNP_SEC_MEM 0x201
+
ALIGN 16

TIMES (15 - ((OvmfGuidedStructureEnd - OvmfGuidedStructureStart + 15) % 16)) DB 0
@@ -53,6 +59,48 @@ _Descriptor:
DD OVMF_METADATA_VERSION ; Version
DD (OvmfGuidedStructureEnd - _Descriptor - 16) / 12 ; Number of sections

+; Page table used during SEC
+SecPageTable:
+ DD SEC_PAGE_TABLE_BASE
+ DD SEC_PAGE_TABLE_SIZE
+ DD OVMF_SECTION_TYPE_SEC_MEM
+
+; Lockbox storage
+LockBoxStorage:
+ DD LOCK_BOX_STORAGE_BASE
+ DD LOCK_BOX_STORAGE_SIZE
+ DD OVMF_SECTION_TYPE_SEC_MEM
+
+; Guided Extract Handler Table
+ExtractHandlerTable:
+ DD GUID_EXTRACT_HANDLER_TABLE_BASE
+ DD GUID_EXTRACT_HANDLER_TABLE_SIZE
+ DD OVMF_SECTION_TYPE_SEC_MEM
+
+; GHCB page table
+GhcbPageTable:
+ DD GHCB_PT_ADDR
+ DD GHCB_PT_SIZE
+ DD OVMF_SECTION_TYPE_SNP_SEC_MEM
+
+; GHCB bookkeeping page used in SEC phase
+GhcbBookkeeping:
+ DD GHCB_BOOKKEEPING_BASE
+ DD GHCB_BOOKKEEPING_SIZE
+ DD OVMF_SECTION_TYPE_SNP_SEC_MEM
+
+; Confidential computing work area
+WorkArea:
+ DD WORK_AREA_BASE
+ DD WORK_AREA_SIZE
+ DD OVMF_SECTION_TYPE_SEC_MEM
+
+; GHCB backup page used in SEC
+GhcbBackup:
+ DD GHCB_SEC_BACKUP_BASE
+ DD GHCB_SEC_BACKUP_SIZE
+ DD OVMF_SECTION_TYPE_SNP_SEC_MEM
+
; SEV-SNP Secrets page
SevSnpSecrets:
DD SEV_SNP_SECRETS_BASE
@@ -65,5 +113,11 @@ CpuidSec:
DD CPUID_SIZE
DD OVMF_SECTION_TYPE_CPUID

+; Temporary RAM used in SEC phase
+SecPeiTempRam:
+ DD SEC_PEI_TEMP_RAM_BASE
+ DD SEC_PEI_TEMP_RAM_SIZE
+ DD OVMF_SECTION_TYPE_SEC_MEM
+
OvmfGuidedStructureEnd:
ALIGN 16
--
2.25.1


[PATCH v8 06/32] OvmfPkg: reserve CPUID page

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

Platform features and capabilities are traditionally discovered via the
CPUID instruction. Hypervisors typically trap and emulate the CPUID
instruction for a variety of reasons. There are some cases where incorrect
CPUID information can potentially lead to a security issue. The SEV-SNP
firmware provides a feature to filter the CPUID results through the PSP.
The filtered CPUID values are saved on a special page for the guest to
consume. Reserve a page in MEMFD that will contain the filtered CPUID
values.

Add a new section to the OVMF metadata structure so that the hypervisor
can locate and populate the CPUID values before boot.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/OvmfPkg.dec | 7 +++++++
OvmfPkg/OvmfPkgX64.fdf | 3 +++
OvmfPkg/ResetVector/ResetVector.inf | 2 ++
OvmfPkg/ResetVector/ResetVector.nasmb | 2 ++
OvmfPkg/ResetVector/X64/OvmfMetadata.asm | 15 +++++++++++++++
5 files changed, 29 insertions(+)

diff --git a/OvmfPkg/OvmfPkg.dec b/OvmfPkg/OvmfPkg.dec
index 6266fdef6054..efa0de6d0600 100644
--- a/OvmfPkg/OvmfPkg.dec
+++ b/OvmfPkg/OvmfPkg.dec
@@ -347,6 +347,13 @@ [PcdsFixedAtBuild]
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsBase|0|UINT32|0x52
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsSize|0|UINT32|0x53

+ ## The base address and size of a CPUID Area that contains the
+ # hypervisor-provided CPUID results. In the case of SEV-SNP, the CPUID
+ # results are filtered by the SEV-SNP firmware. If this is set in the .fdf,
+ # the platform is responsible for reserving this area so that it is not
+ # overwritten in the DXE phase.
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase|0|UINT32|0x54
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize|0|UINT32|0x55
+
[PcdsDynamic, PcdsDynamicEx]
gUefiOvmfPkgTokenSpaceGuid.PcdEmuVariableEvent|0|UINT64|2
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfFlashVariablesEnable|FALSE|BOOLEAN|0x10
diff --git a/OvmfPkg/OvmfPkgX64.fdf b/OvmfPkg/OvmfPkgX64.fdf
index 5b871db20ab2..b38c123b8341 100644
--- a/OvmfPkg/OvmfPkgX64.fdf
+++ b/OvmfPkg/OvmfPkgX64.fdf
@@ -91,6 +91,9 @@ [FD.MEMFD]
0x00D000|0x001000
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsBase|gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsSize

+0x00E000|0x001000
+gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase|gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize
+
0x010000|0x010000
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPeiTempRamBase|gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPeiTempRamSize

diff --git a/OvmfPkg/ResetVector/ResetVector.inf b/OvmfPkg/ResetVector/ResetVector.inf
index 09454d0797e6..4cb81a3233f0 100644
--- a/OvmfPkg/ResetVector/ResetVector.inf
+++ b/OvmfPkg/ResetVector/ResetVector.inf
@@ -46,6 +46,8 @@ [Pcd]
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfWorkAreaBase

[FixedPcd]
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize
gUefiOvmfPkgTokenSpaceGuid.PcdSevLaunchSecretBase
gUefiOvmfPkgTokenSpaceGuid.PcdSevLaunchSecretSize
gUefiOvmfPkgTokenSpaceGuid.PcdQemuHashTableBase
diff --git a/OvmfPkg/ResetVector/ResetVector.nasmb b/OvmfPkg/ResetVector/ResetVector.nasmb
index f7d09acd33ed..84cb5ae81b66 100644
--- a/OvmfPkg/ResetVector/ResetVector.nasmb
+++ b/OvmfPkg/ResetVector/ResetVector.nasmb
@@ -79,6 +79,8 @@
%define SEV_ES_VC_TOP_OF_STACK (FixedPcdGet32 (PcdOvmfSecPeiTempRamBase) + FixedPcdGet32 (PcdOvmfSecPeiTempRamSize))
%define SEV_SNP_SECRETS_BASE (FixedPcdGet32 (PcdOvmfSnpSecretsBase))
%define SEV_SNP_SECRETS_SIZE (FixedPcdGet32 (PcdOvmfSnpSecretsSize))
+ %define CPUID_BASE (FixedPcdGet32 (PcdOvmfCpuidBase))
+ %define CPUID_SIZE (FixedPcdGet32 (PcdOvmfCpuidSize))

%include "Ia32/Flat32ToFlat64.asm"
%include "Ia32/AmdSev.asm"
diff --git a/OvmfPkg/ResetVector/X64/OvmfMetadata.asm b/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
index bb348e1c6a79..3f4b017104fc 100644
--- a/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
+++ b/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
@@ -23,6 +23,15 @@ BITS 64
; The section must be accepted or validated by the VMM before the boot
%define OVMF_SECTION_TYPE_SEC_MEM 0x102

+;
+; The section contains the hypervisor pre-populated CPUID values.
+; In the case of SEV-SNP, the CPUID values are filtered and measured by
+; the SEV-SNP firmware.
+; The CPUID format is documented in SEV-SNP firmware spec 0.9 section 7.1
+; (CPUID function structure).
+;
+%define OVMF_SECTION_TYPE_CPUID 0x103
+
; AMD SEV-SNP specific sections
%define OVMF_SECTION_TYPE_SNP_SECRETS 0x200

@@ -50,5 +59,11 @@ SevSnpSecrets:
DD SEV_SNP_SECRETS_SIZE
DD OVMF_SECTION_TYPE_SNP_SECRETS

+; CPUID values
+CpuidSec:
+ DD CPUID_BASE
+ DD CPUID_SIZE
+ DD OVMF_SECTION_TYPE_CPUID
+
OvmfGuidedStructureEnd:
ALIGN 16
--
2.25.1


[PATCH v8 05/32] OvmfPkg: reserve SNP secrets page

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

During the SNP guest launch sequence, a special secrets page needs to be
inserted by the VMM. The PSP will populate the page; it will contain the
VM Platform Communication Keys (VMPCKs) used by the guest to send and
receive secure messages to and from the PSP.

The purpose of the secrets page in the SEV-SNP is different from the one
used in SEV guests. In SEV, the secrets page contains the guest owner's
private data after the remote attestation.

Add a new section for the secrets page in the OVMF metadata structure so
that the hypervisor can locate it.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/OvmfPkg.dec | 6 ++++++
OvmfPkg/OvmfPkgX64.fdf | 3 +++
OvmfPkg/ResetVector/ResetVector.inf | 2 ++
OvmfPkg/ResetVector/ResetVector.nasmb | 3 +++
OvmfPkg/ResetVector/X64/OvmfMetadata.asm | 9 +++++++++
5 files changed, 23 insertions(+)

diff --git a/OvmfPkg/OvmfPkg.dec b/OvmfPkg/OvmfPkg.dec
index c37dafad49bb..6266fdef6054 100644
--- a/OvmfPkg/OvmfPkg.dec
+++ b/OvmfPkg/OvmfPkg.dec
@@ -340,6 +340,12 @@ [PcdsFixedAtBuild]
# header definition.
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfConfidentialComputingWorkAreaHeader|0|UINT32|0x51

+ ## The base address and size of the SEV-SNP Secrets Area that contains
+ # the VM Platform Communication Keys used to send and receive messages
+ # to and from the PSP. If this is set in the .fdf, the platform is
+ # responsible for reserving this area so that it is not overwritten in
+ # the DXE phase.
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsBase|0|UINT32|0x52
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsSize|0|UINT32|0x53

[PcdsDynamic, PcdsDynamicEx]
gUefiOvmfPkgTokenSpaceGuid.PcdEmuVariableEvent|0|UINT64|2
diff --git a/OvmfPkg/OvmfPkgX64.fdf b/OvmfPkg/OvmfPkgX64.fdf
index 23936242e74a..5b871db20ab2 100644
--- a/OvmfPkg/OvmfPkgX64.fdf
+++ b/OvmfPkg/OvmfPkgX64.fdf
@@ -88,6 +88,9 @@ [FD.MEMFD]
0x00C000|0x001000
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupBase|gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupSize

+0x00D000|0x001000
+gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsBase|gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsSize
+
0x010000|0x010000
gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPeiTempRamBase|gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecPeiTempRamSize

diff --git a/OvmfPkg/ResetVector/ResetVector.inf b/OvmfPkg/ResetVector/ResetVector.inf
index a2520dde5508..09454d0797e6 100644
--- a/OvmfPkg/ResetVector/ResetVector.inf
+++ b/OvmfPkg/ResetVector/ResetVector.inf
@@ -50,3 +50,5 @@ [FixedPcd]
gUefiOvmfPkgTokenSpaceGuid.PcdSevLaunchSecretSize
gUefiOvmfPkgTokenSpaceGuid.PcdQemuHashTableBase
gUefiOvmfPkgTokenSpaceGuid.PcdQemuHashTableSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSnpSecretsSize
diff --git a/OvmfPkg/ResetVector/ResetVector.nasmb b/OvmfPkg/ResetVector/ResetVector.nasmb
index bc61b1d05a24..f7d09acd33ed 100644
--- a/OvmfPkg/ResetVector/ResetVector.nasmb
+++ b/OvmfPkg/ResetVector/ResetVector.nasmb
@@ -77,6 +77,9 @@
%define SEV_ES_WORK_AREA_RDRAND (FixedPcdGet32 (PcdSevEsWorkAreaBase) + 8)
%define SEV_ES_WORK_AREA_ENC_MASK (FixedPcdGet32 (PcdSevEsWorkAreaBase) + 16)
%define SEV_ES_VC_TOP_OF_STACK (FixedPcdGet32 (PcdOvmfSecPeiTempRamBase) + FixedPcdGet32 (PcdOvmfSecPeiTempRamSize))
+ %define SEV_SNP_SECRETS_BASE (FixedPcdGet32 (PcdOvmfSnpSecretsBase))
+ %define SEV_SNP_SECRETS_SIZE (FixedPcdGet32 (PcdOvmfSnpSecretsSize))
+
%include "Ia32/Flat32ToFlat64.asm"
%include "Ia32/AmdSev.asm"
%include "Ia32/PageTables64.asm"
diff --git a/OvmfPkg/ResetVector/X64/OvmfMetadata.asm b/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
index a1260a1ed029..bb348e1c6a79 100644
--- a/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
+++ b/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
@@ -23,6 +23,9 @@ BITS 64
; The section must be accepted or validated by the VMM before the boot
%define OVMF_SECTION_TYPE_SEC_MEM 0x102

+; AMD SEV-SNP specific sections
+%define OVMF_SECTION_TYPE_SNP_SECRETS 0x200
+
ALIGN 16

TIMES (15 - ((OvmfGuidedStructureEnd - OvmfGuidedStructureStart + 15) % 16)) DB 0
@@ -41,5 +44,11 @@ _Descriptor:
DD OVMF_METADATA_VERSION ; Version
DD (OvmfGuidedStructureEnd - _Descriptor - 16) / 12 ; Number of sections

+; SEV-SNP Secrets page
+SevSnpSecrets:
+ DD SEV_SNP_SECRETS_BASE
+ DD SEV_SNP_SECRETS_SIZE
+ DD OVMF_SECTION_TYPE_SNP_SECRETS
+
OvmfGuidedStructureEnd:
ALIGN 16
--
2.25.1


[PATCH v8 04/32] OvmfPkg/ResetVector: introduce metadata descriptor for VMM use

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

The OvmfPkgX64 build reserves memory regions in MEMFD. The memory regions
are accessed in the SEC phase. Both Intel TDX and AMD SEV-SNP require
that the guest's private memory be accepted or validated before access.

Introduce a GUIDed metadata structure that describes the reserved memory
regions. The VMM can locate the metadata structure by iterating through
the reset vector GUIDs and then process the areas based on the
platform-specific requirements.

Min Xu introduced the metadata structure concept in the TDX patch
series [1].

[1] https://www.mail-archive.com/devel@edk2.groups.io/msg33605.html
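
Concretely, a VMM locates the blob in two steps: the GUIDed entry added to
the reset vector (GUID e47a6535-984a-4798-865e-4685a7bf8ec2) stores the
distance from the metadata descriptor to the 4 GB boundary, and the
descriptor itself is a 16-byte header followed by 12-byte section entries.
The C rendering below is a sketch of that layout; the type and field names
are illustrative, not taken from a header.

  //
  // Illustrative view of the structure emitted by X64/OvmfMetadata.asm.
  //
  typedef struct {
    UINT8   Signature[4];   // 'O','V','M','F'
    UINT32  Length;         // descriptor header + all sections
    UINT32  Version;        // OVMF_METADATA_VERSION (1)
    UINT32  SectionCount;   // (Length - 16) / 12
    //
    // Followed by SectionCount entries of 12 bytes each:
    //   UINT32 Base; UINT32 Size; UINT32 Type;   (OVMF_SECTION_TYPE_*)
    //
  } OVMF_METADATA_DESCRIPTOR;

With the firmware image mapped so that it ends at the 4 GB boundary, the
descriptor address is simply 4 GB minus the offset recorded in the GUIDed
entry.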

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Suggested-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/ResetVector/Ia16/ResetVectorVtf0.asm | 17 ++++++++
OvmfPkg/ResetVector/ResetVector.nasmb | 1 +
OvmfPkg/ResetVector/X64/OvmfMetadata.asm | 45 ++++++++++++++++++++
3 files changed, 63 insertions(+)
create mode 100644 OvmfPkg/ResetVector/X64/OvmfMetadata.asm

diff --git a/OvmfPkg/ResetVector/Ia16/ResetVectorVtf0.asm b/OvmfPkg/ResetVector/Ia16/ResetVectorVtf0.asm
index 7ec3c6e980c3..f0e509d0672e 100644
--- a/OvmfPkg/ResetVector/Ia16/ResetVectorVtf0.asm
+++ b/OvmfPkg/ResetVector/Ia16/ResetVectorVtf0.asm
@@ -47,6 +47,23 @@ TIMES (15 - ((guidedStructureEnd - guidedStructureStart + 15) % 16)) DB 0
;
guidedStructureStart:

+%ifdef ARCH_X64
+;
+; OVMF metadata descriptor for the TDX and SEV-SNP
+;
+; Provide the start offset of the metadata blob within the OVMF binary.
+
+; GUID : e47a6535-984a-4798-865e-4685a7bf8ec2
+;
+OvmfMetadataOffsetStart:
+ DD (fourGigabytes - OvmfMetadataGuid - 16)
+ DW OvmfMetadataOffsetEnd - OvmfMetadataOffsetStart
+ DB 0x35, 0x65, 0x7a, 0xe4, 0x4a, 0x98, 0x98, 0x47
+ DB 0x86, 0x5e, 0x46, 0x85, 0xa7, 0xbf, 0x8e, 0xc2
+OvmfMetadataOffsetEnd:
+
+%endif
+
; SEV Hash Table Block
;
; This describes the guest ram area where the hypervisor should
diff --git a/OvmfPkg/ResetVector/ResetVector.nasmb b/OvmfPkg/ResetVector/ResetVector.nasmb
index d1d800c56745..bc61b1d05a24 100644
--- a/OvmfPkg/ResetVector/ResetVector.nasmb
+++ b/OvmfPkg/ResetVector/ResetVector.nasmb
@@ -80,6 +80,7 @@
%include "Ia32/Flat32ToFlat64.asm"
%include "Ia32/AmdSev.asm"
%include "Ia32/PageTables64.asm"
+%include "X64/OvmfMetadata.asm"
%endif

%include "Ia16/Real16ToFlat32.asm"
diff --git a/OvmfPkg/ResetVector/X64/OvmfMetadata.asm b/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
new file mode 100644
index 000000000000..a1260a1ed029
--- /dev/null
+++ b/OvmfPkg/ResetVector/X64/OvmfMetadata.asm
@@ -0,0 +1,45 @@
+;-----------------------------------------------------------------------------
+; @file
+; OVMF metadata for the confidential computing guests (TDX and SEV-SNP)
+;
+; Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2021, AMD Inc. All rights reserved.<BR>
+;
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;-----------------------------------------------------------------------------
+
+BITS 64
+
+%define OVMF_METADATA_VERSION 1
+
+%define OVMF_SECTION_TYPE_UNDEFINED 0
+
+; The section contains the code
+%define OVMF_SECTION_TYPE_CODE 0x100
+
+; The section contains the variables
+%define OVMF_SECTION_TYPE_VARS 0x101
+
+; The section must be accepted or validated by the VMM before the boot
+%define OVMF_SECTION_TYPE_SEC_MEM 0x102
+
+ALIGN 16
+
+TIMES (15 - ((OvmfGuidedStructureEnd - OvmfGuidedStructureStart + 15) % 16)) DB 0
+
+OvmfGuidedStructureStart:
+;
+; Ovmf metadata descriptor
+;
+OvmfMetadataGuid:
+ DB 0xf3, 0xf9, 0xea, 0xe9, 0x8e, 0x16, 0xd5, 0x44
+ DB 0xa8, 0xeb, 0x7f, 0x4d, 0x87, 0x38, 0xf6, 0xae
+
+_Descriptor:
+ DB 'O','V','M','F' ; Signature
+ DD OvmfGuidedStructureEnd - _Descriptor ; Length
+ DD OVMF_METADATA_VERSION ; Version
+ DD (OvmfGuidedStructureEnd - _Descriptor - 16) / 12 ; Number of sections
+
+OvmfGuidedStructureEnd:
+ ALIGN 16
--
2.25.1


[PATCH v8 03/32] OvmfPkg/ResetVector: move clearing GHCB in SecMain

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

In preparation for SEV-SNP support, move the clearing of the GHCB memory
from ResetVector/AmdSev.asm to SecMain/AmdSev.c. The GHCB page is not
accessed until SevEsProtocolCheck() switches to the full GHCB, so the move
does not change the code flow or logic, and it will simplify the SEV-SNP
support.

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/Sec/AmdSev.c | 2 +-
OvmfPkg/ResetVector/Ia32/AmdSev.asm | 6 ------
2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/OvmfPkg/Sec/AmdSev.c b/OvmfPkg/Sec/AmdSev.c
index 3b4adaae32c7..7f74e8bfe88e 100644
--- a/OvmfPkg/Sec/AmdSev.c
+++ b/OvmfPkg/Sec/AmdSev.c
@@ -95,7 +95,7 @@ SevEsProtocolCheck (
AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);

Ghcb = Msr.Ghcb;
- SetMem (Ghcb, sizeof (*Ghcb), 0);
+ SetMem (Ghcb, FixedPcdGet32 (PcdOvmfSecGhcbSize), 0);

//
// Set the version to the maximum that can be supported
diff --git a/OvmfPkg/ResetVector/Ia32/AmdSev.asm b/OvmfPkg/ResetVector/Ia32/AmdSev.asm
index 250ac8d8b180..48d9178168b0 100644
--- a/OvmfPkg/ResetVector/Ia32/AmdSev.asm
+++ b/OvmfPkg/ResetVector/Ia32/AmdSev.asm
@@ -177,12 +177,6 @@ pageTableEntries4kLoop:
mov ecx, (GHCB_BASE & 0x1F_FFFF) >> 12
mov [ecx * 8 + GHCB_PT_ADDR + 4], strict dword 0

- mov ecx, GHCB_SIZE / 4
- xor eax, eax
-clearGhcbMemoryLoop:
- mov dword[ecx * 4 + GHCB_BASE - 4], eax
- loop clearGhcbMemoryLoop
-
SevClearPageEncMaskForGhcbPageExit:
OneTimeCallRet SevClearPageEncMaskForGhcbPage

--
2.25.1


[PATCH v8 02/32] UefiCpuPkg/MpInitLib: move SEV specific routines in AmdSev.c

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

Move all the SEV-specific functions into AmdSev.c.

No functional change intended.

Cc: Eric Dong <eric.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Suggested-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
UefiCpuPkg/Library/MpInitLib/DxeMpInitLib.inf | 1 +
UefiCpuPkg/Library/MpInitLib/PeiMpInitLib.inf | 1 +
UefiCpuPkg/Library/MpInitLib/MpLib.h | 33 +++
UefiCpuPkg/Library/MpInitLib/AmdSev.c | 239 ++++++++++++++++++
UefiCpuPkg/Library/MpInitLib/MpLib.c | 218 +---------------
UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm | 119 +++++++++
UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm | 100 ++------
7 files changed, 413 insertions(+), 298 deletions(-)
create mode 100644 UefiCpuPkg/Library/MpInitLib/AmdSev.c
create mode 100644 UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm

diff --git a/UefiCpuPkg/Library/MpInitLib/DxeMpInitLib.inf b/UefiCpuPkg/Library/MpInitLib/DxeMpInitLib.inf
index d34419c2a524..6e510aa89120 100644
--- a/UefiCpuPkg/Library/MpInitLib/DxeMpInitLib.inf
+++ b/UefiCpuPkg/Library/MpInitLib/DxeMpInitLib.inf
@@ -28,6 +28,7 @@ [Sources.X64]
X64/MpFuncs.nasm

[Sources.common]
+ AmdSev.c
MpEqu.inc
DxeMpLib.c
MpLib.c
diff --git a/UefiCpuPkg/Library/MpInitLib/PeiMpInitLib.inf b/UefiCpuPkg/Library/MpInitLib/PeiMpInitLib.inf
index 36fcb96b5852..2cbd9b8b8acc 100644
--- a/UefiCpuPkg/Library/MpInitLib/PeiMpInitLib.inf
+++ b/UefiCpuPkg/Library/MpInitLib/PeiMpInitLib.inf
@@ -28,6 +28,7 @@ [Sources.X64]
X64/MpFuncs.nasm

[Sources.common]
+ AmdSev.c
MpEqu.inc
PeiMpLib.c
MpLib.c
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.h b/UefiCpuPkg/Library/MpInitLib/MpLib.h
index e88a5355c983..3d4446df8ce6 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.h
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.h
@@ -34,6 +34,9 @@
#include <Library/PcdLib.h>
#include <Library/MicrocodeLib.h>

+#include <Register/Amd/Fam17Msr.h>
+#include <Register/Amd/Ghcb.h>
+
#include <Guid/MicrocodePatchHob.h>

#define WAKEUP_AP_SIGNAL SIGNATURE_32 ('S', 'T', 'A', 'P')
@@ -741,5 +744,35 @@ PlatformShadowMicrocode (
IN OUT CPU_MP_DATA *CpuMpData
);

+/**
+ Allocate the SEV-ES AP jump table buffer.
+
+ @param[in, out] CpuMpData The pointer to CPU MP Data structure.
+**/
+VOID
+AllocateSevEsAPMemory (
+ IN OUT CPU_MP_DATA *CpuMpData
+ );
+
+/**
+ Program the SEV-ES AP jump table buffer.
+
+ @param[in] SipiVector The SIPI vector used for the AP Reset
+**/
+VOID
+SetSevEsJumpTable (
+ IN UINTN SipiVector
+ );
+
+/**
+ The function puts the AP in halt loop.
+
+ @param[in] CpuMpData The pointer to CPU MP Data structure.
+**/
+VOID
+SevEsPlaceApHlt (
+ CPU_MP_DATA *CpuMpData
+ );
+
#endif

diff --git a/UefiCpuPkg/Library/MpInitLib/AmdSev.c b/UefiCpuPkg/Library/MpInitLib/AmdSev.c
new file mode 100644
index 000000000000..7dbf117c2b71
--- /dev/null
+++ b/UefiCpuPkg/Library/MpInitLib/AmdSev.c
@@ -0,0 +1,239 @@
+/** @file
+ CPU MP Initialize helper function for AMD SEV.
+
+ Copyright (c) 2021, AMD Inc. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "MpLib.h"
+#include <Library/VmgExitLib.h>
+
+/**
+ Get Protected mode code segment with 16-bit default addressing
+ from current GDT table.
+
+ @return Protected mode 16-bit code segment value.
+**/
+STATIC
+UINT16
+GetProtectedMode16CS (
+ VOID
+ )
+{
+ IA32_DESCRIPTOR GdtrDesc;
+ IA32_SEGMENT_DESCRIPTOR *GdtEntry;
+ UINTN GdtEntryCount;
+ UINT16 Index;
+
+ Index = (UINT16) -1;
+ AsmReadGdtr (&GdtrDesc);
+ GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
+ GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base;
+ for (Index = 0; Index < GdtEntryCount; Index++) {
+ if (GdtEntry->Bits.L == 0 &&
+ GdtEntry->Bits.DB == 0 &&
+ GdtEntry->Bits.Type > 8) {
+ break;
+ }
+ GdtEntry++;
+ }
+ ASSERT (Index != GdtEntryCount);
+ return Index * 8;
+}
+
+/**
+ Get Protected mode code segment with 32-bit default addressing
+ from current GDT table.
+
+ @return Protected mode 32-bit code segment value.
+**/
+STATIC
+UINT16
+GetProtectedMode32CS (
+ VOID
+ )
+{
+ IA32_DESCRIPTOR GdtrDesc;
+ IA32_SEGMENT_DESCRIPTOR *GdtEntry;
+ UINTN GdtEntryCount;
+ UINT16 Index;
+
+ Index = (UINT16) -1;
+ AsmReadGdtr (&GdtrDesc);
+ GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
+ GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base;
+ for (Index = 0; Index < GdtEntryCount; Index++) {
+ if (GdtEntry->Bits.L == 0 &&
+ GdtEntry->Bits.DB == 1 &&
+ GdtEntry->Bits.Type > 8) {
+ break;
+ }
+ GdtEntry++;
+ }
+ ASSERT (Index != GdtEntryCount);
+ return Index * 8;
+}
+
+/**
+ Reset an AP when in SEV-ES mode.
+
+ If successful, this function never returns.
+
+ @param[in] Ghcb Pointer to the GHCB
+ @param[in] CpuMpData Pointer to CPU MP Data
+
+**/
+VOID
+MpInitLibSevEsAPReset (
+ IN GHCB *Ghcb,
+ IN CPU_MP_DATA *CpuMpData
+ )
+{
+ EFI_STATUS Status;
+ UINTN ProcessorNumber;
+ UINT16 Code16, Code32;
+ AP_RESET *APResetFn;
+ UINTN BufferStart;
+ UINTN StackStart;
+
+ Status = GetProcessorNumber (CpuMpData, &ProcessorNumber);
+ ASSERT_EFI_ERROR (Status);
+
+ Code16 = GetProtectedMode16CS ();
+ Code32 = GetProtectedMode32CS ();
+
+ if (CpuMpData->WakeupBufferHigh != 0) {
+ APResetFn = (AP_RESET *) (CpuMpData->WakeupBufferHigh + CpuMpData->AddressMap.SwitchToRealNoNxOffset);
+ } else {
+ APResetFn = (AP_RESET *) (CpuMpData->MpCpuExchangeInfo->BufferStart + CpuMpData->AddressMap.SwitchToRealOffset);
+ }
+
+ BufferStart = CpuMpData->MpCpuExchangeInfo->BufferStart;
+ StackStart = CpuMpData->SevEsAPResetStackStart -
+ (AP_RESET_STACK_SIZE * ProcessorNumber);
+
+ //
+ // This call never returns.
+ //
+ APResetFn (BufferStart, Code16, Code32, StackStart);
+}
+
+/**
+ Allocate the SEV-ES AP jump table buffer.
+
+ @param[in, out] CpuMpData The pointer to CPU MP Data structure.
+**/
+VOID
+AllocateSevEsAPMemory (
+ IN OUT CPU_MP_DATA *CpuMpData
+ )
+{
+ if (CpuMpData->SevEsAPBuffer == (UINTN) -1) {
+ CpuMpData->SevEsAPBuffer =
+ CpuMpData->SevEsIsEnabled ? GetSevEsAPMemory () : 0;
+ }
+}
+
+/**
+ Program the SEV-ES AP jump table buffer.
+
+ @param[in] SipiVector The SIPI vector used for the AP Reset
+**/
+VOID
+SetSevEsJumpTable (
+ IN UINTN SipiVector
+ )
+{
+ SEV_ES_AP_JMP_FAR *JmpFar;
+ UINT32 Offset, InsnByte;
+ UINT8 LoNib, HiNib;
+
+ JmpFar = (SEV_ES_AP_JMP_FAR *) (UINTN) FixedPcdGet32 (PcdSevEsWorkAreaBase);
+ ASSERT (JmpFar != NULL);
+
+ //
+ // Obtain the address of the Segment/Rip location in the workarea.
+ // This will be set to a value derived from the SIPI vector and will
+ // be the memory address used for the far jump below.
+ //
+ Offset = FixedPcdGet32 (PcdSevEsWorkAreaBase);
+ Offset += sizeof (JmpFar->InsnBuffer);
+ LoNib = (UINT8) Offset;
+ HiNib = (UINT8) (Offset >> 8);
+
+ //
+ // Program the workarea (which is the initial AP boot address) with
+ // far jump to the SIPI vector (where XX and YY represent the
+ // address of where the SIPI vector is stored.
+ //
+ // JMP FAR [CS:XXYY] => 2E FF 2E YY XX
+ //
+ InsnByte = 0;
+ JmpFar->InsnBuffer[InsnByte++] = 0x2E; // CS override prefix
+ JmpFar->InsnBuffer[InsnByte++] = 0xFF; // JMP (FAR)
+ JmpFar->InsnBuffer[InsnByte++] = 0x2E; // ModRM (JMP memory location)
+ JmpFar->InsnBuffer[InsnByte++] = LoNib; // YY offset ...
+ JmpFar->InsnBuffer[InsnByte++] = HiNib; // XX offset ...
+
+ //
+ // Program the Segment/Rip based on the SIPI vector (always at least
+ // 16-byte aligned, so Rip is set to 0).
+ //
+ JmpFar->Rip = 0;
+ JmpFar->Segment = (UINT16) (SipiVector >> 4);
+}
+
+/**
+ The function puts the AP in halt loop.
+
+ @param[in] CpuMpData The pointer to CPU MP Data structure.
+**/
+VOID
+SevEsPlaceApHlt (
+ CPU_MP_DATA *CpuMpData
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+ GHCB *Ghcb;
+ UINT64 Status;
+ BOOLEAN DoDecrement;
+ BOOLEAN InterruptState;
+
+ DoDecrement = (BOOLEAN) (CpuMpData->InitFlag == ApInitConfig);
+
+ while (TRUE) {
+ Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+ Ghcb = Msr.Ghcb;
+
+ VmgInit (Ghcb, &InterruptState);
+
+ if (DoDecrement) {
+ DoDecrement = FALSE;
+
+ //
+ // Perform the delayed decrement just before issuing the first
+ // VMGEXIT with AP_RESET_HOLD.
+ //
+ InterlockedDecrement ((UINT32 *) &CpuMpData->MpCpuExchangeInfo->NumApsExecuting);
+ }
+
+ Status = VmgExit (Ghcb, SVM_EXIT_AP_RESET_HOLD, 0, 0);
+ if ((Status == 0) && (Ghcb->SaveArea.SwExitInfo2 != 0)) {
+ VmgDone (Ghcb, InterruptState);
+ break;
+ }
+
+ VmgDone (Ghcb, InterruptState);
+ }
+
+ //
+ // Awakened in a new phase? Use the new CpuMpData
+ //
+ if (CpuMpData->NewCpuMpData != NULL) {
+ CpuMpData = CpuMpData->NewCpuMpData;
+ }
+
+ MpInitLibSevEsAPReset (Ghcb, CpuMpData);
+}
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.c b/UefiCpuPkg/Library/MpInitLib/MpLib.c
index b9a06747edbf..890945bc5994 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.c
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.c
@@ -596,117 +596,6 @@ InitializeApData (
SetApState (&CpuMpData->CpuData[ProcessorNumber], CpuStateIdle);
}

-/**
- Get Protected mode code segment with 16-bit default addressing
- from current GDT table.
-
- @return Protected mode 16-bit code segment value.
-**/
-STATIC
-UINT16
-GetProtectedMode16CS (
- VOID
- )
-{
- IA32_DESCRIPTOR GdtrDesc;
- IA32_SEGMENT_DESCRIPTOR *GdtEntry;
- UINTN GdtEntryCount;
- UINT16 Index;
-
- Index = (UINT16) -1;
- AsmReadGdtr (&GdtrDesc);
- GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
- GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base;
- for (Index = 0; Index < GdtEntryCount; Index++) {
- if (GdtEntry->Bits.L == 0 &&
- GdtEntry->Bits.DB == 0 &&
- GdtEntry->Bits.Type > 8) {
- break;
- }
- GdtEntry++;
- }
- ASSERT (Index != GdtEntryCount);
- return Index * 8;
-}
-
-/**
- Get Protected mode code segment with 32-bit default addressing
- from current GDT table.
-
- @return Protected mode 32-bit code segment value.
-**/
-STATIC
-UINT16
-GetProtectedMode32CS (
- VOID
- )
-{
- IA32_DESCRIPTOR GdtrDesc;
- IA32_SEGMENT_DESCRIPTOR *GdtEntry;
- UINTN GdtEntryCount;
- UINT16 Index;
-
- Index = (UINT16) -1;
- AsmReadGdtr (&GdtrDesc);
- GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
- GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base;
- for (Index = 0; Index < GdtEntryCount; Index++) {
- if (GdtEntry->Bits.L == 0 &&
- GdtEntry->Bits.DB == 1 &&
- GdtEntry->Bits.Type > 8) {
- break;
- }
- GdtEntry++;
- }
- ASSERT (Index != GdtEntryCount);
- return Index * 8;
-}
-
-/**
- Reset an AP when in SEV-ES mode.
-
- If successful, this function never returns.
-
- @param[in] Ghcb Pointer to the GHCB
- @param[in] CpuMpData Pointer to CPU MP Data
-
-**/
-STATIC
-VOID
-MpInitLibSevEsAPReset (
- IN GHCB *Ghcb,
- IN CPU_MP_DATA *CpuMpData
- )
-{
- EFI_STATUS Status;
- UINTN ProcessorNumber;
- UINT16 Code16, Code32;
- AP_RESET *APResetFn;
- UINTN BufferStart;
- UINTN StackStart;
-
- Status = GetProcessorNumber (CpuMpData, &ProcessorNumber);
- ASSERT_EFI_ERROR (Status);
-
- Code16 = GetProtectedMode16CS ();
- Code32 = GetProtectedMode32CS ();
-
- if (CpuMpData->WakeupBufferHigh != 0) {
- APResetFn = (AP_RESET *) (CpuMpData->WakeupBufferHigh + CpuMpData->AddressMap.SwitchToRealNoNxOffset);
- } else {
- APResetFn = (AP_RESET *) (CpuMpData->MpCpuExchangeInfo->BufferStart + CpuMpData->AddressMap.SwitchToRealOffset);
- }
-
- BufferStart = CpuMpData->MpCpuExchangeInfo->BufferStart;
- StackStart = CpuMpData->SevEsAPResetStackStart -
- (AP_RESET_STACK_SIZE * ProcessorNumber);
-
- //
- // This call never returns.
- //
- APResetFn (BufferStart, Code16, Code32, StackStart);
-}
-
/**
This function will be called from AP reset code if BSP uses WakeUpAP.

@@ -884,47 +773,7 @@ ApWakeupFunction (
while (TRUE) {
DisableInterrupts ();
if (CpuMpData->SevEsIsEnabled) {
- MSR_SEV_ES_GHCB_REGISTER Msr;
- GHCB *Ghcb;
- UINT64 Status;
- BOOLEAN DoDecrement;
- BOOLEAN InterruptState;
-
- DoDecrement = (BOOLEAN) (CpuMpData->InitFlag == ApInitConfig);
-
- while (TRUE) {
- Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
- Ghcb = Msr.Ghcb;
-
- VmgInit (Ghcb, &InterruptState);
-
- if (DoDecrement) {
- DoDecrement = FALSE;
-
- //
- // Perform the delayed decrement just before issuing the first
- // VMGEXIT with AP_RESET_HOLD.
- //
- InterlockedDecrement ((UINT32 *) &CpuMpData->MpCpuExchangeInfo->NumApsExecuting);
- }
-
- Status = VmgExit (Ghcb, SVM_EXIT_AP_RESET_HOLD, 0, 0);
- if ((Status == 0) && (Ghcb->SaveArea.SwExitInfo2 != 0)) {
- VmgDone (Ghcb, InterruptState);
- break;
- }
-
- VmgDone (Ghcb, InterruptState);
- }
-
- //
- // Awakened in a new phase? Use the new CpuMpData
- //
- if (CpuMpData->NewCpuMpData != NULL) {
- CpuMpData = CpuMpData->NewCpuMpData;
- }
-
- MpInitLibSevEsAPReset (Ghcb, CpuMpData);
+ SevEsPlaceApHlt (CpuMpData);
} else {
CpuSleep ();
}
@@ -1252,71 +1101,6 @@ FreeResetVector (
}
}

-/**
- Allocate the SEV-ES AP jump table buffer.
-
- @param[in, out] CpuMpData The pointer to CPU MP Data structure.
-**/
-VOID
-AllocateSevEsAPMemory (
- IN OUT CPU_MP_DATA *CpuMpData
- )
-{
- if (CpuMpData->SevEsAPBuffer == (UINTN) -1) {
- CpuMpData->SevEsAPBuffer =
- CpuMpData->SevEsIsEnabled ? GetSevEsAPMemory () : 0;
- }
-}
-
-/**
- Program the SEV-ES AP jump table buffer.
-
- @param[in] SipiVector The SIPI vector used for the AP Reset
-**/
-VOID
-SetSevEsJumpTable (
- IN UINTN SipiVector
- )
-{
- SEV_ES_AP_JMP_FAR *JmpFar;
- UINT32 Offset, InsnByte;
- UINT8 LoNib, HiNib;
-
- JmpFar = (SEV_ES_AP_JMP_FAR *) (UINTN) FixedPcdGet32 (PcdSevEsWorkAreaBase);
- ASSERT (JmpFar != NULL);
-
- //
- // Obtain the address of the Segment/Rip location in the workarea.
- // This will be set to a value derived from the SIPI vector and will
- // be the memory address used for the far jump below.
- //
- Offset = FixedPcdGet32 (PcdSevEsWorkAreaBase);
- Offset += sizeof (JmpFar->InsnBuffer);
- LoNib = (UINT8) Offset;
- HiNib = (UINT8) (Offset >> 8);
-
- //
- // Program the workarea (which is the initial AP boot address) with
- // far jump to the SIPI vector (where XX and YY represent the
- // address of where the SIPI vector is stored.
- //
- // JMP FAR [CS:XXYY] => 2E FF 2E YY XX
- //
- InsnByte = 0;
- JmpFar->InsnBuffer[InsnByte++] = 0x2E; // CS override prefix
- JmpFar->InsnBuffer[InsnByte++] = 0xFF; // JMP (FAR)
- JmpFar->InsnBuffer[InsnByte++] = 0x2E; // ModRM (JMP memory location)
- JmpFar->InsnBuffer[InsnByte++] = LoNib; // YY offset ...
- JmpFar->InsnBuffer[InsnByte++] = HiNib; // XX offset ...
-
- //
- // Program the Segment/Rip based on the SIPI vector (always at least
- // 16-byte aligned, so Rip is set to 0).
- //
- JmpFar->Rip = 0;
- JmpFar->Segment = (UINT16) (SipiVector >> 4);
-}
-
/**
This function will be called by BSP to wakeup AP.

diff --git a/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm b/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
new file mode 100644
index 000000000000..0ccafe25eca4
--- /dev/null
+++ b/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
@@ -0,0 +1,119 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2021, AMD Inc. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; AmdSev.nasm
+;
+; Abstract:
+;
+; This provides helper used by the MpFunc.nasm. If AMD SEV-ES is active
+; then helpers perform the additional setups (such as GHCB).
+;
+;-------------------------------------------------------------------------------
+
+%define SIZE_4KB 0x1000
+
+;
+; The function checks whether SEV-ES is enabled, if enabled
+; then setup the GHCB page.
+;
+SevEsSetupGhcb:
+ lea edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (SevEsIsEnabled)]
+ cmp byte [edi], 1 ; SevEsIsEnabled
+ jne SevEsSetupGhcbExit
+
+ ;
+ ; program GHCB
+ ; Each page after the GHCB is a per-CPU page, so the calculation programs
+ ; a GHCB to be every 8KB.
+ ;
+ mov eax, SIZE_4KB
+ shl eax, 1 ; EAX = SIZE_4K * 2
+ mov ecx, ebx
+ mul ecx ; EAX = SIZE_4K * 2 * CpuNumber
+ mov edi, esi
+ add edi, MP_CPU_EXCHANGE_INFO_FIELD (GhcbBase)
+ add rax, qword [edi]
+ mov rdx, rax
+ shr rdx, 32
+ mov rcx, 0xc0010130
+ wrmsr
+
+SevEsSetupGhcbExit:
+ OneTimeCallRet SevEsSetupGhcb
+
+;
+; The function checks whether SEV-ES is enabled, if enabled, use
+; the GHCB
+;
+SevEsGetApicId:
+ lea edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (SevEsIsEnabled)]
+ cmp byte [edi], 1 ; SevEsIsEnabled
+ jne SevEsGetApicIdExit
+
+ ;
+ ; Since we don't have a stack yet, we can't take a #VC
+ ; exception. Use the GHCB protocol to perform the CPUID
+ ; calls.
+ ;
+ mov rcx, 0xc0010130
+ rdmsr
+ shl rdx, 32
+ or rax, rdx
+ mov rdi, rax ; RDI now holds the original GHCB GPA
+
+ mov rdx, 0 ; CPUID function 0
+ mov rax, 0 ; RAX register requested
+ or rax, 4
+ wrmsr
+ rep vmmcall
+ rdmsr
+ cmp edx, 0bh
+ jb NoX2ApicSevEs ; CPUID level below CPUID_EXTENDED_TOPOLOGY
+
+ mov rdx, 0bh ; CPUID function 0x0b
+ mov rax, 040000000h ; RBX register requested
+ or rax, 4
+ wrmsr
+ rep vmmcall
+ rdmsr
+ test edx, 0ffffh
+ jz NoX2ApicSevEs ; CPUID.0BH:EBX[15:0] is zero
+
+ mov rdx, 0bh ; CPUID function 0x0b
+ mov rax, 0c0000000h ; RDX register requested
+ or rax, 4
+ wrmsr
+ rep vmmcall
+ rdmsr
+
+ ; Processor is x2APIC capable; 32-bit x2APIC ID is now in EDX
+ jmp RestoreGhcb
+
+NoX2ApicSevEs:
+ ; Processor is not x2APIC capable, so get 8-bit APIC ID
+ mov rdx, 1 ; CPUID function 1
+ mov rax, 040000000h ; RBX register requested
+ or rax, 4
+ wrmsr
+ rep vmmcall
+ rdmsr
+ shr edx, 24
+
+RestoreGhcb:
+ mov rbx, rdx ; Save x2APIC/APIC ID
+
+ mov rdx, rdi ; RDI holds the saved GHCB GPA
+ shr rdx, 32
+ mov eax, edi
+ wrmsr
+
+ mov rdx, rbx
+
+ ; x2APIC ID or APIC ID is in EDX
+ jmp GetProcessorNumber
+
+SevEsGetApicIdExit:
+ OneTimeCallRet SevEsGetApicId
diff --git a/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm b/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
index 50df802d1fca..f7f2937fafad 100644
--- a/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
+++ b/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
@@ -15,6 +15,15 @@
%include "MpEqu.inc"
extern ASM_PFX(InitializeFloatingPointUnits)

+%macro OneTimeCall 1
+ jmp %1
+%1 %+ OneTimerCallReturn:
+%endmacro
+
+%macro OneTimeCallRet 1
+ jmp %1 %+ OneTimerCallReturn
+%endmacro
+
DEFAULT REL

SECTION .text
@@ -144,6 +153,12 @@ SkipEnable5LevelPaging:
jmp far [edi]

BITS 64
+
+;
+; Required for the AMD SEV helper functions
+;
+%include "AmdSev.nasm"
+
LongModeStart:
mov esi, ebx
lea edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (InitFlag)]
@@ -175,94 +190,17 @@ LongModeStart:
add rax, qword [edi]
mov rsp, rax

- lea edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (SevEsIsEnabled)]
- cmp byte [edi], 1 ; SevEsIsEnabled
- jne CProcedureInvoke
-
;
- ; program GHCB
- ; Each page after the GHCB is a per-CPU page, so the calculation programs
- ; a GHCB to be every 8KB.
+ ; Setup the GHCB when AMD SEV-ES active.
;
- mov eax, SIZE_4KB
- shl eax, 1 ; EAX = SIZE_4K * 2
- mov ecx, ebx
- mul ecx ; EAX = SIZE_4K * 2 * CpuNumber
- mov edi, esi
- add edi, MP_CPU_EXCHANGE_INFO_FIELD (GhcbBase)
- add rax, qword [edi]
- mov rdx, rax
- shr rdx, 32
- mov rcx, 0xc0010130
- wrmsr
+ OneTimeCall SevEsSetupGhcb
jmp CProcedureInvoke

GetApicId:
- lea edi, [esi + MP_CPU_EXCHANGE_INFO_FIELD (SevEsIsEnabled)]
- cmp byte [edi], 1 ; SevEsIsEnabled
- jne DoCpuid
-
;
- ; Since we don't have a stack yet, we can't take a #VC
- ; exception. Use the GHCB protocol to perform the CPUID
- ; calls.
+ ; Use the GHCB protocol to get the ApicId when SEV-ES is active.
;
- mov rcx, 0xc0010130
- rdmsr
- shl rdx, 32
- or rax, rdx
- mov rdi, rax ; RDI now holds the original GHCB GPA
-
- mov rdx, 0 ; CPUID function 0
- mov rax, 0 ; RAX register requested
- or rax, 4
- wrmsr
- rep vmmcall
- rdmsr
- cmp edx, 0bh
- jb NoX2ApicSevEs ; CPUID level below CPUID_EXTENDED_TOPOLOGY
-
- mov rdx, 0bh ; CPUID function 0x0b
- mov rax, 040000000h ; RBX register requested
- or rax, 4
- wrmsr
- rep vmmcall
- rdmsr
- test edx, 0ffffh
- jz NoX2ApicSevEs ; CPUID.0BH:EBX[15:0] is zero
-
- mov rdx, 0bh ; CPUID function 0x0b
- mov rax, 0c0000000h ; RDX register requested
- or rax, 4
- wrmsr
- rep vmmcall
- rdmsr
-
- ; Processor is x2APIC capable; 32-bit x2APIC ID is now in EDX
- jmp RestoreGhcb
-
-NoX2ApicSevEs:
- ; Processor is not x2APIC capable, so get 8-bit APIC ID
- mov rdx, 1 ; CPUID function 1
- mov rax, 040000000h ; RBX register requested
- or rax, 4
- wrmsr
- rep vmmcall
- rdmsr
- shr edx, 24
-
-RestoreGhcb:
- mov rbx, rdx ; Save x2APIC/APIC ID
-
- mov rdx, rdi ; RDI holds the saved GHCB GPA
- shr rdx, 32
- mov eax, edi
- wrmsr
-
- mov rdx, rbx
-
- ; x2APIC ID or APIC ID is in EDX
- jmp GetProcessorNumber
+ OneTimeCall SevEsGetApicId

DoCpuid:
mov eax, 0
--
2.25.1


[PATCH v8 01/32] OvmfPkg/SecMain: move SEV specific routines in AmdSev.c

Brijesh Singh
 

BZ: https://bugzilla.tianocore.org/show_bug.cgi?id=3275

Move all the SEV-specific functions into AmdSev.c.

No functional change intended.
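
For illustration only, SEC code is expected to consume the extracted
routines roughly as follows; SecMaybeInitSevEs is a hypothetical wrapper
shown just to demonstrate the new AmdSev.h interface (the real call sites
in SecCoreStartupWithStack () are not modified by this patch):

#include "AmdSev.h"

//
// Sketch of the intended usage: negotiate the GHCB protocol level only
// when SEV-ES is active. SevEsProtocolCheck () requests guest termination
// via the GHCB MSR protocol and does not return on a mismatch.
//
STATIC
VOID
SecMaybeInitSevEs (
  VOID
  )
{
  if (SevEsIsEnabled ()) {
    SevEsProtocolCheck ();
  }
}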

Cc: Michael Roth <michael.roth@amd.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Min Xu <min.m.xu@intel.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Acked-by: Jiewen Yao <Jiewen.yao@intel.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
OvmfPkg/Sec/SecMain.inf | 1 +
OvmfPkg/Sec/AmdSev.h | 72 ++++++++++++++++++
OvmfPkg/Sec/AmdSev.c | 161 ++++++++++++++++++++++++++++++++++++++++
OvmfPkg/Sec/SecMain.c | 153 +-------------------------------------
4 files changed, 236 insertions(+), 151 deletions(-)
create mode 100644 OvmfPkg/Sec/AmdSev.h
create mode 100644 OvmfPkg/Sec/AmdSev.c

diff --git a/OvmfPkg/Sec/SecMain.inf b/OvmfPkg/Sec/SecMain.inf
index ea4b9611f52d..9523a8ea6c8f 100644
--- a/OvmfPkg/Sec/SecMain.inf
+++ b/OvmfPkg/Sec/SecMain.inf
@@ -23,6 +23,7 @@ [Defines]

[Sources]
SecMain.c
+ AmdSev.c

[Sources.IA32]
Ia32/SecEntry.nasm
diff --git a/OvmfPkg/Sec/AmdSev.h b/OvmfPkg/Sec/AmdSev.h
new file mode 100644
index 000000000000..adad96d23189
--- /dev/null
+++ b/OvmfPkg/Sec/AmdSev.h
@@ -0,0 +1,72 @@
+/** @file
+ File defines the Sec routines for the AMD SEV
+
+ Copyright (c) 2021, Advanced Micro Devices, Inc. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _AMD_SEV_SEC_INTERNAL_H__
+#define _AMD_SEV_SEC_INTERNAL_H__
+
+/**
+ Handle an SEV-ES/GHCB protocol check failure.
+
+ Notify the hypervisor using the VMGEXIT instruction that the SEV-ES guest
+ wishes to be terminated.
+
+ @param[in] ReasonCode Reason code to provide to the hypervisor for the
+ termination request.
+
+**/
+VOID
+SevEsProtocolFailure (
+ IN UINT8 ReasonCode
+ );
+
+
+/**
+ Validate the SEV-ES/GHCB protocol level.
+
+ Verify that the level of SEV-ES/GHCB protocol supported by the hypervisor
+ and the guest intersect. If they don't intersect, request termination.
+
+**/
+VOID
+SevEsProtocolCheck (
+ VOID
+ );
+
+/**
+ Determine if the SEV is active.
+
+ During the early booting, GuestType is set in the work area. Verify that it
+ is an SEV guest.
+
+ @retval TRUE SEV is enabled
+ @retval FALSE SEV is not enabled
+
+**/
+BOOLEAN
+IsSevGuest (
+ VOID
+ );
+
+/**
+ Determine if SEV-ES is active.
+
+ During early booting, SEV-ES support code will set a flag to indicate that
+ SEV-ES is enabled. Return the value of this flag as an indicator that SEV-ES
+ is enabled.
+
+ @retval TRUE SEV-ES is enabled
+ @retval FALSE SEV-ES is not enabled
+
+**/
+BOOLEAN
+SevEsIsEnabled (
+ VOID
+ );
+
+#endif
diff --git a/OvmfPkg/Sec/AmdSev.c b/OvmfPkg/Sec/AmdSev.c
new file mode 100644
index 000000000000..3b4adaae32c7
--- /dev/null
+++ b/OvmfPkg/Sec/AmdSev.c
@@ -0,0 +1,161 @@
+/** @file
+ File defines the Sec routines for the AMD SEV
+
+ Copyright (c) 2021, Advanced Micro Devices, Inc. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Register/Amd/Ghcb.h>
+#include <Register/Amd/Msr.h>
+
+#include "AmdSev.h"
+
+/**
+ Handle an SEV-ES/GHCB protocol check failure.
+
+ Notify the hypervisor using the VMGEXIT instruction that the SEV-ES guest
+ wishes to be terminated.
+
+ @param[in] ReasonCode Reason code to provide to the hypervisor for the
+ termination request.
+
+**/
+VOID
+SevEsProtocolFailure (
+ IN UINT8 ReasonCode
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+
+ //
+ // Use the GHCB MSR Protocol to request termination by the hypervisor
+ //
+ Msr.GhcbPhysicalAddress = 0;
+ Msr.GhcbTerminate.Function = GHCB_INFO_TERMINATE_REQUEST;
+ Msr.GhcbTerminate.ReasonCodeSet = GHCB_TERMINATE_GHCB;
+ Msr.GhcbTerminate.ReasonCode = ReasonCode;
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+ AsmVmgExit ();
+
+ ASSERT (FALSE);
+ CpuDeadLoop ();
+}
+
+/**
+ Validate the SEV-ES/GHCB protocol level.
+
+ Verify that the level of SEV-ES/GHCB protocol supported by the hypervisor
+ and the guest intersect. If they don't intersect, request termination.
+
+**/
+VOID
+SevEsProtocolCheck (
+ VOID
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+ GHCB *Ghcb;
+
+ //
+ // Use the GHCB MSR Protocol to obtain the GHCB SEV-ES Information for
+ // protocol checking
+ //
+ Msr.GhcbPhysicalAddress = 0;
+ Msr.GhcbInfo.Function = GHCB_INFO_SEV_INFO_GET;
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+ AsmVmgExit ();
+
+ Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+
+ if (Msr.GhcbInfo.Function != GHCB_INFO_SEV_INFO) {
+ SevEsProtocolFailure (GHCB_TERMINATE_GHCB_GENERAL);
+ }
+
+ if (Msr.GhcbProtocol.SevEsProtocolMin > Msr.GhcbProtocol.SevEsProtocolMax) {
+ SevEsProtocolFailure (GHCB_TERMINATE_GHCB_PROTOCOL);
+ }
+
+ if ((Msr.GhcbProtocol.SevEsProtocolMin > GHCB_VERSION_MAX) ||
+ (Msr.GhcbProtocol.SevEsProtocolMax < GHCB_VERSION_MIN)) {
+ SevEsProtocolFailure (GHCB_TERMINATE_GHCB_PROTOCOL);
+ }
+
+ //
+ // SEV-ES protocol checking succeeded, set the initial GHCB address
+ //
+ Msr.GhcbPhysicalAddress = FixedPcdGet32 (PcdOvmfSecGhcbBase);
+ AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
+
+ Ghcb = Msr.Ghcb;
+ SetMem (Ghcb, sizeof (*Ghcb), 0);
+
+ //
+ // Set the version to the maximum that can be supported
+ //
+ Ghcb->ProtocolVersion = MIN (Msr.GhcbProtocol.SevEsProtocolMax, GHCB_VERSION_MAX);
+ Ghcb->GhcbUsage = GHCB_STANDARD_USAGE;
+}
+
+/**
+ Determine if the SEV is active.
+
+ During the early booting, GuestType is set in the work area. Verify that it
+ is an SEV guest.
+
+ @retval TRUE SEV is enabled
+ @retval FALSE SEV is not enabled
+
+**/
+BOOLEAN
+IsSevGuest (
+ VOID
+ )
+{
+ OVMF_WORK_AREA *WorkArea;
+
+ //
+ // Ensure that the size of the Confidential Computing work area header
+ // is same as what is provided through a fixed PCD.
+ //
+ ASSERT ((UINTN) FixedPcdGet32 (PcdOvmfConfidentialComputingWorkAreaHeader) ==
+ sizeof(CONFIDENTIAL_COMPUTING_WORK_AREA_HEADER));
+
+ WorkArea = (OVMF_WORK_AREA *) FixedPcdGet32 (PcdOvmfWorkAreaBase);
+
+ return ((WorkArea != NULL) && (WorkArea->Header.GuestType == GUEST_TYPE_AMD_SEV));
+}
+
+/**
+ Determine if SEV-ES is active.
+
+ During early booting, SEV-ES support code will set a flag to indicate that
+ SEV-ES is enabled. Return the value of this flag as an indicator that SEV-ES
+ is enabled.
+
+ @retval TRUE SEV-ES is enabled
+ @retval FALSE SEV-ES is not enabled
+
+**/
+BOOLEAN
+SevEsIsEnabled (
+ VOID
+ )
+{
+ SEC_SEV_ES_WORK_AREA *SevEsWorkArea;
+
+ if (!IsSevGuest()) {
+ return FALSE;
+ }
+
+ SevEsWorkArea = (SEC_SEV_ES_WORK_AREA *) FixedPcdGet32 (PcdSevEsWorkAreaBase);
+
+ return (SevEsWorkArea->SevEsEnabled != 0);
+}
diff --git a/OvmfPkg/Sec/SecMain.c b/OvmfPkg/Sec/SecMain.c
index 707b0d4bbff4..406e3a25d0cd 100644
--- a/OvmfPkg/Sec/SecMain.c
+++ b/OvmfPkg/Sec/SecMain.c
@@ -26,12 +26,11 @@
#include <Library/ExtractGuidedSectionLib.h>
#include <Library/LocalApicLib.h>
#include <Library/CpuExceptionHandlerLib.h>
-#include <Library/MemEncryptSevLib.h>
-#include <Register/Amd/Ghcb.h>
-#include <Register/Amd/Msr.h>

#include <Ppi/TemporaryRamSupport.h>

+#include "AmdSev.h"
+
#define SEC_IDT_ENTRY_COUNT 34

typedef struct _SEC_IDT_TABLE {
@@ -717,154 +716,6 @@ FindAndReportEntryPoints (
return;
}

-/**
- Handle an SEV-ES/GHCB protocol check failure.
-
- Notify the hypervisor using the VMGEXIT instruction that the SEV-ES guest
- wishes to be terminated.
-
- @param[in] ReasonCode Reason code to provide to the hypervisor for the
- termination request.
-
-**/
-STATIC
-VOID
-SevEsProtocolFailure (
- IN UINT8 ReasonCode
- )
-{
- MSR_SEV_ES_GHCB_REGISTER Msr;
-
- //
- // Use the GHCB MSR Protocol to request termination by the hypervisor
- //
- Msr.GhcbPhysicalAddress = 0;
- Msr.GhcbTerminate.Function = GHCB_INFO_TERMINATE_REQUEST;
- Msr.GhcbTerminate.ReasonCodeSet = GHCB_TERMINATE_GHCB;
- Msr.GhcbTerminate.ReasonCode = ReasonCode;
- AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
-
- AsmVmgExit ();
-
- ASSERT (FALSE);
- CpuDeadLoop ();
-}
-
-/**
- Validate the SEV-ES/GHCB protocol level.
-
- Verify that the level of SEV-ES/GHCB protocol supported by the hypervisor
- and the guest intersect. If they don't intersect, request termination.
-
-**/
-STATIC
-VOID
-SevEsProtocolCheck (
- VOID
- )
-{
- MSR_SEV_ES_GHCB_REGISTER Msr;
- GHCB *Ghcb;
-
- //
- // Use the GHCB MSR Protocol to obtain the GHCB SEV-ES Information for
- // protocol checking
- //
- Msr.GhcbPhysicalAddress = 0;
- Msr.GhcbInfo.Function = GHCB_INFO_SEV_INFO_GET;
- AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
-
- AsmVmgExit ();
-
- Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
-
- if (Msr.GhcbInfo.Function != GHCB_INFO_SEV_INFO) {
- SevEsProtocolFailure (GHCB_TERMINATE_GHCB_GENERAL);
- }
-
- if (Msr.GhcbProtocol.SevEsProtocolMin > Msr.GhcbProtocol.SevEsProtocolMax) {
- SevEsProtocolFailure (GHCB_TERMINATE_GHCB_PROTOCOL);
- }
-
- if ((Msr.GhcbProtocol.SevEsProtocolMin > GHCB_VERSION_MAX) ||
- (Msr.GhcbProtocol.SevEsProtocolMax < GHCB_VERSION_MIN)) {
- SevEsProtocolFailure (GHCB_TERMINATE_GHCB_PROTOCOL);
- }
-
- //
- // SEV-ES protocol checking succeeded, set the initial GHCB address
- //
- Msr.GhcbPhysicalAddress = FixedPcdGet32 (PcdOvmfSecGhcbBase);
- AsmWriteMsr64 (MSR_SEV_ES_GHCB, Msr.GhcbPhysicalAddress);
-
- Ghcb = Msr.Ghcb;
- SetMem (Ghcb, sizeof (*Ghcb), 0);
-
- //
- // Set the version to the maximum that can be supported
- //
- Ghcb->ProtocolVersion = MIN (Msr.GhcbProtocol.SevEsProtocolMax, GHCB_VERSION_MAX);
- Ghcb->GhcbUsage = GHCB_STANDARD_USAGE;
-}
-
-/**
- Determine if the SEV is active.
-
- During the early booting, GuestType is set in the work area. Verify that it
- is an SEV guest.
-
- @retval TRUE SEV is enabled
- @retval FALSE SEV is not enabled
-
-**/
-STATIC
-BOOLEAN
-IsSevGuest (
- VOID
- )
-{
- OVMF_WORK_AREA *WorkArea;
-
- //
- // Ensure that the size of the Confidential Computing work area header
- // is same as what is provided through a fixed PCD.
- //
- ASSERT ((UINTN) FixedPcdGet32 (PcdOvmfConfidentialComputingWorkAreaHeader) ==
- sizeof(CONFIDENTIAL_COMPUTING_WORK_AREA_HEADER));
-
- WorkArea = (OVMF_WORK_AREA *) FixedPcdGet32 (PcdOvmfWorkAreaBase);
-
- return ((WorkArea != NULL) && (WorkArea->Header.GuestType == GUEST_TYPE_AMD_SEV));
-}
-
-/**
- Determine if SEV-ES is active.
-
- During early booting, SEV-ES support code will set a flag to indicate that
- SEV-ES is enabled. Return the value of this flag as an indicator that SEV-ES
- is enabled.
-
- @retval TRUE SEV-ES is enabled
- @retval FALSE SEV-ES is not enabled
-
-**/
-STATIC
-BOOLEAN
-SevEsIsEnabled (
- VOID
- )
-{
- SEC_SEV_ES_WORK_AREA *SevEsWorkArea;
-
- if (!IsSevGuest()) {
- return FALSE;
- }
-
- SevEsWorkArea = (SEC_SEV_ES_WORK_AREA *) FixedPcdGet32 (PcdSevEsWorkAreaBase);
-
- return (SevEsWorkArea->SevEsEnabled != 0);
-}
-
VOID
EFIAPI
SecCoreStartupWithStack (
--
2.25.1
