UefiCpuPkg/PiSmmCpuDxeSmm: Remove SmBases relocation logic

This patch removes the legacy SmBase relocation logic from
the PiSmmCpuDxeSmm driver. The responsibility for SmBase
relocation has been transferred to the SmmRelocationInit
interface, which now handles the following tasks:
1. Relocate the SmBase for each processor.
2. Generate the gSmmBaseHobGuid HOB.

As a result of this change, the PiSmmCpuDxeSmm driver's
role in SMM environment setup is simplified to:
1. Consume the gSmmBaseHobGuid HOB to determine the SmBase
   of each processor (see the sketch below).
2. Call ExecuteFirstSmiInit() to perform early SMM
   initialization.
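
For reference, below is a minimal, illustrative sketch of how the
relocated SmBase values could be recovered from the gSmmBaseHobGuid
HOB. It assumes the MdePkg HobLib interfaces (GetFirstGuidHob,
GetNextGuidHob, GET_GUID_HOB_DATA, GET_NEXT_HOB) and an
SMM_BASE_HOB_DATA layout carrying ProcessorIndex, NumberOfProcessors
and a SmBase[] array; the driver's real logic lives in its GetSmBase()
helper, so the function name and error handling here are hypothetical.

  //
  // Illustrative sketch only (not part of this patch): walk the
  // gSmmBaseHobGuid HOB instance(s) and collect the relocated SmBase
  // for every processor into SmBaseBuffer, which the caller has
  // allocated with MaxNumberOfCpus entries.
  //
  EFI_STATUS
  ExampleCollectSmBaseFromHob (
    IN  UINTN  MaxNumberOfCpus,
    OUT UINTN  *SmBaseBuffer
    )
  {
    EFI_HOB_GUID_TYPE  *GuidHob;
    SMM_BASE_HOB_DATA  *SmmBaseHobData;
    UINTN              Index;

    GuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);
    if (GuidHob == NULL) {
      //
      // No HOB means SmmRelocationInit did not perform the relocation.
      //
      return EFI_NOT_FOUND;
    }

    while (GuidHob != NULL) {
      SmmBaseHobData = GET_GUID_HOB_DATA (GuidHob);
      for (Index = 0; Index < SmmBaseHobData->NumberOfProcessors; Index++) {
        if (SmmBaseHobData->ProcessorIndex + Index >= MaxNumberOfCpus) {
          return EFI_BUFFER_TOO_SMALL;
        }

        SmBaseBuffer[SmmBaseHobData->ProcessorIndex + Index] =
          (UINTN)SmmBaseHobData->SmBase[Index];
      }

      //
      // More than one HOB instance may exist when the processor count
      // exceeds what a single HOB can describe.
      //
      GuidHob = GetNextGuidHob (&gSmmBaseHobGuid, GET_NEXT_HOB (GuidHob));
    }

    return EFI_SUCCESS;
  }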

Cc: Ray Ni <ray.ni@intel.com>
Cc: Zeng Star <star.zeng@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Signed-off-by: Jiaxin Wu <jiaxin.wu@intel.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
index b14c289..d67fb49 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
@@ -641,23 +641,10 @@
   }

 

   if (IsBsp) {

-    DEBUG ((DEBUG_INFO, "SmmRestoreCpu: mSmmRelocated is %d\n", mSmmRelocated));

-

     //

-    // Check whether Smm Relocation is done or not.

-    // If not, will do the SmmBases Relocation here!!!

+    // Issue SMI IPI (All Excluding  Self SMM IPI + BSP SMM IPI) to execute first SMI init.

     //

-    if (!mSmmRelocated) {

-      //

-      // Restore SMBASE for BSP and all APs

-      //

-      SmmRelocateBases ();

-    } else {

-      //

-      // Issue SMI IPI (All Excluding  Self SMM IPI + BSP SMM IPI) to execute first SMI init.

-      //

-      ExecuteFirstSmiInit ();

-    }

+    ExecuteFirstSmiInit ();

   }

 

   //

@@ -980,9 +967,9 @@
       SmmS3ResumeState->SmmS3StackSize = 0;

     }

 

-    SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;

+    SmmS3ResumeState->SmmS3Cr0 = (UINT32)AsmReadCr0 ();

     SmmS3ResumeState->SmmS3Cr3 = Cr3;

-    SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;

+    SmmS3ResumeState->SmmS3Cr4 = (UINT32)AsmReadCr4 ();

 

     if (sizeof (UINTN) == sizeof (UINT64)) {

       SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
deleted file mode 100644
index a9fcc89..0000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/** @file

-Semaphore mechanism to indicate to the BSP that an AP has exited SMM

-after SMBASE relocation.

-

-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>

-SPDX-License-Identifier: BSD-2-Clause-Patent

-

-**/

-

-#include "PiSmmCpuDxeSmm.h"

-

-UINTN             mSmmRelocationOriginalAddress;

-volatile BOOLEAN  *mRebasedFlag;

-

-/**

-  Hook return address of SMM Save State so that semaphore code

-  can be executed immediately after AP exits SMM to indicate to

-  the BSP that an AP has exited SMM after SMBASE relocation.

-

-  @param[in] CpuIndex     The processor index.

-  @param[in] RebasedFlag  A pointer to a flag that is set to TRUE

-                          immediately after AP exits SMM.

-

-**/

-VOID

-SemaphoreHook (

-  IN UINTN             CpuIndex,

-  IN volatile BOOLEAN  *RebasedFlag

-  )

-{

-  SMRAM_SAVE_STATE_MAP  *CpuState;

-

-  mRebasedFlag = RebasedFlag;

-

-  CpuState                      = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

-  mSmmRelocationOriginalAddress = (UINTN)HookReturnFromSmm (

-                                           CpuIndex,

-                                           CpuState,

-                                           (UINT64)(UINTN)&SmmRelocationSemaphoreComplete,

-                                           (UINT64)(UINTN)&SmmRelocationSemaphoreComplete

-                                           );

-}

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
deleted file mode 100644
index b5e77a1..0000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
+++ /dev/null
@@ -1,96 +0,0 @@
-;------------------------------------------------------------------------------ ;

-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>

-; SPDX-License-Identifier: BSD-2-Clause-Patent

-;

-; Module Name:

-;

-;   SmmInit.nasm

-;

-; Abstract:

-;

-;   Functions for relocating SMBASE's for all processors

-;

-;-------------------------------------------------------------------------------

-

-%include "StuffRsbNasm.inc"

-

-extern ASM_PFX(SmmInitHandler)

-extern ASM_PFX(mRebasedFlag)

-extern ASM_PFX(mSmmRelocationOriginalAddress)

-

-global ASM_PFX(gPatchSmmCr3)

-global ASM_PFX(gPatchSmmCr4)

-global ASM_PFX(gPatchSmmCr0)

-global ASM_PFX(gPatchSmmInitStack)

-global ASM_PFX(gcSmiInitGdtr)

-global ASM_PFX(gcSmmInitSize)

-global ASM_PFX(gcSmmInitTemplate)

-

-%define PROTECT_MODE_CS 0x8

-%define PROTECT_MODE_DS 0x20

-

-    SECTION .text

-

-ASM_PFX(gcSmiInitGdtr):

-            DW      0

-            DQ      0

-

-global ASM_PFX(SmmStartup)

-

-BITS 16

-ASM_PFX(SmmStartup):

-    mov     eax, 0x80000001             ; read capability

-    cpuid

-    mov     ebx, edx                    ; rdmsr will change edx. keep it in ebx.

-    and     ebx, BIT20                  ; extract NX capability bit

-    shr     ebx, 9                      ; shift bit to IA32_EFER.NXE[BIT11] position

-    mov     eax, strict dword 0         ; source operand will be patched

-ASM_PFX(gPatchSmmCr3):

-    mov     cr3, eax

-o32 lgdt    [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]

-    mov     eax, strict dword 0         ; source operand will be patched

-ASM_PFX(gPatchSmmCr4):

-    mov     cr4, eax

-    mov     ecx, 0xc0000080             ; IA32_EFER MSR

-    rdmsr

-    or      eax, ebx                    ; set NXE bit if NX is available

-    wrmsr

-    mov     eax, strict dword 0         ; source operand will be patched

-ASM_PFX(gPatchSmmCr0):

-    mov     di, PROTECT_MODE_DS

-    mov     cr0, eax

-    jmp     PROTECT_MODE_CS : dword @32bit

-

-BITS 32

-@32bit:

-    mov     ds, edi

-    mov     es, edi

-    mov     fs, edi

-    mov     gs, edi

-    mov     ss, edi

-    mov     esp, strict dword 0         ; source operand will be patched

-ASM_PFX(gPatchSmmInitStack):

-    call    ASM_PFX(SmmInitHandler)

-    StuffRsb32

-    rsm

-

-BITS 16

-ASM_PFX(gcSmmInitTemplate):

-    mov ebp, ASM_PFX(SmmStartup)

-    sub ebp, 0x30000

-    jmp ebp

-

-ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)

-

-BITS 32

-global ASM_PFX(SmmRelocationSemaphoreComplete)

-ASM_PFX(SmmRelocationSemaphoreComplete):

-    push    eax

-    mov     eax, [ASM_PFX(mRebasedFlag)]

-    mov     byte [eax], 1

-    pop     eax

-    jmp     [ASM_PFX(mSmmRelocationOriginalAddress)]

-

-global ASM_PFX(PiSmmCpuSmmInitFixupAddress)

-ASM_PFX(PiSmmCpuSmmInitFixupAddress):

-    ret

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 081f0c1..10baf3c 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -1513,9 +1513,7 @@
 

   ASSERT (CpuIndex < mMaxNumberOfCpus);

 

-  if (mSmmRelocated) {

-    ASSERT (mSmmInitialized != NULL);

-  }

+  ASSERT (mSmmInitialized != NULL);

 

   //

   // Save Cr2 because Page Fault exception in SMM may override its value,

@@ -1524,11 +1522,11 @@
   Cr2 = 0;

   SaveCr2 (&Cr2);

 

-  if (mSmmRelocated && !mSmmInitialized[CpuIndex]) {

+  if (!mSmmInitialized[CpuIndex]) {

     //

-    // Perform SmmInitHandler for CpuIndex

+    // Perform InitializeSmm for CpuIndex

     //

-    SmmInitHandler ();

+    InitializeSmm ();

 

     //

     // Restore Cr2

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
index 74e494f..e400bee 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
@@ -56,11 +56,6 @@
 //

 SMM_CPU_PRIVATE_DATA  *gSmmCpuPrivate = &mSmmCpuPrivateData;

 

-//

-// SMM Relocation variables

-//

-volatile BOOLEAN  *mRebased;

-

 ///

 /// Handle for the SMM CPU Protocol

 ///

@@ -85,7 +80,6 @@
 

 EFI_CPU_INTERRUPT_HANDLER  mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];

 

-BOOLEAN           mSmmRelocated    = FALSE;

 volatile BOOLEAN  *mSmmInitialized = NULL;

 UINT32            mBspApicId       = 0;

 

@@ -135,12 +129,6 @@
 

 UINT8  mPhysicalAddressBits;

 

-//

-// Control register contents saved for SMM S3 resume state initialization.

-//

-UINT32  mSmmCr0;

-UINT32  mSmmCr4;

-

 /**

   Initialize IDT to setup exception handlers for SMM.

 

@@ -337,12 +325,11 @@
 }

 

 /**

-  C function for SMI handler. To change all processor's SMMBase Register.

+  Initialize SMM environment.

 

 **/

 VOID

-EFIAPI

-SmmInitHandler (

+InitializeSmm (

   VOID

   )

 {

@@ -350,10 +337,6 @@
   UINTN    Index;

   BOOLEAN  IsBsp;

 

-  //

-  // Update SMM IDT entries' code segment and load IDT

-  //

-  AsmWriteIdtr (&gcSmiIdtr);

   ApicId = GetApicId ();

 

   IsBsp = (BOOLEAN)(mBspApicId == ApicId);

@@ -363,7 +346,7 @@
   for (Index = 0; Index < mNumberOfCpus; Index++) {

     if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {

       PERF_CODE (

-        MpPerfBegin (Index, SMM_MP_PERF_PROCEDURE_ID (SmmInitHandler));

+        MpPerfBegin (Index, SMM_MP_PERF_PROCEDURE_ID (InitializeSmm));

         );

       //

       // Initialize SMM specific features on the currently executing CPU

@@ -388,15 +371,8 @@
         InitializeMpSyncData ();

       }

 

-      if (!mSmmRelocated) {

-        //

-        // Hook return after RSM to set SMM re-based flag

-        //

-        SemaphoreHook (Index, &mRebased[Index]);

-      }

-

       PERF_CODE (

-        MpPerfEnd (Index, SMM_MP_PERF_PROCEDURE_ID (SmmInitHandler));

+        MpPerfEnd (Index, SMM_MP_PERF_PROCEDURE_ID (InitializeSmm));

         );

 

       return;

@@ -457,107 +433,6 @@
 }

 

 /**

-  Relocate SmmBases for each processor.

-

-  Execute on first boot and all S3 resumes

-

-**/

-VOID

-EFIAPI

-SmmRelocateBases (

-  VOID

-  )

-{

-  UINT8                 BakBuf[BACK_BUF_SIZE];

-  SMRAM_SAVE_STATE_MAP  BakBuf2;

-  SMRAM_SAVE_STATE_MAP  *CpuStatePtr;

-  UINT8                 *U8Ptr;

-  UINTN                 Index;

-  UINTN                 BspIndex;

-

-  PERF_FUNCTION_BEGIN ();

-

-  //

-  // Make sure the reserved size is large enough for procedure SmmInitTemplate.

-  //

-  ASSERT (sizeof (BakBuf) >= gcSmmInitSize);

-

-  //

-  // Patch ASM code template with current CR0, CR3, and CR4 values

-  //

-  mSmmCr0 = (UINT32)AsmReadCr0 ();

-  PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);

-  PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);

-  mSmmCr4 = (UINT32)AsmReadCr4 ();

-  PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);

-

-  //

-  // Patch GDTR for SMM base relocation

-  //

-  gcSmiInitGdtr.Base  = gcSmiGdtr.Base;

-  gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;

-

-  U8Ptr       = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);

-  CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

-

-  //

-  // Backup original contents at address 0x38000

-  //

-  CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));

-  CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));

-

-  //

-  // Load image for relocation

-  //

-  CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);

-

-  //

-  // Retrieve the local APIC ID of current processor

-  //

-  mBspApicId = GetApicId ();

-

-  //

-  // Relocate SM bases for all APs

-  // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate

-  //

-  BspIndex = (UINTN)-1;

-  for (Index = 0; Index < mNumberOfCpus; Index++) {

-    mRebased[Index] = FALSE;

-    if (mBspApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {

-      SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);

-      //

-      // Wait for this AP to finish its 1st SMI

-      //

-      while (!mRebased[Index]) {

-      }

-    } else {

-      //

-      // BSP will be Relocated later

-      //

-      BspIndex = Index;

-    }

-  }

-

-  //

-  // Relocate BSP's SMM base

-  //

-  ASSERT (BspIndex != (UINTN)-1);

-  SendSmiIpi (mBspApicId);

-  //

-  // Wait for the BSP to finish its 1st SMI

-  //

-  while (!mRebased[BspIndex]) {

-  }

-

-  //

-  // Restore contents at address 0x38000

-  //

-  CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));

-  CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));

-  PERF_FUNCTION_END ();

-}

-

-/**

   SMM Ready To Lock event notification handler.

 

   The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to

@@ -966,8 +841,6 @@
 {

   EFI_STATUS  Status;

   UINTN       Index;

-  VOID        *Buffer;

-  UINTN       BufferPages;

   UINTN       TileCodeSize;

   UINTN       TileDataSize;

   UINTN       TileSize;

@@ -986,7 +859,6 @@
   //

   // Initialize address fixup

   //

-  PiSmmCpuSmmInitFixupAddress ();

   PiSmmCpuSmiEntryFixupAddress ();

 

   //

@@ -1198,62 +1070,30 @@
   ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));

 

   //

-  // Retrive the allocated SmmBase from gSmmBaseHobGuid. If found,

+  // Check whether the Required TileSize is enough.

+  //

+  if (TileSize > SIZE_8KB) {

+    DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));

+    FreePool (gSmmCpuPrivate->ProcessorInfo);

+    CpuDeadLoop ();

+    return RETURN_BUFFER_TOO_SMALL;

+  }

+

+  //

+  // Retrieve the allocated SmmBase from gSmmBaseHobGuid. If found,

   // means the SmBase relocation has been done.

   //

   mCpuHotPlugData.SmBase = NULL;

   Status                 = GetSmBase (mMaxNumberOfCpus, &mCpuHotPlugData.SmBase);

-  if (Status == EFI_OUT_OF_RESOURCES) {

-    ASSERT (Status != EFI_OUT_OF_RESOURCES);

+  ASSERT (!EFI_ERROR (Status));

+  if (EFI_ERROR (Status)) {

     CpuDeadLoop ();

   }

 

-  if (!EFI_ERROR (Status)) {

-    ASSERT (mCpuHotPlugData.SmBase != NULL);

-    //

-    // Check whether the Required TileSize is enough.

-    //

-    if (TileSize > SIZE_8KB) {

-      DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));

-      FreePool (mCpuHotPlugData.SmBase);

-      FreePool (gSmmCpuPrivate->ProcessorInfo);

-      CpuDeadLoop ();

-      return RETURN_BUFFER_TOO_SMALL;

-    }

-

-    mSmmRelocated = TRUE;

-  } else {

-    ASSERT (Status == EFI_NOT_FOUND);

-    ASSERT (mCpuHotPlugData.SmBase == NULL);

-    //

-    // When the HOB doesn't exist, allocate new SMBASE itself.

-    //

-    DEBUG ((DEBUG_INFO, "PiCpuSmmEntry: gSmmBaseHobGuid not found!\n"));

-

-    mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);

-    if (mCpuHotPlugData.SmBase == NULL) {

-      ASSERT (mCpuHotPlugData.SmBase != NULL);

-      CpuDeadLoop ();

-    }

-

-    //

-    // very old processors (i486 + pentium) need 32k not 4k alignment, exclude them.

-    //

-    ASSERT (FamilyId >= 6);

-    //

-    // Allocate buffer for all of the tiles.

-    //

-    BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));

-    Buffer      = AllocateAlignedCodePages (BufferPages, SIZE_4KB);

-    if (Buffer == NULL) {

-      DEBUG ((DEBUG_ERROR, "Failed to allocate %Lu pages.\n", (UINT64)BufferPages));

-      CpuDeadLoop ();

-      return EFI_OUT_OF_RESOURCES;

-    }

-

-    ASSERT (Buffer != NULL);

-    DEBUG ((DEBUG_INFO, "New Allcoated SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));

-  }

+  //

+  // ASSERT SmBase has been relocated.

+  //

+  ASSERT (mCpuHotPlugData.SmBase != NULL);

 

   //

   // Allocate buffer for pointers to array in  SMM_CPU_PRIVATE_DATA.

@@ -1283,10 +1123,6 @@
   // size for each CPU in the platform

   //

   for (Index = 0; Index < mMaxNumberOfCpus; Index++) {

-    if (!mSmmRelocated) {

-      mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;

-    }

-

     gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);

     gSmmCpuPrivate->CpuSaveState[Index]     = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);

     gSmmCpuPrivate->Operation[Index]        = SmmCpuNone;

@@ -1383,41 +1219,11 @@
   }

 

   //

-  // Set SMI stack for SMM base relocation

-  //

-  PatchInstructionX86 (

-    gPatchSmmInitStack,

-    (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),

-    sizeof (UINTN)

-    );

-

-  //

   // Initialize IDT

   //

   InitializeSmmIdt ();

 

   //

-  // Check whether Smm Relocation is done or not.

-  // If not, will do the SmmBases Relocation here!!!

-  //

-  if (!mSmmRelocated) {

-    //

-    // Relocate SMM Base addresses to the ones allocated from SMRAM

-    //

-    mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);

-    ASSERT (mRebased != NULL);

-    SmmRelocateBases ();

-

-    //

-    // Call hook for BSP to perform extra actions in normal mode after all

-    // SMM base addresses have been relocated on all CPUs

-    //

-    SmmCpuFeaturesSmmRelocationComplete ();

-  }

-

-  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

-

-  //

   // SMM Time initialization

   //

   InitializeSmmTimer ();

@@ -1453,15 +1259,15 @@
   // Those MSRs & CSRs must be configured before normal SMI sources happen.

   // So, here is to issue SMI IPI (All Excluding  Self SMM IPI + BSP SMM IPI) to execute first SMI init.

   //

-  if (mSmmRelocated) {

-    ExecuteFirstSmiInit ();

+  ExecuteFirstSmiInit ();

 

-    //

-    // Call hook for BSP to perform extra actions in normal mode after all

-    // SMM base addresses have been relocated on all CPUs

-    //

-    SmmCpuFeaturesSmmRelocationComplete ();

-  }

+  //

+  // Call hook for BSP to perform extra actions in normal mode after all

+  // SMM base addresses have been relocated on all CPUs

+  //

+  SmmCpuFeaturesSmmRelocationComplete ();

+

+  DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));

 

   //

   // Fill in SMM Reserved Regions

@@ -1853,88 +1659,6 @@
 }

 

 /**

-  Allocate aligned pages for code.

-

-  @param[in]  Pages                 Number of pages to be allocated.

-  @param[in]  Alignment             The requested alignment of the allocation.

-                                    Must be a power of two.

-                                    If Alignment is zero, then byte alignment is used.

-

-  @return Allocated memory.

-**/

-VOID *

-AllocateAlignedCodePages (

-  IN UINTN  Pages,

-  IN UINTN  Alignment

-  )

-{

-  EFI_STATUS            Status;

-  EFI_PHYSICAL_ADDRESS  Memory;

-  UINTN                 AlignedMemory;

-  UINTN                 AlignmentMask;

-  UINTN                 UnalignedPages;

-  UINTN                 RealPages;

-

-  //

-  // Alignment must be a power of two or zero.

-  //

-  ASSERT ((Alignment & (Alignment - 1)) == 0);

-

-  if (Pages == 0) {

-    return NULL;

-  }

-

-  if (Alignment > EFI_PAGE_SIZE) {

-    //

-    // Calculate the total number of pages since alignment is larger than page size.

-    //

-    AlignmentMask = Alignment - 1;

-    RealPages     = Pages + EFI_SIZE_TO_PAGES (Alignment);

-    //

-    // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.

-    //

-    ASSERT (RealPages > Pages);

-

-    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);

-    if (EFI_ERROR (Status)) {

-      return NULL;

-    }

-

-    AlignedMemory  = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;

-    UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);

-    if (UnalignedPages > 0) {

-      //

-      // Free first unaligned page(s).

-      //

-      Status = gSmst->SmmFreePages (Memory, UnalignedPages);

-      ASSERT_EFI_ERROR (Status);

-    }

-

-    Memory         = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);

-    UnalignedPages = RealPages - Pages - UnalignedPages;

-    if (UnalignedPages > 0) {

-      //

-      // Free last unaligned page(s).

-      //

-      Status = gSmst->SmmFreePages (Memory, UnalignedPages);

-      ASSERT_EFI_ERROR (Status);

-    }

-  } else {

-    //

-    // Do not over-allocate pages in this case.

-    //

-    Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);

-    if (EFI_ERROR (Status)) {

-      return NULL;

-    }

-

-    AlignedMemory = (UINTN)Memory;

-  }

-

-  return (VOID *)AlignedMemory;

-}

-

-/**

   Perform the remaining tasks.

 

 **/

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index 7f244ea..f42910d 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -188,14 +188,6 @@
 #define PROTECT_MODE_CODE_SEGMENT  0x08

 #define LONG_MODE_CODE_SEGMENT     0x38

 

-//

-// The size 0x20 must be bigger than

-// the size of template code of SmmInit. Currently,

-// the size of SmmInit requires the 0x16 Bytes buffer

-// at least.

-//

-#define BACK_BUF_SIZE  0x20

-

 #define EXCEPTION_VECTOR_NUMBER  0x20

 

 #define INVALID_APIC_ID  0xFFFFFFFFFFFFFFFFULL

@@ -357,12 +349,11 @@
   );

 

 /**

-  C function for SMI handler. To change all processor's SMMBase Register.

+  Initialize SMM environment.

 

 **/

 VOID

-EFIAPI

-SmmInitHandler (

+InitializeSmm (

   VOID

   );

 

@@ -375,18 +366,9 @@
   VOID

   );

 

-extern BOOLEAN            mSmmRelocated;

 extern volatile  BOOLEAN  *mSmmInitialized;

 extern UINT32             mBspApicId;

 

-extern CONST UINT8        gcSmmInitTemplate[];

-extern CONST UINT16       gcSmmInitSize;

-X86_ASSEMBLY_PATCH_LABEL  gPatchSmmCr0;

-extern UINT32             mSmmCr0;

-X86_ASSEMBLY_PATCH_LABEL  gPatchSmmCr3;

-extern UINT32             mSmmCr4;

-X86_ASSEMBLY_PATCH_LABEL  gPatchSmmCr4;

-X86_ASSEMBLY_PATCH_LABEL  gPatchSmmInitStack;

 X86_ASSEMBLY_PATCH_LABEL  mPatchCetSupported;

 extern BOOLEAN            mCetSupported;

 

@@ -475,7 +457,6 @@
 extern UINTN                         mSmmStackArrayEnd;

 extern UINTN                         mSmmStackSize;

 extern EFI_SMM_CPU_SERVICE_PROTOCOL  mSmmCpuService;

-extern IA32_DESCRIPTOR               gcSmiInitGdtr;

 extern SMM_CPU_SEMAPHORES            mSmmCpuSemaphores;

 extern UINTN                         mSemaphoreSize;

 extern SPIN_LOCK                     *mPFLock;

@@ -794,18 +775,6 @@
   );

 

 /**

-  Relocate SmmBases for each processor.

-

-  Execute on first boot and all S3 resumes

-

-**/

-VOID

-EFIAPI

-SmmRelocateBases (

-  VOID

-  );

-

-/**

   Page Fault handler for SMM use.

 

   @param  InterruptType    Defines the type of interrupt or exception that

@@ -850,22 +819,6 @@
   );

 

 /**

-  Hook return address of SMM Save State so that semaphore code

-  can be executed immediately after AP exits SMM to indicate to

-  the BSP that an AP has exited SMM after SMBASE relocation.

-

-  @param[in] CpuIndex     The processor index.

-  @param[in] RebasedFlag  A pointer to a flag that is set to TRUE

-                          immediately after AP exits SMM.

-

-**/

-VOID

-SemaphoreHook (

-  IN UINTN             CpuIndex,

-  IN volatile BOOLEAN  *RebasedFlag

-  );

-

-/**

 Configure SMM Code Access Check feature for all processors.

 SMM Feature Control MSR will be locked after configuration.

 **/

@@ -875,33 +828,6 @@
   );

 

 /**

-  Hook the code executed immediately after an RSM instruction on the currently

-  executing CPU.  The mode of code executed immediately after RSM must be

-  detected, and the appropriate hook must be selected.  Always clear the auto

-  HALT restart flag if it is set.

-

-  @param[in] CpuIndex                 The processor index for the currently

-                                      executing CPU.

-  @param[in] CpuState                 Pointer to SMRAM Save State Map for the

-                                      currently executing CPU.

-  @param[in] NewInstructionPointer32  Instruction pointer to use if resuming to

-                                      32-bit mode from 64-bit SMM.

-  @param[in] NewInstructionPointer    Instruction pointer to use if resuming to

-                                      same mode as SMM.

-

-  @retval The value of the original instruction pointer before it was hooked.

-

-**/

-UINT64

-EFIAPI

-HookReturnFromSmm (

-  IN UINTN              CpuIndex,

-  SMRAM_SAVE_STATE_MAP  *CpuState,

-  UINT64                NewInstructionPointer32,

-  UINT64                NewInstructionPointer

-  );

-

-/**

   Get the size of the SMI Handler in bytes.

 

   @retval The size, in bytes, of the SMI Handler.

@@ -1105,22 +1031,6 @@
   IN UINTN  Pages

   );

 

-/**

-  Allocate aligned pages for code.

-

-  @param[in]  Pages                 Number of pages to be allocated.

-  @param[in]  Alignment             The requested alignment of the allocation.

-                                    Must be a power of two.

-                                    If Alignment is zero, then byte alignment is used.

-

-  @return Allocated memory.

-**/

-VOID *

-AllocateAlignedCodePages (

-  IN UINTN  Pages,

-  IN UINTN  Alignment

-  );

-

 //

 // S3 related global variable and function prototype.

 //

@@ -1304,15 +1214,6 @@
 

 /**

   This function fixes up the address of the global variable or function

-  referred in SmmInit assembly files to be the absolute address.

-**/

-VOID

-EFIAPI

-PiSmmCpuSmmInitFixupAddress (

-  );

-

-/**

-  This function fixes up the address of the global variable or function

   referred in SmiEntry assembly files to be the absolute address.

 **/

 VOID

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
index db99a63..3354f94 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -47,24 +47,20 @@
   SmmMpPerf.c

 

 [Sources.Ia32]

-  Ia32/Semaphore.c

   Ia32/PageTbl.c

   Ia32/SmmFuncsArch.c

   Ia32/SmmProfileArch.c

   Ia32/SmmProfileArch.h

-  Ia32/SmmInit.nasm

   Ia32/SmiEntry.nasm

   Ia32/SmiException.nasm

   Ia32/MpFuncs.nasm

   Ia32/Cet.nasm

 

 [Sources.X64]

-  X64/Semaphore.c

   X64/PageTbl.c

   X64/SmmFuncsArch.c

   X64/SmmProfileArch.c

   X64/SmmProfileArch.h

-  X64/SmmInit.nasm

   X64/SmiEntry.nasm

   X64/SmiException.nasm

   X64/MpFuncs.nasm

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMpPerf.h b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMpPerf.h
index 591b212..2e2ea3c 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMpPerf.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMpPerf.h
@@ -14,7 +14,7 @@
 // The list of all MP procedures that need to be perf-logged.

 //

 #define  SMM_MP_PERF_PROCEDURE_LIST(_) \

-  _(SmmInitHandler), \

+  _(InitializeSmm), \

   _(SmmRendezvousEntry), \

   _(PlatformValidSmi), \

   _(SmmRendezvousExit), \

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
index 1e316ee..b9a62ae 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
@@ -70,75 +70,6 @@
 UINT8  mSmmSaveStateRegisterLma;

 

 /**

-  Hook the code executed immediately after an RSM instruction on the currently

-  executing CPU.  The mode of code executed immediately after RSM must be

-  detected, and the appropriate hook must be selected.  Always clear the auto

-  HALT restart flag if it is set.

-

-  @param[in] CpuIndex                 The processor index for the currently

-                                      executing CPU.

-  @param[in] CpuState                 Pointer to SMRAM Save State Map for the

-                                      currently executing CPU.

-  @param[in] NewInstructionPointer32  Instruction pointer to use if resuming to

-                                      32-bit mode from 64-bit SMM.

-  @param[in] NewInstructionPointer    Instruction pointer to use if resuming to

-                                      same mode as SMM.

-

-  @retval The value of the original instruction pointer before it was hooked.

-

-**/

-UINT64

-EFIAPI

-HookReturnFromSmm (

-  IN UINTN              CpuIndex,

-  SMRAM_SAVE_STATE_MAP  *CpuState,

-  UINT64                NewInstructionPointer32,

-  UINT64                NewInstructionPointer

-  )

-{

-  UINT64  OriginalInstructionPointer;

-

-  OriginalInstructionPointer = SmmCpuFeaturesHookReturnFromSmm (

-                                 CpuIndex,

-                                 CpuState,

-                                 NewInstructionPointer32,

-                                 NewInstructionPointer

-                                 );

-  if (OriginalInstructionPointer != 0) {

-    return OriginalInstructionPointer;

-  }

-

-  if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {

-    OriginalInstructionPointer = (UINT64)CpuState->x86._EIP;

-    CpuState->x86._EIP         = (UINT32)NewInstructionPointer;

-    //

-    // Clear the auto HALT restart flag so the RSM instruction returns

-    // program control to the instruction following the HLT instruction.

-    //

-    if ((CpuState->x86.AutoHALTRestart & BIT0) != 0) {

-      CpuState->x86.AutoHALTRestart &= ~BIT0;

-    }

-  } else {

-    OriginalInstructionPointer = CpuState->x64._RIP;

-    if ((CpuState->x64.IA32_EFER & LMA) == 0) {

-      CpuState->x64._RIP = (UINT32)NewInstructionPointer32;

-    } else {

-      CpuState->x64._RIP = (UINT32)NewInstructionPointer;

-    }

-

-    //

-    // Clear the auto HALT restart flag so the RSM instruction returns

-    // program control to the instruction following the HLT instruction.

-    //

-    if ((CpuState->x64.AutoHALTRestart & BIT0) != 0) {

-      CpuState->x64.AutoHALTRestart &= ~BIT0;

-    }

-  }

-

-  return OriginalInstructionPointer;

-}

-

-/**

   Get the size of the SMI Handler in bytes.

 

   @retval The size, in bytes, of the SMI Handler.

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
deleted file mode 100644
index dafbc33..0000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/** @file

-Semaphore mechanism to indicate to the BSP that an AP has exited SMM

-after SMBASE relocation.

-

-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>

-SPDX-License-Identifier: BSD-2-Clause-Patent

-

-**/

-

-#include "PiSmmCpuDxeSmm.h"

-

-X86_ASSEMBLY_PATCH_LABEL  gPatchSmmRelocationOriginalAddressPtr32;

-X86_ASSEMBLY_PATCH_LABEL  gPatchRebasedFlagAddr32;

-

-UINTN             mSmmRelocationOriginalAddress;

-volatile BOOLEAN  *mRebasedFlag;

-

-/**

-AP Semaphore operation in 32-bit mode while BSP runs in 64-bit mode.

-**/

-VOID

-SmmRelocationSemaphoreComplete32 (

-  VOID

-  );

-

-/**

-  Hook return address of SMM Save State so that semaphore code

-  can be executed immediately after AP exits SMM to indicate to

-  the BSP that an AP has exited SMM after SMBASE relocation.

-

-  @param[in] CpuIndex     The processor index.

-  @param[in] RebasedFlag  A pointer to a flag that is set to TRUE

-                          immediately after AP exits SMM.

-

-**/

-VOID

-SemaphoreHook (

-  IN UINTN             CpuIndex,

-  IN volatile BOOLEAN  *RebasedFlag

-  )

-{

-  SMRAM_SAVE_STATE_MAP  *CpuState;

-  UINTN                 TempValue;

-

-  mRebasedFlag = RebasedFlag;

-  PatchInstructionX86 (

-    gPatchRebasedFlagAddr32,

-    (UINT32)(UINTN)mRebasedFlag,

-    4

-    );

-

-  CpuState                      = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);

-  mSmmRelocationOriginalAddress = HookReturnFromSmm (

-                                    CpuIndex,

-                                    CpuState,

-                                    (UINT64)(UINTN)&SmmRelocationSemaphoreComplete32,

-                                    (UINT64)(UINTN)&SmmRelocationSemaphoreComplete

-                                    );

-

-  //

-  // Use temp value to fix ICC compiler warning

-  //

-  TempValue = (UINTN)&mSmmRelocationOriginalAddress;

-  PatchInstructionX86 (

-    gPatchSmmRelocationOriginalAddressPtr32,

-    (UINT32)TempValue,

-    4

-    );

-}

diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
deleted file mode 100644
index 9cf3a6d..0000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
+++ /dev/null
@@ -1,146 +0,0 @@
-;------------------------------------------------------------------------------ ;

-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>

-; SPDX-License-Identifier: BSD-2-Clause-Patent

-;

-; Module Name:

-;

-;   SmmInit.nasm

-;

-; Abstract:

-;

-;   Functions for relocating SMBASE's for all processors

-;

-;-------------------------------------------------------------------------------

-

-%include "StuffRsbNasm.inc"

-

-extern ASM_PFX(SmmInitHandler)

-extern ASM_PFX(mRebasedFlag)

-extern ASM_PFX(mSmmRelocationOriginalAddress)

-

-global ASM_PFX(gPatchSmmCr3)

-global ASM_PFX(gPatchSmmCr4)

-global ASM_PFX(gPatchSmmCr0)

-global ASM_PFX(gPatchSmmInitStack)

-global ASM_PFX(gcSmiInitGdtr)

-global ASM_PFX(gcSmmInitSize)

-global ASM_PFX(gcSmmInitTemplate)

-global ASM_PFX(gPatchRebasedFlagAddr32)

-global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)

-

-%define LONG_MODE_CS 0x38

-

-    DEFAULT REL

-    SECTION .text

-

-ASM_PFX(gcSmiInitGdtr):

-            DW      0

-            DQ      0

-

-global ASM_PFX(SmmStartup)

-

-BITS 16

-ASM_PFX(SmmStartup):

-    mov     eax, 0x80000001             ; read capability

-    cpuid

-    mov     ebx, edx                    ; rdmsr will change edx. keep it in ebx.

-    mov     eax, strict dword 0         ; source operand will be patched

-ASM_PFX(gPatchSmmCr3):

-    mov     cr3, eax

-o32 lgdt    [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]

-    mov     eax, strict dword 0         ; source operand will be patched

-ASM_PFX(gPatchSmmCr4):

-    or      ah,  2                      ; enable XMM registers access

-    mov     cr4, eax

-    mov     ecx, 0xc0000080             ; IA32_EFER MSR

-    rdmsr

-    or      ah, BIT0                    ; set LME bit

-    test    ebx, BIT20                  ; check NXE capability

-    jz      .1

-    or      ah, BIT3                    ; set NXE bit

-.1:

-    wrmsr

-    mov     eax, strict dword 0         ; source operand will be patched

-ASM_PFX(gPatchSmmCr0):

-    mov     cr0, eax                    ; enable protected mode & paging

-    jmp     LONG_MODE_CS : dword 0      ; offset will be patched to @LongMode

-@PatchLongModeOffset:

-

-BITS 64

-@LongMode:                              ; long-mode starts here

-    mov     rsp, strict qword 0         ; source operand will be patched

-ASM_PFX(gPatchSmmInitStack):

-    and     sp, 0xfff0                  ; make sure RSP is 16-byte aligned

-    ;

-    ; According to X64 calling convention, XMM0~5 are volatile, we need to save

-    ; them before calling C-function.

-    ;

-    sub     rsp, 0x60

-    movdqa  [rsp], xmm0

-    movdqa  [rsp + 0x10], xmm1

-    movdqa  [rsp + 0x20], xmm2

-    movdqa  [rsp + 0x30], xmm3

-    movdqa  [rsp + 0x40], xmm4

-    movdqa  [rsp + 0x50], xmm5

-

-    add     rsp, -0x20

-    call    ASM_PFX(SmmInitHandler)

-    add     rsp, 0x20

-

-    ;

-    ; Restore XMM0~5 after calling C-function.

-    ;

-    movdqa  xmm0, [rsp]

-    movdqa  xmm1, [rsp + 0x10]

-    movdqa  xmm2, [rsp + 0x20]

-    movdqa  xmm3, [rsp + 0x30]

-    movdqa  xmm4, [rsp + 0x40]

-    movdqa  xmm5, [rsp + 0x50]

-

-    StuffRsb64

-    rsm

-

-BITS 16

-ASM_PFX(gcSmmInitTemplate):

-    mov ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]

-    sub ebp, 0x30000

-    jmp ebp

-@L1:

-    DQ     0; ASM_PFX(SmmStartup)

-

-ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)

-

-BITS 64

-global ASM_PFX(SmmRelocationSemaphoreComplete)

-ASM_PFX(SmmRelocationSemaphoreComplete):

-    push    rax

-    mov     rax, [ASM_PFX(mRebasedFlag)]

-    mov     byte [rax], 1

-    pop     rax

-    jmp     [ASM_PFX(mSmmRelocationOriginalAddress)]

-

-;

-; Semaphore code running in 32-bit mode

-;

-BITS 32

-global ASM_PFX(SmmRelocationSemaphoreComplete32)

-ASM_PFX(SmmRelocationSemaphoreComplete32):

-    push    eax

-    mov     eax, strict dword 0                ; source operand will be patched

-ASM_PFX(gPatchRebasedFlagAddr32):

-    mov     byte [eax], 1

-    pop     eax

-    jmp     dword [dword 0]                    ; destination will be patched

-ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):

-

-BITS 64

-global ASM_PFX(PiSmmCpuSmmInitFixupAddress)

-ASM_PFX(PiSmmCpuSmmInitFixupAddress):

-    lea    rax, [@LongMode]

-    lea    rcx, [@PatchLongModeOffset - 6]

-    mov    dword [rcx], eax

-

-    lea    rax, [ASM_PFX(SmmStartup)]

-    lea    rcx, [@L1]

-    mov    qword [rcx], rax

-    ret