Add TGL_LP support
Change-Id: I522eeb8ea285eb900890ecf454ee12ed30f867f2
diff --git a/Source/GmmLib/CMakeLists.txt b/Source/GmmLib/CMakeLists.txt
index 4eadbef..dbb3d5e 100644
--- a/Source/GmmLib/CMakeLists.txt
+++ b/Source/GmmLib/CMakeLists.txt
@@ -24,11 +24,11 @@
project(igfx_gmmumd)
# GmmLib Api Version used for so naming
-set(GMMLIB_API_MAJOR_VERSION 9)
+set(GMMLIB_API_MAJOR_VERSION 10)
set(GMMLIB_API_MINOR_VERSION 0)
if(NOT DEFINED MAJOR_VERSION)
- set(MAJOR_VERSION 9)
+ set(MAJOR_VERSION 10)
endif()
if(NOT DEFINED MINOR_VERSION)
@@ -176,10 +176,12 @@
${BS_DIR_GMMLIB}/CachePolicy/GmmCachePolicyUndefineConditionals.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen10CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen11CachePolicy.h
+ ${BS_DIR_GMMLIB}/CachePolicy/GmmGen12CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen8CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen9CachePolicy.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen10.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen11.h
+ ${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen12.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen8.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen9.h
${BS_DIR_GMMLIB}/inc/External/Common/GmmCachePolicy.h
@@ -205,10 +207,12 @@
${BS_DIR_GMMLIB}/inc/External/Linux/GmmResourceInfoLin.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen10Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen11Platform.h
+ ${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen12Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen8Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen9Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen10TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen11TextureCalc.h
+ ${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen12TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen7TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen8TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen9TextureCalc.h
@@ -235,7 +239,9 @@
${BS_DIR_GMMLIB}/CachePolicy/GmmGen9CachePolicy.cpp
${BS_DIR_GMMLIB}/CachePolicy/GmmGen10CachePolicy.cpp
${BS_DIR_GMMLIB}/CachePolicy/GmmGen11CachePolicy.cpp
+ ${BS_DIR_GMMLIB}/CachePolicy/GmmGen12CachePolicy.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen11Platform.cpp
+ ${BS_DIR_GMMLIB}/Platform/GmmGen12Platform.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen8Platform.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen9Platform.cpp
${BS_DIR_GMMLIB}/Platform/GmmGen10Platform.cpp
@@ -249,6 +255,7 @@
${BS_DIR_GMMLIB}/Texture/GmmGen9Texture.cpp
${BS_DIR_GMMLIB}/Texture/GmmGen10Texture.cpp
${BS_DIR_GMMLIB}/Texture/GmmGen11Texture.cpp
+ ${BS_DIR_GMMLIB}/Texture/GmmGen12Texture.cpp
${BS_DIR_GMMLIB}/Texture/GmmTexture.cpp
${BS_DIR_GMMLIB}/Texture/GmmTextureAlloc.cpp
${BS_DIR_GMMLIB}/Texture/GmmTextureSpecialCases.cpp
@@ -271,6 +278,7 @@
${BS_DIR_GMMLIB}/CachePolicy/GmmCachePolicyResourceUsageDefinitions.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen10CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen11CachePolicy.h
+ ${BS_DIR_GMMLIB}/CachePolicy/GmmGen12CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen8CachePolicy.h
${BS_DIR_GMMLIB}/CachePolicy/GmmGen9CachePolicy.h
)
@@ -316,6 +324,7 @@
source_group("Header Files\\External\\Common\\Cache Policy" FILES
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen10.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen11.h
+ ${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen12.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen8.h
${BS_DIR_GMMLIB}/inc/External/Common/CachePolicy/GmmCachePolicyGen9.h
)
@@ -332,6 +341,7 @@
source_group("Header Files\\Internal\\Common\\Platform" FILES
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen10Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen11Platform.h
+ ${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen12Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen8Platform.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Platform/GmmGen9Platform.h
)
@@ -339,6 +349,7 @@
source_group("Header Files\\Internal\\Common\\Texture" FILES
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen10TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen11TextureCalc.h
+ ${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen12TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen7TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen8TextureCalc.h
${BS_DIR_GMMLIB}/inc/Internal/Common/Texture/GmmGen9TextureCalc.h
diff --git a/Source/GmmLib/CachePolicy/GmmCachePolicyResourceUsageDefinitions.h b/Source/GmmLib/CachePolicy/GmmCachePolicyResourceUsageDefinitions.h
old mode 100755
new mode 100644
index 7d70657..7d97ff2
--- a/Source/GmmLib/CachePolicy/GmmCachePolicyResourceUsageDefinitions.h
+++ b/Source/GmmLib/CachePolicy/GmmCachePolicyResourceUsageDefinitions.h
@@ -49,6 +49,8 @@
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_UNMAP_PAGING_RESERVED_GTT_DMA_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VSC_BATCH_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_WA_BATCH_BUFFER )
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_KMD_OCA_BUFFER )
+
//
// 3D Usages
//
@@ -99,20 +101,24 @@
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_TILED_UAV )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER )
-DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_COHERENT_UC )
-DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_CACHED )
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_COHERENT_UC )
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_CACHED )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_OGL_WSTN_VERTEX_BUFFER )
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_POSH_VERTEX_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_UAV )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_RENDER_TARGET_AND_SHADER_RESOURCE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_RENDER_TARGET_AND_SHADER_RESOURCE_PARTIALENCSURFACES )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_WDDM_HISTORY_BUFFER )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_CONTEXT_SAVE_RESTORE )
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_PTBR_PAGE_POOL )
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_PTBR_BATCH_BUFFER )
+
//
// CM USAGES
//
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_SurfaceState )
-DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_StateHeap )
+DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_StateHeap )
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_NO_L3_SurfaceState )
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_NO_LLC_ELLC_SurfaceState )
DEFINE_RESOURCE_USAGE( CM_RESOURCE_USAGE_NO_LLC_SurfaceState )
@@ -175,7 +181,7 @@
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_REF_ENCODE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_MV_DATA_ENCODE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE )
-DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE_FF )
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE_FF )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE_DST )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_ME_DISTORTION_ENCODE )
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_SURFACE_BRC_ME_DISTORTION_ENCODE )
@@ -283,3 +289,7 @@
// Cross Adapter
DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_XADAPTER_SHARED_RESOURCE )
+
+// BCS usages
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_BLT_SOURCE )
+DEFINE_RESOURCE_USAGE( GMM_RESOURCE_USAGE_BLT_DESTINATION )
diff --git a/Source/GmmLib/CachePolicy/GmmGen10CachePolicy.cpp b/Source/GmmLib/CachePolicy/GmmGen10CachePolicy.cpp
index f26a864..59b456d 100644
--- a/Source/GmmLib/CachePolicy/GmmGen10CachePolicy.cpp
+++ b/Source/GmmLib/CachePolicy/GmmGen10CachePolicy.cpp
@@ -254,10 +254,7 @@
GMM_STATUS Status = GMM_SUCCESS;
#if(defined(__GMM_KMD__))
- if(pGmmGlobalContext->GetGtSysInfoPtr()->EdramSizeInKb)
- {
- const_cast<WA_TABLE &>(pGmmGlobalContext->GetWaTable()).WaNoMocsEllcOnly = 1;
- }
+
#else
Status = GMM_ERROR;
#endif
@@ -363,7 +360,7 @@
{
GMM_PRIVATE_PAT PAT = {0};
- if(pGmmGlobalContext->GetWaTable().WaNoMocsEllcOnly)
+ if(pGmmGlobalContext->GetWaTable().FtrMemTypeMocsDeferPAT)
{
GfxTargetCache = GMM_GFX_TC_ELLC_ONLY;
}
diff --git a/Source/GmmLib/CachePolicy/GmmGen12CachePolicy.cpp b/Source/GmmLib/CachePolicy/GmmGen12CachePolicy.cpp
new file mode 100644
index 0000000..61b289c
--- /dev/null
+++ b/Source/GmmLib/CachePolicy/GmmGen12CachePolicy.cpp
@@ -0,0 +1,686 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+
+#include "Internal/Common/GmmLibInc.h"
+#include "External/Common/GmmCachePolicy.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen10.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen11.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen12.h"
+
+#if __GMM_KMD__
+extern "C" NTSTATUS __GmmReadDwordKeyValue(void *pKmdGmmContext, char *pPath, WCHAR *pValueName, ULONG *pValueData);
+extern "C" NTSTATUS __GmmWriteDwordKeyValue(void *pKmdGmmContext, char *pCStringPath, WCHAR *pValueName, ULONG DWord);
+#endif
+
+//=============================================================================
+//
+// Function: IsSpecialMOCSUsage
+//
+// Desc: This function returns special(hw-reserved) MocsIdx based on usage
+//
+// Parameters: usage -> Resource usage type
+// UpdateMOCS -> True if MOCS Table must be updated, ow false
+//
+// Return: int32_t
+//
+//-----------------------------------------------------------------------------
+int32_t GmmLib::GmmGen12CachePolicy::IsSpecialMOCSUsage(GMM_RESOURCE_USAGE_TYPE Usage, bool &UpdateMOCS)
+{
+ int32_t MocsIdx = -1;
+ UpdateMOCS = true;
+//Macros for L3-Eviction Type
+#define NA 0x0
+#define RO 0x1
+#define RW 0x2
+#define SP 0x3
+
+ switch(Usage)
+ {
+ case GMM_RESOURCE_USAGE_CCS:
+ __GMM_ASSERT(pCachePolicy[Usage].L3 == 0); //Architecturally, CCS isn't L3-cacheable.
+
+ pCachePolicy[Usage].L3 = 0;
+ MocsIdx = 60;
+ break;
+ case GMM_RESOURCE_USAGE_MOCS_62:
+ __GMM_ASSERT(pCachePolicy[Usage].L3 == 0); //Architecturally, TR/Aux-TT node isn't L3-cacheable.
+
+ pCachePolicy[Usage].L3 = 0;
+ MocsIdx = 62;
+ break;
+ case GMM_RESOURCE_USAGE_L3_EVICTION:
+ __GMM_ASSERT(pCachePolicy[Usage].L3 == 0 &&
+ pCachePolicy[Usage].L3Eviction == RW); //Reserved MOCS for L3-evictions
+
+ pCachePolicy[Usage].L3 = 0;
+ pCachePolicy[Usage].L3Eviction = RW;
+ MocsIdx = 63;
+ break;
+ case GMM_RESOURCE_USAGE_L3_EVICTION_SPECIAL:
+ case GMM_RESOURCE_USAGE_CCS_MEDIA_WRITABLE:
+ __GMM_ASSERT(pCachePolicy[Usage].L3 &&
+ pCachePolicy[Usage].L3Eviction == SP); //Reserved MOCS for L3-evictions
+ //Special-case for Displayable, and similar non-LLC accesses
+ GMM_ASSERTDPF(pCachePolicy[Usage].LLC == 0, "MOCS#61's Special Eviction isn't for LLC caching");
+
+ pCachePolicy[Usage].L3 = 1;
+ pCachePolicy[Usage].L3Eviction = SP;
+ MocsIdx = 61;
+ break;
+ default:
+ UpdateMOCS = false;
+ break;
+ }
+
+ if(pCachePolicy[Usage].L3Eviction == RW)
+ {
+ GMM_CACHE_POLICY_ELEMENT L3Eviction;
+ L3Eviction.Value = pCachePolicy[GMM_RESOURCE_USAGE_L3_EVICTION].Value;
+
+ //For internal purpose, hw overrides MOCS#63 as L3-uncacheable, still using it for L3-evictions
+ if(Usage != GMM_RESOURCE_USAGE_L3_EVICTION)
+ {
+ L3Eviction.L3 = 1; //Override L3, to verify MOCS#63 applicable or not
+ }
+
+ __GMM_ASSERT(pCachePolicy[Usage].Value == L3Eviction.Value); //Allow mis-match due to override registries
+ //MocsIdx = 63; //Use non-#63 MOCS, #63 itself is L3-uncached
+ }
+ else if(pCachePolicy[Usage].L3Eviction == SP)
+ {
+ __GMM_ASSERT(pCachePolicy[Usage].Value == pCachePolicy[GMM_RESOURCE_USAGE_L3_EVICTION_SPECIAL].Value); //Allow mis-match due to override registries
+ MocsIdx = 61;
+ }
+
+ return MocsIdx;
+}
+//=============================================================================
+//
+// Function: __GmmGen12InitCachePolicy
+//
+// Desc: This function initializes the cache policy
+//
+// Parameters: pCachePolicy -> Ptr to array to be populated with the
+// mapping of usages -> cache settings.
+//
+// Return: GMM_STATUS
+//
+//-----------------------------------------------------------------------------
+GMM_STATUS GmmLib::GmmGen12CachePolicy::InitCachePolicy()
+{
+
+ __GMM_ASSERTPTR(pCachePolicy, GMM_ERROR);
+
+#define DEFINE_CACHE_ELEMENT(usage, llc, ellc, l3, wt, age, aom, lecc_scc, l3_scc, scf, sso, cos, hdcl1, l3evict) DEFINE_CP_ELEMENT(usage, llc, ellc, l3, wt, age, aom, lecc_scc, l3_scc, scf, sso, cos, hdcl1, l3evict, 0, 0, 0)
+
+#include "GmmGen12CachePolicy.h"
+
+#define TC_LLC (1)
+#define TC_ELLC (0)
+#define TC_LLC_ELLC (2)
+
+#define LeCC_UNCACHEABLE (0x0)
+#define LeCC_WC_UNCACHEABLE (0x1)
+#define LeCC_WT_CACHEABLE (0x2) //Only used as MemPushWrite disqualifier if set along with eLLC-only -still holds on gen12+?
+#define LeCC_WB_CACHEABLE (0x3)
+
+#define L3_UNCACHEABLE (0x1)
+#define L3_WB_CACHEABLE (0x3)
+
+#define DISABLE_SKIP_CACHING_CONTROL (0x0)
+#define ENABLE_SKIP_CACHING_CONTROL (0x1)
+
+#define DISABLE_SELF_SNOOP_OVERRIDE (0x0)
+#define ENABLE_SELF_SNOOP_OVERRIDE (0x1)
+#define ENABLE_SELF_SNOOP_ALWAYS (0x3)
+#define CLASS_SERVICE_ZERO (0x0)
+
+ {
+ SetUpMOCSTable();
+ }
+
+ {
+ // Define index of cache element
+ uint32_t Usage = 0;
+
+#if(_WIN32 && (_DEBUG || _RELEASE_INTERNAL))
+ void *pKmdGmmContext = NULL;
+#if(defined(__GMM_KMD__))
+ pKmdGmmContext = pGmmGlobalContext->GetGmmKmdContext();
+#endif
+
+ OverrideCachePolicy(pKmdGmmContext);
+#endif
+ // Process the cache policy and fill in the look up table
+ for(; Usage < GMM_RESOURCE_USAGE_MAX; Usage++)
+ {
+ bool CachePolicyError = false;
+ bool SpecialMOCS = false;
+ int32_t CPTblIdx = -1;
+ uint32_t j = 0;
+ uint32_t PTEValue = 0;
+ GMM_CACHE_POLICY_TBL_ELEMENT UsageEle = {0};
+
+ CPTblIdx = IsSpecialMOCSUsage((GMM_RESOURCE_USAGE_TYPE)Usage, SpecialMOCS);
+
+ UsageEle.LeCC.Reserved = 0; // Reserved bits zeroed, this is so
+ // we can compare the unioned LeCC.DwordValue.
+ UsageEle.LeCC.SelfSnoop = DISABLE_SELF_SNOOP_OVERRIDE;
+ UsageEle.LeCC.CoS = CLASS_SERVICE_ZERO;
+ UsageEle.LeCC.SCC = 0;
+ UsageEle.LeCC.ESC = 0;
+
+ if(pCachePolicy[Usage].SCF && pGmmGlobalContext->GetSkuTable().FtrLLCBypass)
+ {
+ UsageEle.LeCC.SCF = pCachePolicy[Usage].SCF;
+ __GMM_ASSERT(pCachePolicy[Usage].LLC == 0); //LLC and ByPassLLC are mutually-exclusive
+ }
+
+ if(pCachePolicy[Usage].SSO & ENABLE_SELF_SNOOP_OVERRIDE)
+ {
+ UsageEle.LeCC.SelfSnoop = pCachePolicy[Usage].SSO & ENABLE_SELF_SNOOP_ALWAYS;
+ }
+ if(pCachePolicy[Usage].CoS)
+ {
+ UsageEle.LeCC.CoS = pCachePolicy[Usage].CoS;
+ }
+ if(pCachePolicy[Usage].HDCL1)
+ {
+ UsageEle.HDCL1 = 1;
+ }
+ if(pCachePolicy[Usage].LeCC_SCC)
+ {
+ UsageEle.LeCC.SCC = pCachePolicy[Usage].LeCC_SCC;
+ UsageEle.LeCC.ESC = ENABLE_SKIP_CACHING_CONTROL;
+ }
+ UsageEle.LeCC.LRUM = pCachePolicy[Usage].AGE;
+
+ // default to LLC target cache.
+ UsageEle.LeCC.TargetCache = TC_LLC;
+ UsageEle.LeCC.Cacheability = LeCC_WB_CACHEABLE;
+ if(pCachePolicy[Usage].LLC)
+ {
+ UsageEle.LeCC.TargetCache = TC_LLC;
+ __GMM_ASSERT(pCachePolicy[Usage].SCF == 0); //LLC and ByPassLLC are mutually-exclusive
+ }
+ else
+ {
+ UsageEle.LeCC.Cacheability = LeCC_WC_UNCACHEABLE;
+ }
+ UsageEle.L3.Reserved = 0; // Reserved bits zeroed, this is so
+ // we can compare the unioned L3.UshortValue.
+ UsageEle.L3.ESC = DISABLE_SKIP_CACHING_CONTROL;
+ UsageEle.L3.SCC = 0;
+ UsageEle.L3.Cacheability = pCachePolicy[Usage].L3 ? L3_WB_CACHEABLE : L3_UNCACHEABLE;
+
+ __GMM_ASSERT((pCachePolicy[Usage].L3 && pCachePolicy[Usage].L3Eviction != 0) ||
+ (pCachePolicy[Usage].L3 == 0 && (pCachePolicy[Usage].L3Eviction == 0 || Usage == GMM_RESOURCE_USAGE_L3_EVICTION)));
+
+ if(pCachePolicy[Usage].L3_SCC)
+ {
+ UsageEle.L3.ESC = ENABLE_SKIP_CACHING_CONTROL;
+ UsageEle.L3.SCC = (uint16_t)pCachePolicy[Usage].L3_SCC;
+ }
+
+ //Special-case MOCS handling for MOCS Table Index 60-63
+ if(CPTblIdx >= GMM_GEN12_MAX_NUMBER_MOCS_INDEXES)
+ {
+ GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &pGmmGlobalContext->GetCachePolicyTlbElement()[CPTblIdx];
+
+ if(SpecialMOCS &&
+ !(TblEle->LeCC.DwordValue == UsageEle.LeCC.DwordValue &&
+ TblEle->L3.UshortValue == UsageEle.L3.UshortValue &&
+ TblEle->HDCL1 == UsageEle.HDCL1))
+ {
+ //Assert if being overwritten!
+ __GMM_ASSERT(TblEle->LeCC.DwordValue == 0 &&
+ TblEle->L3.UshortValue == 0 &&
+ TblEle->HDCL1 == 0);
+
+#if(_WIN32 && (_DEBUG || _RELEASE_INTERNAL))
+ if(pCachePolicy[Usage].IsOverridenByRegkey)
+ {
+ TblEle->LeCC.DwordValue = UsageEle.LeCC.DwordValue;
+ TblEle->L3.UshortValue = UsageEle.L3.UshortValue;
+ TblEle->HDCL1 = UsageEle.HDCL1;
+ }
+#endif
+ }
+ }
+ //For HDC L1 caching, MOCS Table index 48-59 should be used
+ else if(UsageEle.HDCL1)
+ {
+ for(j = GMM_GEN10_HDCL1_MOCS_INDEX_START; j <= CurrentMaxL1HdcMocsIndex; j++)
+ {
+ GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &pGmmGlobalContext->GetCachePolicyTlbElement()[j];
+ if(TblEle->LeCC.DwordValue == UsageEle.LeCC.DwordValue &&
+ TblEle->L3.UshortValue == UsageEle.L3.UshortValue &&
+ TblEle->HDCL1 == UsageEle.HDCL1)
+ {
+ CPTblIdx = j;
+ break;
+ }
+ }
+ }
+ else
+ {
+ // Due to unstable system behavior on TGLLP, MOCS #0 index had to be programmed as UC in MOCS lookup table - pCachePolicyTlbElement
+ // But still Index 0 is Reserved for Error by HW and should not be used.
+ // Hence GmmLib will opt out from the MOCS#0 usage and Lookup into MOCS table and MOCS index assignment must start from Index 1.
+ for(j = 1; j <= CurrentMaxMocsIndex; j++)
+ {
+ GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &pGmmGlobalContext->GetCachePolicyTlbElement()[j];
+ if(TblEle->LeCC.DwordValue == UsageEle.LeCC.DwordValue &&
+ TblEle->L3.UshortValue == UsageEle.L3.UshortValue &&
+ TblEle->HDCL1 == UsageEle.HDCL1)
+ {
+ CPTblIdx = j;
+ break;
+ }
+ }
+ }
+
+ // Didn't find the caching settings in one of the already programmed lookup table entries.
+ // Need to add a new lookup table entry.
+ if(CPTblIdx == -1)
+ {
+
+#if(_WIN32 && (_DEBUG || _RELEASE_INTERNAL))
+ // If the Cache Policy setting is overridden through regkey,
+ // don't raise an assert/log error. Raising an assert for debug/perf testing isn't really helpful
+ if(pCachePolicy[Usage].IsOverridenByRegkey)
+ {
+ if(UsageEle.HDCL1 && CurrentMaxL1HdcMocsIndex < GMM_GEN12_MAX_NUMBER_MOCS_INDEXES - 1)
+ {
+ GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &(pGmmGlobalContext->GetCachePolicyTlbElement()[++CurrentMaxL1HdcMocsIndex]);
+ CPTblIdx = CurrentMaxL1HdcMocsIndex;
+
+ TblEle->LeCC.DwordValue = UsageEle.LeCC.DwordValue;
+ TblEle->L3.UshortValue = UsageEle.L3.UshortValue;
+ TblEle->HDCL1 = UsageEle.HDCL1;
+ }
+ else if(CurrentMaxMocsIndex < GMM_GEN10_HDCL1_MOCS_INDEX_START)
+ {
+ GMM_CACHE_POLICY_TBL_ELEMENT *TblEle = &(pGmmGlobalContext->GetCachePolicyTlbElement()[++CurrentMaxMocsIndex]);
+ CPTblIdx = CurrentMaxMocsIndex;
+
+ TblEle->LeCC.DwordValue = UsageEle.LeCC.DwordValue;
+ TblEle->L3.UshortValue = UsageEle.L3.UshortValue;
+ TblEle->HDCL1 = UsageEle.HDCL1;
+ }
+ else
+ {
+ // Too many unique caching combinations to program the
+ // MOCS lookup table.
+ CachePolicyError = true;
+ GMM_ASSERTDPF(
+ "Cache Policy Init Error: Invalid Cache Programming, too many unique caching combinations"
+ "(we only support GMM_GEN_MAX_NUMBER_MOCS_INDEXES = %d)",
+ GMM_MAX_NUMBER_MOCS_INDEXES - 1);
+ // Set cache policy index to uncached.
+ CPTblIdx = 3;
+ }
+ }
+ else
+#endif
+ {
+ GMM_ASSERTDPF(false, "CRITICAL ERROR: Cache Policy Usage value specified by Client is not defined in Fixed MOCS Table!");
+
+// Log Error using regkey to indicate the above error
+#if(_WIN32 && (_DEBUG || _RELEASE_INTERNAL) && __GMM_KMD__)
+ REGISTRY_OVERRIDE_WRITE(pKmdGmmContext, Usage, NewMOCSEntryLeCCValue, UsageEle.LeCC.DwordValue);
+ REGISTRY_OVERRIDE_WRITE(pKmdGmmContext, Usage, NewMOCSEntryL3Value, UsageEle.L3.UshortValue);
+ REGISTRY_OVERRIDE_WRITE(pKmdGmmContext, Usage, NewMOCSEntryHDCL1, UsageEle.HDCL1);
+#endif
+
+ CachePolicyError = true;
+ GMM_ASSERTDPF(
+ "Cache Policy Init Error: Invalid Cache Programming, too many unique caching combinations"
+ "(we only support GMM_GEN_MAX_NUMBER_MOCS_INDEXES = %d)",
+ CurrentMaxMocsIndex);
+
+ // Set cache policy index to uncached.
+ CPTblIdx = 3;
+ }
+ }
+
+ // PTE entries do not control caching on SKL+ (for legacy context)
+ if(!GetUsagePTEValue(pCachePolicy[Usage], Usage, &PTEValue))
+ {
+ CachePolicyError = true;
+ }
+
+ pCachePolicy[Usage].PTE.DwordValue = PTEValue;
+
+ pCachePolicy[Usage].MemoryObjectOverride.Gen12.Index = CPTblIdx;
+
+ pCachePolicy[Usage].Override = ALWAYS_OVERRIDE;
+
+ if(CachePolicyError)
+ {
+ GMM_ASSERTDPF("Cache Policy Init Error: Invalid Cache Programming - Element %d", Usage);
+ }
+ }
+ }
+
+ return GMM_SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Return true if (MT2) is a better match for (WantedMT)
+/// than (MT1)
+///
+/// @param[in] WantedMT: Wanted Memory Type
+/// @param[in] MT1: Memory Type for PATIdx1
+/// @param[in] MT2: Memory Type for PATIdx2
+///
+/// @return Select the new PAT Index True/False
+/////////////////////////////////////////////////////////////////////////////////////
+uint8_t GmmLib::GmmGen12CachePolicy::SelectNewPATIdx(GMM_GFX_MEMORY_TYPE WantedMT,
+ GMM_GFX_MEMORY_TYPE MT1, GMM_GFX_MEMORY_TYPE MT2)
+{
+ uint8_t SelectPAT2 = 0;
+
+ // select on Memory Type
+ if(MT1 != WantedMT)
+ {
+ if(MT2 == WantedMT || MT2 == GMM_GFX_UC_WITH_FENCE)
+ {
+ SelectPAT2 = 1;
+ }
+ goto EXIT;
+ }
+
+EXIT:
+ return SelectPAT2;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Returns the PAT idx that best matches the cache policy for this usage.
+///
+/// @param: CachePolicy: cache policy for a usage
+///
+/// @return PAT Idx to use in the PTE
+/////////////////////////////////////////////////////////////////////////////////////
+uint32_t GmmLib::GmmGen12CachePolicy::BestMatchingPATIdx(GMM_CACHE_POLICY_ELEMENT CachePolicy)
+{
+ uint32_t i;
+ uint32_t PATIdx = 0;
+ GMM_GFX_MEMORY_TYPE WantedMemoryType = GMM_GFX_UC_WITH_FENCE, MemoryType;
+ WA_TABLE * pWaTable = &const_cast<WA_TABLE &>(pGmmGlobalContext->GetWaTable());
+
+ WantedMemoryType = GetWantedMemoryType(CachePolicy);
+
+ // Override wantedMemoryType so that PAT.MT is UC
+ // Gen12 uses max function to resolve PAT-vs-MOCS MemType, So unless PTE.PAT says UC, MOCS won't be able to set UC!
+ if(pWaTable->WaMemTypeIsMaxOfPatAndMocs)
+ {
+ WantedMemoryType = GMM_GFX_UC_WITH_FENCE;
+ }
+
+ for(i = 1; i < GMM_NUM_PAT_ENTRIES; i++)
+ {
+ GMM_PRIVATE_PAT PAT1 = GetPrivatePATEntry(PATIdx);
+ GMM_PRIVATE_PAT PAT2 = GetPrivatePATEntry(i);
+
+ if(SelectNewPATIdx(WantedMemoryType,
+ (GMM_GFX_MEMORY_TYPE)PAT1.Gen12.MemoryType,
+ (GMM_GFX_MEMORY_TYPE)PAT2.Gen12.MemoryType))
+ {
+ PATIdx = i;
+ }
+ }
+
+ MemoryType = (GMM_GFX_MEMORY_TYPE)GetPrivatePATEntry(PATIdx).Gen12.MemoryType;
+
+ if(MemoryType != WantedMemoryType)
+ {
+ // Failed to find a matching PAT entry
+ return GMM_PAT_ERROR;
+ }
+ return PATIdx;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Initializes WA's needed for setting up the Private PATs
+/// WaNoMocsEllcOnly (reset)
+/// WaGttPat0, WaGttPat0GttWbOverOsIommuEllcOnly, WaGttPat0WB (use from base class)
+///
+/// @return GMM_STATUS
+///
+/////////////////////////////////////////////////////////////////////////////////////
+GMM_STATUS GmmLib::GmmGen12CachePolicy::SetPATInitWA()
+{
+ GMM_STATUS Status = GMM_SUCCESS;
+ WA_TABLE * pWaTable = &const_cast<WA_TABLE &>(pGmmGlobalContext->GetWaTable());
+
+#if(defined(__GMM_KMD__))
+ __GMM_ASSERT(pGmmGlobalContext->GetSkuTable().FtrMemTypeMocsDeferPAT == 0x0); //MOCS.TargetCache supports eLLC only, PAT.TC -> reserved bits.
+ pWaTable->WaGttPat0WB = 0; //Override PAT #0
+#else
+ Status = GMM_ERROR;
+#endif
+
+ return Status;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Initializes the Gfx PAT tables for AdvCtx and Gfx MMIO/Private PAT
+/// PAT0 = WB_COHERENT or UC depending on WaGttPat0WB
+/// PAT1 = UC or WB_COHERENT depending on WaGttPat0WB
+/// PAT2 = WB_MOCSLESS
+/// PAT3 = WB
+/// PAT4 = WT
+/// PAT5 = WC
+/// PAT6 = WC
+/// PAT7 = WC
+/// HLD says to set to PAT0/1 to WC, but since we don't have a WC in GPU,
+/// WC option is same as UC. Hence setting PAT0 or PAT1 to UC.
+/// Unused PAT's (5,6,7) are set to WC.
+///
+/// @return GMM_STATUS
+/////////////////////////////////////////////////////////////////////////////////////
+GMM_STATUS GmmLib::GmmGen12CachePolicy::SetupPAT()
+{
+ GMM_STATUS Status = GMM_SUCCESS;
+#if(defined(__GMM_KMD__))
+ uint32_t i = 0;
+
+ GMM_GFX_MEMORY_TYPE GfxMemType = GMM_GFX_UC_WITH_FENCE;
+ int32_t * pPrivatePATTableMemoryType = NULL;
+ pPrivatePATTableMemoryType = pGmmGlobalContext->GetPrivatePATTableMemoryType();
+
+ __GMM_ASSERT(pGmmGlobalContext->GetSkuTable().FtrIA32eGfxPTEs);
+
+ for(i = 0; i < GMM_NUM_GFX_PAT_TYPES; i++)
+ {
+ pPrivatePATTableMemoryType[i] = -1;
+ }
+
+ // Set values for GmmGlobalInfo PrivatePATTable
+ for(i = 0; i < GMM_NUM_PAT_ENTRIES; i++)
+ {
+ GMM_PRIVATE_PAT PAT = {0};
+
+ switch(i)
+ {
+ case PAT0:
+ if(pGmmGlobalContext->GetWaTable().WaGttPat0)
+ {
+ if(pGmmGlobalContext->GetWaTable().WaGttPat0WB)
+ {
+ GfxMemType = GMM_GFX_WB;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_WB_COHERENT] = PAT0;
+ }
+ else
+ {
+ GfxMemType = GMM_GFX_UC_WITH_FENCE;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_UC] = PAT0;
+ }
+ }
+ else // if GTT is not tied to PAT0 then WaGttPat0WB is NA
+ {
+ GfxMemType = GMM_GFX_WB;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_WB_COHERENT] = PAT0;
+ }
+ break;
+
+ case PAT1:
+ if(pGmmGlobalContext->GetWaTable().WaGttPat0 && !pGmmGlobalContext->GetWaTable().WaGttPat0WB)
+ {
+ GfxMemType = GMM_GFX_WB;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_WB_COHERENT] = PAT1;
+ }
+ else
+ {
+ GfxMemType = GMM_GFX_UC_WITH_FENCE;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_UC] = PAT1;
+ }
+ break;
+
+ case PAT2:
+ // This PAT idx shall be used for MOCS'Less resources like Page Tables
+ // Page Tables have TC hardcoded to eLLC+LLC in Adv Ctxt. Hence making this to have same in Leg Ctxt.
+ // For BDW-H, due to Perf issue, TC has to be eLLC only for Page Tables when eDRAM is present.
+ GfxMemType = GMM_GFX_WB;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_WB_MOCSLESS] = PAT2;
+ break;
+
+ case PAT3:
+ GfxMemType = GMM_GFX_WB;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_WB] = PAT3;
+ break;
+
+ case PAT4:
+ GfxMemType = GMM_GFX_WT;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_WT] = PAT4;
+ break;
+
+ case PAT5:
+ case PAT6:
+ case PAT7:
+ GfxMemType = GMM_GFX_WC;
+ pPrivatePATTableMemoryType[GMM_GFX_PAT_WC] = PAT5;
+ break;
+
+ default:
+ __GMM_ASSERT(0);
+ Status = GMM_ERROR;
+ }
+
+ PAT.Gen12.MemoryType = GfxMemType;
+
+ SetPrivatePATEntry(i, PAT);
+ }
+
+#else
+ Status = GMM_ERROR;
+#endif
+ return Status;
+}
+
+//=============================================================================
+//
+// Function: SetUpMOCSTable
+//
+// Desc:
+//
+// Parameters:
+//
+// Return: GMM_STATUS
+//
+//-----------------------------------------------------------------------------
+void GmmLib::GmmGen12CachePolicy::SetUpMOCSTable()
+{
+ GMM_CACHE_POLICY_TBL_ELEMENT *pCachePolicyTlbElement = &(pGmmGlobalContext->GetCachePolicyTlbElement()[0]);
+
+#define GMM_DEFINE_MOCS(Index, L3_ESC, L3_SCC, L3_CC, LeCC_CC, LeCC_TC, LeCC_LRUM, LeCC_AOM, LeCC_ESC, LeCC_SCC, LeCC_PFM, LeCC_SCF, LeCC_CoS, LeCC_SelfSnoop, _HDCL1) \
+ { \
+ pCachePolicyTlbElement[Index].L3.ESC = L3_ESC; \
+ pCachePolicyTlbElement[Index].L3.SCC = L3_SCC; \
+ pCachePolicyTlbElement[Index].L3.Cacheability = L3_CC; \
+ pCachePolicyTlbElement[Index].LeCC.Cacheability = LeCC_CC; \
+ pCachePolicyTlbElement[Index].LeCC.TargetCache = LeCC_TC; \
+ pCachePolicyTlbElement[Index].LeCC.LRUM = LeCC_LRUM; \
+ pCachePolicyTlbElement[Index].LeCC.AOM = LeCC_AOM; \
+ pCachePolicyTlbElement[Index].LeCC.ESC = LeCC_ESC; \
+ pCachePolicyTlbElement[Index].LeCC.SCC = LeCC_SCC; \
+ pCachePolicyTlbElement[Index].LeCC.PFM = LeCC_PFM; \
+ pCachePolicyTlbElement[Index].LeCC.SCF = LeCC_SCF; \
+ pCachePolicyTlbElement[Index].LeCC.CoS = LeCC_CoS; \
+ pCachePolicyTlbElement[Index].LeCC.SelfSnoop = LeCC_SelfSnoop; \
+ pCachePolicyTlbElement[Index].HDCL1 = _HDCL1; \
+ }
+
+ // clang-format off
+
+ // Fixed MOCS Table
+ // Index ESC SCC L3CC LeCC TC LRUM DAoM ERSC SCC PFM SCF CoS SSE HDCL1
+ GMM_DEFINE_MOCS( 0 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 2 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 3 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 4 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 5 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 6 , 0 , 0 , 1 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 7 , 0 , 0 , 3 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 8 , 0 , 0 , 1 , 3 , 1 , 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 9 , 0 , 0 , 3 , 3 , 1 , 2 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 10 , 0 , 0 , 1 , 3 , 1 , 3 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 11 , 0 , 0 , 3 , 3 , 1 , 3 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 12 , 0 , 0 , 1 , 3 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 13 , 0 , 0 , 3 , 3 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 14 , 0 , 0 , 1 , 3 , 1 , 2 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 15 , 0 , 0 , 3 , 3 , 1 , 2 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 16 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 17 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 18 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 3 , 0 )
+ GMM_DEFINE_MOCS( 19 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 7 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 20 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 3 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 21 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 1 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 22 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 1 , 3 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 23 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 1 , 7 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 48 , 0 , 0 , 3 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )
+ GMM_DEFINE_MOCS( 49 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )
+ GMM_DEFINE_MOCS( 50 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )
+ GMM_DEFINE_MOCS( 51 , 0 , 0 , 1 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1 )
+ GMM_DEFINE_MOCS( 60 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 61 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 1 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 62 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 63 , 0 , 0 , 1 , 3 , 1 , 3 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+
+
+ if(!pGmmGlobalContext->GetSkuTable().FtrLLCBypass)
+ {
+ GMM_DEFINE_MOCS( 16 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 17 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ GMM_DEFINE_MOCS( 61 , 0 , 0 , 3 , 1 , 1 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 )
+ }
+ // clang-format on
+
+ CurrentMaxMocsIndex = 23;
+ CurrentMaxL1HdcMocsIndex = 51;
+ CurrentMaxSpecialMocsIndex = 63;
+
+#undef GMM_DEFINE_MOCS
+}
diff --git a/Source/GmmLib/CachePolicy/GmmGen12CachePolicy.h b/Source/GmmLib/CachePolicy/GmmGen12CachePolicy.h
new file mode 100644
index 0000000..776e4ad
--- /dev/null
+++ b/Source/GmmLib/CachePolicy/GmmGen12CachePolicy.h
@@ -0,0 +1,300 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+
+#include "GmmCachePolicyConditionals.h"
+#define EDRAM (SKU(FtrEDram))
+#define FBLLC (SKU(FtrFrameBufferLLC))
+#define NS (SKU(FtrLLCBypass))
+//Macros for L3-Eviction Type
+#define NA 0x0
+#define RO 0x1
+#define RW 0x2
+#define SP 0x3
+
+// Cache Policy Definition
+// AOM = Do not allocate on miss (0 = allocate on miss [normal cache behavior], 1 = don't allocate on miss)
+// LeCC_SCC = LLC/eLLC skip caching control (disabled if LeCC_SCC = 0)
+// L3_SCC = L3 skip caching control (disabled if L3_SCC = 0)
+// SCF = Snoop Control Field (SCF) - Only for SKL/BXT and Gen12+ (as coherent/non-coherent)
+// SSO = Override MIDI self snoop settings (1 = never send to uncore, 3 = always send to uncore, 0 = [default] No override)
+// CoS = Class of Service ( allowed values 1, 2, 3 for class IDs 1, 2, 3 respectively, default class 0)
+// HDCL1 = HDC L1 cache control (1 = cached in HDC L1, 0 = not cached in HDC L1)
+// Faster PushWrite(Gen10+) used iff !WT, eLLC-only cacheable - Globally visible surface (eg display surface) should be marked WT
+// L3Evict = Type of L3-eviction (0 = NA ie not L3 cacheable, 1 = RO ie ReadOnly, 2 = RW ie Standard using MOCS#63, 3 = SP ie Special using MOCS#61 for non-LLC access)
+//***************************************************************************************************************/
+// USAGE TYPE , LLC , ELLC , L3 , WT , AGE , AOM , LeCC_SCC , L3_SCC, SCF, SSO, CoS, HDCL1, L3Evict)
+/****************************************************************************************************************/
+
+// KMD Usages
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_BATCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_COMP_FRAME_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CONTEXT_SWITCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CURSOR , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DISPLAY_STATIC_IMG_FOR_SMOOTH_ROTATION_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DUMMY_PAGE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GDI_SURFACE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GENERIC_KMD_RESOURCE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+// GMM_RESOURCE_USAGE_GFX_RING is only used if WaEnableRingHostMapping is enabled.
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GFX_RING , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GTT_TRANSFER_REGION , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HW_CONTEXT , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STATE_MANAGER_KERNEL_STATE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_KMD_STAGING_SURFACE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MBM_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_NNDI_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OVERLAY_MBM , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PRIMARY_SURFACE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, NS, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SCREEN_PROTECTION_INTERMEDIATE_SURFACE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SHADOW_SURFACE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SM_SCRATCH_STATE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STATUS_PAGE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TIMER_PERF_QUEUE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_UNKNOWN , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_UNMAP_PAGING_RESERVED_GTT_DMA_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VSC_BATCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_WA_BATCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_KMD_OCA_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+
+//
+// 3D Usages
+//
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_UMD_BATCH_BUFFER , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_BINDING_TABLE_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CCS , 1 , 0 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CONSTANT_BUFFER_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DEPTH_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DISPLAYABLE_RENDER_TARGET , 0 , EDRAM, 1 , EDRAM , 0 , 0, 0, 0, NS, 0, 0, 0, SP );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_GATHER_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_SURFACE_STATE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_DYNAMIC_STATE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_GENERAL_STATE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_GENERAL_STATE_UC , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_STATELESS_DATA_PORT , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_INDIRECT_OBJECT , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HEAP_INSTRUCTION , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HIZ , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_INDEX_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_INDEX_BUFFER_L3_COHERENT_UC , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_INDEX_BUFFER_L3_CACHED , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MCS , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PUSH_CONSTANT_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PULL_CONSTANT_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 1, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_QUERY , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_RENDER_TARGET , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SHADER_RESOURCE , 0 , 1 , 1 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STAGING , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STENCIL_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STREAM_OUTPUT_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILE_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SHADER_RESOURCE_LLC_BYPASS , 0 , 1 , 1 , 0 , 0 , 0, 0, 0, NS, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MOCS_62 , 1 , 0 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_L3_EVICTION , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RW );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_L3_EVICTION_SPECIAL , 0 , EDRAM, 1 , EDRAM , 0 , 0, 0, 0, NS, 0, 0, 0, SP );
+
+// Tiled Resource
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_DEPTH_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_HIZ , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_MCS , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_CCS , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_RENDER_TARGET , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_RENDER_TARGET_AND_SHADER_RESOURCE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_SHADER_RESOURCE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILED_UAV , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_UAV , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VERTEX_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_COHERENT_UC , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VERTEX_BUFFER_L3_CACHED , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OGL_WSTN_VERTEX_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_POSH_VERTEX_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_RENDER_TARGET_AND_SHADER_RESOURCE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_WDDM_HISTORY_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CONTEXT_SAVE_RESTORE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PTBR_PAGE_POOL , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PTBR_BATCH_BUFFER , 0 , 0 , 1 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, RO );
+
+//
+// CM USAGES
+//
+// USAGE TYPE , LLC , ELLC , L3 , WT , AGE , AOM , LeCC_SCC , L3_SCC, SCF, SSO, CoS, HDCL1, L3Evict )
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_SurfaceState, 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_StateHeap, 1 , 0 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_L1_Enabled_SurfaceState, 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 1, RO );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_L3_SurfaceState, 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_LLC_ELLC_SurfaceState, 0 , 0 , 1 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_LLC_SurfaceState, 0 , 1 , 1 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_ELLC_SurfaceState, 1 , 0 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_LLC_L3_SurfaceState, 0 , 1 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_ELLC_L3_SurfaceState, 1 , 0 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(CM_RESOURCE_USAGE_NO_CACHE_SurfaceState, 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+
+//
+// MP USAGES
+//
+DEFINE_CACHE_ELEMENT(MP_RESOURCE_USAGE_BEGIN, 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(MP_RESOURCE_USAGE_DEFAULT, 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(MP_RESOURCE_USAGE_SurfaceState, 1 , 1 , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(MP_RESOURCE_USAGE_END, 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+
+// MHW - SFC
+DEFINE_CACHE_ELEMENT(MHW_RESOURCE_USAGE_Sfc_CurrentOutputSurface, 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(MHW_RESOURCE_USAGE_Sfc_AvsLineBufferSurface, 1 , 1 , 1 , 0 , 1, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(MHW_RESOURCE_USAGE_Sfc_IefLineBufferSurface, 1 , 1 , 1 , 0 , 1, 0, 0, 0, 0, 0, 0, 0, RO );
+
+//Media GMM Resource USAGES
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PRE_DEBLOCKING_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_POST_DEBLOCKING_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_ORIGINAL_UNCOMPRESSED_PICTURE_ENCODE , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_ORIGINAL_UNCOMPRESSED_PICTURE_DECODE , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_STREAMOUT_DATA_CODEC , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_INTRA_ROWSTORE_SCRATCH_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DEBLOCKINGFILTER_ROWSTORE_SCRATCH_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_REFERENCE_PICTURE_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MACROBLOCK_STATUS_BUFFER_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MFX_INDIRECT_BITSTREAM_OBJECT_DECODE , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MFX_INDIRECT_MV_OBJECT_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MFD_INDIRECT_IT_COEF_OBJECT_DECODE , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MFC_INDIRECT_PAKBASE_OBJECT_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_BSDMPC_ROWSTORE_SCRATCH_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MPR_ROWSTORE_SCRATCH_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_BITPLANE_READ_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_AACSBIT_VECTOR_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DIRECTMV_BUFFER_CODEC , 0 , EDRAM , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_CURR_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_REF_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MV_DATA_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_HME_DOWNSAMPLED_ENCODE_DST , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ME_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_ME_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PAK_OBJECT_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_FLATNESS_CHECK_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MBENC_CURBE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VDENC_ROW_STORE_BUFFER_CODEC , 1 , 0 , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VDENC_STREAMIN_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_MD_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_SAO_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_MV_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_STATUS_ERROR_CODEC , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_LCU_ILDB_STREAMOUT_CODEC , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VP9_PROBABILITY_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VP9_SEGMENT_ID_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VP9_HVD_ROWSTORE_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
+
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MACROBLOCK_ILDB_STREAM_OUT_BUFFER_CODEC , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SSE_SRC_PIXEL_ROW_STORE_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SLICE_STATE_STREAM_OUT_BUFFER_CODEC , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CABAC_SYNTAX_STREAM_OUT_BUFFER_CODEC , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PRED_COL_STORE_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
+
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_UNCACHED , 0 , 0 , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ELLC_ONLY , 0 , EDRAM , 0 , 0 , 0, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ELLC_LLC_ONLY , 1 , EDRAM , 0 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ELLC_LLC_L3 , 1 , EDRAM , 1 , 0 , 3, 0 , 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_CCS_MEDIA_WRITABLE , 0 , EDRAM , 1 , EDRAM , 0, 0, 0, 0, NS, 0, 0, 0, SP );
+
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_HISTORY_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_SOFTWARE_SCOREBOARD_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ME_MV_DATA_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MV_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_4XME_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_INTRA_DISTORTION_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MB_STATS_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_PAK_STATS_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_PIC_STATE_READ_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_PIC_STATE_WRITE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_COMBINED_ENC_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_CONSTANT_DATA_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_INTERMEDIATE_CU_RECORD_SURFACE_ENCODE , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_SCRATCH_ENCODE , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_LCU_LEVEL_DATA_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_HISTORY_INPUT_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_HISTORY_OUTPUT_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_DEBUG_ENCODE , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_CONSTANT_TABLE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_CU_RECORD_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_MV_TEMPORAL_BUFFER_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_CU_PACKET_FOR_PAK_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_BCOMBINED1_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_ENC_BCOMBINED2_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_FRAME_STATS_STREAMOUT_DATA_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DEBLOCKINGFILTER_ROWSTORE_TILE_LINE_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_DEBLOCKINGFILTER_ROWSTORE_TILE_COLUMN_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_MD_TILE_LINE_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_MD_TILE_COLUMN_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_SAO_TILE_LINE_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HCP_SAO_TILE_COLUMN_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_VP9_PROBABILITY_COUNTER_BUFFER_CODEC , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_HUC_VIRTUAL_ADDR_REGION_BUFFER_CODEC , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SIZE_STREAMOUT_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_COMPRESSED_HEADER_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_PROBABILITY_DELTA_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILE_RECORD_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_TILE_SIZE_STAS_BUFFER_CODEC , 1 , EDRAM , 0 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MAD_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_PAK_IMAGESTATE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MBENC_BRC_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_MB_BRC_CONST_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_MB_QP_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_BRC_ROI_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_MBDISABLE_SKIPMAP_CODEC , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_SLICE_MAP_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_WP_DOWNSAMPLED_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_SURFACE_VDENC_IMAGESTATE_ENCODE , 1 , EDRAM , 1 , 0 , 3, 0, 0, 0, 0, 0, 0, 0, RO );
+
+/**********************************************************************************/
+
+//
+// OCL Usages
+//
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_BUFFER_CONST , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 1, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_BUFFER_CSR_UC , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_BUFFER_CACHELINE_MISALIGNED , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_IMAGE , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_INLINE_CONST , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_INLINE_CONST_HDC , 1 , 1 , 1, 0 , 3 , 0, 0, 0, 0, 0, 0, 1, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_SCRATCH , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_PRIVATE_MEM , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_PRINTF_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_STATE_HEAP_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_SYSTEM_MEMORY_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_SYSTEM_MEMORY_BUFFER_CACHELINE_MISALIGNED , 1 , 1 , 0 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_ISH_HEAP_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_TAG_MEMORY_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_TEXTURE_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 0, 0, 0, RO );
+DEFINE_CACHE_ELEMENT(GMM_RESOURCE_USAGE_OCL_SELF_SNOOP_BUFFER , 1 , 1 , 1 , 0 , 3 , 0, 0, 0, 0, 3, 0, 0, RO );
+/**********************************************************************************/
+
+// Cross Adapter
+DEFINE_CACHE_ELEMENT( GMM_RESOURCE_USAGE_XADAPTER_SHARED_RESOURCE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+/**********************************************************************************/
+
+// BCS
+DEFINE_CACHE_ELEMENT( GMM_RESOURCE_USAGE_BLT_SOURCE , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+DEFINE_CACHE_ELEMENT( GMM_RESOURCE_USAGE_BLT_DESTINATION , 0 , 0 , 0 , 0 , 0 , 0, 0, 0, 0, 0, 0, 0, NA );
+/**********************************************************************************/
+
+#include "GmmCachePolicyUndefineConditionals.h"
+
diff --git a/Source/GmmLib/CachePolicy/GmmGen8CachePolicy.cpp b/Source/GmmLib/CachePolicy/GmmGen8CachePolicy.cpp
index 3d7b50c..f1aaddf 100644
--- a/Source/GmmLib/CachePolicy/GmmGen8CachePolicy.cpp
+++ b/Source/GmmLib/CachePolicy/GmmGen8CachePolicy.cpp
@@ -162,7 +162,7 @@
{
GMM_PRIVATE_PAT PAT = {0};
- if(pGmmGlobalContext->GetWaTable().WaNoMocsEllcOnly)
+ if(pGmmGlobalContext->GetWaTable().FtrMemTypeMocsDeferPAT)
{
GfxTargetCache = GMM_GFX_TC_ELLC_ONLY;
}
@@ -225,7 +225,7 @@
// For BDW-H, due to Perf issue, TC has to be eLLC only for Page Tables when eDRAM is present.
GfxMemType = GMM_GFX_WB;
- if(pGmmGlobalContext->GetWaTable().WaNoMocsEllcOnly)
+ if(pGmmGlobalContext->GetWaTable().FtrMemTypeMocsDeferPAT)
{
GfxTargetCache = GMM_GFX_TC_ELLC_ONLY;
}
@@ -285,10 +285,6 @@
WA_TABLE * pWaTable = &const_cast<WA_TABLE &>(pGmmGlobalContext->GetWaTable());
#if(defined(__GMM_KMD__))
- if(pGmmGlobalContext->GetGtSysInfoPtr()->EdramSizeInKb)
- {
- pWaTable->WaNoMocsEllcOnly = 1;
- }
pWaTable->WaGttPat0 = 1;
pWaTable->WaGttPat0WB = 1;
diff --git a/Source/GmmLib/CachePolicy/GmmGen9CachePolicy.cpp b/Source/GmmLib/CachePolicy/GmmGen9CachePolicy.cpp
index c16516f..5380a1a 100644
--- a/Source/GmmLib/CachePolicy/GmmGen9CachePolicy.cpp
+++ b/Source/GmmLib/CachePolicy/GmmGen9CachePolicy.cpp
@@ -290,7 +290,7 @@
{
GMM_PRIVATE_PAT PAT = {0};
- if(pGmmGlobalContext->GetWaTable().WaNoMocsEllcOnly)
+ if(pGmmGlobalContext->GetWaTable().FtrMemTypeMocsDeferPAT)
{
GfxTargetCache = GMM_GFX_TC_ELLC_ONLY;
}
diff --git a/Source/GmmLib/GlobalInfo/GmmClientContext.cpp b/Source/GmmLib/GlobalInfo/GmmClientContext.cpp
index 7b7ed58..655f9ff 100644
--- a/Source/GmmLib/GlobalInfo/GmmClientContext.cpp
+++ b/Source/GmmLib/GlobalInfo/GmmClientContext.cpp
@@ -284,6 +284,45 @@
}
/////////////////////////////////////////////////////////////////////////////////////
+/// Member function of ClientContext class for returning
+/// RENDER_SURFACE_STATE::CompressionFormat
+///
+/// @return uint8_t
+/////////////////////////////////////////////////////////////////////////////////////
+uint8_t GMM_STDCALL GmmLib::GmmClientContext::GetSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT Format)
+{
+ __GMM_ASSERT((Format > GMM_FORMAT_INVALID) && (Format < GMM_RESOURCE_FORMATS));
+
+ return pGmmGlobalContext->GetPlatformInfo().FormatTable[Format].CompressionFormat.AuxL1eFormat;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Member function of ClientContext class for returning
+/// MEDIA_SURFACE_STATE::CompressionFormat
+///
+/// @return uint8_t
+/////////////////////////////////////////////////////////////////////////////////////
+uint8_t GMM_STDCALL GmmLib::GmmClientContext::GetMediaSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT Format)
+{
+ __GMM_ASSERT((Format > GMM_FORMAT_INVALID) && (Format < GMM_RESOURCE_FORMATS));
+
+ return pGmmGlobalContext->GetPlatformInfoObj()->OverrideCompressionFormat(Format, (uint8_t)0x1);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Member function of ClientContext class for returning E2E compression format
+///
+/// @return GMM_E2ECOMP_FORMAT
+/////////////////////////////////////////////////////////////////////////////////////
+GMM_E2ECOMP_FORMAT GMM_STDCALL GmmLib::GmmClientContext::GetLosslessCompressionType(GMM_RESOURCE_FORMAT Format)
+{
+ // ToDo: Remove the definition of GmmGetLosslessCompressionType(Format)
+ __GMM_ASSERT((Format > GMM_FORMAT_INVALID) && (Format < GMM_RESOURCE_FORMATS));
+
+ return pGmmGlobalContext->GetPlatformInfo().FormatTable[Format].CompressionFormat.AuxL1eFormat;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
/// Member function of ClientContext class to return InternalGpuVaMax value
/// stored in pGmmGlobalContext
///
diff --git a/Source/GmmLib/Platform/GmmGen10Platform.cpp b/Source/GmmLib/Platform/GmmGen10Platform.cpp
index c4a049c..95c8d8e 100644
--- a/Source/GmmLib/Platform/GmmGen10Platform.cpp
+++ b/Source/GmmLib/Platform/GmmGen10Platform.cpp
@@ -510,4 +510,7 @@
Data.ReconMaxHeight = Data.Texture2DSurface.MaxHeight; // Reconstructed surfaces require more height and width for higher resolutions.
Data.ReconMaxWidth = Data.Texture2DSurface.MaxWidth;
+
+ Data.NoOfBitsSupported = 39;
+ Data.HighestAcceptablePhysicalAddress = GFX_MASK_LARGE(0, 38);
}
diff --git a/Source/GmmLib/Platform/GmmGen12Platform.cpp b/Source/GmmLib/Platform/GmmGen12Platform.cpp
new file mode 100644
index 0000000..691db2d
--- /dev/null
+++ b/Source/GmmLib/Platform/GmmGen12Platform.cpp
@@ -0,0 +1,425 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+#include "Internal/Common/GmmLibInc.h"
+#include "Internal/Common/Platform/GmmGen12Platform.h"
+
+/************************ RT->CCS Sizing definitions ************************
+ H/V/D Align and Downscale factor to obtain CCS from given RT dimensions
+ Restrictions:
+ CCS's RT (2D/3D YF) alignment to 4x1 (2D/3D YF) pages sharing 1x1 Aux$line
+ (2D YS) 2x2 ( 2D YF) pages "
+ (3D YS) 2x1x2 ( 3D YF) pages " ie Slices share Aux$line
+ (Color MSAA'd YF) 4x1 (MSAA'dYF) pages " ie all samples share Aux$line (Samples are array'd ie YF 4KB = YF-MSAA x MSAA-Samples)
+ (Color MSAA 2x/4x YS) 2x2x1 ( 2D YF) pages " ie Single sample per Aux$line
+ (Color MSAA 8x YS) 1x2x2 ( 2D YF) pages " ie 2 samples share Aux$line
+ (Color MSAA 16x YS) 1x1x4 ( 2D YF) pages " ie 4 samples share Aux$line
+ (Depth MSAA YF) 4x1 ( 2D YF) pages " ie all samples share Aux$line
+ (Depth MSAA 2x/4x YS) 2x2x1 ( 2D YF) pages " ie Single sample per Aux$line
+ (Depth MSAA 8x YS) 1x2x2 ( 2D YF) pages " ie 2 samples share Aux$line
+ (Depth MSAA 16x YS) 1x1x4 ( 2D YF) pages " ie 4 samples share Aux$line
+ ie Depth/Color MSAA have common alignment, but due to different pixel packing (Depth MSS is interleaved, Color MSS is arrayed)
+ SamplePerAux$line samples are X-major (for Depth), while Y-major (for Color) packed ie For Depth MSAA, Hdownscale *=SamplePerAux$line;
+ for color MSAA, Vdownscale *= SamplePerAux$line (TODO: confirm — original read "Vdownscale = Vdownscale"); for both, MSAA-samples/SamplePerAux$line times sample shared CCS-size
+
+ HAlign: Horizontal Align in pixels
+ VAlign: Vertical Align in pixels
+ DAlign: Depth Align in pixels
+ HAlignxVAlignxDAlign [RT size] occupies one Aux$line
+ SamplesPerAux$line: Samples sharing CCS; NSamples divisor on MSAA-samples giving multiple (on shared CCS) to cover all samples
+ HDownscale: width divisor on CCSRTAlign`d width
+ VDownscale: height divisor on CCSRTAlign`d height
+ Convention:
+ (+ve) HDownscale/VDownscale are downscale factors, and used as divisors
+ (-ve) HDownscale/VDownscale are upscale factors, their absolute value used as multipliers
+ ie if HDownscale etc is smaller than 1, its reciprocal is stored with -ve sign
+ <---- CCSRTALIGN -----> <-- RT->CCS downscale-->
+ ( TileMode, HAlign , VAlign, DAlign, HDownscale, VDownscale)
+ or
+ SamplesPerAux$line,
+eg:
+ CCSRTALIGN(TILE_YF_2D_8bpe, 256, 64, 1, 16, 16 )
+**********************************************************************************************************/
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// This function initializes the necessary info based on platform.
+/// - Buffer type restrictions (Eg: Z, Color, Display)
+/// - X/Y tile dimensions
+///
+/// @param[in] Platform: Contains information about platform to initialize an object
+/////////////////////////////////////////////////////////////////////////////////////
+GmmLib::PlatformInfoGen12::PlatformInfoGen12(PLATFORM &Platform)
+ : PlatformInfoGen11(Platform)
+
+{
+ __GMM_ASSERTPTR(pGmmGlobalContext, VOIDRETURN);
+
+ //Compression format update
+ GMM_RESOURCE_FORMAT GmmFormat;
+#define GMM_FORMAT_SKU(FtrXxx) (pGmmGlobalContext->GetSkuTable().FtrXxx != 0)
+#define GMM_COMPR_FORMAT_INVALID (static_cast<uint8_t>(GMM_E2ECOMP_FORMAT_INVALID))
+#define GMM_FORMAT(Name, bpe, _Width, _Height, _Depth, IsRT, IsASTC, RcsSurfaceFormat, SSCompressionFmt, Availability) \
+ \
+ { \
+ GmmFormat = GMM_FORMAT_##Name; \
+ Data.FormatTable[GmmFormat].CompressionFormat.CompressionFormat = static_cast<uint8_t>(SSCompressionFmt); \
+ }
+
+#include "External/Common/GmmFormatTable.h"
+
+
+ // --------------------------
+ // Surface Alignment Units
+ // --------------------------
+
+ // 3DSTATE_DEPTH_BUFFER
+ //======================================================================
+ // Surf Format | MSAA | HAlign | VAlign |
+ //======================================================================
+ // D16_UNORM | 1x, 4x, 16x | 8 | 8 |
+ // D16_UNORM | 2x, 8x | 16 | 4 |
+ // Not D16_UNORM | 1x,2x,4x,8x,16x | 8 | 4 |
+ //======================================================================
+
+
+ // 3DSTATE_STENCIL_BUFFER
+ //======================================================================
+ // Surf Format | MSAA | HAlign | VAlign |
+ //======================================================================
+ // N/A | N/A | 16 | 8 |
+ //======================================================================
+
+ Data.SurfaceMaxSize = GMM_GBYTE(16384);
+ Data.MaxGpuVirtualAddressBitsPerResource = 44;
+
+ //Override the Height VP9 VdEnc requirement for Gen12 16k resolution.
+ Data.ReconMaxHeight = GMM_KBYTE(48);
+ Data.ReconMaxWidth = GMM_KBYTE(32);
+
+ Data.TexAlign.Depth.Width = 8; // Not D16_UNORM
+ Data.TexAlign.Depth.Height = 4;
+ Data.TexAlign.Depth_D16_UNORM_1x_4x_16x.Width = 8;
+ Data.TexAlign.Depth_D16_UNORM_1x_4x_16x.Height = 8;
+ Data.TexAlign.Depth_D16_UNORM_2x_8x.Width = 16;
+ Data.TexAlign.Depth_D16_UNORM_2x_8x.Height = 4;
+ Data.TexAlign.SeparateStencil.Width = 16;
+ Data.TexAlign.SeparateStencil.Height = 8;
+
+ //CCS unit size ie cacheline
+ Data.TexAlign.CCS.Align.Width = 16;
+ Data.TexAlign.CCS.Align.Height = 4;
+ Data.TexAlign.CCS.Align.Depth = 1;
+ Data.TexAlign.CCS.MaxPitchinTiles = 1024;
+
+ // clang-format off
+//Extended CCS alignment for per bpp/Tiling CCS alignment
+#define CCSRTALIGN(TileMode, HAlign, VAlign, DAlign, HDownscale, VDownscale) \
+ { \
+ TexAlignEx.CCSEx[CCS_MODE(TileMode)].Align.Width = HAlign; \
+ TexAlignEx.CCSEx[CCS_MODE(TileMode)].Align.Height = VAlign; \
+ TexAlignEx.CCSEx[CCS_MODE(TileMode)].Align.Depth = DAlign; \
+ TexAlignEx.CCSEx[CCS_MODE(TileMode)].Downscale.Width = HDownscale; \
+ TexAlignEx.CCSEx[CCS_MODE(TileMode)].Downscale.Height = VDownscale; \
+ TexAlignEx.CCSEx[CCS_MODE(TileMode)].Downscale.Depth = DAlign; \
+ }
+
+ // clang-format off
+//See "RT->CCS Sizing definitions" comments above for explanation on fields
+/********* TileMode HAlign, VAlign, DAlign, HDownscale, VDownscale ***/
+CCSRTALIGN(TILE_YF_2D_8bpe, 256, 64, 1, 16, 16 );
+CCSRTALIGN(TILE_YF_2D_16bpe, 256, 32, 1, 16, 8 );
+CCSRTALIGN(TILE_YF_2D_32bpe, 128, 32, 1, 8, 8 );
+CCSRTALIGN(TILE_YF_2D_64bpe, 128, 16, 1, 8, 4 );
+CCSRTALIGN(TILE_YF_2D_128bpe, 64, 16, 1, 4, 4 );
+
+CCSRTALIGN(TILE_YF_3D_8bpe, 64, 16, 16, 4, 4 );
+CCSRTALIGN(TILE_YF_3D_16bpe, 32, 16, 16, 2, 4 );
+CCSRTALIGN(TILE_YF_3D_32bpe, 32, 16, 8, 2, 4 );
+CCSRTALIGN(TILE_YF_3D_64bpe, 32, 8, 8, 2, 2 );
+CCSRTALIGN(TILE_YF_3D_128bpe, 16, 8, 8, 1, 2 );
+
+CCSRTALIGN(TILE_YF_2D_2X_8bpe, 128, 64, 2, 8, 16 );
+CCSRTALIGN(TILE_YF_2D_2X_16bpe, 128, 32, 2, 8, 8 );
+CCSRTALIGN(TILE_YF_2D_2X_32bpe, 64, 32, 2, 4, 8 );
+CCSRTALIGN(TILE_YF_2D_2X_64bpe, 64, 16, 2, 4, 4 );
+CCSRTALIGN(TILE_YF_2D_2X_128bpe, 32, 16, 2, 2, 4 );
+
+CCSRTALIGN(TILE_YF_2D_4X_8bpe, 128, 32, 4, 8, 8 );
+CCSRTALIGN(TILE_YF_2D_4X_16bpe, 128, 16, 4, 8, 4 );
+CCSRTALIGN(TILE_YF_2D_4X_32bpe, 64, 16, 4, 4, 4 );
+CCSRTALIGN(TILE_YF_2D_4X_64bpe, 64, 8, 4, 4, 2 );
+CCSRTALIGN(TILE_YF_2D_4X_128bpe, 32, 8, 4, 2, 2 );
+
+CCSRTALIGN(TILE_YF_2D_8X_8bpe, 64, 32, 8, 4, 8 );
+CCSRTALIGN(TILE_YF_2D_8X_16bpe, 64, 16, 8, 4, 4 );
+CCSRTALIGN(TILE_YF_2D_8X_32bpe, 32, 16, 8, 2, 4 );
+CCSRTALIGN(TILE_YF_2D_8X_64bpe, 32, 8, 8, 2, 2 );
+CCSRTALIGN(TILE_YF_2D_8X_128bpe, 16, 8, 8, 1, 2 );
+
+CCSRTALIGN(TILE_YF_2D_16X_8bpe, 64, 16, 16, 4, 4 );
+CCSRTALIGN(TILE_YF_2D_16X_16bpe, 64, 8, 16, 4, 2 );
+CCSRTALIGN(TILE_YF_2D_16X_32bpe, 32, 8, 16, 2, 2 );
+CCSRTALIGN(TILE_YF_2D_16X_64bpe, 32, 4, 16, 2, 1 );
+CCSRTALIGN(TILE_YF_2D_16X_128bpe, 16, 4, 16, 1, 1 );
+
+CCSRTALIGN(TILE_YS_2D_8bpe, 128, 128, 1, 8, 32 );
+CCSRTALIGN(TILE_YS_2D_16bpe, 128, 64, 1, 8, 16 );
+CCSRTALIGN(TILE_YS_2D_32bpe, 64, 64, 1, 4, 16 );
+CCSRTALIGN(TILE_YS_2D_64bpe, 64, 32, 1, 4, 8 );
+CCSRTALIGN(TILE_YS_2D_128bpe, 32, 32, 1, 2, 8 );
+
+CCSRTALIGN(TILE_YS_3D_8bpe, 32, 16, 32, 2, 4 );
+CCSRTALIGN(TILE_YS_3D_16bpe, 16, 16, 32, 1, 4 );
+CCSRTALIGN(TILE_YS_3D_32bpe, 16, 16, 16, 1, 4 );
+CCSRTALIGN(TILE_YS_3D_64bpe, 16, 8, 16, 1, 2 );
+CCSRTALIGN(TILE_YS_3D_128bpe, 8, 8, 16, -2, 2 );
+
+CCSRTALIGN(TILE_YS_2D_2X_8bpe, 128, 128, 1, 8, 32 );
+CCSRTALIGN(TILE_YS_2D_2X_16bpe, 128, 64, 1, 8, 16 );
+CCSRTALIGN(TILE_YS_2D_2X_32bpe, 64, 64, 1, 4, 16 );
+CCSRTALIGN(TILE_YS_2D_2X_64bpe, 64, 32, 1, 4, 8 );
+CCSRTALIGN(TILE_YS_2D_2X_128bpe, 32, 32, 1, 2, 8 );
+
+CCSRTALIGN(TILE_YS_2D_4X_8bpe, 128, 128, 1, 8, 32 );
+CCSRTALIGN(TILE_YS_2D_4X_16bpe, 128, 64, 1, 8, 16 );
+CCSRTALIGN(TILE_YS_2D_4X_32bpe, 64, 64, 1, 4, 16 );
+CCSRTALIGN(TILE_YS_2D_4X_64bpe, 64, 32, 1, 4, 8 );
+CCSRTALIGN(TILE_YS_2D_4X_128bpe, 32, 32, 1, 2, 8 );
+
+CCSRTALIGN(TILE_YS_2D_8X_8bpe, 64, 128, 2, 4, 32 );
+CCSRTALIGN(TILE_YS_2D_8X_16bpe, 64, 64, 2, 4, 16 );
+CCSRTALIGN(TILE_YS_2D_8X_32bpe, 32, 64, 2, 2, 16 );
+CCSRTALIGN(TILE_YS_2D_8X_64bpe, 32, 32, 2, 2, 8 );
+CCSRTALIGN(TILE_YS_2D_8X_128bpe, 16, 32, 2, 1, 8 );
+
+CCSRTALIGN(TILE_YS_2D_16X_8bpe, 64, 64, 4, 4, 16 );
+CCSRTALIGN(TILE_YS_2D_16X_16bpe, 64, 32, 4, 4, 8 );
+CCSRTALIGN(TILE_YS_2D_16X_32bpe, 32, 32, 4, 2, 8 );
+CCSRTALIGN(TILE_YS_2D_16X_64bpe, 32, 16, 4, 2, 4 );
+CCSRTALIGN(TILE_YS_2D_16X_128bpe, 16, 16, 4, 1, 4 );
+#undef CCSRTALIGN
+// clang-format on
+
+#define FCRECTALIGN(TileMode, bpp, HAlign, VAlign, HDownscale, VDownscale) \
+ { \
+ FCTileMode[FCMode(TileMode, bpp)].Align.Width = HAlign; \
+ FCTileMode[FCMode(TileMode, bpp)].Align.Height = VAlign; \
+ FCTileMode[FCMode(TileMode, bpp)].Align.Depth = 1; \
+ FCTileMode[FCMode(TileMode, bpp)].Downscale.Width = HDownscale; \
+ FCTileMode[FCMode(TileMode, bpp)].Downscale.Height = VDownscale; \
+ FCTileMode[FCMode(TileMode, bpp)].Downscale.Depth = 1; \
+ }
+
+ // clang-format off
+FCRECTALIGN(LEGACY_TILE_Y , 8, 512, 32, 256, 16);
+FCRECTALIGN(LEGACY_TILE_Y , 16, 256, 32, 128, 16);
+FCRECTALIGN(LEGACY_TILE_Y , 32, 128, 32, 64, 16);
+FCRECTALIGN(LEGACY_TILE_Y , 64, 64, 32, 32, 16);
+FCRECTALIGN(LEGACY_TILE_Y , 128, 32, 32, 16, 16);
+
+FCRECTALIGN(TILE_YF_2D_8bpe , 8, 256, 64, 128, 32);
+FCRECTALIGN(TILE_YF_2D_16bpe , 16, 256, 32, 128, 16);
+FCRECTALIGN(TILE_YF_2D_32bpe , 32, 128, 32, 64, 16);
+FCRECTALIGN(TILE_YF_2D_64bpe , 64, 128, 16, 64, 8);
+FCRECTALIGN(TILE_YF_2D_128bpe, 128, 64, 16, 32, 8);
+
+FCRECTALIGN(TILE_YS_2D_8bpe , 8, 128, 128, 64, 64);
+FCRECTALIGN(TILE_YS_2D_16bpe , 16, 128, 64, 64, 32);
+FCRECTALIGN(TILE_YS_2D_32bpe , 32, 64, 64, 32, 32);
+FCRECTALIGN(TILE_YS_2D_64bpe , 64, 64, 32, 32, 16);
+FCRECTALIGN(TILE_YS_2D_128bpe, 128, 32, 32, 16, 16);
+#undef FCRECTALIGN
+
+ // clang-format on
+ Data.NoOfBitsSupported = 39;
+ Data.HighestAcceptablePhysicalAddress = GFX_MASK_LARGE(0, 38);
+}
+
+void GmmLib::PlatformInfoGen12::ApplyExtendedTexAlign(uint32_t CCSMode, ALIGNMENT &UnitAlign)
+{
+ if(CCSMode < CCS_MODES)
+ {
+ UnitAlign.Width = TexAlignEx.CCSEx[CCSMode].Align.Width;
+ UnitAlign.Height = TexAlignEx.CCSEx[CCSMode].Align.Height;
+ UnitAlign.Depth = TexAlignEx.CCSEx[CCSMode].Align.Depth;
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Copies parameters or sets flags based on info sent by the client.
+///
+/// @param[in] Flags: Flags which specify what sort of resource to create
+/////////////////////////////////////////////////////////////////////////////////////
+void GmmLib::PlatformInfoGen12::SetCCSFlag(GMM_RESOURCE_FLAG &Flags)
+{
+ if(Flags.Gpu.MMC)
+ {
+ Flags.Gpu.CCS = Flags.Gpu.MMC;
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Validates the MMC parameters passed in by clients to make sure they do not
+/// conflict or ask for unsupported combinations/features.
+///
+/// @param[in] Surf: GMM_TEXTURE_INFO describing the resource to create
+/// @return 1 if validation passed, 0 otherwise.
+/////////////////////////////////////////////////////////////////////////////////////
+uint8_t GmmLib::PlatformInfoGen12::ValidateMMC(GMM_TEXTURE_INFO &Surf)
+{
+ if(Surf.Flags.Gpu.MMC && //For Media Memory Compression --
+ (!(GMM_IS_4KB_TILE(Surf.Flags) || GMM_IS_64KB_TILE(Surf.Flags)) &&
+ (!Surf.Flags.Gpu.__NonMsaaLinearCCS)))
+ {
+ return 0;
+ }
+ return 1;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Validates the parameters passed in by clients to make sure they do not
+/// conflict or ask for unsupported combinations/features.
+///
+/// @param[in] Surf: GMM_TEXTURE_INFO describing the resource to create
+/// @return 1 if validation passed, 0 otherwise.
+/////////////////////////////////////////////////////////////////////////////////////
+uint8_t GmmLib::PlatformInfoGen12::ValidateCCS(GMM_TEXTURE_INFO &Surf)
+{
+
+ if(!( //--- Legitimate CCS Case ----------------------------------------
+ ((Surf.Type >= RESOURCE_2D && Surf.Type <= RESOURCE_BUFFER) && ////Not supported: 1D; Supported: Buffer, 2D, 3D, cube, Arrays, mip-maps, MSAA, Depth/Stencil
+ (!(Surf.Flags.Info.RenderCompressed || Surf.Flags.Info.MediaCompressed) || //Not compressed surface eg separate Aux Surf
+ (GMM_IS_4KB_TILE(Surf.Flags) || GMM_IS_64KB_TILE(Surf.Flags)) || //Only on Y/Ys
+ (Surf.Flags.Info.Linear && Surf.Type == RESOURCE_BUFFER && //Machine-Learning compression on untyped linear buffer
+ Surf.Flags.Info.RenderCompressed)))))
+ {
+ GMM_ASSERTDPF(0, "Invalid CCS usage!");
+ return 0;
+ }
+
+ //Compressed resource (main surf) must pre-define MC/RC type
+ if(!(Surf.Flags.Gpu.__NonMsaaTileYCcs || Surf.Flags.Gpu.__NonMsaaLinearCCS) &&
+ !Surf.Flags.Gpu.ProceduralTexture &&
+ !(Surf.Flags.Info.RenderCompressed || Surf.Flags.Info.MediaCompressed))
+ {
+ GMM_ASSERTDPF(0, "Invalid CCS usage - RC/MC type unspecified!");
+ return 0;
+ }
+
+ if(Surf.Flags.Info.RenderCompressed && Surf.Flags.Info.MediaCompressed)
+ {
+ GMM_ASSERTDPF(0, "Invalid CCS usage - can't be both RC and MC!");
+ return 0;
+ }
+
+ if(!pGmmGlobalContext->GetSkuTable().FtrLinearCCS &&
+ (Surf.Type == RESOURCE_3D || Surf.MaxLod > 0 || Surf.MSAA.NumSamples > 1 ||
+ !(Surf.Flags.Info.TiledYf || GMM_IS_64KB_TILE(Surf.Flags))))
+ {
+ GMM_ASSERTDPF(0, "CCS support for (volumetric, mip'd, MSAA'd, TileY) resources only enabled with Linear CCS!");
+ return 0;
+ }
+
+ GMM_ASSERTDPF((Surf.Flags.Wa.PreGen12FastClearOnly == 0), "FastClear Only unsupported on Gen12+!");
+ Surf.Flags.Wa.PreGen12FastClearOnly = 0;
+
+ return 1;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Validates the UnifiedAuxSurface parameters passed in by clients to make sure they do not
+/// conflict or ask for unsupported combinations/features.
+///
+/// @param[in] Surf: GMM_TEXTURE_INFO describing the resource to create
+/// @return 1 if validation passed, 0 otherwise.
+/////////////////////////////////////////////////////////////////////////////////////
+uint8_t GmmLib::PlatformInfoGen12::ValidateUnifiedAuxSurface(GMM_TEXTURE_INFO &Surf)
+{
+
+ if((Surf.Flags.Gpu.UnifiedAuxSurface) &&
+ !( //--- Legitimate UnifiedAuxSurface Case ------------------------------------------
+ Surf.Flags.Gpu.CCS &&
+ ((Surf.MSAA.NumSamples <= 1 && (Surf.Flags.Gpu.RenderTarget || Surf.Flags.Gpu.Texture)) ||
+ ((Surf.Flags.Gpu.Depth || Surf.Flags.Gpu.SeparateStencil || Surf.MSAA.NumSamples > 1)))))
+ {
+ GMM_ASSERTDPF(0, "Invalid UnifiedAuxSurface usage!");
+ return 0;
+ }
+
+ return 1;
+}
+
+//=============================================================================
+//
+// Function: CheckFmtDisplayDecompressible
+//
+// Desc: Returns true if display HW supports lossless render/media decompression,
+// else returns false. Restrictions come from display HW support for the format.
+// UMDs can call it to decide if a full resolve is required.
+//
+// Parameters:
+// See function arguments.
+//
+// Returns:
+// uint8_t
+//-----------------------------------------------------------------------------
+uint8_t GmmLib::PlatformInfoGen12::CheckFmtDisplayDecompressible(GMM_TEXTURE_INFO &Surf,
+ bool IsSupportedRGB64_16_16_16_16,
+ bool IsSupportedRGB32_8_8_8_8,
+ bool IsSupportedRGB32_2_10_10_10,
+ bool IsSupportedMediaFormats)
+{
+
+ //Check fmt is display decompressible
+ if(((Surf.Flags.Info.RenderCompressed || Surf.Flags.Info.MediaCompressed) &&
+ (IsSupportedRGB64_16_16_16_16 || //RGB64 16:16 : 16 : 16 FP16
+ IsSupportedRGB32_8_8_8_8 || //RGB32 8 : 8 : 8 : 8
+ IsSupportedRGB32_2_10_10_10)) || //RGB32 2 : 10 : 10 : 10) ||
+ (Surf.Flags.Info.MediaCompressed && IsSupportedMediaFormats)) //YUV444 - Y412, Y416
+ {
+ //Display supports compression on TileY, but not Yf/Ys (deprecated for display support)
+ if(GMM_IS_4KB_TILE(Surf.Flags) &&
+ !(Surf.Flags.Info.TiledYf || GMM_IS_64KB_TILE(Surf.Flags)))
+ {
+ return true;
+ }
+ }
+ return false;
+}
+
+//=============================================================================
+//
+// Function: OverrideCompressionFormat
+//
+// Desc: SurfaceState compression format encoding differs for MC vs RC on a few formats. This
+// function overrides default RC encoding for MC requests. NOTE(review): IsMC is currently unused — confirm intended.
+//
+// Parameters:
+// See function arguments.
+//
+// Returns:
+// uint8_t
+//-----------------------------------------------------------------------------
+uint8_t GmmLib::PlatformInfoGen12::OverrideCompressionFormat(GMM_RESOURCE_FORMAT Format, uint8_t IsMC)
+{
+ return Data.FormatTable[Format].CompressionFormat.CompressionFormat;
+}
diff --git a/Source/GmmLib/Platform/GmmGen9Platform.cpp b/Source/GmmLib/Platform/GmmGen9Platform.cpp
index 2e41592..802cb81 100644
--- a/Source/GmmLib/Platform/GmmGen9Platform.cpp
+++ b/Source/GmmLib/Platform/GmmGen9Platform.cpp
@@ -494,4 +494,7 @@
Data.ReconMaxHeight = Data.Texture2DSurface.MaxHeight; // Reconstructed surfaces require more height and width for higher resolutions.
Data.ReconMaxWidth = Data.Texture2DSurface.MaxWidth;
+
+ Data.NoOfBitsSupported = 39;
+ Data.HighestAcceptablePhysicalAddress = GFX_MASK_LARGE(0, 38);
}
diff --git a/Source/GmmLib/Platform/GmmPlatform.cpp b/Source/GmmLib/Platform/GmmPlatform.cpp
index 498be60..d05e2fb 100644
--- a/Source/GmmLib/Platform/GmmPlatform.cpp
+++ b/Source/GmmLib/Platform/GmmPlatform.cpp
@@ -37,23 +37,24 @@
#define GMM_FORMAT_GEN(X) (GFX_GET_CURRENT_RENDERCORE(Data.Platform) >= IGFX_GEN##X##_CORE)
#define GMM_FORMAT_SKU(FtrXxx) (pGmmGlobalContext->GetSkuTable().FtrXxx != 0)
#define GMM_FORMAT_WA(WaXxx) (pGmmGlobalContext->GetWaTable().WaXxx != 0)
-#define GMM_FORMAT(Name, bpe, _Width, _Height, _Depth, IsRT, IsASTC, RcsSurfaceFormat, AuxL1Format, Availability) \
- \
- { \
- GmmFormat = GMM_FORMAT_##Name; \
- Data.FormatTable[GmmFormat].ASTC = (IsASTC); \
- Data.FormatTable[GmmFormat].Element.BitsPer = (bpe); \
- Data.FormatTable[GmmFormat].Element.Depth = (_Depth); \
- Data.FormatTable[GmmFormat].Element.Height = (_Height); \
- Data.FormatTable[GmmFormat].Element.Width = (_Width); \
- Data.FormatTable[GmmFormat].RenderTarget = ((IsRT) != 0); \
- Data.FormatTable[GmmFormat].SurfaceStateFormat = ((GMM_SURFACESTATE_FORMAT)(RcsSurfaceFormat)); \
- Data.FormatTable[GmmFormat].Reserved = ((uint32_t)(AuxL1Format)); \
- Data.FormatTable[GmmFormat].Supported = ((Availability) != 0); \
- if(((_Depth) > 1) || ((_Height) > 1) || ((_Width) > 1)) \
- { \
- Data.FormatTable[GmmFormat].Compressed = 1; \
- } \
+#define GMM_COMPR_FORMAT_INVALID GMM_E2ECOMP_FORMAT_INVALID
+#define GMM_FORMAT(Name, bpe, _Width, _Height, _Depth, IsRT, IsASTC, RcsSurfaceFormat, SSCompressionFmt, Availability) \
+ \
+ { \
+ GmmFormat = GMM_FORMAT_##Name; \
+ Data.FormatTable[GmmFormat].ASTC = (IsASTC); \
+ Data.FormatTable[GmmFormat].Element.BitsPer = (bpe); \
+ Data.FormatTable[GmmFormat].Element.Depth = (_Depth); \
+ Data.FormatTable[GmmFormat].Element.Height = (_Height); \
+ Data.FormatTable[GmmFormat].Element.Width = (_Width); \
+ Data.FormatTable[GmmFormat].RenderTarget = ((IsRT) != 0); \
+ Data.FormatTable[GmmFormat].SurfaceStateFormat = ((GMM_SURFACESTATE_FORMAT)(RcsSurfaceFormat)); \
+ Data.FormatTable[GmmFormat].CompressionFormat.CompressionFormat = (SSCompressionFmt); \
+ Data.FormatTable[GmmFormat].Supported = ((Availability) != 0); \
+ if(((_Depth) > 1) || ((_Height) > 1) || ((_Width) > 1)) \
+ { \
+ Data.FormatTable[GmmFormat].Compressed = 1; \
+ } \
}
#include "External/Common/GmmFormatTable.h"
diff --git a/Source/GmmLib/Platform/GmmPlatforms.h b/Source/GmmLib/Platform/GmmPlatforms.h
index 8d5f06b..0659c33 100644
--- a/Source/GmmLib/Platform/GmmPlatforms.h
+++ b/Source/GmmLib/Platform/GmmPlatforms.h
@@ -53,6 +53,13 @@
GMM_UNREFERENCED_PARAMETER(UnitAlign);
}
+ virtual uint8_t OverrideCompressionFormat(GMM_RESOURCE_FORMAT Format, uint8_t IsMC)
+ {
+ GMM_UNREFERENCED_PARAMETER(Format);
+ GMM_UNREFERENCED_PARAMETER(IsMC);
+ return 0;
+ }
+
void SetDataSurfaceMaxSize(uint64_t Size)
{
Data.SurfaceMaxSize = Size;
diff --git a/Source/GmmLib/Resource/GmmResourceInfoCommonEx.cpp b/Source/GmmLib/Resource/GmmResourceInfoCommonEx.cpp
index 053932a..25d86a7 100644
--- a/Source/GmmLib/Resource/GmmResourceInfoCommonEx.cpp
+++ b/Source/GmmLib/Resource/GmmResourceInfoCommonEx.cpp
@@ -147,6 +147,7 @@
{
//GMM_ASSERTDPF(Surf.Flags.Gpu.HiZ, "Lossless Z compression supported when Depth+HiZ+CCS is unified");
AuxSecSurf = Surf;
+ AuxSecSurf.Type = AuxSecSurf.Type;
Surf.Flags.Gpu.HiZ = 0; //Its depth buffer, so clear HiZ
AuxSecSurf.Flags.Gpu.HiZ = 0;
AuxSurf.Flags.Gpu.IndirectClearColor = 0; //Clear Depth flags from HiZ, contained with separate/legacy HiZ when Depth isn't compressible.
@@ -163,17 +164,24 @@
return false;
}
Surf.Flags.Gpu.CCS = 1;
+ AuxSurf.Type = AuxSurf.Type;
}
else if(Surf.MSAA.NumSamples > 1 && Surf.Flags.Gpu.CCS) //MSAA+MCS+CCS
{
GMM_ASSERTDPF(Surf.Flags.Gpu.MCS, "Lossless MSAA supported when MSAA+MCS+CCS is unified");
AuxSecSurf = Surf;
+ AuxSecSurf.Type = AuxSecSurf.Type;
AuxSecSurf.Flags.Gpu.MCS = 0;
AuxSurf.Flags.Gpu.CCS = 0;
AuxSurf.Flags.Info.RenderCompressed = AuxSurf.Flags.Info.MediaCompressed = 0;
}
+ else if(Surf.Flags.Gpu.CCS)
+ {
+ AuxSurf.Type = AuxSurf.Type;
+ }
- if(GMM_SUCCESS != pTextureCalc->PreProcessTexSpecialCases(&AuxSurf))
+ if(AuxSurf.Type != RESOURCE_INVALID &&
+ GMM_SUCCESS != pTextureCalc->PreProcessTexSpecialCases(&AuxSurf))
{
return false;
}
@@ -346,6 +354,13 @@
goto ERROR_CASE;
}
+ if((GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) > IGFX_GEN11_CORE) &&
+ Surf.Flags.Info.TiledW)
+ {
+ GMM_ASSERTDPF(0, "Flag not supported on this platform.");
+ goto ERROR_CASE;
+ }
+
if((GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) < IGFX_GEN9_CORE) &&
#if(_DEBUG || _RELEASE_INTERNAL)
!pGmmGlobalContext->GetWaTable().WaDisregardPlatformChecks &&
@@ -390,7 +405,7 @@
}
}
- GetRestrictions(Restrictions);
+ pTextureCalc->GetResRestrictions(&Surf, Restrictions);
// Check array size to make sure it meets HW limits
if((Surf.ArraySize > Restrictions.MaxArraySize) &&
@@ -529,9 +544,10 @@
// IndirectClearColor Restrictions
if((Surf.Flags.Gpu.IndirectClearColor) &&
!( //--- Legitimate IndirectClearColor Case ------------------------------------------
- (GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) >= IGFX_GEN9_CORE) &&
- Surf.Flags.Gpu.UnifiedAuxSurface
- ))
+ ((GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) >= IGFX_GEN9_CORE) &&
+ Surf.Flags.Gpu.UnifiedAuxSurface) ||
+ ((GFX_GET_CURRENT_RENDERCORE(pPlatformResource->Platform) > IGFX_GEN11_CORE) &&
+ (Surf.Flags.Gpu.HiZ || Surf.Flags.Gpu.SeparateStencil))))
{
GMM_ASSERTDPF(0, "Invalid IndirectClearColor usage!");
goto ERROR_CASE;
@@ -667,6 +683,7 @@
case GMM_FORMAT_B10G10R10A2_UINT:
case GMM_FORMAT_B10G10R10A2_UNORM_SRGB:
case GMM_FORMAT_B10G10R10A2_USCALED:
+ case GMM_FORMAT_R10G10B10_FLOAT_A2_UNORM:
case GMM_FORMAT_R10G10B10_SNORM_A2_UNORM:
case GMM_FORMAT_R10G10B10A2_SINT:
case GMM_FORMAT_R10G10B10A2_SNORM:
diff --git a/Source/GmmLib/Resource/GmmRestrictions.cpp b/Source/GmmLib/Resource/GmmRestrictions.cpp
index 07303c5..5f591fc 100644
--- a/Source/GmmLib/Resource/GmmRestrictions.cpp
+++ b/Source/GmmLib/Resource/GmmRestrictions.cpp
@@ -577,7 +577,7 @@
if(pTexinfo->Flags.Info.RenderCompressed ||
pTexinfo->Flags.Info.MediaCompressed)
{
- Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(16));
+ Restrictions.Alignment = GFX_ALIGN(Restrictions.Alignment, GMM_KBYTE(64));
}
GMM_DPF_EXIT;
diff --git a/Source/GmmLib/Texture/GmmGen10Texture.cpp b/Source/GmmLib/Texture/GmmGen10Texture.cpp
index c0b86d5..1e6d378 100644
--- a/Source/GmmLib/Texture/GmmGen10Texture.cpp
+++ b/Source/GmmLib/Texture/GmmGen10Texture.cpp
@@ -395,6 +395,13 @@
AlignedWidth = __GMM_EXPAND_WIDTH(this, Width, HAlign, pTexInfo);
+ // For Non - planar surfaces, the alignment is done on the entire height of the allocation
+ if(pGmmGlobalContext->GetWaTable().WaAlignYUVResourceToLCU &&
+ GmmIsYUVFormatLCUAligned(pTexInfo->Format))
+ {
+ AlignedWidth = GFX_ALIGN(AlignedWidth, GMM_SCANLINES(GMM_MAX_LCU_SIZE));
+ }
+
// Calculate special pitch case of small dimensions where LOD1 + LOD2 widths
// are greater than LOD0. e.g. dimensions 4x4 and MinPitch == 1
if((pTexInfo->Flags.Info.TiledYf || pTexInfo->Flags.Info.TiledYs) &&
@@ -527,11 +534,7 @@
__GMM_ASSERTPTR(pTexInfo, GMM_ERROR);
__GMM_ASSERTPTR(pRestrictions, GMM_ERROR);
__GMM_ASSERT(!pTexInfo->Flags.Info.TiledW);
- // Client should always give us linear-fallback option for planar surfaces,
- // except for MMC surfaces, which are TileY.
- //__GMM_ASSERT(pTexInfo->Flags.Info.Linear || pTexInfo->Flags.Gpu.MMC);
- pTexInfo->Flags.Info.Linear = 1;
- pTexInfo->TileMode = TILE_NONE;
+ pTexInfo->TileMode = TILE_NONE;
const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
@@ -569,10 +572,7 @@
Height = YHeight + 2 * VHeight; // One VHeight for V and one for U.
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
- VHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@@ -590,10 +590,7 @@
Height = YHeight + 2 * VHeight;
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
- VHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@@ -644,10 +641,7 @@
Height = YHeight + 2 * VHeight;
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
- YHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@@ -668,11 +662,6 @@
Height = YHeight + VHeight;
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so U/V are properly aligned, vertically).
- 0, false); // <-- VHeight alignment NOT needed (since U/V aren't on top of eachother).
-
// With SURFACE_STATE.XOffset support, the U-V interface has
// much lighter restrictions--which will be naturally met by
// surface pitch restrictions (i.e. dividing an IMC2/4 pitch
@@ -681,7 +670,8 @@
// Not technically UV packed but sizing works out the same
// if the resource is std swizzled
- UVPacked = pTexInfo->Flags.Info.StdSwizzle ? true : false;
+ UVPacked = true;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
break;
}
@@ -722,11 +712,6 @@
(pTexInfo->Format == GMM_FORMAT_P208))
{
WidthBytesPhysical = GFX_ALIGN(WidthBytesPhysical, 2); // If odd YWidth, pitch bumps-up to fit rounded-up U/V planes.
-
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so UV is properly aligned).
- 0, false); // <-- VHeight alignment NOT needed (since U/V aren't on top of eachother).
}
else //if(pTexInfo->Format == GMM_FORMAT_NV11)
{
@@ -738,8 +723,8 @@
pTexInfo->Flags.Info.Linear = 1;
}
- UVPacked = true;
-
+ UVPacked = true;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
break;
}
case GMM_FORMAT_I420: // IYUV & I420: are identical to YV12 except,
@@ -788,6 +773,7 @@
pTexInfo->Flags.Info.TiledX = 0;
pTexInfo->Flags.Info.Linear = 1;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 1;
break;
}
default:
@@ -802,8 +788,7 @@
SetTileMode(pTexInfo);
- // If the Surface has Odd height dimension, we will fall back to Linear Format.
- // If MMC is enabled, disable MMC during such cases.
+ // MMC is not supported for linear formats.
if(pTexInfo->Flags.Gpu.MMC)
{
if(!(pTexInfo->Flags.Info.TiledY || pTexInfo->Flags.Info.TiledYf || pTexInfo->Flags.Info.TiledYs))
@@ -849,24 +834,25 @@
Height += AdjustedVHeight - VHeight;
}
- // For std swizzled and UV packed tile Ys/Yf cases, the planes
- // must be tile-boundary aligned. Actual alignment is handled
- // in FillPlanarOffsetAddress, but height and width must
- // be adjusted for correct size calculation
- if((pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf) &&
- (pTexInfo->Flags.Info.StdSwizzle || UVPacked))
+ // For Tiled Planar surfaces, the planes must be tile-boundary aligned.
+ // Actual alignment is handled in FillPlanarOffsetAddress, but height
+ // and width must be adjusted for correct size calculation
+ if(GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]))
{
uint32_t TileHeight = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileHeight;
uint32_t TileWidth = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileWidth;
+ pTexInfo->OffsetInfo.Plane.IsTileAlignedPlanes = true;
+
//for separate U and V planes, use U plane unaligned and V plane aligned
Height = GFX_ALIGN(YHeight, TileHeight) + (UVPacked ? GFX_ALIGN(AdjustedVHeight, TileHeight) :
(GFX_ALIGN(VHeight, TileHeight) + GFX_ALIGN(AdjustedVHeight, TileHeight)));
- if(UVPacked)
+ if(pTexInfo->Format == GMM_FORMAT_IMC2 || // IMC2, IMC4 needs even tile columns
+ pTexInfo->Format == GMM_FORMAT_IMC4)
{
- // If the UV planes are packed then the surface pitch must be
- // padded out so that the tile-aligned UV data will fit.
+ // If the U & V planes are side-by-side then the surface pitch must be
+ // padded out so that U and V planes will begin on a tile boundary.
// This means that an odd Y plane width must be padded out
// with an additional tile. Even widths do not need padding
uint32_t TileCols = GFX_CEIL_DIV(WidthBytesPhysical, TileWidth);
@@ -876,9 +862,13 @@
}
}
- pTexInfo->Flags.Info.RedecribedPlanes = 1;
+ if(pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf)
+ {
+ pTexInfo->Flags.Info.RedecribedPlanes = true;
+ }
}
+ // Very wide tiled planar formats do not support MMC pre gen11. All formats do not support
//Special case LKF MMC compressed surfaces
if(pTexInfo->Flags.Gpu.MMC &&
pTexInfo->Flags.Gpu.UnifiedAuxSurface &&
diff --git a/Source/GmmLib/Texture/GmmGen12Texture.cpp b/Source/GmmLib/Texture/GmmGen12Texture.cpp
new file mode 100644
index 0000000..d6b18a9
--- /dev/null
+++ b/Source/GmmLib/Texture/GmmGen12Texture.cpp
@@ -0,0 +1,1168 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+
+#include "Internal/Common/GmmLibInc.h"
+#include "Internal/Common/Texture/GmmGen10TextureCalc.h"
+#include "Internal/Common/Texture/GmmGen11TextureCalc.h"
+#include "Internal/Common/Texture/GmmGen12TextureCalc.h"
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Calculates height of the 2D mip layout on Gen12
+///
+/// @param[in] pTexInfo: ptr to ::GMM_TEXTURE_INFO,
+///
+/// @return height of 2D mip layout
+/////////////////////////////////////////////////////////////////////////////////////
+uint32_t GmmLib::GmmGen12TextureCalc::Get2DMipMapHeight(GMM_TEXTURE_INFO *pTexInfo)
+{
+ uint32_t BlockHeight, MipHeight;
+ uint32_t HeightLinesLevel0, HeightLinesLevel1, HeightLinesLevel2;
+ uint32_t i, MipLevel, VAlign, CompressHeight, CompressWidth, CompressDepth;
+ uint8_t Compressed;
+ GMM_DPF_ENTER;
+
+ const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
+
+ Compressed = GmmIsCompressed(pTexInfo->Format);
+ MipHeight = pTexInfo->BaseHeight;
+ MipLevel = pTexInfo->MaxLod;
+ VAlign = pTexInfo->Alignment.VAlign;
+ GetCompressionBlockDimensions(pTexInfo->Format, &CompressWidth, &CompressHeight, &CompressDepth);
+
+ HeightLinesLevel0 = __GMM_EXPAND_HEIGHT(this, MipHeight, VAlign, pTexInfo);
+
+ if(Compressed)
+ {
+ HeightLinesLevel0 /= CompressHeight;
+ }
+
+ // Mip0 height...
+ BlockHeight = HeightLinesLevel0;
+
+ if((pTexInfo->Flags.Info.TiledYf || GMM_IS_64KB_TILE(pTexInfo->Flags)) &&
+ ((pTexInfo->Alignment.MipTailStartLod == 0) || (pTexInfo->MaxLod == 0)))
+ {
+ // Do nothing. Height is already aligned.
+ }
+ else
+ {
+ // Height of Mip1 and Mip2..n needed later...
+ HeightLinesLevel1 = HeightLinesLevel2 = 0;
+ for(i = 1; i <= MipLevel; i++)
+ {
+ uint32_t AlignedHeightLines;
+
+ if((pTexInfo->Flags.Info.TiledYf || GMM_IS_64KB_TILE(pTexInfo->Flags)) &&
+ (i == pTexInfo->Alignment.MipTailStartLod))
+ {
+ AlignedHeightLines = pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileHeight;
+
+ if(i == 1)
+ {
+ HeightLinesLevel1 = AlignedHeightLines;
+ }
+ else
+ {
+ HeightLinesLevel2 += AlignedHeightLines;
+ }
+
+ break;
+ }
+ else
+ {
+ MipHeight = GmmTexGetMipHeight(pTexInfo, i);
+
+ AlignedHeightLines = __GMM_EXPAND_HEIGHT(this, MipHeight, VAlign, pTexInfo);
+
+ if(Compressed)
+ {
+ AlignedHeightLines /= CompressHeight;
+ }
+
+ if(i == 1)
+ {
+ HeightLinesLevel1 = AlignedHeightLines;
+ }
+ else
+ {
+ HeightLinesLevel2 += AlignedHeightLines;
+ }
+ }
+ }
+
+ // If Mip1 height covers all others, then that is all we need...
+ if(!(pTexInfo->Flags.Info.TiledYf || GMM_IS_64KB_TILE(pTexInfo->Flags)))
+ {
+ if(HeightLinesLevel1 >= HeightLinesLevel2)
+ {
+ BlockHeight += GFX_ALIGN(HeightLinesLevel1, VAlign);
+ }
+ else
+ {
+ BlockHeight += GFX_ALIGN(HeightLinesLevel2, VAlign);
+ }
+ }
+ else
+ {
+ //TR mode- requires TileMode height alignment
+ BlockHeight += (HeightLinesLevel1 >= HeightLinesLevel2) ? HeightLinesLevel1 : HeightLinesLevel2;
+ BlockHeight = GFX_ALIGN(BlockHeight, pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileHeight);
+ }
+ }
+
+ GMM_DPF_EXIT;
+
+ return (BlockHeight);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Calculates Linear CCS size from main surface size
+///
+/// @param[in] pSurf: ptr to ::GMM_TEXTURE_INFO of main surface
+/// @param[in] pAuxTexInfo: ptr to ::GMM_TEXTURE_INFO of Aux surface
+///
+/////////////////////////////////////////////////////////////////////////////////////
+GMM_STATUS GmmLib::GmmGen12TextureCalc::FillTexCCS(GMM_TEXTURE_INFO *pSurf,
+ GMM_TEXTURE_INFO *pAuxTexInfo)
+{
+
+
+ if(pAuxTexInfo->Flags.Gpu.__NonMsaaLinearCCS)
+ {
+ GMM_TEXTURE_INFO Surf = *pSurf;
+ const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pSurf);
+ pAuxTexInfo->Flags.Info.TiledW = 0;
+ pAuxTexInfo->Flags.Info.TiledYf = 0;
+ pAuxTexInfo->Flags.Info.TiledX = 0;
+ pAuxTexInfo->Flags.Info.Linear = 1;
+ GMM_SET_64KB_TILE(pAuxTexInfo->Flags, 0);
+ GMM_SET_4KB_TILE(pAuxTexInfo->Flags, 0);
+
+ pAuxTexInfo->ArraySize = Surf.ArraySize;
+ pAuxTexInfo->BitsPerPixel = 8;
+ uint32_t ExpandedArraySize =
+ GFX_MAX(Surf.ArraySize, 1) *
+ ((Surf.Type == RESOURCE_CUBE) ? 6 : 1) * // Cubemaps simply 6-element, 2D arrays.
+ ((Surf.Type == RESOURCE_3D) ? Surf.Depth : 1) * // 3D's simply 2D arrays for sizing.
+ ((Surf.Flags.Gpu.Depth || Surf.Flags.Gpu.SeparateStencil ||
+ GMM_IS_64KB_TILE(Surf.Flags) || Surf.Flags.Info.TiledYf) ?
+ 1 :
+ Surf.MSAA.NumSamples); // MSAA (non-Depth/Stencil) RT samples stored as array planes.
+
+ if(GMM_IS_64KB_TILE(Surf.Flags) || Surf.Flags.Info.TiledYf)
+ {
+ ExpandedArraySize = GFX_ALIGN(ExpandedArraySize, pPlatform->TileInfo[Surf.TileMode].LogicalTileDepth);
+ }
+
+ if(GmmIsUVPacked(Surf.Format))
+ {
+ uint64_t YCcsSize = GFX_ALIGN((Surf.OffsetInfo.Plane.Y[GMM_PLANE_U] * Surf.Pitch), GMM_KBYTE(16)) >> 8;
+ YCcsSize = GFX_ALIGN(YCcsSize, PAGE_SIZE);
+
+ uint64_t PlanarSize = (Surf.ArraySize > 1) ? (Surf.OffsetInfo.Plane.ArrayQPitch) : Surf.Size;
+
+ uint64_t UVCcsSize = GFX_ALIGN(PlanarSize - (Surf.OffsetInfo.Plane.Y[GMM_PLANE_U] * Surf.Pitch), GMM_KBYTE(16)) >> 8;
+ if(UVCcsSize == 0)
+ {
+ //GMM_ASSERTDPF(UVCcsSize != 0, "Incorrect Planar Surface Size"); //Redescription of Yf/Ys planar surface P010 hits it (debug required?)
+ UVCcsSize = 1;
+ }
+ UVCcsSize = GFX_ALIGN_NP2(UVCcsSize, PAGE_SIZE);
+
+ pAuxTexInfo->OffsetInfo.Plane.X[GMM_PLANE_Y] = 0;
+ pAuxTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_Y] = 0;
+ pAuxTexInfo->OffsetInfo.Plane.X[GMM_PLANE_U] = YCcsSize; //Being Linear CCS, fill X-offset - Test GetAuxOffset UV_CCS is proper
+ pAuxTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_U] = 0;
+ pAuxTexInfo->OffsetInfo.Plane.X[GMM_PLANE_V] = YCcsSize; //Being Linear CCS, fill X-offset
+ pAuxTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_V] = 0;
+
+ pAuxTexInfo->OffsetInfo.Plane.ArrayQPitch = YCcsSize + UVCcsSize;
+ pAuxTexInfo->Size = pAuxTexInfo->OffsetInfo.Plane.ArrayQPitch * ((Surf.ArraySize > 1) ? (Surf.ArraySize) : 1);
+ }
+ else if(GmmIsPlanar(Surf.Format))
+ {
+ //Doesn't require separate Aux surfaces since not displayable. Page-alignment ensures
+ //each hybrid plane is 4k-aligned, hence gets unique AuxT.L1e
+ uint64_t PlanarSize = (Surf.ArraySize > 1) ? (Surf.OffsetInfo.Plane.ArrayQPitch) : Surf.Size;
+ uint64_t CcsSize = GFX_ALIGN(PlanarSize, GMM_KBYTE(16)) >> 8;
+ CcsSize = GFX_ALIGN(CcsSize, PAGE_SIZE);
+
+ pAuxTexInfo->OffsetInfo.Plane.X[GMM_PLANE_Y] = 0;
+ pAuxTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_Y] = 0;
+ pAuxTexInfo->OffsetInfo.Plane.X[GMM_PLANE_U] = GFX_ALIGN(Surf.OffsetInfo.Plane.Y[GMM_PLANE_U] * Surf.Pitch, GMM_KBYTE(16)) >> 8; //Being Linear CCS, fill X-offset - Test GetAuxOffset UV_CCS is proper
+ pAuxTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_U] = 0;
+ pAuxTexInfo->OffsetInfo.Plane.X[GMM_PLANE_V] = GFX_ALIGN(Surf.OffsetInfo.Plane.Y[GMM_PLANE_V] * Surf.Pitch, GMM_KBYTE(16)) >> 8; //Being Linear CCS, fill X-offset
+ pAuxTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_V] = 0;
+
+ pAuxTexInfo->OffsetInfo.Plane.ArrayQPitch = CcsSize;
+ pAuxTexInfo->Size = pAuxTexInfo->OffsetInfo.Plane.ArrayQPitch *
+ ((Surf.ArraySize > 1) ? (Surf.ArraySize) : 1);
+ }
+ else
+ {
+ if(ExpandedArraySize > 1)
+ {
+ pAuxTexInfo->Size = ((GFX_ALIGN(Surf.Pitch * Surf.Alignment.QPitch, GMM_KBYTE(16)) >> 8));
+ //pAuxTexInfo->Size = GFX_ALIGN(pAuxTexInfo->Size, PAGE_SIZE); //Uncomment to pad CCS to start at tile-boundary eg. for media/display CCS requirement
+ pAuxTexInfo->Alignment.QPitch = GFX_ULONG_CAST(pAuxTexInfo->Size); //HW doesn't use QPitch for Aux except MCS, how'd AMFS get sw-filled non-zero QPitch?
+
+ pAuxTexInfo->Size *= ExpandedArraySize;
+ }
+ else
+ {
+ pAuxTexInfo->Size = (GFX_ALIGN(Surf.Size, GMM_KBYTE(16)) >> 8);
+ }
+ }
+ pAuxTexInfo->Pitch = 0;
+ pAuxTexInfo->Type = RESOURCE_BUFFER;
+ pAuxTexInfo->Alignment = {0};
+ pAuxTexInfo->Alignment.QPitch = GFX_ULONG_CAST(pAuxTexInfo->Size) / ExpandedArraySize;
+ pAuxTexInfo->Alignment.BaseAlignment = GMM_KBYTE(4); //TODO: TiledResource?
+ pAuxTexInfo->Size = GFX_ALIGN(pAuxTexInfo->Size, PAGE_SIZE); //page-align final size
+
+ if(pAuxTexInfo->Flags.Gpu.TiledResource)
+ {
+ pAuxTexInfo->Alignment.BaseAlignment = GMM_KBYTE(64); //TODO: TiledResource?
+ pAuxTexInfo->Size = GFX_ALIGN(pAuxTexInfo->Size, GMM_KBYTE(64)); //page-align final size
+ }
+
+ //Clear compression request in CCS
+ pAuxTexInfo->Flags.Info.RenderCompressed = 0;
+ pAuxTexInfo->Flags.Info.MediaCompressed = 0;
+ pAuxTexInfo->Flags.Info.RedecribedPlanes = 0;
+ SetTileMode(pAuxTexInfo);
+
+ return GMM_SUCCESS;
+ }
+
+ return GMM_SUCCESS;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Allocates the 2D mip layout for surface state programming.
+///
+/// @param[in] pTexInfo: ptr to ::GMM_TEXTURE_INFO,
+/// @param[in] pRestrictions: ptr to surface alignment and size restrictions
+///
+/// @return ::GMM_STATUS
+/////////////////////////////////////////////////////////////////////////////////////
+GMM_STATUS GMM_STDCALL GmmLib::GmmGen12TextureCalc::FillTex2D(GMM_TEXTURE_INFO * pTexInfo,
+ __GMM_BUFFER_TYPE *pRestrictions)
+{
+ uint32_t Width, Height, BitsPerPixel;
+ uint32_t HAlign, VAlign, DAlign, CompressHeight, CompressWidth, CompressDepth;
+ uint32_t AlignedWidth, BlockHeight, ExpandedArraySize, Pitch;
+ uint8_t Compress = 0;
+ GMM_STATUS Status;
+
+ GMM_DPF_ENTER;
+
+ __GMM_ASSERTPTR(pTexInfo, GMM_ERROR);
+ __GMM_ASSERTPTR(pRestrictions, GMM_ERROR);
+
+ const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
+
+ BitsPerPixel = pTexInfo->BitsPerPixel;
+ //TODO: Deprecate TileY usage
+ if((pTexInfo->Flags.Gpu.CCS && pTexInfo->Flags.Gpu.__NonMsaaTileYCcs))
+ {
+ // Aux Surfaces are 8bpp.
+ BitsPerPixel = 8;
+ }
+
+ Height = pTexInfo->BaseHeight;
+ Width = GFX_ULONG_CAST(pTexInfo->BaseWidth);
+
+ pTexInfo->MSAA.NumSamples = GFX_MAX(pTexInfo->MSAA.NumSamples, 1);
+
+ if(pTexInfo->Flags.Info.TiledYf || GMM_IS_64KB_TILE(pTexInfo->Flags))
+ {
+ FindMipTailStartLod(pTexInfo);
+ }
+
+ ExpandedArraySize =
+ GFX_MAX(pTexInfo->ArraySize, 1) *
+ ((pTexInfo->Type == RESOURCE_CUBE) ? 6 : 1) * // Cubemaps simply 6-element, 2D arrays.
+ ((pTexInfo->Type == RESOURCE_3D) ? pTexInfo->Depth : 1) * // 3D's simply 2D arrays for sizing.
+ ((pTexInfo->Flags.Gpu.Depth || pTexInfo->Flags.Gpu.SeparateStencil ||
+ (GMM_IS_64KB_TILE(pTexInfo->Flags) || pTexInfo->Flags.Info.TiledYf)) ? // MSAA Ys/Yf samples are ALSO stored as array planes, calculate size for single sample and expand it later.
+ 1 :
+ pTexInfo->MSAA.NumSamples) * // MSAA (non-Depth/Stencil) RT samples stored as array planes.
+ ((GMM_IS_64KB_TILE(pTexInfo->Flags) && !pGmmGlobalContext->GetSkuTable().FtrTileY && (pTexInfo->MSAA.NumSamples == 16)) ? 4 : // MSAA x8/x16 stored as pseudo array planes each with 4x samples
+ (GMM_IS_64KB_TILE(pTexInfo->Flags) && !pGmmGlobalContext->GetSkuTable().FtrTileY && (pTexInfo->MSAA.NumSamples == 8)) ? 2 : 1);
+
+ if(GMM_IS_64KB_TILE(pTexInfo->Flags) || pTexInfo->Flags.Info.TiledYf)
+ {
+ ExpandedArraySize = GFX_CEIL_DIV(ExpandedArraySize, pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileDepth);
+ }
+
+ //
+ // Check for color separation
+ //
+ if(pTexInfo->Flags.Gpu.ColorSeparation || pTexInfo->Flags.Gpu.ColorSeparationRGBX)
+ {
+ bool csRestrictionsMet = (((ExpandedArraySize <= 2) &&
+ (ExpandedArraySize == pTexInfo->ArraySize) &&
+ ((pTexInfo->Format == GMM_FORMAT_R8G8B8A8_UNORM) ||
+ (pTexInfo->Format == GMM_FORMAT_R8G8B8A8_UNORM_SRGB) ||
+ (pTexInfo->Format == GMM_FORMAT_B8G8R8A8_UNORM) ||
+ (pTexInfo->Format == GMM_FORMAT_B8G8R8A8_UNORM_SRGB) ||
+ (pTexInfo->Format == GMM_FORMAT_B8G8R8X8_UNORM) ||
+ (pTexInfo->Format == GMM_FORMAT_B8G8R8X8_UNORM_SRGB)) &&
+ ((pTexInfo->Flags.Gpu.ColorSeparation && (Width % 16) == 0) ||
+ (pTexInfo->Flags.Gpu.ColorSeparationRGBX && (Width % 12) == 0))));
+
+ if(csRestrictionsMet)
+ {
+ ExpandedArraySize = GMM_COLOR_SEPARATION_ARRAY_SIZE;
+ }
+ else
+ {
+ pTexInfo->Flags.Gpu.ColorSeparation = 0;
+ pTexInfo->Flags.Gpu.ColorSeparationRGBX = 0;
+ }
+ }
+
+ HAlign = pTexInfo->Alignment.HAlign;
+ VAlign = pTexInfo->Alignment.VAlign;
+ DAlign = pTexInfo->Alignment.DAlign;
+
+ GetCompressionBlockDimensions(pTexInfo->Format, &CompressWidth, &CompressHeight, &CompressDepth);
+
+ Compress = GmmIsCompressed(pTexInfo->Format);
+
+ /////////////////////////////////
+ // Calculate Block Surface Height
+ /////////////////////////////////
+
+ if(ExpandedArraySize > 1)
+ {
+ uint32_t Alignment = VAlign;
+ if((pTexInfo->Type == RESOURCE_3D && !pTexInfo->Flags.Info.Linear) ||
+ (pTexInfo->Flags.Gpu.S3dDx && pGmmGlobalContext->GetSkuTable().FtrDisplayEngineS3d))
+ {
+ Alignment = pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileHeight;
+ }
+
+ // Calculate the overall Block height...Mip0Height + Max(Mip1Height, Sum of Mip2Height..MipnHeight)
+ BlockHeight = Get2DMipMapTotalHeight(pTexInfo);
+ BlockHeight = GFX_ALIGN_NP2(BlockHeight, Alignment);
+
+ // GMM internally uses QPitch as the logical distance between slices, but translates
+ // as appropriate to service client queries in GmmResGetQPitch.
+ pTexInfo->Alignment.QPitch = BlockHeight;
+
+ if(Compress)
+ {
+ BlockHeight = GFX_CEIL_DIV(BlockHeight, CompressHeight);
+
+ BlockHeight = GetAligned3DBlockHeight(pTexInfo, BlockHeight, ExpandedArraySize);
+ }
+ else
+ {
+ BlockHeight = ScaleTextureHeight(pTexInfo, BlockHeight);
+ }
+
+ BlockHeight *= ExpandedArraySize;
+ }
+ else
+ {
+ pTexInfo->Alignment.QPitch = 0;
+
+ BlockHeight = Get2DMipMapHeight(pTexInfo);
+ BlockHeight = ScaleTextureHeight(pTexInfo, BlockHeight);
+ }
+
+ ///////////////////////////////////
+ // Calculate Pitch
+ ///////////////////////////////////
+
+ AlignedWidth = __GMM_EXPAND_WIDTH(this, Width, HAlign, pTexInfo);
+
+ // Calculate special pitch case of small dimensions where LOD1 + LOD2 widths
+ // are greater than LOD0. e.g. dimensions 4x4 and MinPitch == 1
+ if((pTexInfo->Flags.Info.TiledYf || GMM_IS_64KB_TILE(pTexInfo->Flags)) &&
+ (pTexInfo->Alignment.MipTailStartLod < 2))
+ {
+ // Do nothing -- all mips are in LOD0/LOD1, which is already width aligned.
+ }
+ else if(pTexInfo->MaxLod >= 2)
+ {
+ uint32_t AlignedWidthLod1, AlignedWidthLod2;
+
+ AlignedWidthLod1 = __GMM_EXPAND_WIDTH(this, Width >> 1, HAlign, pTexInfo);
+ AlignedWidthLod2 = __GMM_EXPAND_WIDTH(this, Width >> 2, HAlign, pTexInfo);
+
+ AlignedWidth = GFX_MAX(AlignedWidth, AlignedWidthLod1 + AlignedWidthLod2);
+ }
+
+ if(Compress)
+ {
+ AlignedWidth = GFX_CEIL_DIV(AlignedWidth, CompressWidth);
+ }
+ else
+ {
+ AlignedWidth = ScaleTextureWidth(pTexInfo, AlignedWidth);
+ }
+
+ // Default pitch
+ Pitch = AlignedWidth * BitsPerPixel >> 3;
+
+ // Make sure the pitch satisfies the linear min pitch requirement
+ Pitch = GFX_MAX(Pitch, pRestrictions->MinPitch);
+
+ // Make sure pitch satisfies the alignment restriction
+ Pitch = GFX_ALIGN(Pitch, pRestrictions->PitchAlignment);
+
+ ////////////////////
+ // Adjust for Tiling
+ ////////////////////
+
+ if(GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]))
+ {
+ Pitch = GFX_ALIGN(Pitch, pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileWidth);
+ BlockHeight = GFX_ALIGN(BlockHeight, pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileHeight);
+ }
+
+ GMM_ASSERTDPF(pTexInfo->Flags.Info.LayoutBelow || !pTexInfo->Flags.Info.LayoutRight, "MIPLAYOUT_RIGHT not supported after Gen6!");
+ pTexInfo->Flags.Info.LayoutBelow = 1;
+ pTexInfo->Flags.Info.LayoutRight = 0;
+
+ // If a texture is YUV packed, 96, or 48 bpp then one row plus 16 bytes of
+ // padding needs to be added. Since this will create a non-pitch-aligned
+ // surface the padding is aligned to the next row
+ if(GmmIsYUVPacked(pTexInfo->Format) ||
+ (pTexInfo->BitsPerPixel == GMM_BITS(96)) ||
+ (pTexInfo->BitsPerPixel == GMM_BITS(48)))
+ {
+ BlockHeight += GMM_SCANLINES(1) + GFX_CEIL_DIV(GMM_BYTES(16), Pitch);
+ }
+
+ // For Non-planar surfaces, the alignment is done on the entire height of the allocation
+ if(pGmmGlobalContext->GetWaTable().WaAlignYUVResourceToLCU &&
+ GmmIsYUVFormatLCUAligned(pTexInfo->Format) &&
+ !GmmIsPlanar(pTexInfo->Format))
+ {
+ BlockHeight = GFX_ALIGN(BlockHeight, GMM_SCANLINES(GMM_MAX_LCU_SIZE));
+ }
+
+ // Align height to even row to avoid hang if HW over-fetch
+ BlockHeight = GFX_ALIGN(BlockHeight, __GMM_EVEN_ROW);
+
+ if((Status = // <-- Note assignment.
+ FillTexPitchAndSize(
+ pTexInfo, Pitch, BlockHeight, pRestrictions)) == GMM_SUCCESS)
+ {
+ Fill2DTexOffsetAddress(pTexInfo);
+ }
+
+ GMM_DPF_EXIT;
+
+ return (Status);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// This function will Setup a planar surface allocation.
+///
+/// @param[in] pTexInfo: Reference to ::GMM_TEXTURE_INFO
+/// @param[in] pRestrictions: Reference to surface alignment and size restrictions.
+///
+/// @return ::GMM_STATUS
+/////////////////////////////////////////////////////////////////////////////////////
+GMM_STATUS GMM_STDCALL GmmLib::GmmGen12TextureCalc::FillTexPlanar(GMM_TEXTURE_INFO * pTexInfo,
+ __GMM_BUFFER_TYPE *pRestrictions)
+{
+ uint32_t WidthBytesPhysical, Height, YHeight, VHeight;
+ uint32_t AdjustedVHeight = 0;
+ GMM_STATUS Status;
+ bool UVPacked = false;
+ uint32_t BitsPerPixel, AlignedWidth;
+
+ GMM_DPF_ENTER;
+
+ __GMM_ASSERTPTR(pTexInfo, GMM_ERROR);
+ __GMM_ASSERTPTR(pRestrictions, GMM_ERROR);
+ __GMM_ASSERT(!pTexInfo->Flags.Info.TiledW);
+ const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
+
+ BitsPerPixel = pTexInfo->BitsPerPixel;
+ AlignedWidth = GFX_ULONG_CAST(pTexInfo->BaseWidth);
+ if(!pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
+ {
+ pTexInfo->TileMode = TILE_NONE;
+ }
+ else
+ {
+ pTexInfo->TileMode = LEGACY_TILE_Y;
+ }
+
+ WidthBytesPhysical = AlignedWidth * BitsPerPixel >> 3;
+ Height = VHeight = 0;
+
+ YHeight = pTexInfo->BaseHeight;
+
+ switch(pTexInfo->Format)
+ {
+ case GMM_FORMAT_IMC1: // IMC1 = IMC3 with Swapped U/V
+ case GMM_FORMAT_IMC3:
+ case GMM_FORMAT_MFX_JPEG_YUV420: // Same as IMC3.
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // UUUU
+ // UUUU
+ // VVVV
+ // VVVV
+ case GMM_FORMAT_MFX_JPEG_YUV422V: // Similar to IMC3 but U/V are full width.
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // UUUUUUUU
+ // UUUUUUUU
+ // VVVVVVVV
+ // VVVVVVVV
+ {
+ VHeight = GFX_ALIGN(GFX_CEIL_DIV(YHeight, 2), GMM_IMCx_PLANE_ROW_ALIGNMENT);
+
+ YHeight = GFX_ALIGN(YHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
+
+ Height = YHeight + 2 * VHeight; // One VHeight for V and one for U.
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
+ break;
+ }
+ case GMM_FORMAT_MFX_JPEG_YUV411R_TYPE: //Similar to IMC3 but U/V are quarter height and full width.
+ //YYYYYYYY
+ //YYYYYYYY
+ //YYYYYYYY
+ //YYYYYYYY
+ //UUUUUUUU
+ //VVVVVVVV
+ {
+ VHeight = GFX_ALIGN(GFX_CEIL_DIV(YHeight, 4), GMM_IMCx_PLANE_ROW_ALIGNMENT);
+
+ YHeight = GFX_ALIGN(YHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
+
+ Height = YHeight + 2 * VHeight;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
+ break;
+ }
+ case GMM_FORMAT_MFX_JPEG_YUV411: // Similar to IMC3 but U/V are quarter width and full height.
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // UU
+ // UU
+ // UU
+ // UU
+ // VV
+ // VV
+ // VV
+ // VV
+ case GMM_FORMAT_MFX_JPEG_YUV422H: // Similar to IMC3 but U/V are full height.
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // UUUU
+ // UUUU
+ // UUUU
+ // UUUU
+ // VVVV
+ // VVVV
+ // VVVV
+ // VVVV
+ case GMM_FORMAT_BGRP:
+ case GMM_FORMAT_RGBP:
+ case GMM_FORMAT_MFX_JPEG_YUV444: // Similar to IMC3 but U/V are full size.
+#if _WIN32
+ case GMM_FORMAT_WGBOX_YUV444:
+ case GMM_FORMAT_WGBOX_PLANAR_YUV444:
+#endif
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // UUUUUUUU
+ // UUUUUUUU
+ // UUUUUUUU
+ // UUUUUUUU
+ // VVVVVVVV
+ // VVVVVVVV
+ // VVVVVVVV
+ // VVVVVVVV
+ {
+ YHeight = GFX_ALIGN(YHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
+ VHeight = YHeight;
+
+ Height = YHeight + 2 * VHeight;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
+ break;
+ }
+ case GMM_FORMAT_IMC2: // IMC2 = IMC4 with Swapped U/V
+ case GMM_FORMAT_IMC4:
+ {
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // UUUUVVVV
+ // UUUUVVVV
+
+ YHeight = GFX_ALIGN(YHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
+ VHeight = GFX_CEIL_DIV(YHeight, 2);
+
+ WidthBytesPhysical = GFX_ALIGN(WidthBytesPhysical, 2); // If odd YWidth, pitch bumps-up to fit rounded-up U/V planes.
+
+ Height = YHeight + VHeight;
+
+ // With SURFACE_STATE.XOffset support, the U-V interface has
+ // much lighter restrictions--which will be naturally met by
+ // surface pitch restrictions (i.e. dividing an IMC2/4 pitch
+ // by 2--to get the U/V interface--will always produce a safe
+ // XOffset value).
+ // Not technically UV packed but sizing works out the same
+ // if the resource is std swizzled
+ UVPacked = true;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
+ break;
+ }
+ case GMM_FORMAT_NV12:
+ case GMM_FORMAT_NV21:
+ case GMM_FORMAT_NV11:
+ case GMM_FORMAT_P010:
+ case GMM_FORMAT_P012:
+ case GMM_FORMAT_P016:
+ case GMM_FORMAT_P208:
+ {
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // [UV-Packing]
+
+ if((pTexInfo->Format == GMM_FORMAT_NV12) ||
+ (pTexInfo->Format == GMM_FORMAT_NV21) ||
+ (pTexInfo->Format == GMM_FORMAT_P010) ||
+ (pTexInfo->Format == GMM_FORMAT_P012) ||
+ (pTexInfo->Format == GMM_FORMAT_P016))
+ {
+ VHeight = GFX_CEIL_DIV(YHeight, 2); // U/V plane half of Y
+ Height = YHeight + VHeight;
+ }
+ else
+ {
+ VHeight = YHeight; // U/V plane is same as Y
+ Height = YHeight + VHeight;
+ }
+
+ if((pTexInfo->Format == GMM_FORMAT_NV12) ||
+ (pTexInfo->Format == GMM_FORMAT_NV21) ||
+ (pTexInfo->Format == GMM_FORMAT_P010) ||
+ (pTexInfo->Format == GMM_FORMAT_P012) ||
+ (pTexInfo->Format == GMM_FORMAT_P016) ||
+ (pTexInfo->Format == GMM_FORMAT_P208))
+ {
+ WidthBytesPhysical = GFX_ALIGN(WidthBytesPhysical, 2); // If odd YWidth, pitch bumps-up to fit rounded-up U/V planes.
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
+ }
+ else //if(pTexInfo->Format == GMM_FORMAT_NV11)
+ {
+ // Tiling not supported, since YPitch != UVPitch...
+ pTexInfo->Flags.Info.TiledYf = 0;
+ pTexInfo->Flags.Info.TiledX = 0;
+ pTexInfo->Flags.Info.Linear = 1;
+ GMM_SET_64KB_TILE(pTexInfo->Flags, 0);
+ GMM_SET_4KB_TILE(pTexInfo->Flags, 0);
+ }
+
+ UVPacked = true;
+ break;
+ }
+ case GMM_FORMAT_I420: // IYUV & I420: are identical to YV12 except,
+ case GMM_FORMAT_IYUV: // U & V pl.s are reversed.
+ case GMM_FORMAT_YV12:
+ case GMM_FORMAT_YVU9:
+ {
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // YYYYYYYY
+ // VVVVVV.. <-- V and U planes follow the Y plane, as linear
+ // ..UUUUUU arrays--without respect to pitch.
+
+ uint32_t YSize, UVSize, YVSizeRShift;
+ uint32_t YSizeForUVPurposes, YSizeForUVPurposesDimensionalAlignment;
+
+ YSize = WidthBytesPhysical * YHeight;
+
+ // YVU9 has one U/V pixel for each 4x4 Y block.
+ // The others have one U/V pixel for each 2x2 Y block.
+
+ // YVU9 has a Y:V size ratio of 16 (4x4 --> 1).
+ // The others have a ratio of 4 (2x2 --> 1).
+ YVSizeRShift = (pTexInfo->Format != GMM_FORMAT_YVU9) ? 2 : 4;
+
+ // If a Y plane isn't fully-aligned to its Y-->U/V block size, the
+ // extra/unaligned Y pixels still need corresponding U/V pixels--So
+ // for the purpose of computing the UVSize, we must consider a
+ // dimensionally "rounded-up" YSize. (E.g. a 13x5 YVU9 Y plane would
+ // require 4x2 U/V planes--the same UVSize as a fully-aligned 16x8 Y.)
+ YSizeForUVPurposesDimensionalAlignment = (pTexInfo->Format != GMM_FORMAT_YVU9) ? 2 : 4;
+ YSizeForUVPurposes =
+ GFX_ALIGN(WidthBytesPhysical, YSizeForUVPurposesDimensionalAlignment) *
+ GFX_ALIGN(YHeight, YSizeForUVPurposesDimensionalAlignment);
+
+ UVSize = 2 * // <-- U + V
+ (YSizeForUVPurposes >> YVSizeRShift);
+
+ Height = GFX_CEIL_DIV(YSize + UVSize, WidthBytesPhysical);
+
+ // Tiling not supported, since YPitch != UVPitch...
+ pTexInfo->Flags.Info.TiledYf = 0;
+ pTexInfo->Flags.Info.TiledX = 0;
+ pTexInfo->Flags.Info.Linear = 1;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 1;
+ GMM_SET_64KB_TILE(pTexInfo->Flags, 0);
+ GMM_SET_4KB_TILE(pTexInfo->Flags, 0);
+
+ break;
+ }
+ default:
+ {
+ GMM_ASSERTDPF(0, "Unexpected format");
+ return GMM_ERROR;
+ }
+ }
+
+ // Align Height to even row to avoid hang if HW over-fetch
+ Height = GFX_ALIGN(Height, __GMM_EVEN_ROW);
+
+ SetTileMode(pTexInfo);
+
+ // If the Surface has Odd height dimension, we will fall back to Linear Format.
+ // If MMC is enabled, disable MMC during such cases.
+ if(pTexInfo->Flags.Gpu.MMC)
+ {
+ if(!(GMM_IS_4KB_TILE(pTexInfo->Flags) || GMM_IS_64KB_TILE(pTexInfo->Flags)))
+ {
+ pTexInfo->Flags.Gpu.MMC = 0;
+ }
+ }
+
+ // If the Surface has Odd height dimension, we will fall back to Linear Format.
+ // If MMC is enabled, disable .CCS/UnifiedAuxSurface during such cases.
+ if(pTexInfo->Flags.Gpu.CCS)
+ {
+ if(!(GMM_IS_4KB_TILE(pTexInfo->Flags) || GMM_IS_64KB_TILE(pTexInfo->Flags)) &&
+ !(pTexInfo->Flags.Gpu.__NonMsaaTileYCcs && GMM_IS_4KB_TILE(pTexInfo->Flags)))
+ {
+ pTexInfo->Flags.Gpu.MMC = 0;
+ pTexInfo->Flags.Gpu.CCS = 0;
+ pTexInfo->Flags.Gpu.UnifiedAuxSurface = 0;
+ pTexInfo->Flags.Gpu.__NonMsaaTileYCcs = 0;
+ }
+ }
+
+ // Legacy Planar "Linear Video" Restrictions...
+ if(pTexInfo->Flags.Info.Linear && !pTexInfo->Flags.Wa.NoLegacyPlanarLinearVideoRestrictions)
+ {
+ pRestrictions->LockPitchAlignment = GFX_MAX(pRestrictions->LockPitchAlignment, GMM_BYTES(64));
+ pRestrictions->MinPitch = GFX_MAX(pRestrictions->MinPitch, GMM_BYTES(64));
+ pRestrictions->PitchAlignment = GFX_MAX(pRestrictions->PitchAlignment, GMM_BYTES(64));
+ pRestrictions->RenderPitchAlignment = GFX_MAX(pRestrictions->RenderPitchAlignment, GMM_BYTES(64));
+ }
+
+ // Multiply overall pitch alignment for surfaces whose U/V planes have a
+ // pitch down-scaled from that of Y--Since the U/V pitches must meet the
+ // original restriction, the Y pitch must meet a scaled-up multiple.
+ if((pTexInfo->Format == GMM_FORMAT_I420) ||
+ (pTexInfo->Format == GMM_FORMAT_IYUV) ||
+ (pTexInfo->Format == GMM_FORMAT_NV11) ||
+ (pTexInfo->Format == GMM_FORMAT_YV12) ||
+ (pTexInfo->Format == GMM_FORMAT_YVU9))
+ {
+ uint32_t LShift =
+ (pTexInfo->Format != GMM_FORMAT_YVU9) ?
+ 1 : // UVPitch = 1/2 YPitch
+ 2; // UVPitch = 1/4 YPitch
+
+ pRestrictions->LockPitchAlignment <<= LShift;
+ pRestrictions->MinPitch <<= LShift;
+ pRestrictions->PitchAlignment <<= LShift;
+ pRestrictions->RenderPitchAlignment <<= LShift;
+ }
+
+ AdjustedVHeight = VHeight;
+
+ // In case of Planar surfaces, only the last Plane has to be aligned to 64 for LCU access
+ if(pGmmGlobalContext->GetWaTable().WaAlignYUVResourceToLCU && GmmIsYUVFormatLCUAligned(pTexInfo->Format) && VHeight > 0)
+ {
+ AdjustedVHeight = GFX_ALIGN(VHeight, GMM_SCANLINES(GMM_MAX_LCU_SIZE));
+ Height += AdjustedVHeight - VHeight;
+ }
+
+ // For std swizzled and UV packed tile Ys/Yf cases, the planes
+ // must be tile-boundary aligned. Actual alignment is handled
+ // in FillPlanarOffsetAddress, but height and width must
+ // be adjusted for correct size calculation
+ if(GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]) &&
+ !pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
+ {
+ uint32_t TileHeight = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileHeight;
+ uint32_t TileWidth = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileWidth;
+
+ pTexInfo->OffsetInfo.Plane.IsTileAlignedPlanes = true;
+
+ //U/V must be aligned to AuxT granularity, 4x pitchalign enforces 16K-align,
+ //add extra padding for 64K AuxT
+ TileHeight *= (!GMM_IS_64KB_TILE(pTexInfo->Flags)) ? 4 : 1;
+
+ if(pTexInfo->Format == GMM_FORMAT_IMC2 || // IMC2, IMC4 needs even tile columns
+ pTexInfo->Format == GMM_FORMAT_IMC4)
+ {
+ // If the U & V planes are side-by-side then the surface pitch must be
+ // padded out so that U and V planes will begin on a tile boundary.
+ // This means that an odd Y plane width must be padded out
+ // with an additional tile. Even widths do not need padding
+ uint32_t TileCols = GFX_CEIL_DIV(WidthBytesPhysical, TileWidth);
+ if(TileCols % 2)
+ {
+ WidthBytesPhysical = (TileCols + 1) * TileWidth;
+ }
+ }
+
+ Height = GFX_ALIGN(YHeight, TileHeight) + (UVPacked ? GFX_ALIGN(AdjustedVHeight, TileHeight) :
+ (GFX_ALIGN(VHeight, TileHeight) + GFX_ALIGN(AdjustedVHeight, TileHeight)));
+
+ if(GMM_IS_64KB_TILE(pTexInfo->Flags) || pTexInfo->Flags.Info.TiledYf)
+ {
+ pTexInfo->Flags.Info.RedecribedPlanes = true;
+ }
+ }
+ else if(pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
+ {
+ uint32_t TileHeight = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileHeight;
+
+ BitsPerPixel = 8;
+
+ if(pTexInfo->Format == GMM_FORMAT_IMC2 || // IMC2, IMC4 needs even tile columns
+ pTexInfo->Format == GMM_FORMAT_IMC4)
+ {
+// If the U & V planes are side-by-side then the surface pitch must be
+// padded out so that U and V planes will begin on a tile boundary.
+// This means that an odd Y plane width must be padded out
+// with an additional tile. Even widths do not need padding
+
+// CCS must use padded main surface width, so get main surface TileWidth
+#define CCSMODE_TO_TILEMODE(y) ((y + TILE_YF_2D_8bpe) < TILE_YS_1D_8bpe) ? (y + TILE_YF_2D_8bpe) : \
+ ((y + TILE_YF_2D_8bpe + 5) >= TILE_YS_1D_128bpe) ? (y + TILE_YF_2D_8bpe + 5) : TILE_NONE
+
+ uint32_t BaseTileWidth = pPlatform->TileInfo[CCSMODE_TO_TILEMODE(pTexInfo->CCSModeAlign)].LogicalTileWidth;
+ WidthBytesPhysical = GFX_ALIGN(WidthBytesPhysical, 2 * BaseTileWidth);
+ }
+
+ AlignedWidth = GFX_ULONG_CAST(WidthBytesPhysical / (pTexInfo->BitsPerPixel >> 3));
+
+ WidthBytesPhysical = __GMM_EXPAND_WIDTH(this, AlignedWidth, pTexInfo->Alignment.HAlign, pTexInfo);
+ WidthBytesPhysical = ScaleTextureWidth(pTexInfo, WidthBytesPhysical); //Should both YAux and UVAux use same CCModeALign (ie using common bpe?)
+ //If different, then copy Aux info from per-plane Aux? HW has separate bpe or common?
+ YHeight = __GMM_EXPAND_HEIGHT(this, YHeight, pTexInfo->Alignment.VAlign, pTexInfo);
+ YHeight = ScaleTextureHeight(pTexInfo, YHeight);
+ YHeight = GFX_ALIGN(YHeight, TileHeight);
+
+ VHeight = __GMM_EXPAND_HEIGHT(this, VHeight, pTexInfo->Alignment.VAlign, pTexInfo);
+ VHeight = ScaleTextureHeight(pTexInfo, VHeight);
+ VHeight = GFX_ALIGN(VHeight, TileHeight);
+
+ Height = YHeight + VHeight;
+ }
+
+ if((Status = // <-- Note assignment.
+ FillTexPitchAndSize(
+ pTexInfo, WidthBytesPhysical, Height, pRestrictions)) == GMM_SUCCESS)
+ {
+ FillPlanarOffsetAddress(pTexInfo);
+ }
+
+ // Planar & hybrid 2D arrays supported in DX11.1+ spec but not HW. Memory layout
+ // is defined by SW requirements; Y plane must be 4KB aligned.
+ if(pTexInfo->ArraySize > 1)
+ {
+ GMM_GFX_SIZE_T ElementSizeBytes = pTexInfo->Size;
+ int64_t LargeSize;
+
+ // Size should always be page aligned.
+ __GMM_ASSERT((pTexInfo->Size % PAGE_SIZE) == 0);
+
+ if((LargeSize = (int64_t)ElementSizeBytes * pTexInfo->ArraySize) <= pPlatform->SurfaceMaxSize)
+ {
+ pTexInfo->OffsetInfo.Plane.ArrayQPitch = ElementSizeBytes;
+ pTexInfo->Size = LargeSize;
+ }
+ else
+ {
+ GMM_ASSERTDPF(0, "Surface too large!");
+ Status = GMM_ERROR;
+ }
+ }
+
+ GMM_DPF_EXIT;
+ return (Status);
+} // FillTexPlanar
+
+GMM_STATUS GMM_STDCALL GmmLib::GmmGen12TextureCalc::GetCCSScaleFactor(GMM_TEXTURE_INFO *pTexInfo,
+ CCS_UNIT & ScaleFactor)
+{
+ GMM_STATUS Status = GMM_SUCCESS;
+ GMM_TEXTURE_ALIGN_EX TexAlignEx = static_cast<PlatformInfoGen12 *>(pGmmGlobalContext->GetPlatformInfoObj())->GetExTextureAlign();
+ uint32_t CCSModeIdx = 0;
+
+ if(pTexInfo->Flags.Info.TiledYf || GMM_IS_64KB_TILE(pTexInfo->Flags)) //pTexInfo is RT Surf
+ {
+ CCSModeIdx = CCS_MODE(pTexInfo->TileMode);
+ __GMM_ASSERT(pTexInfo->TileMode < GMM_TILE_MODES);
+ }
+ else //pTexInfo is CCS Surf
+ {
+ CCSModeIdx = pTexInfo->CCSModeAlign;
+ }
+
+ if(!(CCSModeIdx < CCS_MODES))
+ {
+ __GMM_ASSERT(0); //indicates something wrong w/ H/V/D Align Filling function or Wrong TileMode set
+ return GMM_ERROR;
+ }
+
+ ScaleFactor = TexAlignEx.CCSEx[CCSModeIdx];
+
+ return (Status);
+}
+
+GMM_STATUS GMM_STDCALL GmmLib::GmmGen12TextureCalc::GetCCSExMode(GMM_TEXTURE_INFO *AuxSurf)
+{
+ if(GMM_IS_4KB_TILE(AuxSurf->Flags) || GMM_IS_64KB_TILE(AuxSurf->Flags) || AuxSurf->Flags.Info.Linear)
+ {
+ if(pGmmGlobalContext->GetSkuTable().FtrLinearCCS)
+ {
+ AuxSurf->Flags.Gpu.__NonMsaaLinearCCS = 1;
+ }
+ else
+ {
+ AuxSurf->Flags.Gpu.__NonMsaaTileYCcs = 1;
+ //CCS is always 2D, even for 3D surface
+ if(AuxSurf->Type == RESOURCE_CUBE)
+ {
+ AuxSurf->ArraySize = 6;
+ }
+ AuxSurf->Type = RESOURCE_2D;
+ }
+ if(AuxSurf->Flags.Gpu.__NonMsaaTileYCcs)
+ {
+ AuxSurf->CCSModeAlign = 0;
+ SetTileMode(AuxSurf);
+ /*if (AuxSurf->Flags.Gpu.UnifiedAuxSurface)*/
+ {
+ AuxSurf->CCSModeAlign = CCS_MODE(AuxSurf->TileMode);
+ }
+ AuxSurf->TileMode = TILE_NONE;
+
+ __GMM_ASSERT(AuxSurf->CCSModeAlign < CCS_MODES);
+ return (AuxSurf->CCSModeAlign < CCS_MODES) ? GMM_SUCCESS : GMM_INVALIDPARAM;
+ }
+ }
+ return GMM_SUCCESS;
+}
+
+uint32_t GMM_STDCALL GmmLib::GmmGen12TextureCalc::ScaleTextureHeight(GMM_TEXTURE_INFO *pTexInfo, uint32_t Height)
+{
+ uint32_t ScaledHeight = Height;
+ if(pTexInfo->Flags.Gpu.CCS && pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
+ {
+ CCS_UNIT ScaleFactor;
+ GetCCSScaleFactor(pTexInfo, ScaleFactor);
+
+ ScaledHeight /= ScaleFactor.Downscale.Height;
+ }
+
+ return ScaledHeight;
+}
+
+uint32_t GMM_STDCALL GmmLib::GmmGen12TextureCalc::ScaleTextureWidth(GMM_TEXTURE_INFO *pTexInfo, uint32_t Width)
+{
+ uint32_t ScaledWidth = Width;
+
+ if(pTexInfo->Flags.Gpu.CCS && pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
+ {
+ CCS_UNIT ScaleFactor;
+ GetCCSScaleFactor(pTexInfo, ScaleFactor);
+
+
+ if(ScaleFactor.Downscale.Width < 0)
+ {
+ ScaledWidth *= ((-1) * ScaleFactor.Downscale.Width);
+ }
+ else
+ {
+ ScaledWidth /= ScaleFactor.Downscale.Width;
+ }
+ }
+ else if(pTexInfo->Flags.Gpu.ColorSeparation)
+ {
+ ScaledWidth *= pTexInfo->ArraySize;
+ __GMM_ASSERT(0 == (ScaledWidth % GMM_COLOR_SEPARATION_WIDTH_DIVISION));
+ ScaledWidth /= GMM_COLOR_SEPARATION_WIDTH_DIVISION;
+ }
+ else if(pTexInfo->Flags.Gpu.ColorSeparationRGBX)
+ {
+ ScaledWidth *= pTexInfo->ArraySize;
+ __GMM_ASSERT(0 == (ScaledWidth % GMM_COLOR_SEPARATION_RGBX_WIDTH_DIVISION));
+ ScaledWidth /= GMM_COLOR_SEPARATION_RGBX_WIDTH_DIVISION;
+ }
+
+ return ScaledWidth;
+}
+
+uint32_t GMM_STDCALL GmmLib::GmmGen12TextureCalc::ScaleFCRectHeight(GMM_TEXTURE_INFO *pTexInfo, uint32_t Height)
+{
+ uint32_t ScaledHeight = Height;
+ if(pTexInfo->Flags.Gpu.CCS)
+ {
+ CCS_UNIT *FCRectAlign = static_cast<PlatformInfoGen12 *>(pGmmGlobalContext->GetPlatformInfoObj())->GetFCRectAlign();
+ uint8_t index = FCMaxModes;
+ if((index = FCMode(pTexInfo->TileMode, pTexInfo->BitsPerPixel)) < FCMaxModes)
+ {
+ ScaledHeight = GFX_ALIGN(ScaledHeight, FCRectAlign[index].Align.Height);
+ ScaledHeight /= FCRectAlign[index].Downscale.Height;
+ }
+ else
+ {
+ __GMM_ASSERT(0);
+ }
+ }
+
+ return ScaledHeight;
+}
+
+uint64_t GMM_STDCALL GmmLib::GmmGen12TextureCalc::ScaleFCRectWidth(GMM_TEXTURE_INFO *pTexInfo, uint64_t Width)
+{
+ uint64_t ScaledWidth = Width;
+ if(pTexInfo->Flags.Gpu.CCS)
+ {
+ CCS_UNIT *FCRectAlign = static_cast<PlatformInfoGen12 *>(pGmmGlobalContext->GetPlatformInfoObj())->GetFCRectAlign();
+ uint8_t index = FCMaxModes;
+ if((index = FCMode(pTexInfo->TileMode, pTexInfo->BitsPerPixel)) < FCMaxModes)
+ {
+ ScaledWidth = GFX_ALIGN(ScaledWidth, FCRectAlign[index].Align.Width);
+ ScaledWidth /= FCRectAlign[index].Downscale.Width;
+ }
+ else
+ {
+ //Unsupported tiling-type for FastClear
+ __GMM_ASSERT(0);
+ }
+ }
+
+ return ScaledWidth;
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// This function does any special-case conversion from client-provided pseudo creation
+/// parameters to actual parameters for CCS.
+///
+/// @param[in] pTexInfo: Reference to ::GMM_TEXTURE_INFO
+///
+/// @return ::GMM_STATUS
+/////////////////////////////////////////////////////////////////////////////////////
+GMM_STATUS GMM_STDCALL GmmLib::GmmGen12TextureCalc::MSAACCSUsage(GMM_TEXTURE_INFO *pTexInfo)
+{
+ GMM_STATUS Status = GMM_SUCCESS;
+
+ if(pTexInfo->MSAA.NumSamples > 1 && (pTexInfo->Flags.Gpu.MCS)) // CCS for MSAA Compression
+ {
+ Status = MSAACompression(pTexInfo);
+ }
+ else // Non-MSAA CCS Use (i.e. Render Target Fast Clear)
+ {
+ if(!pTexInfo->Flags.Info.TiledW &&
+ (!pTexInfo->Flags.Info.TiledX) &&
+ ((GMM_IS_4KB_TILE(pTexInfo->Flags) || GMM_IS_64KB_TILE(pTexInfo->Flags) ||
+ (pTexInfo->Type == RESOURCE_BUFFER && pTexInfo->Flags.Info.Linear)))) //!Yf - deprecate Yf)
+ {
+ // For non-MSAA CCS usage, the Doc has four tables of
+ // requirements:
+ // (1) RT Alignment (GMM Don't Care: Occurs Naturally)
+ // (2) ClearRect Alignment
+ // (3) ClearRect Scaling (GMM Don't Care: GHAL3D Matter)
+ // (4) Non-MSAA CCS Sizing
+
+ // Gen8+:
+ // Since mip-mapped and arrayed surfaces are supported, we
+ // deal with alignment later at per mip level. Here, we set
+ // tiling type only. TileX is not supported on Gen9+.
+ // Pre-Gen8:
+ // (!) For all the above, the doc has separate entries for
+ // 32/64/128bpp--and then deals with PIXEL widths--Here,
+ // though, we will unify by considering 8bpp table entries
+ // (unlisted--i.e. do the math)--and deal with BYTE widths.
+
+ // (1) RT Alignment -- The surface width and height don't
+ // need to be padded to RT CL granularity. On HSW, all tiled
+ // RT's will have appropriate alignment (given 4KB surface
+ // base and no mip-map support) and appropriate padding
+ // (due to tile padding). On BDW+, GMM uses H/VALIGN that
+ // will guarantee the MCS RT alignment for all subresources.
+
+ // (2) ClearRect Alignment -- I.e. FastClears must be done
+ // with certain granularity:
+ // TileY: 512 Bytes x 128 Lines
+ // TileX: 1024 Bytes x 64 Lines
+ // So a CCS must be sized to match that granularity (though
+ // the RT itself need not be fully padded to that
+ // granularity to use FastClear).
+
+ // (4) Non-MSAA CCS Sizing -- CCS sizing is based on the
+ // size of the FastClear (with granularity padding) for the
+ // paired RT. CCS's (byte widths and heights) are scaled
+ // down from their RT's by:
+ // TileY: 32 x 32
+ // TileX: 64 x 16
+
+ // ### Example #############################################
+ // RT: 800x600, 32bpp, TileY
+ // 8bpp: 3200x600
+ // FastClear: 3584x640 (for TileY FastClear Granularity of 512x128)
+ // CCS: 112x20 (for TileY RT:CCS Sizing Downscale of 32x32)
+
+ GetCCSExMode(pTexInfo);
+ }
+ else
+ {
+ GMM_ASSERTDPF(0, "Illegal CCS creation parameters!");
+ Status = GMM_ERROR;
+ }
+ }
+ return Status;
+}
diff --git a/Source/GmmLib/Texture/GmmTexture.cpp b/Source/GmmLib/Texture/GmmTexture.cpp
index 9fdf992..0aa0a04 100644
--- a/Source/GmmLib/Texture/GmmTexture.cpp
+++ b/Source/GmmLib/Texture/GmmTexture.cpp
@@ -34,8 +34,10 @@
{
GMM_GFX_SIZE_T *pUOffsetX, *pUOffsetY;
GMM_GFX_SIZE_T *pVOffsetX, *pVOffsetY;
+ uint32_t YHeight = 0, VHeight = 0;
bool UVPacked = false;
uint32_t Height;
+ uint32_t WidthBytesPhysical = GFX_ULONG_CAST(pTexInfo->BaseWidth) * pTexInfo->BitsPerPixel >> 3;
#define SWAP_UV() \
{ \
@@ -54,6 +56,8 @@
__GMM_ASSERTPTR(((pTexInfo->TileMode < GMM_TILE_MODES) && (pTexInfo->TileMode >= TILE_NONE)), VOIDRETURN);
GMM_DPF_ENTER;
+ const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
+
// GMM_PLANE_Y always at (0, 0)...
pTexInfo->OffsetInfo.Plane.X[GMM_PLANE_Y] = 0;
pTexInfo->OffsetInfo.Plane.Y[GMM_PLANE_Y] = 0;
@@ -100,9 +104,11 @@
// VVVVVVVV
{
*pUOffsetX = 0;
+ YHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pUOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetX = 0;
+ VHeight = GFX_ALIGN(GFX_CEIL_DIV(pTexInfo->BaseHeight, 2), GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetY =
GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT) +
GFX_ALIGN(GFX_CEIL_DIV(pTexInfo->BaseHeight, 2), GMM_IMCx_PLANE_ROW_ALIGNMENT);
@@ -118,9 +124,11 @@
//VVVVVVVV
{
*pUOffsetX = 0;
+ YHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pUOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetX = 0;
+ VHeight = GFX_ALIGN(GFX_CEIL_DIV(pTexInfo->BaseHeight, 4), GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetY =
GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT) +
GFX_ALIGN(GFX_CEIL_DIV(pTexInfo->BaseHeight, 4), GMM_IMCx_PLANE_ROW_ALIGNMENT);
@@ -170,9 +178,11 @@
// VVVVVVVV
{
*pUOffsetX = 0;
+ YHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pUOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetX = 0;
+ VHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT) * 2;
break;
@@ -191,11 +201,16 @@
__GMM_ASSERT((pTexInfo->Pitch & 1) == 0);
*pUOffsetX = 0;
+ YHeight = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pUOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
*pVOffsetX = pTexInfo->Pitch / 2;
+ VHeight = GFX_CEIL_DIV(YHeight, 2);
*pVOffsetY = GFX_ALIGN(pTexInfo->BaseHeight, GMM_IMCx_PLANE_ROW_ALIGNMENT);
+ // Not technically UV packed but sizing works out the same
+ UVPacked = true;
+
break;
}
case GMM_FORMAT_I420: // I420 = IYUV
@@ -242,6 +257,8 @@
*pUOffsetX = UOffset % pTexInfo->Pitch;
*pUOffsetY = UOffset / pTexInfo->Pitch;
+ YHeight = GFX_CEIL_DIV(YSize + 2 * VSize, WidthBytesPhysical);
+
break;
}
case GMM_FORMAT_NV12:
@@ -258,7 +275,21 @@
// YYYYYYYY
// [UV-Packing]
*pUOffsetX = *pVOffsetX = 0;
- *pUOffsetY = *pVOffsetY = Height;
+ YHeight = GFX_ALIGN(Height, __GMM_EVEN_ROW);
+ *pUOffsetY = *pVOffsetY = YHeight;
+
+ if((pTexInfo->Format == GMM_FORMAT_NV12) ||
+ (pTexInfo->Format == GMM_FORMAT_NV21) ||
+ (pTexInfo->Format == GMM_FORMAT_P010) ||
+ (pTexInfo->Format == GMM_FORMAT_P012) ||
+ (pTexInfo->Format == GMM_FORMAT_P016))
+ {
+ VHeight = GFX_CEIL_DIV(Height, 2);
+ }
+ else
+ {
+ VHeight = YHeight; // U/V plane is same as Y
+ }
UVPacked = true;
break;
@@ -270,9 +301,19 @@
}
}
- if(((pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf) &&
- (pTexInfo->Flags.Info.StdSwizzle || UVPacked)) ||
- pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
+ pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_Y] = YHeight;
+ if(pTexInfo->OffsetInfo.Plane.NoOfPlanes == 2)
+ {
+ pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U] = VHeight;
+ }
+ else if(pTexInfo->OffsetInfo.Plane.NoOfPlanes == 3)
+ {
+ pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_U] =
+ pTexInfo->OffsetInfo.Plane.UnAligned.Height[GMM_PLANE_V] = VHeight;
+ }
+
+
+ if(GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]) || pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
{
GMM_GFX_SIZE_T TileHeight = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileHeight;
GMM_GFX_SIZE_T TileWidth = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileWidth;
@@ -280,7 +321,9 @@
*pUOffsetX = GFX_ALIGN(*pUOffsetX, TileWidth);
*pUOffsetY = GFX_ALIGN(*pUOffsetY, TileHeight);
*pVOffsetX = GFX_ALIGN(*pVOffsetX, TileWidth);
- *pVOffsetY = GFX_ALIGN(*pVOffsetY, TileHeight);
+ *pVOffsetY = UVPacked ?
+ GFX_ALIGN(*pVOffsetY, TileHeight) :
+ GFX_ALIGN(YHeight, TileHeight) + GFX_ALIGN(VHeight, TileHeight);
if(pTexInfo->Flags.Gpu.UnifiedAuxSurface && pTexInfo->Flags.Gpu.__NonMsaaTileYCcs)
{
diff --git a/Source/GmmLib/Texture/GmmTexture.h b/Source/GmmLib/Texture/GmmTexture.h
index cc5564c..ecb023e 100644
--- a/Source/GmmLib/Texture/GmmTexture.h
+++ b/Source/GmmLib/Texture/GmmTexture.h
@@ -253,7 +253,9 @@
UnitAlignHeight = pPlatform->TexAlign.XAdapter.Height;
UnitAlignWidth = pPlatform->TexAlign.XAdapter.Width;
}
- else if(((pTexInfo->Flags.Gpu.CCS && GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN9_CORE)) &&
+ else if(((pTexInfo->Flags.Gpu.MCS &&
+ GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN12_CORE) ||
+ (pTexInfo->Flags.Gpu.CCS && GFX_GET_CURRENT_RENDERCORE(pPlatform->Platform) >= IGFX_GEN9_CORE)) &&
(pTexInfo->MSAA.NumSamples > 1))
{
UnitAlignWidth = 16;
diff --git a/Source/GmmLib/Texture/GmmTextureAlloc.cpp b/Source/GmmLib/Texture/GmmTextureAlloc.cpp
index c39dfdf..a7505dd 100644
--- a/Source/GmmLib/Texture/GmmTextureAlloc.cpp
+++ b/Source/GmmLib/Texture/GmmTextureAlloc.cpp
@@ -980,11 +980,7 @@
__GMM_ASSERTPTR(pTexInfo, GMM_ERROR);
__GMM_ASSERTPTR(pRestrictions, GMM_ERROR);
__GMM_ASSERT(!pTexInfo->Flags.Info.TiledW);
- // Client should always give us linear-fallback option for planar surfaces,
- // except for MMC surfaces, which are TileY.
- //__GMM_ASSERT(pTexInfo->Flags.Info.Linear || pTexInfo->Flags.Gpu.MMC);
- pTexInfo->Flags.Info.Linear = 1;
- pTexInfo->TileMode = TILE_NONE;
+ pTexInfo->TileMode = TILE_NONE;
const GMM_PLATFORM_INFO *pPlatform = GMM_OVERRIDE_PLATFORM_INFO(pTexInfo);
@@ -1022,10 +1018,7 @@
Height = YHeight + 2 * VHeight; // One VHeight for V and one for U.
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
- VHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@@ -1043,10 +1036,7 @@
Height = YHeight + 2 * VHeight;
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
- VHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@@ -1101,10 +1091,7 @@
Height = YHeight + 2 * VHeight;
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so U is properly aligned).
- YHeight, true); // <-- VHeight alignment needed (so V is properly aligned).
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 3;
break;
}
@@ -1125,11 +1112,6 @@
Height = YHeight + VHeight;
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so U/V are properly aligned, vertically).
- 0, false); // <-- VHeight alignment NOT needed (since U/V aren't on top of eachother).
-
// With SURFACE_STATE.XOffset support, the U-V interface has
// much lighter restrictions--which will be naturally met by
// surface pitch restrictions (i.e. dividing an IMC2/4 pitch
@@ -1138,7 +1120,8 @@
// Not technically UV packed but sizing works out the same
// if the resource is std swizzled
- UVPacked = pTexInfo->Flags.Info.StdSwizzle ? true : false;
+ UVPacked = true;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
break;
}
@@ -1155,7 +1138,7 @@
// YYYYYYYY
// YYYYYYYY
// [UV-Packing]
-
+ YHeight = GFX_ALIGN(pTexInfo->BaseHeight, __GMM_EVEN_ROW);
if((pTexInfo->Format == GMM_FORMAT_NV12) ||
(pTexInfo->Format == GMM_FORMAT_NV21) ||
(pTexInfo->Format == GMM_FORMAT_P010) ||
@@ -1179,11 +1162,6 @@
(pTexInfo->Format == GMM_FORMAT_P208))
{
WidthBytesPhysical = GFX_ALIGN(WidthBytesPhysical, 2); // If odd YWidth, pitch bumps-up to fit rounded-up U/V planes.
-
- FillTexPlanar_SetTilingBasedOnRequiredAlignment(
- pTexInfo,
- YHeight, true, // <-- YHeight alignment needed (so UV is properly aligned).
- 0, false); // <-- VHeight alignment NOT needed (since U/V aren't on top of eachother).
}
else //if(pTexInfo->Format == GMM_FORMAT_NV11)
{
@@ -1195,8 +1173,8 @@
pTexInfo->Flags.Info.Linear = 1;
}
- UVPacked = true;
-
+ UVPacked = true;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 2;
break;
}
case GMM_FORMAT_I420: // IYUV & I420: are identical to YV12 except,
@@ -1239,12 +1217,12 @@
Height = GFX_CEIL_DIV(YSize + UVSize, WidthBytesPhysical);
// Tiling not supported, since YPitch != UVPitch...
- pTexInfo->Flags.Info.TiledY = 0;
- pTexInfo->Flags.Info.TiledYf = 0;
- pTexInfo->Flags.Info.TiledYs = 0;
- pTexInfo->Flags.Info.TiledX = 0;
- pTexInfo->Flags.Info.Linear = 1;
-
+ pTexInfo->Flags.Info.TiledY = 0;
+ pTexInfo->Flags.Info.TiledYf = 0;
+ pTexInfo->Flags.Info.TiledYs = 0;
+ pTexInfo->Flags.Info.TiledX = 0;
+ pTexInfo->Flags.Info.Linear = 1;
+ pTexInfo->OffsetInfo.Plane.NoOfPlanes = 1;
break;
}
default:
@@ -1259,8 +1237,7 @@
SetTileMode(pTexInfo);
- // If the Surface has Odd height dimension, we will fall back to Linear Format.
- // If MMC is enabled, disable MMC during such cases.
+ // MMC is not supported for linear formats.
if(pTexInfo->Flags.Gpu.MMC)
{
if(!(pTexInfo->Flags.Info.TiledY || pTexInfo->Flags.Info.TiledYf || pTexInfo->Flags.Info.TiledYs))
@@ -1298,19 +1275,20 @@
pRestrictions->RenderPitchAlignment <<= LShift;
}
- // For std swizzled and UV packed tile Ys/Yf cases, the planes
- // must be tile-boundary aligned. Actual alignment is handled
- // in FillPlanarOffsetAddress, but height and width must
- // be adjusted for correct size calculation
- if((pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf) &&
- (pTexInfo->Flags.Info.StdSwizzle || UVPacked))
+ // For Tiled Planar surfaces, the planes must be tile-boundary aligned.
+ // Actual alignment is handled in FillPlanarOffsetAddress, but height
+ // and width must be adjusted for correct size calculation
+ if(GMM_IS_TILED(pPlatform->TileInfo[pTexInfo->TileMode]))
{
uint32_t TileHeight = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileHeight;
uint32_t TileWidth = pGmmGlobalContext->GetPlatformInfo().TileInfo[pTexInfo->TileMode].LogicalTileWidth;
+ pTexInfo->OffsetInfo.Plane.IsTileAlignedPlanes = true;
+
Height = GFX_ALIGN(YHeight, TileHeight) + (GFX_ALIGN(VHeight, TileHeight) * (UVPacked ? 1 : 2));
- if(UVPacked)
+ if(pTexInfo->Format == GMM_FORMAT_IMC2 || // IMC2, IMC4 needs even tile columns
+ pTexInfo->Format == GMM_FORMAT_IMC4)
{
// If the UV planes are packed then the surface pitch must be
// padded out so that the tile-aligned UV data will fit.
@@ -1323,7 +1301,10 @@
}
}
- pTexInfo->Flags.Info.RedecribedPlanes = 1;
+ if(pTexInfo->Flags.Info.TiledYs || pTexInfo->Flags.Info.TiledYf)
+ {
+ pTexInfo->Flags.Info.RedecribedPlanes = true;
+ }
}
//Special case LKF MMC compressed surfaces
@@ -1610,4 +1591,4 @@
WidthBytesRender += pPlatform->TileInfo[pTexInfo->TileMode].LogicalTileWidth;
WidthBytesPhysical = WidthBytesLock = WidthBytesRender;
}
-}
\ No newline at end of file
+}
diff --git a/Source/GmmLib/ULT/CMakeLists.txt b/Source/GmmLib/ULT/CMakeLists.txt
index 1d868fb..6f51725 100644
--- a/Source/GmmLib/ULT/CMakeLists.txt
+++ b/Source/GmmLib/ULT/CMakeLists.txt
@@ -1,131 +1,141 @@
-# Copyright(c) 2017 Intel Corporation
-
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files(the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and / or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-
-set (EXE_NAME GMMULT)
-
-set(GMMULT_HEADERS
- GmmCachePolicyULT.h
- GmmCommonULT.h
- GmmGen10CachePolicyULT.h
- GmmGen10ResourceULT.h
- GmmGen11CachePolicyULT.h
- GmmGen11ResourceULT.h
- GmmGen9CachePolicyULT.h
- GmmGen9ResourceULT.h
- GmmResourceULT.h
- stdafx.h
- targetver.h
- )
-
-set(GMMULT_SOURCES
- GmmCachePolicyULT.cpp
- GmmCommonULT.cpp
- GmmGen10CachePolicyULT.cpp
- GmmGen10ResourceULT.cpp
- GmmGen11CachePolicyULT.cpp
- GmmGen11ResourceULT.cpp
- GmmGen9CachePolicyULT.cpp
- GmmGen9ResourceULT.cpp
- GmmResourceCpuBltULT.cpp
- GmmResourceULT.cpp
- googletest/src/gtest-all.cc
- GmmULT.cpp
-)
-
-source_group("Source Files\\Cache Policy" FILES
- GmmCachePolicyULT.cpp
- GmmGen9CachePolicyULT.cpp
- GmmGen10CachePolicyULT.cpp
- GmmGen11CachePolicyULT.cpp
- )
-
-source_group("Source Files\\Resource" FILES
- GmmGen10ResourceULT.cpp
- GmmGen9ResourceULT.cpp
- GmmResourceCpuBltULT.cpp
- GmmResourceULT.cpp
- )
-
-source_group("Header Files\\Cache Policy" FILES
- GmmCachePolicyULT.h
- GmmGen10CachePolicyULT.h
- GmmGen11CachePolicyULT.h
- GmmGen9CachePolicyULT.h
- )
-
-source_group("Header Files\\Resource" FILES
- GmmGen10ResourceULT.h
- GmmGen9ResourceULT.h
- GmmResourceULT.h
- )
-
-source_group("gtest" FILES
- googletest/gtest/gtest.h
- googletest/src/gtest-all.cc
- )
-
-include_directories(BEFORE ./)
-
-include_directories(BEFORE ${PROJECT_SOURCE_DIR})
-
-include_directories(
- googletest
- googletest/gtest
- ${BS_DIR_INC}/umKmInc
- ${BS_DIR_INC}
- ${BS_DIR_GMMLIB}/inc
- ${BS_DIR_INC}/common
- )
-
-macro(GmmLibULTSetTargetConfig ultTarget)
- if (TARGET ${ultTarget})
- set_property(TARGET ${ultTarget} APPEND PROPERTY COMPILE_DEFINITIONS
- $<$<CONFIG:Release>: _RELEASE>
- $<$<CONFIG:ReleaseInternal>: _RELEASE_INTERNAL>
- $<$<CONFIG:Debug>: _DEBUG>
- )
- endif()
-
-endmacro()
-
-add_executable(${EXE_NAME} ${GMMULT_HEADERS} ${GMMULT_SOURCES})
-
-GmmLibULTSetTargetConfig(${EXE_NAME})
-
-set_property(TARGET ${EXE_NAME} APPEND PROPERTY COMPILE_DEFINITIONS __GMM GMM_LIB_DLL __UMD)
-
-if(NOT TARGET igfx_gmmumd_dll)
- add_subdirectory("${BS_DIR_GMMLIB}" "${CMAKE_BINARY_DIR}/gmmlib/ult")
-endif()
-target_link_libraries(${EXE_NAME} igfx_gmmumd_dll)
-
-target_link_libraries(${EXE_NAME}
- pthread
- dl
-)
-
-add_custom_target(Run_ULT ALL DEPENDS GMMULT)
-
-add_custom_command(
- TARGET Run_ULT
- POST_BUILD
- COMMAND echo running ULTs
- COMMAND "${CMAKE_COMMAND}" -E env "LD_LIBRARY_PATH=$<TARGET_FILE_DIR:igfx_gmmumd_dll>" ${CMAKE_CFG_INTDIR}/${EXE_NAME} --gtest_filter=CTest*
+# Copyright(c) 2017 Intel Corporation
+
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files(the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and / or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+set (EXE_NAME GMMULT)
+
+set(GMMULT_HEADERS
+ GmmCachePolicyULT.h
+ GmmCommonULT.h
+ GmmGen10CachePolicyULT.h
+ GmmGen10ResourceULT.h
+ GmmGen11CachePolicyULT.h
+ GmmGen11ResourceULT.h
+ GmmGen12ResourceULT.h
+ GmmGen12CachePolicyULT.h
+ GmmGen9CachePolicyULT.h
+ GmmGen9ResourceULT.h
+ GmmResourceULT.h
+ stdafx.h
+ targetver.h
+ )
+
+set(GMMULT_SOURCES
+ GmmCachePolicyULT.cpp
+ GmmCommonULT.cpp
+ GmmGen10CachePolicyULT.cpp
+ GmmGen10ResourceULT.cpp
+ GmmGen11CachePolicyULT.cpp
+ GmmGen12CachePolicyULT.cpp
+ GmmGen11ResourceULT.cpp
+ GmmGen12ResourceULT.cpp
+ GmmGen9CachePolicyULT.cpp
+ GmmGen9ResourceULT.cpp
+ GmmResourceCpuBltULT.cpp
+ GmmResourceULT.cpp
+ googletest/src/gtest-all.cc
+ GmmULT.cpp
+)
+
+source_group("Source Files\\Cache Policy" FILES
+ GmmCachePolicyULT.cpp
+ GmmGen9CachePolicyULT.cpp
+ GmmGen10CachePolicyULT.cpp
+ GmmGen11CachePolicyULT.cpp
+ GmmGen12CachePolicyULT.cpp
+ )
+
+source_group("Source Files\\Resource" FILES
+ GmmGen10ResourceULT.cpp
+ GmmGen11ResourceULT.cpp
+ GmmGen12ResourceULT.cpp
+ GmmGen9ResourceULT.cpp
+ GmmResourceCpuBltULT.cpp
+ GmmResourceULT.cpp
+ )
+
+source_group("Header Files\\Cache Policy" FILES
+ GmmCachePolicyULT.h
+ GmmGen10CachePolicyULT.h
+ GmmGen11CachePolicyULT.h
+ GmmGen12CachePolicyULT.h
+ GmmGen9CachePolicyULT.h
+ )
+
+source_group("Header Files\\Resource" FILES
+ GmmGen12ResourceULT.h
+ GmmGen11ResourceULT.h
+ GmmGen10ResourceULT.h
+ GmmGen9ResourceULT.h
+ GmmResourceULT.h
+ )
+
+source_group("gtest" FILES
+ googletest/gtest/gtest.h
+ googletest/src/gtest-all.cc
+ )
+
+include_directories(BEFORE ./)
+
+include_directories(BEFORE ${PROJECT_SOURCE_DIR})
+
+include_directories(
+ googletest
+ googletest/gtest
+ ${BS_DIR_INC}/umKmInc
+ ${BS_DIR_INC}
+ ${BS_DIR_GMMLIB}/inc
+ ${BS_DIR_INC}/common
+ )
+
+macro(GmmLibULTSetTargetConfig ultTarget)
+ if (TARGET ${ultTarget})
+ set_property(TARGET ${ultTarget} APPEND PROPERTY COMPILE_DEFINITIONS
+ $<$<CONFIG:Release>: _RELEASE>
+ $<$<CONFIG:ReleaseInternal>: _RELEASE_INTERNAL>
+ $<$<CONFIG:Debug>: _DEBUG>
+ )
+ endif()
+
+endmacro()
+
+add_executable(${EXE_NAME} ${GMMULT_HEADERS} ${GMMULT_SOURCES})
+
+GmmLibULTSetTargetConfig(${EXE_NAME})
+
+set_property(TARGET ${EXE_NAME} APPEND PROPERTY COMPILE_DEFINITIONS __GMM GMM_LIB_DLL __UMD)
+
+if(NOT TARGET igfx_gmmumd_dll)
+ add_subdirectory("${BS_DIR_GMMLIB}" "${CMAKE_BINARY_DIR}/gmmlib/ult")
+endif()
+target_link_libraries(${EXE_NAME} igfx_gmmumd_dll)
+
+target_link_libraries(${EXE_NAME}
+ pthread
+ dl
+)
+
+add_custom_target(Run_ULT ALL DEPENDS GMMULT)
+
+add_custom_command(
+ TARGET Run_ULT
+ POST_BUILD
+ COMMAND echo running ULTs
+ COMMAND "${CMAKE_COMMAND}" -E env "LD_LIBRARY_PATH=$<TARGET_FILE_DIR:igfx_gmmumd_dll>" ${CMAKE_CFG_INTDIR}/${EXE_NAME} --gtest_filter=CTest*
)
\ No newline at end of file
diff --git a/Source/GmmLib/ULT/GmmGen12CachePolicyULT.cpp b/Source/GmmLib/ULT/GmmGen12CachePolicyULT.cpp
new file mode 100644
index 0000000..40b90c6
--- /dev/null
+++ b/Source/GmmLib/ULT/GmmGen12CachePolicyULT.cpp
@@ -0,0 +1,238 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+#include "GmmGen12CachePolicyULT.h"
+
+using namespace std;
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Sets up common environment for Cache Policy fixture tests. This is called once per
+/// test case before executing all tests under resource fixture test case.
+/// It also calls SetUpTestCase from CommonULT to initialize global context and others.
+///
+/////////////////////////////////////////////////////////////////////////////////////
+void CTestGen12CachePolicy::SetUpTestCase()
+{
+ GfxPlatform.eProductFamily = IGFX_TIGERLAKE_LP;
+ GfxPlatform.eRenderCoreFamily = IGFX_GEN12_CORE;
+
+ AllocateAdapterInfo();
+
+ pGfxAdapterInfo->SystemInfo.L3CacheSizeInKb = 3072;
+
+ const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrEDram = false;
+ const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLLCBypass = 1;
+
+ CommonULT::SetUpTestCase();
+
+ printf("%s\n", __FUNCTION__);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// cleans up once all the tests finish execution. It also calls TearDownTestCase
+/// from CommonULT to destroy global context and others.
+///
+/////////////////////////////////////////////////////////////////////////////////////
+void CTestGen12CachePolicy::TearDownTestCase()
+{
+ printf("%s\n", __FUNCTION__);
+
+ CommonULT::TearDownTestCase();
+}
+
+void CTestGen12CachePolicy::CheckL3CachePolicy()
+{
+ const uint32_t L3_WB_CACHEABLE = 0x3;
+ const uint32_t L3_UNCACHEABLE = 0x1;
+
+ // Check Usage MOCS index against MOCS settings
+ for(uint32_t Usage = GMM_RESOURCE_USAGE_UNKNOWN; Usage < GMM_RESOURCE_USAGE_MAX; Usage++)
+ {
+ GMM_CACHE_POLICY_ELEMENT ClientRequest = pGmmULTClientContext->GetCachePolicyElement((GMM_RESOURCE_USAGE_TYPE)Usage);
+ uint32_t AssignedMocsIdx = ClientRequest.MemoryObjectOverride.Gen12.Index;
+ GMM_CACHE_POLICY_TBL_ELEMENT Mocs = pGmmULTClientContext->GetCachePolicyTlbElement(AssignedMocsIdx);
+
+ //printf("Usage: %d --> Index: [%d]\n", Usage, AssignedMocsIdx);
+
+ EXPECT_EQ(0, Mocs.L3.ESC) << "Usage# " << Usage << ": ESC is non-zero";
+ EXPECT_EQ(0, Mocs.L3.SCC) << "Usage# " << Usage << ": SCC is non-zero";
+ EXPECT_EQ(0, Mocs.L3.Reserved) << "Usage# " << Usage << ": Reserved field is non-zero";
+
+ // Check if Mocs Index is not greater than GMM_MAX_NUMBER_MOCS_INDEXES
+ EXPECT_GT(GMM_MAX_NUMBER_MOCS_INDEXES, AssignedMocsIdx) << "Usage# " << Usage << ": MOCS Index greater than MAX allowed (63)";
+
+ if(ClientRequest.L3Eviction == 0x2) //63
+ {
+ if((GMM_RESOURCE_USAGE_TYPE)Usage == GMM_RESOURCE_USAGE_L3_EVICTION)
+ {
+ EXPECT_EQ(AssignedMocsIdx, 63) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
+ EXPECT_EQ(0, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for L3Eviction type# " << ClientRequest.L3Eviction;
+ }
+ else
+ {
+ EXPECT_NE(AssignedMocsIdx, 63) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
+ EXPECT_EQ(1, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for L3Eviction type# " << ClientRequest.L3Eviction;
+ }
+ }
+ else if(ClientRequest.L3Eviction == 0x3) //61
+ {
+ EXPECT_EQ(AssignedMocsIdx, 61) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
+ EXPECT_EQ(1, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for L3Eviction type# " << ClientRequest.L3Eviction;
+ }
+ else if(Usage == GMM_RESOURCE_USAGE_CCS) //60
+ {
+ EXPECT_EQ(AssignedMocsIdx, 60) << "Usage# " << Usage << ": Incorrect Index for CCS";
+ EXPECT_EQ(0, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for CCS";
+ }
+ else if(Usage == GMM_RESOURCE_USAGE_MOCS_62) //62
+ {
+ EXPECT_EQ(AssignedMocsIdx, 62) << "Usage# " << Usage << ": Incorrect Index for MOCS_62";
+ EXPECT_EQ(0, ClientRequest.L3) << "Usage# " << Usage << ": Incorrect L3 cacheability for MOCS#62";
+ }
+ // Check if assigned Index setting is appropriate for HDCL1 setting
+ else if(ClientRequest.HDCL1)
+ {
+ EXPECT_GE(AssignedMocsIdx, GMM_GEN10_HDCL1_MOCS_INDEX_START) << "Usage# " << Usage << ": Incorrect Index for HDCL1 setting";
+ }
+ else
+ {
+ EXPECT_LT(AssignedMocsIdx, GMM_GEN10_HDCL1_MOCS_INDEX_START) << "Usage# " << Usage << ": Incorrect Index for HDCL1 setting";
+ }
+
+ if(ClientRequest.L3)
+ {
+ EXPECT_EQ(L3_WB_CACHEABLE, Mocs.L3.Cacheability) << "Usage# " << Usage << ": Incorrect L3 cachebility setting";
+ }
+ else
+ {
+ EXPECT_EQ(L3_UNCACHEABLE, Mocs.L3.Cacheability) << "Usage# " << Usage << ": Incorrect L3 cachebility setting";
+ }
+ }
+}
+
+
+TEST_F(CTestGen12CachePolicy, TestL3CachePolicy)
+{
+ CheckL3CachePolicy();
+}
+
+
+void CTestGen12CachePolicy::CheckLlcEdramCachePolicy()
+{
+ const uint32_t TargetCache_LLC = 1;
+
+ const uint32_t LeCC_UNCACHEABLE = 0x0;
+ const uint32_t LeCC_WC_UNCACHEABLE = 0x1;
+ const uint32_t LeCC_WB_CACHEABLE = 0x3;
+ const uint32_t LeCC_WT_CACHEABLE = 0x2;
+
+ // Check Usage MOCS index against MOCS settings
+ for(uint32_t Usage = GMM_RESOURCE_USAGE_UNKNOWN; Usage < GMM_RESOURCE_USAGE_MAX; Usage++)
+ {
+ GMM_CACHE_POLICY_ELEMENT ClientRequest = pGmmULTClientContext->GetCachePolicyElement((GMM_RESOURCE_USAGE_TYPE)Usage);
+ uint32_t AssignedMocsIdx = ClientRequest.MemoryObjectOverride.Gen12.Index;
+ GMM_CACHE_POLICY_TBL_ELEMENT Mocs = pGmmULTClientContext->GetCachePolicyTlbElement(AssignedMocsIdx);
+
+ // Check for unused fields
+ EXPECT_EQ(0, Mocs.LeCC.AOM) << "Usage# " << Usage << ": AOM is non-zero";
+ EXPECT_EQ(0, Mocs.LeCC.CoS) << "Usage# " << Usage << ": CoS is non-zero";
+ EXPECT_EQ(0, Mocs.LeCC.PFM) << "Usage# " << Usage << ": PFM is non-zero";
+ EXPECT_EQ(0, Mocs.LeCC.SCC) << "Usage# " << Usage << ": SCC is non-zero";
+ // SCF field might be set for LKF/Gen12+ platforms;
+ EXPECT_EQ(0, Mocs.LeCC.SCF & !const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLLCBypass) << "Usage# " << Usage << ": SCF is non-zero";
+ EXPECT_EQ(0, Mocs.LeCC.ESC) << "Usage# " << Usage << ": ESC is non-zero";
+ EXPECT_EQ(0, Mocs.LeCC.Reserved) << "Usage# " << Usage << ": Reserved field is non-zero";
+
+ // Check for age
+ EXPECT_EQ(ClientRequest.AGE, Mocs.LeCC.LRUM) << "Usage# " << Usage << ": Incorrect AGE settings";
+
+ // Check for Snoop Setting
+ EXPECT_EQ(ClientRequest.SSO, Mocs.LeCC.SelfSnoop) << "Usage# " << Usage << ": Self Snoop is non-zero";
+
+ // Check if Mocs Index is not greater than GMM_MAX_NUMBER_MOCS_INDEXES
+ EXPECT_GT(GMM_MAX_NUMBER_MOCS_INDEXES, AssignedMocsIdx) << "Usage# " << Usage << ": MOCS Index greater than MAX allowed (63)";
+
+ if(ClientRequest.L3Eviction == 0x2) //63
+ {
+ GMM_CACHE_POLICY_ELEMENT MOCS63 = pGmmULTClientContext->GetCachePolicyElement(GMM_RESOURCE_USAGE_L3_EVICTION);
+ if((GMM_RESOURCE_USAGE_TYPE)Usage == GMM_RESOURCE_USAGE_L3_EVICTION)
+ {
+ EXPECT_EQ(AssignedMocsIdx, 63) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
+ }
+ else
+ {
+ MOCS63.L3 = 1; //Override L3 to test , since Hw forces it to L3-uncached
+ EXPECT_NE(AssignedMocsIdx, 63) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
+ EXPECT_EQ(MOCS63.Value, ClientRequest.Value) << "Usage# " << Usage << ": Incorrect usage for L3Eviction type# " << ClientRequest.L3Eviction;
+ }
+ }
+ else if(ClientRequest.L3Eviction == 0x3) //61
+ {
+ GMM_CACHE_POLICY_ELEMENT MOCS61 = pGmmULTClientContext->GetCachePolicyElement(GMM_RESOURCE_USAGE_L3_EVICTION_SPECIAL);
+ EXPECT_EQ(AssignedMocsIdx, 61) << "Usage# " << Usage << ": Incorrect Index for L3Eviction type# " << ClientRequest.L3Eviction;
+ EXPECT_EQ(0, ClientRequest.LLC) << "Usage# " << Usage << ": Incorrect LLC cacheability for L3Eviction type# " << ClientRequest.L3Eviction;
+ EXPECT_EQ(MOCS61.Value, ClientRequest.Value) << "Usage# " << Usage << ": Incorrect usage for L3Eviction type# " << ClientRequest.L3Eviction;
+ }
+ else if(Usage == GMM_RESOURCE_USAGE_CCS) //60
+ {
+ EXPECT_EQ(AssignedMocsIdx, 60) << "Usage# " << Usage << ": Incorrect Index for CCS";
+ }
+ else if(Usage == GMM_RESOURCE_USAGE_MOCS_62) //62
+ {
+ EXPECT_EQ(AssignedMocsIdx, 62) << "Usage# " << Usage << ": Incorrect Index for MOCS_62";
+ }
+ // Check if assigned Index setting is appropriate for HDCL1 setting
+ else if(ClientRequest.HDCL1)
+ {
+ EXPECT_GE(AssignedMocsIdx, GMM_GEN10_HDCL1_MOCS_INDEX_START) << "Usage# " << Usage << ": Incorrect Index for HDCL1 setting";
+ }
+ else
+ {
+ EXPECT_LT(AssignedMocsIdx, GMM_GEN10_HDCL1_MOCS_INDEX_START) << "Usage# " << Usage << ": Incorrect Index for HDCL1 setting";
+ }
+
+ if(!ClientRequest.LLC && !ClientRequest.ELLC) // Uncached
+ {
+ EXPECT_EQ(LeCC_WC_UNCACHEABLE, Mocs.LeCC.Cacheability) << "Usage# " << Usage << ": Incorrect LLC/eDRAM cachebility setting";
+ }
+ else
+ {
+ if(ClientRequest.LLC) // LLC only
+ {
+ EXPECT_EQ(TargetCache_LLC, Mocs.LeCC.TargetCache) << "Usage# " << Usage << ": Incorrect target cache setting";
+
+ EXPECT_EQ(LeCC_WB_CACHEABLE, Mocs.LeCC.Cacheability) << "Usage# " << Usage << ": Incorrect LLC cachebility setting";
+ }
+ else
+ {
+ EXPECT_EQ(TargetCache_LLC, Mocs.LeCC.TargetCache) << "Usage# " << Usage << ": Incorrect target cache setting";
+
+ EXPECT_EQ(LeCC_WC_UNCACHEABLE, Mocs.LeCC.Cacheability) << "Usage# " << Usage << ": Incorrect LLC cachebility setting";
+ }
+ }
+ }
+}
+
+TEST_F(CTestGen12CachePolicy, TestLlcEdramCachePolicy)
+{
+ CheckLlcEdramCachePolicy();
+}
diff --git a/Source/GmmLib/ULT/GmmGen12CachePolicyULT.h b/Source/GmmLib/ULT/GmmGen12CachePolicyULT.h
new file mode 100644
index 0000000..256bded
--- /dev/null
+++ b/Source/GmmLib/ULT/GmmGen12CachePolicyULT.h
@@ -0,0 +1,37 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+#pragma once
+
+#include "GmmCachePolicyULT.h"
+
+class CTestGen12CachePolicy : public CTestCachePolicy
+{
+protected:
+ virtual void CheckL3CachePolicy();
+ virtual void CheckLlcEdramCachePolicy();
+
+public:
+ static void SetUpTestCase();
+ static void TearDownTestCase();
+};
+
diff --git a/Source/GmmLib/ULT/GmmGen12ResourceULT.cpp b/Source/GmmLib/ULT/GmmGen12ResourceULT.cpp
new file mode 100644
index 0000000..da4f9a4
--- /dev/null
+++ b/Source/GmmLib/ULT/GmmGen12ResourceULT.cpp
@@ -0,0 +1,2757 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+#include "GmmGen12ResourceULT.h"
+
+using namespace std;
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// Sets up common environment for Resource fixture tests. This is called once per
+/// test case before executing all tests under resource fixture test case.
+/// It also calls SetUpTestCase from CommonULT to initialize global context and others.
+///
+/// @see CTestGen12Resource::SetUpTestCase()
+///
+/////////////////////////////////////////////////////////////////////////////////////
+void CTestGen12Resource::SetUpTestCase()
+{
+ printf("%s\n", __FUNCTION__);
+ GfxPlatform.eProductFamily = IGFX_TIGERLAKE_LP;
+ GfxPlatform.eRenderCoreFamily = IGFX_GEN12_CORE;
+
+ pGfxAdapterInfo = (ADAPTER_INFO *)malloc(sizeof(ADAPTER_INFO));
+ if(pGfxAdapterInfo)
+ {
+ memset(pGfxAdapterInfo, 0, sizeof(ADAPTER_INFO));
+
+ pGfxAdapterInfo->SkuTable.FtrLinearCCS = 1; //legacy y =>0 - test both
+ pGfxAdapterInfo->SkuTable.FtrTileY = 1;
+ pGfxAdapterInfo->SkuTable.FtrLLCBypass = 1;
+ CommonULT::SetUpTestCase();
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// cleans up once all the tests finish execution. It also calls TearDownTestCase
+/// from CommonULT to destroy global context and others.
+///
+/// @see CTestGen12Resource::TearDownTestCase()
+/////////////////////////////////////////////////////////////////////////////////////
+void CTestGen12Resource::TearDownTestCase()
+{
+ printf("%s\n", __FUNCTION__);
+
+ CommonULT::TearDownTestCase();
+}
+
+/// @brief ULT for 2D TileYs Compressed Resource
+/// tests both Separate and Unified CCS allocation
+TEST_F(CTestGen12Resource, Test2DTileYsCompressedResource)
+{
+ const uint32_t HAlign[TEST_BPP_MAX] = {256, 256, 128, 128, 64}; //REM-comment: YS-shape in pixels per-bpp
+ const uint32_t VAlign[TEST_BPP_MAX] = {256, 128, 128, 64, 64};
+
+ const uint32_t TileSize[TEST_BPP_MAX][2] = {{256, 256}, //REM-comment: YS-shape in bytes per-bpp
+ {512, 128},
+ {512, 128},
+ {1024, 64},
+ {1024, 64}};
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+ gmmParams.Flags.Info.TiledYs = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1;
+ // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf its also used to pad surf
+ // to 4x1 tile (reqd by HW for perf reasons)
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-adapter shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+
+ //Allocate 1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YS_2D, bpp);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0]); // As wide as 1 Tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 1); // 1 Tile wide
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(64)); // 1 Tile Big
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not Tested
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 16KB aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ //EXPECT_EQ(0, ResourceInfo->GetRenderPitchInTiles() % GMM_KBYTE(16)); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ else
+ {
+ //Test AuxSurf H/Valign, size etc (Not POR- can be removed)
+ ALIGNMENT UnitAlign;
+ pGmmULTClientContext->GetExtendedTextureAlign(CCS_MODE(TileMode), UnitAlign);
+
+ EXPECT_EQ(UnitAlign.Width, ResourceInfo->GetAuxHAlign());
+ EXPECT_EQ(UnitAlign.Height, ResourceInfo->GetAuxVAlign());
+ EXPECT_EQ(0x80, ResourceInfo->GetUnifiedAuxPitch()); //TileY = 0x80 x 0x20
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeAllocation());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YS_2D, bpp);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 2); // As wide as 2 tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 2); // 2 tile wide
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(64) * 2); // 2 tile big
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 16KB aligned
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ else
+ { //Not POR
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X/Y dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YS_2D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = TileSize[i][1] + 1; // 1 row larger than 1 tile height
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ //**Not compression specific -BEGIN
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 2); // As wide as 2 tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 2); // 2 tile wide
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(64) * 4); // 4 tile big
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+ //**Not compression specific -END
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for 2D TileYf Compressed Resource
+TEST_F(CTestGen12Resource, Test2DTileYfCompressedResource)
+{
+ const uint32_t HAlign[TEST_BPP_MAX] = {64, 64, 32, 32, 16};
+ const uint32_t VAlign[TEST_BPP_MAX] = {64, 32, 32, 16, 16};
+
+ const uint32_t TileSize[TEST_BPP_MAX][2] = {{64, 64},
+ {128, 32},
+ {128, 32},
+ {256, 16},
+ {256, 16}};
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+ gmmParams.Flags.Info.TiledYf = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1;
+ // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf its also used to pad surf
+ // to 4x1 tile (reqd by HW )
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-adapter shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+
+ //Allocate 1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_2D, bpp);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, 4 * TileSize[i][0]); // As wide as 4 Tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 4 Tile wide
+ VerifyResourceSize<true>(ResourceInfo, 4 * GMM_KBYTE(4)); // 4 Tile Big
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not Tested
+
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_2D, bpp);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 4); // As wide as 2 tile, but 4-tile pitch alignment
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide, but 4-tile pitch alignment
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 4); // 2 tile big, but 4-tile pitch alignment
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X/Y dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_2D, bpp);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = TileSize[i][1] + 1; // 1 row larger than 1 tile height
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 4); // As wide as 2 tile, but 4-tile pitch alignment
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide, but 4-tile pitch alignment
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 4 * 2); // 4 tile wide; and 2-tile high
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for 2D TileY Compressed Resource
+/// Verifies, for every test bpp: HAlign/VAlign, pitch padded to a 4-tile
+/// boundary (HW requirement for compressed Yf/Y surfaces), surface size,
+/// 64KB main-surface base alignment, and CCS aux-surface size/offset —
+/// for 1x1, 2-tiles-in-X, and 2-tiles-in-X/Y allocations.
+TEST_F(CTestGen12Resource, Test2DTileYCompressedResource)
+{
+ const uint32_t HAlign = {16}; // expected horizontal alignment (pixels)
+ const uint32_t VAlign = {4}; // expected vertical alignment (rows)
+
+ const uint32_t TileSize[2] = {128, 32}; // TileY: 128B wide x 32 rows
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1;
+ // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf/Y its also used to pad surf
+ // to 4x1 tile (reqd by HW for perf reasons)
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-adapter shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+
+ //Allocate 1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y; // NOTE(review): not consumed in this loop body
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, 4 * TileSize[0]); // As wide as 4 Tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 4 Tile wide
+ VerifyResourceSize<true>(ResourceInfo, 4 * GMM_KBYTE(4)); // 4 Tile Big
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not Tested
+
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: aux offset is page-aligned and CCS covers main surface at 1:256 ratio
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ // Standalone aux allocation must match the unified-aux geometry of the same params
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y; // NOTE(review): not consumed in this loop body
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[0] * 4); // As wide as 2 tile, but 4-tile pitch alignment
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide, but 4-tile pitch alignment
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 4); // 2 tile big, but 4-tile pitch alignment
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: aux offset is page-aligned and CCS covers main surface at 1:256 ratio
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ // Standalone aux allocation must match the unified-aux geometry of the same params
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X/Y dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y; // NOTE(review): not consumed in this loop body
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = TileSize[1] + 1; // 1 row larger than 1 tile height
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[0] * 4); // As wide as 2 tile, but 4-tile pitch alignment
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide, but 4-tile pitch alignment
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 4 * 2); // 4 tile wide; and 2-tile high
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: aux offset is page-aligned and CCS covers main surface at 1:256 ratio
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ // Standalone aux allocation must match the unified-aux geometry of the same params
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for 2D TileY lossy-compressed resource (ETC2 format).
+/// Same verification pattern as the lossless test above, but with
+/// Format pinned to GMM_FORMAT_ETC2_RGB8 and relaxed HAlign (4 vs 16).
+/// NOTE(review): the bpp loop still runs TEST_BPP_MAX times even though
+/// Format is fixed — bpp only affects BaseWidth64 in loops 2/3; confirm intended.
+TEST_F(CTestGen12Resource, Test2DTileYLossyCompressedResource)
+{
+ const uint32_t HAlign = {4}; // expected horizontal alignment (pixels)
+ const uint32_t VAlign = {4}; // expected vertical alignment (rows)
+
+ const uint32_t TileSize[2] = {128, 32}; // TileY: 128B wide x 32 rows
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1;
+ // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf its also used to pad surf
+ // to 4x1 tile (reqd by HW for perf reasons)
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-adapter shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+
+ //Allocate 1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i); // NOTE(review): unused in this loop (Format fixed below)
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y; // NOTE(review): not consumed in this loop body
+ gmmParams.Format = GMM_FORMAT_ETC2_RGB8;
+ gmmParams.BaseWidth64 = 0x80;
+ gmmParams.BaseHeight = 0x20;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, 4 * TileSize[0]); // As wide as 4 Tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 4 Tile wide
+ VerifyResourceSize<true>(ResourceInfo, 4 * GMM_KBYTE(4)); // 4 Tile Big
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not Tested
+
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: aux offset is page-aligned and CCS covers main surface at 1:256 ratio
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ // Standalone aux allocation must match the unified-aux geometry of the same params
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i); // bpp only sizes the width below; Format stays ETC2
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y; // NOTE(review): not consumed in this loop body
+ gmmParams.Format = GMM_FORMAT_ETC2_RGB8;
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[0] * 4); // As wide as 2 tile, but 4-tile pitch alignment
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide, but 4-tile pitch alignment
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 4); // 2 tile big, but 4-tile pitch alignment
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: aux offset is page-aligned and CCS covers main surface at 1:256 ratio
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ // Standalone aux allocation must match the unified-aux geometry of the same params
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X/Y dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i); // bpp only sizes the width below; Format stays ETC2
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y; // NOTE(review): not consumed in this loop body
+ gmmParams.Format = GMM_FORMAT_ETC2_RGB8;
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = TileSize[1] + 1; // 1 row larger than 1 tile height
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[0] * 4); // As wide as 2 tile, but 4-tile pitch alignment
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide, but 4-tile pitch alignment
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 4); // 4 tile wide; max compressed height = 0x24/4 = 9, so fits in 1 tile-height
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+ //test main surface base alignment is 64KB
+ //For Yf/Y test main surface pitch is 4-tileYF/Y aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: aux offset is page-aligned and CCS covers main surface at 1:256 ratio
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ // Standalone aux allocation must match the unified-aux geometry of the same params
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+// TODO: add coverage for packed YUV formats:
+// Y416 (64bpp — how is 10/12/16-bit depth specified?),
+// Y410 (32bpp), Y216 (64bpp), YCRCB_NORMAL (16bpp), YCRCB_SWAPUV (16bpp),
+// YCRCB_SWAPUVY (16bpp), YCRCB_SWAPY (16bpp)
+
+/// @brief ULT for Planar Compressed Resource (TileYf, media-compressed).
+/// For NV12/NV21/P010/P016 the UV plane is redescribed at double bpp; this
+/// test verifies the pitch/size math for the redescribed planes and the
+/// per-plane CCS aux layout. Currently DISABLED (prefixed per gtest convention).
+TEST_F(CTestGen12Resource, DISABLED_TestPlanarYfCompressedResource)
+{
+ const uint32_t TileSize[TEST_BPP_MAX][2] = {
+ {64, 64}, {128, 32}, {128, 32}, {256, 16}, {256, 16}}; // TileYf
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ //gmmParams.Flags.Gpu.MMC = 1;
+ gmmParams.Flags.Gpu.CCS = 1;
+ gmmParams.Flags.Info.MediaCompressed = 1;
+ gmmParams.BaseWidth64 = 0x100;
+ gmmParams.BaseHeight = 0x50;
+ gmmParams.Depth = 0x1;
+ SetTileFlag(gmmParams, TEST_TILEYF); // TileYF only
+
+ //UV-Packed formats
+ GMM_RESOURCE_FORMAT Format[4] = {GMM_FORMAT_NV12, GMM_FORMAT_NV21, GMM_FORMAT_P010, GMM_FORMAT_P016};
+ for(auto fmt : Format)
+ {
+ gmmParams.Format = fmt; // 8bpp (NV12, NV21), 16bpp (P016,P010)
+
+ TEST_BPP Ybpp, UVbpp;
+ //Yf/Ys could be accessed on CPU/app where UV plane bpp is double
+ switch(pGmmULTClientContext->GetPlatformInfo().FormatTable[fmt].Element.BitsPer)
+ {
+ case 8:
+ Ybpp = TEST_BPP_8;
+ UVbpp = TEST_BPP_16;
+ break;
+ case 16:
+ Ybpp = TEST_BPP_16;
+ UVbpp = TEST_BPP_32;
+ break;
+ default:
+ return; // formats other than 8/16bpp are out of scope here
+ }
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1;
+
+ /*Aux is TileY (later Linear), not redescribing, its bytes are allocated using one bpp*/
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_2D, Ybpp); // NOTE(review): not consumed below
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ //Redescribed Pitch isn't modified unless Y, UV pitch differ
+ //But, original Pitch is padded to have even Tile, hence use Ybpp ExpectedPitch
+ //to verify Pitch, but redescribed size
+ uint32_t ExpectedPitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64 * (int)pow(2.0, Ybpp), TileSize[Ybpp][0] * 4);
+ uint32_t RedescribedPitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64 / 2 * (int)pow(2.0, UVbpp), TileSize[UVbpp][0] * 4);
+
+ //ExpectedPitch = GMM_ULT_ALIGN(ExpectedPitch, 2 * TileSize[Ybpp][0]); //pad to even tile
+ if(ExpectedPitch != RedescribedPitch)
+ {
+ ExpectedPitch = RedescribedPitch; // redescribed (UV) pitch wins when the two differ
+ }
+
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch);
+ VerifyResourcePitchInTiles<true>(ResourceInfo, ExpectedPitch / TileSize[Ybpp][0]);
+
+ // Plane sizes in 4KB tiles: rows-of-tiles per plane x tiles per row
+ int YSizeInTiles = (GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[Ybpp][1]) / TileSize[Ybpp][1]) *
+ RedescribedPitch / TileSize[Ybpp][0];
+ int UVSizeInTiles = (GMM_ULT_ALIGN(gmmParams.BaseHeight / 2, TileSize[UVbpp][1]) / TileSize[UVbpp][1]) *
+ RedescribedPitch / TileSize[UVbpp][0];
+
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * (YSizeInTiles + UVSizeInTiles));
+ VerifyResourceHAlign<true>(ResourceInfo, TileSize[UVbpp][0] / pow(2.0, UVbpp)); // For Yf/Ys planar redescription causes UV width, Y height alignment
+ VerifyResourceVAlign<true>(ResourceInfo, TileSize[Ybpp][1]);
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // N/A for non-mipped surface
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: aux offset is page-aligned and CCS covers main surface at 1:256 ratio
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ EXPECT_EQ(GMM_KBYTE(4) * 2, ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS)); // Y and UV Aux are on separate tiles
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ EXPECT_EQ(GMM_KBYTE(4) * 2, AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for Planar Y (legacy TileY) Compressed Resource.
+/// NV12/NV21/P010/P016 with media compression: verifies redescribed-pitch
+/// handling, per-plane sizes with 64KB-aligned Y/UV bases (AuxT 64K), and
+/// separate Y/UV CCS aux offsets.
+TEST_F(CTestGen12Resource, TestPlanarYCompressedResource)
+{
+ const uint32_t TileSize[2] = {128, 32}; // TileY: 128B wide x 32 rows
+ const uint32_t HAlign = 16;
+ const uint32_t VAlign = 4;
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Gpu.RenderTarget = 1;
+ gmmParams.Flags.Gpu.MMC = 1;
+ gmmParams.Flags.Gpu.CCS = 1;
+ gmmParams.Flags.Gpu.IndirectClearColor = 1;
+ gmmParams.Flags.Info.MediaCompressed = 1;
+ gmmParams.Flags.Info.NotLockable = 1;
+ gmmParams.Flags.Info.Cacheable = 1;
+ gmmParams.BaseWidth64 = 0xB2; // deliberately non-tile-aligned width
+ gmmParams.BaseHeight = 0x92; // deliberately non-tile-aligned height
+ gmmParams.Depth = 0x1;
+ SetTileFlag(gmmParams, TEST_TILEY); // TileY only
+
+ GMM_RESOURCE_FORMAT Format[4] = {GMM_FORMAT_NV12, GMM_FORMAT_NV21, GMM_FORMAT_P010, GMM_FORMAT_P016};
+ for(auto fmt : Format)
+ {
+ gmmParams.Format = fmt; // 8bpp (NV12, NV21), 16bpp (P016, P010)
+
+ TEST_BPP Ybpp, UVbpp;
+ //Yf/Ys could be accessed on CPU/app where UV plane bpp is double
+ switch(pGmmULTClientContext->GetPlatformInfo().FormatTable[fmt].Element.BitsPer)
+ {
+ case 8:
+ Ybpp = TEST_BPP_8;
+ UVbpp = TEST_BPP_16;
+ break;
+ case 16:
+ Ybpp = TEST_BPP_16;
+ UVbpp = TEST_BPP_32;
+ break;
+ default:
+ return; // formats other than 8/16bpp are out of scope here
+ }
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1;
+
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y; // NOTE(review): not consumed below
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ //Redescribed Pitch isn't modified unless Y, UV pitch differ
+ //But, original Pitch is padded to have even Tile, hence use Ybpp ExpectedPitch
+ //to verify Pitch, but redescribed size
+ uint32_t ExpectedPitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64 * (int)pow(2.0, Ybpp), TileSize[0] * 4);
+ uint32_t RedescribedPitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64 / 2 * (int)pow(2.0, UVbpp), TileSize[0] * 4);
+
+ //ExpectedPitch = GMM_ULT_ALIGN(ExpectedPitch, 2 * TileSize[Ybpp][0]); //pad to even tile
+ if(ExpectedPitch != RedescribedPitch)
+ {
+ ExpectedPitch = RedescribedPitch; // redescribed (UV) pitch wins when the two differ
+ }
+
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch);
+ VerifyResourcePitchInTiles<true>(ResourceInfo, ExpectedPitch / TileSize[0]);
+
+ int YSizeInTiles = (GMM_ULT_ALIGN(gmmParams.BaseHeight, 4 * TileSize[1]) / TileSize[1]) * //Default 64K-alignment for Y/UV base (AuxT 64K)
+ RedescribedPitch / TileSize[0];
+ int UVSizeInTiles = (GMM_ULT_ALIGN(gmmParams.BaseHeight / 2, 4 * TileSize[1]) / TileSize[1]) * //Default 64K-alignment for Y/UV base (AuxT 64K)
+ RedescribedPitch / TileSize[0];
+
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * (YSizeInTiles + UVSizeInTiles)); //when main-surf planes are tile-aligned, make it verify-true
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // N/A for non-mipped surface
+
+ //test main surface base alignment is 64KB
+ //For Y test main surface pitch is 4-tileY aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: Y and UV aux offsets are each page-aligned; CCS >= main/256
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_UV_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4) * 2, ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS)); // Y and UV Aux are on separate tiles
+
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for Planar Ys (TileYS) Compressed resource.
+/// NV12/NV21/P010/P016 with media compression on 64KB tiles: verifies
+/// redescribed-pitch handling, per-plane sizes in 64KB tiles, and the
+/// separate Y/UV CCS aux surfaces.
+TEST_F(CTestGen12Resource, TestPlanarYsCompressedResource)
+{
+ const TEST_TILE_TYPE TileTypeSupported = {TEST_TILEYS}; // NOTE(review): unused — SetTileFlag below hardcodes TEST_TILEYS
+
+ const uint32_t TileSize[TEST_BPP_MAX][2] = {
+ {256, 256}, {512, 128}, {512, 128}, {1024, 64}, {1024, 64}}; // TileYS
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Gpu.MMC = 1;
+ //gmmParams.Flags.Gpu.CCS = 1;
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1;
+ gmmParams.Flags.Info.MediaCompressed = 1;
+ gmmParams.BaseWidth64 = 0x100;
+ gmmParams.BaseHeight = 0x50;
+ gmmParams.Depth = 0x1;
+ SetTileFlag(gmmParams, TEST_TILEYS); // TileYS only
+
+ GMM_RESOURCE_FORMAT Format[4] = {GMM_FORMAT_NV12, GMM_FORMAT_NV21, GMM_FORMAT_P010, GMM_FORMAT_P016};
+ for(auto fmt : Format)
+ {
+ gmmParams.Format = fmt; // 8bpp(NV12) , P016 (16bpp), P010 (16bpp), NV21(8bpp)
+
+ TEST_BPP Ybpp, UVbpp;
+ //Yf/Ys could be accessed on CPU/app where UV plane bpp is double
+ switch(pGmmULTClientContext->GetPlatformInfo().FormatTable[gmmParams.Format].Element.BitsPer)
+ {
+ case 8:
+ Ybpp = TEST_BPP_8;
+ UVbpp = TEST_BPP_16;
+ break;
+ case 16:
+ Ybpp = TEST_BPP_16;
+ UVbpp = TEST_BPP_32;
+ break;
+ default:
+ return; // formats other than 8/16bpp are out of scope here
+ }
+
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1;
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YS_2D, Ybpp); // NOTE(review): not consumed below
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ //Redescribed Pitch isn't modified unless Y, UV pitch differ
+ //But, original Pitch is padded to have even Tile, hence use Ybpp ExpectedPitch
+ //to verify Pitch, but redescribed pitch to verify size
+ uint32_t ExpectedPitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64 * (int)pow(2.0, Ybpp), TileSize[Ybpp][0]);
+ uint32_t RedescribedPitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64 / 2 * (int)pow(2.0, UVbpp), TileSize[UVbpp][0]);
+
+ if(ExpectedPitch != RedescribedPitch)
+ {
+ ExpectedPitch = RedescribedPitch; // redescribed (UV) pitch wins when the two differ
+ }
+ else
+ {
+ //ExpectedPitch = GMM_ULT_ALIGN(ExpectedPitch, 2 * TileSize[Ybpp][0]); //pad to even tile
+ //ExpectedPitch = GMM_ULT_ALIGN(ExpectedPitch, (2 * TileSize[UVbpp][0]/ (int)pow(2.0, UVbpp))); //pad to even tile
+ }
+
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch);
+ VerifyResourcePitchInTiles<true>(ResourceInfo, ExpectedPitch / TileSize[Ybpp][0]);
+
+ // Plane sizes in 64KB tiles: rows-of-tiles per plane x tiles per row
+ int YSizeInTiles = (GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[Ybpp][1]) / TileSize[Ybpp][1]) *
+ RedescribedPitch / TileSize[Ybpp][0];
+ int UVSizeInTiles = (GMM_ULT_ALIGN(gmmParams.BaseHeight / 2, TileSize[UVbpp][1]) / TileSize[UVbpp][1]) *
+ RedescribedPitch / TileSize[UVbpp][0];
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(64) * (YSizeInTiles + UVSizeInTiles));
+ VerifyResourceHAlign<true>(ResourceInfo, TileSize[UVbpp][0] / pow(2.0, UVbpp)); // For Yf/Ys planar redescription causes UV width, Y height alignment
+ VerifyResourceVAlign<true>(ResourceInfo, TileSize[Ybpp][1]);
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // N/A for non-mipped surface
+
+ //test main surface base alignment is 64KB
+ //(4-tile pitch check applies to Yf/Y; disabled below for Ys)
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ //EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ // Linear CCS: Y and UV aux offsets are each page-aligned; CCS >= main/256
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_UV_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ EXPECT_EQ(GMM_KBYTE(4) * 2, ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS)); // Y and UV Aux are on separate tiles
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ // Standalone aux allocation must match the unified-aux size for the same params
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for 2D arrayed compressed Resource
+/// Placeholder: arrayed (3D-array) compressed-resource coverage is not
+/// implemented yet; the test body is intentionally empty.
+TEST_F(CTestGen12Resource, TestArrayedCompressedResource)
+{
+ // TODO: add coverage for 3D-array compressed allocations.
+}
+
+/// @brief ULT for mip-mapped compressed Resource
+/// Placeholder: mip-mapped compressed-resource coverage is not implemented
+/// yet; the test body is intentionally empty.
+TEST_F(CTestGen12Resource, TestMipMapCompressedResource)
+{
+ // TODO: add coverage for mip-mapped compressed allocations.
+}
+
+/// @brief ULT for cube Compressed Resource
+TEST_F(CTestGen12Resource, TestCubeCompressedResource)
+{
+ //Tests 2D array
+ const uint32_t HAlign[5][TEST_BPP_MAX] = {{0}, {0}, {0}, {256, 256, 128, 128, 64}, {64, 64, 32, 32, 16}};
+ const uint32_t VAlign[5][TEST_BPP_MAX] = {{0}, {0}, {0}, {256, 128, 128, 64, 64}, {64, 32, 32, 16, 16}};
+
+ const TEST_TILE_TYPE TileTypeSupported[2] = {TEST_TILEYS, TEST_TILEYF};
+
+ const uint32_t TileSize[5][TEST_BPP_MAX][2] = {
+ {0},
+ {0},
+ {0},
+ {{256, 256}, {512, 128}, {512, 128}, {1024, 64}, {1024, 64}}, // TileYS
+ {{64, 64}, {128, 32}, {128, 32}, {256, 16}, {256, 16}}}; //TileYf
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_CUBE;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1;
+
+ // Allocate 1x1 surface so that it occupies 1 Tile in X dimension
+ for(auto Tiling : TileTypeSupported)
+ {
+ gmmParams.Flags.Info.TiledY = 1;
+ gmmParams.Flags.Info.TiledYf = (Tiling == TEST_TILEYF);
+ gmmParams.Flags.Info.TiledYs = (Tiling == TEST_TILEYS);
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1;
+ gmmParams.Flags.Gpu.CCS = 1;
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = (Tiling == TEST_TILEYF) ? DEFINE_TILE(YF_2D, bpp) : DEFINE_TILE(YS_2D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[Tiling][i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[Tiling][i]);
+
+ uint32_t ExpectedPitch = 4 * TileSize[TEST_TILEYF][i][0];
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch); // As wide as 4 tile-YF
+ VerifyResourcePitchInTiles<true>(ResourceInfo, (Tiling == TEST_TILEYF) ? 4 : 1); // 1 tile wide
+
+ uint32_t ExpectedQPitch = VAlign[Tiling][i];
+ VerifyResourceQPitch<true>(ResourceInfo, ExpectedQPitch); // Each face should be VAlign rows apart within a tile
+
+ VerifyResourceSize<true>(ResourceInfo, // PitchInBytes * Rows where Rows = __GMM_MAX_CUBE_FACE x QPitch, then aligned to tile boundary
+ ExpectedPitch *
+ __GMM_MAX_CUBE_FACE * ExpectedQPitch);
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % ((Tiling == TEST_TILEYF) ? 4 : 1));
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+
+ for(uint32_t CubeFaceIndex = 0; CubeFaceIndex < __GMM_MAX_CUBE_FACE; CubeFaceIndex++)
+ {
+ GMM_REQ_OFFSET_INFO OffsetInfo = {};
+ OffsetInfo.ReqRender = 1;
+ OffsetInfo.CubeFace = static_cast<GMM_CUBE_FACE_ENUM>(CubeFaceIndex);
+ ResourceInfo->GetOffset(OffsetInfo);
+
+ EXPECT_EQ((CubeFaceIndex * ExpectedQPitch) * ExpectedPitch,
+ OffsetInfo.Render.Offset64); // Render offset is tile's base address on which cube face begins.
+ EXPECT_EQ(0, OffsetInfo.Render.XOffset); // X Offset should be 0 as cube face starts on tile boundary
+ EXPECT_EQ(0, OffsetInfo.Render.YOffset); // Y Offset should be 0 as cube face starts on tile boundary
+ EXPECT_EQ(0, OffsetInfo.Render.ZOffset); // Z offset N/A should be 0
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X dimension.
+ // Width and Height must be equal
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1;
+ gmmParams.Flags.Gpu.CCS = 1;
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = (Tiling == TEST_TILEYF) ? DEFINE_TILE(YF_2D, bpp) : DEFINE_TILE(YS_2D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[Tiling][i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = gmmParams.BaseWidth64; // Heigth must be equal to width.
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[Tiling][i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[Tiling][i]);
+
+ uint32_t ExpectedPitch = TileSize[Tiling][i][0] * 2 * ((Tiling == TEST_TILEYF) ? 2 : 1); // As wide as 2 tile, padded to 4 tile-pitch
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch);
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 2 * ((Tiling == TEST_TILEYF) ? 2 : 1)); // 2 tile wide
+
+ uint32_t ExpectedQPitch = GMM_ULT_ALIGN(gmmParams.BaseHeight, VAlign[Tiling][i]);
+ VerifyResourceQPitch<true>(ResourceInfo, ExpectedQPitch); // Each face should be Valigned-BaseHeight rows apart
+
+ VerifyResourceSize<true>(ResourceInfo, // PitchInBytes * Rows where Rows = __GMM_MAX_CUBE_FACE x QPitch, then aligned to tile boundary
+ ExpectedPitch *
+ __GMM_MAX_CUBE_FACE * ExpectedQPitch);
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % ((Tiling == TEST_TILEYF) ? 4 : 1)); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+
+ for(uint32_t CubeFaceIndex = 0; CubeFaceIndex < __GMM_MAX_CUBE_FACE; CubeFaceIndex++)
+ {
+ GMM_REQ_OFFSET_INFO OffsetInfo = {};
+ OffsetInfo.ReqRender = 1;
+ OffsetInfo.CubeFace = static_cast<GMM_CUBE_FACE_ENUM>(CubeFaceIndex);
+ ResourceInfo->GetOffset(OffsetInfo);
+ EXPECT_EQ((CubeFaceIndex * ExpectedQPitch) * ExpectedPitch,
+ OffsetInfo.Render.Offset64); // Render offset is tile's base address on which cube face begins.
+ EXPECT_EQ(0, OffsetInfo.Render.XOffset); // X Offset should be 0 as cube face starts on tile boundary
+ EXPECT_EQ(0, OffsetInfo.Render.YOffset); // Y Offset should be 0 as cube face starts on tile boundary
+ EXPECT_EQ(0, OffsetInfo.Render.ZOffset); // Z offset N/A should be 0
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+ }
+}
+
+/// @brief ULT for 3D TileYs Compressed Resource
+/// Verifies HAlign/VAlign, pitch (bytes and tiles), surface size, QPitch,
+/// 64KB base alignment and CCS aux-surface placement/size for 3D TileYs
+/// render-compressed surfaces, across all test bpps, for four extents:
+/// 1x1x1, >1 tile in X, >1 tile in X/Y, and >1 tile in X/Y/Z.
+TEST_F(CTestGen12Resource, Test3DTileYsCompressedResource)
+{
+ // Linear (unified) CCS is required for these expectations; skip on SKUs without it.
+ if(!const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ return;
+ }
+ // Horizontal/Vertical pixel alignment
+ const uint32_t HAlign[TEST_BPP_MAX] = {64, 32, 32, 32, 16};
+ const uint32_t VAlign[TEST_BPP_MAX] = {32, 32, 32, 16, 16};
+
+ // Per-bpp TileYs(64KB) 3D tile geometry: {width-in-bytes, height-in-rows, depth-in-slices}.
+ const uint32_t TileSize[TEST_BPP_MAX][3] = {{64, 32, 32},
+ {64, 32, 32},
+ {128, 32, 16},
+ {256, 16, 16},
+ {256, 16, 16}};
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_3D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+ gmmParams.Flags.Info.TiledYs = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1; // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf its also used to pad surf
+ // to 4x1 tile (reqd by HW for perf reasons)
+
+ // Allocate 1x1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ // NOTE(review): TileMode is computed but never consumed in this test — presumably
+ // retained for debugging/symmetry with other ULTs; confirm before removing.
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YS_3D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0]); // As wide as 1 tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 1); // 1 tile wide
+ // <false> template arg skips the check (same convention as the "Not tested" QPitch line below).
+ VerifyResourceSize<false>(ResourceInfo, GMM_KBYTE(64)); // 1 tile big
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ // (NOTE(review): the Yf remark above is stale here — this is a TileYs test and no
+ // pitch-mod-4 check follows in this scenario.)
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+
+ // CCS aux must start page-aligned and cover at least main-size/256 (Gen12 CCS ratio).
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ // For a 1-tile (64KB) main surface the CCS payload is 64KB>>8 = 256B; the expected
+ // aux size of 4KB is presumably page padding — confirm against aux allocation rules.
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ // A separately-created aux resource must mirror the unified-aux geometry.
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YS_3D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 2); // As wide as 2 tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 2); // 2 tile wide
+ VerifyResourceSize<false>(ResourceInfo, GMM_KBYTE(64) * 2); // 2 tile big
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ // 2-tile (128KB) main surface -> 512B of CCS, still within one 4KB aux page.
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X/Y dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YS_3D, bpp);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = TileSize[i][1] + 1; // 1 row larger than 1 tile height
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 2); // As wide as 2 tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 2); // 2 tile wide
+ VerifyResourceSize<false>(ResourceInfo, GMM_KBYTE(64) * 4); // 4 tile big
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X/Y/Z dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YS_3D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = TileSize[i][1] + 1; // 1 row larger than 1 tile height
+ gmmParams.Depth = TileSize[i][2] + 1; // 1 plane larger than 1 tile depth
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 2); // As wide as 2 tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 2); // 2 tile wide
+ // Note: this scenario DOES verify the size (<true>), unlike the earlier scenarios.
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(64) * 8); // 8 tile big
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ // 8-tile (512KB) main surface -> 2KB of CCS, still one 4KB aux page.
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for 3D TileYf Compressed Resource
+/// Verifies HAlign/VAlign, 4-tile-padded pitch, surface size, QPitch, 64KB base
+/// alignment and CCS aux-surface placement/size for 3D TileYf(4KB) render-compressed
+/// surfaces across all test bpps, for 1x1x1 and >1-tile X / X-Y / X-Y-Z extents.
+TEST_F(CTestGen12Resource, Test3DTileYfCompressedResource)
+{
+ // Linear (unified) CCS is required for these expectations; skip on SKUs without it.
+ if(!const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ return;
+ }
+ // Horizontal/Vertical pixel alignment
+ const uint32_t HAlign[TEST_BPP_MAX] = {16, 8, 8, 8, 4};
+ const uint32_t VAlign[TEST_BPP_MAX] = {16, 16, 16, 8, 8};
+ // Per-bpp TileYf(4KB) 3D tile geometry: {width-in-bytes, height-in-rows, depth-in-slices}.
+ const uint32_t TileSize[TEST_BPP_MAX][3] = {{16, 16, 16},
+ {16, 16, 16},
+ {32, 16, 8},
+ {64, 8, 8},
+ {64, 8, 8}};
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_3D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+ gmmParams.Flags.Info.TiledYf = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1; // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf its also used to pad surf
+ // to 4x1 tile (reqd by HW for perf reasons)
+
+ // Allocate 1x1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+ //gmmParams.Flags.Gpu.MMC = 0; //Turn on to check unifiedaux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ // NOTE(review): TileMode is computed but never consumed in this test — presumably
+ // retained for debugging/symmetry with other ULTs; confirm before removing.
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_3D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 1;
+ gmmParams.BaseHeight = 1;
+ gmmParams.Depth = 1;
+ const uint32_t PitchAlignment = 32; // unused in this scenario; pitch here comes from the 4-tile pad
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ // Yf pitch is padded to 4 tiles wide (see RenderCompressed comment above).
+ VerifyResourcePitch<true>(ResourceInfo, GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[i][0] * 4));
+ VerifyResourcePitchInTiles<true>(ResourceInfo, GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[i][0] * 4) / TileSize[i][0]);
+ // Expected size = tiles-in-X * tiles-in-Y * tiles-in-Z * 4KB tile size.
+ VerifyResourceSize<true>(ResourceInfo, (GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[i][0] * 4) / TileSize[i][0]) *
+ (GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[i][1]) / TileSize[i][1]) *
+ (GMM_ULT_ALIGN(gmmParams.Depth, TileSize[i][2]) / TileSize[i][2]) * GMM_KBYTE(4));
+ VerifyResourceQPitch<true>(ResourceInfo, (GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[i][1])));
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ // CCS aux must start page-aligned and cover at least main-size/256 (Gen12 CCS ratio).
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ // Aux size >= main-size * 64B-per-16KB (i.e. /256), rounded up to a 4KB page.
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ // A separately-created aux resource must mirror the unified-aux geometry.
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_3D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+ const uint32_t PitchAlignment = 32;
+
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ // Pitch = 4-tile pad, then aligned up to PitchAlignment bytes.
+ VerifyResourcePitch<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment));
+ VerifyResourcePitchInTiles<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment) / TileSize[i][0]);
+ VerifyResourceSize<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment) / TileSize[i][0] * GMM_KBYTE(4));
+ VerifyResourceQPitch<true>(ResourceInfo, TileSize[i][1]);
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ // Main surface fits in one CCS page here (4KB expected), consistent with the bound below.
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X/Y dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_3D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1;
+ gmmParams.BaseHeight = TileSize[i][1] + 1;
+ gmmParams.Depth = 0x1;
+ const uint32_t PitchAlignment = 32;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment));
+ VerifyResourcePitchInTiles<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment) / TileSize[i][0]);
+ // Factor of 2 = two tile-rows in Y (BaseHeight spans 2 tiles).
+ VerifyResourceSize<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment) / TileSize[i][0] * 2 * GMM_KBYTE(4));
+ VerifyResourceQPitch<true>(ResourceInfo, TileSize[i][1] * 2);
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X/Y/Z dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_3D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1;
+ gmmParams.BaseHeight = TileSize[i][1] + 1;
+ gmmParams.Depth = TileSize[i][2] + 1;
+ const uint32_t PitchAlignment = 32;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment));
+ VerifyResourcePitchInTiles<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment) / TileSize[i][0]);
+ // Factors of 2*2 = two tile-rows in Y times two tile-slices in Z.
+ VerifyResourceSize<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[i][0] * 4, PitchAlignment) / TileSize[i][0] * 2 * 2 * GMM_KBYTE(4));
+ VerifyResourceQPitch<true>(ResourceInfo, TileSize[i][1] * 2);
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_GE(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), ResourceInfo->GetSizeMainSurface() >> 8);
+ }
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for 3D TileY Compressed Resource
+TEST_F(CTestGen12Resource, Test3DTileYCompressedResource)
+{
+ if(!const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ return;
+ }
+ // Horizontal/Verticle pixel alignment
+ const uint32_t HAlign = {16};
+ const uint32_t VAlign = {4};
+ const uint32_t TileSize[3] = {128, 32, 1};
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_3D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1; // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf its also used to pad surf
+ // to 4x1 tile (reqd by HW for perf reasons)
+
+ // Allocate 1x1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y;
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ //gmmParams.BaseWidth64 = 0x30;
+ //gmmParams.BaseHeight = 0x30;
+ //gmmParams.Depth = 0x20;
+ gmmParams.BaseWidth64 = 1;
+ gmmParams.BaseHeight = 1;
+ gmmParams.Depth = 1;
+ const uint32_t PitchAlignment = 32;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[0] * 4));
+ VerifyResourcePitchInTiles<true>(ResourceInfo, GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[0] * 4) / TileSize[0]);
+ VerifyResourceSize<true>(ResourceInfo, (GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[0] * 4) / TileSize[0]) *
+ (GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[1]) / TileSize[1]) *
+ (GMM_ULT_ALIGN(gmmParams.Depth, TileSize[2]) / TileSize[2]) * GMM_KBYTE(4));
+ VerifyResourceQPitch<true>(ResourceInfo, (GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[1])));
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y;
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1;
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+ const uint32_t PitchAlignment = 32;
+
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment));
+ VerifyResourcePitchInTiles<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment) / TileSize[0]);
+ VerifyResourceSize<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment) / TileSize[0] * GMM_KBYTE(4));
+ VerifyResourceQPitch<true>(ResourceInfo, TileSize[1]);
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X/Y dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y;
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1;
+ gmmParams.BaseHeight = TileSize[1] + 1;
+ gmmParams.Depth = 0x1;
+ const uint32_t PitchAlignment = 32;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment));
+ VerifyResourcePitchInTiles<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment) / TileSize[0]);
+ VerifyResourceSize<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment) / TileSize[0] * 2 * GMM_KBYTE(4));
+ VerifyResourceQPitch<true>(ResourceInfo, TileSize[1] * 2);
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X/Y/Z dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y;
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1;
+ gmmParams.BaseHeight = TileSize[1] + 1;
+ gmmParams.Depth = TileSize[2] + 1;
+ const uint32_t PitchAlignment = 32;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment));
+ VerifyResourcePitchInTiles<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment) / TileSize[0]);
+ VerifyResourceSize<true>(ResourceInfo, GMM_ULT_ALIGN(TileSize[0] * 4, PitchAlignment) / TileSize[0] * 2 * 2 * GMM_KBYTE(4));
+ VerifyResourceQPitch<true>(ResourceInfo, TileSize[1] * 2);
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for 2D Yf mipped compressed resource
+/// Verifies HAlign/VAlign, pitch, size and QPitch for a 0x38 x 0x38, MaxLod=4,
+/// ArraySize=4 TileY/TileYf render-compressed 2D texture across all test bpps,
+/// plus the linear-CCS aux expectations (page-aligned aux offset, 4KB aux size).
+/// Skips entirely on SKUs without FtrLinearCCS.
+TEST_F(CTestGen12Resource, Test2DTileYfMippedCompressedResource)
+{
+ if(!const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ return;
+ }
+ // Per-bpp alignment units and TileYf tile dimensions (width-in-bytes x height-in-rows).
+ const uint32_t HAlign[TEST_BPP_MAX] = {64, 64, 32, 32, 16};
+ const uint32_t VAlign[TEST_BPP_MAX] = {64, 32, 32, 16, 16};
+
+ const uint32_t TileSize[TEST_BPP_MAX][2] = {{64, 64},
+ {128, 32},
+ {128, 32},
+ {256, 16},
+ {256, 16}};
+
+ // Mip-tail start thresholds (in pixels) per bpp: the first LOD whose dims fit
+ // within MtsWidth x MtsHeight is the packed mip-tail start.
+ const uint32_t MtsWidth[TEST_BPP_MAX] = {32, 32, 16, 16, 8};
+ const uint32_t MtsHeight[TEST_BPP_MAX] = {64, 32, 32, 16, 16};
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+ gmmParams.Flags.Info.TiledYf = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1; // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf its also used to pad surf
+ // to 4x1 tile (reqd by HW for perf reasons)
+ gmmParams.MaxLod = 4;
+ gmmParams.ArraySize = 4;
+
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ uint32_t AlignedWidth = 0;
+ uint32_t AlignedHeight = 0;
+ uint32_t ExpectedPitch = 0;
+ uint32_t MipTailStartLod = 0;
+ // Valigned Mip Heights
+ uint32_t Mip0Height = 0;
+ uint32_t Mip1Height = 0;
+ uint32_t Mip2Height = 0;
+ uint32_t Mip3Height = 0;
+ uint32_t Mip4Height = 0;
+ uint32_t Mip5Height = 0;
+ uint32_t Mip2Higher = 0; // Sum of aligned heights of Mip2 and above
+ uint32_t MipTailHeight = 0;
+ // Haligned Mip Widths
+ uint32_t Mip0Width = 0;
+ uint32_t Mip1Width = 0;
+ uint32_t Mip2Width = 0;
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x38;
+ gmmParams.BaseHeight = 0x38;
+
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-device shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+
+ // find the miptail start level
+ {
+ uint32_t MipWidth = gmmParams.BaseWidth64;
+ uint32_t MipHeight = gmmParams.BaseHeight;
+ while(!(MipWidth <= MtsWidth[i] && MipHeight <= MtsHeight[i]))
+ {
+ MipTailStartLod++;
+ MipWidth = (uint32_t)(GMM_ULT_MAX(1, gmmParams.BaseWidth64 >> MipTailStartLod));
+ MipHeight = GMM_ULT_MAX(1, gmmParams.BaseHeight >> MipTailStartLod);
+ }
+ }
+
+ // Mip resource Aligned Width calculation
+ Mip0Width = GMM_ULT_ALIGN(gmmParams.BaseWidth64, HAlign[i]);
+ Mip0Height = GMM_ULT_ALIGN(gmmParams.BaseHeight, VAlign[i]);
+
+ // Only one of the following three branches fires per bpp, depending on where
+ // the 0x38x0x38 base lands relative to MtsWidth/MtsHeight for that bpp.
+ if(MipTailStartLod == 1)
+ {
+ EXPECT_EQ(1, ResourceInfo->GetPackedMipTailStartLod());
+ // Block height...Mip0Height + Max(Mip1Height, Sum of Mip2Height..MipnHeight)
+ Mip1Height = GMM_ULT_ALIGN(gmmParams.BaseHeight >> 1, VAlign[i]);
+ AlignedWidth = Mip0Width;
+ }
+ if(MipTailStartLod == 2)
+ {
+ EXPECT_EQ(2, ResourceInfo->GetPackedMipTailStartLod());
+ // Block height...Mip0Height + Max(Mip1Height, Sum of Mip2Height..MipnHeight)
+ Mip1Height = GMM_ULT_ALIGN(gmmParams.BaseHeight >> 1, VAlign[i]);
+ Mip2Height = Mip2Higher = GMM_ULT_ALIGN(gmmParams.BaseHeight >> 2, VAlign[i]);
+
+ Mip1Width = GMM_ULT_ALIGN(gmmParams.BaseWidth64 >> 1, HAlign[i]);
+ Mip2Width = GMM_ULT_ALIGN(gmmParams.BaseWidth64 >> 2, HAlign[i]);
+ AlignedWidth = GMM_ULT_MAX(Mip0Width, Mip1Width + Mip2Width);
+ }
+ if(MipTailStartLod == 3)
+ {
+ EXPECT_EQ(3, ResourceInfo->GetPackedMipTailStartLod());
+ // Block height...Mip0Height + Max(Mip1Height, Sum of Mip2Height..MipnHeight)
+ Mip1Height = GMM_ULT_ALIGN(gmmParams.BaseHeight >> 1, VAlign[i]);
+ Mip2Height = GMM_ULT_ALIGN(gmmParams.BaseHeight >> 2, VAlign[i]);
+ // Miptail started lod
+ MipTailHeight = VAlign[i];
+ // NOTE(review): Mip3Height is still 0 here (initialized above, never
+ // assigned), so this sum is effectively Mip2Height + MipTailHeight.
+ Mip2Higher = Mip2Height + Mip3Height + MipTailHeight;
+
+ Mip1Width = GMM_ULT_ALIGN(gmmParams.BaseWidth64 >> 1, HAlign[i]);
+ Mip2Width = GMM_ULT_ALIGN(gmmParams.BaseWidth64 >> 2, HAlign[i]);
+ AlignedWidth = GMM_ULT_MAX(Mip0Width, Mip1Width + Mip2Width);
+ }
+
+ uint32_t MaxHeight = GMM_ULT_MAX(Mip1Height, Mip2Higher);
+ AlignedHeight = Mip0Height + MaxHeight;
+ AlignedHeight = GMM_ULT_ALIGN(AlignedHeight, VAlign[i]);
+
+ ExpectedPitch = AlignedWidth * GetBppValue(bpp);
+ ExpectedPitch = GMM_ULT_ALIGN(ExpectedPitch, GMM_BYTES(32));
+ ExpectedPitch = GMM_ULT_ALIGN(ExpectedPitch, TileSize[i][0] * 4); //Only for displayables - 16K pitch align
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch);
+
+ VerifyResourcePitchInTiles<true>(ResourceInfo, static_cast<uint32_t>(ExpectedPitch / TileSize[i][0]));
+ VerifyResourceSize<true>(ResourceInfo, GMM_ULT_ALIGN(ExpectedPitch * AlignedHeight * gmmParams.ArraySize, PAGE_SIZE));
+ VerifyResourceQPitch<false>(ResourceInfo, AlignedHeight);
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ //Aux-size enough to cover all
+ if(const_cast<SKU_FEATURE_TABLE &>(pGfxAdapterInfo->SkuTable).FtrLinearCCS)
+ {
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ }
+
+ // CCS is 1/256th of the main surface (64B CCS per 16KB main), 4KB-aligned.
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for Linear Buffer Compressed Resource
+/// Exercises render-compressed linear RESOURCE_BUFFERs at two sizes (1x1 and
+/// >1 page), checking 64KB base alignment, page-aligned linear-CCS aux offset,
+/// and that the aux surface covers at least 1/256th of the main surface.
+TEST_F(CTestGen12Resource, TestLinearCompressedResource)
+{
+ // Horizontal pixel alignment
+ const uint32_t MinPitch = 32;
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_BUFFER;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.Linear = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1;
+
+ // Allocate 1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 1;
+ gmmParams.Flags.Info.AllowVirtualPadding = (bpp != 8); //OCL uses 8bpp buffers. doc doesn't comment if Linear buffer compr allowed or not on bpp!=8.
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ // Expected layout: width padded to MinPitch pixels, then the byte pitch
+ // rounded up to a full page for the surface size.
+ uint32_t AlignedWidth = GMM_ULT_ALIGN(gmmParams.BaseWidth64, MinPitch);
+ uint32_t PitchInBytes = AlignedWidth * GetBppValue(bpp);
+ uint32_t AlignedSize = GMM_ULT_ALIGN(PitchInBytes, PAGE_SIZE);
+
+ VerifyResourceHAlign<false>(ResourceInfo, 0);
+ VerifyResourceVAlign<false>(ResourceInfo, 0); // N/A for buffer
+ VerifyResourcePitch<false>(ResourceInfo, 0); // N/A for buffer
+ VerifyResourcePitchInTiles<false>(ResourceInfo, 0); // N/A for linear
+ VerifyResourceSize<true>(ResourceInfo, AlignedSize);
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // N/A for non-arrayed
+
+ //test main surface base alignment is 64KB
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+
+ // Allocate more than 1 page
+ // Note: gmmParams carries over from the previous loop (UnifiedAuxSurface was
+ // left 0 by the separate-aux sub-block) and is re-enabled below.
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1001;
+ gmmParams.BaseHeight = 1;
+ gmmParams.Flags.Info.AllowVirtualPadding = (bpp != 8); //OCL uses 8bpp buffers. document doesn't comment if Linear buffer compr allowed or not on bpp!=8.
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ uint32_t AlignedWidth = GMM_ULT_ALIGN(gmmParams.BaseWidth64, MinPitch);
+ uint32_t PitchInBytes = AlignedWidth * GetBppValue(bpp);
+ uint32_t AlignedSize = GMM_ULT_ALIGN(PitchInBytes, PAGE_SIZE);
+
+ // NOTE(review): the <false> template arg disables this check, so MinPitch
+ // here is not actually asserted (cf. the 1x1 loop which passes 0).
+ VerifyResourceHAlign<false>(ResourceInfo, MinPitch);
+ VerifyResourceVAlign<false>(ResourceInfo, 0); // N/A for buffer
+ VerifyResourcePitch<false>(ResourceInfo, 0); // N/A for buffer
+ VerifyResourcePitchInTiles<false>(ResourceInfo, 0); // N/A for linear
+ VerifyResourceSize<true>(ResourceInfo, AlignedSize);
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // N/A for non-arrayed
+
+ //test main surface base alignment is 64KB
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ ASSERT_LE(GMM_ULT_ALIGN(ResourceInfo->GetSizeMainSurface() / (GMM_KBYTE(16) / GMM_BYTES(64)), GMM_KBYTE(4)),
+ ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+///TODO Add MSAA/Depth Compressed Resource tests
+/// Placeholder: intentionally empty until MSAA lossless-compression coverage is written.
+TEST_F(CTestGen12Resource, TestLosslessMSAACompressedResource)
+{
+}
+
+///TODO Add MSAA/Depth Compressed Resource tests
+/// Work-in-progress depth(HiZ)-compression ULT. DISABLED_ prefix keeps it out
+/// of the default gtest run; several expectations below are still unsettled
+/// (see NOTE(review) comments).
+TEST_F(CTestGen12Resource, DISABLED_TestDepthCompressedResource)
+{
+ const uint32_t HAlign = 8; //HiZ alignment (16x4 ie general alignment), [Depth 16bit: 8x8; ow 8x4]
+ uint32_t VAlign = 4; // 8; Need to debug why driver uses VAlign/2
+
+ //const uint32_t DepthTileSize[1][2] = { 64, 64 }; //Depth/Stencil buffer should be TileY/Ys/Yf only (16,24,32 bpp only) no 3D or MSAA
+ const uint32_t AllocTileSize[1][2] = {128, 32}; //HiZ is TileY
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1; //Not supported for Depth buffer, but HiZ output is TileY
+ gmmParams.Flags.Gpu.Depth = 1; //GPU Flags= Depth/SeparateStencil + HiZ
+ gmmParams.Flags.Gpu.HiZ = 1;
+ gmmParams.Flags.Gpu.IndirectClearColor = 1;
+ gmmParams.Flags.Gpu.CCS = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1;
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1;
+
+ // Allocate 1x1 surface so that it occupies 1 Tile in X dimension
+ for(uint32_t j = TEST_BPP_8; j <= TEST_BPP_128; j++) //Depth bpp doesn't matter, Depth px dimensions decide HiZ size in HW
+ {
+ {
+ VAlign = (j == TEST_BPP_16) ? 8 : 4;
+ }
+ gmmParams.Format = SetResourceFormat(static_cast<TEST_BPP>(j)); //Only 16,24,32 supported; But driver creates the resource even for other bpps without failing
+ for(uint32_t i = RESOURCE_2D; i <= RESOURCE_CUBE; i++) //3D doesn't support HiZ - test driver returns proper?
+ {
+ gmmParams.Type = static_cast<GMM_RESOURCE_TYPE>(i);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 0x1; //0x24; //not 1 tile
+ //gmmParams.MaxLod = 6; --add expectedheight calc- mip0+max{mip1, sum{mip2,...n}}
+ gmmParams.Depth = 0x1;
+ if(i == RESOURCE_1D || i == RESOURCE_3D)
+ {
+ gmmParams.Flags.Gpu.HiZ = 0;
+ }
+
+ GMM_RESOURCE_INFO *ResourceInfo = NULL;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ //EXPECT_NE(NULL, ResourceInfo);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ uint32_t ExpectedPitch = AllocTileSize[0][0] * 4;
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch);
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 1 tileY wide
+ uint32_t ExpectedHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, VAlign);
+
+ // NOTE(review): ArraySize is never set >1 in this first loop, so this
+ // branch only runs for RESOURCE_CUBE here.
+ if(gmmParams.ArraySize > 1 || gmmParams.Type == RESOURCE_CUBE)
+ {
+ uint32_t ExpectedQPitch = GMM_ULT_ALIGN(gmmParams.BaseHeight, VAlign); //Apply formula as per specification
+ ExpectedQPitch = GMM_ULT_ALIGN(ExpectedQPitch / 2, VAlign);
+ ExpectedHeight *= (gmmParams.Type == RESOURCE_CUBE) ? 6 : 1;
+
+ VerifyResourceQPitch<false>(ResourceInfo, ExpectedQPitch); // Each face should be VAlign rows apart within a tile, Turn on verification after clarity
+ }
+
+ VerifyResourceSize<true>(ResourceInfo,
+ GFX_ALIGN(ExpectedPitch * ExpectedHeight, 4 * PAGE_SIZE)); //1 Tile should be enough
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X dimension. (muti-tiles Tiles in Y dimension for cube/array)
+ for(uint32_t i = RESOURCE_2D; i <= RESOURCE_CUBE; i++)
+ {
+ gmmParams.Type = static_cast<GMM_RESOURCE_TYPE>(i);
+ gmmParams.BaseWidth64 = AllocTileSize[0][0] + 0x1;
+ gmmParams.BaseHeight = (gmmParams.Type == RESOURCE_1D) ? 0x1 :
+ (gmmParams.Type == RESOURCE_CUBE) ? gmmParams.BaseWidth64 :
+ VAlign / 2;
+ gmmParams.ArraySize = (gmmParams.Type != RESOURCE_3D) ? VAlign : 1; // Gen8 doesn't support 3D-arrays (so HiZ not supported) [test 3d arrays once -- HiZ would fail but ResCreate doesn't?]
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ // NOTE(review): uses gmmParams.BaseWidth (not BaseWidth64 as elsewhere in
+ // this file) and scales by pow(2, j) -- confirm intended before enabling.
+ uint32_t ExpectedPitch = GFX_ALIGN(gmmParams.BaseWidth * (int)pow(2, j), AllocTileSize[0][0] * 4);
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch);
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tileY wide
+
+ uint32_t ExpectedQPitch;
+ if(gmmParams.ArraySize > 1 || gmmParams.Type == RESOURCE_CUBE)
+ {
+ ExpectedQPitch = GMM_ULT_ALIGN(gmmParams.BaseHeight, VAlign);
+ //ExpectedQPitch = GMM_ULT_ALIGN(ExpectedQPitch / 2, VAlign);
+
+ VerifyResourceQPitch<false>(ResourceInfo, ExpectedQPitch); // Each face should be VAlign rows apart within a tile. Turn on verification after clarity
+ }
+
+ VerifyResourceSize<true>(ResourceInfo, // PitchInBytes * Rows where Rows = (__GMM_MAX_CUBE_FACE x QPitch) /2 (Stencil height = halved due to interleaving), then aligned to tile boundary
+ ((gmmParams.Type == RESOURCE_CUBE) ?
+ ExpectedPitch * GMM_ULT_ALIGN(ExpectedQPitch * gmmParams.ArraySize * __GMM_MAX_CUBE_FACE, AllocTileSize[0][1]) : //cube
+ ((gmmParams.ArraySize > 1) ?
+ ExpectedPitch * GMM_ULT_ALIGN(ExpectedQPitch * gmmParams.ArraySize, AllocTileSize[0][1]) : //array
+ 4 * GMM_KBYTE(4))));
+
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate 2 tiles in X/Y dimension (non-arrayed) Multi-tiles for 3D
+ for(uint32_t i = RESOURCE_2D; i <= RESOURCE_3D; i++)
+ {
+ gmmParams.Type = static_cast<GMM_RESOURCE_TYPE>(i);
+ gmmParams.BaseWidth64 = AllocTileSize[0][0] + 0x1;
+ gmmParams.BaseHeight = 2 * AllocTileSize[0][1] + 0x1; //Half-Depth Height or QPitch (lod!=0), aligned to 8 required by HW
+ gmmParams.Depth = (gmmParams.Type == RESOURCE_2D) ? 0x1 :
+ VAlign + 1;
+ gmmParams.ArraySize = 1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ uint32_t ExpectedPitch = AllocTileSize[0][0] * 4;
+ VerifyResourcePitch<true>(ResourceInfo, ExpectedPitch);
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide
+
+ uint32_t TwoDQPitch, ExpectedQPitch;
+ // NOTE(review): size is only verified in the else (non-3D) branch below,
+ // where the RESOURCE_3D arm of its ternary can never be taken -- so the
+ // ExpectedQPitch computed in the 3D branch is currently unused. Restructure
+ // before enabling this test.
+ if(gmmParams.Type == RESOURCE_3D)
+ {
+ TwoDQPitch = GMM_ULT_ALIGN(gmmParams.BaseHeight, VAlign);
+ ExpectedQPitch = gmmParams.Depth * GMM_ULT_ALIGN(TwoDQPitch / 2, VAlign); //Depth slices arranged as 2D-arrayed slices.
+ }
+ else
+ {
+ //HiZ for 3D not supported. Driver still allocates like IVB/HSW. (should Qpitch or only overall buffer height be Valigned ?)
+ VerifyResourceSize<true>(ResourceInfo,
+ ((gmmParams.Type == RESOURCE_3D) ?
+ ExpectedPitch * GMM_ULT_ALIGN(ExpectedQPitch, AllocTileSize[0][1]) :
+ 2 * 2 * GMM_KBYTE(4)));
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+ }
+}
+
+/// @brief ULT for separate-stencil compressed resource
+/// Verifies 16x8 H/V alignment, 4-tileY pitch alignment, 64KB base alignment
+/// and 4KB linear-CCS aux size for a render-compressed TileY separate-stencil
+/// surface at 1x1, 2-tiles-in-X and 2-tiles-in-X/Y footprints (8bpp only).
+TEST_F(CTestGen12Resource, TestStencilCompressedResource)
+{
+ const uint32_t HAlign = {16};
+ const uint32_t VAlign = {8};
+
+ const uint32_t TileSize[2] = {128, 32};
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+ //gmmParams.ArraySize = 4;
+
+ gmmParams.Flags.Gpu.SeparateStencil = 1;
+ gmmParams.Flags.Info.RenderCompressed = 1;
+ // Turn on .MC or .RC flag - mandatory to tell compression-type, for Yf its also used to pad surf
+ // to 4x1 tile (reqd by HW for perf reasons)
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-adapter shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+
+ //Allocate 1x1 surface
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.IndirectClearColor = 1;
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(TEST_BPP_8);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y;
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, 4 * TileSize[0]); // As wide as 4 Tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 4 Tile wide
+ VerifyResourceSize<true>(ResourceInfo, 4 * GMM_KBYTE(4)); // 4 Tile Big
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not Tested
+
+ //test main surface base alignment is 64KB
+ //For Yf test main surface pitch is 4-tileYF aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4); // Check on YF only
+
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X dimension
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(TEST_BPP_8);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y;
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[0] * 4); // As wide as 2 tile, but 4-tile pitch alignment
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide, but 4-tile pitch alignment
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 4); // 2 tile big, but 4-tile pitch alignment
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+
+ //test main surface base alignment is 64KB
+ //For Y test main surface pitch is 4-tileY aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X/Y dimension
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.IndirectClearColor = 1;
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(TEST_BPP_8);
+ GMM_TILE_MODE TileMode = LEGACY_TILE_Y;
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = TileSize[1] + 1; // 1 row larger than 1 tile height
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[0] * 4); // As wide as 2 tile, but 4-tile pitch alignment
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 4); // 2 tile wide, but 4-tile pitch alignment
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 4 * 2); // 4 tile wide; and 2-tile high
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+ //test main surface base alignment is 64KB
+ //For Y test main surface pitch is 4-tileY aligned
+ EXPECT_EQ(GMM_KBYTE(64), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetRenderPitchTiles() % 4);
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+
+/// @brief ULT for 2D TileYf Compressed Resource
+TEST_F(CTestGen12Resource, Test2DTileYfAMFSResource)
+{
+ const uint32_t HAlign[TEST_BPP_MAX] = {64, 64, 32, 32, 16};
+ const uint32_t VAlign[TEST_BPP_MAX] = {64, 32, 32, 16, 16};
+
+ const uint32_t TileSize[TEST_BPP_MAX][2] = {{64, 64},
+ {128, 32},
+ {128, 32},
+ {256, 16},
+ {256, 16}};
+
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Type = RESOURCE_2D;
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Flags.Info.TiledY = 1;
+ gmmParams.Flags.Info.TiledYf = 1;
+ gmmParams.Flags.Gpu.Texture = 1;
+ gmmParams.Flags.Gpu.ProceduralTexture = 1;
+ // If unifiedAuxSurf reqd (mandatory for displayable or cross-adapter shared), turn on .CCS/.MMC and .UnifiedAuxSurface too
+
+ //Allocate 1x1 surface
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+ //gmmParams.Flags.Gpu.MMC = 0; //Turn on to check unifiedaux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_2D, bpp);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = 0x1;
+ gmmParams.BaseHeight = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0]); // As wide as 1 Tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 1); // 1 Tile wide
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4)); // 1 Tile Big
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not Tested
+
+ //test main surface base alignment is 4KB, since AMFS PT isn't compressed
+ //but uses same linear CCS as compression
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ EXPECT_EQ(0, AuxResourceInfo->GmmGetTileMode());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_2D, bpp);
+
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = 0x1;
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 2); // As wide as 2 tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 2); // 2 tile wide
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 2); // 2 tile big
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+
+ //test main surface base alignment is 4KB
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ EXPECT_EQ(0, AuxResourceInfo->GmmGetTileMode());
+
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+
+ // Allocate surface that requires multi tiles in two dimension
+ // Allocate 2 tiles in X/Y dimension
+ for(uint32_t i = 0; i < TEST_BPP_MAX; i++)
+ {
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 1; //Turn off for separate aux creation
+ gmmParams.Flags.Gpu.CCS = 1; //Turn off for separate aux creation
+
+ TEST_BPP bpp = static_cast<TEST_BPP>(i);
+ GMM_TILE_MODE TileMode = DEFINE_TILE(YF_2D, bpp);
+ gmmParams.Format = SetResourceFormat(bpp);
+ gmmParams.BaseWidth64 = (TileSize[i][0] / GetBppValue(bpp)) + 1; // 1 pixel larger than 1 tile width
+ gmmParams.BaseHeight = TileSize[i][1] + 1; // 1 row larger than 1 tile height
+ gmmParams.Depth = 0x1;
+
+ GMM_RESOURCE_INFO *ResourceInfo;
+ ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ VerifyResourceHAlign<true>(ResourceInfo, HAlign[i]);
+ VerifyResourceVAlign<true>(ResourceInfo, VAlign[i]);
+ VerifyResourcePitch<true>(ResourceInfo, TileSize[i][0] * 2); // As wide as 2 tile
+ VerifyResourcePitchInTiles<true>(ResourceInfo, 2); // 2 tile wide
+ VerifyResourceSize<true>(ResourceInfo, GMM_KBYTE(4) * 2 * 2); // 2 tile wide; and 2-tile high
+
+ VerifyResourceQPitch<false>(ResourceInfo, 0); // Not tested
+ //test main surface base alignment is 4KB
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetBaseAlignment());
+ EXPECT_EQ(0, ResourceInfo->GetUnifiedAuxSurfaceOffset(GMM_AUX_CCS) % PAGE_SIZE);
+ EXPECT_EQ(GMM_KBYTE(4), ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS));
+
+ { //separate Aux
+ gmmParams.Flags.Gpu.UnifiedAuxSurface = 0;
+
+ GMM_RESOURCE_INFO *AuxResourceInfo;
+ AuxResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+ EXPECT_EQ(ResourceInfo->GetAuxHAlign(), AuxResourceInfo->GetHAlign());
+ EXPECT_EQ(ResourceInfo->GetAuxVAlign(), AuxResourceInfo->GetVAlign());
+ EXPECT_EQ(ResourceInfo->GetUnifiedAuxPitch(), AuxResourceInfo->GetRenderPitch());
+ EXPECT_EQ(ResourceInfo->GetSizeAuxSurface(GMM_AUX_CCS), AuxResourceInfo->GetSizeSurface());
+ EXPECT_EQ(0, AuxResourceInfo->GmmGetTileMode());
+ pGmmULTClientContext->DestroyResInfoObject(AuxResourceInfo);
+ }
+ pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ }
+}
+
+/// @brief ULT for MSAA Resource - TODO: add depth MSAA, MCS surf param verification, compression case
+TEST_F(CTestGen12Resource, TestColorMSAA)
+{
+ //Tile dimensions in Bytes
+ const uint32_t MCSTileSize[1][2] = {128, 32}; //MCS is TileY
+
+ const uint32_t TestDimensions[4][2] = {
+ //Input dimensions in #Tiles
+ {15, 20}, //16 Tiles x 20 <Max Width: Depth MSS crosses Pitch limit beyond this>
+ {0, 0}, //1x1x1
+ {1, 0}, //2 Tilesx1
+ {1, 1}, //2 Tiles x 2
+ };
+
+ uint32_t TestArraySize[2] = {1, 5};
+ uint32_t MinPitch = 32;
+
+ uint32_t HAlign, VAlign, TileDimX, TileDimY, MCSHAlign, MCSVAlign, TileSize;
+ uint32_t ExpectedMCSBpp;
+ std::vector<tuple<int, int, int, bool, int, int>> List; //TEST_TILE_TYPE, TEST_BPP, TEST_RESOURCE_TYPE, Depth or RT, TestDimension index, ArraySize
+ auto Size = BuildInputIterator(List, 4, 2); // Size of arrays TestDimensions, TestArraySize
+
+ for(auto element : List)
+ {
+ GMM_RESCREATE_PARAMS gmmParams = {};
+ gmmParams.Flags.Info = {0};
+
+ TEST_TILE_TYPE Tiling = (TEST_TILE_TYPE)std::get<0>(element);
+ TEST_BPP Bpp = (TEST_BPP)std::get<1>(element);
+ TEST_RESOURCE_TYPE ResType = (TEST_RESOURCE_TYPE)std::get<2>(element);
+ bool IsRT = std::get<3>(element); // True for RT, False for Depth
+ int TestDimIdx = std::get<4>(element); //index into TestDimensions array
+ int ArrayIdx = std::get<5>(element); //index into TestArraySize
+ TileSize = (Tiling == TEST_TILEYS) ? GMM_KBYTE(64) : GMM_KBYTE(4);
+
+ //Discard un-supported Tiling/Res_type/bpp for this test
+ if(ResType != TEST_RESOURCE_2D || //No 1D/3D/Cube. Supported 2D mip-maps/array
+ (!IsRT && (Tiling == TEST_TILEX || // doesn't support TileX for Depth
+ !(Bpp == TEST_BPP_16 || Bpp == TEST_BPP_32)))) //depth supported on 16bit, 32bit formats only
+ continue;
+
+ if(!IsRT)
+ continue; //skip depth MSAA for now (requires change in h/v align)
+
+ SetTileFlag(gmmParams, Tiling);
+ SetResType(gmmParams, ResType);
+ SetResGpuFlags(gmmParams, IsRT);
+ SetResArraySize(gmmParams, TestArraySize[ArrayIdx]);
+
+ gmmParams.NoGfxMemory = 1;
+ gmmParams.Format = SetResourceFormat(Bpp);
+ for(uint32_t k = MSAA_2x; k <= MSAA_16x; k++)
+ {
+ GetAlignmentAndTileDimensionsForMSAA(Bpp, IsRT, Tiling, (TEST_MSAA)k,
+ TileDimX, TileDimY, HAlign, VAlign,
+ ExpectedMCSBpp, MCSHAlign, MCSVAlign);
+
+ //gmmParams.BaseWidth64 = TestDimensions[TestDimIdx][0] * TileDimX + 0x1;
+ //gmmParams.BaseHeight = TestDimensions[TestDimIdx][1] * TileDimY + 0x1;
+ gmmParams.BaseWidth64 = 4;
+ gmmParams.BaseHeight = 4;
+ gmmParams.Depth = 0x1;
+ gmmParams.MSAA.NumSamples = static_cast<uint32_t>(pow((double)2, k));
+ gmmParams.Flags.Gpu.MCS = 0;
+
+ //MSS surface
+ GMM_RESOURCE_INFO *MSSResourceInfo;
+ MSSResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
+
+ if(MSSResourceInfo)
+ {
+ VerifyResourceHAlign<true>(MSSResourceInfo, HAlign);
+ VerifyResourceVAlign<true>(MSSResourceInfo, VAlign);
+ if(IsRT) //Arrayed MSS
+ {
+ uint32_t ExpectedPitch = 0, ExpectedQPitch = 0;
+ ExpectedPitch = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseWidth64, HAlign) * (int)pow(2.0, Bpp), TileDimX); // Aligned width * bpp, aligned to TileWidth
+ ExpectedPitch = GFX_MAX(ExpectedPitch, MinPitch);
+ VerifyResourcePitch<true>(MSSResourceInfo, ExpectedPitch);
+ if(Tiling != TEST_LINEAR)
+ VerifyResourcePitchInTiles<true>(MSSResourceInfo, ExpectedPitch / TileDimX);
+
+ ExpectedQPitch = GMM_ULT_ALIGN(gmmParams.BaseHeight, VAlign);
+ if(gmmParams.ArraySize > 1) //Gen9: Qpitch is distance between array slices (not sample slices)
+ {
+ VerifyResourceQPitch<true>(MSSResourceInfo, ExpectedQPitch);
+ }
+
+ uint32_t ExpectedHeight = GMM_ULT_ALIGN(ExpectedQPitch * gmmParams.MSAA.NumSamples * gmmParams.ArraySize, TileDimY); //Align Height = ExpectedQPitch * NumSamples * ArraySize, to Tile-Height
+ VerifyResourceSize<true>(MSSResourceInfo, GMM_ULT_ALIGN(ExpectedPitch * ExpectedHeight, TileSize));
+ }
+ }
+
+ pGmmULTClientContext->DestroyResInfoObject(MSSResourceInfo);
+ } //NumSamples = k
+ } //Iterate through all Input types
+
+ //Mip-mapped, MSAA case:
+}
diff --git a/Source/GmmLib/ULT/GmmGen12ResourceULT.h b/Source/GmmLib/ULT/GmmGen12ResourceULT.h
new file mode 100644
index 0000000..60b524a
--- /dev/null
+++ b/Source/GmmLib/ULT/GmmGen12ResourceULT.h
@@ -0,0 +1,40 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+#pragma once
+
+#include "GmmGen10ResourceULT.h"
+#include "../GmmLib/inc/Internal/Common/Platform/GmmGen12Platform.h"
+
+class CTestGen12Resource : public CTestGen10Resource
+{
+public:
+ static void SetUpTestCase();
+ static void TearDownTestCase();
+};
+
+#define DEFINE_TILE(xxx, bpp) \
+ (bpp == TEST_BPP_8) ? TILE_##xxx##_8bpe : \
+ (bpp == TEST_BPP_16) ? TILE_##xxx##_16bpe : \
+ (bpp == TEST_BPP_32) ? TILE_##xxx##_32bpe : \
+ (bpp == TEST_BPP_64) ? TILE_##xxx##_64bpe : \
+ TILE_##xxx##_128bpe
+
\ No newline at end of file
diff --git a/Source/GmmLib/ULT/GmmResourceULT.cpp b/Source/GmmLib/ULT/GmmResourceULT.cpp
index e88f6dc..ce3796d 100644
--- a/Source/GmmLib/ULT/GmmResourceULT.cpp
+++ b/Source/GmmLib/ULT/GmmResourceULT.cpp
@@ -2832,7 +2832,8 @@
// VVVVVVVV
// VVVVVVVV
// VVVVVVVV
- const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
+ const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
+ const uint32_t PlaneRowAlignment = 16;
const uint32_t TileSize[3][2] = {{1, 1}, //Linear
{512, 8}, // TileX
@@ -2845,19 +2846,29 @@
gmmParams.Type = RESOURCE_2D;
gmmParams.NoGfxMemory = 1;
gmmParams.Flags.Gpu.Texture = 1;
- gmmParams.BaseWidth64 = 0x100;
- gmmParams.BaseHeight = 0x100;
+ gmmParams.BaseWidth64 = 0x101;
+ gmmParams.BaseHeight = 0x101;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
- gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all planar surfaces
- gmmParams.Format = GMM_FORMAT_RGBP;
+ gmmParams.Format = GMM_FORMAT_RGBP;
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
- uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
- uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight * 3 /*Y, U, V*/, TileSize[TileIndex][1]);
- uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
+ uint32_t Pitch, Height;
+ if(Tile != TEST_LINEAR)
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
+ //Since Tile alignment factor is greater than GMM_IMCx_PLANE_ROW_ALIGNMENT=16
+ Height = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
+ Height = GMM_ULT_ALIGN(Height, TileSize[TileIndex][1]) * 3 /*Y, U, V*/;
+ }
+ else
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, GMM_BYTES(64));
+ Height = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment /* min16 rows*/) * 3 /*Y, U, V*/;
+ }
+ uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@@ -2875,11 +2886,11 @@
// U plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
- EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
+ EXPECT_EQ(Height / 3, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
// V plane should be at end of U plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
- EXPECT_EQ(gmmParams.BaseHeight * 2, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
+ EXPECT_EQ(2 * (Height / 3), ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
}
@@ -2897,7 +2908,8 @@
// UUUUUUUU
// VVVVVVVV
// VVVVVVVV
- const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
+ const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
+ const uint32_t PlaneRowAlignment = 16;
const uint32_t TileSize[3][2] = {{1, 1}, //Linear
{512, 8}, // TileX
@@ -2910,8 +2922,8 @@
gmmParams.Type = RESOURCE_2D;
gmmParams.NoGfxMemory = 1;
gmmParams.Flags.Gpu.Texture = 1;
- gmmParams.BaseWidth64 = 0x100;
- gmmParams.BaseHeight = 0x100;
+ gmmParams.BaseWidth64 = 0x101;
+ gmmParams.BaseHeight = 0x101;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all planar surfaces
@@ -2920,9 +2932,25 @@
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
- uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
- uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y */ + gmmParams.BaseHeight /*U, V*/, TileSize[TileIndex][1]);
- uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
+ uint32_t Pitch, Height;
+ uint32_t YHeight, VHeight;
+ if(Tile != TEST_LINEAR)
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
+ YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
+ YHeight = GMM_ULT_ALIGN(YHeight, TileSize[TileIndex][1]);
+
+ VHeight = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseHeight, 2) / 2, PlaneRowAlignment);
+ VHeight = GMM_ULT_ALIGN(VHeight, TileSize[TileIndex][1]);
+ }
+ else
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, GMM_BYTES(64));
+ YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
+ VHeight = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseHeight, 2) / 2, PlaneRowAlignment);
+ }
+ Height = YHeight + 2 * VHeight;
+ uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@@ -2940,11 +2968,11 @@
// U plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
- EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
+ EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
// V plane should be at end of U plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
- EXPECT_EQ(gmmParams.BaseHeight + gmmParams.BaseHeight / 2, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
+ EXPECT_EQ(YHeight + VHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
}
@@ -2960,7 +2988,8 @@
//YYYYYYYY
//UUUUUUUU
//VVVVVVVV
- const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
+ const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
+ const uint32_t PlaneRowAlignment = 16;
const uint32_t TileSize[3][2] = {{1, 1}, //Linear
{512, 8}, // TileX
@@ -2973,19 +3002,35 @@
gmmParams.Type = RESOURCE_2D;
gmmParams.NoGfxMemory = 1;
gmmParams.Flags.Gpu.Texture = 1;
- gmmParams.BaseWidth64 = 0x100;
- gmmParams.BaseHeight = 0x100;
+ gmmParams.BaseWidth64 = 0x101;
+ gmmParams.BaseHeight = 0x101;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
- gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all planar surfaces
- gmmParams.Format = GMM_FORMAT_MFX_JPEG_YUV411R_TYPE;
+ gmmParams.Format = GMM_FORMAT_MFX_JPEG_YUV411R_TYPE;
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
- uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
- uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y */ + gmmParams.BaseHeight / 2 /*U, V*/, TileSize[TileIndex][1]);
- uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
+ uint32_t Pitch, Height;
+ uint32_t YHeight, VHeight;
+ if(Tile != TEST_LINEAR)
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
+ YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
+ YHeight = GMM_ULT_ALIGN(YHeight, TileSize[TileIndex][1]);
+
+ VHeight = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseHeight, 4) / 4, PlaneRowAlignment);
+ VHeight = GMM_ULT_ALIGN(VHeight, TileSize[TileIndex][1]);
+ }
+ else
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, GMM_BYTES(64));
+ YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
+ VHeight = GMM_ULT_ALIGN(GMM_ULT_ALIGN(gmmParams.BaseHeight, 4) / 4, PlaneRowAlignment);
+ }
+
+ Height = YHeight + 2 * VHeight;
+ uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@@ -3003,11 +3048,11 @@
// U plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
- EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
+ EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
// V plane should be at end of U plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
- EXPECT_EQ(gmmParams.BaseHeight + gmmParams.BaseHeight / 4, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
+ EXPECT_EQ(YHeight + VHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
}
@@ -3040,15 +3085,26 @@
gmmParams.BaseHeight = 0x100;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
- gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all tiled planar surfaces
- gmmParams.Format = GMM_FORMAT_NV12;
+ gmmParams.Format = GMM_FORMAT_NV12;
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
- uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
- uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y*/ + gmmParams.BaseHeight / 2 /*UV*/, TileSize[TileIndex][1]);
- uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
+ uint32_t Pitch, Height;
+
+ if(Tile != TEST_LINEAR)
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
+
+ Height = GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[TileIndex][1]) +
+ GMM_ULT_ALIGN(gmmParams.BaseHeight / 2, TileSize[TileIndex][1]);
+ }
+ else
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
+ Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y*/ + gmmParams.BaseHeight / 2 /*UV*/, TileSize[TileIndex][1]);
+ }
+ uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@@ -3066,9 +3122,18 @@
// U/V plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
- EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
- EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
+
+ if(Tile != TEST_LINEAR)
+ {
+ EXPECT_EQ(GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[TileIndex][1]), ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
+ EXPECT_EQ(GMM_ULT_ALIGN(gmmParams.BaseHeight, TileSize[TileIndex][1]), ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
+ }
+ else
+ {
+ EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
+ EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
+ }
pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
}
@@ -3084,7 +3149,8 @@
// YYYYYYYY
// UUUUVVVV
// UUUUVVVV
- const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
+ const TEST_TILE_TYPE TileTypes[] = {TEST_LINEAR, TEST_TILEX, TEST_TILEY};
+ const uint32_t PlaneRowAlignment = 16;
const uint32_t TileSize[3][2] = {{1, 1}, //Linear
{512, 8}, // TileX
@@ -3097,19 +3163,41 @@
gmmParams.Type = RESOURCE_2D;
gmmParams.NoGfxMemory = 1;
gmmParams.Flags.Gpu.Texture = 1;
- gmmParams.BaseWidth64 = 0x100;
- gmmParams.BaseHeight = 0x100;
+ gmmParams.BaseWidth64 = 0x101;
+ gmmParams.BaseHeight = 0x101;
gmmParams.Depth = 0x1;
SetTileFlag(gmmParams, static_cast<TEST_TILE_TYPE>(Tile));
- gmmParams.Flags.Info.Linear = 1; // GmmLib needs linear to be set as fallback for all planar surfaces
- gmmParams.Format = GMM_FORMAT_IMC4;
+ gmmParams.Format = GMM_FORMAT_IMC4;
GMM_RESOURCE_INFO *ResourceInfo;
ResourceInfo = pGmmULTClientContext->CreateResInfoObject(&gmmParams);
- uint32_t Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
- uint32_t Height = GMM_ULT_ALIGN(gmmParams.BaseHeight /*Y*/ + gmmParams.BaseHeight / 2 /*UV*/, TileSize[TileIndex][1]);
- uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
+ uint32_t Pitch, Height;
+ uint32_t YHeight, VHeight;
+ if(Tile != TEST_LINEAR)
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, TileSize[TileIndex][0]);
+ if(Pitch / TileSize[TileIndex][0] % 2)
+ {
+ Pitch += TileSize[TileIndex][0];
+ }
+
+ YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
+ VHeight = YHeight / 2;
+
+ YHeight = GMM_ULT_ALIGN(YHeight, TileSize[TileIndex][1]);
+ VHeight = GMM_ULT_ALIGN(VHeight, TileSize[TileIndex][1]); // No need of PlaneRowAlignment since last plane
+ }
+ else
+ {
+ Pitch = GMM_ULT_ALIGN(gmmParams.BaseWidth64, GMM_BYTES(64));
+ YHeight = GMM_ULT_ALIGN(gmmParams.BaseHeight, PlaneRowAlignment);
+ VHeight = YHeight / 2;
+ }
+
+ Height = YHeight + VHeight;
+
+ uint32_t Size = GMM_ULT_ALIGN(Pitch * Height, GMM_KBYTE(4));
VerifyResourcePitch<true>(ResourceInfo, Pitch);
if(Tile != TEST_LINEAR)
@@ -3127,13 +3215,18 @@
// U plane should be at end of Y plane
EXPECT_EQ(0, ResourceInfo->GetPlanarXOffset(GMM_PLANE_U));
- EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
-
- // V plane should be at end of U plane
EXPECT_EQ(Pitch / 2, ResourceInfo->GetPlanarXOffset(GMM_PLANE_V));
- EXPECT_EQ(gmmParams.BaseHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
- pGmmULTClientContext->DestroyResInfoObject(ResourceInfo);
+ if(Tile != TEST_LINEAR)
+ {
+ EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
+ EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
+ }
+ else
+ {
+ EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_U));
+ EXPECT_EQ(YHeight, ResourceInfo->GetPlanarYOffset(GMM_PLANE_V));
+ }
}
}
diff --git a/Source/GmmLib/Utility/GmmLibObject.cpp b/Source/GmmLib/Utility/GmmLibObject.cpp
index a9cfb70..af3c287 100644
--- a/Source/GmmLib/Utility/GmmLibObject.cpp
+++ b/Source/GmmLib/Utility/GmmLibObject.cpp
@@ -24,11 +24,14 @@
#include "Internal/Common/Platform/GmmGen10Platform.h"
#include "Internal/Common/Platform/GmmGen11Platform.h"
+#include "Internal/Common/Platform/GmmGen12Platform.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen10.h"
#include "External/Common/CachePolicy/GmmCachePolicyGen11.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen12.h"
#include "Internal/Common/Texture/GmmTextureCalc.h"
#include "Internal/Common/Texture/GmmGen10TextureCalc.h"
#include "Internal/Common/Texture/GmmGen11TextureCalc.h"
+#include "Internal/Common/Texture/GmmGen12TextureCalc.h"
/////////////////////////////////////////////////////////////////////////////////////
/// Static function to return a PlatformInfo object based on input platform
@@ -60,7 +63,11 @@
}
#endif
GMM_DPF_EXIT;
- if(GFX_GET_CURRENT_RENDERCORE(Platform) >= IGFX_GEN11_CORE)
+ if (GFX_GET_CURRENT_RENDERCORE(Platform) >= IGFX_GEN12_CORE)
+ {
+ return new GmmLib::PlatformInfoGen12(Platform);
+ }
+ else if(GFX_GET_CURRENT_RENDERCORE(Platform) >= IGFX_GEN11_CORE)
{
return new GmmLib::PlatformInfoGen11(Platform);
}
@@ -95,7 +102,11 @@
return pGmmGlobalContext->GetCachePolicyObj();
}
- if(GFX_GET_CURRENT_RENDERCORE(pGmmGlobalContext->GetPlatformInfo().Platform) >= IGFX_GEN11_CORE)
+ if (GFX_GET_CURRENT_RENDERCORE(pGmmGlobalContext->GetPlatformInfo().Platform) >= IGFX_GEN12_CORE)
+ {
+ pGmmCachePolicy = new GmmLib::GmmGen12CachePolicy(CachePolicy);
+ }
+ else if(GFX_GET_CURRENT_RENDERCORE(pGmmGlobalContext->GetPlatformInfo().Platform) >= IGFX_GEN11_CORE)
{
pGmmCachePolicy = new GmmLib::GmmGen11CachePolicy(CachePolicy);
}
@@ -158,8 +169,9 @@
case IGFX_GEN11_CORE:
return new GmmGen11TextureCalc();
break;
+ case IGFX_GEN12_CORE:
default:
- return new GmmGen11TextureCalc();
+ return new GmmGen12TextureCalc();
break;
}
}
\ No newline at end of file
diff --git a/Source/GmmLib/Utility/GmmUtility.cpp b/Source/GmmLib/Utility/GmmUtility.cpp
index 2ca11e1..19d1100 100644
--- a/Source/GmmLib/Utility/GmmUtility.cpp
+++ b/Source/GmmLib/Utility/GmmUtility.cpp
@@ -177,6 +177,7 @@
case GMM_FORMAT_P010:
case GMM_FORMAT_P016:
case GMM_FORMAT_YUY2:
+ case GMM_FORMAT_Y210:
case GMM_FORMAT_Y410:
case GMM_FORMAT_Y216:
case GMM_FORMAT_Y416:
diff --git a/Source/GmmLib/inc/External/Common/CachePolicy/GmmCachePolicyGen12.h b/Source/GmmLib/inc/External/Common/CachePolicy/GmmCachePolicyGen12.h
new file mode 100644
index 0000000..d0fadab
--- /dev/null
+++ b/Source/GmmLib/inc/External/Common/CachePolicy/GmmCachePolicyGen12.h
@@ -0,0 +1,68 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+#pragma once
+
+#ifdef __cplusplus
+#include "../GmmCachePolicyCommon.h"
+
+#define GMM_GEN12_MAX_NUMBER_MOCS_INDEXES (60) // On TGL the last four (#60-#63) are reserved by h/w; a few are s/w configurable though (#60)
+
+namespace GmmLib
+{
+ class NON_PAGED_SECTION GmmGen12CachePolicy :
+ public GmmGen11CachePolicy
+ {
+ public:
+
+ /* Constructors */
+ GmmGen12CachePolicy(GMM_CACHE_POLICY_ELEMENT *pCachePolicy) :GmmGen11CachePolicy(pCachePolicy)
+ {
+#if(defined(__GMM_KMD__))
+ {
+ // Set the WA's needed for Private PAT initialization
+ SetPATInitWA();
+ SetupPAT();
+ }
+#endif
+ }
+ virtual ~GmmGen12CachePolicy()
+ {
+ }
+
+ virtual uint32_t GetMaxSpecialMocsIndex()
+ {
+ return CurrentMaxSpecialMocsIndex;
+ }
+
+ int32_t IsSpecialMOCSUsage(GMM_RESOURCE_USAGE_TYPE Usage, bool& UpdateMOCS);
+
+ /* Function prototypes */
+ GMM_STATUS InitCachePolicy();
+ uint8_t SelectNewPATIdx(GMM_GFX_MEMORY_TYPE WantedMT, GMM_GFX_MEMORY_TYPE MT1, GMM_GFX_MEMORY_TYPE MT2);
+ uint32_t BestMatchingPATIdx(GMM_CACHE_POLICY_ELEMENT CachePolicy);
+ GMM_STATUS SetPATInitWA();
+ GMM_STATUS SetupPAT();
+ void SetUpMOCSTable();
+ };
+}
+#endif // #ifdef __cplusplus
\ No newline at end of file
diff --git a/Source/GmmLib/inc/External/Common/GmmCachePolicy.h b/Source/GmmLib/inc/External/Common/GmmCachePolicy.h
index e03e7ce..91b9f1a 100644
--- a/Source/GmmLib/inc/External/Common/GmmCachePolicy.h
+++ b/Source/GmmLib/inc/External/Common/GmmCachePolicy.h
@@ -173,6 +173,12 @@
}Gen10;
#endif
+ struct
+ {
+ uint32_t MemoryType : 2;
+ uint32_t Reserved : 30;
+ }Gen12;
+
uint32_t Value;
} GMM_PRIVATE_PAT;
diff --git a/Source/GmmLib/inc/External/Common/GmmCachePolicyExt.h b/Source/GmmLib/inc/External/Common/GmmCachePolicyExt.h
index 34c8489..decfbbd 100644
--- a/Source/GmmLib/inc/External/Common/GmmCachePolicyExt.h
+++ b/Source/GmmLib/inc/External/Common/GmmCachePolicyExt.h
@@ -110,7 +110,7 @@
uint32_t EncryptedData : 1;
uint32_t Index : 6 ;
uint32_t : 25;
- }Gen9, Gen10, Gen11;
+ }Gen9, Gen10, Gen11,Gen12;
uint32_t DwordValue;
}MEMORY_OBJECT_CONTROL_STATE;
diff --git a/Source/GmmLib/inc/External/Common/GmmClientContext.h b/Source/GmmLib/inc/External/Common/GmmClientContext.h
index 6887f6c..47f2ff2 100644
--- a/Source/GmmLib/inc/External/Common/GmmClientContext.h
+++ b/Source/GmmLib/inc/External/Common/GmmClientContext.h
@@ -112,6 +112,9 @@
GMM_VIRTUAL uint8_t GMM_STDCALL IsCompressed(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL uint8_t GMM_STDCALL IsYUVPacked(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL GMM_SURFACESTATE_FORMAT GMM_STDCALL GetSurfaceStateFormat(GMM_RESOURCE_FORMAT Format);
+ GMM_VIRTUAL uint8_t GMM_STDCALL GetSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT Format);
+ GMM_VIRTUAL uint8_t GMM_STDCALL GetMediaSurfaceStateCompressionFormat(GMM_RESOURCE_FORMAT Format);
+ GMM_VIRTUAL GMM_E2ECOMP_FORMAT GMM_STDCALL GetLosslessCompressionType(GMM_RESOURCE_FORMAT Format);
GMM_VIRTUAL uint64_t GMM_STDCALL GetInternalGpuVaRangeLimit();
/* ResourceInfo Creation and Destroy API's */
diff --git a/Source/GmmLib/inc/External/Common/GmmCommonExt.h b/Source/GmmLib/inc/External/Common/GmmCommonExt.h
index c4266df..d0b7c1e 100644
--- a/Source/GmmLib/inc/External/Common/GmmCommonExt.h
+++ b/Source/GmmLib/inc/External/Common/GmmCommonExt.h
@@ -198,6 +198,20 @@
//===========================================================================
// typedef:
+// GMM_E2E_COMPRESSION_TYPE_ENUM
+//
+// Description:
+// This enum details compression type (i.e. render or media compressed, or uncompressed )
+//---------------------------------------------------------------------------
+typedef enum GMM_E2E_COMPRESSION_TYPE_ENUM
+{
+ GMM_UNCOMPRESSED,
+ GMM_RENDER_COMPRESSED,
+ GMM_MEDIA_COMPRESSED
+}GMM_E2E_COMPRESSION_TYPE;
+
+//===========================================================================
+// typedef:
// GMM_CPU_CACHE_TYPE_ENUM
//
// Description:
@@ -416,6 +430,70 @@
#include "GmmFormatTable.h"
} GMM_SURFACESTATE_FORMAT;
+typedef enum GMM_E2ECOMP_FORMAT_ENUM
+{
+ GMM_E2ECOMP_FORMAT_INVALID = 0,
+ GMM_E2ECOMP_FORMAT_RGB64, //1h - Reserved
+ GMM_E2ECOMP_FORMAT_RGB32, //2h - Reserved
+
+ GMM_E2ECOMP_MIN_FORMAT = GMM_E2ECOMP_FORMAT_RGB32,
+
+ GMM_E2ECOMP_FORMAT_YUY2, //3h
+ GMM_E2ECOMP_FORMAT_YCRCB_SWAPUV = GMM_E2ECOMP_FORMAT_YUY2,
+ GMM_E2ECOMP_FORMAT_YCRCB_SWAPUVY = GMM_E2ECOMP_FORMAT_YUY2,
+ GMM_E2ECOMP_FORMAT_YCRCB_SWAPY = GMM_E2ECOMP_FORMAT_YUY2,
+
+ GMM_E2ECOMP_FORMAT_Y410, //4h
+
+ GMM_E2ECOMP_FORMAT_Y210, //5h
+ GMM_E2ECOMP_FORMAT_Y216 = GMM_E2ECOMP_FORMAT_Y210,
+
+ GMM_E2ECOMP_FORMAT_Y416, //6h
+ GMM_E2ECOMP_FORMAT_P010, //7h
+ GMM_E2ECOMP_FORMAT_P016, //8h
+ GMM_E2ECOMP_FORMAT_AYUV, //9h
+
+ GMM_E2ECOMP_FORMAT_ARGB8b, //Ah
+ GMM_E2ECOMP_FORMAT_RGB5A1 = GMM_E2ECOMP_FORMAT_ARGB8b,
+ GMM_E2ECOMP_FORMAT_RGBA4 = GMM_E2ECOMP_FORMAT_ARGB8b,
+ GMM_E2ECOMP_FORMAT_B5G6R5 = GMM_E2ECOMP_FORMAT_ARGB8b,
+
+ GMM_E2ECOMP_FORMAT_SWAPY, //Bh
+ GMM_E2ECOMP_FORMAT_SWAPUV, //Ch
+ GMM_E2ECOMP_FORMAT_SWAPUVY, //Dh
+ GMM_E2ECOMP_FORMAT_RGB10b, //Eh --Which media format is it?
+ GMM_E2ECOMP_FORMAT_NV12, //Fh
+
+ GMM_E2ECOMP_FORMAT_RGBAFLOAT16, //0x10h
+
+ GMM_E2ECOMP_FORMAT_R32G32B32A32_FLOAT, //0x11h
+ GMM_E2ECOMP_FORMAT_R32G32B32A32_SINT, //0x12h
+ GMM_E2ECOMP_FORMAT_R32G32B32A32_UINT, //0x13h
+ GMM_E2ECOMP_FORMAT_R16G16B16A16_UNORM, //0x14h
+ GMM_E2ECOMP_FORMAT_R16G16B16A16_SNORM, //0x15h
+ GMM_E2ECOMP_FORMAT_R16G16B16A16_SINT, //0x16h
+ GMM_E2ECOMP_FORMAT_R16G16B16A16_UINT, //0x17h
+
+ GMM_E2ECOMP_FORMAT_R10G10B10A2_UNORM, //0x18h
+ GMM_E2ECOMP_FORMAT_RGB10A2 = GMM_E2ECOMP_FORMAT_R10G10B10A2_UNORM,
+
+ GMM_E2ECOMP_FORMAT_R10G10B10FLOAT_A2_UNORM, //0x19h
+ GMM_E2ECOMP_FORMAT_R10G10B10A2_UINT, //0x1Ah
+ GMM_E2ECOMP_FORMAT_R8G8B8A8_SNORM, //0x1Bh
+ GMM_E2ECOMP_FORMAT_R8G8B8A8_SINT, //0x1Ch
+ GMM_E2ECOMP_FORMAT_R8G8B8A8_UINT, //0x1Dh
+
+ GMM_E2ECOMP_FORMAT_R11G11B10_FLOAT, //0x1Eh
+ GMM_E2ECOMP_FORMAT_RG11B10 = GMM_E2ECOMP_FORMAT_R11G11B10_FLOAT,
+
+ GMM_E2ECOMP_MAX_FORMAT = GMM_E2ECOMP_FORMAT_R11G11B10_FLOAT, //should always be equal to last format encoding
+
+ GMM_E2ECOMP_FORMAT_RGBA = GMM_E2ECOMP_FORMAT_INVALID,
+ GMM_E2ECOMP_FORMAT_R = GMM_E2ECOMP_FORMAT_INVALID,
+ GMM_E2ECOMP_FORMAT_RG = GMM_E2ECOMP_FORMAT_INVALID,
+
+} GMM_E2ECOMP_FORMAT;
+
//===========================================================================
// typedef:
// GMM_TILE_WALK
diff --git a/Source/GmmLib/inc/External/Common/GmmFormatTable.h b/Source/GmmLib/inc/External/Common/GmmFormatTable.h
index c23b139..f394b99 100644
--- a/Source/GmmLib/inc/External/Common/GmmFormatTable.h
+++ b/Source/GmmLib/inc/External/Common/GmmFormatTable.h
@@ -46,418 +46,428 @@
#define VLV2 GFX_IS_PRODUCT(Data.Platform,IGFX_VALLEYVIEW)
#define WA GMM_FORMAT_WA
#define x 0
-#define NC 0
+#define NC GMM_COMPR_FORMAT_INVALID
+
+#define FC(ver, bpc, fmtstr, bpcstr, typestr) \
+ (ver == 1 || SKU(FtrE2ECompression)) ? \
+ ((bpc == 16) ? GMM_E2ECOMP_FORMAT_RGBAFLOAT16 : \
+ (bpc == 32) ? GMM_E2ECOMP_FORMAT_R32G32B32A32_FLOAT : \
+ (bpc == 8) ? GMM_E2ECOMP_FORMAT_ARGB8b : \
+ (bpc == x) ? GMM_E2ECOMP_FORMAT_##fmtstr : NC) :NC
/****************************************************************************\
GMM FORMAT TABLE
(See bottom of file for more info.)
- Supported (ALWAYS / *) -----------------------------------------------------------o
- Reserved ---------------------------------------------------------------o |
- RCS SURFACE_STATE.Format (or NA) --------------------------------o | |
- ASTC Format (A / x) ----------------------------------------o | | |
- Render Target Eligibility (R / x / *) -------------------o | | | |
- Element Depth (Pixels) -------------------------------o | | | | |
- Element Height (Pixels) ---------------------------o | | | | | |
- Element Width (Pixels) ------------------------o | | | | | | |
- Bits-per-Element -------------------------o | | | | | | | |
- | | | | | | | | |
- Name bpe w h d R A RCS.SS RESV Available
---------------------------------------------------------------------------------------*/
+ Supported (ALWAYS / *) -----------------------------------------------------------------o
+ SURFACE_STATE.CompressionFormat (or NC) --------------------------------------o |
+ RCS SURFACE_STATE.Format (or NA) --------------------------------o | |
+ ASTC Format (A / x) ----------------------------------------o | | |
+ Render Target Eligibility (R / x / *) -------------------o | | | |
+ Element Depth (Pixels) -------------------------------o | | | | |
+ Element Height (Pixels) ---------------------------o | | | | | |
+ Element Width (Pixels) ------------------------o | | | | | | |
+ Bits-per-Element -------------------------o | | | | | | | |
+ | | | | | | | | |
+ Name bpe w h d R A RCS.SS CompressFormat Available
+------------------------------------------------------------------------------------------*/
#ifdef INCLUDE_SURFACESTATE_FORMATS
-GMM_FORMAT( A1B5G5R5_UNORM , 16, 1, 1, 1, R, x, 0x124, 0xA , GEN(8) || VLV2 )
-GMM_FORMAT( A4B4G4R4_UNORM , 16, 1, 1, 1, R, x, 0x125, 0xA , GEN(8) )
-GMM_FORMAT( A4P4_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x148, NC , ALWAYS )
-GMM_FORMAT( A4P4_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14F, NC , ALWAYS )
-GMM_FORMAT( A8_UNORM , 8, 1, 1, 1, R, x, 0x144, 0xA , GEN(7) )
-GMM_FORMAT( A8P8_UNORM_PALETTE0 , 16, 1, 1, 1, R, x, 0x10F, NC , ALWAYS )
-GMM_FORMAT( A8P8_UNORM_PALETTE1 , 16, 1, 1, 1, R, x, 0x110, NC , ALWAYS )
-GMM_FORMAT( A8X8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E7, NC , ALWAYS )
-GMM_FORMAT( A16_FLOAT , 16, 1, 1, 1, R, x, 0x117, NC , GEN(7) )
-GMM_FORMAT( A16_UNORM , 16, 1, 1, 1, R, x, 0x113, NC , GEN(7) )
-GMM_FORMAT( A24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E2, NC , GEN(7) )
-GMM_FORMAT( A32_FLOAT , 32, 1, 1, 1, R, x, 0x0E5, NC , GEN(7) )
-GMM_FORMAT( A32_UNORM , 32, 1, 1, 1, R, x, 0x0DE, NC , GEN(7) )
-GMM_FORMAT( A32X32_FLOAT , 64, 1, 1, 1, R, x, 0x090, NC , ALWAYS )
-GMM_FORMAT( B4G4R4A4_UNORM , 16, 1, 1, 1, R, x, 0x104, 0xA , ALWAYS )
-GMM_FORMAT( B4G4R4A4_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x105, 0xA , ALWAYS )
-GMM_FORMAT( B5G5R5A1_UNORM , 16, 1, 1, 1, R, x, 0x102, 0xA , ALWAYS )
-GMM_FORMAT( B5G5R5A1_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x103, 0xA , ALWAYS )
-GMM_FORMAT( B5G5R5X1_UNORM , 16, 1, 1, 1, R, x, 0x11A, 0xA , ALWAYS )
-GMM_FORMAT( B5G5R5X1_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x11B, 0xA , ALWAYS )
-GMM_FORMAT( B5G6R5_UNORM , 16, 1, 1, 1, R, x, 0x100, 0xA , ALWAYS )
-GMM_FORMAT( B5G6R5_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x101, 0xA , ALWAYS )
-GMM_FORMAT( B8G8R8A8_UNORM , 32, 1, 1, 1, R, x, 0x0C0, 0xA , ALWAYS )
-GMM_FORMAT( B8G8R8A8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C1, 0xA , ALWAYS )
-GMM_FORMAT( B8G8R8X8_UNORM , 32, 1, 1, 1, R, x, 0x0E9, 0xA , ALWAYS )
-GMM_FORMAT( B8G8R8X8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0EA, 0xA , ALWAYS )
-GMM_FORMAT( B8X8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E8, NC , ALWAYS )
-GMM_FORMAT( B10G10R10A2_SINT , 32, 1, 1, 1, R, x, 0x1BB, 0x18, GEN(8) )
-GMM_FORMAT( B10G10R10A2_SNORM , 32, 1, 1, 1, R, x, 0x1B7, 0x18, GEN(8) )
-GMM_FORMAT( B10G10R10A2_SSCALED , 32, 1, 1, 1, R, x, 0x1B9, 0x18, GEN(8) )
-GMM_FORMAT( B10G10R10A2_UINT , 32, 1, 1, 1, R, x, 0x1BA, 0x18, GEN(8) )
-GMM_FORMAT( B10G10R10A2_UNORM , 32, 1, 1, 1, R, x, 0x0D1, 0x18, ALWAYS )
-GMM_FORMAT( B10G10R10A2_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0D2, 0x18, ALWAYS )
-GMM_FORMAT( B10G10R10A2_USCALED , 32, 1, 1, 1, R, x, 0x1B8, 0x18, GEN(8) )
-GMM_FORMAT( B10G10R10X2_UNORM , 32, 1, 1, 1, R, x, 0x0EE, 0x18, ALWAYS )
-GMM_FORMAT( BC1_UNORM , 64, 4, 4, 1, x, x, 0x186, NC , ALWAYS )
-GMM_FORMAT( BC1_UNORM_SRGB , 64, 4, 4, 1, x, x, 0x18B, NC , ALWAYS )
-GMM_FORMAT( BC2_UNORM , 128, 4, 4, 1, x, x, 0x187, NC , ALWAYS )
-GMM_FORMAT( BC2_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x18C, NC , ALWAYS )
-GMM_FORMAT( BC3_UNORM , 128, 4, 4, 1, x, x, 0x188, NC , ALWAYS )
-GMM_FORMAT( BC3_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x18D, NC , ALWAYS )
-GMM_FORMAT( BC4_SNORM , 64, 4, 4, 1, x, x, 0x199, NC , ALWAYS )
-GMM_FORMAT( BC4_UNORM , 64, 4, 4, 1, x, x, 0x189, NC , ALWAYS )
-GMM_FORMAT( BC5_SNORM , 128, 4, 4, 1, x, x, 0x19A, NC , ALWAYS )
-GMM_FORMAT( BC5_UNORM , 128, 4, 4, 1, x, x, 0x18A, NC , ALWAYS )
-GMM_FORMAT( BC6H_SF16 , 128, 4, 4, 1, x, x, 0x1A1, NC , GEN(7) )
-GMM_FORMAT( BC6H_UF16 , 128, 4, 4, 1, x, x, 0x1A4, NC , GEN(7) )
-GMM_FORMAT( BC7_UNORM , 128, 4, 4, 1, x, x, 0x1A2, NC , GEN(7) )
-GMM_FORMAT( BC7_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x1A3, NC , GEN(7) )
-GMM_FORMAT( DXT1_RGB , 64, 4, 4, 1, x, x, 0x191, NC , ALWAYS )
-GMM_FORMAT( DXT1_RGB_SRGB , 64, 4, 4, 1, x, x, 0x180, NC , ALWAYS )
-GMM_FORMAT( EAC_R11 , 64, 4, 4, 1, x, x, 0x1AB, NC , GEN(8) || VLV2 )
-GMM_FORMAT( EAC_RG11 , 128, 4, 4, 1, x, x, 0x1AC, NC , GEN(8) || VLV2 )
-GMM_FORMAT( EAC_SIGNED_R11 , 64, 4, 4, 1, x, x, 0x1AD, NC , GEN(8) || VLV2 )
-GMM_FORMAT( EAC_SIGNED_RG11 , 128, 4, 4, 1, x, x, 0x1AE, NC , GEN(8) || VLV2 )
-GMM_FORMAT( ETC1_RGB8 , 64, 4, 4, 1, x, x, 0x1A9, NC , GEN(8) || VLV2 )
-GMM_FORMAT( ETC2_EAC_RGBA8 , 128, 4, 4, 1, x, x, 0x1C2, NC , GEN(8) || VLV2 )
-GMM_FORMAT( ETC2_EAC_SRGB8_A8 , 128, 4, 4, 1, x, x, 0x1C3, NC , GEN(8) || VLV2 )
-GMM_FORMAT( ETC2_RGB8 , 64, 4, 4, 1, x, x, 0x1AA, NC , GEN(8) || VLV2 )
-GMM_FORMAT( ETC2_RGB8_PTA , 64, 4, 4, 1, x, x, 0x1C0, NC , GEN(8) || VLV2 )
-GMM_FORMAT( ETC2_SRGB8 , 64, 4, 4, 1, x, x, 0x1AF, NC , GEN(8) || VLV2 )
-GMM_FORMAT( ETC2_SRGB8_PTA , 64, 4, 4, 1, x, x, 0x1C1, NC , GEN(8) || VLV2 )
-GMM_FORMAT( FXT1 , 128, 8, 4, 1, x, x, 0x192, NC , ALWAYS )
-GMM_FORMAT( I8_SINT , 8, 1, 1, 1, R, x, 0x155, NC , GEN(9) )
-GMM_FORMAT( I8_UINT , 8, 1, 1, 1, R, x, 0x154, NC , GEN(9) )
-GMM_FORMAT( I8_UNORM , 8, 1, 1, 1, R, x, 0x145, NC , ALWAYS )
-GMM_FORMAT( I16_FLOAT , 16, 1, 1, 1, R, x, 0x115, NC , ALWAYS )
-GMM_FORMAT( I16_UNORM , 16, 1, 1, 1, R, x, 0x111, NC , ALWAYS )
-GMM_FORMAT( I24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E0, NC , ALWAYS )
-GMM_FORMAT( I32_FLOAT , 32, 1, 1, 1, R, x, 0x0E3, NC , ALWAYS )
-GMM_FORMAT( I32X32_FLOAT , 64, 1, 1, 1, R, x, 0x092, NC , ALWAYS )
-GMM_FORMAT( L8_SINT , 8, 1, 1, 1, R, x, 0x153, NC , GEN(9) )
-GMM_FORMAT( L8_UINT , 8, 1, 1, 1, R, x, 0x152, NC , GEN(9) )
-GMM_FORMAT( L8_UNORM , 8, 1, 1, 1, R, x, 0x146, NC , ALWAYS )
-GMM_FORMAT( L8_UNORM_SRGB , 8, 1, 1, 1, R, x, 0x14C, NC , ALWAYS )
-GMM_FORMAT( L8A8_SINT , 16, 1, 1, 1, R, x, 0x127, NC , GEN(9) )
-GMM_FORMAT( L8A8_UINT , 16, 1, 1, 1, R, x, 0x126, NC , GEN(9) )
-GMM_FORMAT( L8A8_UNORM , 16, 1, 1, 1, R, x, 0x114, NC , ALWAYS )
-GMM_FORMAT( L8A8_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x118, NC , ALWAYS )
-GMM_FORMAT( L16_FLOAT , 16, 1, 1, 1, R, x, 0x116, NC , ALWAYS )
-GMM_FORMAT( L16_UNORM , 16, 1, 1, 1, R, x, 0x112, NC , ALWAYS )
-GMM_FORMAT( L16A16_FLOAT , 32, 1, 1, 1, R, x, 0x0F0, NC , ALWAYS )
-GMM_FORMAT( L16A16_UNORM , 32, 1, 1, 1, R, x, 0x0DF, NC , ALWAYS )
-GMM_FORMAT( L24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E1, NC , ALWAYS )
-GMM_FORMAT( L32_FLOAT , 32, 1, 1, 1, R, x, 0x0E4, NC , ALWAYS )
-GMM_FORMAT( L32_UNORM , 32, 1, 1, 1, R, x, 0x0DD, NC , ALWAYS )
-GMM_FORMAT( L32A32_FLOAT , 64, 1, 1, 1, R, x, 0x08A, NC , ALWAYS )
-GMM_FORMAT( L32X32_FLOAT , 64, 1, 1, 1, R, x, 0x091, NC , ALWAYS )
-GMM_FORMAT( MONO8 , 1, 1, 1, 1, R, x, 0x18E, NC , x ) // No current GMM support by this name.
-GMM_FORMAT( P2_UNORM_PALETTE0 , 2, 1, 1, 1, R, x, 0x184, NC , x ) // No current GMM support by this name.
-GMM_FORMAT( P2_UNORM_PALETTE1 , 2, 1, 1, 1, R, x, 0x185, NC , x ) // "
-GMM_FORMAT( P4A4_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x147, NC , ALWAYS )
-GMM_FORMAT( P4A4_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14E, NC , ALWAYS )
-GMM_FORMAT( P8_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x14B, NC , ALWAYS )
-GMM_FORMAT( P8_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14D, NC , ALWAYS )
-GMM_FORMAT( P8A8_UNORM_PALETTE0 , 16, 1, 1, 1, R, x, 0x122, NC , ALWAYS )
-GMM_FORMAT( P8A8_UNORM_PALETTE1 , 16, 1, 1, 1, R, x, 0x123, NC , ALWAYS )
-GMM_FORMAT( PLANAR_420_8 , 8, 1, 1, 1, R, x, 0x1A5, NC , x ) // No current GMM support by this name.
-GMM_FORMAT( PLANAR_420_16 , 16, 1, 1, 1, R, x, 0x1A6, NC , x ) // "
-GMM_FORMAT( PLANAR_422_8 , 8, 1, 1, 1, R, x, 0x00F, NC , x ) // <-- TODO(Minor): Remove this HW-internal format.
-GMM_FORMAT( R1_UNORM , 1, 1, 1, 1, R, x, 0x181, NC , x ) // "
-GMM_FORMAT( R8_SINT , 8, 1, 1, 1, R, x, 0x142, 0xA , ALWAYS )
-GMM_FORMAT( R8_SNORM , 8, 1, 1, 1, R, x, 0x141, 0xA , ALWAYS )
-GMM_FORMAT( R8_SSCALED , 8, 1, 1, 1, R, x, 0x149, 0xA , ALWAYS )
-GMM_FORMAT( R8_UINT , 8, 1, 1, 1, R, x, 0x143, 0xA , ALWAYS )
-GMM_FORMAT( R8_UNORM , 8, 1, 1, 1, R, x, 0x140, 0xA , ALWAYS )
-GMM_FORMAT( R8_USCALED , 8, 1, 1, 1, R, x, 0x14A, 0xA , ALWAYS )
-GMM_FORMAT( R8G8_SINT , 16, 1, 1, 1, R, x, 0x108, 0xA , ALWAYS )
-GMM_FORMAT( R8G8_SNORM , 16, 1, 1, 1, R, x, 0x107, 0xA , ALWAYS )
-GMM_FORMAT( R8G8_SSCALED , 16, 1, 1, 1, R, x, 0x11C, 0xA , ALWAYS )
-GMM_FORMAT( R8G8_UINT , 16, 1, 1, 1, R, x, 0x109, 0xA , ALWAYS )
-GMM_FORMAT( R8G8_UNORM , 16, 1, 1, 1, R, x, 0x106, 0xA , ALWAYS )
-GMM_FORMAT( R8G8_USCALED , 16, 1, 1, 1, R, x, 0x11D, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8_SINT , 24, 1, 1, 1, R, x, 0x1C9, NC , GEN(8) )
-GMM_FORMAT( R8G8B8_SNORM , 24, 1, 1, 1, R, x, 0x194, NC , ALWAYS )
-GMM_FORMAT( R8G8B8_SSCALED , 24, 1, 1, 1, R, x, 0x195, NC , ALWAYS )
-GMM_FORMAT( R8G8B8_UINT , 24, 1, 1, 1, R, x, 0x1C8, NC , GEN(8) || VLV2 )
-GMM_FORMAT( R8G8B8_UNORM , 24, 1, 1, 1, R, x, 0x193, NC , ALWAYS )
-GMM_FORMAT( R8G8B8_UNORM_SRGB , 24, 1, 1, 1, R, x, 0x1A8, NC , GEN(7_5) )
-GMM_FORMAT( R8G8B8_USCALED , 24, 1, 1, 1, R, x, 0x196, NC , ALWAYS )
-GMM_FORMAT( R8G8B8A8_SINT , 32, 1, 1, 1, R, x, 0x0CA, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8A8_SNORM , 32, 1, 1, 1, R, x, 0x0C9, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8A8_SSCALED , 32, 1, 1, 1, R, x, 0x0F4, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8A8_UINT , 32, 1, 1, 1, R, x, 0x0CB, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8A8_UNORM , 32, 1, 1, 1, R, x, 0x0C7, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8A8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C8, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8A8_USCALED , 32, 1, 1, 1, R, x, 0x0F5, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8X8_UNORM , 32, 1, 1, 1, R, x, 0x0EB, 0xA , ALWAYS )
-GMM_FORMAT( R8G8B8X8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0EC, 0xA , ALWAYS )
-GMM_FORMAT( R9G9B9E5_SHAREDEXP , 32, 1, 1, 1, R, x, 0x0ED, NC , ALWAYS )
-GMM_FORMAT( R10G10B10_SNORM_A2_UNORM , 32, 1, 1, 1, R, x, 0x0C5, 0x18, ALWAYS )
-GMM_FORMAT( R10G10B10A2_SINT , 32, 1, 1, 1, R, x, 0x1B6, 0x18, GEN(8) )
-GMM_FORMAT( R10G10B10A2_SNORM , 32, 1, 1, 1, R, x, 0x1B3, 0x18, GEN(8) )
-GMM_FORMAT( R10G10B10A2_SSCALED , 32, 1, 1, 1, R, x, 0x1B5, 0x18, GEN(8) )
-GMM_FORMAT( R10G10B10A2_UINT , 32, 1, 1, 1, R, x, 0x0C4, 0x18, ALWAYS )
-GMM_FORMAT( R10G10B10A2_UNORM , 32, 1, 1, 1, R, x, 0x0C2, 0x18, ALWAYS )
-GMM_FORMAT( R10G10B10A2_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C3, 0x18, ALWAYS )
-GMM_FORMAT( R10G10B10A2_USCALED , 32, 1, 1, 1, R, x, 0x1B4, 0x18, GEN(8) )
-GMM_FORMAT( R10G10B10X2_USCALED , 32, 1, 1, 1, R, x, 0x0F3, 0x18, ALWAYS )
-GMM_FORMAT( R11G11B10_FLOAT , 32, 1, 1, 1, R, x, 0x0D3, 0x1E, ALWAYS )
-GMM_FORMAT( R16_FLOAT , 16, 1, 1, 1, R, x, 0x10E, 0x10, ALWAYS )
-GMM_FORMAT( R16_SINT , 16, 1, 1, 1, R, x, 0x10C, 0x10, ALWAYS )
-GMM_FORMAT( R16_SNORM , 16, 1, 1, 1, R, x, 0x10B, 0x10, ALWAYS )
-GMM_FORMAT( R16_SSCALED , 16, 1, 1, 1, R, x, 0x11E, 0x10, ALWAYS )
-GMM_FORMAT( R16_UINT , 16, 1, 1, 1, R, x, 0x10D, 0x10, ALWAYS )
-GMM_FORMAT( R16_UNORM , 16, 1, 1, 1, R, x, 0x10A, 0x10, ALWAYS )
-GMM_FORMAT( R16_USCALED , 16, 1, 1, 1, R, x, 0x11F, 0x10, ALWAYS )
-GMM_FORMAT( R16G16_FLOAT , 32, 1, 1, 1, R, x, 0x0D0, 0x10, ALWAYS )
-GMM_FORMAT( R16G16_SINT , 32, 1, 1, 1, R, x, 0x0CE, 0x10, ALWAYS )
-GMM_FORMAT( R16G16_SNORM , 32, 1, 1, 1, R, x, 0x0CD, 0x10, ALWAYS )
-GMM_FORMAT( R16G16_SSCALED , 32, 1, 1, 1, R, x, 0x0F6, 0x10, ALWAYS )
-GMM_FORMAT( R16G16_UINT , 32, 1, 1, 1, R, x, 0x0CF, 0x10, ALWAYS )
-GMM_FORMAT( R16G16_UNORM , 32, 1, 1, 1, R, x, 0x0CC, 0x10, ALWAYS )
-GMM_FORMAT( R16G16_USCALED , 32, 1, 1, 1, R, x, 0x0F7, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16_FLOAT , 48, 1, 1, 1, R, x, 0x19B, NC , ALWAYS )
-GMM_FORMAT( R16G16B16_SINT , 48, 1, 1, 1, R, x, 0x1B1, NC , GEN(8) )
-GMM_FORMAT( R16G16B16_SNORM , 48, 1, 1, 1, R, x, 0x19D, NC , ALWAYS )
-GMM_FORMAT( R16G16B16_SSCALED , 48, 1, 1, 1, R, x, 0x19E, NC , ALWAYS )
-GMM_FORMAT( R16G16B16_UINT , 48, 1, 1, 1, R, x, 0x1B0, NC , GEN(8) || VLV2 )
-GMM_FORMAT( R16G16B16_UNORM , 48, 1, 1, 1, R, x, 0x19C, NC , ALWAYS )
-GMM_FORMAT( R16G16B16_USCALED , 48, 1, 1, 1, R, x, 0x19F, NC , ALWAYS )
-GMM_FORMAT( R16G16B16A16_FLOAT , 64, 1, 1, 1, R, x, 0x084, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16A16_SINT , 64, 1, 1, 1, R, x, 0x082, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16A16_SNORM , 64, 1, 1, 1, R, x, 0x081, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16A16_SSCALED , 64, 1, 1, 1, R, x, 0x093, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16A16_UINT , 64, 1, 1, 1, R, x, 0x083, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16A16_UNORM , 64, 1, 1, 1, R, x, 0x080, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16A16_USCALED , 64, 1, 1, 1, R, x, 0x094, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16X16_FLOAT , 64, 1, 1, 1, R, x, 0x08F, 0x10, ALWAYS )
-GMM_FORMAT( R16G16B16X16_UNORM , 64, 1, 1, 1, R, x, 0x08E, 0x10, ALWAYS )
-GMM_FORMAT( R24_UNORM_X8_TYPELESS , 32, 1, 1, 1, R, x, 0x0D9, 0x11, ALWAYS )
-GMM_FORMAT( R32_FLOAT , 32, 1, 1, 1, R, x, 0x0D8, 0x11, ALWAYS )
-GMM_FORMAT( R32_FLOAT_X8X24_TYPELESS , 64, 1, 1, 1, R, x, 0x088, 0x11, ALWAYS )
-GMM_FORMAT( R32_SFIXED , 32, 1, 1, 1, R, x, 0x1B2, 0x11, GEN(8) )
-GMM_FORMAT( R32_SINT , 32, 1, 1, 1, R, x, 0x0D6, 0x11, ALWAYS )
-GMM_FORMAT( R32_SNORM , 32, 1, 1, 1, R, x, 0x0F2, 0x11, ALWAYS )
-GMM_FORMAT( R32_SSCALED , 32, 1, 1, 1, R, x, 0x0F8, 0x11, ALWAYS )
-GMM_FORMAT( R32_UINT , 32, 1, 1, 1, R, x, 0x0D7, 0x11, ALWAYS )
-GMM_FORMAT( R32_UNORM , 32, 1, 1, 1, R, x, 0x0F1, 0x11, ALWAYS )
-GMM_FORMAT( R32_USCALED , 32, 1, 1, 1, R, x, 0x0F9, 0x11, ALWAYS )
-GMM_FORMAT( R32G32_FLOAT , 64, 1, 1, 1, R, x, 0x085, 0x11, ALWAYS )
-GMM_FORMAT( R32G32_SFIXED , 64, 1, 1, 1, R, x, 0x0A0, 0x11, ALWAYS )
-GMM_FORMAT( R32G32_SINT , 64, 1, 1, 1, R, x, 0x086, 0x11, ALWAYS )
-GMM_FORMAT( R32G32_SNORM , 64, 1, 1, 1, R, x, 0x08C, 0x11, ALWAYS )
-GMM_FORMAT( R32G32_SSCALED , 64, 1, 1, 1, R, x, 0x095, 0x11, ALWAYS )
-GMM_FORMAT( R32G32_UINT , 64, 1, 1, 1, R, x, 0x087, 0x11, ALWAYS )
-GMM_FORMAT( R32G32_UNORM , 64, 1, 1, 1, R, x, 0x08B, 0x11, ALWAYS )
-GMM_FORMAT( R32G32_USCALED , 64, 1, 1, 1, R, x, 0x096, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32_FLOAT , 96, 1, 1, 1, R, x, 0x040, NC , ALWAYS )
-GMM_FORMAT( R32G32B32_SFIXED , 96, 1, 1, 1, R, x, 0x050, NC , ALWAYS )
-GMM_FORMAT( R32G32B32_SINT , 96, 1, 1, 1, R, x, 0x041, NC , ALWAYS )
-GMM_FORMAT( R32G32B32_SNORM , 96, 1, 1, 1, R, x, 0x044, NC , ALWAYS )
-GMM_FORMAT( R32G32B32_SSCALED , 96, 1, 1, 1, R, x, 0x045, NC , ALWAYS )
-GMM_FORMAT( R32G32B32_UINT , 96, 1, 1, 1, R, x, 0x042, NC , ALWAYS )
-GMM_FORMAT( R32G32B32_UNORM , 96, 1, 1, 1, R, x, 0x043, NC , ALWAYS )
-GMM_FORMAT( R32G32B32_USCALED , 96, 1, 1, 1, R, x, 0x046, NC , ALWAYS )
-GMM_FORMAT( R32G32B32A32_FLOAT , 128, 1, 1, 1, R, x, 0x000, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32A32_SFIXED , 128, 1, 1, 1, R, x, 0x020, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32A32_SINT , 128, 1, 1, 1, R, x, 0x001, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32A32_SNORM , 128, 1, 1, 1, R, x, 0x004, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32A32_SSCALED , 128, 1, 1, 1, R, x, 0x007, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32A32_UINT , 128, 1, 1, 1, R, x, 0x002, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32A32_UNORM , 128, 1, 1, 1, R, x, 0x003, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32A32_USCALED , 128, 1, 1, 1, R, x, 0x008, 0x11, ALWAYS )
-GMM_FORMAT( R32G32B32X32_FLOAT , 128, 1, 1, 1, R, x, 0x006, 0x11, ALWAYS )
-GMM_FORMAT( R5G5_SNORM_B6_UNORM , 16, 1, 1, 1, R, x, 0x119, NC , ALWAYS )
-GMM_FORMAT( R64_FLOAT , 64, 1, 1, 1, R, x, 0x08D, NC , ALWAYS )
-GMM_FORMAT( R64_PASSTHRU , 64, 1, 1, 1, R, x, 0x0A1, NC , ALWAYS )
-GMM_FORMAT( R64G64_FLOAT , 128, 1, 1, 1, R, x, 0x005, NC , ALWAYS )
-GMM_FORMAT( R64G64_PASSTHRU , 128, 1, 1, 1, R, x, 0x021, NC , ALWAYS )
-GMM_FORMAT( R64G64B64_FLOAT , 192, 1, 1, 1, R, x, 0x198, NC , ALWAYS )
-GMM_FORMAT( R64G64B64_PASSTHRU , 192, 1, 1, 1, R, x, 0x1BD, NC , GEN(8) )
-GMM_FORMAT( R64G64B64A64_FLOAT , 256, 1, 1, 1, R, x, 0x197, NC , ALWAYS )
-GMM_FORMAT( R64G64B64A64_PASSTHRU , 256, 1, 1, 1, R, x, 0x1BC, NC , GEN(8) )
-GMM_FORMAT( RAW , 8, 1, 1, 1, R, x, 0x1FF, NC , GEN(7) ) // "8bpp" for current GMM implementation.
-GMM_FORMAT( X24_TYPELESS_G8_UINT , 32, 1, 1, 1, R, x, 0x0DA, 0xA , ALWAYS )
-GMM_FORMAT( X32_TYPELESS_G8X24_UINT , 64, 1, 1, 1, R, x, 0x089, 0xA , ALWAYS )
-GMM_FORMAT( X8B8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E6, NC , ALWAYS )
-GMM_FORMAT( Y8_UNORM , 8, 1, 1, 1, R, x, 0x150, 0xF , ALWAYS )
-GMM_FORMAT( YCRCB_NORMAL , 16, 1, 1, 1, R, x, 0x182, 0x3 , ALWAYS )
-GMM_FORMAT( YCRCB_SWAPUV , 16, 1, 1, 1, R, x, 0x18F, 0xC , ALWAYS )
-GMM_FORMAT( YCRCB_SWAPUVY , 16, 1, 1, 1, R, x, 0x183, 0xD , ALWAYS )
-GMM_FORMAT( YCRCB_SWAPY , 16, 1, 1, 1, R, x, 0x190, 0xB , ALWAYS )
+GMM_FORMAT( A1B5G5R5_UNORM , 16, 1, 1, 1, R, x, 0x124, FC(3, x, RGB5A1, , ), GEN(8) || VLV2 )
+GMM_FORMAT( A4B4G4R4_UNORM , 16, 1, 1, 1, R, x, 0x125, FC(3, x, RGB5A1, , ), GEN(8) )
+GMM_FORMAT( A4P4_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x148, NC , ALWAYS )
+GMM_FORMAT( A4P4_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14F, NC , ALWAYS )
+GMM_FORMAT( A8_UNORM , 8, 1, 1, 1, R, x, 0x144, FC(3, 8, R, 8, U), GEN(7) )
+GMM_FORMAT( A8P8_UNORM_PALETTE0 , 16, 1, 1, 1, R, x, 0x10F, NC , ALWAYS )
+GMM_FORMAT( A8P8_UNORM_PALETTE1 , 16, 1, 1, 1, R, x, 0x110, NC , ALWAYS )
+GMM_FORMAT( A8X8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E7, NC , ALWAYS )
+GMM_FORMAT( A16_FLOAT , 16, 1, 1, 1, R, x, 0x117, NC , GEN(7) )
+GMM_FORMAT( A16_UNORM , 16, 1, 1, 1, R, x, 0x113, NC , GEN(7) )
+GMM_FORMAT( A24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E2, NC , GEN(7) )
+GMM_FORMAT( A32_FLOAT , 32, 1, 1, 1, R, x, 0x0E5, NC , GEN(7) )
+GMM_FORMAT( A32_UNORM , 32, 1, 1, 1, R, x, 0x0DE, NC , GEN(7) )
+GMM_FORMAT( A32X32_FLOAT , 64, 1, 1, 1, R, x, 0x090, NC , ALWAYS )
+GMM_FORMAT( B4G4R4A4_UNORM , 16, 1, 1, 1, R, x, 0x104, FC(3, x, RGBA4, , ), ALWAYS )
+GMM_FORMAT( B4G4R4A4_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x105, FC(3, x, RGBA4, , ), ALWAYS )
+GMM_FORMAT( B5G5R5A1_UNORM , 16, 1, 1, 1, R, x, 0x102, FC(3, x, RGB5A1, , ), ALWAYS )
+GMM_FORMAT( B5G5R5A1_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x103, FC(3, x, RGB5A1, , ), ALWAYS )
+GMM_FORMAT( B5G5R5X1_UNORM , 16, 1, 1, 1, R, x, 0x11A, FC(3, x, RGB5A1, , ), ALWAYS )
+GMM_FORMAT( B5G5R5X1_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x11B, FC(3, x, RGB5A1, , ), ALWAYS )
+GMM_FORMAT( B5G6R5_UNORM , 16, 1, 1, 1, R, x, 0x100, FC(3, x, B5G6R5, , ), ALWAYS )
+GMM_FORMAT( B5G6R5_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x101, FC(3, x, B5G6R5, , ), ALWAYS )
+GMM_FORMAT( B8G8R8A8_UNORM , 32, 1, 1, 1, R, x, 0x0C0, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( B8G8R8A8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C1, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( B8G8R8X8_UNORM , 32, 1, 1, 1, R, x, 0x0E9, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( B8G8R8X8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0EA, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( B8X8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E8, NC , ALWAYS )
+GMM_FORMAT( B10G10R10A2_SINT , 32, 1, 1, 1, R, x, 0x1BB, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( B10G10R10A2_SNORM , 32, 1, 1, 1, R, x, 0x1B7, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( B10G10R10A2_SSCALED , 32, 1, 1, 1, R, x, 0x1B9, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( B10G10R10A2_UINT , 32, 1, 1, 1, R, x, 0x1BA, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( B10G10R10A2_UNORM , 32, 1, 1, 1, R, x, 0x0D1, FC(3, x, RGB10A2, , ), ALWAYS )
+GMM_FORMAT( B10G10R10A2_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0D2, FC(3, x, RGB10A2, , ), ALWAYS )
+GMM_FORMAT( B10G10R10A2_USCALED , 32, 1, 1, 1, R, x, 0x1B8, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( B10G10R10X2_UNORM , 32, 1, 1, 1, R, x, 0x0EE, FC(3, x, RGB10A2, , ), ALWAYS )
+GMM_FORMAT( BC1_UNORM , 64, 4, 4, 1, x, x, 0x186, NC , ALWAYS )
+GMM_FORMAT( BC1_UNORM_SRGB , 64, 4, 4, 1, x, x, 0x18B, NC , ALWAYS )
+GMM_FORMAT( BC2_UNORM , 128, 4, 4, 1, x, x, 0x187, NC , ALWAYS )
+GMM_FORMAT( BC2_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x18C, NC , ALWAYS )
+GMM_FORMAT( BC3_UNORM , 128, 4, 4, 1, x, x, 0x188, NC , ALWAYS )
+GMM_FORMAT( BC3_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x18D, NC , ALWAYS )
+GMM_FORMAT( BC4_SNORM , 64, 4, 4, 1, x, x, 0x199, NC , ALWAYS )
+GMM_FORMAT( BC4_UNORM , 64, 4, 4, 1, x, x, 0x189, NC , ALWAYS )
+GMM_FORMAT( BC5_SNORM , 128, 4, 4, 1, x, x, 0x19A, NC , ALWAYS )
+GMM_FORMAT( BC5_UNORM , 128, 4, 4, 1, x, x, 0x18A, NC , ALWAYS )
+GMM_FORMAT( BC6H_SF16 , 128, 4, 4, 1, x, x, 0x1A1, NC , GEN(7) )
+GMM_FORMAT( BC6H_UF16 , 128, 4, 4, 1, x, x, 0x1A4, NC , GEN(7) )
+GMM_FORMAT( BC7_UNORM , 128, 4, 4, 1, x, x, 0x1A2, NC , GEN(7) )
+GMM_FORMAT( BC7_UNORM_SRGB , 128, 4, 4, 1, x, x, 0x1A3, NC , GEN(7) )
+GMM_FORMAT( DXT1_RGB , 64, 4, 4, 1, x, x, 0x191, NC , ALWAYS )
+GMM_FORMAT( DXT1_RGB_SRGB , 64, 4, 4, 1, x, x, 0x180, NC , ALWAYS )
+GMM_FORMAT( EAC_R11 , 64, 4, 4, 1, x, x, 0x1AB, NC , GEN(8) || VLV2 )
+GMM_FORMAT( EAC_RG11 , 128, 4, 4, 1, x, x, 0x1AC, NC , GEN(8) || VLV2 )
+GMM_FORMAT( EAC_SIGNED_R11 , 64, 4, 4, 1, x, x, 0x1AD, NC , GEN(8) || VLV2 )
+GMM_FORMAT( EAC_SIGNED_RG11 , 128, 4, 4, 1, x, x, 0x1AE, NC , GEN(8) || VLV2 )
+GMM_FORMAT( ETC1_RGB8 , 64, 4, 4, 1, x, x, 0x1A9, NC , GEN(8) || VLV2 )
+GMM_FORMAT( ETC2_EAC_RGBA8 , 128, 4, 4, 1, x, x, 0x1C2, NC , GEN(8) || VLV2 )
+GMM_FORMAT( ETC2_EAC_SRGB8_A8 , 128, 4, 4, 1, x, x, 0x1C3, NC , GEN(8) || VLV2 )
+GMM_FORMAT( ETC2_RGB8 , 64, 4, 4, 1, x, x, 0x1AA, NC , GEN(8) || VLV2 )
+GMM_FORMAT( ETC2_RGB8_PTA , 64, 4, 4, 1, x, x, 0x1C0, NC , GEN(8) || VLV2 )
+GMM_FORMAT( ETC2_SRGB8 , 64, 4, 4, 1, x, x, 0x1AF, NC , GEN(8) || VLV2 )
+GMM_FORMAT( ETC2_SRGB8_PTA , 64, 4, 4, 1, x, x, 0x1C1, NC , GEN(8) || VLV2 )
+GMM_FORMAT( FXT1 , 128, 8, 4, 1, x, x, 0x192, NC , ALWAYS )
+GMM_FORMAT( I8_SINT , 8, 1, 1, 1, R, x, 0x155, NC , GEN(9) )
+GMM_FORMAT( I8_UINT , 8, 1, 1, 1, R, x, 0x154, NC , GEN(9) )
+GMM_FORMAT( I8_UNORM , 8, 1, 1, 1, R, x, 0x145, NC , ALWAYS )
+GMM_FORMAT( I16_FLOAT , 16, 1, 1, 1, R, x, 0x115, NC , ALWAYS )
+GMM_FORMAT( I16_UNORM , 16, 1, 1, 1, R, x, 0x111, NC , ALWAYS )
+GMM_FORMAT( I24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E0, NC , ALWAYS )
+GMM_FORMAT( I32_FLOAT , 32, 1, 1, 1, R, x, 0x0E3, NC , ALWAYS )
+GMM_FORMAT( I32X32_FLOAT , 64, 1, 1, 1, R, x, 0x092, NC , ALWAYS )
+GMM_FORMAT( L8_SINT , 8, 1, 1, 1, R, x, 0x153, NC , GEN(9) )
+GMM_FORMAT( L8_UINT , 8, 1, 1, 1, R, x, 0x152, NC , GEN(9) )
+GMM_FORMAT( L8_UNORM , 8, 1, 1, 1, R, x, 0x146, NC , ALWAYS )
+GMM_FORMAT( L8_UNORM_SRGB , 8, 1, 1, 1, R, x, 0x14C, NC , ALWAYS )
+GMM_FORMAT( L8A8_SINT , 16, 1, 1, 1, R, x, 0x127, NC , GEN(9) )
+GMM_FORMAT( L8A8_UINT , 16, 1, 1, 1, R, x, 0x126, NC , GEN(9) )
+GMM_FORMAT( L8A8_UNORM , 16, 1, 1, 1, R, x, 0x114, NC , ALWAYS )
+GMM_FORMAT( L8A8_UNORM_SRGB , 16, 1, 1, 1, R, x, 0x118, NC , ALWAYS )
+GMM_FORMAT( L16_FLOAT , 16, 1, 1, 1, R, x, 0x116, NC , ALWAYS )
+GMM_FORMAT( L16_UNORM , 16, 1, 1, 1, R, x, 0x112, NC , ALWAYS )
+GMM_FORMAT( L16A16_FLOAT , 32, 1, 1, 1, R, x, 0x0F0, NC , ALWAYS )
+GMM_FORMAT( L16A16_UNORM , 32, 1, 1, 1, R, x, 0x0DF, NC , ALWAYS )
+GMM_FORMAT( L24X8_UNORM , 32, 1, 1, 1, R, x, 0x0E1, NC , ALWAYS )
+GMM_FORMAT( L32_FLOAT , 32, 1, 1, 1, R, x, 0x0E4, NC , ALWAYS )
+GMM_FORMAT( L32_UNORM , 32, 1, 1, 1, R, x, 0x0DD, NC , ALWAYS )
+GMM_FORMAT( L32A32_FLOAT , 64, 1, 1, 1, R, x, 0x08A, NC , ALWAYS )
+GMM_FORMAT( L32X32_FLOAT , 64, 1, 1, 1, R, x, 0x091, NC , ALWAYS )
+GMM_FORMAT( MONO8 , 1, 1, 1, 1, R, x, 0x18E, NC , x ) // No current GMM support by this name.
+GMM_FORMAT( P2_UNORM_PALETTE0 , 2, 1, 1, 1, R, x, 0x184, NC , x ) // No current GMM support by this name.
+GMM_FORMAT( P2_UNORM_PALETTE1 , 2, 1, 1, 1, R, x, 0x185, NC , x ) // "
+GMM_FORMAT( P4A4_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x147, NC , ALWAYS )
+GMM_FORMAT( P4A4_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14E, NC , ALWAYS )
+GMM_FORMAT( P8_UNORM_PALETTE0 , 8, 1, 1, 1, R, x, 0x14B, NC , ALWAYS )
+GMM_FORMAT( P8_UNORM_PALETTE1 , 8, 1, 1, 1, R, x, 0x14D, NC , ALWAYS )
+GMM_FORMAT( P8A8_UNORM_PALETTE0 , 16, 1, 1, 1, R, x, 0x122, NC , ALWAYS )
+GMM_FORMAT( P8A8_UNORM_PALETTE1 , 16, 1, 1, 1, R, x, 0x123, NC , ALWAYS )
+GMM_FORMAT( PACKED_422_16 , 64, 2, 1, 1, R, x, 0x1A7, NC , GEN(12) )
+GMM_FORMAT( PLANAR_420_8 , 8, 1, 1, 1, R, x, 0x1A5, NC , x ) // No current GMM support by this name.
+GMM_FORMAT( PLANAR_420_16 , 16, 1, 1, 1, R, x, 0x1A6, NC , x ) // "
+GMM_FORMAT( PLANAR_422_8 , 8, 1, 1, 1, R, x, 0x00F, NC , x ) // <-- TODO(Minor): Remove this HW-internal format.
+GMM_FORMAT( R1_UNORM , 1, 1, 1, 1, R, x, 0x181, NC , x ) // "
+GMM_FORMAT( R8_SINT , 8, 1, 1, 1, R, x, 0x142, FC(3, 8, R, 8, S1), ALWAYS )
+GMM_FORMAT( R8_SNORM , 8, 1, 1, 1, R, x, 0x141, FC(3, 8, R, 8, S), ALWAYS )
+GMM_FORMAT( R8_SSCALED , 8, 1, 1, 1, R, x, 0x149, FC(3, 8, R, 8, S), ALWAYS )
+GMM_FORMAT( R8_UINT , 8, 1, 1, 1, R, x, 0x143, FC(3, 8, R, 8, U1), ALWAYS )
+GMM_FORMAT( R8_UNORM , 8, 1, 1, 1, R, x, 0x140, FC(3, 8, R, 8, U), ALWAYS )
+GMM_FORMAT( R8_USCALED , 8, 1, 1, 1, R, x, 0x14A, FC(3, 8, R, 8, U), ALWAYS )
+GMM_FORMAT( R8G8_SINT , 16, 1, 1, 1, R, x, 0x108, FC(3, 8, RG, 8, S), ALWAYS )
+GMM_FORMAT( R8G8_SNORM , 16, 1, 1, 1, R, x, 0x107, FC(3, 8, RG, 8, S), ALWAYS )
+GMM_FORMAT( R8G8_SSCALED , 16, 1, 1, 1, R, x, 0x11C, FC(3, 8, RG, 8, S), ALWAYS )
+GMM_FORMAT( R8G8_UINT , 16, 1, 1, 1, R, x, 0x109, FC(3, 8, RG, 8, U), ALWAYS )
+GMM_FORMAT( R8G8_UNORM , 16, 1, 1, 1, R, x, 0x106, FC(3, 8, RG, 8, U), ALWAYS )
+GMM_FORMAT( R8G8_USCALED , 16, 1, 1, 1, R, x, 0x11D, FC(3, 8, RG, 8, U), ALWAYS )
+GMM_FORMAT( R8G8B8_SINT , 24, 1, 1, 1, R, x, 0x1C9, NC , GEN(8) )
+GMM_FORMAT( R8G8B8_SNORM , 24, 1, 1, 1, R, x, 0x194, NC , ALWAYS )
+GMM_FORMAT( R8G8B8_SSCALED , 24, 1, 1, 1, R, x, 0x195, NC , ALWAYS )
+GMM_FORMAT( R8G8B8_UINT , 24, 1, 1, 1, R, x, 0x1C8, NC , GEN(8) || VLV2 )
+GMM_FORMAT( R8G8B8_UNORM , 24, 1, 1, 1, R, x, 0x193, NC , ALWAYS )
+GMM_FORMAT( R8G8B8_UNORM_SRGB , 24, 1, 1, 1, R, x, 0x1A8, NC , GEN(7_5) )
+GMM_FORMAT( R8G8B8_USCALED , 24, 1, 1, 1, R, x, 0x196, NC , ALWAYS )
+GMM_FORMAT( R8G8B8A8_SINT , 32, 1, 1, 1, R, x, 0x0CA, FC(3, 8, RGBA, 8, S), ALWAYS )
+GMM_FORMAT( R8G8B8A8_SNORM , 32, 1, 1, 1, R, x, 0x0C9, FC(3, 8, RGBA, 8, S), ALWAYS )
+GMM_FORMAT( R8G8B8A8_SSCALED , 32, 1, 1, 1, R, x, 0x0F4, FC(3, 8, RGBA, 8, S), ALWAYS )
+GMM_FORMAT( R8G8B8A8_UINT , 32, 1, 1, 1, R, x, 0x0CB, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( R8G8B8A8_UNORM , 32, 1, 1, 1, R, x, 0x0C7, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( R8G8B8A8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C8, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( R8G8B8A8_USCALED , 32, 1, 1, 1, R, x, 0x0F5, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( R8G8B8X8_UNORM , 32, 1, 1, 1, R, x, 0x0EB, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( R8G8B8X8_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0EC, FC(3, 8, RGBA, 8, U), ALWAYS )
+GMM_FORMAT( R9G9B9E5_SHAREDEXP , 32, 1, 1, 1, R, x, 0x0ED, NC , ALWAYS )
+GMM_FORMAT( R10G10B10_FLOAT_A2_UNORM , 32, 1, 1, 1, R, x, 0x0D5, FC(3, x, RGB10A2, , ), GEN(12) )
+GMM_FORMAT( R10G10B10_SNORM_A2_UNORM , 32, 1, 1, 1, R, x, 0x0C5, FC(3, x, RGB10A2, , ), ALWAYS )
+GMM_FORMAT( R10G10B10A2_SINT , 32, 1, 1, 1, R, x, 0x1B6, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( R10G10B10A2_SNORM , 32, 1, 1, 1, R, x, 0x1B3, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( R10G10B10A2_SSCALED , 32, 1, 1, 1, R, x, 0x1B5, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( R10G10B10A2_UINT , 32, 1, 1, 1, R, x, 0x0C4, FC(3, x, RGB10A2, , ), ALWAYS )
+GMM_FORMAT( R10G10B10A2_UNORM , 32, 1, 1, 1, R, x, 0x0C2, FC(3, x, RGB10A2, , ), ALWAYS )
+GMM_FORMAT( R10G10B10A2_UNORM_SRGB , 32, 1, 1, 1, R, x, 0x0C3, FC(3, x, RGB10A2, , ), ALWAYS )
+GMM_FORMAT( R10G10B10A2_USCALED , 32, 1, 1, 1, R, x, 0x1B4, FC(3, x, RGB10A2, , ), GEN(8) )
+GMM_FORMAT( R10G10B10X2_USCALED , 32, 1, 1, 1, R, x, 0x0F3, FC(3, x, RGB10A2, , ), ALWAYS )
+GMM_FORMAT( R11G11B10_FLOAT , 32, 1, 1, 1, R, x, 0x0D3, FC(3, x, RG11B10, , ), ALWAYS )
+GMM_FORMAT( R16_FLOAT , 16, 1, 1, 1, R, x, 0x10E, FC(3, 16, R, 16, F1), ALWAYS )
+GMM_FORMAT( R16_SINT , 16, 1, 1, 1, R, x, 0x10C, FC(3, 16, R, 16, S1), ALWAYS )
+GMM_FORMAT( R16_SNORM , 16, 1, 1, 1, R, x, 0x10B, FC(3, 16, R, 16, S), ALWAYS )
+GMM_FORMAT( R16_SSCALED , 16, 1, 1, 1, R, x, 0x11E, FC(3, 16, R, 16, S), ALWAYS )
+GMM_FORMAT( R16_UINT , 16, 1, 1, 1, R, x, 0x10D, FC(3, 16, R, 16, U1), ALWAYS )
+GMM_FORMAT( R16_UNORM , 16, 1, 1, 1, R, x, 0x10A, FC(3, 16, R, 16, U), ALWAYS )
+GMM_FORMAT( R16_USCALED , 16, 1, 1, 1, R, x, 0x11F, FC(3, 16, R, 16, U), ALWAYS )
+GMM_FORMAT( R16G16_FLOAT , 32, 1, 1, 1, R, x, 0x0D0, FC(3, 16, RG, 16, F), ALWAYS )
+GMM_FORMAT( R16G16_SINT , 32, 1, 1, 1, R, x, 0x0CE, FC(3, 16, RG, 16, S), ALWAYS )
+GMM_FORMAT( R16G16_SNORM , 32, 1, 1, 1, R, x, 0x0CD, FC(3, 16, RG, 16, S), ALWAYS )
+GMM_FORMAT( R16G16_SSCALED , 32, 1, 1, 1, R, x, 0x0F6, FC(3, 16, RG, 16, S), ALWAYS )
+GMM_FORMAT( R16G16_UINT , 32, 1, 1, 1, R, x, 0x0CF, FC(3, 16, RG, 16, U), ALWAYS )
+GMM_FORMAT( R16G16_UNORM , 32, 1, 1, 1, R, x, 0x0CC, FC(3, 16, RG, 16, U), ALWAYS )
+GMM_FORMAT( R16G16_USCALED , 32, 1, 1, 1, R, x, 0x0F7, FC(3, 16, RG, 16, U), ALWAYS )
+GMM_FORMAT( R16G16B16_FLOAT , 48, 1, 1, 1, R, x, 0x19B, NC , ALWAYS )
+GMM_FORMAT( R16G16B16_SINT , 48, 1, 1, 1, R, x, 0x1B1, NC , GEN(8) )
+GMM_FORMAT( R16G16B16_SNORM , 48, 1, 1, 1, R, x, 0x19D, NC , ALWAYS )
+GMM_FORMAT( R16G16B16_SSCALED , 48, 1, 1, 1, R, x, 0x19E, NC , ALWAYS )
+GMM_FORMAT( R16G16B16_UINT , 48, 1, 1, 1, R, x, 0x1B0, NC , GEN(8) || VLV2 )
+GMM_FORMAT( R16G16B16_UNORM , 48, 1, 1, 1, R, x, 0x19C, NC , ALWAYS )
+GMM_FORMAT( R16G16B16_USCALED , 48, 1, 1, 1, R, x, 0x19F, NC , ALWAYS )
+GMM_FORMAT( R16G16B16A16_FLOAT , 64, 1, 1, 1, R, x, 0x084, FC(3, 16, RGBA, 16, F), ALWAYS )
+GMM_FORMAT( R16G16B16A16_SINT , 64, 1, 1, 1, R, x, 0x082, FC(3, 16, RGBA, 16, S), ALWAYS )
+GMM_FORMAT( R16G16B16A16_SNORM , 64, 1, 1, 1, R, x, 0x081, FC(3, 16, RGBA, 16, S), ALWAYS )
+GMM_FORMAT( R16G16B16A16_SSCALED , 64, 1, 1, 1, R, x, 0x093, FC(3, 16, RGBA, 16, S), ALWAYS )
+GMM_FORMAT( R16G16B16A16_UINT , 64, 1, 1, 1, R, x, 0x083, FC(3, 16, RGBA, 16, U), ALWAYS )
+GMM_FORMAT( R16G16B16A16_UNORM , 64, 1, 1, 1, R, x, 0x080, FC(3, 16, RGBA, 16, U), ALWAYS )
+GMM_FORMAT( R16G16B16A16_USCALED , 64, 1, 1, 1, R, x, 0x094, FC(3, 16, RGBA, 16, U), ALWAYS )
+GMM_FORMAT( R16G16B16X16_FLOAT , 64, 1, 1, 1, R, x, 0x08F, FC(3, 16, RGBA, 16, F), ALWAYS )
+GMM_FORMAT( R16G16B16X16_UNORM , 64, 1, 1, 1, R, x, 0x08E, FC(3, 16, RGBA, 16, U), ALWAYS )
+GMM_FORMAT( R24_UNORM_X8_TYPELESS , 32, 1, 1, 1, R, x, 0x0D9, FC(3, 32, R, 32, U1), ALWAYS )
+GMM_FORMAT( R32_FLOAT , 32, 1, 1, 1, R, x, 0x0D8, FC(3, 32, R, 32, F1), ALWAYS )
+GMM_FORMAT( R32_FLOAT_X8X24_TYPELESS , 64, 1, 1, 1, R, x, 0x088, FC(3, 32, R, 32, F), ALWAYS )
+GMM_FORMAT( R32_SFIXED , 32, 1, 1, 1, R, x, 0x1B2, FC(3, 32, R, 32, S), GEN(8) )
+GMM_FORMAT( R32_SINT , 32, 1, 1, 1, R, x, 0x0D6, FC(3, 32, R, 32, S1), ALWAYS )
+GMM_FORMAT( R32_SNORM , 32, 1, 1, 1, R, x, 0x0F2, FC(3, 32, R, 32, S), ALWAYS )
+GMM_FORMAT( R32_SSCALED , 32, 1, 1, 1, R, x, 0x0F8, FC(3, 32, R, 32, S), ALWAYS )
+GMM_FORMAT( R32_UINT , 32, 1, 1, 1, R, x, 0x0D7, FC(3, 32, R, 32, U1), ALWAYS )
+GMM_FORMAT( R32_UNORM , 32, 1, 1, 1, R, x, 0x0F1, FC(3, 32, R, 32, U), ALWAYS )
+GMM_FORMAT( R32_USCALED , 32, 1, 1, 1, R, x, 0x0F9, FC(3, 32, R, 32, U), ALWAYS )
+GMM_FORMAT( R32G32_FLOAT , 64, 1, 1, 1, R, x, 0x085, FC(3, 32, RG, 32, F), ALWAYS )
+GMM_FORMAT( R32G32_SFIXED , 64, 1, 1, 1, R, x, 0x0A0, FC(3, 32, RG, 32, S), ALWAYS )
+GMM_FORMAT( R32G32_SINT , 64, 1, 1, 1, R, x, 0x086, FC(3, 32, RG, 32, S), ALWAYS )
+GMM_FORMAT( R32G32_SNORM , 64, 1, 1, 1, R, x, 0x08C, FC(3, 32, RG, 32, S), ALWAYS )
+GMM_FORMAT( R32G32_SSCALED , 64, 1, 1, 1, R, x, 0x095, FC(3, 32, RG, 32, S), ALWAYS )
+GMM_FORMAT( R32G32_UINT , 64, 1, 1, 1, R, x, 0x087, FC(3, 32, RG, 32, U), ALWAYS )
+GMM_FORMAT( R32G32_UNORM , 64, 1, 1, 1, R, x, 0x08B, FC(3, 32, RG, 32, U), ALWAYS )
+GMM_FORMAT( R32G32_USCALED , 64, 1, 1, 1, R, x, 0x096, FC(3, 32, RG, 32, U), ALWAYS )
+GMM_FORMAT( R32G32B32_FLOAT , 96, 1, 1, 1, R, x, 0x040, NC , ALWAYS )
+GMM_FORMAT( R32G32B32_SFIXED , 96, 1, 1, 1, R, x, 0x050, NC , ALWAYS )
+GMM_FORMAT( R32G32B32_SINT , 96, 1, 1, 1, R, x, 0x041, NC , ALWAYS )
+GMM_FORMAT( R32G32B32_SNORM , 96, 1, 1, 1, R, x, 0x044, NC , ALWAYS )
+GMM_FORMAT( R32G32B32_SSCALED , 96, 1, 1, 1, R, x, 0x045, NC , ALWAYS )
+GMM_FORMAT( R32G32B32_UINT , 96, 1, 1, 1, R, x, 0x042, NC , ALWAYS )
+GMM_FORMAT( R32G32B32_UNORM , 96, 1, 1, 1, R, x, 0x043, NC , ALWAYS )
+GMM_FORMAT( R32G32B32_USCALED , 96, 1, 1, 1, R, x, 0x046, NC , ALWAYS )
+GMM_FORMAT( R32G32B32A32_FLOAT , 128, 1, 1, 1, R, x, 0x000, FC(3, 32, RGBA, 32, F), ALWAYS )
+GMM_FORMAT( R32G32B32A32_SFIXED , 128, 1, 1, 1, R, x, 0x020, FC(3, 32, RGBA, 32, S), ALWAYS )
+GMM_FORMAT( R32G32B32A32_SINT , 128, 1, 1, 1, R, x, 0x001, FC(3, 32, RGBA, 32, S), ALWAYS )
+GMM_FORMAT( R32G32B32A32_SNORM , 128, 1, 1, 1, R, x, 0x004, FC(3, 32, RGBA, 32, S), ALWAYS )
+GMM_FORMAT( R32G32B32A32_SSCALED , 128, 1, 1, 1, R, x, 0x007, FC(3, 32, RGBA, 32, S), ALWAYS )
+GMM_FORMAT( R32G32B32A32_UINT , 128, 1, 1, 1, R, x, 0x002, FC(3, 32, RGBA, 32, U), ALWAYS )
+GMM_FORMAT( R32G32B32A32_UNORM , 128, 1, 1, 1, R, x, 0x003, FC(3, 32, RGBA, 32, U), ALWAYS )
+GMM_FORMAT( R32G32B32A32_USCALED , 128, 1, 1, 1, R, x, 0x008, FC(3, 32, RGBA, 32, U), ALWAYS )
+GMM_FORMAT( R32G32B32X32_FLOAT , 128, 1, 1, 1, R, x, 0x006, FC(3, 32, RGBA, 32, F), ALWAYS )
+GMM_FORMAT( R5G5_SNORM_B6_UNORM , 16, 1, 1, 1, R, x, 0x119, NC , ALWAYS )
+GMM_FORMAT( R64_FLOAT , 64, 1, 1, 1, R, x, 0x08D, NC , ALWAYS )
+GMM_FORMAT( R64_PASSTHRU , 64, 1, 1, 1, R, x, 0x0A1, NC , ALWAYS )
+GMM_FORMAT( R64G64_FLOAT , 128, 1, 1, 1, R, x, 0x005, NC , ALWAYS )
+GMM_FORMAT( R64G64_PASSTHRU , 128, 1, 1, 1, R, x, 0x021, NC , ALWAYS )
+GMM_FORMAT( R64G64B64_FLOAT , 192, 1, 1, 1, R, x, 0x198, NC , ALWAYS )
+GMM_FORMAT( R64G64B64_PASSTHRU , 192, 1, 1, 1, R, x, 0x1BD, NC , GEN(8) )
+GMM_FORMAT( R64G64B64A64_FLOAT , 256, 1, 1, 1, R, x, 0x197, NC , ALWAYS )
+GMM_FORMAT( R64G64B64A64_PASSTHRU , 256, 1, 1, 1, R, x, 0x1BC, NC , GEN(8) )
+GMM_FORMAT( RAW , 8, 1, 1, 1, R, x, 0x1FF, NC , GEN(7) ) // "8bpp" for current GMM implementation.
+GMM_FORMAT( X24_TYPELESS_G8_UINT , 32, 1, 1, 1, R, x, 0x0DA, FC(3, 32, R, 32, U1), ALWAYS )
+GMM_FORMAT( X32_TYPELESS_G8X24_UINT , 64, 1, 1, 1, R, x, 0x089, FC(3, 32, RG, 32, U), ALWAYS )
+GMM_FORMAT( X8B8_UNORM_G8R8_SNORM , 32, 1, 1, 1, R, x, 0x0E6, NC , ALWAYS )
+GMM_FORMAT( Y8_UNORM , 8, 1, 1, 1, R, x, 0x150, FC(2, x, NV12, , ), ALWAYS )
+GMM_FORMAT( YCRCB_NORMAL , 16, 1, 1, 1, R, x, 0x182, FC(2, x, YUY2, , ), ALWAYS )
+GMM_FORMAT( YCRCB_SWAPUV , 16, 1, 1, 1, R, x, 0x18F, FC(2, x, YCRCB_SWAPUV, ,), ALWAYS )
+GMM_FORMAT( YCRCB_SWAPUVY , 16, 1, 1, 1, R, x, 0x183, FC(2, x, YCRCB_SWAPUVY,,), ALWAYS )
+GMM_FORMAT( YCRCB_SWAPY , 16, 1, 1, 1, R, x, 0x190, FC(2, x, YCRCB_SWAPY, , ), ALWAYS )
#endif // INCLUDE_SURFACESTATE_FORMATS
#ifdef INCLUDE_ASTC_FORMATS
-GMM_FORMAT( ASTC_FULL_2D_4x4_FLT16 , 128, 4, 4, 1, x, A, 0x140, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_5x4_FLT16 , 128, 5, 4, 1, x, A, 0x148, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_5x5_FLT16 , 128, 5, 5, 1, x, A, 0x149, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_6x5_FLT16 , 128, 6, 5, 1, x, A, 0x151, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_6x6_FLT16 , 128, 6, 6, 1, x, A, 0x152, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_8x5_FLT16 , 128, 8, 5, 1, x, A, 0x161, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_8x6_FLT16 , 128, 8, 6, 1, x, A, 0x162, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_8x8_FLT16 , 128, 8, 8, 1, x, A, 0x164, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_10x5_FLT16 , 128, 10, 5, 1, x, A, 0x171, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_10x6_FLT16 , 128, 10, 6, 1, x, A, 0x172, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_10x8_FLT16 , 128, 10, 8, 1, x, A, 0x174, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_10x10_FLT16 , 128, 10, 10, 1, x, A, 0x176, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_12x10_FLT16 , 128, 12, 10, 1, x, A, 0x17e, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_2D_12x12_FLT16 , 128, 12, 12, 1, x, A, 0x17f, NC , ASTC_HDR_2D )
-GMM_FORMAT( ASTC_FULL_3D_3x3x3_FLT16 , 128, 3, 3, 3, x, A, 0x1c0, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_4x3x3_FLT16 , 128, 4, 3, 3, x, A, 0x1d0, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_4x4x3_FLT16 , 128, 4, 4, 3, x, A, 0x1d4, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_4x4x4_FLT16 , 128, 4, 4, 4, x, A, 0x1d5, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_5x4x4_FLT16 , 128, 5, 4, 4, x, A, 0x1e5, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_5x5x4_FLT16 , 128, 5, 5, 4, x, A, 0x1e9, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_5x5x5_FLT16 , 128, 5, 5, 5, x, A, 0x1ea, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_6x5x5_FLT16 , 128, 6, 5, 5, x, A, 0x1fa, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_6x6x5_FLT16 , 128, 6, 6, 5, x, A, 0x1fe, NC , ASTC_3D )
-GMM_FORMAT( ASTC_FULL_3D_6x6x6_FLT16 , 128, 6, 6, 6, x, A, 0x1ff, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_2D_4x4_FLT16 , 128, 4, 4, 1, x, A, 0x040, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_4x4_U8sRGB , 128, 4, 4, 1, x, A, 0x000, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_5x4_FLT16 , 128, 5, 4, 1, x, A, 0x048, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_5x4_U8sRGB , 128, 5, 4, 1, x, A, 0x008, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_5x5_FLT16 , 128, 5, 5, 1, x, A, 0x049, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_5x5_U8sRGB , 128, 5, 5, 1, x, A, 0x009, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_6x5_FLT16 , 128, 6, 5, 1, x, A, 0x051, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_6x5_U8sRGB , 128, 6, 5, 1, x, A, 0x011, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_6x6_FLT16 , 128, 6, 6, 1, x, A, 0x052, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_6x6_U8sRGB , 128, 6, 6, 1, x, A, 0x012, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_8x5_FLT16 , 128, 8, 5, 1, x, A, 0x061, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_8x5_U8sRGB , 128, 8, 5, 1, x, A, 0x021, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_8x6_FLT16 , 128, 8, 6, 1, x, A, 0x062, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_8x6_U8sRGB , 128, 8, 6, 1, x, A, 0x022, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_8x8_FLT16 , 128, 8, 8, 1, x, A, 0x064, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_8x8_U8sRGB , 128, 8, 8, 1, x, A, 0x024, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_10x5_FLT16 , 128, 10, 5, 1, x, A, 0x071, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_10x5_U8sRGB , 128, 10, 5, 1, x, A, 0x031, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_10x6_FLT16 , 128, 10, 6, 1, x, A, 0x072, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_10x6_U8sRGB , 128, 10, 6, 1, x, A, 0x032, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_10x8_FLT16 , 128, 10, 8, 1, x, A, 0x074, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_10x8_U8sRGB , 128, 10, 8, 1, x, A, 0x034, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_10x10_FLT16 , 128, 10, 10, 1, x, A, 0x076, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_10x10_U8sRGB , 128, 10, 10, 1, x, A, 0x036, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_12x10_FLT16 , 128, 12, 10, 1, x, A, 0x07e, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_12x10_U8sRGB , 128, 12, 10, 1, x, A, 0x03e, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_12x12_FLT16 , 128, 12, 12, 1, x, A, 0x07f, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_2D_12x12_U8sRGB , 128, 12, 12, 1, x, A, 0x03f, NC , ASTC_LDR_2D )
-GMM_FORMAT( ASTC_LDR_3D_3x3x3_U8sRGB , 128, 3, 3, 3, x, A, 0x080, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_3x3x3_FLT16 , 128, 3, 3, 3, x, A, 0x0c0, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_4x3x3_U8sRGB , 128, 4, 3, 3, x, A, 0x090, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_4x3x3_FLT16 , 128, 4, 3, 3, x, A, 0x0d0, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_4x4x3_U8sRGB , 128, 4, 4, 3, x, A, 0x094, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_4x4x3_FLT16 , 128, 4, 4, 3, x, A, 0x0d4, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_4x4x4_U8sRGB , 128, 4, 4, 4, x, A, 0x095, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_4x4x4_FLT16 , 128, 4, 4, 4, x, A, 0x0d5, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_5x4x4_U8sRGB , 128, 5, 4, 4, x, A, 0x0a5, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_5x4x4_FLT16 , 128, 5, 4, 4, x, A, 0x0e5, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_5x5x4_U8sRGB , 128, 5, 5, 4, x, A, 0x0a9, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_5x5x4_FLT16 , 128, 5, 5, 4, x, A, 0x0e9, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_5x5x5_U8sRGB , 128, 5, 5, 5, x, A, 0x0aa, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_5x5x5_FLT16 , 128, 5, 5, 5, x, A, 0x0ea, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_6x5x5_U8sRGB , 128, 6, 5, 5, x, A, 0x0ba, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_6x5x5_FLT16 , 128, 6, 5, 5, x, A, 0x0fa, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_6x6x5_U8sRGB , 128, 6, 6, 5, x, A, 0x0be, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_6x6x5_FLT16 , 128, 6, 6, 5, x, A, 0x0fe, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_6x6x6_U8sRGB , 128, 6, 6, 6, x, A, 0x0bf, NC , ASTC_3D )
-GMM_FORMAT( ASTC_LDR_3D_6x6x6_FLT16 , 128, 6, 6, 6, x, A, 0x0ff, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_2D_4x4_FLT16 , 128, 4, 4, 1, x, A, 0x140, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_5x4_FLT16 , 128, 5, 4, 1, x, A, 0x148, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_5x5_FLT16 , 128, 5, 5, 1, x, A, 0x149, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_6x5_FLT16 , 128, 6, 5, 1, x, A, 0x151, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_6x6_FLT16 , 128, 6, 6, 1, x, A, 0x152, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_8x5_FLT16 , 128, 8, 5, 1, x, A, 0x161, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_8x6_FLT16 , 128, 8, 6, 1, x, A, 0x162, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_8x8_FLT16 , 128, 8, 8, 1, x, A, 0x164, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_10x5_FLT16 , 128, 10, 5, 1, x, A, 0x171, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_10x6_FLT16 , 128, 10, 6, 1, x, A, 0x172, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_10x8_FLT16 , 128, 10, 8, 1, x, A, 0x174, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_10x10_FLT16 , 128, 10, 10, 1, x, A, 0x176, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_12x10_FLT16 , 128, 12, 10, 1, x, A, 0x17e, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_2D_12x12_FLT16 , 128, 12, 12, 1, x, A, 0x17f, NC , ASTC_HDR_2D )
+GMM_FORMAT( ASTC_FULL_3D_3x3x3_FLT16 , 128, 3, 3, 3, x, A, 0x1c0, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_4x3x3_FLT16 , 128, 4, 3, 3, x, A, 0x1d0, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_4x4x3_FLT16 , 128, 4, 4, 3, x, A, 0x1d4, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_4x4x4_FLT16 , 128, 4, 4, 4, x, A, 0x1d5, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_5x4x4_FLT16 , 128, 5, 4, 4, x, A, 0x1e5, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_5x5x4_FLT16 , 128, 5, 5, 4, x, A, 0x1e9, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_5x5x5_FLT16 , 128, 5, 5, 5, x, A, 0x1ea, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_6x5x5_FLT16 , 128, 6, 5, 5, x, A, 0x1fa, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_6x6x5_FLT16 , 128, 6, 6, 5, x, A, 0x1fe, NC , ASTC_3D )
+GMM_FORMAT( ASTC_FULL_3D_6x6x6_FLT16 , 128, 6, 6, 6, x, A, 0x1ff, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_2D_4x4_FLT16 , 128, 4, 4, 1, x, A, 0x040, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_4x4_U8sRGB , 128, 4, 4, 1, x, A, 0x000, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_5x4_FLT16 , 128, 5, 4, 1, x, A, 0x048, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_5x4_U8sRGB , 128, 5, 4, 1, x, A, 0x008, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_5x5_FLT16 , 128, 5, 5, 1, x, A, 0x049, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_5x5_U8sRGB , 128, 5, 5, 1, x, A, 0x009, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_6x5_FLT16 , 128, 6, 5, 1, x, A, 0x051, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_6x5_U8sRGB , 128, 6, 5, 1, x, A, 0x011, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_6x6_FLT16 , 128, 6, 6, 1, x, A, 0x052, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_6x6_U8sRGB , 128, 6, 6, 1, x, A, 0x012, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_8x5_FLT16 , 128, 8, 5, 1, x, A, 0x061, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_8x5_U8sRGB , 128, 8, 5, 1, x, A, 0x021, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_8x6_FLT16 , 128, 8, 6, 1, x, A, 0x062, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_8x6_U8sRGB , 128, 8, 6, 1, x, A, 0x022, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_8x8_FLT16 , 128, 8, 8, 1, x, A, 0x064, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_8x8_U8sRGB , 128, 8, 8, 1, x, A, 0x024, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_10x5_FLT16 , 128, 10, 5, 1, x, A, 0x071, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_10x5_U8sRGB , 128, 10, 5, 1, x, A, 0x031, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_10x6_FLT16 , 128, 10, 6, 1, x, A, 0x072, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_10x6_U8sRGB , 128, 10, 6, 1, x, A, 0x032, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_10x8_FLT16 , 128, 10, 8, 1, x, A, 0x074, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_10x8_U8sRGB , 128, 10, 8, 1, x, A, 0x034, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_10x10_FLT16 , 128, 10, 10, 1, x, A, 0x076, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_10x10_U8sRGB , 128, 10, 10, 1, x, A, 0x036, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_12x10_FLT16 , 128, 12, 10, 1, x, A, 0x07e, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_12x10_U8sRGB , 128, 12, 10, 1, x, A, 0x03e, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_12x12_FLT16 , 128, 12, 12, 1, x, A, 0x07f, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_2D_12x12_U8sRGB , 128, 12, 12, 1, x, A, 0x03f, NC , ASTC_LDR_2D )
+GMM_FORMAT( ASTC_LDR_3D_3x3x3_U8sRGB , 128, 3, 3, 3, x, A, 0x080, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_3x3x3_FLT16 , 128, 3, 3, 3, x, A, 0x0c0, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_4x3x3_U8sRGB , 128, 4, 3, 3, x, A, 0x090, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_4x3x3_FLT16 , 128, 4, 3, 3, x, A, 0x0d0, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_4x4x3_U8sRGB , 128, 4, 4, 3, x, A, 0x094, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_4x4x3_FLT16 , 128, 4, 4, 3, x, A, 0x0d4, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_4x4x4_U8sRGB , 128, 4, 4, 4, x, A, 0x095, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_4x4x4_FLT16 , 128, 4, 4, 4, x, A, 0x0d5, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_5x4x4_U8sRGB , 128, 5, 4, 4, x, A, 0x0a5, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_5x4x4_FLT16 , 128, 5, 4, 4, x, A, 0x0e5, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_5x5x4_U8sRGB , 128, 5, 5, 4, x, A, 0x0a9, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_5x5x4_FLT16 , 128, 5, 5, 4, x, A, 0x0e9, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_5x5x5_U8sRGB , 128, 5, 5, 5, x, A, 0x0aa, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_5x5x5_FLT16 , 128, 5, 5, 5, x, A, 0x0ea, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_6x5x5_U8sRGB , 128, 6, 5, 5, x, A, 0x0ba, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_6x5x5_FLT16 , 128, 6, 5, 5, x, A, 0x0fa, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_6x6x5_U8sRGB , 128, 6, 6, 5, x, A, 0x0be, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_6x6x5_FLT16 , 128, 6, 6, 5, x, A, 0x0fe, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_6x6x6_U8sRGB , 128, 6, 6, 6, x, A, 0x0bf, NC , ASTC_3D )
+GMM_FORMAT( ASTC_LDR_3D_6x6x6_FLT16 , 128, 6, 6, 6, x, A, 0x0ff, NC , ASTC_3D )
#endif // INCLUDE_ASTC_FORMATS
#ifdef INCLUDE_MISC_FORMATS
-GMM_FORMAT( AUYV , 32, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( AYUV , 32, 1, 1, 1, R, x, NA , 0x9 , ALWAYS )
-GMM_FORMAT( BAYER_BGGR8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = B
-GMM_FORMAT( BAYER_BGGR16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = B
-GMM_FORMAT( BAYER_GBRG8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = B
-GMM_FORMAT( BAYER_GBRG16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = B
-GMM_FORMAT( BAYER_GRBG8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = R
-GMM_FORMAT( BAYER_GRBG16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = R
-GMM_FORMAT( BAYER_RGGB8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = R
-GMM_FORMAT( BAYER_RGGB16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = R
-GMM_FORMAT( BC1 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // Legacy GMM name for related HW format.
-GMM_FORMAT( BC2 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
-GMM_FORMAT( BC3 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
-GMM_FORMAT( BC4 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
-GMM_FORMAT( BC5 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
-GMM_FORMAT( BC6 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
-GMM_FORMAT( BC6H , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
-GMM_FORMAT( BC7 , 128, 4, 4, 1, x, x, NA , NC , GEN(7) ) // "
-GMM_FORMAT( BGRP , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // FOURCC:BGRP
-GMM_FORMAT( D16_UNORM , 16, 1, 1, 1, x, x, NA , 0x10, ALWAYS ) //Depth uses color format L1e.En
-GMM_FORMAT( D24_UNORM_X8_UINT , 32, 1, 1, 1, x, x, NA , 0x11, ALWAYS )
-GMM_FORMAT( D32_FLOAT , 32, 1, 1, 1, x, x, NA , 0x11, ALWAYS )
-GMM_FORMAT( DXT1 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // Legacy GMM name for related HW format.
-GMM_FORMAT( DXT2_5 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
-GMM_FORMAT( ETC1 , 64, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
-GMM_FORMAT( ETC2 , 64, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
-GMM_FORMAT( ETC2_EAC , 128, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
-GMM_FORMAT( GENERIC_8BIT , 8, 1, 1, 1, x, x, NA , NC , ALWAYS )
-GMM_FORMAT( GENERIC_16BIT , 16, 1, 1, 1, x, x, NA , NC , ALWAYS )
-GMM_FORMAT( GENERIC_24BIT , 24, 1, 1, 1, x, x, NA , NC , ALWAYS )
-GMM_FORMAT( GENERIC_32BIT , 32, 1, 1, 1, x, x, NA , NC , ALWAYS )
-GMM_FORMAT( GENERIC_48BIT , 48, 1, 1, 1, x, x, NA , NC , ALWAYS )
-GMM_FORMAT( GENERIC_64BIT , 64, 1, 1, 1, x, x, NA , NC , ALWAYS )
-GMM_FORMAT( GENERIC_96BIT , 96, 1, 1, 1, x, x, NA , NC , ALWAYS )
-GMM_FORMAT( GENERIC_128BIT , 128, 1, 1, 1, x, x, NA , NC , ALWAYS )
-GMM_FORMAT( GENERIC_192BIT , 192, 1, 1, 1, x, x, NA , NC , GEN(8) )
-GMM_FORMAT( GENERIC_256BIT , 256, 1, 1, 1, x, x, NA , NC , GEN(8) )
-GMM_FORMAT( I420 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // Same as IYUV.
-GMM_FORMAT( IYUV , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( IMC1 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( IMC2 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( IMC3 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( IMC4 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( L4A4 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( MFX_JPEG_YUV411 , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
-GMM_FORMAT( MFX_JPEG_YUV411R , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
-GMM_FORMAT( MFX_JPEG_YUV420 , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) ) // Same as IMC3.
-GMM_FORMAT( MFX_JPEG_YUV422H , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
-GMM_FORMAT( MFX_JPEG_YUV422V , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
-GMM_FORMAT( MFX_JPEG_YUV444 , 8, 1, 1, 1, R, x, NA , 0xF , GEN(7) )
-GMM_FORMAT( NV11 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( NV12 , 8, 1, 1, 1, R, x, NA , 0xF , ALWAYS )
-GMM_FORMAT( NV21 , 8, 1, 1, 1, R, x, NA , 0xF , ALWAYS )
-GMM_FORMAT( P8 , 8, 1, 1, 1, R, x, NA, NC , ALWAYS )
-GMM_FORMAT( P010 , 16, 1, 1, 1, R, x, NA , 0x7 , ALWAYS )
-GMM_FORMAT( P012 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( P016 , 16, 1, 1, 1, R, x, NA , 0x8 , ALWAYS )
-GMM_FORMAT( P208 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( R10G10B10_XR_BIAS_A2_UNORM , 32, 1, 1, 1, x, x, NA , 0x18, ALWAYS ) // DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM
-GMM_FORMAT( R24G8_TYPELESS , 32, 1, 1, 1, x, x, NA , 0x11, ALWAYS ) // DXGI_FORMAT_R24G8_TYPELESS (To differentiate between GENERIC_32BIT.)
-GMM_FORMAT( R32G8X24_TYPELESS , 64, 1, 1, 1, x, x, NA , 0x11, ALWAYS ) // DXGI_FORMAT_R32G8X24_TYPELESS (To differentiate between GENERIC_64BIT.)
-GMM_FORMAT( RENDER_8BIT , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( RGBP , 8, 1, 1, 1, R, x, NA , 0xF , ALWAYS ) // FOURCC:RGBP
-GMM_FORMAT( Y1_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) )
-GMM_FORMAT( Y8_UNORM_VA , 8, 1, 1, 1, x, x, NA , 0xF , GEN(8) )
-GMM_FORMAT( Y16_SNORM , 16, 1, 1, 1, x, x, NA , 0x7 , GEN(8) )
-GMM_FORMAT( Y16_UNORM , 16, 1, 1, 1, x, x, NA , 0x7 , GEN(8) )
+GMM_FORMAT( AUYV , 32, 1, 1, 1, R, x, NA , NC , ALWAYS )
+GMM_FORMAT( AYUV , 32, 1, 1, 1, R, x, NA , FC(2, x, AYUV, , ), ALWAYS )
+GMM_FORMAT( BAYER_BGGR8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = B
+GMM_FORMAT( BAYER_BGGR16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = B
+GMM_FORMAT( BAYER_GBRG8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = B
+GMM_FORMAT( BAYER_GBRG16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = B
+GMM_FORMAT( BAYER_GRBG8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = R
+GMM_FORMAT( BAYER_GRBG16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = G, (1, 0) = R
+GMM_FORMAT( BAYER_RGGB8 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = R
+GMM_FORMAT( BAYER_RGGB16 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS ) // (0, 0) = R
+GMM_FORMAT( BC1 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // Legacy GMM name for related HW format.
+GMM_FORMAT( BC2 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
+GMM_FORMAT( BC3 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
+GMM_FORMAT( BC4 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
+GMM_FORMAT( BC5 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
+GMM_FORMAT( BC6 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
+GMM_FORMAT( BC6H , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
+GMM_FORMAT( BC7 , 128, 4, 4, 1, x, x, NA , NC , GEN(7) ) // "
+GMM_FORMAT( BGRP , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // FOURCC:BGRP
+GMM_FORMAT( D16_UNORM , 16, 1, 1, 1, x, x, NA , FC(3, 16, R, 16, U), ALWAYS ) //Depth uses color format L1e.En
+GMM_FORMAT( D24_UNORM_X8_UINT , 32, 1, 1, 1, x, x, NA , FC(3, 32, R, 32, U1), ALWAYS )
+GMM_FORMAT( D32_FLOAT , 32, 1, 1, 1, x, x, NA , FC(3, 32, R, 32, F1), ALWAYS )
+GMM_FORMAT( DXT1 , 64, 4, 4, 1, x, x, NA , NC , ALWAYS ) // Legacy GMM name for related HW format.
+GMM_FORMAT( DXT2_5 , 128, 4, 4, 1, x, x, NA , NC , ALWAYS ) // "
+GMM_FORMAT( ETC1 , 64, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
+GMM_FORMAT( ETC2 , 64, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
+GMM_FORMAT( ETC2_EAC , 128, 4, 4, 1, x, x, NA , NC , GEN(8) || VLV2 ) // "
+GMM_FORMAT( GENERIC_8BIT , 8, 1, 1, 1, x, x, NA , NC , ALWAYS )
+GMM_FORMAT( GENERIC_16BIT , 16, 1, 1, 1, x, x, NA , NC , ALWAYS )
+GMM_FORMAT( GENERIC_24BIT , 24, 1, 1, 1, x, x, NA , NC , ALWAYS )
+GMM_FORMAT( GENERIC_32BIT , 32, 1, 1, 1, x, x, NA , NC , ALWAYS )
+GMM_FORMAT( GENERIC_48BIT , 48, 1, 1, 1, x, x, NA , NC , ALWAYS )
+GMM_FORMAT( GENERIC_64BIT , 64, 1, 1, 1, x, x, NA , NC , ALWAYS )
+GMM_FORMAT( GENERIC_96BIT , 96, 1, 1, 1, x, x, NA , NC , ALWAYS )
+GMM_FORMAT( GENERIC_128BIT , 128, 1, 1, 1, x, x, NA , NC , ALWAYS )
+GMM_FORMAT( GENERIC_192BIT , 192, 1, 1, 1, x, x, NA , NC , GEN(8) )
+GMM_FORMAT( GENERIC_256BIT , 256, 1, 1, 1, x, x, NA , NC , GEN(8) )
+GMM_FORMAT( I420 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS ) // Same as IYUV.
+GMM_FORMAT( IYUV , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
+GMM_FORMAT( IMC1 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
+GMM_FORMAT( IMC2 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
+GMM_FORMAT( IMC3 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
+GMM_FORMAT( IMC4 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
+GMM_FORMAT( L4A4 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS ) // A4L4. No HW support.
+GMM_FORMAT( MFX_JPEG_YUV411 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
+GMM_FORMAT( MFX_JPEG_YUV411R , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
+GMM_FORMAT( MFX_JPEG_YUV420 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) ) // Same as IMC3.
+GMM_FORMAT( MFX_JPEG_YUV422H , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
+GMM_FORMAT( MFX_JPEG_YUV422V , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
+GMM_FORMAT( MFX_JPEG_YUV444 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), GEN(7) )
+GMM_FORMAT( NV11 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
+GMM_FORMAT( NV12 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
+GMM_FORMAT( NV21 , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS )
+GMM_FORMAT( P8 , 8, 1, 1, 1, R, x, NA, NC , ALWAYS )
+GMM_FORMAT( P010 , 16, 1, 1, 1, R, x, NA , FC(2, x, P010, , ), ALWAYS )
+GMM_FORMAT( P012 , 16, 1, 1, 1, R, x, NA , NC , ALWAYS )
+GMM_FORMAT( P016 , 16, 1, 1, 1, R, x, NA , FC(2, x, P016, , ), ALWAYS )
+GMM_FORMAT( P208 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
+GMM_FORMAT( R10G10B10_XR_BIAS_A2_UNORM , 32, 1, 1, 1, x, x, NA , FC(2, x, RGB10A2, , ), ALWAYS ) // DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM
+GMM_FORMAT( R24G8_TYPELESS , 32, 1, 1, 1, x, x, NA , FC(2, 32, R, 32, U), ALWAYS ) // DXGI_FORMAT_R24G8_TYPELESS (To differentiate between GENERIC_32BIT.)
+GMM_FORMAT( R32G8X24_TYPELESS , 64, 1, 1, 1, x, x, NA , FC(2, 32, R, 32, U), ALWAYS ) // DXGI_FORMAT_R32G8X24_TYPELESS (To differentiate between GENERIC_64BIT.)
+GMM_FORMAT( RENDER_8BIT , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
+GMM_FORMAT( RGBP , 8, 1, 1, 1, R, x, NA , FC(2, x, NV12, , ), ALWAYS ) // FOURCC:RGBP
+GMM_FORMAT( Y1_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) )
+GMM_FORMAT( Y8_UNORM_VA , 8, 1, 1, 1, x, x, NA , FC(2, x, NV12, , ), GEN(8) )
+GMM_FORMAT( Y16_SNORM , 16, 1, 1, 1, x, x, NA , FC(2, x, P010, , ), GEN(8) )
+GMM_FORMAT( Y16_UNORM , 16, 1, 1, 1, x, x, NA , FC(2, x, P010, , ), GEN(8) )
#if (IGFX_GEN >= IGFX_GEN10)
-GMM_FORMAT( Y32_UNORM , 32, 1, 1, 1, x, x, NA , NC , GEN(10) )
+GMM_FORMAT( Y32_UNORM , 32, 1, 1, 1, x, x, NA , NC , GEN(10) ) // Y32 removed from Gen9 but still referenced, only available Gen10+
#endif
-GMM_FORMAT( Y210 , 64, 2, 1, 1, R, x, NA , 0x5 , GEN(11) ) // Packed 422 10/12/16 bit
-GMM_FORMAT( Y212 , 64, 2, 1, 1, R, x, NA , 0x5 , GEN(11) )
-GMM_FORMAT( Y410 , 32, 1, 1, 1, R, x, NA , 0x4 , GEN(11) )
-GMM_FORMAT( Y412 , 64, 1, 1, 1, R, x, NA , 0x6 , GEN(11) )
-GMM_FORMAT( Y216 , 64, 2, 1, 1, R, x, NA, 0x5, ALWAYS )
-GMM_FORMAT( Y416 , 64, 1, 1, 1, R, x, NA , 0x6 , ALWAYS ) // Packed 444 10/12/16 bit,
-GMM_FORMAT( YV12 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( YVU9 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
+GMM_FORMAT( Y210 , 64, 2, 1, 1, R, x, NA , FC(2, x, Y210, , ), GEN(11) ) // Packed 422 10/12/16 bit
+GMM_FORMAT( Y212 , 64, 2, 1, 1, R, x, NA , FC(2, x, Y216, , ), GEN(11) )
+GMM_FORMAT( Y410 , 32, 1, 1, 1, R, x, NA , FC(2, x, Y410, , ), GEN(11) )
+GMM_FORMAT( Y412 , 64, 1, 1, 1, R, x, NA , FC(2, x, Y416, , ), GEN(11) )
+GMM_FORMAT( Y216 , 64, 2, 1, 1, R, x, NA, FC(2, x, Y216, , ), ALWAYS )
+GMM_FORMAT( Y416 , 64, 1, 1, 1, R, x, NA , FC(2, x, Y416, , ), ALWAYS ) // Packed 444 10/12/16 bit,
+GMM_FORMAT( YV12 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
+GMM_FORMAT( YVU9 , 8, 1, 1, 1, R, x, NA , NC , ALWAYS )
// Implement packed 4:2:2 YUV format (UYVY, VYUY, YUY2, YVYU) as compressed block format by suffixing _2x1.(i.e. 32bpe 2x1 pixel blocks instead of 16bpp 1x1 block)
// All OS components(UMDs/KMD) can switch to *_2x1 style independent of legacy implementation.
// Refer GmmCommonExt.h for legacy implemenation of UYVY, VYUY, YUY2, YVYU)
// TODO : Unify them when all OS-components switch to compressed block format
-GMM_FORMAT( UYVY_2x1 , 32, 2, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( VYUY_2x1 , 32, 2, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( YUY2_2x1 , 32, 2, 1, 1, R, x, NA , 0x3 , ALWAYS )
-GMM_FORMAT( YVYU_2x1 , 32, 2, 1, 1, R, x, NA , NC , ALWAYS )
-GMM_FORMAT( MEDIA_Y1_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) )
-GMM_FORMAT( MEDIA_Y8_UNORM , 8, 1, 1, 1, x, x, NA , 0xF , GEN(8) )
-GMM_FORMAT( MEDIA_Y16_SNORM , 16, 1, 1, 1, x, x, NA , 0x7 , GEN(8) )
-GMM_FORMAT( MEDIA_Y16_UNORM , 16, 1, 1, 1, x, x, NA , 0x7 , GEN(8) )
-GMM_FORMAT( MEDIA_Y32_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) ) // Y32 is BDW name for SKL Y1, and is 1bpp with 32b granularity
+GMM_FORMAT( UYVY_2x1 , 32, 2, 1, 1, R, x, NA , FC(2, x, SWAPY, , ), ALWAYS )
+GMM_FORMAT( VYUY_2x1 , 32, 2, 1, 1, R, x, NA , FC(2, x, SWAPUVY, , ), ALWAYS )
+GMM_FORMAT( YUY2_2x1 , 32, 2, 1, 1, R, x, NA , FC(2, x, YUY2, , ), ALWAYS )
+GMM_FORMAT( YVYU_2x1 , 32, 2, 1, 1, R, x, NA , FC(2, x, SWAPUV, , ), ALWAYS )
+GMM_FORMAT( MEDIA_Y1_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) )
+GMM_FORMAT( MEDIA_Y8_UNORM , 8, 1, 1, 1, x, x, NA , FC(2, x, NV12, , ), GEN(8) )
+GMM_FORMAT( MEDIA_Y16_SNORM , 16, 1, 1, 1, x, x, NA , FC(2, x, P010, , ), GEN(8) )
+GMM_FORMAT( MEDIA_Y16_UNORM , 16, 1, 1, 1, x, x, NA , FC(2, x, P010, , ), GEN(8) )
+GMM_FORMAT( MEDIA_Y32_UNORM , 1, 1, 1, 1, x, x, NA , NC , GEN(8) ) // Y32 is BDW name for SKL Y1, and is 1bpp with 32b granularity
+GMM_FORMAT( B16G16R16A16_UNORM , 64, 1, 1, 1, R, x, NA , FC(3, 16, RGBA, 16, U), ALWAYS ) // Swapped ARGB16 for media-SFC output
#if _WIN32
-GMM_FORMAT( WGBOX_YUV444 , 32, 1, 1, 1, x, x, NA , NC , GEN(9) ) // For testing purposes only.
-GMM_FORMAT( WGBOX_PLANAR_YUV444 , 32, 1, 1, 1, x, x, NA , NC , GEN(9) ) // For testing purposes only.
+GMM_FORMAT( WGBOX_YUV444 , 32, 1, 1, 1, x, x, NA , NC , GEN(9) ) // For testing purposes only.
+GMM_FORMAT( WGBOX_PLANAR_YUV444 , 32, 1, 1, 1, x, x, NA , NC , GEN(9) ) // For testing purposes only.
#endif
#endif // INCLUDE_MISC_FORMATS
@@ -497,6 +507,7 @@
#undef INCLUDE_MISC_FORMATS
#undef INCLUDE_SURFACESTATE_FORMATS
#undef NA
+#undef NC
#undef R
#undef SKU
#undef VLV2
diff --git a/Source/GmmLib/inc/External/Common/GmmInternal.h b/Source/GmmLib/inc/External/Common/GmmInternal.h
index 842174e..40df939 100644
--- a/Source/GmmLib/inc/External/Common/GmmInternal.h
+++ b/Source/GmmLib/inc/External/Common/GmmInternal.h
@@ -73,10 +73,16 @@
#define GMM_ENABLE_GEN11 0
#endif
+#if (!defined(GMM_GFX_GEN) || (GMM_GFX_GEN == 120))
+#define GMM_ENABLE_GEN12 1
+#else
+#define GMM_ENABLE_GEN12 0
+#endif
+
#if (IGFX_GEN >= IGFX_GEN11)
#if !(GMM_ENABLE_GEN8 || GMM_ENABLE_GEN9 || GMM_ENABLE_GEN10 || \
- GMM_ENABLE_GEN11)
+ GMM_ENABLE_GEN11 || GMM_ENABLE_GEN12)
#error "Unrecognized GMM_GFX_GEN !"
#endif
#elif (IGFX_GEN >= IGFX_GEN10)
diff --git a/Source/GmmLib/inc/External/Common/GmmLibDllName.h b/Source/GmmLib/inc/External/Common/GmmLibDllName.h
index 306e507..8d16227 100755
--- a/Source/GmmLib/inc/External/Common/GmmLibDllName.h
+++ b/Source/GmmLib/inc/External/Common/GmmLibDllName.h
@@ -29,7 +29,7 @@
#if defined(_WIN64)
#define GMM_UMD_DLL "igdgmm64.dll"
#else
- #define GMM_UMD_DLL "libigdgmm.so.9"
+ #define GMM_UMD_DLL "libigdgmm.so.10"
#endif
#else
#define GMM_ENTRY_NAME "_OpenGmm@4"
@@ -40,6 +40,6 @@
#if defined(_WIN32)
#define GMM_UMD_DLL "igdgmm32.dll"
#else
- #define GMM_UMD_DLL "libigdgmm.so.9"
+ #define GMM_UMD_DLL "libigdgmm.so.10"
#endif
#endif
diff --git a/Source/GmmLib/inc/External/Common/GmmPlatformExt.h b/Source/GmmLib/inc/External/Common/GmmPlatformExt.h
index 0c76bfc..8651732 100644
--- a/Source/GmmLib/inc/External/Common/GmmPlatformExt.h
+++ b/Source/GmmLib/inc/External/Common/GmmPlatformExt.h
@@ -67,7 +67,10 @@
uint8_t Width;
} Element;
GMM_SURFACESTATE_FORMAT SurfaceStateFormat;
- uint32_t Reserved;
+ union {
+ GMM_E2ECOMP_FORMAT AuxL1eFormat;
+ uint8_t CompressionFormat;
+ } CompressionFormat;
}GMM_FORMAT_ENTRY;
//===========================================================================
@@ -267,6 +270,8 @@
uint8_t HiZPixelsPerByte; //HiZ-Bpp is < 1, keep inverse
uint64_t ReconMaxHeight;
uint64_t ReconMaxWidth;
+ uint8_t NoOfBitsSupported; // No of bits supported for System physical address on GPU
+ uint64_t HighestAcceptablePhysicalAddress; // Highest acceptable System physical Address
}__GMM_PLATFORM_RESOURCE, GMM_PLATFORM_INFO;
//***************************************************************************
diff --git a/Source/GmmLib/inc/External/Common/GmmResourceFlags.h b/Source/GmmLib/inc/External/Common/GmmResourceFlags.h
index c457c1d..50944bb 100644
--- a/Source/GmmLib/inc/External/Common/GmmResourceFlags.h
+++ b/Source/GmmLib/inc/External/Common/GmmResourceFlags.h
@@ -148,6 +148,8 @@
uint32_t __ForceOtherHVALIGN4 : 1;
uint32_t DisableDisplayCcsClearColor : 1; // Disables display clear color
uint32_t DisableDisplayCcsCompression : 1; // Disables display decompression on the surface (it disables display awareness of both fast clear/render compression)
+ uint32_t PreGen12FastClearOnly : 1; // i.e. AUX_CCS_D (instead of AUX_CCS_E). Flag carried by GMM between UMDs to support shared resources.
+ uint32_t Reserved : 1; // Reserved
} Wa;
} GMM_RESOURCE_FLAG;
diff --git a/Source/GmmLib/inc/Internal/Common/GmmLibInc.h b/Source/GmmLib/inc/Internal/Common/GmmLibInc.h
index 68c4592..a66365b 100644
--- a/Source/GmmLib/inc/Internal/Common/GmmLibInc.h
+++ b/Source/GmmLib/inc/Internal/Common/GmmLibInc.h
@@ -33,16 +33,27 @@
#include "External/Common/GmmCommonExt.h"
#include "External/Common/GmmPlatformExt.h"
#include "External/Common/GmmCachePolicy.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen8.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen9.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen10.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen11.h"
+#include "External/Common/CachePolicy/GmmCachePolicyGen12.h"
#include "External/Common/GmmResourceInfoExt.h"
#include "../Platform/GmmPlatforms.h"
#include "Platform/GmmGen8Platform.h"
#include "Platform/GmmGen9Platform.h"
+#include "Platform/GmmGen10Platform.h"
+#include "Platform/GmmGen11Platform.h"
+#include "Platform/GmmGen12Platform.h"
#include "External/Common/GmmTextureExt.h"
#include "../Texture/GmmTexture.h"
#include "Texture/GmmTextureCalc.h"
#include "Texture/GmmGen7TextureCalc.h"
#include "Texture/GmmGen8TextureCalc.h"
#include "Texture/GmmGen9TextureCalc.h"
+#include "Texture/GmmGen10TextureCalc.h"
+#include "Texture/GmmGen11TextureCalc.h"
+#include "Texture/GmmGen12TextureCalc.h"
#include "External/Common/GmmResourceInfo.h"
#include "External/Common/GmmInfoExt.h"
#include "External/Common/GmmInfo.h"
diff --git a/Source/GmmLib/inc/Internal/Common/Platform/GmmGen12Platform.h b/Source/GmmLib/inc/Internal/Common/Platform/GmmGen12Platform.h
new file mode 100644
index 0000000..fb9f8c8
--- /dev/null
+++ b/Source/GmmLib/inc/Internal/Common/Platform/GmmGen12Platform.h
@@ -0,0 +1,104 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+#pragma once
+#include "GmmGen11Platform.h"
+
+typedef struct __CCS_UNIT
+{
+ ALIGNMENT Align;
+ struct
+ {
+ //represents downscale factor if msb = 0,
+ // upscale factor if msb = 1,
+ //factor value is absolute (+ve)
+ int32_t Width;
+ int32_t Height;
+ uint32_t Depth; //Depth slices or Samples sharing CCS$line
+ } Downscale;
+} CCS_UNIT;
+
+//Gen12 CCS supported on Yf/Ys 2D/MSAA/3D tiling
+#define CCS_MODES (GMM_TILE_MODES - TILE_YF_2D_8bpe)
+#define CCS_MODE(x) (x >= TILE_YF_2D_8bpe) ? (x - TILE_YF_2D_8bpe) : CCS_MODES
+
+typedef enum _FC_TileType
+{
+ FC_TILE_Y,
+ FC_TILE_YF,
+ FC_TILE_YS,
+ //max equals last supported plus one
+ FC_TILE_MAX
+} FC_TILE_TYPE;
+
+#define FCTilingType(x) (((x) == LEGACY_TILE_Y) ? (FC_TILE_Y) : \
+ (((x) >= TILE_YF_2D_8bpe && (x) <= TILE_YF_2D_128bpe) ? (FC_TILE_YF) : \
+ (((x) >= TILE_YS_2D_8bpe && (x) <= TILE_YS_2D_128bpe) ? (FC_TILE_YS) : \
+ (FC_TILE_MAX))))
+#define FCMaxBppModes 5
+#define FCMaxModes FC_TILE_MAX * FCMaxBppModes
+#define FCBppMode(bpp) __GmmLog2(bpp) - 3
+#define FCMode(TileMode, bpp) (FCTilingType(TileMode) < FC_TILE_MAX) ? (FCTilingType(TileMode) * FCMaxBppModes + FCBppMode(bpp)) : FCMaxModes
+
+//===========================================================================
+// typedef:
+// GMM_TEXTURE_ALIGN_EX
+//
+// Description:
+// The following struct extends the texture mip map unit alignment
+// required for each map format. The alignment values are platform
+// dependent.
+//
+//---------------------------------------------------------------------------
+typedef struct GMM_TEXTURE_ALIGN_EX_REC
+{
+ CCS_UNIT CCSEx[CCS_MODES];
+}GMM_TEXTURE_ALIGN_EX;
+
+#ifdef __cplusplus
+
+namespace GmmLib
+{
+ class NON_PAGED_SECTION PlatformInfoGen12 : public PlatformInfoGen11
+ {
+ protected:
+ GMM_TEXTURE_ALIGN_EX TexAlignEx;
+ CCS_UNIT FCTileMode[FCMaxModes];
+ public:
+ PlatformInfoGen12(PLATFORM &Platform);
+ ~PlatformInfoGen12(){};
+ virtual GMM_TEXTURE_ALIGN_EX GetExTextureAlign() { return TexAlignEx; }
+ virtual void ApplyExtendedTexAlign(uint32_t CCSMode, ALIGNMENT& UnitAlign);
+ virtual CCS_UNIT* GetFCRectAlign() { return FCTileMode; }
+ virtual void SetCCSFlag(GMM_RESOURCE_FLAG &Flags);
+ virtual uint8_t ValidateMMC(GMM_TEXTURE_INFO &Surf);
+ virtual uint8_t ValidateCCS(GMM_TEXTURE_INFO &Surf);
+ virtual uint8_t ValidateUnifiedAuxSurface(GMM_TEXTURE_INFO &Surf);
+ virtual uint8_t CheckFmtDisplayDecompressible(GMM_TEXTURE_INFO &Surf,
+ bool IsSupportedRGB64_16_16_16_16,
+ bool IsSupportedRGB32_8_8_8_8,
+ bool IsSupportedRGB32_2_10_10_10,
+ bool IsSupportedMediaFormats);
+ virtual uint8_t OverrideCompressionFormat(GMM_RESOURCE_FORMAT Format, uint8_t IsMC);
+ };
+}
+
+#endif
\ No newline at end of file
diff --git a/Source/GmmLib/inc/Internal/Common/Texture/GmmGen12TextureCalc.h b/Source/GmmLib/inc/Internal/Common/Texture/GmmGen12TextureCalc.h
new file mode 100644
index 0000000..97a298c
--- /dev/null
+++ b/Source/GmmLib/inc/Internal/Common/Texture/GmmGen12TextureCalc.h
@@ -0,0 +1,105 @@
+/*==============================================================================
+Copyright(c) 2019 Intel Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files(the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and / or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+============================================================================*/
+
+#pragma once
+#ifdef __cplusplus
+#include "GmmGen11TextureCalc.h"
+#include "../Platform/GmmGen12Platform.h"
+
+/////////////////////////////////////////////////////////////////////////////////////
+/// @file GmmGen12TextureCalc.h
+/// @brief This file contains the functions and members definitions for texture alloc-
+/// ation on all Gen12 platforms.
+/////////////////////////////////////////////////////////////////////////////////////
+namespace GmmLib
+{
+ /////////////////////////////////////////////////////////////////////////
+ /// Contains texture calc functions and members for Gen12 platforms.
+ /// This class is derived from the base GmmTextureCalc class so clients
+ /// shouldn't have to ever interact with this class directly.
+ /////////////////////////////////////////////////////////////////////////
+ class NON_PAGED_SECTION GmmGen12TextureCalc :
+ public GmmGen11TextureCalc
+ {
+ private:
+
+ protected:
+
+ virtual uint32_t Get2DMipMapHeight(
+ GMM_TEXTURE_INFO *pTexInfo);
+ virtual GMM_STATUS FillTexCCS(
+ GMM_TEXTURE_INFO *pSurf,
+ GMM_TEXTURE_INFO *pAuxTexInfo);
+ public:
+ /* Constructors */
+
+ GmmGen12TextureCalc()
+ {
+
+ }
+
+ ~GmmGen12TextureCalc()
+ {
+
+ }
+
+ /* Function prototypes */
+
+ virtual GMM_STATUS GMM_STDCALL FillTex2D(GMM_TEXTURE_INFO *pTexInfo,
+ __GMM_BUFFER_TYPE *pRestrictions);
+
+
+ virtual GMM_STATUS GMM_STDCALL FillTexPlanar(GMM_TEXTURE_INFO *pTexInfo,
+ __GMM_BUFFER_TYPE *pRestrictions);
+
+ virtual GMM_STATUS GMM_STDCALL GetCCSScaleFactor(GMM_TEXTURE_INFO * pTexInfo,
+ CCS_UNIT& ScaleFactor);
+
+ GMM_STATUS GMM_STDCALL GetCCSExMode(GMM_TEXTURE_INFO * AuxSurf);
+
+ virtual uint32_t GMM_STDCALL ScaleTextureHeight(GMM_TEXTURE_INFO * pTexInfo,
+ uint32_t Height);
+
+ virtual uint32_t GMM_STDCALL ScaleTextureWidth (GMM_TEXTURE_INFO* pTexInfo,
+ uint32_t Width);
+
+ virtual uint32_t GMM_STDCALL ScaleFCRectHeight(GMM_TEXTURE_INFO * pTexInfo,
+ uint32_t Height);
+
+ virtual uint64_t GMM_STDCALL ScaleFCRectWidth(GMM_TEXTURE_INFO* pTexInfo,
+ uint64_t Width);
+ virtual GMM_STATUS GMM_STDCALL MSAACCSUsage(GMM_TEXTURE_INFO *pTexInfo);
+ virtual void GMM_STDCALL AllocateOneTileThanRequied(GMM_TEXTURE_INFO *pTexInfo,
+ GMM_GFX_SIZE_T &WidthBytesRender,
+ GMM_GFX_SIZE_T &WidthBytesPhysical,
+ GMM_GFX_SIZE_T &WidthBytesLock)
+ {
+ GMM_UNREFERENCED_PARAMETER(pTexInfo);
+ GMM_UNREFERENCED_PARAMETER(WidthBytesRender);
+ GMM_UNREFERENCED_PARAMETER(WidthBytesPhysical);
+ GMM_UNREFERENCED_PARAMETER(WidthBytesLock);
+ }
+
+ /* inline functions */
+ };
+}
+#endif // #ifdef __cplusplus
\ No newline at end of file
diff --git a/Source/inc/common/gtsysinfo.h b/Source/inc/common/gtsysinfo.h
index de00f96..783cb87 100644
--- a/Source/inc/common/gtsysinfo.h
+++ b/Source/inc/common/gtsysinfo.h
@@ -35,6 +35,8 @@
// Maximums which bound all supported GT
#define GT_MAX_SLICE (4)
#define GT_MAX_SUBSLICE_PER_SLICE (8)
+#define GT_MAX_SUBSLICE_PER_DSS (2) // Currently max value based on Gen12
+#define GT_MAX_DUALSUBSLICE_PER_SLICE (6) // Currently max value based on Gen12LP
typedef struct GT_SUBSLICE_INFO
{
@@ -43,11 +45,19 @@
uint32_t EuEnabledMask; // Mask of EUs enabled on this SubSlice
} GT_SUBSLICE_INFO;
+typedef struct GT_DUALSUBSLICE_INFO
+{
+ bool Enabled; // Bool to determine if this SS is enabled.
+ GT_SUBSLICE_INFO SubSlice[GT_MAX_SUBSLICE_PER_DSS]; // SS details that belong to this DualSubSlice.
+} GT_DUALSUBSLICE_INFO;
+
typedef struct GT_SLICE_INFO
{
bool Enabled; // determine if this slice is enabled.
GT_SUBSLICE_INFO SubSliceInfo[GT_MAX_SUBSLICE_PER_SLICE]; // SS details that belong to this slice.
+ GT_DUALSUBSLICE_INFO DSSInfo[GT_MAX_DUALSUBSLICE_PER_SLICE]; // DSS details that belong to this slice.
uint32_t SubSliceEnabledCount; // No. of SS enabled in this slice
+ uint32_t DualSubSliceEnabledCount; // No. of DSS enabled in this slice
} GT_SLICE_INFO;
typedef struct GT_VEBOX_INFO
@@ -119,6 +129,25 @@
bool IsValid; // flag to check if VDBoxInfo is valid.
} GT_VDBOX_INFO;
+typedef struct GT_CCS_INFO
+{
+ union CCSInstances
+ {
+ struct CCSBitStruct
+ {
+ uint32_t CCS0Enabled : 1; // To determine if CCS0 is enabled
+ uint32_t Reserved : 31; // Reserved bits
+ } Bits;
+
+ uint32_t CCSEnableMask; // Union for all CCS instances. It can be used to know which CCS is enabled.
+
+ } Instances;
+
+ uint32_t NumberOfCCSEnabled; // Number of bits set among bit 0-3 of CCSEnableMask;
+
+ bool IsValid; // flag to check if CCSInfo is valid.
+
+} GT_CCS_INFO;
typedef struct GT_SQIDI_INFO
{
@@ -133,6 +162,7 @@
uint32_t ThreadCount; // total no of system threads available
uint32_t SliceCount; // Total no. of enabled slices
uint32_t SubSliceCount; // Total no. of enabled subslices
+ uint32_t DualSubSliceCount; // Total no. of enabled dualsubslices
uint64_t L3CacheSizeInKb; // Total L3 cache size in kilo bytes
uint64_t LLCCacheSizeInKb; // Total LLC cache size in kilo bytes
uint64_t EdramSizeInKb; // Total EDRAM size in kilo bytes
@@ -147,6 +177,8 @@
uint32_t TotalGsThreads; // Total threads in GS
uint32_t TotalPsThreadsWindowerRange; // Total threads in PS Windower Range
+ uint32_t TotalVsThreads_Pocs; // Total threads in VS for POCS
+
// Note: The CSR size requirement is not clear at this moment. Till then the driver will set
// the maximum size that should be sufficient for all platform SKUs.
uint32_t CsrSizeInMb; // Total size that driver needs to allocate for CSR.
@@ -159,6 +191,7 @@
uint32_t MaxEuPerSubSlice; // Max available EUs per sub-slice.
uint32_t MaxSlicesSupported; // Max slices this platfrom can have.
uint32_t MaxSubSlicesSupported; // Max total sub-slices this platform can have (not per slice)
+ uint32_t MaxDualSubSlicesSupported; // Max total dual sub-slices this platform can have (not per slice)
/*------------------------------------*/
// Flag to determine if hashing is enabled. If enabled then one of the L3 banks will be disabled.
@@ -186,11 +219,10 @@
GT_SQIDI_INFO SqidiInfo;
uint32_t ReservedCCSWays; // Reserved CCS ways provides value of reserved L3 ways for CCS when CCS is enabled.
- // This is a hardcoded value as suggested by HW. No MMIO read is needed for same.
-
+ // This is a hardcoded value as suggested by HW. No MMIO read is needed for same.
+ GT_CCS_INFO CCSInfo; // CCSInfo provides details(enabled/disabled) of all CCS instances.
} GT_SYSTEM_INFO, *PGT_SYSTEM_INFO;
-
#pragma pack(pop)
#endif //__GT_SYS_INFO_H__
diff --git a/Source/inc/common/igfxfmid.h b/Source/inc/common/igfxfmid.h
index 034a9ee..73507db 100644
--- a/Source/inc/common/igfxfmid.h
+++ b/Source/inc/common/igfxfmid.h
@@ -64,6 +64,7 @@
IGFX_ICELAKE_LP,
IGFX_LAKEFIELD,
IGFX_ELKHARTLAKE,
+ IGFX_TIGERLAKE_LP,
IGFX_MAX_PRODUCT,
@@ -87,6 +88,7 @@
PCH_ICP_LP, // ICL LP PCH
PCH_ICP_N, // ICL N PCH
PCH_LKF, // LKF PCH
+ PCH_TGL_LP, // TGL LP PCH
PCH_CMP_LP, // CML LP PCH
PCH_CMP_H, // CML Halo PCH
PCH_CMP_V, // CML V PCH
@@ -111,6 +113,8 @@
IGFX_GEN10LP_CORE = 14, //Gen10 LP Family
IGFX_GEN11_CORE = 15, //Gen11 Family
IGFX_GEN11LP_CORE = 16, //Gen11 LP Family
+ IGFX_GEN12_CORE = 17, //Gen12 Family
+ IGFX_GEN12LP_CORE = 18, //Gen12 LP Family
//Please add new GENs BEFORE THIS !
IGFX_MAX_CORE,
@@ -280,6 +284,7 @@
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_5_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN5_CORE ) || \
@@ -292,6 +297,7 @@
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_5_75_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN5_75_CORE ) || \
@@ -302,6 +308,7 @@
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_6_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN6_CORE ) || \
@@ -319,6 +326,7 @@
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_7_5_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN7_5_CORE ) || \
@@ -326,31 +334,38 @@
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_8_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN8_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_8_CHV_OR_LATER(p) ( ( GFX_GET_CURRENT_PRODUCT(p) == IGFX_CHERRYVIEW ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_9_OR_LATER(p) ( ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN9_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_10_OR_LATER(p) (( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN10_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
#define GFX_IS_GEN_11_OR_LATER(p) (( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN11_CORE ) || \
+ ( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GEN12_CORE ) || \
( GFX_GET_CURRENT_RENDERCORE(p) == IGFX_GENNEXT_CORE ) )
+#define GFX_IS_GEN_12_OR_LATER(p) (( GFX_GET_CURRENT_RENDERCORE(p) >= IGFX_GEN12_CORE ))
#define GFX_IS_ATOM_PRODUCT_FAMILY(p) ( GFX_IS_PRODUCT(p, IGFX_VALLEYVIEW) || \
GFX_IS_PRODUCT(p, IGFX_CHERRYVIEW) || \
GFX_IS_PRODUCT(p, IGFX_BROXTON) )
@@ -1144,6 +1159,18 @@
#define IICL_LP_1x4x8_LOW_MEDIA_ULT_DEVICE_F0_ID 0x8A56
#define IICL_LP_1x4x8_LOW_MEDIA_ULX_DEVICE_F0_ID 0x8A58
+//TGL LP
+#define IGEN12LP_GT1_MOB_DEVICE_F0_ID 0xFF20
+#define ITGL_LP_1x6x16_UNKNOWN_SKU_F0_ID_5 0x9A49 // Remove this once newer enums are merged in OpenCL. Added this to avoid build failure with Linux/OpenCL.
+#define ITGL_LP_1x6x16_ULT_15W_DEVICE_F0_ID 0x9A49 // Mobile - U42 - 15W
+#define ITGL_LP_1x6x16_ULX_5_2W_DEVICE_F0_ID 0x9A40 // Mobile - Y42 - 5.2W
+#define ITGL_LP_1x6x16_ULT_12W_DEVICE_F0_ID 0x9A59 // Mobile - U42 - 12W
+#define ITGL_LP_1x2x16_HALO_45W_DEVICE_F0_ID 0x9A60 // Halo - H81 - 45W
+#define ITGL_LP_1x2x16_DESK_65W_DEVICE_F0_ID 0x9A68 // Desktop - S81 - 35W/65W/95W
+#define ITGL_LP_1x2x16_HALO_WS_45W_DEVICE_F0_ID 0x9A70 // Mobile WS - H81 - 45W
+#define ITGL_LP_1x2x16_DESK_WS_65W_DEVICE_F0_ID 0x9A78 // Desktop WS- S81 - 35W/65W/95W
+#define ITGL_LP_GT0_ULT_DEVICE_F0_ID 0x9A7F // GT0 - No GFX, Display Only
+
//LKF
#define ILKF_1x8x8_DESK_DEVICE_F0_ID 0x9840
#define ILKF_GT0_DESK_DEVICE_A0_ID 0x9850
@@ -1193,6 +1220,39 @@
// LKF-PCH Device IDs
#define PCH_LKF_UNFUSED_SKU_ID 0x9880
#define PCH_LKF_SUPER_SKU_ID 0x9881
+// TGL_LP PCH Device ID range 0xA080-0xA09F
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_1 0xA080
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_2 0xA081
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_3 0xA082
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_4 0xA083
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_5 0xA084
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_6 0xA085
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_7 0xA086
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_8 0xA087
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_9 0xA088
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_10 0xA089
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_11 0xA08A
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_12 0xA08B
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_13 0xA08C
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_14 0xA08D
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_15 0xA08E
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_16 0xA08F
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_17 0xA090
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_18 0xA091
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_19 0xA092
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_20 0xA093
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_21 0xA094
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_22 0xA095
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_23 0xA096
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_24 0xA097
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_25 0xA098
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_26 0xA099
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_27 0xA09A
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_28 0xA09B
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_29 0xA09C
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_30 0xA09D
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_31 0xA09E
+#define PCH_TGL_LP_UNKNOWN_SKU_ID_32 0xA09F
//define CML LP PCH Device Ids
#define PCH_CMP_LP_DEV_P1_ID 0x0280
diff --git a/Source/inc/common/sku_wa.h b/Source/inc/common/sku_wa.h
index a82a5bf..e9e46a7 100644
--- a/Source/inc/common/sku_wa.h
+++ b/Source/inc/common/sku_wa.h
@@ -66,6 +66,8 @@
unsigned int FtrVERing : 1; // Separate Ring for VideoEnhancement commands
unsigned int FtrVcs2 : 1; // Second VCS engine supported on Gen8 to Gen10 (in some configurations);
unsigned int FtrLCIA : 1; // Indicates Atom (Low Cost Intel Architecture)
+ unsigned int FtrCCSRing : 1; // To indicate if CCS hardware ring support is present.
+ unsigned int FtrCCSNode : 1; // To indicate if CCS Node support is present.
unsigned int FtrTileY : 1; // Identifies Legacy tiles TileY/Yf/Ys on the platform
};
@@ -75,6 +77,7 @@
unsigned int FtrPPGTT : 1; // Per-Process GTT
unsigned int FtrIA32eGfxPTEs : 1; // GTT/PPGTT's use 64-bit IA-32e PTE format.
 unsigned int FtrMemTypeMocsDeferPAT : 1; // Pre-Gen12 MOCS can defer to PAT, e.g. eLLC Target Cache for MOCS
unsigned int FtrPml4Support : 1; // PML4-based gfx page tables are supported (in addition to PD-based tables).
unsigned int FtrSVM : 1; // Shared Virtual Memory (i.e. support for SVM buffers which can be accessed by both the CPU and GPU at numerically equivalent addresses.)
unsigned int FtrTileMappedResource : 1; // Tiled Resource support aka Sparse Textures.
@@ -82,17 +85,19 @@
unsigned int FtrUserModeTranslationTable : 1; // User mode managed Translation Table support for Tiled Resources.
unsigned int FtrNullPages : 1; // Support for PTE-based Null pages for Sparse/Tiled Resources).
unsigned int FtrEDram : 1; // embedded DRAM enable
+ unsigned int FtrLLCBypass : 1; // Partial tunneling of UC memory traffic via CCF (LLC Bypass)
unsigned int FtrCrystalwell : 1; // Crystalwell Sku
unsigned int FtrCentralCachePolicy : 1; // Centralized Cache Policy
unsigned int FtrWddm2GpuMmu : 1; // WDDMv2 GpuMmu Model (Set in platform SKU files, but disabled by GMM as appropriate for given system.)
unsigned int FtrWddm2Svm : 1; // WDDMv2 SVM Model (Set in platform SKU files, but disabled by GMM as appropriate for given system.)
unsigned int FtrStandardMipTailFormat : 1; // Dx Standard MipTail Format for TileYf/Ys
unsigned int FtrWddm2_1_64kbPages : 1; // WDDMv2.1 64KB page support
+ unsigned int FtrE2ECompression : 1; // E2E Compression ie Aux Table support
+ unsigned int FtrLinearCCS : 1; // Linear Aux surface is supported
unsigned int FtrFrameBufferLLC : 1; // Displayable Frame buffers cached in LLC
unsigned int FtrDriverFLR : 1; // Enable Function Level Reset (Gen11+)
unsigned int FtrLocalMemory : 1;
- unsigned int FtrLLCBypass : 1; // Partial tunneling of UC memory traffic via CCF (LLC Bypass)
- };
+ };
struct //_sku_3d
@@ -363,6 +368,12 @@
WA_BUG_PERF_IMPACT, WA_COMPONENT_GMM)
WA_DECLARE(
+ WaMemTypeIsMaxOfPatAndMocs,
+ "WA to set PAT.MT = UC. Since TGLLP uses MAX function to resolve PAT vs MOCS MemType So unless PTE.PAT says UC, MOCS won't be able to set UC!",
+ WA_BUG_TYPE_FUNCTIONAL,
+ WA_BUG_PERF_IMPACT, WA_COMPONENT_GMM)
+
+ WA_DECLARE(
WaGttPat0GttWbOverOsIommuEllcOnly,
"WA to set PAT0 to full cacheable (LLC+eLLC) for GTT access over eLLC only usage for OS based SVM",
WA_BUG_TYPE_FUNCTIONAL,
diff --git a/Source/inc/umKmInc/UmKmDmaPerfTimer.h b/Source/inc/umKmInc/UmKmDmaPerfTimer.h
index b780893..e32bd0e 100644
--- a/Source/inc/umKmInc/UmKmDmaPerfTimer.h
+++ b/Source/inc/umKmInc/UmKmDmaPerfTimer.h
@@ -104,6 +104,7 @@
#define PERFTAG_UNKNOWN_BITS(PerfTag) ( PerfTag & (ULONG)0xFFFF0000 ) // Bits[16,31] Usage component specific
#define PERFTAG_FRAMEID(PerfTag) ( PerfTag & (ULONG)0x00FF0000 ) // Bits[16,23] Media Specific - frame id
#define PERFTAG_BUFFERID(PerfTag) ( PerfTag & (ULONG)0x0F000000 ) // Bits[24,27] Media Specific - buffer id
+#define PERFTAG_BATCHBUFFERID(PerfTag) ( PerfTag & (ULONG)0xF0000000 ) // Bits[28,31] Media Specific - batch buffer id
#define PERFTAG_FRAMEID_SHIFT 16
#define PERFTAG_BUFFERID_SHIFT 24
#define PERFTAG_BATCHBUFFERID_SHIFT 28
@@ -328,6 +329,13 @@
VPHAL_FDFB_FB_EYE_SAHDOW,
VPHAL_FDFB_FB_EYE_COLOR,
+ // SR
+ VPHAL_SR_CONV_1X1_32_5,
+ VPHAL_SR_CONV_1X1_5_32,
+ VPHAL_SR_CONV_3X3,
+ VPHAL_SR_SUBPIXEL_CONV_2X2,
+ VPHAL_SR_CONV_5X5_Y8,
+
// ADD TAGS FOR NEW ADVPROC KRNS HERE
VPHAL_PERFTAG_MAX
diff --git a/Source/inc/umKmInc/UmKmEnum.h b/Source/inc/umKmInc/UmKmEnum.h
index b08da31..f4b633b 100644
--- a/Source/inc/umKmInc/UmKmEnum.h
+++ b/Source/inc/umKmInc/UmKmEnum.h
@@ -54,7 +54,7 @@
GPUNODE_BLT = 2, // available on GT
GPUNODE_VE = 3, // available on HSW+ (VideoEnhancement), virtual node
GPUNODE_VCS2 = 4, // available on BDW/SKL/KBL GT3+ and CNL,
- GPUNODE_RESERVED = 5, //
+ GPUNODE_CCS0 = 5, //
GPUNODE_REAL_MAX, // all nodes beyond this are virtual nodes - they don't have an actual GPU engine
GPUNODE_PICS = 6, // available on CNL+. Real node but only for KMD internal use. Hence kept after GPUNODE_REAL_MAX (Note: We need to keep it before overlay node)
GPUNODE_OVERLAY = 7,