| /*************************************************************************/ /*! |
| @File |
| @Title Implementation of PMR functions for OS managed memory |
| @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved |
| @Description Part of the memory management. This module is responsible for |
| implementing the function callbacks for physical memory borrowed |
| from that normally managed by the operating system. |
| @License MIT |
| |
| The contents of this file are subject to the MIT license as set out below. |
| |
| Permission is hereby granted, free of charge, to any person obtaining a copy |
| of this software and associated documentation files (the "Software"), to deal |
| in the Software without restriction, including without limitation the rights |
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
| copies of the Software, and to permit persons to whom the Software is |
| furnished to do so, subject to the following conditions: |
| |
| The above copyright notice and this permission notice shall be included in |
| all copies or substantial portions of the Software. |
| |
| This License is also included in this distribution in the file called |
| "MIT-COPYING". |
| |
| EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS |
| PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING |
| BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR |
| PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR |
| COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER |
| IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| */ /**************************************************************************/ |
| /* include/ */ |
| |
| #include <mutex> |
| #include <unordered_map> |
| #include <vector> |
| |
| extern "C" { |
| #include "rgx_heaps.h" |
| #include "img_types.h" |
| #include "pvrsrv_error.h" |
| #include "pvrsrv_memallocflags.h" |
| /* services/server/include/ */ |
| #include "allocmem.h" |
| #include "osfunc.h" |
| #include "pmr.h" |
| #include "pmr_impl.h" |
| #include "devicemem_server_utils.h" |
| #include "physmem_dmabuf.h" |
| |
| /* ourselves */ |
| #include "physmem_osmem.h" |
| } |
| |
| #include "fuchsia/msd_img_buffer.h" |
| #include "fuchsia/msd_img_connection.h" |
| #include "fuchsia/msd_img_device.h" |
| #include "magma_util/macros.h" |
| #include "platform_buffer.h" |
| #include "platform_bus_mapper.h" |
| |
// Logs the call site of an unimplemented PMR callback. Wrapped in
// do { } while (0) so the macro behaves as a single statement: the previous
// form carried its own trailing semicolon, which made
// `if (cond) NOT_IMPLEMENTED(); else ...` a compile error and silently
// produced an empty extra statement everywhere else.
#define NOT_IMPLEMENTED()                                                                          \
    do                                                                                             \
    {                                                                                              \
        fprintf(stderr, PVR_BUILD_DIR ": Not implemented in %s:%s:%d\n", __func__, __FILE__,       \
                __LINE__);                                                                         \
    } while (0)
| |
| |
namespace
{
// Guards imported_pmr_map below; taken on import, on insert after a
// successful PMRCreatePMR, and in ~MagmaPhysicalMemoryResource on erase.
std::mutex imported_pmr_map_mutex;
// Map from koids to PMRs that use them (for imported PMRs only). We keep this
// map around to ensure we don't make too many PMRs for a buffer, because
// pinning each of them could run into the VM_PAGE_OBJECT_MAX_PIN_COUNT limit on
// pins for a page.
MAGMA_GUARDED(imported_pmr_map_mutex) std::unordered_map<uint64_t, PMR *> imported_pmr_map;
} // namespace
| |
// Private data attached to each PMR created by this backend (passed to the
// PMR layer as PMR_IMPL_PRIVDATA and freed by Finalize). Owns the magma
// buffer backing the PMR and, while locked, the bus-address pinning.
struct MagmaPhysicalMemoryResource
{
    ~MagmaPhysicalMemoryResource()
    {
        // Remove this buffer's dedup entry so a later import of the same
        // koid can create a fresh PMR.
        if (in_imported_pmr_map)
        {
            std::lock_guard<std::mutex> lock(imported_pmr_map_mutex);
            imported_pmr_map.erase(buffer->id());
        }
    }
    // Device node this PMR was created for. Stored whole because pvOSDevice
    // may not be initialized yet at creation time (see
    // PhysmemNewOSRamBackedPMR); dereferenced lazily in LockPhysAddresses.
    PVRSRV_DEVICE_NODE *psDevNode;
    // The backing memory; always non-null after successful construction paths.
    std::unique_ptr<magma::PlatformBuffer> buffer;
    // Non-null while the pages are pinned for device access.
    std::unique_ptr<magma::PlatformBusMapper::BusMapping> bus_mapping;
    // True once this PMR has been recorded in imported_pmr_map (imports only).
    bool in_imported_pmr_map = false;
    // True if the buffer came from a dma-buf import; such buffers stay pinned
    // for their whole lifetime (see UnlockPhysAddresses).
    bool imported_dma_buf = false;
};
| |
| static PVRSRV_ERROR |
| LockPhysAddresses(PMR_IMPL_PRIVDATA data) |
| { |
| MagmaPhysicalMemoryResource *pmr = reinterpret_cast<MagmaPhysicalMemoryResource *>(data); |
| auto device = MsdImgDevice::cast(pmr->psDevNode->psDevConfig->pvOSDevice); |
| if (pmr->bus_mapping) |
| { |
| PVR_ASSERT(pmr->imported_dma_buf); |
| return PVRSRV_OK; |
| } |
| pmr->bus_mapping = |
| device->bus_mapper()->MapPageRangeBus(pmr->buffer.get(), 0, pmr->buffer->size() / magma::page_size()); |
| if (!pmr->bus_mapping) |
| { |
| // Could instead be PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, or similar. |
| return PVRSRV_ERROR_OUT_OF_MEMORY; |
| } |
| return PVRSRV_OK; |
| } |
| |
| static PVRSRV_ERROR |
| UnlockPhysAddresses(PMR_IMPL_PRIVDATA data) |
| { |
| MagmaPhysicalMemoryResource *pmr = reinterpret_cast<MagmaPhysicalMemoryResource *>(data); |
| // For imported memory we don't trust all the clients to keep the lock |
| // and unlock counts correct, so never unpin. |
| if (!pmr->imported_dma_buf) |
| pmr->bus_mapping.reset(); |
| return PVRSRV_OK; |
| } |
| |
| static PVRSRV_ERROR |
| DevPhysAddr(PMR_IMPL_PRIVDATA pvPriv, |
| IMG_UINT32 ui32Log2PageSize, |
| IMG_UINT32 ui32NumOfAddr, |
| IMG_DEVMEM_OFFSET_T *puiOffset, |
| IMG_BOOL *pbValid, |
| IMG_DEV_PHYADDR *psDevAddrPtr) |
| { |
| MagmaPhysicalMemoryResource *pmr = reinterpret_cast<MagmaPhysicalMemoryResource *>(pvPriv); |
| if (ui32Log2PageSize != magma::page_shift()) |
| { |
| NOT_IMPLEMENTED(); |
| return PVRSRV_ERROR_NOT_SUPPORTED; |
| } |
| for (uint32_t i = 0; i < ui32NumOfAddr; i++) |
| { |
| if (pbValid[i]) |
| { |
| uint64_t page_index = puiOffset[i] >> magma::page_shift(); |
| uint64_t in_page_offset = puiOffset[i] & (magma::page_size() - 1); |
| psDevAddrPtr[i].uiAddr = pmr->bus_mapping->Get()[page_index] + in_page_offset; |
| } |
| } |
| return PVRSRV_OK; |
| } |
| |
// Opaque token returned from AcquireKernelMappingData. Carries no state; it
// only gives ReleaseKernelMappingData something concrete to delete.
struct MapHandle
{
    uint32_t unused;
};
| |
| static PVRSRV_ERROR |
| AcquireKernelMappingData(PMR_IMPL_PRIVDATA pvPriv, |
| size_t uiOffset, |
| size_t uiSize, |
| void **ppvKernelAddressOut, |
| IMG_HANDLE *phHandleOut, |
| PMR_FLAGS_T ulFlags) |
| { |
| MagmaPhysicalMemoryResource *pmr = reinterpret_cast<MagmaPhysicalMemoryResource *>(pvPriv); |
| void *cpu_map; |
| // Map entire buffer for now. Also ignore flags. |
| if (!pmr->buffer->MapCpu(&cpu_map)) |
| { |
| return PVRSRV_ERROR_OUT_OF_MEMORY; |
| } |
| *ppvKernelAddressOut = reinterpret_cast<uint8_t *>(cpu_map) + uiOffset; |
| *phHandleOut = new MapHandle; |
| return PVRSRV_OK; |
| } |
| |
| static void |
| ReleaseKernelMappingData(PMR_IMPL_PRIVDATA pvPriv, IMG_HANDLE hHandle) |
| { |
| MagmaPhysicalMemoryResource *pmr = reinterpret_cast<MagmaPhysicalMemoryResource *>(pvPriv); |
| pmr->buffer->UnmapCpu(); |
| delete reinterpret_cast<MapHandle *>(hHandle); |
| } |
| |
| static PVRSRV_ERROR |
| ChangeSparseMem(PMR_IMPL_PRIVDATA pPriv, |
| const PMR *psPMR, |
| IMG_UINT32 ui32AllocPageCount, |
| IMG_UINT32 *pai32AllocIndices, |
| IMG_UINT32 ui32FreePageCount, |
| IMG_UINT32 *pai32FreeIndices, |
| IMG_UINT32 uiFlags) |
| { |
| NOT_IMPLEMENTED(); |
| return PVRSRV_ERROR_NOT_SUPPORTED; |
| } |
| |
| static PVRSRV_ERROR |
| ChangeSparseMemCpuMap(PMR_IMPL_PRIVDATA pPriv, |
| const PMR *psPMR, |
| IMG_UINT64 sCpuVAddrBase, |
| IMG_UINT32 ui32AllocPageCount, |
| IMG_UINT32 *pai32AllocIndices, |
| IMG_UINT32 ui32FreePageCount, |
| IMG_UINT32 *pai32FreeIndices) |
| { |
| NOT_IMPLEMENTED(); |
| return PVRSRV_ERROR_NOT_SUPPORTED; |
| } |
| |
| static PVRSRV_ERROR |
| Finalize(PMR_IMPL_PRIVDATA data) |
| { |
| MagmaPhysicalMemoryResource *pmr = reinterpret_cast<MagmaPhysicalMemoryResource *>(data); |
| delete pmr; |
| return PVRSRV_OK; |
| } |
| |
// Callback table handed to PMRCreatePMR for every PMR this backend creates.
// Members not listed here are zero-initialized (left null).
static const PMR_IMPL_FUNCTAB kPmrTable = {
    .pfnLockPhysAddresses = LockPhysAddresses,
    .pfnUnlockPhysAddresses = UnlockPhysAddresses,
    .pfnDevPhysAddr = DevPhysAddr,
    .pfnAcquireKernelMappingData = AcquireKernelMappingData,
    .pfnReleaseKernelMappingData = ReleaseKernelMappingData,
    .pfnChangeSparseMem = ChangeSparseMem,
    .pfnChangeSparseMemCPUMap = ChangeSparseMemCpuMap,
    .pfnFinalize = Finalize,
};
| |
| |
| PVRSRV_ERROR |
| PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode, |
| IMG_DEVMEM_SIZE_T uiSize, |
| IMG_DEVMEM_SIZE_T uiChunkSize, |
| IMG_UINT32 ui32NumPhysChunks, |
| IMG_UINT32 ui32NumVirtChunks, |
| IMG_UINT32 *puiAllocIndices, |
| IMG_UINT32 uiLog2AllocPageSize, |
| PVRSRV_MEMALLOCFLAGS_T uiFlags, |
| const IMG_CHAR *pszAnnotation, |
| IMG_PID uiPid, |
| PMR **ppsPMRPtr) |
| { |
| auto pmr = std::make_unique<MagmaPhysicalMemoryResource>(); |
| // This may be called before the pvOSDevice is initialized, so store the |
| // entire device node for use later. |
| pmr->psDevNode = psDevNode; |
| |
| auto connection = MsdImgConnection::GetCurrentConnection(); |
| if (connection) |
| { |
| auto additional_buffer = connection->TakeAdditionalBuffer(); |
| if (additional_buffer) |
| { |
| uint32_t buffer_handle; |
| if (!additional_buffer->platform_buffer()->duplicate_handle(&buffer_handle)) |
| { |
| return DRET(PVRSRV_ERROR_INTERNAL_ERROR); |
| } |
| pmr->buffer = magma::PlatformBuffer::Import(buffer_handle); |
| if (!pmr->buffer) |
| { |
| return DRET_MSG(PVRSRV_ERROR_INTERNAL_ERROR, "Failed to import platform buffer"); |
| } |
| if (pmr->buffer->size() != uiSize) |
| { |
| return DRET_MSG(PVRSRV_ERROR_INTERNAL_ERROR, "Input buffer size: %ld doesn't match %ld", |
| pmr->buffer->size(), uiSize); |
| } |
| } |
| } |
| if (!pmr->buffer) |
| { |
| pmr->buffer = magma::PlatformBuffer::Create(uiSize, pszAnnotation); |
| } |
| uint32_t cpu_cache_flags; |
| PVRSRV_ERROR eError; |
| eError = DevmemCPUCacheMode(psDevNode, uiFlags, &cpu_cache_flags); |
| if (eError != PVRSRV_OK) |
| { |
| return DRET(eError); |
| } |
| if (PVRSRV_CHECK_CPU_UNCACHED(cpu_cache_flags)) |
| { |
| if (!pmr->buffer->SetCachePolicy(MAGMA_CACHE_POLICY_UNCACHED)) |
| { |
| return DRET(PVRSRV_ERROR_OUT_OF_MEMORY); |
| } |
| } |
| else if (PVRSRV_CHECK_CPU_WRITE_COMBINE(cpu_cache_flags)) |
| { |
| if (!pmr->buffer->SetCachePolicy(MAGMA_CACHE_POLICY_WRITE_COMBINING)) |
| { |
| return DRET(PVRSRV_ERROR_OUT_OF_MEMORY); |
| } |
| } |
| else if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)) |
| { |
| if (!pmr->buffer->CleanCache(0u, pmr->buffer->size(), true)) |
| { |
| return DRET(PVRSRV_ERROR_OUT_OF_MEMORY); |
| } |
| } |
| |
| PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]; |
| // Don't insert in the PMR map because it's theoretically possible the |
| // newly-allocated PMR could be treated differently from an imported |
| // PMR. |
| return PMRCreatePMR(psDevNode, |
| psPhysHeap, |
| uiSize, |
| uiChunkSize, |
| ui32NumPhysChunks, |
| ui32NumVirtChunks, |
| puiAllocIndices, |
| magma::page_shift(), |
| 0, |
| pszAnnotation, |
| &kPmrTable, |
| pmr.release(), |
| PMR_TYPE_OSMEM, |
| ppsPMRPtr, |
| 0); |
| } |
| |
| |
| PVRSRV_ERROR |
| PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, PVRSRV_DEVICE_NODE *psDevNode, PMR *psPMR, IMG_INT *piFd) |
| { |
| NOT_IMPLEMENTED(); |
| return PVRSRV_ERROR_NOT_IMPLEMENTED; |
| } |
| |
/*************************************************************************/ /*!
@Function       PhysmemImportDmaBuf
@Description    Import externally-allocated memory as a PMR. On Fuchsia the
                buffer arrives via the connection's staged "additional
                buffer" rather than through the fd parameter (fd is unused
                here — presumably a placeholder for the Linux dma-buf path;
                confirm against the client side). If a PMR already exists
                for the buffer's koid it is re-referenced and returned, so
                repeated imports don't accumulate pins on the same pages.
@Output         ppsPMRPtr  The imported (or re-referenced) PMR.
@Output         puiSize    Size of the backing buffer in bytes.
@Output         puiAlign   Alignment (always the system page size).
@Return         PVRSRV_OK on success, otherwise a PVRSRV_ERROR code.
*/ /**************************************************************************/
PVRSRV_ERROR
PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
                    PVRSRV_DEVICE_NODE *psDevNode,
                    IMG_INT fd,
                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
                    IMG_UINT32 ui32NameSize,
                    IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
                    PMR **ppsPMRPtr,
                    IMG_DEVMEM_SIZE_T *puiSize,
                    IMG_DEVMEM_ALIGN_T *puiAlign)
{
    auto connection = MsdImgConnection::Cast(psConnection->hOsPrivateData);
    if (!connection)
    {
        return DRET(PVRSRV_ERROR_INVALID_PARAMS);
    }
    auto additional_buffer = connection->TakeAdditionalBuffer();
    if (!additional_buffer)
    {
        return DRET(PVRSRV_ERROR_INTERNAL_ERROR);
    }


    *puiAlign = magma::page_size();
    *puiSize = additional_buffer->platform_buffer()->size();
    // Fast path: a PMR for this buffer (keyed by koid) may already exist.
    {
        std::lock_guard<std::mutex> lock(imported_pmr_map_mutex);
        auto it = imported_pmr_map.find(additional_buffer->platform_buffer()->id());
        if (it != imported_pmr_map.end())
        {
            PMR *found_pmr = it->second;
            if (PMR_DeviceNode(found_pmr) != psDevNode)
            {
                // On Fuchsia there should be only one dev node, but
                // better to check for sure.
                PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n", __func__));
                return PVRSRV_ERROR_PMR_NOT_PERMITTED;
            }

            PMRRefPMR(found_pmr);
            *ppsPMRPtr = found_pmr;
            return PVRSRV_OK;
        }
    }

    auto pmr = std::make_unique<MagmaPhysicalMemoryResource>();
    pmr->psDevNode = psDevNode;
    uint32_t buffer_handle;
    if (!additional_buffer->platform_buffer()->duplicate_handle(&buffer_handle))
    {
        return DRET(PVRSRV_ERROR_INTERNAL_ERROR);
    }
    pmr->buffer = magma::PlatformBuffer::Import(buffer_handle);
    if (!pmr->buffer)
    {
        return DRET(PVRSRV_ERROR_INTERNAL_ERROR);
    }
    // Single physical/virtual chunk covering the whole buffer.
    IMG_UINT32 ui32MappingTable = 0;
    // Ensure name from client is null terminated.
    pszName[DEVMEM_ANNOTATION_MAX_LEN - 1] = 0;
    // Marks the buffer as permanently pinned; see UnlockPhysAddresses.
    pmr->imported_dma_buf = true;
    auto pmr_ptr = pmr.get();
    // Force addresses to be locked. Don't use PMRLockSysPhysAddresses
    // because that'll increase the refcount and that will never be dropped.
    PVRSRV_ERROR eError = LockPhysAddresses(pmr_ptr);
    if (eError != PVRSRV_OK)
    {
        return DRET(eError);
    }
    // NOTE(review): pmr.release() hands the raw pointer to PMRCreatePMR; if
    // that call fails, nothing here frees it — presumably the PMR layer
    // invokes pfnFinalize on its failure path, but confirm (otherwise this
    // leaks the resource and its pin).
    eError = PMRCreatePMR(psDevNode, // psDevNode
                          psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL], // psPhysHeap
                          pmr->buffer->size(), // uiLogicalSize
                          pmr->buffer->size(), // uiChunkSize
                          1, // ui32NumPhysChunks
                          1, // ui32NumVirtChunks
                          &ui32MappingTable, // pui32MappingTable
                          magma::page_shift(), // uiLog2ContiguityGuarantee
                          static_cast<PMR_FLAGS_T>(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK), // uiFlags
                          pszName, // pszAnnotation
                          &kPmrTable, // psFuncTab
                          pmr.release(), // pvPrivData
                          PMR_TYPE_OSMEM, // eType
                          ppsPMRPtr, // ppsPMRPtr
                          0); // ui32PDumpFlags
    if (eError == PVRSRV_OK)
    {
        // Record the new PMR for koid-based dedup; the destructor erases the
        // entry. Insertion can fail only if another thread raced us in,
        // in which case this PMR simply isn't the canonical one.
        std::lock_guard<std::mutex> lock(imported_pmr_map_mutex);
        if (imported_pmr_map.insert(std::make_pair(pmr_ptr->buffer->id(), *ppsPMRPtr)).second)
            pmr_ptr->in_imported_pmr_map = true;
    }

    return eError;
}
| |
| PVRSRV_ERROR |
| PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, |
| PVRSRV_DEVICE_NODE *psDevNode, |
| IMG_INT fd, |
| PVRSRV_MEMALLOCFLAGS_T uiFlags, |
| IMG_DEVMEM_SIZE_T uiChunkSize, |
| IMG_UINT32 ui32NumPhysChunks, |
| IMG_UINT32 ui32NumVirtChunks, |
| IMG_UINT32 *pui32MappingTable, |
| IMG_UINT32 ui32NameSize, |
| const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], |
| PMR **ppsPMRPtr, |
| IMG_DEVMEM_SIZE_T *puiSize, |
| IMG_DEVMEM_ALIGN_T *puiAlign) |
| { |
| NOT_IMPLEMENTED(); |
| return PVRSRV_ERROR_NOT_IMPLEMENTED; |
| } |