/*
* Copyright © 2021 The Fuchsia Authors
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <zircon/process.h>
#include <zircon/syscalls.h>
#include "inflight_list.h"
#include "mos_bufmgr.h"
#include "mos_bufmgr_priv.h"
#include "simple_allocator.h"
#include <lib/magma/magma.h>
#include <magma_intel_gen_defs.h>
#include <magma_fd.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <atomic>
#include <memory>
#include <mutex>
#include <vector>
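// Verbose logging is compiled out by default; flip the |false| below to |true|
// to enable it locally.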
#define LOG_VERBOSE(msg, ...) do { \
if (false) {\
fprintf(stderr, "mos_bufmgr_magma:%d " msg "\n", __LINE__, ##__VA_ARGS__); \
fflush(stderr); \
} \
} while (0)
static inline uint64_t page_size() { return sysconf(_SC_PAGESIZE); }
class MagmaBo;
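// Buffer manager backed by a magma connection. Implements the mos_bufmgr
// function table and owns the GPU address allocator, the inflight command
// buffer list, and the pseudo VM id counter.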
class MagmaBufMgr : public mos_bufmgr {
public:
MagmaBufMgr(magma_connection_t connection, uint32_t device_id);
~MagmaBufMgr();
magma_connection_t connection() const { return connection_; }
magma_handle_t notification_handle() const { return notification_handle_; }
uint32_t device_id() const { return device_id_; }
bool Alloc(size_t size, uint8_t align_pow2, uint64_t* addr_out) {
std::lock_guard<std::mutex> lock(allocator_mutex_);
return allocator_->Alloc(size, align_pow2, addr_out);
}
bool Free(uint64_t addr) {
std::lock_guard<std::mutex> lock(allocator_mutex_);
return allocator_->Free(addr);
}
MagmaBo* CreateBo(const char *name, unsigned long size);
MagmaBo* ImportBo(int zx_handle, unsigned long size);
InflightList* inflight_list() { return &inflight_list_; }
uint32_t next_vm_id() {
uint32_t id = next_pseudo_vm_id_.fetch_add(1);
// Don't support wrap around.
assert(id != 0);
return id;
}
static constexpr uint32_t kInvalidVmId = 0;
// Sets the pseudo VM ID if not already set; returns the ID in effect.
uint32_t GetVmId(uint32_t vm_id) {
uint32_t invalid_id = kInvalidVmId;
vm_id_.compare_exchange_strong(invalid_id, vm_id);
return vm_id_.load();
}
private:
magma_connection_t connection_ {};
magma_handle_t notification_handle_{};
uint32_t device_id_;
std::mutex allocator_mutex_;
std::unique_ptr<SimpleAllocator> allocator_ __attribute__((__guarded_by__(allocator_mutex_)));
InflightList inflight_list_;
std::atomic_uint next_pseudo_vm_id_ {1};
std::atomic_uint vm_id_ { kInvalidVmId };
};
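// Reference-counted buffer object. Created with a refcount of one; deleted
// when RemoveRef() drops the count to zero.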
class MagmaBo : public mos_linux_bo {
public:
// MagmaBufMgr must outlive MagmaBo.
MagmaBo(magma_buffer_t buffer, uint64_t size, magma_buffer_id_t id, MagmaBufMgr *bufmgr);
private:
~MagmaBo();
public:
void AddRef() { ref_count_ += 1; }
void RemoveRef() {
// Use the value returned by the atomic decrement; re-reading ref_count_
// after a separate decrement races with other threads and could
// double-delete.
int32_t count = ref_count_.fetch_sub(1) - 1;
assert(count >= 0);
if (count == 0) {
delete this;
}
}
MagmaBufMgr* magma_bufmgr() const { return static_cast<MagmaBufMgr*>(bufmgr); }
uint64_t id() { return id_; }
magma_buffer_t buffer() const { return buffer_; }
bool is_mapped_gpu() { return is_mapped_gpu_; }
std::vector<magma_exec_resource>& exec_resources() { return exec_resources_; }
bool MapGpu();
void AddExecResource(MagmaBo* bo);
void clear_exec_resources() { exec_resources_.clear(); }
private:
std::atomic_int ref_count_{};
magma_buffer_t buffer_ {};
uint64_t id_ {};
bool is_mapped_gpu_ = false;
std::vector<magma_exec_resource> exec_resources_;
};
MagmaBo::MagmaBo(magma_buffer_t buffer, uint64_t size, magma_buffer_id_t id, MagmaBufMgr *bufmgr)
: buffer_(buffer), id_(id) {
// mos_linux_bo::size may be a narrower type; check the assignment didn't
// truncate.
mos_linux_bo::size = size;
assert(mos_linux_bo::size == size);
mos_linux_bo::align = 0;
mos_linux_bo::virt = nullptr;
mos_linux_bo::bufmgr = bufmgr;
mos_linux_bo::handle = 0;
mos_linux_bo::offset = 0;
mos_linux_bo::offset64 = 0;
mos_linux_bo::aux_mapped = false;
AddRef();
}
MagmaBo::~MagmaBo() {
magma_connection_release_buffer(magma_bufmgr()->connection(), buffer());
if (is_mapped_gpu_) {
magma_bufmgr()->Free(offset64);
}
}
void MagmaBo::AddExecResource(MagmaBo* bo) {
if (exec_resources_.empty()) {
// Add this as the first resource.
exec_resources_.push_back({
.buffer_id = id(),
.offset = 0,
.length = size,
});
}
exec_resources_.push_back({
.buffer_id = bo->id(),
.offset = 0,
.length = bo->size,
});
}
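// Allocates a GPU address for this buffer from the shared allocator and maps
// the whole buffer into the connection's address space. Idempotent.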
bool MagmaBo::MapGpu() {
if (is_mapped_gpu_)
return true;
if (!magma_bufmgr()->Alloc(size, __builtin_ctz(PAGE_SIZE_64K), &offset64)) {
LOG_VERBOSE("Alloc failed: size %lu", size);
return false;
}
// Magma rounds buffer sizes up to page granularity at creation, so this
// division is exact.
uint64_t page_count = size / page_size();
constexpr uint64_t kFlags = MAGMA_MAP_FLAG_READ | MAGMA_MAP_FLAG_EXECUTE |
MAGMA_MAP_FLAG_WRITE;
LOG_VERBOSE("Map buffer %lu addr 0x%lx", id(), offset64);
magma_status_t status =
magma_connection_map_buffer(magma_bufmgr()->connection(), offset64, buffer(),
/*page_offset=*/0, page_count * page_size(), kFlags);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_connection_map_buffer failed: %d", status);
// Don't leak the address range: the destructor only frees it when the
// mapping succeeded.
magma_bufmgr()->Free(offset64);
return false;
}
is_mapped_gpu_ = true;
return true;
}
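// Wraps a magma context. Tracks the engines targeted via
// mos_set_context_param_load_balance, the pseudo VM id shared across
// contexts, and whether ordering must be preserved across engines.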
class MagmaContext : public mos_linux_context {
public:
MagmaContext(struct mos_bufmgr* bufmgr, uint32_t context_id) {
this->ctx_id = context_id;
this->bufmgr = bufmgr;
this->pOsContext = nullptr;
this->vm = nullptr;
}
// Should only be called once.
void set_target_engines(std::vector<uint32_t>&& engines) {
assert(!engines.empty());
assert(target_engines_.empty());
target_engines_ = std::move(engines);
}
const std::vector<uint32_t>& target_engines() { return target_engines_; }
// Should only be called once.
void set_vm_id(uint32_t vm_id) {
assert(vm_id != 0);
assert(vm_id_ == 0);
vm_id_ = vm_id;
}
uint32_t vm_id() { return vm_id_; }
// Should only be called once.
void set_ensure_ordering_across_engines() {
assert(!ensure_ordering_across_engines_);
ensure_ordering_across_engines_ = true;
}
bool ensure_ordering_across_engines() { return ensure_ordering_across_engines_; }
static constexpr uint64_t kInitialFlags = 0;
// Sets the command buffer flags if not already set.
uint64_t GetCommandBufferFlags(uint64_t command_buffer_flags) {
uint64_t unset = kInitialFlags;
command_buffer_flags_.compare_exchange_strong(unset, command_buffer_flags);
return command_buffer_flags_.load();
}
private:
std::vector<uint32_t> target_engines_;
uint32_t vm_id_ = 0;
bool ensure_ordering_across_engines_ = false;
std::atomic_uint64_t command_buffer_flags_ {kInitialFlags};
};
static void bufmgr_destroy(struct mos_bufmgr* mos_bufmgr)
{
LOG_VERBOSE("bufmgr_destroy");
delete static_cast<MagmaBufMgr*>(mos_bufmgr);
}
static struct mos_linux_bo *bufmgr_bo_alloc_for_render(struct mos_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment,
int mem_type)
{
LOG_VERBOSE("bo_alloc_for_render not implemented");
return nullptr;
}
static struct mos_linux_bo *bufmgr_bo_alloc(struct mos_bufmgr *mos_bufmgr,
const char *name,
unsigned long size,
unsigned int /*alignment*/,
int /*mem_type*/)
{
MagmaBo* bo = static_cast<MagmaBufMgr*>(mos_bufmgr)->CreateBo(name, size);
if (bo)
LOG_VERBOSE("bo_alloc '%s' size %lu buffer %lu", name, size, bo->id());
return bo;
}
struct mos_linux_bo *mos_bo_gem_create_from_prime(struct mos_bufmgr *mos_bufmgr, int prime_fd, int size)
{
MagmaBo* bo = static_cast<MagmaBufMgr*>(mos_bufmgr)->ImportBo(prime_fd, size);
if (bo)
LOG_VERBOSE("bo_create_from_prime size %d buffer %lu", size, bo->id());
return bo;
}
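// Tile geometry behind the alignments below: X tiles are 512 bytes wide by 8
// rows, Y tiles are 128 bytes wide by 32 rows (4KB each); linear surfaces
// round to a 64 byte pitch.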
static uint64_t get_pitch_with_tiling(uint64_t pitch, uint32_t tiling)
{
switch (tiling) {
case I915_TILING_NONE:
return ROUND_UP_TO(pitch, 64);
case I915_TILING_X:
return ROUND_UP_TO(pitch, 512);
case I915_TILING_Y:
return ROUND_UP_TO(pitch, 128);
}
assert(false);
return 0;
}
static uint64_t get_height_with_tiling(uint64_t height, uint32_t tiling)
{
switch (tiling) {
case I915_TILING_NONE:
return ROUND_UP_TO(height, 2);
case I915_TILING_X:
return ROUND_UP_TO(height, 8);
case I915_TILING_Y:
return ROUND_UP_TO(height, 32);
}
assert(false);
return 0;
}
static struct mos_linux_bo *bufmgr_bo_alloc_tiled(struct mos_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags,
int mem_type)
{
uint32_t tiling = *tiling_mode;
uint64_t stride = get_pitch_with_tiling(x * cpp, tiling);
uint64_t height = get_height_with_tiling(y, tiling);
uint64_t size = ROUND_UP_TO(stride * height, 4096);
*pitch = stride;
MagmaBo* bo = static_cast<MagmaBufMgr*>(bufmgr)->CreateBo(name, size);
if (bo)
LOG_VERBOSE("bo_alloc_tiled '%s' x %d y %d cpp %d tiling %u size %lu buffer %lu",
name, x, y, cpp, tiling, size, bo->id());
return bo;
}
static void bufmgr_bo_reference(struct mos_linux_bo *mos_linux_bo)
{
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
LOG_VERBOSE("bo_reference %lu\n", bo->id());
bo->AddRef();
}
static void bufmgr_bo_unreference(struct mos_linux_bo *mos_linux_bo)
{
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
LOG_VERBOSE("bo_unreference %lu", bo->id());
bo->RemoveRef();
}
static int bufmgr_bo_map(struct mos_linux_bo *mos_linux_bo, int write_enable)
{
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
assert(!bo->virt);
zx_handle_t vmo_handle;
{
magma_handle_t handle;
magma_status_t status = magma_buffer_get_handle(bo->buffer(), &handle);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_get_buffer_handle failed: status %d", status);
return -1;
}
vmo_handle = handle;
}
// mos_bufmgr always maps read/write on the CPU, so write_enable is ignored.
zx_vaddr_t zx_vaddr;
zx_status_t zx_status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
0 /*vmar_offset*/, vmo_handle, 0 /*offset*/, bo->size, &zx_vaddr);
zx_handle_close(vmo_handle);
if (zx_status != ZX_OK) {
LOG_VERBOSE("zx_vmar_map failed: status %d", zx_status);
return -1;
}
bo->virt = reinterpret_cast<void*>(zx_vaddr);
LOG_VERBOSE("mapped buffer %lu address %p write_enable %d", bo->id(), bo->virt, write_enable);
return 0;
}
static int bufmgr_bo_unmap(struct mos_linux_bo *mos_linux_bo)
{
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
zx_status_t status = zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<uintptr_t>(bo->virt), bo->size);
if (status != ZX_OK) {
LOG_VERBOSE("zx_vmar_unmap failed: %d", status);
return -1;
}
bo->virt = nullptr;
LOG_VERBOSE("unmapped buffer %lu", bo->id());
return 0;
}
static int bufmgr_bo_subdata(struct mos_linux_bo *mos_linux_bo, unsigned long offset,
unsigned long size, const void *data)
{
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
assert(offset + size >= offset);
assert(offset + size <= bo->size);
if (!bo->virt) {
LOG_VERBOSE("bo_subdata: not mapped");
return -1;
}
LOG_VERBOSE("bo_subdata %lu offset %lu size %lu", bo->id(), offset, size);
memcpy(reinterpret_cast<uint8_t*>(bo->virt) + offset, data, size);
return 0;
}
static int bufmgr_bo_set_softpin(struct mos_linux_bo *bo)
{
// Never called because buffers are mapped at creation.
assert(false);
return 0;
}
static int bufmgr_bo_add_softpin_target(struct mos_linux_bo *bo_base, struct mos_linux_bo *target_bo_base, bool /*write_flag*/)
{
auto bo = static_cast<MagmaBo*>(bo_base);
auto target_bo = static_cast<MagmaBo*>(target_bo_base);
bo->AddExecResource(target_bo);
LOG_VERBOSE("bo_add_softpin_target bo %lu target_bo %lu", bo->id(), target_bo->id());
return 0;
}
// Stubs
extern int bufmgr_bo_get_subdata(struct mos_linux_bo *bo, unsigned long offset,
unsigned long size, void *data);
extern void bufmgr_bo_wait_rendering(struct mos_linux_bo *bo);
extern int bufmgr_bo_pad_to_size(struct mos_linux_bo *bo, uint64_t pad_to_size);
extern int bufmgr_bo_emit_reloc(struct mos_linux_bo *bo, uint32_t offset,
struct mos_linux_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
extern int bufmgr_bo_emit_reloc2(struct mos_linux_bo *bo, uint32_t offset,
struct mos_linux_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain,
uint64_t presumed_offset);
extern int bufmgr_bo_emit_reloc_fence(struct mos_linux_bo *bo, uint32_t offset,
struct mos_linux_bo *target_bo,
uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
extern int bufmgr_bo_pin(struct mos_linux_bo *bo, uint32_t alignment);
extern int bufmgr_bo_unpin(struct mos_linux_bo *bo);
extern int bufmgr_bo_get_tiling(struct mos_linux_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
extern int bufmgr_bo_set_tiling(struct mos_linux_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
extern int bufmgr_bo_flink(struct mos_linux_bo *bo, uint32_t * name);
extern int bufmgr_bo_exec2(struct mos_linux_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
extern int bufmgr_bo_mrb_exec2(struct mos_linux_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int flags);
extern int bufmgr_bo_busy(struct mos_linux_bo *bo);
extern int bufmgr_bo_madvise(struct mos_linux_bo *bo, int madv);
extern int bufmgr_check_aperture_space(struct mos_linux_bo **bo_array, int count);
extern int bufmgr_bo_disable_reuse(struct mos_linux_bo *bo);
extern int bufmgr_bo_is_reusable(struct mos_linux_bo *bo);
extern int bufmgr_get_pipe_from_crtc_id(struct mos_bufmgr *bufmgr, int crtc_id);
extern int bufmgr_bo_references(struct mos_linux_bo *bo, struct mos_linux_bo *target_bo);
MagmaBufMgr::MagmaBufMgr(magma_connection_t connection, uint32_t device_id) :
connection_(connection), device_id_(device_id) {
notification_handle_ = magma_connection_get_notification_channel_handle(connection_);
mos_bufmgr::bo_alloc = bufmgr_bo_alloc;
mos_bufmgr::bo_alloc_for_render = bufmgr_bo_alloc_for_render;
mos_bufmgr::bo_alloc_tiled = bufmgr_bo_alloc_tiled;
mos_bufmgr::bo_reference = bufmgr_bo_reference;
mos_bufmgr::bo_unreference = bufmgr_bo_unreference;
mos_bufmgr::bo_map = bufmgr_bo_map;
mos_bufmgr::bo_unmap = bufmgr_bo_unmap;
mos_bufmgr::bo_subdata = bufmgr_bo_subdata;
mos_bufmgr::bo_get_subdata = bufmgr_bo_get_subdata;
mos_bufmgr::bo_wait_rendering = bufmgr_bo_wait_rendering;
mos_bufmgr::bo_pad_to_size = bufmgr_bo_pad_to_size;
mos_bufmgr::bo_emit_reloc = bufmgr_bo_emit_reloc;
mos_bufmgr::bo_emit_reloc2 = bufmgr_bo_emit_reloc2;
mos_bufmgr::bo_emit_reloc_fence = bufmgr_bo_emit_reloc_fence;
mos_bufmgr::bo_pin = bufmgr_bo_pin;
mos_bufmgr::bo_unpin = bufmgr_bo_unpin;
mos_bufmgr::bo_get_tiling = bufmgr_bo_get_tiling;
mos_bufmgr::bo_set_tiling = bufmgr_bo_set_tiling;
mos_bufmgr::bo_flink = bufmgr_bo_flink;
mos_bufmgr::bo_exec = bufmgr_bo_exec2;
mos_bufmgr::bo_mrb_exec = bufmgr_bo_mrb_exec2;
mos_bufmgr::bo_busy = bufmgr_bo_busy;
mos_bufmgr::bo_madvise = bufmgr_bo_madvise;
mos_bufmgr::destroy = bufmgr_destroy;
mos_bufmgr::check_aperture_space = bufmgr_check_aperture_space;
mos_bufmgr::bo_disable_reuse = bufmgr_bo_disable_reuse;
mos_bufmgr::bo_is_reusable = bufmgr_bo_is_reusable;
mos_bufmgr::get_pipe_from_crtc_id = bufmgr_get_pipe_from_crtc_id;
mos_bufmgr::bo_references = bufmgr_bo_references;
mos_bufmgr::bo_set_softpin = bufmgr_bo_set_softpin;
mos_bufmgr::bo_add_softpin_target = bufmgr_bo_add_softpin_target;
#if DEBUG
mos_bufmgr::debug = 1;
#else
mos_bufmgr::debug = 0;
#endif
constexpr uint64_t kSize = MEMZONE_TOTAL - MEMZONE_SYS_START;
allocator_ = SimpleAllocator::Create(MEMZONE_SYS_START, kSize);
}
MagmaBufMgr::~MagmaBufMgr() {
if (connection_) {
magma_connection_release(connection_);
}
}
MagmaBo* MagmaBufMgr::CreateBo(const char *name, unsigned long size)
{
magma_buffer_t buffer;
uint64_t size_actual;
magma_buffer_id_t buffer_id;
magma_status_t status =
magma_connection_create_buffer(connection(), size, &size_actual, &buffer, &buffer_id);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_connection_create_buffer failed: %d", status);
return nullptr;
}
magma_buffer_set_name(buffer, name);
auto bo = new MagmaBo(buffer, size_actual, buffer_id, this);
if (!bo->MapGpu()) {
bo->RemoveRef();
return nullptr;
}
return bo;
}
MagmaBo* MagmaBufMgr::ImportBo(int handle, unsigned long size)
{
magma_buffer_t buffer;
size_t imported_size;
magma_buffer_id_t id;
magma_status_t status =
magma_connection_import_buffer(connection(), handle, &imported_size, &buffer, &id);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_connection_import_buffer failed: %d", status);
return nullptr;
}
auto bo = new MagmaBo(buffer, imported_size, id, this);
if (!bo->MapGpu()) {
bo->RemoveRef();
return nullptr;
}
return bo;
}
///////////////////////////////////////////////////////////////////////////////
struct mos_bufmgr* mos_bufmgr_gem_init(int fd, int /*batch_size*/)
{
magma_device_t unowned_device = get_device_for_magma_fd(fd);
assert(unowned_device);
uint32_t device_id;
{
uint64_t val;
magma_status_t status =
magma_device_query(unowned_device, MAGMA_QUERY_DEVICE_ID, nullptr, &val);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_query failed: %d", status);
return nullptr;
}
device_id = static_cast<uint32_t>(val);
}
magma_connection_t connection;
magma_status_t status = magma_device_create_connection(unowned_device, &connection);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_create_connection2 failed: %d", status);
return nullptr;
}
LOG_VERBOSE("mos_bufmgr_gem_init connected to magma");
return new MagmaBufMgr(connection, device_id);
}
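// Illustrative sketch of how the MOS layer might drive this bufmgr through
// the function table installed above (not taken from the actual driver):
//   struct mos_bufmgr* bufmgr = mos_bufmgr_gem_init(device_fd, 0);
//   struct mos_linux_bo* bo = bufmgr->bo_alloc(bufmgr, "batch", 16384, 0, 0);
//   bufmgr->bo_map(bo, /*write_enable=*/1);  // CPU access via bo->virt
//   bufmgr->bo_unmap(bo);
//   bufmgr->bo_unreference(bo);
//   bufmgr->destroy(bufmgr);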
struct mos_linux_context* mos_gem_context_create(struct mos_bufmgr *mos_bufmgr)
{
auto bufmgr = static_cast<MagmaBufMgr*>(mos_bufmgr);
uint32_t context_id;
magma_status_t status = magma_connection_create_context(bufmgr->connection(), &context_id);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_create_context failed: %d", status);
return nullptr;
}
assert(context_id);
auto context = new MagmaContext(mos_bufmgr, context_id);
LOG_VERBOSE("mos_gem_context_create context_id %u", context_id);
return context;
}
struct mos_linux_context *mos_gem_context_create_ext(struct mos_bufmgr *bufmgr, __u32 flags)
{
assert(flags == 0);
return mos_gem_context_create(bufmgr);
}
// Creates a new context and associates it with the pseudo VM referenced by
// the given context. Note that the reference context may not itself have been
// created with mos_gem_context_create_shared. The pseudo VMs are just IDs
// used to track which "VMs" execute command buffers. We assume a single VM is
// used for all executing contexts; if that assumption proves false, we'll
// need a separate connection per VM.
struct mos_linux_context* mos_gem_context_create_shared(struct mos_bufmgr *bufmgr,
mos_linux_context* ref_context, __u32 flags)
{
LOG_VERBOSE("mos_gem_context_create_shared vm_id %u flags 0x%x", ref_context->vm->vm_id, flags);
bool ensure_ordering_across_engines = flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
flags &= ~I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
assert(flags == 0);
auto context = static_cast<MagmaContext*>(mos_gem_context_create(bufmgr));
if (!context)
return nullptr;
context->set_vm_id(ref_context->vm->vm_id);
if (ensure_ordering_across_engines) {
context->set_ensure_ordering_across_engines();
}
return context;
}
void mos_gem_context_destroy(struct mos_linux_context *mos_context)
{
auto context = static_cast<MagmaContext*>(mos_context);
LOG_VERBOSE("mos_gem_context_destroy context_id %u", context->ctx_id);
auto bufmgr = static_cast<MagmaBufMgr*>(context->bufmgr);
magma_connection_release_context(bufmgr->connection(), context->ctx_id);
delete context;
}
int mos_set_context_param_load_balance(struct mos_linux_context *mos_context,
struct i915_engine_class_instance *ci, unsigned int count)
{
LOG_VERBOSE("mos_set_context_param_load_balance: context_id %u count %u [%u]",
mos_context->ctx_id, count, count == 0 ? 0 : ci->engine_class);
// May be called with count 0 when mos_query_engines reports an engine count of 0.
if (count == 0)
return 0;
std::vector<uint32_t> target_engines;
for (uint32_t i = 0; i < count; i++) {
if (ci[i].engine_class != I915_ENGINE_CLASS_RENDER &&
ci[i].engine_class != I915_ENGINE_CLASS_VIDEO) {
LOG_VERBOSE("Unhandled engine class: %u", ci[i].engine_class);
return -1;
}
if (ci[i].engine_instance != 0) {
LOG_VERBOSE("Unhandled engine instance: %u", ci[i].engine_instance);
return -1;
}
target_engines.push_back(ci[i].engine_class);
}
// Magma doesn't support load balancing, so just check that we're targeting a
// single engine. Note that enabling this path on Linux uses
// I915_CONTEXT_PARAM_ENGINES, which changes the meaning of the exec ring to
// an array index (see mos_gem_bo_context_exec2).
assert(target_engines.size() == 1);
auto context = static_cast<MagmaContext*>(mos_context);
context->set_target_engines(std::move(target_engines));
return 0;
}
struct drm_i915_gem_vm_control* mos_gem_vm_create(struct mos_bufmgr *mos_bufmgr)
{
// Magma only supports one VM per connection, so for now we fake this support, assuming that
// contexts used for command buffer submission will only use one shared VM.
auto bufmgr = static_cast<MagmaBufMgr*>(mos_bufmgr);
auto vm = new drm_i915_gem_vm_control;
vm->extensions = 0;
vm->flags = 0;
vm->vm_id = bufmgr->next_vm_id();
LOG_VERBOSE("mos_gem_vm_create vm_id %u", vm->vm_id);
return vm;
}
void mos_gem_vm_destroy(struct mos_bufmgr *bufmgr, struct drm_i915_gem_vm_control* vm)
{
LOG_VERBOSE("mos_gem_vm_destroy vm_id %u", vm->vm_id);
delete vm;
}
int mos_bufmgr_gem_get_devid(struct mos_bufmgr *mos_bufmgr)
{
auto bufmgr = static_cast<MagmaBufMgr*>(mos_bufmgr);
LOG_VERBOSE("mos_bufmgr_gem_get_devid returning 0x%x", bufmgr->device_id());
return bufmgr->device_id();
}
bool mos_gem_bo_is_softpin(struct mos_linux_bo *bo)
{
assert(static_cast<MagmaBo*>(bo)->is_mapped_gpu());
return true;
}
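// Translates the i915 exec ring (or, when load balancing configured target
// engines, an index into that engine array) into magma command buffer flags.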
static uint64_t get_magma_flags(MagmaContext* context, uint32_t exec_ring)
{
uint64_t flags = 0;
if (context->target_engines().size()) {
if (exec_ring >= context->target_engines().size()) {
LOG_VERBOSE("Unexpected exec ring index: %u", exec_ring);
assert(false);
} else {
uint32_t engine = context->target_engines()[exec_ring];
switch (engine) {
case I915_ENGINE_CLASS_RENDER:
flags |= kMagmaIntelGenCommandBufferForRender;
break;
case I915_ENGINE_CLASS_VIDEO:
flags |= kMagmaIntelGenCommandBufferForVideo;
break;
default:
LOG_VERBOSE("Unrecognized engine: %u", engine);
assert(false);
}
}
} else switch (exec_ring) {
case I915_EXEC_RENDER:
flags |= kMagmaIntelGenCommandBufferForRender;
break;
case I915_EXEC_BSD:
flags |= kMagmaIntelGenCommandBufferForVideo;
break;
default:
LOG_VERBOSE("unexpected ring 0x%x", exec_ring);
assert(false);
}
return flags;
}
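// Submits |mos_linux_bo| as a command buffer on the given context, together
// with the softpin targets previously gathered in its exec resource list.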
int mos_gem_bo_context_exec2(struct mos_linux_bo *mos_linux_bo, int used, struct mos_linux_context *mos_context,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4, unsigned int flags, int *fence) {
constexpr uint32_t kAllowedFlags = I915_EXEC_BSD|I915_EXEC_BSD_RING1|I915_EXEC_RENDER;
assert((flags & ~kAllowedFlags) == 0); // only these flags
auto context = static_cast<MagmaContext*>(mos_context);
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
auto bufmgr = static_cast<MagmaBufMgr*>(context->bufmgr);
uint64_t magma_flags = get_magma_flags(context, flags & I915_EXEC_RING_MASK);
uint32_t vm_id = bufmgr->GetVmId(context->vm_id());
LOG_VERBOSE("mos_gem_bo_context_exec2 bo %lu used %d context_id %u vm_id %u bufmgr vm_id %u "
"num_cliprects %d DR4 %d flags 0x%x kAllowedFlags 0x%x magma_flags 0x%lx",
bo->id(), used, context->ctx_id, context->vm_id(), vm_id, num_cliprects, DR4,
flags, kAllowedFlags, magma_flags);
if (vm_id != context->vm_id()) {
// We don't support more than one VM per connection (see mos_gem_context_create_shared)
LOG_VERBOSE("Incompatible VM bufmgr vm_id %u context vm_id %u", vm_id, context->vm_id());
assert(false);
return -1;
}
if (context->ensure_ordering_across_engines()) {
uint64_t ordering_flags = context->GetCommandBufferFlags(magma_flags);
if (ordering_flags != magma_flags) {
LOG_VERBOSE("Ordering across engines not implemented flags 0x%lx magma_flags 0x%lx",
ordering_flags, magma_flags);
assert(false);
return -1;
}
}
// TODO(fxbug.78281)
uint64_t* semaphore_ids = nullptr;
auto& resources = bo->exec_resources();
magma_exec_command_buffer command_buffer = {
.resource_index = 0,
.start_offset = 0
};
magma_command_descriptor descriptor = {
.resource_count = static_cast<uint32_t>(resources.size()),
.command_buffer_count = 1,
.wait_semaphore_count = 0,
.signal_semaphore_count = 0,
.resources = resources.data(),
.command_buffers = &command_buffer,
.semaphore_ids = semaphore_ids,
.flags = magma_flags,
};
// Add to the inflight list first to avoid a race with another thread reading
// completions from the notification channel, in case this thread is preempted
// just after sending the command buffer and the completion happens quickly.
bufmgr->inflight_list()->AddAndUpdate(
bufmgr->connection(), resources.data(),
resources.size());
magma_status_t status = magma_connection_execute_command(bufmgr->connection(),
context->ctx_id, &descriptor);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_connection_execute_command failed: %d", status);
return -1;
}
return 0;
}
void mos_gem_bo_clear_relocs(struct mos_linux_bo *bo, int start)
{
static_cast<MagmaBo*>(bo)->clear_exec_resources();
}
int mos_bo_gem_export_to_prime(struct mos_linux_bo *mos_linux_bo, int *prime_fd)
{
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
magma_handle_t handle;
magma_status_t status = magma_buffer_export(bo->buffer(), &handle);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("magma_export failed: %d", status);
return -1;
}
*prime_fd = handle;
return 0;
}
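// Blocks until all inflight command buffers referencing the buffer have
// completed, or until the timeout expires.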
int mos_gem_bo_wait(struct mos_linux_bo *mos_linux_bo, int64_t timeout_ns)
{
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
auto bufmgr = bo->magma_bufmgr();
magma_status_t status = bufmgr->inflight_list()->WaitForBuffer(
bufmgr->connection(),
bufmgr->notification_handle(),
bo->id(),
timeout_ns);
if (status != MAGMA_STATUS_OK) {
LOG_VERBOSE("WaitForbuffer failed: %d", status);
return -1;
}
return 0;
}
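// Returns 1 if the buffer is still referenced by an inflight command buffer
// (a zero-timeout wait times out), else 0.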
int bufmgr_bo_busy(struct mos_linux_bo *mos_linux_bo)
{
auto bo = static_cast<MagmaBo*>(mos_linux_bo);
auto bufmgr = bo->magma_bufmgr();
magma_status_t status = bufmgr->inflight_list()->WaitForBuffer(
bufmgr->connection(),
bufmgr->notification_handle(),
bo->id(),
0 /* timeout_ns */);
if (status == MAGMA_STATUS_TIMED_OUT) {
return 1;
} else if (status == MAGMA_STATUS_OK) {
return 0;
} else {
LOG_VERBOSE("bufmgr_bo_busy failed: %d", status);
return 0;
}
}
void bufmgr_bo_wait_rendering(struct mos_linux_bo *bo)
{
mos_gem_bo_wait(bo, INT64_MAX);
}
int mos_gem_bo_map_wc(struct mos_linux_bo *bo) {
// Currently only cache-coherent systems are supported, so WC mapping should be unnecessary.
// TODO(fxbug.dev/97510): Add support for WC mappings.
return bufmgr_bo_map(bo, 1);
}
int mos_gem_bo_unmap_wc(struct mos_linux_bo *bo)
{
return bufmgr_bo_unmap(bo);
}
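// Engine topology queries. Until the MSD can be queried (see the TODOs
// below), report one render engine and one video engine.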
int mos_query_engines_count(struct mos_bufmgr *bufmgr, unsigned int *nengine)
{
// TODO(fxbug.dev/110484) - query the MSD for set of available engines
*nengine = 2;
return 0;
}
int mos_query_engines(struct mos_bufmgr *bufmgr,
__u16 engine_class,
__u64 caps,
unsigned int *nengine,
struct i915_engine_class_instance *ci)
{
unsigned int max = *nengine;
LOG_VERBOSE("mos_query_engines: class %u max %u", engine_class, max);
*nengine = 0;
// TODO(fxbug.dev/110484) - query the MSD for set of available engines
switch (engine_class) {
case I915_ENGINE_CLASS_RENDER:
case I915_ENGINE_CLASS_VIDEO:
if (*nengine >= max) {
LOG_VERBOSE("Can't report engines max %u", max);
return -1;
}
ci[*nengine].engine_class = engine_class;
ci[*nengine].engine_instance = 0;
*nengine += 1;
break;
default:
LOG_VERBOSE("mos_query_engines: class %u not supported", engine_class);
break;
}
return 0;
}