| /* |
| * Copyright © 2018 Google, LLC |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a |
| * copy of this software and associated documentation files (the "Software"), |
| * to deal in the Software without restriction, including without limitation |
| * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| * and/or sell copies of the Software, and to permit persons to whom the |
| * Software is furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice (including the next |
| * paragraph) shall be included in all copies or substantial portions of the |
| * Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
| * IN THE SOFTWARE. |
| */ |
| |
#include "intel_gem.h"
#include "drm-uapi/i915_drm.h"
#include "magma/u_magma.h"
#include "magma/u_magma_mmap.h"
#include "util/log.h"
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <magma_intel_gen_defs.h>
| |
| static bool intel_getparam(int fd, uint32_t param, int* value_out) |
| { |
| magma_status_t status = MAGMA_STATUS_OK; |
| uint64_t value; |
| |
| magma_device_t handle = u_magma_device_from_fd(fd); |
| |
| switch (param) { |
| case I915_PARAM_CHIPSET_ID: |
| status = magma_device_query(handle, MAGMA_QUERY_DEVICE_ID, NULL, &value); |
| break; |
| case I915_PARAM_SUBSLICE_TOTAL: |
| status = magma_device_query(handle, kMagmaIntelGenQuerySubsliceAndEuTotal, NULL, &value); |
| value >>= 32; |
| break; |
| case I915_PARAM_EU_TOTAL: |
| status = magma_device_query(handle, kMagmaIntelGenQuerySubsliceAndEuTotal, NULL, &value); |
| value = (uint32_t)value; |
| break; |
| case I915_PARAM_HAS_WAIT_TIMEOUT: |
| case I915_PARAM_HAS_EXECBUF2: |
| value = 1; |
| break; |
| case I915_PARAM_HAS_EXEC_FENCE_ARRAY: // Used for semaphores |
| value = 1; |
| break; |
| case I915_PARAM_HAS_EXEC_SOFTPIN: { |
| // client driver manages GPU address space |
| status = magma_device_query(handle, kMagmaIntelGenQueryExtraPageCount, NULL, &value); |
| break; |
| case I915_PARAM_REVISION: |
| value = 1; |
| break; |
| case I915_PARAM_HAS_CONTEXT_ISOLATION: |
| status = magma_device_query(handle, kMagmaIntelGenQueryHasContextIsolation, NULL, &value); |
| value = (uint32_t) value; |
| break; |
| case I915_PARAM_CS_TIMESTAMP_FREQUENCY: |
| status = magma_device_query(handle, kMagmaIntelGenQueryTimestampFrequency, NULL, &value); |
| break; |
| } |
| default: |
| status = MAGMA_STATUS_INVALID_ARGS; |
| } |
| if (status != MAGMA_STATUS_OK) |
| return false; |
| |
| *value_out = value; |
| assert(*value_out == value); |
| return true; |
| } |
| |
| static bool intel_context_getparam(int fd, uint32_t param, uint64_t* value) |
| { |
| magma_status_t status; |
| switch (param) { |
| case I915_CONTEXT_PARAM_GTT_SIZE: |
| status = magma_device_query(u_magma_device_from_fd(fd), kMagmaIntelGenQueryGttSize, NULL, value); |
| if (status != MAGMA_STATUS_OK) { |
| mesa_logd("magma_device_query failed: %d", status); |
| return false; |
| } |
| return true; |
| |
| default: |
| mesa_logd("intel_context_getparam: unhandled param 0x%x", param); |
| return false; |
| } |
| } |
| |
/* Byte width (1, 2, or 4) of a bitmask wide enough to hold |max| bits.
 * Note the result jumps straight from 2 to 4 bytes; 3-byte masks are
 * never produced.  |max| must not exceed 32.
 */
static uint32_t bitmask_size(uint32_t max) {
   assert(max <= 32);
   if (max > 16)
      return 4;
   return (max > 8) ? 2 : 1;
}
| |
| static uint8_t* get_ss_mask_ptr(uint32_t data_length, struct drm_i915_query_topology_info* info, uint32_t slice) { |
| uint32_t offset = info->subslice_offset + slice * info->subslice_stride; |
| assert(offset + info->subslice_stride <= data_length); |
| return &info->data[offset]; |
| } |
| |
| static uint8_t* get_eu_mask_ptr(uint32_t data_length, struct drm_i915_query_topology_info* info, uint32_t slice, uint32_t subslice) { |
| uint32_t offset = info->eu_offset + (slice * info->max_subslices + subslice) * info->eu_stride; |
| assert(offset + info->eu_stride <= data_length); |
| return &info->data[offset]; |
| } |
| |
| static int query_topology(magma_device_t device, struct drm_i915_query_item* item) { |
| magma_handle_t handle; |
| magma_status_t status = magma_device_query(device, kMagmaIntelGenQueryTopology, &handle, NULL); |
| if (status != MAGMA_STATUS_OK) { |
| mesa_logd("magma_device_query failed: %d", status); |
| return -1; |
| } |
| |
| struct magma_intel_gen_topology* magma_top = u_magma_mmap_handle( |
| handle, /*offset=*/0, sizeof(struct magma_intel_gen_topology)); |
| if (magma_top == MAP_FAILED) { |
| mesa_logd("failed to map topology"); |
| u_magma_close_handle(handle); |
| return -1; |
| } |
| |
| uint64_t map_size = magma_top->data_byte_count + sizeof(struct magma_intel_gen_topology); |
| |
| u_magma_munmap(magma_top, sizeof(struct magma_intel_gen_topology)); |
| |
| magma_top = u_magma_mmap_handle(handle, /*offset=*/0, map_size); |
| |
| u_magma_close_handle(handle); |
| |
| if (magma_top == MAP_FAILED) { |
| mesa_logd("failed to map topology size %" PRIu64, map_size); |
| u_magma_munmap(magma_top, map_size); |
| return -1; |
| } |
| |
| uint8_t* mask_data = (uint8_t*)magma_top + sizeof(struct magma_intel_gen_topology); |
| |
| uint32_t slice_mask_stride = bitmask_size(magma_top->max_slice_count); |
| uint32_t subslice_mask_stride = bitmask_size(magma_top->max_subslice_count); |
| uint32_t eu_mask_stride = bitmask_size(magma_top->max_eu_count); |
| |
| uint32_t data_length = slice_mask_stride + |
| subslice_mask_stride * magma_top->max_slice_count + |
| eu_mask_stride * (magma_top->max_slice_count * magma_top->max_subslice_count); |
| |
| if (item->length == 0) { |
| // Return the length of data the client should allocate. |
| item->length = sizeof(struct magma_intel_gen_topology) + data_length; |
| u_magma_munmap(magma_top, map_size); |
| return 0; |
| } |
| assert(item->length == sizeof(struct magma_intel_gen_topology) + data_length); |
| |
| struct drm_i915_query_topology_info *info = (struct drm_i915_query_topology_info *)item->data_ptr; |
| |
| memset(info, 0, sizeof(struct drm_i915_query_topology_info)); |
| info->max_slices = magma_top->max_slice_count; |
| info->max_subslices = magma_top->max_subslice_count; |
| info->max_eus_per_subslice = magma_top->max_eu_count; |
| |
| // Init all masks to zero. |
| memset(info->data, 0, data_length); |
| |
| // Copy the slice mask. |
| uint8_t* src_ptr = mask_data; |
| assert(src_ptr); |
| |
| uint8_t* dst_slice_ptr = info->data; |
| memcpy(dst_slice_ptr, src_ptr, slice_mask_stride); |
| src_ptr += slice_mask_stride; |
| |
| // Subslices (1 per slice) start after the single slice mask (offset 0) |
| info->subslice_offset = slice_mask_stride; |
| // EU masks (1 per subslice) come after all the subslice masks |
| info->eu_offset = info->subslice_offset + subslice_mask_stride * info->max_slices; |
| |
| info->subslice_stride = subslice_mask_stride; |
| info->eu_stride = eu_mask_stride; |
| |
| // Process the enabled slices. |
| for (uint32_t s = 0; s < info->max_slices; s++) { |
| bool slice_enable = (dst_slice_ptr[s / 8] >> (s % 8)) & 1; |
| if (slice_enable) { |
| // Copy subslice mask for this slice. |
| uint8_t* dst_subslice_ptr = get_ss_mask_ptr(data_length, info, s); |
| memcpy(dst_subslice_ptr, src_ptr, subslice_mask_stride); |
| src_ptr += subslice_mask_stride; |
| |
| for (uint32_t ss = 0; ss < info->max_subslices; ss++) { |
| bool subslice_enable = (dst_subslice_ptr[ss / 8] >> (ss % 8)) & 1; |
| if (subslice_enable) { |
| // Copy eu masks |
| uint8_t* dst_eu_ptr = get_eu_mask_ptr(data_length, info, s, ss); |
| memcpy(dst_eu_ptr, src_ptr, eu_mask_stride); |
| src_ptr += eu_mask_stride; |
| } |
| } |
| } |
| } |
| |
| u_magma_munmap(magma_top, map_size); |
| return 0; |
| } |
| |
| int intel_ioctl(int fd, unsigned long request, void *arg) |
| { |
| magma_device_t device = u_magma_device_from_fd(fd); |
| |
| switch (request) { |
| case DRM_IOCTL_I915_GETPARAM: { |
| struct drm_i915_getparam* getparam = (struct drm_i915_getparam*)arg; |
| |
| return intel_getparam(fd, getparam->param, getparam->value) ? 0 : -1; |
| } |
| case DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM: { |
| struct drm_i915_gem_context_param* context_param = (struct drm_i915_gem_context_param*)arg; |
| |
| uint64_t value; |
| bool success = intel_context_getparam(fd, context_param->param, &value); |
| context_param->value = value; |
| return success ? 0 : -1; |
| } |
| case DRM_IOCTL_I915_QUERY: { |
| struct drm_i915_query* query = (struct drm_i915_query*)arg; |
| struct drm_i915_query_item* item = (struct drm_i915_query_item*) query->items_ptr; |
| |
| if (query->num_items == 1 && item->query_id == DRM_I915_QUERY_TOPOLOGY_INFO) { |
| return query_topology(device, item); |
| } |
| break; |
| } |
| default: |
| break; |
| } |
| errno = ENOTSUP; |
| return -1; |
| } |
| |
| bool intel_gem_supports_syncobj_wait(int fd) { return true; } |
| |
| int |
| intel_gem_create_context_engines(int fd, |
| const struct intel_query_engine_info *info, |
| int num_engines, enum intel_engine_class *engine_classes) |
| { |
| mesa_logd("intel_gem_create_context_engines - STUB"); |
| assert(false); |
| return 0; |
| } |
| |
/* TODO(fxbug.dev/42083241): replace this stub with a functioning implementation. */
bool intel_gem_read_render_timestamp(int fd, uint64_t *value)
{
   // Stub: always report a zero timestamp and claim success.
   *value = 0;
   return true;
}
| |
/* Protected contexts are not supported by this backend; |fd| is unused. */
bool intel_gem_supports_protected_context(int fd)
{
   return false;
}