diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index bcaefc952764e..4556439271af9 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -541,6 +541,22 @@ GuC ABI .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h +GuC Virtualization ABI +~~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_actions_pf_abi.h +.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_actions_vf_abi.h + +GuC VF/PF Virtualization ABI +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/gpu/drm/i915/gt/iov/abi/iov_communication_abi.h +.. kernel-doc:: drivers/gpu/drm/i915/gt/iov/abi/iov_communication_mmio_abi.h +.. kernel-doc:: drivers/gpu/drm/i915/gt/iov/abi/iov_messages_abi.h +.. kernel-doc:: drivers/gpu/drm/i915/gt/iov/abi/iov_actions_abi.h +.. kernel-doc:: drivers/gpu/drm/i915/gt/iov/abi/iov_actions_mmio_abi.h +.. kernel-doc:: drivers/gpu/drm/i915/gt/iov/abi/iov_errors_abi.h + HuC --- .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_huc.c diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 2df1cc7432188..4efd64e3f6c9c 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -167,6 +167,18 @@ config DRM_I915_DEBUG_GUC If in doubt, say "N". +config DRM_I915_DEBUG_IOV + bool "Enable additional driver debugging for IOV" + depends on DRM_I915 + default n + help + Choose this option to turn on extra driver debugging that may affect + performance but will help resolve IOV related issues. + + Recommended for driver developers only. + + If in doubt, say "N". + config DRM_I915_SELFTEST bool "Enable selftests upon driver load" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 537a5ce8ef0e5..3dceb04d01395 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -198,6 +198,25 @@ i915-y += gt/uc/intel_uc.o \ gt/uc/intel_huc_debugfs.o \ gt/uc/intel_huc_fw.o +# Virtualization support +iov-y += \ + i915_sriov.o \ + i915_sriov_sysfs.o \ + gt/iov/intel_iov.o \ + gt/iov/intel_iov_debugfs.o \ + gt/iov/intel_iov_event.o \ + gt/iov/intel_iov_provisioning.o \ + gt/iov/intel_iov_query.o \ + gt/iov/intel_iov_relay.o \ + gt/iov/intel_iov_service.o \ + gt/iov/intel_iov_state.o \ + gt/iov/intel_iov_sysfs.o +i915-y += $(iov-y) + +iov-$(CONFIG_DRM_I915_SELFTEST) += \ + gt/iov/selftests/iov_selftest_actions.o +i915-y += $(iov-$(CONFIG_DRM_I915_SELFTEST)) + # modesetting core code i915-y += \ display/intel_atomic.o \ diff --git a/drivers/gpu/drm/i915/README.sriov b/drivers/gpu/drm/i915/README.sriov new file mode 100644 index 0000000000000..fb5f5d051c05f --- /dev/null +++ b/drivers/gpu/drm/i915/README.sriov @@ -0,0 +1,6 @@ +BEGIN SR-IOV ENABLING +D: Michal Wajdeczko +M: Michal Wajdeczko +T: drm/i915/iov +T: drm/i915/pf +END TGL diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 05babdcf5f2e9..fc75d408967b4 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -5264,6 +5264,23 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) return err; } +/** + * intel_power_domains_prune - prunes the power domain structures + * @dev_priv: i915 device instance + * + * We might have detected that power domain initialization done earlier + * requires some additional tweaks. 
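+ *
+ * Currently this is used for SR-IOV VFs, where the set of power wells built
+ * by intel_power_domains_init() is replaced with just the i9xx always-on
+ * power well (see the IS_SRIOV_VF() branch below).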
+ */ +void intel_power_domains_prune(struct drm_i915_private *dev_priv) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + if (IS_SRIOV_VF(dev_priv)) { + set_power_wells(power_domains, i9xx_always_on_power_well); + return; + } +} + /** * intel_power_domains_cleanup - clean up power domains resources * @dev_priv: i915 device instance diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 686d18eaa24c8..1748df65d49e2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -211,6 +211,7 @@ struct intel_display_power_domain_set { for_each_if((__power_well)->desc->domains & (__domain_mask)) int intel_power_domains_init(struct drm_i915_private *dev_priv); +void intel_power_domains_prune(struct drm_i915_private *dev_priv); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index ce760402a89a9..fca0230661ee7 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -29,11 +29,6 @@ i915_vm_to_dpt(struct i915_address_space *vm) #define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) -static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) -{ - writeq(pte, addr); -} - static void dpt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 4a2662838cd8d..553edde450b8d 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -868,6 +868,9 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); + if (IS_SRIOV_VF(dev_priv)) + return 0; + pci_read_config_dword(pdev, ASLS, &asls); drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n", asls); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 9eb0d5c4dfe39..285c29d92bf17 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -3335,12 +3335,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.batch_flags = 0; if (args->flags & I915_EXEC_SECURE) { - if (GRAPHICS_VER(i915) >= 11) - return -ENODEV; + if (!i915->params.enable_secure_batch) { + if (GRAPHICS_VER(i915) >= 11) + return -ENODEV; - /* Return -EPERM to trigger fallback code on old binaries. */ - if (!HAS_SECURE_BATCHES(i915)) - return -EPERM; + /* + * Return -EPERM to trigger fallback code on old + * binaries. 
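+			 *
+			 * Both this and the GRAPHICS_VER() check above are
+			 * bypassed when the enable_secure_batch module
+			 * parameter is set; the master / CAP_SYS_ADMIN check
+			 * below still applies in that case.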
+ */ + if (!HAS_SECURE_BATCHES(i915)) + return -EPERM; + } if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN)) return -EPERM; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 8576fc5b0087a..f1953cefe4db0 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -296,6 +296,9 @@ static void __sprint_engine_name(struct intel_engine_cs *engine) void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask) { + if (IS_SRIOV_VF(engine->i915)) + return; + /* * Though they added more rings on g4x/ilk, they did not add * per-engine HWSTAM until gen6. @@ -1635,6 +1638,10 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, struct intel_engine_execlists * const execlists = &engine->execlists; u64 addr; + /* VF can't access these registers */ + if (IS_SRIOV_VF(dev_priv)) + return; + if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7)) drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID)); if (HAS_EXECLISTS(dev_priv)) { diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 2b6a8379d3085..a7a77f1679955 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -18,6 +18,7 @@ #include "i915_drv.h" #include "i915_scatterlist.h" #include "i915_vgpu.h" +#include "iov/intel_iov.h" #include "intel_gtt.h" #include "gen8_ppgtt.h" @@ -205,6 +206,19 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) intel_gtt_chipset_flush(); } +static void gen12vf_ggtt_invalidate(struct i915_ggtt *ggtt) +{ + struct intel_gt *gt = ggtt->vm.gt; + struct intel_guc *guc = >->uc.guc; + intel_wakeref_t wakeref; + + if (!guc->ct.enabled) + return; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + intel_guc_invalidate_tlb_guc(guc, INTEL_GUC_TLB_INVAL_MODE_HEAVY); +} + u64 gen8_ggtt_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) @@ -217,11 +231,23 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, return pte; } -static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) +void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); } +gen8_pte_t gen8_get_pte(void __iomem *addr) +{ + return readq(addr); +} + +u64 ggtt_addr_to_pte_offset(u64 ggtt_addr) +{ + GEM_BUG_ON(!IS_ALIGNED(ggtt_addr, I915_GTT_PAGE_SIZE_4K)); + + return (ggtt_addr / I915_GTT_PAGE_SIZE_4K) * sizeof(gen8_pte_t); +} + static void gen8_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, @@ -548,6 +574,10 @@ static int init_ggtt(struct i915_ggtt *ggtt) if (ret) return ret; + ret = intel_iov_init_ggtt(&ggtt->vm.gt->iov); + if (ret) + return ret; + mutex_init(&ggtt->error_mutex); if (ggtt->mappable_end) { /* @@ -744,6 +774,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) mutex_destroy(&ggtt->error_mutex); ggtt_release_guc_top(ggtt); + intel_iov_fini_ggtt(&ggtt->vm.gt->iov); intel_vgt_deballoon(ggtt); ggtt->vm.cleanup(&ggtt->vm); @@ -1160,6 +1191,39 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) return 0; } +static int gen12vf_ggtt_probe(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + + GEM_BUG_ON(!IS_SRIOV_VF(i915)); + GEM_BUG_ON(GRAPHICS_VER(i915) < 12); + + /* there is no apperture on VFs */ + ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 0); + ggtt->mappable_end = 0; + + ggtt->vm.alloc_pt_dma = alloc_pt_dma; + + /* safe guess as native expects the same minimum */ + ggtt->vm.total = 1ULL << (ilog2(GUC_GGTT_TOP 
- 1) + 1); /* roundup_pow_of_two(GUC_GGTT_TOP); */ + + ggtt->vm.pte_encode = gen8_ggtt_pte_encode; + ggtt->vm.clear_range = nop_clear_range; + ggtt->vm.insert_page = gen8_ggtt_insert_page; + ggtt->vm.insert_entries = gen8_ggtt_insert_entries; + ggtt->vm.cleanup = gen6_gmch_remove; + + ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; + ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + ggtt->vm.vma_ops.set_pages = ggtt_set_pages; + ggtt->vm.vma_ops.clear_pages = clear_pages; + + ggtt->invalidate = gen12vf_ggtt_invalidate; + + return ggtt_probe_common(ggtt, sizeof(gen8_pte_t) * + (ggtt->vm.total >> PAGE_SHIFT)); +} + static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; @@ -1174,6 +1238,8 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) ret = i915_gmch_probe(ggtt); else if (GRAPHICS_VER(i915) < 8) ret = gen6_gmch_probe(ggtt); + else if (IS_SRIOV_VF(i915)) + ret = gen12vf_ggtt_probe(ggtt); else ret = gen8_gmch_probe(ggtt); if (ret) { @@ -1751,3 +1817,141 @@ void i915_ggtt_deballoon(struct i915_ggtt *ggtt, struct drm_mm_node *node) ggtt->vm.reserved -= node->size; drm_mm_remove_node(node); } + +static gen8_pte_t tgl_prepare_vf_pte_vfid(u16 vfid) +{ + GEM_BUG_ON(!FIELD_FIT(TGL_GGTT_PTE_VFID_MASK, vfid)); + + return FIELD_PREP(TGL_GGTT_PTE_VFID_MASK, vfid); +} + +static gen8_pte_t prepare_vf_pte(u16 vfid) +{ + return tgl_prepare_vf_pte_vfid(vfid) | GEN8_PAGE_PRESENT; +} + +void i915_ggtt_set_space_owner(struct i915_ggtt *ggtt, u16 vfid, + const struct drm_mm_node *node) +{ + gen8_pte_t __iomem *gtt_entries = ggtt->gsm; + const gen8_pte_t pte = prepare_vf_pte(vfid); + u64 base = node->start; + u64 size = node->size; + + GEM_BUG_ON(!IS_SRIOV_PF(ggtt->vm.i915)); + GEM_BUG_ON(base % PAGE_SIZE); + GEM_BUG_ON(size % PAGE_SIZE); + + drm_dbg(&ggtt->vm.i915->drm, "GGTT VF%u [%#llx-%#llx] %lluK\n", + vfid, base, base + size, size / SZ_1K); + + gtt_entries += base >> PAGE_SHIFT; + while (size) { + gen8_set_pte(gtt_entries++, pte); + size -= PAGE_SIZE; + } + + ggtt->invalidate(ggtt); +} + +static inline unsigned int __ggtt_size_to_ptes_size(u64 ggtt_size) +{ + GEM_BUG_ON(!IS_ALIGNED(ggtt_size, I915_GTT_MIN_ALIGNMENT)); + + return (ggtt_size >> PAGE_SHIFT) * sizeof(gen8_pte_t); +} + +static void ggtt_pte_clear_vfid(void *buf, u64 size) +{ + while (size) { + *(gen8_pte_t *)buf &= ~TGL_GGTT_PTE_VFID_MASK; + + buf += sizeof(gen8_pte_t); + size -= sizeof(gen8_pte_t); + } +} + +/** + * i915_ggtt_save_ptes - copy GGTT PTEs to preallocated buffer + * @ggtt: the &struct i915_ggtt + * @node: the &struct drm_mm_node - the @node->start is used as the start offset for save + * @buf: preallocated buffer in which PTEs will be saved + * @size: size of prealocated buffer (in bytes) + * - must be sizeof(gen8_pte_t) aligned + * @flags: function flags: + * - #I915_GGTT_SAVE_PTES_NO_VFID BIT - save PTEs without VFID + * + * Returns: size of the buffer used (or needed if both @buf and @size are (0)) to store all PTEs + * for a given node, -EINVAL if one of @buf or @size is 0. 
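+ *
+ * A typical save sequence (an illustrative sketch only, not taken from an
+ * actual caller) first queries the required buffer size and then saves the
+ * PTEs with the VFID bits cleared::
+ *
+ *	ret = i915_ggtt_save_ptes(ggtt, node, NULL, 0, 0);
+ *	buf = kvmalloc(ret, GFP_KERNEL);
+ *	if (buf)
+ *		ret = i915_ggtt_save_ptes(ggtt, node, buf, ret,
+ *					  I915_GGTT_SAVE_PTES_NO_VFID);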
+ */ +int i915_ggtt_save_ptes(struct i915_ggtt *ggtt, const struct drm_mm_node *node, void *buf, + unsigned int size, unsigned int flags) +{ + gen8_pte_t __iomem *gtt_entries = ggtt->gsm; + + if (!buf && !size) + return __ggtt_size_to_ptes_size(node->size); + + if (!buf || !size) + return -EINVAL; + + GEM_BUG_ON(!IS_ALIGNED(size, sizeof(gen8_pte_t))); + GEM_WARN_ON(size > __ggtt_size_to_ptes_size(SZ_4G)); + + if (size < __ggtt_size_to_ptes_size(node->size)) + return -ENOSPC; + size = __ggtt_size_to_ptes_size(node->size); + + gtt_entries += node->start >> PAGE_SHIFT; + + memcpy_fromio(buf, gtt_entries, size); + + if (flags & I915_GGTT_SAVE_PTES_NO_VFID) + ggtt_pte_clear_vfid(buf, size); + + return size; +} + +/** + * i915_ggtt_restore_ptes() - restore GGTT PTEs from buffer + * @ggtt: the &struct i915_ggtt + * @node: the &struct drm_mm_node - the @node->start is used as the start offset for restore + * @buf: buffer from which PTEs will be restored + * @size: size of prealocated buffer (in bytes) + * - must be sizeof(gen8_pte_t) aligned + * @flags: function flags: + * - #I915_GGTT_RESTORE_PTES_VFID_MASK - VFID for restored PTEs + * - #I915_GGTT_RESTORE_PTES_NEW_VFID - restore PTEs with new VFID + * (from #I915_GGTT_RESTORE_PTES_VFID_MASK) + * + * Returns: 0 on success, -ENOSPC if @node->size is less than size. + */ +int i915_ggtt_restore_ptes(struct i915_ggtt *ggtt, const struct drm_mm_node *node, const void *buf, + unsigned int size, unsigned int flags) +{ + gen8_pte_t __iomem *gtt_entries = ggtt->gsm; + u32 vfid = FIELD_GET(I915_GGTT_RESTORE_PTES_VFID_MASK, flags); + gen8_pte_t pte; + + GEM_BUG_ON(!size); + GEM_BUG_ON(!IS_ALIGNED(size, sizeof(gen8_pte_t))); + + if (size > __ggtt_size_to_ptes_size(node->size)) + return -ENOSPC; + + gtt_entries += node->start >> PAGE_SHIFT; + + while (size) { + pte = *(gen8_pte_t *)buf; + if (flags & I915_GGTT_RESTORE_PTES_NEW_VFID) + pte |= tgl_prepare_vf_pte_vfid(vfid); + gen8_set_pte(gtt_entries++, pte); + + buf += sizeof(gen8_pte_t); + size -= sizeof(gen8_pte_t); + } + + ggtt->invalidate(ggtt); + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 6c517b1735297..e929cb270e75a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -22,6 +22,8 @@ #include "intel_renderstate.h" #include "intel_rps.h" #include "intel_uncore.h" +#include "intel_pm.h" +#include "iov/intel_iov.h" #include "shmem_utils.h" #include "pxp/intel_pxp.h" @@ -127,10 +129,15 @@ static u16 slicemask(struct intel_gt *gt, int count) int intel_gt_init_mmio(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; + int ret; - intel_gt_init_clock_frequency(gt); + ret = intel_iov_init_mmio(>->iov); + if (ret) + return ret; + intel_gt_init_clock_frequency(gt); intel_uc_init_mmio(>->uc); + intel_sseu_info_init(gt); /* @@ -239,6 +246,13 @@ int intel_gt_init_hw(struct intel_gt *gt) goto out; } + ret = intel_iov_init_hw(>->iov); + if (unlikely(ret)) { + i915_probe_error(i915, "Enabling IOV failed (%pe)\n", + ERR_PTR(ret)); + goto out; + } + intel_mocs_init(gt); out: @@ -380,6 +394,9 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; + if (IS_SRIOV_VF(i915)) + return; + /* From GEN8 onwards we only have one 'All Engine Fault Register' */ if (GRAPHICS_VER(i915) >= 8) gen8_check_faults(gt); @@ -680,10 +697,14 @@ int intel_gt_init(struct intel_gt *gt) */ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + err = intel_iov_init(>->iov); + if 
(unlikely(err)) + goto out_fw; + err = intel_gt_init_scratch(gt, GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K); if (err) - goto out_fw; + goto err_iov; intel_gt_pm_init(gt); @@ -707,6 +728,10 @@ int intel_gt_init(struct intel_gt *gt) if (err) goto err_uc_init; + err = intel_iov_init_late(>->iov); + if (err) + goto err_gt; + err = __engines_record_defaults(gt); if (err) goto err_gt; @@ -737,6 +762,8 @@ int intel_gt_init(struct intel_gt *gt) err_pm: intel_gt_pm_fini(gt); intel_gt_fini_scratch(gt); +err_iov: + intel_iov_fini(>->iov); out_fw: if (err) intel_gt_set_wedged_on_init(gt); @@ -746,6 +773,10 @@ int intel_gt_init(struct intel_gt *gt) void intel_gt_driver_remove(struct intel_gt *gt) { + intel_gt_fini_clock_frequency(gt); + + intel_iov_fini_hw(>->iov); + __intel_gt_disable(gt); intel_migrate_fini(>->migrate); @@ -788,6 +819,7 @@ void intel_gt_driver_release(struct intel_gt *gt) intel_gt_pm_fini(gt); intel_gt_fini_scratch(gt); intel_gt_fini_buffer_pool(gt); + intel_iov_fini(>->iov); } void intel_gt_driver_late_release(struct intel_gt *gt) @@ -795,6 +827,7 @@ void intel_gt_driver_late_release(struct intel_gt *gt) /* We need to wait for inflight RCU frees to release their grip */ rcu_barrier(); + intel_iov_release(>->iov); intel_uc_driver_late_release(>->uc); intel_gt_fini_requests(gt); intel_gt_fini_reset(gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index 3513d6f907476..5100f33f21fcf 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -167,9 +167,18 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt) USEC_PER_SEC)); } +void intel_gt_fini_clock_frequency(struct intel_gt *gt) +{ + /* Clock registers no longer accessible, stop checking */ + gt->clock_frequency = 0; +} + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) void intel_gt_check_clock_frequency(const struct intel_gt *gt) { + if (!gt->clock_frequency) + return; + if (gt->clock_frequency != read_clock_frequency(gt->uncore)) { dev_err(gt->i915->drm.dev, "GT clock frequency changed, was %uHz, now %uHz!\n", diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h index 8b03e97a85df3..c923b1866b082 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h @@ -11,6 +11,7 @@ struct intel_gt; void intel_gt_init_clock_frequency(struct intel_gt *gt); +void intel_gt_fini_clock_frequency(struct intel_gt *gt); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) void intel_gt_check_clock_frequency(const struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c index f103664b71d44..4644cfe1a06ab 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c @@ -10,6 +10,7 @@ #include "intel_gt_engines_debugfs.h" #include "intel_gt_pm_debugfs.h" #include "intel_sseu_debugfs.h" +#include "iov/intel_iov_debugfs.h" #include "pxp/intel_pxp_debugfs.h" #include "uc/intel_uc_debugfs.h" @@ -84,6 +85,7 @@ void intel_gt_debugfs_register(struct intel_gt *gt) intel_sseu_debugfs_register(gt, root); intel_uc_debugfs_register(>->uc, root); + intel_iov_debugfs_register(>->iov, root); intel_pxp_debugfs_register(>->pxp, root); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index f206877964908..25565afc35c2b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ 
b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -15,6 +15,7 @@ #include #include +#include "iov/intel_iov_types.h" #include "uc/intel_uc.h" #include "i915_vma.h" @@ -72,6 +73,7 @@ struct intel_gt { struct i915_ggtt *ggtt; struct intel_uc uc; + struct intel_iov iov; struct mutex tlb_invalidate_lock; diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 9fee968d57db6..34d0de726f6e8 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -355,6 +355,9 @@ void gtt_write_workarounds(struct intel_gt *gt) struct drm_i915_private *i915 = gt->i915; struct intel_uncore *uncore = gt->uncore; + if (IS_SRIOV_VF(i915)) + return; + /* * This function is for gtt related workarounds. This function is * called on driver load and after a GPU reset, so you can place @@ -529,6 +532,9 @@ void setup_private_pat(struct intel_uncore *uncore) GEM_BUG_ON(GRAPHICS_VER(i915) < 8); + if (IS_SRIOV_VF(i915)) + return; + if (GRAPHICS_VER(i915) >= 12) tgl_setup_private_ppat(uncore); else if (GRAPHICS_VER(i915) >= 11) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 4d1b4e3c6b550..0d38a630ac3d5 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -88,6 +88,7 @@ typedef u64 gen8_pte_t; #define GEN12_PPGTT_PTE_LM BIT_ULL(11) #define GEN12_GGTT_PTE_LM BIT_ULL(1) +#define TGL_GGTT_PTE_VFID_MASK GENMASK_ULL(4, 2) /* * Cacheability Control is a 4-bit value. The low three bits are stored in bits @@ -547,6 +548,20 @@ int i915_ggtt_balloon(struct i915_ggtt *ggtt, u64 start, u64 end, struct drm_mm_node *node); void i915_ggtt_deballoon(struct i915_ggtt *ggtt, struct drm_mm_node *node); +void i915_ggtt_set_space_owner(struct i915_ggtt *ggtt, u16 vfid, + const struct drm_mm_node *node); + +#define I915_GGTT_SAVE_PTES_NO_VFID BIT(31) + +int i915_ggtt_save_ptes(struct i915_ggtt *ggtt, const struct drm_mm_node *node, void *buf, + unsigned int size, unsigned int flags); + +#define I915_GGTT_RESTORE_PTES_NEW_VFID BIT(31) +#define I915_GGTT_RESTORE_PTES_VFID_MASK GENMASK(19, 0) + +int i915_ggtt_restore_ptes(struct i915_ggtt *ggtt, const struct drm_mm_node *node, const void *buf, + unsigned int size, unsigned int flags); + int i915_ppgtt_init_hw(struct intel_gt *gt); struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt, @@ -604,6 +619,11 @@ release_pd_entry(struct i915_page_directory * const pd, const struct drm_i915_gem_object * const scratch); void gen6_ggtt_invalidate(struct i915_ggtt *ggtt); +void gen8_set_pte(void __iomem *addr, gen8_pte_t pte); +gen8_pte_t gen8_get_pte(void __iomem *addr); + +u64 ggtt_addr_to_pte_offset(u64 ggtt_addr); + int ggtt_set_pages(struct i915_vma *vma); int ppgtt_set_pages(struct i915_vma *vma); void clear_pages(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 9c253ba593c65..cafe41b84ca08 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -480,6 +480,9 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, if (GEM_DEBUG_WARN_ON(table->size > table->n_entries)) return 0; + if (IS_SRIOV_VF(i915)) + return 0; + /* WaDisableSkipCaching:skl,bxt,kbl,glk */ if (GRAPHICS_VER(i915) == 9) { int i; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 7be0002d9d707..69baee8a155d9 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -610,6 
+610,49 @@ static int gen8_reset_engines(struct intel_gt *gt, return ret; } +static int gen12_vf_reset(struct intel_gt *gt, + intel_engine_mask_t mask, + unsigned int retry) +{ + struct intel_uncore *uncore = gt->uncore; + u32 request[VF2GUC_VF_RESET_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET), + }; + const i915_reg_t reg = GEN11_SOFT_SCRATCH(0); + u32 response; + int err; + + /* No engine reset since VFs always run with GuC submission enabled */ + if (GEM_WARN_ON(mask != ALL_ENGINES)) + return -ENODEV; + + /* + * Can't use intel_guc_send_mmio() since it uses mutex, + * but we don't expect any other MMIO action in flight, + * as we use them only during init and teardown. + */ + GEM_WARN_ON(mutex_is_locked(>->uc.guc.send_mutex)); + + intel_uncore_write_fw(uncore, reg, request[0]); + intel_uncore_write_fw(uncore, GEN11_GUC_HOST_INTERRUPT, 1); + + err = __intel_wait_for_register_fw(uncore, reg, + GUC_HXG_MSG_0_ORIGIN, + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, + GUC_HXG_ORIGIN_GUC), + 1000, 0, &response); + if (unlikely(err)) { + drm_dbg(>->i915->drm, "VF reset not completed (%pe)\n", + ERR_PTR(err)); + } else if (FIELD_GET(GUC_HXG_MSG_0_TYPE, response) != GUC_HXG_TYPE_RESPONSE_SUCCESS) { + drm_dbg(>->i915->drm, "VF reset not completed (%#x)\n", + response); + } + return 0; +} + static int mock_reset(struct intel_gt *gt, intel_engine_mask_t mask, unsigned int retry) @@ -627,6 +670,8 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt) if (is_mock_gt(gt)) return mock_reset; + else if (IS_SRIOV_VF(i915)) + return gen12_vf_reset; else if (GRAPHICS_VER(i915) >= 8) return gen8_reset_engines; else if (GRAPHICS_VER(i915) >= 6) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index ab3277a3d5939..2156a5b0ffc6c 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -762,6 +762,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, { struct drm_i915_private *i915 = engine->i915; + if (IS_SRIOV_VF(i915)) + return; + wa_init_start(wal, name, engine->name); /* Applies to all engines */ @@ -1561,6 +1564,9 @@ void intel_gt_init_workarounds(struct intel_gt *gt) { struct i915_wa_list *wal = >->wa_list; + if (IS_SRIOV_VF(gt->i915)) + return; + wa_init_start(wal, "GT", "global"); gt_init_workarounds(gt, wal); wa_init_finish(wal); @@ -1945,6 +1951,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; struct i915_wa_list *w = &engine->whitelist; + if (IS_SRIOV_VF(engine->i915)) + return; + wa_init_start(w, "whitelist", engine->name); if (IS_DG2(i915)) @@ -2573,6 +2582,9 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine) { struct i915_wa_list *wal = &engine->wa_list; + if (IS_SRIOV_VF(engine->i915)) + return; + if (GRAPHICS_VER(engine->i915) < 4) return; diff --git a/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_abi.h b/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_abi.h new file mode 100644 index 0000000000000..05d34438e507a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_abi.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _ABI_IOV_ACTIONS_ABI_H_ +#define _ABI_IOV_ACTIONS_ABI_H_ + +#include "iov_messages_abi.h" + +/** + * DOC: IOV Actions + * + * TBD + */ + +/** + * DOC: 
VF2PF_HANDSHAKE + * + * This `IOV Message`_ is used by the VF to establish ABI version with the PF. + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`IOV_ACTION_VF2PF_HANDSHAKE` = 0x0001 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:16 | **MAJOR** - requested major version of the VFPF interface | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | **MINOR** - requested minor version of the VFPF interface | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:16 | **MAJOR** - agreed major version of the VFPF interface | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | **MINOR** - agreed minor version of the VFPF interface | + * +---+-------+--------------------------------------------------------------+ + */ +#define IOV_ACTION_VF2PF_HANDSHAKE 0x0001 + +#define VF2PF_HANDSHAKE_REQUEST_MSG_LEN 2u +#define VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR (0xffff << 16) +#define VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR (0xffff << 0) + +#define VF2PF_HANDSHAKE_RESPONSE_MSG_LEN 2u +#define VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0 +#define VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR (0xffff << 16) +#define VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR (0xffff << 0) + +/** + * DOC: VF2PF_QUERY_RUNTIME + * + * This `IOV Message`_ is used by the VF to query values of runtime registers. + * + * VF provides @START index to the requested register entry. + * VF can use @LIMIT to limit number of returned register entries. 
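+ *
+ * If the response reports a non-zero **REMAINING** value, not all entries
+ * fit into a single reply and the VF is expected to repeat the query with
+ * **START** advanced by the returned **COUNT** until **REMAINING** drops
+ * to zero.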
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = **LIMIT** - limit number of returned entries | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`IOV_ACTION_VF2PF_QUERY_RUNTIME` = 0x0101 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **START** - index of the first requested entry | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = **COUNT** - number of entries included in response | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **REMAINING** - number of remaining entries | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | DATA2 = **REG_OFFSET** - offset of register[START] | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | DATA3 = **REG_VALUE** - value of register[START] | + * +---+-------+--------------------------------------------------------------+ + * | | | | + * +---+-------+--------------------------------------------------------------+ + * |n-1| 31:0 | REG_OFFSET - offset of register[START + x] | + * +---+-------+--------------------------------------------------------------+ + * | n | 31:0 | REG_VALUE - value of register[START + x] | + * +---+-------+--------------------------------------------------------------+ + */ +#define IOV_ACTION_VF2PF_QUERY_RUNTIME 0x0101 + +#define VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN 2u +#define VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT GUC_HXG_REQUEST_MSG_0_DATA0 +#define VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START GUC_HXG_REQUEST_MSG_n_DATAn + +#define VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN (GUC_HXG_MSG_MIN_LEN + 1u) +#define VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN 20 // FIXME RELAY_PAYLOAD_MAX_SIZE +#define VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT GUC_HXG_RESPONSE_MSG_0_DATA0 +#define VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING GUC_HXG_RESPONSE_MSG_n_DATAn +#define VF2PF_QUERY_RUNTIME_RESPONSE_DATAn_REG_OFFSETx GUC_HXG_RESPONSE_MSG_n_DATAn +#define VF2PF_QUERY_RUNTIME_RESPONSE_DATAn_REG_VALUEx GUC_HXG_RESPONSE_MSG_n_DATAn + +#endif /* _ABI_IOV_ACTIONS_ABI_H_ */ diff --git a/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_mmio_abi.h b/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_mmio_abi.h new file mode 100644 index 0000000000000..c9cbef90d3645 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_mmio_abi.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _ABI_IOV_ACTIONS_MMIO_ABI_H_ +#define 
_ABI_IOV_ACTIONS_MMIO_ABI_H_ + +#include "iov_messages_abi.h" + +/** + * DOC: IOV MMIO Opcodes + * + * + _`IOV_OPCODE_VF2PF_MMIO_HANDSHAKE` = 0x01 + * + _`IOV_OPCODE_VF2PF_MMIO_GET_RUNTIME` = 0x10 + */ + +/** + * DOC: VF2PF_MMIO_HANDSHAKE + * + * This VF2PF MMIO message is used by the VF to establish ABI version with the PF. + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:24 | MAGIC - see VF2GUC_MMIO_RELAY_SERVICE_ | + * | +-------+--------------------------------------------------------------+ + * | | 23:16 | OPCODE = IOV_OPCODE_VF2PF_MMIO_HANDSHAKE_ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = GUC_ACTION_VF2GUC_MMIO_RELAY_SERVICE_ | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:16 | **MAJOR** - requested major version of the VFPF interface | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | **MINOR** - requested minor version of the VFPF interface | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | MBZ | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:16 | **MAJOR** - agreed major version of the VFPF interface | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | **MINOR** - agreed minor version of the VFPF interface | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | MBZ | + * +---+-------+--------------------------------------------------------------+ + */ +#define IOV_OPCODE_VF2PF_MMIO_HANDSHAKE 0x01 + +#define VF2PF_MMIO_HANDSHAKE_REQUEST_MSG_LEN 4u +#define VF2PF_MMIO_HANDSHAKE_REQUEST_MSG_1_MAJOR (0xffff << 16) +#define VF2PF_MMIO_HANDSHAKE_REQUEST_MSG_1_MINOR (0xffff << 0) + +#define VF2PF_MMIO_HANDSHAKE_RESPONSE_MSG_LEN 4u +#define VF2PF_MMIO_HANDSHAKE_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0 +#define VF2PF_MMIO_HANDSHAKE_RESPONSE_MSG_1_MAJOR (0xffff << 16) +#define VF2PF_MMIO_HANDSHAKE_RESPONSE_MSG_1_MINOR (0xffff << 0) + +/** + * DOC: VF2PF_MMIO_GET_RUNTIME + * + * This opcode can be used by VFs to request values of some runtime registers + * (fuses) that are not directly available for VFs. + * + * Only registers that are on the allow-list maintained by the PF are available. 
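+ *
+ * Up to three register offsets may be requested in a single message; unused
+ * offset slots are set to zero and their corresponding values are returned
+ * as zero.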
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:24 | MAGIC - see VF2GUC_MMIO_RELAY_SERVICE_ | + * | +-------+--------------------------------------------------------------+ + * | | 23:16 | OPCODE = IOV_OPCODE_VF2PF_MMIO_GET_RUNTIME_ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = GUC_ACTION_VF2GUC_MMIO_RELAY_SERVICE_ | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **OFFSET1** - offset of register1 (can't be zero) | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **OFFSET2** - offset of register2 (or zero) | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **OFFSET3** - offset of register3 (or zero) | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:24 | MAGIC - see _VF2GUC_MMIO_RELAY_SERVICE | + * | +-------+--------------------------------------------------------------+ + * | | 23:0 | MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VALUE1** - value of the register1 | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **VALUE2** - value of the register2 (or zero) | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **VALUE3** - value of the register3 (or zero) | + * +---+-------+--------------------------------------------------------------+ + */ +#define IOV_OPCODE_VF2PF_MMIO_GET_RUNTIME 0x10 + +#define VF2PF_MMIO_GET_RUNTIME_REQUEST_MSG_LEN 4u +#define VF2PF_MMIO_GET_RUNTIME_REQUEST_MSG_n_OFFSETn GUC_HXG_REQUEST_MSG_n_DATAn +#define VF2PF_MMIO_GET_RUNTIME_REQUEST_MSG_NUM_OFFSET 3u + +#define VF2PF_MMIO_GET_RUNTIME_RESPONSE_MSG_LEN 4u +#define VF2PF_MMIO_GET_RUNTIME_RESPONSE_MSG_n_VALUEn GUC_HXG_RESPONSE_MSG_n_DATAn +#define VF2PF_MMIO_GET_RUNTIME_RESPONSE_MSG_NUM_VALUE 3u + +#endif /* _ABI_IOV_ACTIONS_MMIO_ABI_H_ */ diff --git a/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_selftest_abi.h b/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_selftest_abi.h new file mode 100644 index 0000000000000..d0bd3a9c09b84 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/abi/iov_actions_selftest_abi.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _ABI_IOV_ACTIONS_SELFTEST_ABI_H_ +#define _ABI_IOV_ACTIONS_SELFTEST_ABI_H_ + +#include "iov_actions_abi.h" + +/** + * DOC: VF2PF_PF_ST_ACTION + * + * This `IOV Message`_ is used by VF to initiate some selftest action on the PF. 
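+ * It is intended for driver selftests only; the related i915 code is built
+ * only when CONFIG_DRM_I915_SELFTEST is enabled.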
+ * + * See `IOV SELFTEST Opcodes`_ for available selftest operations. + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = **OPCODE** - see `IOV SELFTEST Opcodes`_ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = VF2PF_PF_ST_ACTION_ = TBD | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **ST_DATA1** - VF/PF selftest message data | + * +---+-------+--------------------------------------------------------------+ + * |...| | | + * +---+-------+--------------------------------------------------------------+ + * | n | 31:0 | DATAn = **ST_DATAn** - VF/PF selftest message data | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **RET_DATA1** - PF/VF selftest return data | + * +---+-------+--------------------------------------------------------------+ + * |...| | | + * +---+-------+--------------------------------------------------------------+ + * | n | 31:0 | DATAn = **RET_DATAn** - PF/VF selftest return data | + * +---+-------+--------------------------------------------------------------+ + */ +#define IOV_ACTION_VF2PF_PF_ST_ACTION 0x0DEB // FIXME + +#define VF2PF_PF_ST_ACTION_REQUEST_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN +#define VF2PF_PF_ST_ACTION_REQUEST_MSG_MAX_LEN 20 // FIXME RELAY_PAYLOAD_MAX_SIZE +#define VF2PF_PF_ST_ACTION_REQUEST_MSG_0_OPCODE GUC_HXG_REQUEST_MSG_0_DATA0 +#define VF2PF_PF_ST_ACTION_REQUEST_MSG_n_ST_DATAn GUC_HXG_RESPONSE_MSG_n_DATAn + +#define VF2PF_PF_ST_ACTION_RESPONSE_MSG_MIN_LEN GUC_HXG_MSG_MIN_LEN +#define VF2PF_PF_ST_ACTION_RESPONSE_MSG_MAX_LEN 20 // FIXME RELAY_PAYLOAD_MAX_SIZE +#define VF2PF_PF_ST_ACTION_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0 +#define VF2PF_PF_ST_ACTION_RESPONSE_MSG_n_RET_DATAn GUC_HXG_RESPONSE_MSG_n_DATAn + +/** + * DOC: IOV SELFTEST Opcodes + * + * - IOV_OPCODE_ST_GET_GGTT_PTE_ = 1 + */ + +/** + * DOC: IOV_OPCODE_ST_GET_GGTT_PTE + * + * Action to get value of PTE, for a given GGTT address, from PF. + * + * See VF2PF_PF_ST_ACTION_. + * + * Note: GGTT address must be aligned to 4K, or action will fail + * with IOV_ERROR_INVALID_ARGUMENT. 
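+ *
+ * The GGTT address is split into **ADDRESS_LO**/**ADDRESS_HI** dwords in the
+ * request and the 64-bit PTE is returned the same way, i.e. the caller
+ * reassembles it as ((u64)PTE_HI << 32) | PTE_LO.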
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | OPCODE = IOV_OPCODE_ST_GET_GGTT_PTE_ = 1 | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = VF2PF_PF_ST_ACTION_ = TBD | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **ADDRESS_LO** - lower bits of GGTT address | + * | | | (aligned to 4K) | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | DATA2 = **ADDRESS_HI** - upper bits of GGTT address | + * | | | (aligned to 4K) | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **PTE_LO** - lower bits of returned PTE | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | DATA2 = **PTE_HI** - upper bits of returned PTE | + * +---+-------+--------------------------------------------------------------+ + */ +#define IOV_OPCODE_ST_GET_GGTT_PTE 0x1 + +#define VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_LEN (VF2PF_PF_ST_ACTION_REQUEST_MSG_MIN_LEN + \ + 2u) +#define VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_1_ADDRESS_LO VF2PF_PF_ST_ACTION_REQUEST_MSG_n_ST_DATAn +#define VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_2_ADDRESS_HI VF2PF_PF_ST_ACTION_REQUEST_MSG_n_ST_DATAn + +#define VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_LEN (VF2PF_PF_ST_ACTION_RESPONSE_MSG_MIN_LEN + \ + 2u) +#define VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_1_PTE_LO VF2PF_PF_ST_ACTION_RESPONSE_MSG_n_RET_DATAn +#define VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_2_PTE_HI VF2PF_PF_ST_ACTION_RESPONSE_MSG_n_RET_DATAn + +#endif /* _ABI_IOV_ACTIONS_SELFTEST_ABI_H_ */ diff --git a/drivers/gpu/drm/i915/gt/iov/abi/iov_communication_abi.h b/drivers/gpu/drm/i915/gt/iov/abi/iov_communication_abi.h new file mode 100644 index 0000000000000..12863798d66f3 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/abi/iov_communication_abi.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _ABI_IOV_COMMUNICATION_ABI_H_ +#define _ABI_IOV_COMMUNICATION_ABI_H_ + +#include "gt/uc/abi/guc_communication_ctb_abi.h" + +/** + * DOC: IOV Communication + * + * The communication between VFs and PF is based on the relay messages with GuC + * acting a proxy agent. All relay messages are defined as `CTB HXG Message`_. + * The `IOV Message`_ is embedded in these messages as opaque payload. + * + * To send `IOV Message`_ to the PF, VFs are using `VF2GUC_RELAY_TO_PF`_ + * that takes the message identifier as additional parameter. 
+ * + * +--------------------------------------------------------------------------+ + * | `CTB Message`_ | + * | | + * +===+======================================================================+ + * | | `CTB HXG Message`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | `HXG Message`_ | + * | | | | + * | | +---+--------------------------------------------------------------+ + * | | | | `HXG Request`_ | + * | | | | | + * | | | +---+----------------------------------------------------------+ + * | | | | | `VF2GUC_RELAY_TO_PF`_ | + * | | | | | | + * | | | | +------------+---------------------------------------------+ + * | | | | | | +----------------------------+ | + * | | | | | Message ID | | `IOV Message`_ | | + * | | | | | | +----------------------------+ | + * +---+---+---+---+------------+---------------------------------------------+ + * + * The `IOV Message`_ from a VF is delivered to the PF in `GUC2PF_RELAY_FROM_VF`_. + * This message contains also identifier of the origin VF and message identifier + * that is used in any replies. + * + * +--------------------------------------------------------------------------+ + * | `CTB Message`_ | + * | | + * +===+======================================================================+ + * | | `CTB HXG Message`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | `HXG Message`_ | + * | | | | + * | | +---+--------------------------------------------------------------+ + * | | | | `HXG Request`_ | + * | | | | | + * | | | +---+----------------------------------------------------------+ + * | | | | | `GUC2PF_RELAY_FROM_VF`_ | + * | | | | | | + * | | | | +------------+------------+--------------------------------+ + * | | | | | | | +----------------------------+ | + * | | | | | Origin | Message ID | | `IOV Message`_ | | + * | | | | | | | +----------------------------+ | + * +---+---+---+---+------------+------------+--------------------------------+ + * + * To send `IOV Message`_ to the particular VF, PF is using `PF2GUC_RELAY_TO_VF`_ + * that takes target VF identifier and the message identifier. + * + * +--------------------------------------------------------------------------+ + * | `CTB Message`_ | + * | | + * +===+======================================================================+ + * | | `CTB HXG Message`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | `HXG Message`_ | + * | | | | + * | | +---+--------------------------------------------------------------+ + * | | | | `HXG Request`_ | + * | | | | | + * | | | +---+----------------------------------------------------------+ + * | | | | | `PF2GUC_RELAY_TO_VF`_ | + * | | | | | | + * | | | | +------------+------------+--------------------------------+ + * | | | | | | | +----------------------------+ | + * | | | | | Target | Message ID | | `IOV Message`_ | | + * | | | | | | | +----------------------------+ | + * +---+---+---+---+------------+------------+--------------------------------+ + * + * The `IOV Message`_ from the PF is delivered to VFs in `GUC2VF_RELAY_FROM_PF`_. + * The message identifier is used to match IOV requests/response messages. 
+ * + * +--------------------------------------------------------------------------+ + * | `CTB Message`_ | + * | | + * +===+======================================================================+ + * | | `CTB HXG Message`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | `HXG Message`_ | + * | | | | + * | | +---+--------------------------------------------------------------+ + * | | | | `HXG Request`_ | + * | | | | | + * | | | +---+----------------------------------------------------------+ + * | | | | | `GUC2VF_RELAY_FROM_PF`_ | + * | | | | | | + * | | | | +------------+---------------------------------------------+ + * | | | | | | +----------------------------+ | + * | | | | | Message ID | | `IOV Message`_ | | + * | | | | | | +----------------------------+ | + * +---+---+---+---+------------+---------------------------------------------+ + */ + +#endif /* _ABI_IOV_COMMUNICATION_ABI_H_ */ diff --git a/drivers/gpu/drm/i915/gt/iov/abi/iov_communication_mmio_abi.h b/drivers/gpu/drm/i915/gt/iov/abi/iov_communication_mmio_abi.h new file mode 100644 index 0000000000000..c62012f86be4a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/abi/iov_communication_mmio_abi.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _ABI_IOV_COMMUNICATION_MMIO_ABI_H_ +#define _ABI_IOV_COMMUNICATION_MMIO_ABI_H_ + +#include "gt/uc/abi/guc_communication_ctb_abi.h" + +/** + * DOC: IOV MMIO Communication + * + * The communication between VF and PF is based on GuC acting as a proxy agent. + * + * In case when `CTB based communication`_ is not yet available on VF side, + * VF may initiate IOV relay using `GuC MMIO based communication`_. + * + * The `MMIO HXG Message`_ `VF2GUC_MMIO_RELAY_SERVICE`_ allows the VF to pass + * @OPCODE and @DATA1..@DATA3 as a IOV relay data. Additional @MAGIC field will + * be returned in any future replies and will help detect communication mismatch. + * + * +--------------------------------------------------------------------------+ + * | `MMIO HXG Message`_ | + * | | + * +===+======================================================================+ + * | | `HXG Request`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | `VF2GUC_MMIO_RELAY_SERVICE`_ | + * | | | | + * | | +---+--------------------------------------------------------------+ + * | | | | MAGIC | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | OPCODE | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | DATA1..DATA3 | + * | | | | | + * +---+---+---+--------------------------------------------------------------+ + * + * The IOV relay data together with requesting VF identifier is then send by the + * GuC to the PF using `CTB HXG Message`_ GUC2PF_MMIO_RELAY_SERVICE_ for further + * processing. 
+ * + * +--------------------------------------------------------------------------+ + * | `CTB HXG Message`_ | + * | | + * +===+======================================================================+ + * | | `HXG Event`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | `GUC2PF_MMIO_RELAY_SERVICE`_ | + * | | | | + * | | +---+--------------------------------------------------------------+ + * | | | | VFID | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | MAGIC | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | OPCODE | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | DATA1..DATA3 | + * | | | | | + * +---+---+---+--------------------------------------------------------------+ + * + * After completing processing of the IOV relay data PF shall reply to the VF + * using `CTB HXG Message`_ `PF2GUC_MMIO_RELAY_SUCCESS`_ + * + * +--------------------------------------------------------------------------+ + * | `CTB HXG Message`_ | + * | | + * +===+======================================================================+ + * | | `HXG Request`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | `PF2GUC_MMIO_RELAY_SUCCESS`_ | + * | | | | + * | | +---+--------------------------------------------------------------+ + * | | | | VFID | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | MAGIC | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | DATA0..DATA3 | + * | | | | | + * +---+---+---+--------------------------------------------------------------+ + * + * or `CTB HXG Message`_ `PF2GUC_MMIO_RELAY_FAILURE`_ + * + * +--------------------------------------------------------------------------+ + * | `CTB HXG Message`_ | + * | | + * +===+======================================================================+ + * | | `HXG Request`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | `PF2GUC_MMIO_RELAY_FAILURE`_ | + * | | | | + * | | +---+--------------------------------------------------------------+ + * | | | | VFID | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | MAGIC | + * | | | | | + * | | | +--------------------------------------------------------------+ + * | | | | FAULT | + * | | | | | + * +---+---+---+--------------------------------------------------------------+ + * + * Above PF messages will be converted by the GuC back to `MMIO HXG Message`_ + * `HXG Response`_ + * + * +--------------------------------------------------------------------------+ + * | `MMIO HXG Message`_ | + * | | + * +===+======================================================================+ + * | | `HXG Response`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | MAGIC | + * | | | | + * | | +------------------------------------------------------------------+ + * | | | DATA0..DATA3 | + * | | | | + * +---+---+------------------------------------------------------------------+ + * + * or `MMIO HXG Message`_ `HXG Failure`_ + * + * +--------------------------------------------------------------------------+ + * | `MMIO HXG Message`_ | + * | | + * +===+======================================================================+ + * | | 
`HXG Failure`_ | + * | | | + * | +---+------------------------------------------------------------------+ + * | | | MAGIC (part of the HINT) | + * | | | | + * | | +------------------------------------------------------------------+ + * | | | FAULT (part of the ERROR) | + * | | | | + * +---+---+------------------------------------------------------------------+ + * + */ + +#endif /* _ABI_IOV_COMMUNICATION_MMIO_ABI_H_ */ diff --git a/drivers/gpu/drm/i915/gt/iov/abi/iov_errors_abi.h b/drivers/gpu/drm/i915/gt/iov/abi/iov_errors_abi.h new file mode 100644 index 0000000000000..3cc53bdb746f1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/abi/iov_errors_abi.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _ABI_IOV_ERRORS_ABI_H_ +#define _ABI_IOV_ERRORS_ABI_H_ + +/** + * DOC: IOV Error Codes + * + * IOV uses error codes that mostly match errno values. + */ + +#define IOV_ERROR_UNDISCLOSED 0 +#define IOV_ERROR_OPERATION_NOT_PERMITTED 1 /* EPERM */ +#define IOV_ERROR_PERMISSION_DENIED 13 /* EACCES */ +#define IOV_ERROR_INVALID_ARGUMENT 22 /* EINVAL */ +#define IOV_ERROR_INVALID_REQUEST_CODE 56 /* EBADRQC */ +#define IOV_ERROR_NO_DATA_AVAILABLE 61 /* ENODATA */ +#define IOV_ERROR_PROTOCOL_ERROR 71 /* EPROTO */ +#define IOV_ERROR_MESSAGE_SIZE 90 /* EMSGSIZE */ + +#endif /* _ABI_IOV_ERRORS_ABI_H_ */ diff --git a/drivers/gpu/drm/i915/gt/iov/abi/iov_messages_abi.h b/drivers/gpu/drm/i915/gt/iov/abi/iov_messages_abi.h new file mode 100644 index 0000000000000..08c182926a120 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/abi/iov_messages_abi.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _ABI_IOV_MESSAGES_ABI_H_ +#define _ABI_IOV_MESSAGES_ABI_H_ + +#include "gt/uc/abi/guc_messages_abi.h" + +/** + * DOC: IOV Message + * + * `IOV Message`_ is used in `IOV Communication`_. + * Format of the `IOV Message`_ follows format of the generic `HXG Message`_. + * + * +--------------------------------------------------------------------------+ + * | `IOV Message`_ | + * +==========================================================================+ + * | `HXG Message`_ | + * +--------------------------------------------------------------------------+ + * + * In particular format of the _`IOV Request` is same as the `HXG Request`_. + * Supported actions codes are listed in `IOV Actions`_. + * + * Format of the _`IOV Failure` is same as `HXG Failure`_. + * See `IOV Error Codes`_ for possible error codes. 
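+ *
+ * Since the `IOV Error Codes`_ deliberately mirror errno values, a receiver
+ * can translate a reported fault almost directly. A minimal sketch of such a
+ * helper (illustrative only, not part of this ABI; the substitute errno used
+ * for IOV_ERROR_UNDISCLOSED is an arbitrary choice here) could be::
+ *
+ *	static int iov_fault_to_errno(u32 fault)
+ *	{
+ *		return fault ? -(int)fault : -EIO;
+ *	}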
+ */ + +#endif /* _ABI_IOV_MESSAGES_ABI_H_ */ diff --git a/drivers/gpu/drm/i915/gt/iov/abi/iov_version_abi.h b/drivers/gpu/drm/i915/gt/iov/abi/iov_version_abi.h new file mode 100644 index 0000000000000..1486616b30719 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/abi/iov_version_abi.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _ABI_IOV_VERSION_ABI_H_ +#define _ABI_IOV_VERSION_ABI_H_ + +#define IOV_VERSION_LATEST_MAJOR 1u +#define IOV_VERSION_LATEST_MINOR 0u + +#endif /* _ABI_IOV_VERSION_ABI_H_ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov.c b/drivers/gpu/drm/i915/gt/iov/intel_iov.c new file mode 100644 index 0000000000000..50a7019755197 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov.c @@ -0,0 +1,293 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "intel_iov.h" +#include "intel_iov_provisioning.h" +#include "intel_iov_query.h" +#include "intel_iov_relay.h" +#include "intel_iov_service.h" +#include "intel_iov_utils.h" + +/** + * intel_iov_init_early - Prepare IOV data. + * @iov: the IOV struct + * + * Early initialization of the I/O Virtualization data. + */ +void intel_iov_init_early(struct intel_iov *iov) +{ + if (intel_iov_is_pf(iov)) { + intel_iov_provisioning_init_early(iov); + intel_iov_service_init_early(iov); + } + + intel_iov_relay_init_early(&iov->relay); +} + +/** + * intel_iov_release - Release IOV data. + * @iov: the IOV struct + * + * This function will release any data prepared in @intel_iov_init_early. + */ +void intel_iov_release(struct intel_iov *iov) +{ + if (intel_iov_is_pf(iov)) { + intel_iov_service_release(iov); + intel_iov_provisioning_release(iov); + } +} + +/** + * intel_iov_init_mmio - Initialize IOV based on MMIO data. + * @iov: the IOV struct + * + * On VF this function will read SR-IOV INIT message from GuC. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_init_mmio(struct intel_iov *iov) +{ + int ret; + + if (intel_iov_is_vf(iov)) { + ret = intel_iov_query_bootstrap(iov); + if (unlikely(ret)) + return ret; + ret = intel_iov_query_config(iov); + if (unlikely(ret)) + return ret; + ret = intel_iov_query_runtime(iov, true); + if (unlikely(ret)) + return ret; + } + + return 0; +} + +static int vf_tweak_guc_submission(struct intel_iov *iov) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + err = intel_guc_submission_limit_ids(iov_to_guc(iov), + iov->vf.config.num_ctxs); + if (unlikely(err)) + IOV_ERROR(iov, "Failed to limit %s to %u (%pe)\n", + "contexts", iov->vf.config.num_ctxs, ERR_PTR(err)); + + return err; +} + +/** + * intel_iov_init - Initialize IOV. + * @iov: the IOV struct + * + * On PF this function performs initial partitioning of the shared resources + * that can't be changed later (GuC submission contexts) to allow early PF + * provisioning. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_init(struct intel_iov *iov) +{ + if (intel_iov_is_pf(iov)) + intel_iov_provisioning_init(iov); + + if (intel_iov_is_vf(iov)) + vf_tweak_guc_submission(iov); + + return 0; +} + +/** + * intel_iov_fini - Cleanup IOV. + * @iov: the IOV struct + * + * This function will cleanup any data prepared in @intel_iov_init. 
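+ *
+ * For context, the init functions in this file are expected to be called in
+ * roughly the following order during driver probe (the exact call sites live
+ * outside this file): intel_iov_init_early(), intel_iov_init_mmio(),
+ * intel_iov_init_ggtt(), intel_iov_init(), intel_iov_init_hw() and finally
+ * intel_iov_init_late(), with the matching release/fini functions called in
+ * reverse order on driver removal.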
+ */ +void intel_iov_fini(struct intel_iov *iov) +{ + if (intel_iov_is_pf(iov)) + intel_iov_provisioning_fini(iov); +} + +static int vf_balloon_ggtt(struct intel_iov *iov) +{ + struct i915_ggtt *ggtt = iov_to_gt(iov)->ggtt; + u64 start, end; + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + /* + * We can only use part of the GGTT as allocated by PF. + * + * 0 GUC_GGTT_TOP + * |<------------ Total GGTT size ------------------>| + * + * |<-- VF GGTT base -->|<- size ->| + * + * +--------------------+----------+-----------------+ + * |////////////////////| block |\\\\\\\\\\\\\\\\\| + * +--------------------+----------+-----------------+ + * + * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->| + */ + + start = 0; + end = iov->vf.config.ggtt_base; + err = i915_ggtt_balloon(ggtt, start, end, &iov->vf.ggtt_balloon[0]); + if (unlikely(err)) + return err; + + start = iov->vf.config.ggtt_base + iov->vf.config.ggtt_size; + end = GUC_GGTT_TOP; + err = i915_ggtt_balloon(ggtt, start, end, &iov->vf.ggtt_balloon[1]); + + return err; +} + +static void vf_deballoon_ggtt(struct intel_iov *iov) +{ + struct i915_ggtt *ggtt = iov_to_gt(iov)->ggtt; + + i915_ggtt_deballoon(ggtt, &iov->vf.ggtt_balloon[1]); + i915_ggtt_deballoon(ggtt, &iov->vf.ggtt_balloon[0]); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +static int igt_vf_iov_own_ggtt(struct intel_iov *iov, bool sanitycheck); +#endif + +/** + * intel_iov_init_ggtt - Initialize GGTT for SR-IOV. + * @iov: the IOV struct + * + * On the VF this function will balloon GGTT to make sure only assigned region + * will be used for allocations. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_init_ggtt(struct intel_iov *iov) +{ + int err; + + if (intel_iov_is_vf(iov)) { + err = vf_balloon_ggtt(iov); + if (unlikely(err)) + return err; +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) + igt_vf_iov_own_ggtt(iov, true); +#endif + } + + return 0; +} + +/** + * intel_iov_fini_ggtt - Cleanup SR-IOV hardware support. + * @iov: the IOV struct + */ +void intel_iov_fini_ggtt(struct intel_iov *iov) +{ + if (intel_iov_is_vf(iov)) + vf_deballoon_ggtt(iov); +} + +static void pf_enable_ggtt_guest_update(struct intel_iov *iov) +{ + struct intel_gt *gt = iov_to_gt(iov); + + /* Guest Direct GGTT Update Enable */ + intel_uncore_write(gt->uncore, GEN12_VIRTUAL_CTRL_REG, + GEN12_GUEST_GTT_UPDATE_EN); +} + +/** + * intel_iov_init_hw - Initialize SR-IOV hardware support. + * @iov: the IOV struct + * + * PF must configure hardware to enable VF's access to GGTT. + * PF also updates here runtime info (snapshot of registers values) + * that will be shared with VFs. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_init_hw(struct intel_iov *iov) +{ + int err; + + if (intel_iov_is_pf(iov)) { + pf_enable_ggtt_guest_update(iov); + intel_iov_service_update(iov); + intel_iov_provisioning_restart(iov); + } + + if (intel_iov_is_vf(iov)) { + err = intel_iov_query_runtime(iov, false); + if (unlikely(err)) + return -EIO; + } + + return 0; +} + +/** + * intel_iov_fini_hw - Cleanup data initialized in iov_init_hw. + * @iov: the IOV struct + */ +void intel_iov_fini_hw(struct intel_iov *iov) +{ + if (intel_iov_is_pf(iov)) + intel_iov_service_reset(iov); + + if (intel_iov_is_vf(iov)) + intel_iov_query_fini(iov); +} + +/** + * intel_iov_init_late - Late initialization of SR-IOV support. + * @iov: the IOV struct + * + * This function continues necessary initialization of the SR-IOV + * support in the driver and the hardware. 
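+ *
+ * Note that both PF and VF require GuC submission to be available at this
+ * point: on the PF its absence is only recorded as a PF error (probe can
+ * continue), while on the VF it makes this function fail with -EIO.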
+ * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_init_late(struct intel_iov *iov) +{ + struct intel_gt *gt = iov_to_gt(iov); + + if (intel_iov_is_pf(iov)) { + /* + * GuC submission must be working on PF to allow VFs to work. + * If unavailable, mark as PF error, but it's safe to continue. + */ + if (unlikely(!intel_uc_uses_guc_submission(&gt->uc))) { + pf_update_status(iov, -EIO, "GuC"); + return 0; + } + } + + if (intel_iov_is_vf(iov)) { + /* + * If we try to start VF driver without GuC submission enabled, + * then use -EIO error to keep driver alive but without GEM. + */ + if (!intel_uc_uses_guc_submission(&gt->uc)) { + dev_warn(gt->i915->drm.dev, "GuC submission is %s\n", + enableddisabled(false)); + return -EIO; + } + } + + return 0; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "./selftests/iov_live_selftest_ggtt.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov.h b/drivers/gpu/drm/i915/gt/iov/intel_iov.h new file mode 100644 index 0000000000000..3cc5d18f470c7 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_H__ +#define __INTEL_IOV_H__ + +struct intel_iov; + +void intel_iov_init_early(struct intel_iov *iov); +void intel_iov_release(struct intel_iov *iov); + +int intel_iov_init_mmio(struct intel_iov *iov); +int intel_iov_init_ggtt(struct intel_iov *iov); +void intel_iov_fini_ggtt(struct intel_iov *iov); +int intel_iov_init(struct intel_iov *iov); +void intel_iov_fini(struct intel_iov *iov); + +int intel_iov_init_hw(struct intel_iov *iov); +void intel_iov_fini_hw(struct intel_iov *iov); +int intel_iov_init_late(struct intel_iov *iov); + +#endif /* __INTEL_IOV_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_debugfs.c b/drivers/gpu/drm/i915/gt/iov/intel_iov_debugfs.c new file mode 100644 index 0000000000000..f9bfeeb28cfe7 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_debugfs.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <linux/debugfs.h> + +#include "gt/intel_gt_debugfs.h" +#include "intel_iov.h" +#include "intel_iov_utils.h" +#include "intel_iov_debugfs.h" +#include "intel_iov_provisioning.h" +#include "intel_iov_query.h" +#include "intel_iov_relay.h" + +static bool eval_is_pf(void *data) +{ + struct intel_iov *iov = data; + + return intel_iov_is_pf(iov); +} + +static bool eval_is_vf(void *data) +{ + struct intel_iov *iov = data; + + return intel_iov_is_vf(iov); +} + +static int ggtt_provisioning_show(struct seq_file *m, void *data) +{ + struct intel_iov *iov = m->private; + struct drm_printer p = drm_seq_file_printer(m); + + return intel_iov_provisioning_print_ggtt(iov, &p); +} +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(ggtt_provisioning); + +static int ctxs_provisioning_show(struct seq_file *m, void *data) +{ + struct intel_iov *iov = m->private; + struct drm_printer p = drm_seq_file_printer(m); + + return intel_iov_provisioning_print_ctxs(iov, &p); +} +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(ctxs_provisioning); + +static int dbs_provisioning_show(struct seq_file *m, void *data) +{ + struct intel_iov *iov = m->private; + struct drm_printer p = drm_seq_file_printer(m); + + return intel_iov_provisioning_print_dbs(iov, &p); +} +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(dbs_provisioning); + +static int vf_self_config_show(struct seq_file *m, void *data) +{ + struct intel_iov *iov = m->private; + struct drm_printer p = drm_seq_file_printer(m); + 
intel_iov_query_print_config(iov, &p); + return 0; +} +DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(vf_self_config); + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_IOV) + +#define RELAY_MAX_LEN 60 + +static ssize_t relay_to_vf_write(struct file *file, const char __user *user, + size_t count, loff_t *ppos) +{ + struct intel_iov *iov = file->private_data; + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + u32 message[1 + RELAY_MAX_LEN]; /* target + message */ + u32 reply[RELAY_MAX_LEN]; + int ret; + + if (*ppos) + return 0; + + ret = from_user_to_u32array(user, count, message, ARRAY_SIZE(message)); + if (ret < 0) + return ret; + + if (ret < 1 + GUC_HXG_MSG_MIN_LEN) + return -EINVAL; + + if (message[0] == PFID) + return -EINVAL; + + with_intel_runtime_pm(rpm, wakeref) + ret = intel_iov_relay_send_to_vf(&iov->relay, message[0], + message + 1, ret - 1, + reply, ARRAY_SIZE(reply)); + if (ret < 0) + return ret; + + return count; +} + +static const struct file_operations relay_to_vf_fops = { + .write = relay_to_vf_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t relay_to_pf_write(struct file *file, const char __user *user, + size_t count, loff_t *ppos) +{ + struct intel_iov *iov = file->private_data; + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + u32 message[RELAY_MAX_LEN]; + u32 reply[RELAY_MAX_LEN]; + int ret; + + if (*ppos) + return 0; + + ret = from_user_to_u32array(user, count, message, ARRAY_SIZE(message)); + if (ret < 0) + return ret; + + if (ret < GUC_HXG_MSG_MIN_LEN) + return -EINVAL; + + with_intel_runtime_pm(rpm, wakeref) + ret = intel_iov_relay_send_to_pf(&iov->relay, message, ret, + reply, ARRAY_SIZE(reply)); + if (ret < 0) + return ret; + + return count; +} + +static const struct file_operations relay_to_pf_fops = { + .write = relay_to_pf_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t relocate_ggtt_write(struct file *file, const char __user *user, + size_t count, loff_t *ppos) +{ + struct intel_iov *iov = file->private_data; + u32 vfid; + int ret; + + if (*ppos) + return 0; + + ret = kstrtou32_from_user(user, count, 0, &vfid); + if (ret < 0) + return ret; + + if (!vfid || vfid > pf_get_totalvfs(iov)) + return -EINVAL; + + ret = intel_iov_provisioning_move_ggtt(iov, vfid); + if (ret < 0) + return ret; + + return count; +} + +static const struct file_operations relocate_ggtt_fops = { + .write = relocate_ggtt_write, + .open = simple_open, + .llseek = default_llseek, +}; + +#endif /* CONFIG_DRM_I915_DEBUG_IOV */ + +/** + * intel_iov_debugfs_register - Register IOV specific entries in GT debugfs. + * @iov: the IOV struct + * @root: the GT debugfs root directory entry + * + * Some IOV entries are GT related so better to show them under GT debugfs. 
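+ *
+ * All entries are grouped under an "iov" directory: provisioning summaries
+ * (like "ggtt_provisioning") on the PF, "self_config" on a VF, and extra
+ * relay/relocation knobs when CONFIG_DRM_I915_DEBUG_IOV is enabled. An
+ * illustrative read (the debugfs path may differ per kernel and card) is::
+ *
+ *	cat /sys/kernel/debug/dri/0/gt/iov/ggtt_provisioning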
+ */ +void intel_iov_debugfs_register(struct intel_iov *iov, struct dentry *root) +{ + static const struct intel_gt_debugfs_file files[] = { + { "ggtt_provisioning", &ggtt_provisioning_fops, eval_is_pf }, + { "contexts_provisioning", &ctxs_provisioning_fops, eval_is_pf }, + { "doorbells_provisioning", &dbs_provisioning_fops, eval_is_pf }, + { "self_config", &vf_self_config_fops, eval_is_vf }, +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_IOV) + { "relay_to_vf", &relay_to_vf_fops, eval_is_pf }, + { "relay_to_pf", &relay_to_pf_fops, eval_is_vf }, + { "relocate_ggtt", &relocate_ggtt_fops, eval_is_pf }, +#endif + }; + struct dentry *dir; + + if (unlikely(!root)) + return; + + if (!intel_iov_is_enabled(iov)) + return; + + dir = debugfs_create_dir("iov", root); + if (IS_ERR(dir)) + return; + + intel_gt_debugfs_register_files(dir, files, ARRAY_SIZE(files), iov); +} diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_debugfs.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_debugfs.h new file mode 100644 index 0000000000000..d90567ff74e76 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_debugfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_DEBUGFS_H__ +#define __INTEL_IOV_DEBUGFS_H__ + +struct intel_iov; +struct dentry; + +void intel_iov_debugfs_register(struct intel_iov *iov, struct dentry *root); + +#endif /* __INTEL_IOV_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_event.c b/drivers/gpu/drm/i915/gt/iov/intel_iov_event.c new file mode 100644 index 0000000000000..430a79c0edb5b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_event.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "intel_iov.h" +#include "intel_iov_event.h" +#include "intel_iov_utils.h" + +#define I915_UEVENT_THRESHOLD_EXCEEDED "THRESHOLD_EXCEEDED" +#define I915_UEVENT_THRESHOLD_ID "THRESHOLD_ID" +#define I915_UEVENT_VFID "VF_ID" + +static void pf_emit_threshold_uevent(struct intel_iov *iov, u32 vfid, u32 threshold) +{ + struct kobject *kobj = &iov_to_i915(iov)->drm.primary->kdev->kobj; + char *envp[] = { + I915_UEVENT_THRESHOLD_EXCEEDED"=1", + kasprintf(GFP_KERNEL, I915_UEVENT_THRESHOLD_ID"=%#x", threshold), + kasprintf(GFP_KERNEL, I915_UEVENT_VFID"=%u", vfid), + NULL, + }; + + kobject_uevent_env(kobj, KOBJ_CHANGE, envp); + + kfree(envp[1]); + kfree(envp[2]); +} + +static int pf_handle_vf_threshold_event(struct intel_iov *iov, u32 vfid, u32 threshold) +{ + if (unlikely(!vfid || vfid > pf_get_totalvfs(iov))) + return -EINVAL; + + IOV_DEBUG(iov, "VF%u threshold %04x\n", vfid, threshold); + + if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) + pf_emit_threshold_uevent(iov, vfid, threshold); + + return 0; +} + +/** + * intel_iov_event_process_guc2pf - Handle adverse event notification from GuC. + * @iov: the IOV struct + * @msg: message from the GuC + * @len: length of the message + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
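+ *
+ * Note: when CONFIG_DRM_I915_SELFTEST is enabled, a valid notification also
+ * emits a KOBJ_CHANGE uevent on the DRM device, with an environment of the
+ * form (the values below are just an example)::
+ *
+ *	THRESHOLD_EXCEEDED=1
+ *	THRESHOLD_ID=0x1000
+ *	VF_ID=1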
+ */ +int intel_iov_event_process_guc2pf(struct intel_iov *iov, + const u32 *msg, u32 len) +{ + u32 vfid; + u32 threshold; + + GEM_BUG_ON(!len); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_GUC); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) != GUC_HXG_TYPE_EVENT); + GEM_BUG_ON(FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) != GUC_ACTION_GUC2PF_ADVERSE_EVENT); + + if (unlikely(!intel_iov_is_pf(iov))) + return -EPROTO; + + if (unlikely(FIELD_GET(GUC2PF_ADVERSE_EVENT_EVENT_MSG_0_MBZ, msg[0]))) + return -EPFNOSUPPORT; + + if (unlikely(len != GUC2PF_ADVERSE_EVENT_EVENT_MSG_LEN)) + return -EPROTO; + + vfid = FIELD_GET(GUC2PF_ADVERSE_EVENT_EVENT_MSG_1_VFID, msg[1]); + threshold = FIELD_GET(GUC2PF_ADVERSE_EVENT_EVENT_MSG_2_THRESHOLD, msg[2]); + + return pf_handle_vf_threshold_event(iov, vfid, threshold); +} diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_event.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_event.h new file mode 100644 index 0000000000000..331d62a28d169 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_event.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_IOV_EVENT_H__ +#define __INTEL_IOV_EVENT_H__ + +#include + +struct intel_iov; + +int intel_iov_event_process_guc2pf(struct intel_iov *iov, const u32 *msg, u32 len); + +#endif /* __INTEL_IOV_EVENT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_provisioning.c b/drivers/gpu/drm/i915/gt/iov/intel_iov_provisioning.c new file mode 100644 index 0000000000000..be36d11f1b675 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_provisioning.c @@ -0,0 +1,2182 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "intel_iov.h" +#include "intel_iov_provisioning.h" +#include "intel_iov_utils.h" +#include "gt/uc/abi/guc_actions_pf_abi.h" +#include "gt/uc/abi/guc_klvs_abi.h" + +#define MAKE_GUC_KLV(__K) \ + (FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_##__K##_KEY) | \ + FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_##__K##_LEN)) + +static void pf_init_reprovisioning_worker(struct intel_iov *iov); +static void pf_start_reprovisioning_worker(struct intel_iov *iov); +static void pf_fini_reprovisioning_worker(struct intel_iov *iov); + +/* + * Resource configuration for VFs provisioning is maintained in the + * flexible array where: + * - entry [0] contains resource config for the PF, + * - entries [1..n] contain provisioning configs for VF1..VFn:: + * + * <--------------------------- 1 + total_vfs -----------> + * +-------+-------+-------+-----------------------+-------+ + * | 0 | 1 | 2 | | n | + * +-------+-------+-------+-----------------------+-------+ + * | PF | VF1 | VF2 | ... ... | VFn | + * +-------+-------+-------+-----------------------+-------+ + */ + +/** + * intel_iov_provisioning_init_early - Allocate structures for provisioning. + * @iov: the IOV struct + * + * VFs provisioning requires some data to be stored on the PF. Allocate + * flexible structures to hold all required information for every possible + * VF. In case of allocation failure PF will be in error state and will not + * be able to create VFs. + * + * This function can only be called on PF. 
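+ *
+ * For example, on a device that reports 7 total VFs the allocated array has
+ * 8 entries: configs[0] describes the PF and configs[1..7] describe VF1..VF7
+ * (the count of 7 is used here only as an illustration).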
+ */ +void intel_iov_provisioning_init_early(struct intel_iov *iov) +{ + struct intel_iov_config *configs; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(iov->pf.provisioning.configs); + + configs = kcalloc(1 + pf_get_totalvfs(iov), sizeof(*configs), GFP_KERNEL); + if (unlikely(!configs)) { + pf_update_status(iov, -ENOMEM, "provisioning"); + return; + } + + iov->pf.provisioning.configs = configs; + + pf_init_reprovisioning_worker(iov); +} + +/** + * intel_iov_provisioning_release - Release structures used for provisioning. + * @iov: the IOV struct + * + * Release structures used for provisioning. + * This function can only be called on PF. + */ +void intel_iov_provisioning_release(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + kfree(fetch_and_zero(&iov->pf.provisioning.configs)); +} + +/* + * Return: number of klvs that were successfully parsed and saved, + * negative error code on failure. + */ +static int guc_action_update_policy_cfg(struct intel_guc *guc, u64 addr, u32 size) +{ + u32 request[] = { + GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY, + lower_32_bits(addr), + upper_32_bits(addr), + size, + }; + + return intel_guc_send(guc, request, ARRAY_SIZE(request)); +} + +/* + * Return: 0 on success, -ENOKEY if klv was not parsed, -EPROTO if reply was malformed, + * negative error code on failure. + */ +static int guc_update_policy_klv32(struct intel_guc *guc, u16 key, u32 value) +{ + const u32 len = 1; /* 32bit value fits into 1 klv dword */ + const u32 cfg_size = (GUC_KLV_LEN_MIN + len); + struct i915_vma *vma; + u32 *cfg; + int ret; + + ret = intel_guc_allocate_and_map_vma(guc, cfg_size * sizeof(u32), &vma, (void **)&cfg); + if (unlikely(ret)) + return ret; + + *cfg++ = FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, len); + *cfg++ = value; + + ret = guc_action_update_policy_cfg(guc, intel_guc_ggtt_offset(guc, vma), cfg_size); + + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); + + if (unlikely(ret < 0)) + return ret; + if (unlikely(!ret)) + return -ENOKEY; + if (unlikely(ret > 1)) + return -EPROTO; + + return 0; +} + +static const char *policy_key_to_string(u16 key) +{ + switch (key) { + case GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY: + return "sched_if_idle"; + case GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY: + return "sample_period"; + case GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY: + return "reset_engine"; + default: + return ""; + } +} + +static int pf_update_bool_policy(struct intel_iov *iov, u16 key, bool *policy, bool value) +{ + struct intel_guc *guc = iov_to_guc(iov); + const char *name = policy_key_to_string(key); + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + IOV_DEBUG(iov, "updating policy %#04x (%s) %s -> %s\n", + key, name, enabledisable(*policy), enabledisable(value)); + + err = guc_update_policy_klv32(guc, key, value); + if (unlikely(err)) + goto failed; + + *policy = value; + return 0; + +failed: + IOV_ERROR(iov, "Failed to %s '%s' policy (%pe)\n", + enabledisable(value), name, ERR_PTR(err)); + return err; +} + +static int pf_update_policy_u32(struct intel_iov *iov, u16 key, u32 *policy, u32 value) +{ + struct intel_guc *guc = iov_to_guc(iov); + const char *name = policy_key_to_string(key); + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + IOV_DEBUG(iov, "updating policy %#04x (%s) %u -> %u\n", + key, name, *policy, value); + + err = guc_update_policy_klv32(guc, key, value); + if (unlikely(err)) + goto failed; + + *policy = value; + return 0; + +failed: + IOV_ERROR(iov, "Failed to update policy '%s=%u' (%pe)\n", + name, 
value, ERR_PTR(err)); + return err; +} + +static int pf_provision_sched_if_idle(struct intel_iov *iov, bool enable) +{ + return pf_update_bool_policy(iov, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY, + &iov->pf.provisioning.policies.sched_if_idle, enable); +} + +/** + * intel_iov_provisioning_set_sched_if_idle - Set 'sched_if_idle' policy. + * @iov: the IOV struct + * @enable: controls sched_if_idle policy + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_set_sched_if_idle(struct intel_iov *iov, bool enable) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_sched_if_idle(iov, enable); + + return err; +} + +/** + * intel_iov_provisioning_get_sched_if_idle - Get 'sched_if_idle' policy. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +bool intel_iov_provisioning_get_sched_if_idle(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + return iov->pf.provisioning.policies.sched_if_idle; +} + +static int pf_provision_reset_engine(struct intel_iov *iov, bool enable) +{ + return pf_update_bool_policy(iov, GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY, + &iov->pf.provisioning.policies.reset_engine, enable); +} + +/** + * intel_iov_provisioning_set_reset_engine - Set 'reset_engine' policy. + * @iov: the IOV struct + * @enable: controls reset_engine policy + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_set_reset_engine(struct intel_iov *iov, bool enable) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_reset_engine(iov, enable); + + return err; +} + +/** + * intel_iov_provisioning_get_reset_engine - Get 'reset_engine' policy. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +bool intel_iov_provisioning_get_reset_engine(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + return iov->pf.provisioning.policies.reset_engine; +} + +static int pf_provision_sample_period(struct intel_iov *iov, u32 value) +{ + return pf_update_policy_u32(iov, GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY, + &iov->pf.provisioning.policies.sample_period, value); +} + +/** + * intel_iov_provisioning_set_sample_period - Set 'sample_period' policy. + * @iov: the IOV struct + * @value: sample period in milliseconds + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_set_sample_period(struct intel_iov *iov, u32 value) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_sample_period(iov, value); + + return err; +} + +/** + * intel_iov_provisioning_get_sample_period - Get 'sample_period' policy. + * @iov: the IOV struct + * + * This function can only be called on PF. 
+ */ +u32 intel_iov_provisioning_get_sample_period(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + return iov->pf.provisioning.policies.sample_period; +} + +static inline bool pf_is_auto_provisioned(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return iov->pf.provisioning.auto_mode; +} + +static void pf_set_auto_provisioning(struct intel_iov *iov, bool value) +{ + if (pf_is_auto_provisioned(iov) == value) + return; + + IOV_DEBUG(iov, "%ps auto provisioning: %s\n", + __builtin_return_address(0), yesno(value)); + iov->pf.provisioning.auto_mode = value; +} + +static bool pf_is_vf_enabled(struct intel_iov *iov, unsigned int id) +{ + return id <= pf_get_numvfs(iov); +} + +static bool pf_is_config_pushed(struct intel_iov *iov, unsigned int id) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + return id <= iov->pf.provisioning.num_pushed; +} + +static bool pf_needs_push_config(struct intel_iov *iov, unsigned int id) +{ + return id != PFID && pf_is_vf_enabled(iov, id) && pf_is_config_pushed(iov, id); +} + +/* + * Return: number of klvs that were successfully parsed and saved, + * negative error code on failure. + */ +static int guc_action_update_vf_cfg(struct intel_guc *guc, u32 vfid, + u64 addr, u32 size) +{ + u32 request[] = { + GUC_ACTION_PF2GUC_UPDATE_VF_CFG, + vfid, + lower_32_bits(addr), + upper_32_bits(addr), + size, + }; + + return intel_guc_send(guc, request, ARRAY_SIZE(request)); +} + +/* + * Return: 0 on success, -ENOKEY if klv was not parsed, -EPROTO if reply was malformed, + * negative error code on failure. + */ +static int guc_update_vf_klv32(struct intel_guc *guc, u32 vfid, u16 key, u32 value) +{ + const u32 len = 1; /* 32bit value fits into 1 klv dword */ + const u32 cfg_size = (GUC_KLV_LEN_MIN + len); + struct i915_vma *vma; + u32 *cfg; + int ret; + + ret = intel_guc_allocate_and_map_vma(guc, cfg_size * sizeof(u32), &vma, (void **)&cfg); + if (unlikely(ret)) + return ret; + + *cfg++ = FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, len); + *cfg++ = value; + + ret = guc_action_update_vf_cfg(guc, vfid, intel_guc_ggtt_offset(guc, vma), cfg_size); + + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); + + if (unlikely(ret < 0)) + return ret; + if (unlikely(!ret)) + return -ENOKEY; + if (unlikely(ret > 1)) + return -EPROTO; + + return 0; +} + +static int guc_update_vf_klv64(struct intel_guc *guc, u32 vfid, u16 key, u64 value) +{ + const u32 len = 2; /* 64bit value fits into 2 klv dwords */ + const u32 cfg_size = (GUC_KLV_LEN_MIN + len); + struct i915_vma *vma; + u32 *cfg; + int ret; + + ret = intel_guc_allocate_and_map_vma(guc, cfg_size * sizeof(u32), &vma, (void **)&cfg); + if (unlikely(ret)) + return ret; + + *cfg++ = FIELD_PREP(GUC_KLV_0_KEY, key) | FIELD_PREP(GUC_KLV_0_LEN, len); + *cfg++ = lower_32_bits(value); + *cfg++ = upper_32_bits(value); + + ret = guc_action_update_vf_cfg(guc, vfid, intel_guc_ggtt_offset(guc, vma), cfg_size); + + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); + + if (unlikely(ret < 0)) + return ret; + if (unlikely(!ret)) + return -ENOKEY; + if (unlikely(ret > 1)) + return -EPROTO; + + return 0; +} + +static u64 pf_get_ggtt_alignment(struct intel_iov *iov) +{ + /* this might be platform dependent */ + return SZ_4K; +} + +static u64 pf_get_free_ggtt(struct intel_iov *iov) +{ + struct i915_ggtt *ggtt = iov_to_gt(iov)->ggtt; + const struct drm_mm *mm = &ggtt->vm.mm; + const struct drm_mm_node *entry; + u64 alignment = pf_get_ggtt_alignment(iov); + u64 hole_min_start = ggtt->pin_bias; + u64 hole_start, 
hole_end; + u64 spare = alignment; + u64 free_ggtt = 0; + + mutex_lock(&ggtt->vm.mutex); + + drm_mm_for_each_hole(entry, mm, hole_start, hole_end) { + hole_start = max(hole_start, hole_min_start); + hole_start = ALIGN(hole_start, alignment); + hole_end = ALIGN_DOWN(hole_end, alignment); + if (hole_start >= hole_end) + continue; + free_ggtt += hole_end - hole_start; + } + + mutex_unlock(&ggtt->vm.mutex); + + return free_ggtt > spare ? free_ggtt - spare : 0; +} + +static u64 pf_get_max_ggtt(struct intel_iov *iov) +{ + struct i915_ggtt *ggtt = iov_to_gt(iov)->ggtt; + const struct drm_mm *mm = &ggtt->vm.mm; + const struct drm_mm_node *entry; + u64 alignment = pf_get_ggtt_alignment(iov); + u64 hole_min_start = ggtt->pin_bias; + u64 hole_start, hole_end, hole_size; + u64 spare = alignment; + u64 max_hole = 0; + + mutex_lock(&ggtt->vm.mutex); + + drm_mm_for_each_hole(entry, mm, hole_start, hole_end) { + hole_start = max(hole_start, hole_min_start); + hole_start = ALIGN(hole_start, alignment); + hole_end = ALIGN_DOWN(hole_end, alignment); + if (hole_start >= hole_end) + continue; + hole_size = hole_end - hole_start; + IOV_DEBUG(iov, "start %llx size %lluK\n", hole_start, hole_size / SZ_1K); + spare -= min3(spare, hole_size, max_hole); + max_hole = max(max_hole, hole_size); + } + + mutex_unlock(&ggtt->vm.mutex); + + IOV_DEBUG(iov, "spare %lluK\n", spare / SZ_1K); + return max_hole > spare ? max_hole - spare : 0; +} + +static bool pf_is_valid_config_ggtt(struct intel_iov *iov, unsigned int id) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return drm_mm_node_allocated(&iov->pf.provisioning.configs[id].ggtt_region); +} + +static int pf_push_config_ggtt(struct intel_iov *iov, unsigned int id, u64 start, u64 size) +{ + struct intel_guc *guc = iov_to_guc(iov); + int err; + + if (!pf_needs_push_config(iov, id)) + return 0; + + err = guc_update_vf_klv64(guc, id, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, size); + if (unlikely(err)) + return err; + + err = guc_update_vf_klv64(guc, id, GUC_KLV_VF_CFG_GGTT_START_KEY, start); + if (unlikely(err)) + return err; + + return 0; +} + +static int pf_provision_ggtt(struct intel_iov *iov, unsigned int id, u64 size) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + struct intel_iov_config *config = &provisioning->configs[id]; + struct drm_mm_node *node = &config->ggtt_region; + struct i915_ggtt *ggtt = iov_to_gt(iov)->ggtt; + u64 alignment = pf_get_ggtt_alignment(iov); + int err; + + size = round_up(size, alignment); + + if (drm_mm_node_allocated(node)) { + if (size == node->size) + return 0; + + err = pf_push_config_ggtt(iov, id, 0, 0); +release: + i915_ggtt_set_space_owner(ggtt, 0, node); + + mutex_lock(&ggtt->vm.mutex); + drm_mm_remove_node(node); + mutex_unlock(&ggtt->vm.mutex); + + if (unlikely(err)) + return err; + } + GEM_BUG_ON(drm_mm_node_allocated(node)); + + if (!size) + return 0; + + if (size > ggtt->vm.total) + return -E2BIG; + + if (size > pf_get_max_ggtt(iov)) + return -EDQUOT; + + mutex_lock(&ggtt->vm.mutex); + err = i915_gem_gtt_insert(&ggtt->vm, node, size, alignment, + I915_COLOR_UNEVICTABLE, + 0, ggtt->vm.total, + PIN_HIGH); + mutex_unlock(&ggtt->vm.mutex); + if (unlikely(err)) + return err; + + i915_ggtt_set_space_owner(ggtt, id, node); + + err = pf_push_config_ggtt(iov, id, node->start, node->size); + if (unlikely(err)) + goto release; + + IOV_DEBUG(iov, "VF%u provisioned GGTT %llx-%llx (%lluK)\n", + id, node->start, node->start + node->size - 1, node->size / SZ_1K); + return 0; +} + +/** + * intel_iov_provisioning_set_ggtt - Provision 
VF with GGTT. + * @iov: the IOV struct + * @id: VF identifier + * @size: requested GGTT size + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_set_ggtt(struct intel_iov *iov, unsigned int id, u64 size) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + bool reprovisioning; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + GEM_BUG_ON(id == PFID); + + reprovisioning = pf_is_valid_config_ggtt(iov, id) || size; + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_ggtt(iov, id, size); + + if (unlikely(err)) + IOV_ERROR(iov, "Failed to provision VF%u with %llu of GGTT (%pe)\n", + id, size, ERR_PTR(err)); + else if (reprovisioning) + pf_mark_manual_provisioning(iov); + + return err; +} + +/** + * intel_iov_provisioning_get_ggtt - Query size of GGTT provisioned for VF. + * @iov: the IOV struct + * @id: VF identifier + * + * This function can only be called on PF. + */ +u64 intel_iov_provisioning_get_ggtt(struct intel_iov *iov, unsigned int id) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + struct drm_mm_node *node = &provisioning->configs[id].ggtt_region; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + GEM_BUG_ON(id == PFID); + + return drm_mm_node_allocated(node) ? node->size : 0; +} + +/** + * intel_iov_provisioning_query_free_ggtt - Query free GGTT available for provisioning. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +u64 intel_iov_provisioning_query_free_ggtt(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return pf_get_free_ggtt(iov); +} + +/** + * intel_iov_provisioning_query_max_ggtt - Query max GGTT available for provisioning. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +u64 intel_iov_provisioning_query_max_ggtt(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return pf_get_max_ggtt(iov); +} + +static bool pf_is_valid_config_ctxs(struct intel_iov *iov, unsigned int id) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return iov->pf.provisioning.configs[id].num_ctxs; +} + +static int pf_push_config_ctxs(struct intel_iov *iov, unsigned int id, u16 begin, u16 num) +{ + struct intel_guc *guc = iov_to_guc(iov); + int err; + + if (!pf_needs_push_config(iov, id)) + return 0; + + err = guc_update_vf_klv32(guc, id, GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY, begin); + if (unlikely(err)) + return err; + + err = guc_update_vf_klv32(guc, id, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, num); + if (unlikely(err)) + return err; + + return 0; +} + +/* + * To facilitate the implementation of dynamic context provisioning, we introduced + * the concept of granularity of contexts. For this purpose, we divided all contexts + * into packages with size CTXS_GRANULARITY. The exception is the first package, whose + * size is CTXS_MODULO, because GUC_MAX_LRC_DESCRIPTORS is an odd number. + */ +#define CTXS_GRANULARITY 128 +#define CTXS_MODULO (GUC_MAX_LRC_DESCRIPTORS % CTXS_GRANULARITY) +#define CTXS_DELTA (CTXS_GRANULARITY - CTXS_MODULO) + +static u16 ctxs_bitmap_total_bits(void) +{ + return ALIGN(GUC_MAX_LRC_DESCRIPTORS, CTXS_GRANULARITY) / CTXS_GRANULARITY; +} + +static u16 __encode_ctxs_count(u16 num_ctxs, bool first) +{ + GEM_BUG_ON(!first && !IS_ALIGNED(num_ctxs, CTXS_GRANULARITY)); + GEM_BUG_ON(first && !IS_ALIGNED(num_ctxs + CTXS_DELTA, CTXS_GRANULARITY)); + + return (!first) ? 
num_ctxs / CTXS_GRANULARITY : + (num_ctxs + CTXS_DELTA) / CTXS_GRANULARITY; +} + +static u16 encode_vf_ctxs_count(u16 num_ctxs) +{ + return __encode_ctxs_count(num_ctxs, false); +} + +static u16 __encode_ctxs_start(u16 start_ctx, bool first) +{ + if (!start_ctx) + return 0; + + GEM_BUG_ON(!first && !IS_ALIGNED(start_ctx + CTXS_DELTA, CTXS_GRANULARITY)); + GEM_BUG_ON(first && start_ctx); + + return (!first) ? (start_ctx + CTXS_DELTA) / CTXS_GRANULARITY : 0; +} + +static u16 __decode_ctxs_count(u16 num_bits, bool first) +{ + return (!first) ? num_bits * CTXS_GRANULARITY : + num_bits * CTXS_GRANULARITY - CTXS_DELTA; +} + +static u16 decode_vf_ctxs_count(u16 num_bits) +{ + return __decode_ctxs_count(num_bits, false); +} + +static u16 decode_pf_ctxs_count(u16 num_bits) +{ + return __decode_ctxs_count(num_bits, true); +} + +static u16 __decode_ctxs_start(u16 start_bit, bool first) +{ + GEM_BUG_ON(first && start_bit); + + return (!first) ? start_bit * CTXS_GRANULARITY - CTXS_DELTA : 0; +} + +static u16 decode_vf_ctxs_start(u16 start_bit) +{ + return __decode_ctxs_start(start_bit, false); +} + +static u16 pf_get_ctxs_quota(struct intel_iov *iov, unsigned int id) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return iov->pf.provisioning.configs[id].num_ctxs; +} + +static u16 align_ctxs(unsigned int id, u16 num_ctxs) +{ + if (num_ctxs == 0) + return 0; + + num_ctxs = ALIGN(num_ctxs, CTXS_GRANULARITY); + return id ? num_ctxs : num_ctxs - CTXS_DELTA; +} + +static unsigned long *pf_get_ctxs_bitmap(struct intel_iov *iov) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + unsigned int id, total_vfs = pf_get_totalvfs(iov); + const u16 total_bits = ctxs_bitmap_total_bits(); + unsigned long *ctxs_bitmap = bitmap_zalloc(total_bits, GFP_KERNEL); + + if (unlikely(!ctxs_bitmap)) + return NULL; + + for (id = 0; id <= total_vfs; id++) { + struct intel_iov_config *config = &provisioning->configs[id]; + + if (!config->num_ctxs) + continue; + + bitmap_set(ctxs_bitmap, __encode_ctxs_start(config->begin_ctx, !id), + __encode_ctxs_count(config->num_ctxs, !id)); + } + + /* caller must use bitmap_free */ + return ctxs_bitmap; +} + +static int pf_alloc_vf_ctxs_range(struct intel_iov *iov, unsigned int id, u16 num_ctxs) +{ + unsigned long *ctxs_bitmap = pf_get_ctxs_bitmap(iov); + u16 num_bits = encode_vf_ctxs_count(num_ctxs); + u16 max_size = U16_MAX; + u16 index = U16_MAX; + u16 last_equal = 0; + unsigned int rs, re; + + if (unlikely(!ctxs_bitmap)) + return -ENOMEM; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + bitmap_for_each_clear_region(ctxs_bitmap, rs, re, 0, ctxs_bitmap_total_bits()) { + u16 size_bits = re - rs; + + /* + * The best-fit hole would be one that was as close to the end as possible and + * equal to the number of contexts searched. 
+ * Failing that, we pick the smallest hole that is still larger than + * the required size. + * + */ + if (size_bits == num_bits) { + last_equal = rs; + } else if (size_bits > num_bits && size_bits < max_size) { + index = re - num_bits; + max_size = size_bits; + } + } + + bitmap_free(ctxs_bitmap); + + if (last_equal != 0) + index = last_equal; + + if (index >= U16_MAX) + return -ENOSPC; + + return decode_vf_ctxs_start(index); +} + +static int pf_alloc_ctxs_range(struct intel_iov *iov, unsigned int id, u16 num_ctxs) +{ + int ret; + + ret = pf_alloc_vf_ctxs_range(iov, id, num_ctxs); + + if (ret >= 0) + IOV_DEBUG(iov, "ctxs found %u-%u (%u)\n", ret, ret + num_ctxs - 1, num_ctxs); + + return ret; +} + +static int __pf_provision_vf_ctxs(struct intel_iov *iov, unsigned int id, u16 start_ctx, u16 num_ctxs) +{ + struct intel_iov_config *config = &iov->pf.provisioning.configs[id]; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id == PFID); + + config->begin_ctx = start_ctx; + config->num_ctxs = num_ctxs; + + return 0; +} + +static int __pf_provision_ctxs(struct intel_iov *iov, unsigned int id, u16 start_ctx, u16 num_ctxs) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + err = pf_push_config_ctxs(iov, id, start_ctx, num_ctxs); + if (unlikely(err)) { + __pf_provision_vf_ctxs(iov, id, 0, 0); + return err; + } + + return __pf_provision_vf_ctxs(iov, id, start_ctx, num_ctxs); +} + +static int pf_provision_ctxs(struct intel_iov *iov, unsigned int id, u16 num_ctxs) +{ + u16 ctxs_quota = align_ctxs(id, num_ctxs); + int ret; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (id == PFID) + return -EOPNOTSUPP; + + if (ctxs_quota == pf_get_ctxs_quota(iov, id)) + return 0; + + IOV_DEBUG(iov, "provisioning VF%u with %hu contexts (aligned to %hu)\n", + id, num_ctxs, ctxs_quota); + + if (!num_ctxs) + return __pf_provision_ctxs(iov, id, 0, 0); + + ret = pf_alloc_ctxs_range(iov, id, ctxs_quota); + if (ret >= 0) + return __pf_provision_ctxs(iov, id, ret, ctxs_quota); + + return ret; +} + +/** + * intel_iov_provisioning_set_ctxs - Provision VF with contexts. + * @iov: the IOV struct + * @id: VF identifier + * @num_ctxs: requested contexts + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_set_ctxs(struct intel_iov *iov, unsigned int id, u16 num_ctxs) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + bool reprovisioning; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + reprovisioning = pf_is_valid_config_ctxs(iov, id) || num_ctxs; + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_ctxs(iov, id, num_ctxs); + + if (unlikely(err)) + IOV_ERROR(iov, "Failed to provision VF%u with %hu contexts (%pe)\n", + id, num_ctxs, ERR_PTR(err)); + else if (reprovisioning && id != PFID) + pf_mark_manual_provisioning(iov); + + return err; +} + +/** + * intel_iov_provisioning_get_ctxs - Get VF contexts quota. + * @iov: the IOV struct + * @id: VF identifier + * + * This function can only be called on PF. 
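+ *
+ * Note that quotas are stored internally aligned to the context granularity
+ * (see align_ctxs()), so the value returned here may be slightly larger than
+ * the value originally requested via intel_iov_provisioning_set_ctxs().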
+ */ +u16 intel_iov_provisioning_get_ctxs(struct intel_iov *iov, unsigned int id) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + return pf_get_ctxs_quota(iov, id); +} + +static u16 pf_get_ctxs_free(struct intel_iov *iov) +{ + unsigned long *ctxs_bitmap = pf_get_ctxs_bitmap(iov); + unsigned int rs, re; + u16 sum = 0; + + if (unlikely(!ctxs_bitmap)) + return 0; + + bitmap_for_each_clear_region(ctxs_bitmap, rs, re, 0, ctxs_bitmap_total_bits()) { + IOV_DEBUG(iov, "ctxs hole %u-%u (%u)\n", decode_vf_ctxs_start(rs), + decode_vf_ctxs_start(re) - 1, decode_vf_ctxs_count(re - rs)); + sum += re - rs; + } + bitmap_free(ctxs_bitmap); + + return decode_vf_ctxs_count(sum); +} + +/** + * intel_iov_provisioning_query_free_ctxs - Get number of total unused contexts. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +u16 intel_iov_provisioning_query_free_ctxs(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return pf_get_ctxs_free(iov); +} + +static u16 pf_get_ctxs_max_quota(struct intel_iov *iov) +{ + unsigned long *ctxs_bitmap = pf_get_ctxs_bitmap(iov); + unsigned int rs, re; + u16 max = 0; + + if (unlikely(!ctxs_bitmap)) + return 0; + + bitmap_for_each_clear_region(ctxs_bitmap, rs, re, 0, ctxs_bitmap_total_bits()) { + IOV_DEBUG(iov, "ctxs hole %u-%u (%u)\n", decode_vf_ctxs_start(rs), + decode_vf_ctxs_start(re) - 1, decode_vf_ctxs_count(re - rs)); + max = max_t(u16, max, re - rs); + } + bitmap_free(ctxs_bitmap); + + return decode_vf_ctxs_count(max); +} + +/** + * intel_iov_provisioning_query_max_ctxs - Get maximum available contexts quota. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +u16 intel_iov_provisioning_query_max_ctxs(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return pf_get_ctxs_max_quota(iov); +} + +static bool pf_is_valid_config_dbs(struct intel_iov *iov, unsigned int id) +{ + struct intel_iov_config *config = &iov->pf.provisioning.configs[id]; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + return config->num_dbs || config->begin_db; +} + +static unsigned long *pf_get_dbs_bitmap(struct intel_iov *iov) +{ + unsigned long *dbs_bitmap = bitmap_zalloc(GUC_NUM_DOORBELLS, GFP_KERNEL); + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + unsigned int n, total_vfs = pf_get_totalvfs(iov); + struct intel_iov_config *config; + + if (unlikely(!dbs_bitmap)) + return NULL; + + for (n = 0; n <= total_vfs; n++) { + config = &provisioning->configs[n]; + if (!config->num_dbs) + continue; + bitmap_set(dbs_bitmap, config->begin_db, config->num_dbs); + } + + /* caller must use bitmap_free */ + return dbs_bitmap; +} + +static int pf_alloc_dbs_range(struct intel_iov *iov, u16 num_dbs) +{ + unsigned long *dbs_bitmap = pf_get_dbs_bitmap(iov); + unsigned long index; + + if (unlikely(!dbs_bitmap)) + return -ENOMEM; + + index = bitmap_find_next_zero_area(dbs_bitmap, GUC_NUM_DOORBELLS, 0, num_dbs, 0); + bitmap_free(dbs_bitmap); + + if (index >= GUC_NUM_DOORBELLS) + return -ENOSPC; + + IOV_DEBUG(iov, "dbs found %lu-%lu (%u)\n", + index, index + num_dbs - 1, num_dbs); + return index; +} + +static int pf_push_config_dbs(struct intel_iov *iov, unsigned int id, u16 begin, u16 num) +{ + struct intel_guc *guc = iov_to_guc(iov); + int err; + + if (!pf_needs_push_config(iov, id)) + return 0; + + err = guc_update_vf_klv32(guc, id, GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID_KEY, begin); + if (unlikely(err)) + return err; + + err = guc_update_vf_klv32(guc, id, 
GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, num); + if (unlikely(err)) + return err; + + return 0; +} + +static int pf_provision_dbs(struct intel_iov *iov, unsigned int id, u16 num_dbs) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + struct intel_iov_config *config = &provisioning->configs[id]; + int err, ret; + + if (num_dbs == config->num_dbs) + return 0; + + IOV_DEBUG(iov, "provisioning VF%u with %hu doorbells\n", id, num_dbs); + + if (config->num_dbs) { + config->begin_db = 0; + config->num_dbs = 0; + + err = pf_push_config_dbs(iov, id, 0, 0); + if (unlikely(err)) + return err; + } + + if (!num_dbs) + return 0; + + ret = pf_alloc_dbs_range(iov, num_dbs); + if (unlikely(ret < 0)) + return ret; + + err = pf_push_config_dbs(iov, id, ret, num_dbs); + if (unlikely(err)) + return err; + + config->begin_db = ret; + config->num_dbs = num_dbs; + + return 0; +} + +/** + * intel_iov_provisioning_set_dbs - Set VF doorbells quota. + * @iov: the IOV struct + * @id: VF identifier + * @num_dbs: requested doorbells + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_set_dbs(struct intel_iov *iov, unsigned int id, u16 num_dbs) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + bool reprovisioning; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + reprovisioning = pf_is_valid_config_dbs(iov, id) || num_dbs; + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_dbs(iov, id, num_dbs); + + if (unlikely(err)) + IOV_ERROR(iov, "Failed to provision VF%u with %hu doorbells (%pe)\n", + id, num_dbs, ERR_PTR(err)); + else if (reprovisioning && id != PFID) + pf_mark_manual_provisioning(iov); + + return err; +} + +/** + * intel_iov_provisioning_get_dbs - Get VF doorbells quota. + * @iov: the IOV struct + * @id: VF identifier + * + * This function can only be called on PF. + */ +u16 intel_iov_provisioning_get_dbs(struct intel_iov *iov, unsigned int id) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + return iov->pf.provisioning.configs[id].num_dbs; +} + +static u16 pf_get_free_dbs(struct intel_iov *iov) +{ + unsigned long *dbs_bitmap = pf_get_dbs_bitmap(iov); + int used; + + if (unlikely(!dbs_bitmap)) + return 0; + + used = bitmap_weight(dbs_bitmap, GUC_NUM_DOORBELLS); + GEM_WARN_ON(used > GUC_NUM_DOORBELLS); + + bitmap_free(dbs_bitmap); + + return GUC_NUM_DOORBELLS - used; +} + +/** + * intel_iov_provisioning_query_free_dbs - Get available doorbells. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +u16 intel_iov_provisioning_query_free_dbs(struct intel_iov *iov) +{ + return pf_get_free_dbs(iov); +} + +static u16 pf_get_max_dbs(struct intel_iov *iov) +{ + unsigned long *dbs_bitmap = pf_get_dbs_bitmap(iov); + unsigned int rs, re; + u16 limit = 0; + + if (unlikely(!dbs_bitmap)) + return 0; + + bitmap_for_each_clear_region(dbs_bitmap, rs, re, 0, GUC_NUM_DOORBELLS) { + IOV_DEBUG(iov, "dbs hole %u-%u (%u)\n", rs, re, re - rs); + limit = max_t(u16, limit, re - rs); + } + bitmap_free(dbs_bitmap); + + return limit; +} + +/** + * intel_iov_provisioning_query_max_dbs - Get maximum available doorbells quota. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +u16 intel_iov_provisioning_query_max_dbs(struct intel_iov *iov) +{ + return pf_get_max_dbs(iov); +} + +static const char *exec_quantum_unit(u32 exec_quantum) +{ + return exec_quantum ? 
"ms" : "(inifinity)"; +} + +static int pf_provision_exec_quantum(struct intel_iov *iov, unsigned int id, + u32 exec_quantum) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + struct intel_iov_config *config = &provisioning->configs[id]; + int err; + + if (exec_quantum == config->exec_quantum) + return 0; + + err = guc_update_vf_klv32(iov_to_guc(iov), id, + GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY, exec_quantum); + if (unlikely(err)) + return err; + + config->exec_quantum = exec_quantum; + + IOV_DEBUG(iov, "VF%u provisioned with %u%s execution quantum\n", + id, exec_quantum, exec_quantum_unit(exec_quantum)); + return 0; +} + +/** + * intel_iov_provisioning_set_exec_quantum - Provision VF with execution quantum. + * @iov: the IOV struct + * @id: VF identifier + * @exec_quantum: requested execution quantum + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_set_exec_quantum(struct intel_iov *iov, unsigned int id, + u32 exec_quantum) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_exec_quantum(iov, id, exec_quantum); + + if (unlikely(err)) + IOV_ERROR(iov, "Failed to provision VF%u with %u%s execution quantum (%pe)\n", + id, exec_quantum, exec_quantum_unit(exec_quantum), ERR_PTR(err)); + else if (exec_quantum && id != PFID) + pf_mark_manual_provisioning(iov); + + return err; +} + +/** + * intel_iov_provisioning_get_exec_quantum - Get VF execution quantum. + * @iov: the IOV struct + * @id: VF identifier + * + * This function can only be called on PF. + */ +u32 intel_iov_provisioning_get_exec_quantum(struct intel_iov *iov, unsigned int id) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + return iov->pf.provisioning.configs[id].exec_quantum; +} + +static const char *preempt_timeout_unit(u32 preempt_timeout) +{ + return preempt_timeout ? "us" : "(inifinity)"; +} + +static int pf_provision_preempt_timeout(struct intel_iov *iov, unsigned int id, + u32 preempt_timeout) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + struct intel_iov_config *config = &provisioning->configs[id]; + int err; + + if (preempt_timeout == config->preempt_timeout) + return 0; + + err = guc_update_vf_klv32(iov_to_guc(iov), id, + GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, + preempt_timeout); + if (unlikely(err)) + return err; + + config->preempt_timeout = preempt_timeout; + + IOV_DEBUG(iov, "VF%u provisioned with %u%s preemption timeout\n", + id, preempt_timeout, preempt_timeout_unit(preempt_timeout)); + return 0; +} + +/** + * intel_iov_provisioning_set_preempt_timeout - Provision VF with preemption timeout. 
+ * @iov: the IOV struct + * @id: VF identifier + * @preempt_timeout: requested preemption timeout + */ +int intel_iov_provisioning_set_preempt_timeout(struct intel_iov *iov, unsigned int id, u32 preempt_timeout) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_preempt_timeout(iov, id, preempt_timeout); + + if (unlikely(err)) + IOV_ERROR(iov, "Failed to provision VF%u with %u%s preemption timeout (%pe)\n", + id, preempt_timeout, preempt_timeout_unit(preempt_timeout), ERR_PTR(err)); + else if (preempt_timeout && id != PFID) + pf_mark_manual_provisioning(iov); + + return err; +} + +/** + * intel_iov_provisioning_get_preempt_timeout - Get VF preemption timeout. + * @iov: the IOV struct + * @id: VF identifier + * + * This function can only be called on PF. + */ +u32 intel_iov_provisioning_get_preempt_timeout(struct intel_iov *iov, unsigned int id) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + return iov->pf.provisioning.configs[id].preempt_timeout; +} + +static inline const char *intel_iov_threshold_to_string(enum intel_iov_threshold threshold) +{ + switch (threshold) { +#define __iov_threshold_to_string(N, K) \ + case IOV_THRESHOLD_##K: return #N; + IOV_THRESHOLDS(__iov_threshold_to_string) + } +#undef __iov_threshold_to_string + return ""; +} + +static u32 intel_iov_threshold_to_klv_key(enum intel_iov_threshold threshold) +{ + switch (threshold) { +#define __iov_threshold_enum_to_klv(N, K) \ + case IOV_THRESHOLD_##K: return GUC_KLV_VF_CFG_THRESHOLD_##K##_KEY; + IOV_THRESHOLDS(__iov_threshold_enum_to_klv) +#undef __iov_threshold_enum_to_klv + } + GEM_BUG_ON(true); + return 0; /* unreachable */ +} + +static int pf_provision_threshold(struct intel_iov *iov, unsigned int id, + enum intel_iov_threshold threshold, u32 value) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + struct intel_iov_config *config = &provisioning->configs[id]; + int err; + + GEM_BUG_ON(threshold >= IOV_THRESHOLD_MAX); + + if (value == config->thresholds[threshold]) + return 0; + + err = guc_update_vf_klv32(iov_to_guc(iov), id, + intel_iov_threshold_to_klv_key(threshold), value); + if (unlikely(err)) + return err; + + config->thresholds[threshold] = value; + + IOV_DEBUG(iov, "VF%u threshold %s=%u\n", + id, intel_iov_threshold_to_string(threshold), value); + return 0; +} + +/** + * intel_iov_provisioning_set_threshold - Set threshold for the VF. + * @iov: the IOV struct + * @id: VF identifier + * @threshold: threshold identifier + * @value: requested threshold value + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_set_threshold(struct intel_iov *iov, unsigned int id, + enum intel_iov_threshold threshold, u32 value) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + + with_intel_runtime_pm(rpm, wakeref) + err = pf_provision_threshold(iov, id, threshold, value); + + if (unlikely(err)) + IOV_ERROR(iov, "Failed to set threshold %s=%u for VF%u (%pe)\n", + intel_iov_threshold_to_string(threshold), value, id, ERR_PTR(err)); + else if (value) + pf_mark_manual_provisioning(iov); + + return err; +} + +/** + * intel_iov_provisioning_get_threshold - Get threshold of the VF. 
+ * @iov: the IOV struct + * @id: VF identifier + * @threshold: threshold identifier + * + * This function can only be called on PF. + */ +u32 intel_iov_provisioning_get_threshold(struct intel_iov *iov, unsigned int id, + enum intel_iov_threshold threshold) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + GEM_BUG_ON(threshold >= IOV_THRESHOLD_MAX); + + return iov->pf.provisioning.configs[id].thresholds[threshold]; +} + +static void pf_unprovision_thresholds(struct intel_iov *iov, unsigned int id) +{ +#define __iov_threshold_unprovision(N, K) pf_provision_threshold(iov, id, IOV_THRESHOLD_##K, 0); + IOV_THRESHOLDS(__iov_threshold_unprovision) +#undef __iov_threshold_unprovision +} + +static void pf_assign_ctxs_for_pf(struct intel_iov *iov) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + u16 total_vfs = pf_get_totalvfs(iov); + const u16 total_ctxs_bits = ctxs_bitmap_total_bits(); + u16 pf_ctxs_bits; + u16 pf_ctxs; + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(!total_vfs); + GEM_BUG_ON(provisioning->configs[0].num_ctxs); + + pf_ctxs_bits = total_ctxs_bits - ((total_ctxs_bits / (1 + total_vfs)) * total_vfs); + pf_ctxs = decode_pf_ctxs_count(pf_ctxs_bits); + + IOV_DEBUG(iov, "config: %s %u = %u pf + %u available\n", + "contexts", GUC_MAX_LRC_DESCRIPTORS, pf_ctxs, GUC_MAX_LRC_DESCRIPTORS - pf_ctxs); + + provisioning->configs[0].begin_ctx = 0; + provisioning->configs[0].num_ctxs = pf_ctxs; + + /* make sure to do not use context ids beyond our limit */ + err = intel_guc_submission_limit_ids(iov_to_guc(iov), pf_ctxs); + if (unlikely(err)) + IOV_ERROR(iov, "Failed to limit PF %s to %u (%pe)\n", + "contexts", pf_ctxs, ERR_PTR(err)); +} + +/** + * intel_iov_provisioning_init - Perform initial provisioning of the resources. + * @iov: the IOV struct + * + * Some resources shared between PF and VFs need to partitioned early, as PF + * allocation can't be changed later, only VFs allocations can be modified until + * all VFs are enabled. Perform initial partitioning to get fixed PF resources. + * + * This function can only be called on PF. 
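The arithmetic in pf_assign_ctxs_for_pf() above gives the PF one of (1 + total_vfs) equal shares of the context-ID space plus whatever does not divide evenly, leaving the rest to be handed out to VFs later. A tiny worked example of the same formula, using raw context counts rather than the bitmap granularity the driver actually operates on:

  #include <stdio.h>

  int main(void)
  {
          unsigned int total = 65536;     /* illustrative size of the context-ID space */
          unsigned int total_vfs = 5;
          unsigned int share = total / (1 + total_vfs);
          unsigned int pf_ctxs = total - share * total_vfs;      /* one share + remainder */

          printf("share=%u pf=%u left for VFs=%u\n", share, pf_ctxs, total - pf_ctxs);
          /* share=10922 pf=10926 left for VFs=54610 */
          return 0;
  }
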
+ */ +void intel_iov_provisioning_init(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (unlikely(pf_in_error(iov))) + return; + + pf_assign_ctxs_for_pf(iov); +} + +static bool pf_is_auto_provisioning_enabled(struct intel_iov *iov) +{ + return i915_sriov_pf_is_auto_provisioning_enabled(iov_to_i915(iov)); +} + +static bool pf_is_admin_only(struct intel_iov *iov) +{ + return false; +} + +static void pf_unprovision_config(struct intel_iov *iov, unsigned int id) +{ + pf_provision_ggtt(iov, id, 0); + pf_provision_ctxs(iov, id, 0); + pf_provision_dbs(iov, id, 0); + pf_provision_exec_quantum(iov, id, 0); + pf_provision_preempt_timeout(iov, id, 0); + + pf_unprovision_thresholds(iov, id); +} + +static void pf_unprovision_all(struct intel_iov *iov) +{ + unsigned int num_vfs = pf_get_totalvfs(iov); + unsigned int n; + + for (n = num_vfs; n > 0; n--) + pf_unprovision_config(iov, n); +} + +static void pf_auto_unprovision(struct intel_iov *iov) +{ + if (pf_is_auto_provisioned(iov)) + pf_unprovision_all(iov); + + pf_set_auto_provisioning(iov, false); +} + +static int pf_auto_provision_ggtt(struct intel_iov *iov, unsigned int num_vfs) +{ + u64 free = pf_get_free_ggtt(iov); + u64 available = pf_get_max_ggtt(iov); + u64 alignment = pf_get_ggtt_alignment(iov); + u64 fair, leftover; + unsigned int n; + int err; + + /* use largest block to make sure all VFs allocations will fit */ + fair = div_u64(available, num_vfs); + fair = ALIGN_DOWN(fair, alignment); + GEM_BUG_ON(free < fair * num_vfs); + + /* recalculate if PF is undervalued */ + if (!pf_is_admin_only(iov)) { + leftover = free - fair * num_vfs; + if (leftover < fair) { + fair = div_u64(available, 1 + num_vfs); + fair = ALIGN_DOWN(fair, alignment); + } + } + + IOV_DEBUG(iov, "GGTT available(%llu/%llu) fair(%u x %llu)\n", + available, free, num_vfs, fair); + if (!fair) + return -ENOSPC; + + for (n = 1; n <= num_vfs; n++) { + if (pf_is_valid_config_ggtt(iov, n)) + return -EUCLEAN; + + err = pf_provision_ggtt(iov, n, fair); + if (unlikely(err)) + return err; + } + + return 0; +} + +static int pf_auto_provision_ctxs(struct intel_iov *iov, unsigned int num_vfs) +{ + u16 n, fair; + u16 available; + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + available = pf_get_ctxs_free(iov); + fair = ALIGN_DOWN(available / num_vfs, CTXS_GRANULARITY); + + if (!fair) + return -ENOSPC; + + IOV_DEBUG(iov, "contexts available(%hu) fair(%u x %hu)\n", available, num_vfs, fair); + + for (n = 1; n <= num_vfs; n++) { + if (pf_is_valid_config_ctxs(iov, n)) + return -EUCLEAN; + + err = pf_provision_ctxs(iov, n, fair); + if (unlikely(err)) + return err; + } + + return 0; +} + +static int pf_auto_provision_dbs(struct intel_iov *iov, unsigned int num_vfs) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + u16 available, fair; + unsigned int n; + int err; + + available = GUC_NUM_DOORBELLS - provisioning->configs[0].num_dbs; + fair = available / num_vfs; + + IOV_DEBUG(iov, "doorbells available(%hu) fair(%u x %hu)\n", + available, num_vfs, fair); + if (!fair) + return -ENOSPC; + + for (n = 1; n <= num_vfs; n++) { + if (pf_is_valid_config_dbs(iov, n)) + return -EUCLEAN; + + err = pf_provision_dbs(iov, n, fair); + if (unlikely(err)) + return err; + } + + return 0; +} + +static int pf_auto_provision(struct intel_iov *iov, unsigned int num_vfs) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(num_vfs > pf_get_totalvfs(iov)); + GEM_BUG_ON(num_vfs < 1); + + if (!pf_is_auto_provisioning_enabled(iov)) { + err = -EPERM; + goto fail; 
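pf_auto_provision_ggtt() above first splits the largest free GGTT block evenly between the VFs and then checks whether the PF itself would be left with less than one VF share; if so, the fair size is recomputed over (1 + num_vfs) shares. A stand-alone sketch of that sizing decision; the sizes and the alignment below are made up for illustration:

  #include <stdio.h>
  #include <stdint.h>

  #define ALIGN_DOWN(x, a)        ((x) / (a) * (a))

  int main(void)
  {
          uint64_t available = 3900ull << 20;     /* largest free GGTT block */
          uint64_t free_total = 4000ull << 20;    /* total free GGTT */
          uint64_t alignment = 4ull << 20;        /* illustrative granularity */
          unsigned int num_vfs = 4;
          uint64_t fair, leftover;

          fair = ALIGN_DOWN(available / num_vfs, alignment);
          leftover = free_total - fair * num_vfs;
          if (leftover < fair)                    /* PF would get less than a VF */
                  fair = ALIGN_DOWN(available / (1 + num_vfs), alignment);

          printf("fair per VF = %llu MiB\n", (unsigned long long)(fair >> 20));
          /* prints 780 MiB: the recalculation over 1 + num_vfs shares kicked in */
          return 0;
  }
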
+ } + + pf_set_auto_provisioning(iov, true); + + err = pf_auto_provision_ggtt(iov, num_vfs); + if (unlikely(err)) + goto fail; + + err = pf_auto_provision_ctxs(iov, num_vfs); + if (unlikely(err)) + goto fail; + + err = pf_auto_provision_dbs(iov, num_vfs); + if (unlikely(err)) + goto fail; + + return 0; +fail: + IOV_ERROR(iov, "Failed to auto provision %u VFs (%pe)", + num_vfs, ERR_PTR(err)); + pf_auto_unprovision(iov); + return err; +} + +/** + * intel_iov_provisioning_auto() - Perform auto provisioning of VFs + * @iov: the IOV struct + * @num_vfs: number of VFs to auto configure or 0 to unprovision + * + * Perform auto provisioning by allocating fair amount of available + * resources for each VF that are to be enabled. + * + * This function shall be called only on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_provisioning_auto(struct intel_iov *iov, unsigned int num_vfs) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (num_vfs) + return pf_auto_provision(iov, num_vfs); + + pf_auto_unprovision(iov); + return 0; +} + +static int pf_validate_config(struct intel_iov *iov, unsigned int id) +{ + bool valid_ggtt = pf_is_valid_config_ggtt(iov, id); + bool valid_ctxs = pf_is_valid_config_ctxs(iov, id); + bool valid_dbs = pf_is_valid_config_dbs(iov, id); + bool valid_any = valid_ggtt || valid_ctxs || valid_dbs; + bool valid_all = valid_ggtt && valid_ctxs; + + /* we don't require doorbells, but will check if were assigned */ + + if (!valid_all) { + IOV_DEBUG(iov, "%u: invalid config: %s%s%s\n", id, + valid_ggtt ? "" : "GGTT ", + valid_ctxs ? "" : "contexts ", + valid_dbs ? "" : "doorbells "); + return valid_any ? -ENOKEY : -ENODATA; + } + + return 0; +} + +/** + * intel_iov_provisioning_verify() - TBD + * @iov: the IOV struct + * @num_vfs: number of VFs configurations to verify + * + * Verify that VFs configurations are valid. + * + * This function shall be called only on PF. + * + * Return: 0 on success or a negative error code on failure. 
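pf_validate_config() above distinguishes three states per VF: completely empty (-ENODATA), partially provisioned (-ENOKEY) and usable, where GGTT and contexts are mandatory and doorbells are optional. The verification step then only allows enabling VFs when every config is either fully valid or empty. A small sketch of that classification; the struct fields are simplified stand-ins:

  #include <stdio.h>
  #include <errno.h>
  #include <stdbool.h>

  struct cfg { bool ggtt, ctxs, dbs; };

  static int validate(const struct cfg *c)
  {
          bool any = c->ggtt || c->ctxs || c->dbs;
          bool all = c->ggtt && c->ctxs;  /* doorbells are not required */

          if (all)
                  return 0;
          return any ? -ENOKEY : -ENODATA;
  }

  int main(void)
  {
          struct cfg empty = { 0 };
          struct cfg partial = { .ggtt = true };
          struct cfg full = { .ggtt = true, .ctxs = true };

          printf("%d %d %d\n", validate(&empty), validate(&partial), validate(&full));
          /* empty -> -ENODATA, partial -> -ENOKEY, full -> 0 */
          return 0;
  }
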
+ */ +int intel_iov_provisioning_verify(struct intel_iov *iov, unsigned int num_vfs) +{ + unsigned int num_empty = 0; + unsigned int num_valid = 0; + unsigned int n; + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(num_vfs > pf_get_totalvfs(iov)); + GEM_BUG_ON(num_vfs < 1); + + for (n = 1; n <= num_vfs; n++) { + err = pf_validate_config(iov, n); + if (!err) + num_valid++; + else if (err == -ENODATA) + num_empty++; + } + + IOV_DEBUG(iov, "found valid(%u) invalid(%u) empty(%u) configs\n", + num_valid, num_vfs - num_valid, num_empty); + + if (num_empty == num_vfs) + return -ENODATA; + + if (num_valid + num_empty != num_vfs) + return -ENOKEY; + + return 0; +} + +/* Return: number of configuration dwords written */ +static u32 encode_config(u32 *cfg, const struct intel_iov_config *config) +{ + u32 n = 0; + + if (drm_mm_node_allocated(&config->ggtt_region)) { + cfg[n++] = MAKE_GUC_KLV(VF_CFG_GGTT_START); + cfg[n++] = lower_32_bits(config->ggtt_region.start); + cfg[n++] = upper_32_bits(config->ggtt_region.start); + + cfg[n++] = MAKE_GUC_KLV(VF_CFG_GGTT_SIZE); + cfg[n++] = lower_32_bits(config->ggtt_region.size); + cfg[n++] = upper_32_bits(config->ggtt_region.size); + } + + cfg[n++] = MAKE_GUC_KLV(VF_CFG_BEGIN_CONTEXT_ID); + cfg[n++] = config->begin_ctx; + + cfg[n++] = MAKE_GUC_KLV(VF_CFG_NUM_CONTEXTS); + cfg[n++] = config->num_ctxs; + + cfg[n++] = MAKE_GUC_KLV(VF_CFG_BEGIN_DOORBELL_ID); + cfg[n++] = config->begin_db; + + cfg[n++] = MAKE_GUC_KLV(VF_CFG_NUM_DOORBELLS); + cfg[n++] = config->num_dbs; + + return n; +} + +static int pf_push_configs(struct intel_iov *iov, unsigned int num) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + struct intel_guc *guc = iov_to_guc(iov); + struct i915_vma *vma; + unsigned int n; + u32 cfg_size; + u32 cfg_addr; + u32 *cfg; + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, (void **)&cfg); + if (unlikely(err)) + return err; + + cfg_addr = intel_guc_ggtt_offset(guc, vma); + + for (n = 1; n <= num; n++) { + cfg_size = 0; + + err = pf_validate_config(iov, n); + if (err != -ENODATA) + cfg_size = encode_config(cfg, &provisioning->configs[n]); + + GEM_BUG_ON(cfg_size * sizeof(u32) > SZ_4K); + + if (cfg_size) { + err = guc_action_update_vf_cfg(guc, n, cfg_addr, cfg_size); + if (unlikely(err < 0)) + goto fail; + } + } + err = 0; + provisioning->num_pushed = num; + +fail: + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); + return err; +} + +static int pf_push_no_configs(struct intel_iov *iov) +{ + struct intel_guc *guc = iov_to_guc(iov); + unsigned int n; + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + for (n = iov->pf.provisioning.num_pushed; n > 0; n--) { + err = guc_action_update_vf_cfg(guc, n, 0, 0); + if (unlikely(err < 0)) + break; + } + iov->pf.provisioning.num_pushed = n; + + return n ? -ESTALE : 0; +} + +/** + * intel_iov_provisioning_push() - Push provisioning configs to GuC. + * @iov: the IOV struct + * @num: number of configurations to push + * + * Push provisioning configs for @num VFs or reset configs for previously + * configured VFs. + * + * This function shall be called only on PF. + * + * Return: 0 on success or a negative error code on failure. 
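encode_config() above serializes a VF config as a flat array of KLVs: each entry is a key/length header dword built by MAKE_GUC_KLV(), followed by one value dword for 32-bit items or two dwords (low, then high) for 64-bit items such as the GGTT start and size. A self-contained sketch of that buffer layout; the key numbers and the 16/16 header split are assumptions for illustration, not the real ABI encoding:

  #include <stdio.h>
  #include <stdint.h>

  /* assumed header layout: key in the upper 16 bits, length (in dwords) below */
  #define KLV(key, len)   (((uint32_t)(key) << 16) | (len))

  static unsigned int enc32(uint32_t *cfg, unsigned int key, uint32_t val)
  {
          cfg[0] = KLV(key, 1);
          cfg[1] = val;
          return 2;
  }

  static unsigned int enc64(uint32_t *cfg, unsigned int key, uint64_t val)
  {
          cfg[0] = KLV(key, 2);
          cfg[1] = (uint32_t)val;                 /* lower 32 bits first */
          cfg[2] = (uint32_t)(val >> 32);
          return 3;
  }

  int main(void)
  {
          uint32_t cfg[16];
          unsigned int n = 0, i;

          n += enc64(cfg + n, 0x0001, 0x180000000ull);    /* e.g. GGTT start */
          n += enc32(cfg + n, 0x0002, 1024);              /* e.g. context count */

          printf("%u dwords:", n);
          for (i = 0; i < n; i++)
                  printf(" %#x", (unsigned int)cfg[i]);
          printf("\n");
          return 0;
  }
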
+ */ +int intel_iov_provisioning_push(struct intel_iov *iov, unsigned int num) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(num > pf_get_totalvfs(iov)); + + err = pf_get_status(iov); + if (unlikely(err < 0)) + goto fail; + + if (num) + err = pf_push_configs(iov, num); + else + err = pf_push_no_configs(iov); + if (unlikely(err)) + goto fail; + + return 0; +fail: + IOV_ERROR(iov, "Failed to push configurations (%pe)", ERR_PTR(err)); + return err; +} + +/** + * intel_iov_provisioning_fini - Unprovision all resources. + * @iov: the IOV struct + * + * This function can only be called on PF. + */ +void intel_iov_provisioning_fini(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + pf_fini_reprovisioning_worker(iov); + pf_unprovision_all(iov); +} + +/** + * intel_iov_provisioning_restart() - Restart provisioning state. + * @iov: the IOV struct + * + * Mark provisioning state as not pushed to GuC. + * + * This function shall be called only on PF. + */ +void intel_iov_provisioning_restart(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + iov->pf.provisioning.num_pushed = 0; + + if (pf_get_status(iov) > 0) + pf_start_reprovisioning_worker(iov); +} + +/* + * pf_do_reprovisioning - Push again provisioning of the resources. + * @iov: the IOV struct from within the GT to be affected + */ +static void pf_do_reprovisioning(struct intel_iov *iov) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + unsigned int numvfs = pf_get_numvfs(iov); + intel_wakeref_t wakeref; + + IOV_DEBUG(iov, "reprovisioning %u VFs\n", numvfs); + with_intel_runtime_pm(rpm, wakeref) + intel_iov_provisioning_push(iov, numvfs); +} + +/* + * pf_reprovisioning_worker_func - Worker to re-push provisioning of the resources. + * @w: the worker struct from inside IOV struct + * + * After GuC reset, provisioning information within is lost. This worker function + * allows to schedule re-sending the provisioning outside of reset handler. + */ +static void pf_reprovisioning_worker_func(struct work_struct *w) +{ + struct intel_iov *iov = container_of(w, typeof(*iov), pf.provisioning.worker); + + pf_do_reprovisioning(iov); +} + +static void pf_init_reprovisioning_worker(struct intel_iov *iov) +{ + INIT_WORK(&iov->pf.provisioning.worker, pf_reprovisioning_worker_func); +} + +static void pf_start_reprovisioning_worker(struct intel_iov *iov) +{ + queue_work(system_unbound_wq, &iov->pf.provisioning.worker); +} + +static void pf_fini_reprovisioning_worker(struct intel_iov *iov) +{ + cancel_work_sync(&iov->pf.provisioning.worker); +} + +/** + * intel_iov_provisioning_clear - Clear VF provisioning data. + * @iov: the IOV struct + * @id: VF identifier + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_clear(struct intel_iov *iov, unsigned int id) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + struct intel_guc *guc = iov_to_guc(iov); + intel_wakeref_t wakeref; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + GEM_BUG_ON(id == PFID); + + with_intel_runtime_pm(rpm, wakeref) { + err = guc_action_update_vf_cfg(guc, id, 0, 0); + if (!err) + pf_unprovision_config(iov, id); + } + + if (unlikely(err)) + IOV_ERROR(iov, "Failed to unprovision VF%u (%pe)\n", + id, ERR_PTR(err)); + + return err; +} + +/** + * intel_iov_provisioning_print_ggtt - Print GGTT provisioning data. + * @iov: the IOV struct + * @p: the DRM printer + * + * Print GGTT provisioning data for all VFs. 
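After a GuC reset the firmware forgets all VF configurations, but re-pushing them cannot happen directly in the reset path, so intel_iov_provisioning_restart() only marks the state as not pushed and schedules a worker that re-sends everything later. A stripped-down illustration of that record-now, replay-later split; plain function calls stand in for the Linux workqueue the driver actually uses:

  #include <stdio.h>
  #include <stdbool.h>

  static unsigned int num_pushed;         /* what the firmware currently knows */
  static bool repush_scheduled;

  /* runs in the reset path: record the loss, defer the heavy lifting */
  static void reset_handler(void)
  {
          num_pushed = 0;                 /* GuC lost all VF configs */
          repush_scheduled = true;        /* queue_work() in the driver */
  }

  /* runs later from process context: free to talk to the GuC again */
  static void reprovision_worker(unsigned int enabled_vfs)
  {
          printf("re-pushing configs for %u VFs\n", enabled_vfs);
          num_pushed = enabled_vfs;       /* intel_iov_provisioning_push() */
  }

  int main(void)
  {
          unsigned int enabled_vfs = 2;

          num_pushed = enabled_vfs;
          reset_handler();
          if (repush_scheduled)
                  reprovision_worker(enabled_vfs);
          return 0;
  }
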
+ * VFs without GGTT provisioning are ignored. + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_print_ggtt(struct intel_iov *iov, struct drm_printer *p) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + unsigned int n, total_vfs = pf_get_totalvfs(iov); + const struct intel_iov_config *config; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (unlikely(!provisioning)) + return -ENODATA; + + for (n = 1; n <= total_vfs; n++) { + config = &provisioning->configs[n]; + if (!drm_mm_node_allocated(&config->ggtt_region)) + continue; + + drm_printf(p, "VF%u:\t%#08llx-%#08llx\t(%lluK)\n", + n, + config->ggtt_region.start, + config->ggtt_region.start + config->ggtt_region.size - 1, + config->ggtt_region.size / SZ_1K); + } + + return 0; +} + +/** + * intel_iov_provisioning_print_ctxs - Print contexts provisioning data. + * @iov: the IOV struct + * @p: the DRM printer + * + * Print contexts provisioning data for all VFs. + * VFs without contexts provisioning are ignored. + * + * This function can only be called on PF. + */ +int intel_iov_provisioning_print_ctxs(struct intel_iov *iov, struct drm_printer *p) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + unsigned int n, total_vfs = pf_get_totalvfs(iov); + const struct intel_iov_config *config; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (unlikely(!provisioning)) + return -ENODATA; + + for (n = 1; n <= total_vfs; n++) { + config = &provisioning->configs[n]; + if (!config->num_ctxs) + continue; + + drm_printf(p, "VF%u:\t%hu-%u\t(%hu)\n", + n, + config->begin_ctx, + config->begin_ctx + config->num_ctxs - 1, + config->num_ctxs); + } + + return 0; +} + +/** + * intel_iov_provisioning_print_dbs - Print doorbells provisioning data. + * @iov: the IOV struct + * @p: the DRM printer + * + * Print doorbells provisioning data for all VFs. + * VFs without doorbells provisioning are ignored. + * + * This function can only be called on PF. 
+ */ +int intel_iov_provisioning_print_dbs(struct intel_iov *iov, struct drm_printer *p) +{ + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + unsigned int n, total_vfs = pf_get_totalvfs(iov); + const struct intel_iov_config *config; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (unlikely(!provisioning)) + return -ENODATA; + + for (n = 1; n <= total_vfs; n++) { + config = &provisioning->configs[n]; + if (!config->num_dbs) + continue; + + drm_printf(p, "VF%u:\t%hu-%u\t(%hu)\n", + n, + config->begin_db, + config->begin_db + config->num_dbs - 1, + config->num_dbs); + } + + return 0; +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_IOV) + +static int pf_reprovision_ggtt(struct intel_iov *iov, unsigned int id) +{ + struct i915_ggtt *ggtt = iov_to_gt(iov)->ggtt; + struct intel_iov_provisioning *provisioning = &iov->pf.provisioning; + struct intel_iov_config *config = &provisioning->configs[id]; + struct drm_mm_node *node = &config->ggtt_region; + struct drm_mm_node new_node = {}; + u64 alignment = pf_get_ggtt_alignment(iov); + u64 node_size = node->size; + unsigned int ptes_size; + void *ptes; + int err; + + if (!drm_mm_node_allocated(node)) + return -ENODATA; + + /* save PTEs */ + ptes_size = i915_ggtt_save_ptes(ggtt, node, NULL, 0, 0); + ptes = kmalloc(ptes_size, GFP_KERNEL); + if (!ptes) + return -ENOMEM; + err = i915_ggtt_save_ptes(ggtt, node, ptes, ptes_size, 0); + if (err < 0) + goto out; + + /* allocate new block */ + mutex_lock(&ggtt->vm.mutex); + err = i915_gem_gtt_insert(&ggtt->vm, &new_node, node_size, alignment, + I915_COLOR_UNEVICTABLE, + 0, ggtt->vm.total, + PIN_HIGH); + mutex_unlock(&ggtt->vm.mutex); + if (err) + goto out; + GEM_WARN_ON(node_size != new_node.size); + + /* reprovision */ + err = pf_push_config_ggtt(iov, id, new_node.start, new_node.size); + if (err) { + mutex_lock(&ggtt->vm.mutex); + drm_mm_remove_node(&new_node); + mutex_unlock(&ggtt->vm.mutex); + goto out; + } + + /* replace node */ + mutex_lock(&ggtt->vm.mutex); + drm_mm_remove_node(node); + drm_mm_replace_node(&new_node, node); + mutex_unlock(&ggtt->vm.mutex); + + /* restore PTEs */ + err = i915_ggtt_restore_ptes(ggtt, node, ptes, ptes_size, 0); + if (err) + i915_ggtt_set_space_owner(ggtt, id, node); + +out: + kfree(ptes); + return err; +} + +/** + * intel_iov_provisioning_move_ggtt - Move existing GGTT allocation to other location. + * @iov: the IOV struct + * @id: VF identifier + * + * This function is for internal testing of VF migration scenarios. + * This function can only be called on PF. 
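pf_reprovision_ggtt() above exercises the VF migration path by moving a VF's GGTT block: save the PTEs, allocate a new block, tell the GuC about the new range, swap the drm_mm nodes, and finally rewrite the saved PTEs at the new location, keeping the old allocation if the GuC update fails. A stand-alone sketch of that ordering, using a plain character buffer in place of the GGTT; all names here are illustrative:

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* stand-in for pushing the new GGTT range to the GuC */
  static int push_new_range(size_t start, size_t size)
  {
          printf("GuC: VF now owns [%zu, %zu)\n", start, start + size);
          return 0;       /* pretend the firmware accepted it */
  }

  int main(void)
  {
          char space[64] = "..........VFDATA................................";
          size_t old_start = 10, size = 6, new_start = 30;
          char *saved;

          saved = malloc(size);                   /* 1. save the contents ("PTEs") */
          if (!saved)
                  return 1;
          memcpy(saved, space + old_start, size);

          if (push_new_range(new_start, size)) {  /* 2. point the firmware at the new block */
                  free(saved);                    /*    on failure keep the old block as-is */
                  return 1;
          }

          memset(space + old_start, '.', size);   /* 3. release the old block */
          memcpy(space + new_start, saved, size); /* 4. restore the contents at the new spot */
          free(saved);

          printf("%s\n", space);
          return 0;
  }
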
+ */ +int intel_iov_provisioning_move_ggtt(struct intel_iov *iov, unsigned int id) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(id > pf_get_totalvfs(iov)); + GEM_BUG_ON(id == PFID); + + with_intel_runtime_pm(rpm, wakeref) + err = pf_reprovision_ggtt(iov, id); + + return err; +} + +#endif /* CONFIG_DRM_I915_DEBUG_IOV */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_provisioning.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_provisioning.h new file mode 100644 index 0000000000000..ea9fe2dc1babc --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_provisioning.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_PROVISIONING_H__ +#define __INTEL_IOV_PROVISIONING_H__ + +#include +#include "intel_iov_types.h" + +struct drm_printer; +struct intel_iov; + +void intel_iov_provisioning_init_early(struct intel_iov *iov); +void intel_iov_provisioning_release(struct intel_iov *iov); +void intel_iov_provisioning_init(struct intel_iov *iov); +void intel_iov_provisioning_fini(struct intel_iov *iov); + +int intel_iov_provisioning_set_sched_if_idle(struct intel_iov *iov, bool enable); +bool intel_iov_provisioning_get_sched_if_idle(struct intel_iov *iov); +int intel_iov_provisioning_set_reset_engine(struct intel_iov *iov, bool enable); +bool intel_iov_provisioning_get_reset_engine(struct intel_iov *iov); +int intel_iov_provisioning_set_sample_period(struct intel_iov *iov, u32 value); +u32 intel_iov_provisioning_get_sample_period(struct intel_iov *iov); + +void intel_iov_provisioning_restart(struct intel_iov *iov); +int intel_iov_provisioning_auto(struct intel_iov *iov, unsigned int num_vfs); +int intel_iov_provisioning_verify(struct intel_iov *iov, unsigned int num_vfs); +int intel_iov_provisioning_push(struct intel_iov *iov, unsigned int num); + +int intel_iov_provisioning_set_ggtt(struct intel_iov *iov, unsigned int id, u64 size); +u64 intel_iov_provisioning_get_ggtt(struct intel_iov *iov, unsigned int id); +u64 intel_iov_provisioning_query_free_ggtt(struct intel_iov *iov); +u64 intel_iov_provisioning_query_max_ggtt(struct intel_iov *iov); + +int intel_iov_provisioning_set_ctxs(struct intel_iov *iov, unsigned int id, u16 num_ctxs); +u16 intel_iov_provisioning_get_ctxs(struct intel_iov *iov, unsigned int id); +u16 intel_iov_provisioning_query_max_ctxs(struct intel_iov *iov); +u16 intel_iov_provisioning_query_free_ctxs(struct intel_iov *iov); + +int intel_iov_provisioning_set_dbs(struct intel_iov *iov, unsigned int id, u16 num_dbs); +u16 intel_iov_provisioning_get_dbs(struct intel_iov *iov, unsigned int id); +u16 intel_iov_provisioning_query_free_dbs(struct intel_iov *iov); +u16 intel_iov_provisioning_query_max_dbs(struct intel_iov *iov); + +int intel_iov_provisioning_set_exec_quantum(struct intel_iov *iov, unsigned int id, u32 exec_quantum); +u32 intel_iov_provisioning_get_exec_quantum(struct intel_iov *iov, unsigned int id); + +int intel_iov_provisioning_set_preempt_timeout(struct intel_iov *iov, unsigned int id, u32 preempt_timeout); +u32 intel_iov_provisioning_get_preempt_timeout(struct intel_iov *iov, unsigned int id); + +int intel_iov_provisioning_set_threshold(struct intel_iov *iov, unsigned int id, + enum intel_iov_threshold threshold, u32 value); +u32 intel_iov_provisioning_get_threshold(struct intel_iov *iov, unsigned int id, + enum intel_iov_threshold threshold); + +int 
intel_iov_provisioning_clear(struct intel_iov *iov, unsigned int id); + +int intel_iov_provisioning_print_ggtt(struct intel_iov *iov, struct drm_printer *p); +int intel_iov_provisioning_print_ctxs(struct intel_iov *iov, struct drm_printer *p); +int intel_iov_provisioning_print_dbs(struct intel_iov *iov, struct drm_printer *p); + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_IOV) +int intel_iov_provisioning_move_ggtt(struct intel_iov *iov, unsigned int id); +#endif /* CONFIG_DRM_I915_DEBUG_IOV */ + +#endif /* __INTEL_IOV_PROVISIONING_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_query.c b/drivers/gpu/drm/i915/gt/iov/intel_iov_query.c new file mode 100644 index 0000000000000..14ab1d1ca63db --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_query.c @@ -0,0 +1,740 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include +#include +#include + +#include "abi/iov_actions_abi.h" +#include "abi/iov_actions_mmio_abi.h" +#include "abi/iov_version_abi.h" +#include "gt/uc/abi/guc_actions_vf_abi.h" +#include "gt/uc/abi/guc_klvs_abi.h" +#include "i915_drv.h" +#include "intel_iov_relay.h" +#include "intel_iov_utils.h" +#include "intel_iov_types.h" +#include "intel_iov_query.h" + +static int guc_action_vf_reset(struct intel_guc *guc) +{ + u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_VF_RESET), + }; + int ret; + + ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0); + + return ret > 0 ? -EPROTO : ret; +} + +static int vf_reset_guc_state(struct intel_iov *iov) +{ + struct intel_guc *guc = iov_to_guc(iov); + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + err = guc_action_vf_reset(guc); + if (unlikely(err)) + IOV_PROBE_ERROR(iov, "Failed to reset GuC state (%pe)\n", + ERR_PTR(err)); + + return err; +} + +static int guc_action_match_version(struct intel_guc *guc, u32 *branch, + u32 *major, u32 *minor, u32 *patch) +{ + u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, + GUC_ACTION_VF2GUC_MATCH_VERSION), + FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, + *branch) | + FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, + *major) | + FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, + *minor), + }; + u32 response[VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN]; + int ret; + + ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + if (unlikely(ret < 0)) + return ret; + + GEM_BUG_ON(ret != VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN); + if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0]))) + return -EPROTO; + + *branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]); + *major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]); + *minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]); + *patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]); + + return 0; +} + +static int vf_handshake_with_guc(struct intel_iov *iov) +{ + struct intel_guc *guc = iov_to_guc(iov); + u32 branch, major, minor, patch; + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + branch = GUC_VERSION_BRANCH_ANY; + major = guc->fw.major_vf_ver_wanted; + minor = guc->fw.minor_vf_ver_wanted; + err = 
guc_action_match_version(guc, &branch, &major, &minor, &patch); + if (unlikely(err)) + goto fail; + + dev_info(iov_to_dev(iov), "%s interface version %u.%u.%u.%u\n", + intel_uc_fw_type_repr(guc->fw.type), + branch, major, minor, patch); + + err = intel_uc_fw_set_preloaded(&guc->fw, major, minor); + if (unlikely(err)) + return err; + + return 0; + +fail: + IOV_PROBE_ERROR(iov, "Unable to confirm version %u.%u (%pe)\n", + major, minor, ERR_PTR(err)); + + /* try again with *any* just to query which version is supported */ + branch = GUC_VERSION_BRANCH_ANY; + major = GUC_VERSION_MAJOR_ANY; + minor = GUC_VERSION_MINOR_ANY; + if (!guc_action_match_version(guc, &branch, &major, &minor, &patch)) + IOV_PROBE_ERROR(iov, "Found interface version %u.%u.%u.%u\n", + branch, major, minor, patch); + + return err; +} + +/** + * intel_iov_query_bootstrap - Query interface version data over MMIO. + * @iov: the IOV struct + * + * This function is for VF use only. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_query_bootstrap(struct intel_iov *iov) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + err = vf_reset_guc_state(iov); + if (unlikely(err)) + return err; + + err = vf_handshake_with_guc(iov); + if (unlikely(err)) + return err; + + return 0; +} + +static int guc_action_query_single_klv(struct intel_guc *guc, u32 key, + u32 *value, u32 value_len) +{ + u32 request[VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, + GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV), + FIELD_PREP(VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY, key), + }; + u32 response[VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN]; + u32 length; + int ret; + + ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + if (unlikely(ret < 0)) + return ret; + + GEM_BUG_ON(ret != VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN); + if (unlikely(FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ, response[0]))) + return -EPROTO; + + length = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH, response[0]); + if (unlikely(length > value_len)) + return -EOVERFLOW; + if (unlikely(length < value_len)) + return -ENODATA; + + GEM_BUG_ON(length != value_len); + switch (value_len) { + default: + GEM_BUG_ON(value_len); + return -EINVAL; + case 3: + value[2] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96, response[3]); + fallthrough; + case 2: + value[1] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64, response[2]); + fallthrough; + case 1: + value[0] = FIELD_GET(VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32, response[1]); + fallthrough; + case 0: + break; + } + + return 0; +} + +static int guc_action_query_single_klv32(struct intel_guc *guc, u32 key, u32 *value32) +{ + return guc_action_query_single_klv(guc, key, value32, 1); +} + +static int guc_action_query_single_klv64(struct intel_guc *guc, u32 key, u64 *value64) +{ + u32 value[2]; + int err; + + err = guc_action_query_single_klv(guc, key, value, ARRAY_SIZE(value)); + if (unlikely(err)) + return err; + + *value64 = (u64)value[1] << 32 | value[0]; + return 0; +} + +static int vf_get_ggtt_info(struct intel_iov *iov) +{ + struct intel_guc *guc = iov_to_guc(iov); + u64 start, size; + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start); + if (unlikely(err)) + return err; + + err = 
guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_SIZE_KEY, &size); + if (unlikely(err)) + return err; + + IOV_DEBUG(iov, "GGTT %#llx-%#llx = %lluK\n", + start, start + size - 1, size / SZ_1K); + + if (iov->vf.config.ggtt_size && iov->vf.config.ggtt_size != size) { + IOV_ERROR(iov, "Unexpected GGTT reassignment: %lluK != %lluK\n", + size / SZ_1K, iov->vf.config.ggtt_size / SZ_1K); + return -EREMCHG; + } + + iov->vf.config.ggtt_base = start; + iov->vf.config.ggtt_size = size; + + return iov->vf.config.ggtt_size ? 0 : -ENODATA; +} + +static int vf_get_submission_cfg(struct intel_iov *iov) +{ + struct intel_guc *guc = iov_to_guc(iov); + u32 num_ctxs, num_dbs; + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY, &num_ctxs); + if (unlikely(err)) + return err; + + err = guc_action_query_single_klv32(guc, GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY, &num_dbs); + if (unlikely(err)) + return err; + + IOV_DEBUG(iov, "CTXs %u DBs %u\n", num_ctxs, num_dbs); + + if (iov->vf.config.num_ctxs && iov->vf.config.num_ctxs != num_ctxs) { + IOV_ERROR(iov, "Unexpected CTXs reassignment: %u != %u\n", + num_ctxs, iov->vf.config.num_ctxs); + return -EREMCHG; + } + if (iov->vf.config.num_dbs && iov->vf.config.num_dbs != num_dbs) { + IOV_ERROR(iov, "Unexpected DBs reassignment: %u != %u\n", + num_dbs, iov->vf.config.num_dbs); + return -EREMCHG; + } + + iov->vf.config.num_ctxs = num_ctxs; + iov->vf.config.num_dbs = num_dbs; + + return iov->vf.config.num_ctxs ? 0 : -ENODATA; +} + +/** + * intel_iov_query_config - Query IOV config data over MMIO. + * @iov: the IOV struct + * + * This function is for VF use only. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_query_config(struct intel_iov *iov) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + err = vf_get_ggtt_info(iov); + if (unlikely(err)) + return err; + + err = vf_get_submission_cfg(iov); + if (unlikely(err)) + return err; + + return 0; +} + +static int iov_action_handshake(struct intel_iov *iov, u32 *major, u32 *minor) +{ + u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, IOV_ACTION_VF2PF_HANDSHAKE), + FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, *major) | + FIELD_PREP(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, *minor), + }; + u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN]; + int ret; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + ret = intel_iov_relay_send_to_pf(&iov->relay, + request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + if (unlikely(ret < 0)) + return ret; + + if (unlikely(ret != VF2PF_HANDSHAKE_RESPONSE_MSG_LEN)) + return -EPROTO; + + if (unlikely(FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_0_MBZ, response[0]))) + return -EPROTO; + + *major = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]); + *minor = FIELD_GET(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]); + + return 0; +} + +static int vf_handshake_with_pf(struct intel_iov *iov) +{ + u32 major_wanted = IOV_VERSION_LATEST_MAJOR; + u32 minor_wanted = IOV_VERSION_LATEST_MINOR; + u32 major = major_wanted, minor = minor_wanted; + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + err = iov_action_handshake(iov, &major, &minor); + if (unlikely(err)) + goto failed; + + IOV_DEBUG(iov, "Using ABI %u.%02u\n", major, minor); + return 0; + +failed: + IOV_PROBE_ERROR(iov, "Unable to confirm ABI version %u.%02u (%pe)\n", + 
major, minor, ERR_PTR(err)); + return err; +} + +/** + * intel_iov_query_version - Query IOV version info. + * @iov: the IOV struct + * + * This function is for VF use only. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_query_version(struct intel_iov *iov) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + err = vf_handshake_with_pf(iov); + if (unlikely(err)) + goto failed; + + return 0; + +failed: + IOV_PROBE_ERROR(iov, "Failed to get version info (%pe)\n", ERR_PTR(err)); + return err; +} + +static const i915_reg_t tgl_early_regs[] = { + RPM_CONFIG0, /* _MMIO(0x0D00) */ + GEN10_MIRROR_FUSE3, /* _MMIO(0x9118) */ + GEN11_EU_DISABLE, /* _MMIO(0x9134) */ + GEN11_GT_SLICE_ENABLE, /* _MMIO(0x9138) */ + GEN12_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913C) */ + GEN11_GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ + CTC_MODE, /* _MMIO(0xA26C) */ +}; + +static const i915_reg_t *get_early_regs(struct drm_i915_private *i915, + unsigned int *size) +{ + const i915_reg_t *regs; + + if (IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) { + regs = tgl_early_regs; + *size = ARRAY_SIZE(tgl_early_regs); + } else { + MISSING_CASE(GRAPHICS_VER(i915)); + regs = ERR_PTR(-ENODEV); + *size = 0; + } + + return regs; +} + +static void vf_cleanup_runtime_info(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + kfree(iov->vf.runtime.regs); + iov->vf.runtime.regs = NULL; + iov->vf.runtime.regs_size = 0; +} + +static int vf_prepare_runtime_info(struct intel_iov *iov, unsigned int regs_size, + unsigned int alignment) +{ + unsigned int regs_size_up = roundup(regs_size, alignment); + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + GEM_BUG_ON(iov->vf.runtime.regs_size && !iov->vf.runtime.regs); + + iov->vf.runtime.regs = krealloc(iov->vf.runtime.regs, + regs_size_up * sizeof(struct vf_runtime_reg), + __GFP_ZERO | GFP_NOWAIT | __GFP_NOWARN); + if (unlikely(!iov->vf.runtime.regs)) + return -ENOMEM; + + iov->vf.runtime.regs_size = regs_size; + + return regs_size_up; +} + +static void vf_show_runtime_info(struct intel_iov *iov) +{ + struct vf_runtime_reg *vf_regs = iov->vf.runtime.regs; + unsigned int size = iov->vf.runtime.regs_size; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + for ( ; size--; vf_regs++) { + IOV_DEBUG(iov, "RUNTIME reg[%#x] = %#x\n", + vf_regs->offset, vf_regs->value); + } +} + +static int guc_send_mmio_relay(struct intel_guc *guc, const u32 *request, u32 len, + u32 *response, u32 response_size) +{ + u32 magic1, magic2; + int ret; + + GEM_BUG_ON(len < VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_MIN_LEN); + GEM_BUG_ON(response_size < VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_MIN_LEN); + + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST); + GEM_BUG_ON(FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, request[0]) != + GUC_ACTION_VF2GUC_MMIO_RELAY_SERVICE); + + magic1 = FIELD_GET(VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_0_MAGIC, request[0]); + + ret = intel_guc_send_mmio(guc, request, len, response, response_size); + if (unlikely(ret < 0)) + return ret; + + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, response[0]) != GUC_HXG_ORIGIN_GUC); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, response[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS); + + magic2 = FIELD_GET(VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_0_MAGIC, response[0]); + + if (unlikely(magic1 != magic2)) + return -EPROTO; + + return ret; +} + +static u32 mmio_relay_header(u32 opcode, u32 magic) +{ + return 
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_MMIO_RELAY_SERVICE) | + FIELD_PREP(VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_0_MAGIC, magic) | + FIELD_PREP(VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_0_OPCODE, opcode); +} + +static int vf_handshake_with_pf_mmio(struct intel_iov *iov) +{ + u32 major_wanted = IOV_VERSION_LATEST_MAJOR; + u32 minor_wanted = IOV_VERSION_LATEST_MINOR; + u32 request[VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_MAX_LEN] = { + mmio_relay_header(IOV_OPCODE_VF2PF_MMIO_HANDSHAKE, 0xF), + FIELD_PREP(VF2PF_MMIO_HANDSHAKE_REQUEST_MSG_1_MAJOR, major_wanted) | + FIELD_PREP(VF2PF_MMIO_HANDSHAKE_REQUEST_MSG_1_MINOR, minor_wanted), + }; + u32 response[VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_MAX_LEN]; + u32 major, minor; + int ret; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + ret = guc_send_mmio_relay(iov_to_guc(iov), request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + if (unlikely(ret < 0)) + goto failed; + + major = FIELD_GET(VF2PF_MMIO_HANDSHAKE_RESPONSE_MSG_1_MAJOR, response[1]); + minor = FIELD_GET(VF2PF_MMIO_HANDSHAKE_RESPONSE_MSG_1_MINOR, response[1]); + if (unlikely(major != major_wanted || minor != minor_wanted)) { + ret = -ENOPKG; + goto failed; + } + + IOV_DEBUG(iov, "Using ABI %u.%02u\n", major, minor); + return 0; + +failed: + IOV_PROBE_ERROR(iov, "Unable to confirm ABI version %u.%02u (%pe)\n", + major_wanted, minor_wanted, ERR_PTR(ret)); + return -ECONNREFUSED; +} + +static int vf_get_runtime_info_mmio(struct intel_iov *iov) +{ + u32 request[VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_MAX_LEN]; + u32 response[VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_MAX_LEN]; + u32 chunk = VF2PF_MMIO_GET_RUNTIME_REQUEST_MSG_NUM_OFFSET; + unsigned int size, size_up, i, n; + struct vf_runtime_reg *vf_regs; + const i915_reg_t *regs; + int ret; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + BUILD_BUG_ON(VF2PF_MMIO_GET_RUNTIME_REQUEST_MSG_NUM_OFFSET > + VF2PF_MMIO_GET_RUNTIME_RESPONSE_MSG_NUM_VALUE); + + regs = get_early_regs(iov_to_i915(iov), &size); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto failed; + } + if (!size) + return 0; + + /* + * We want to allocate slightly larger buffer in order to align + * ourselves with GuC interface and avoid out-of-bounds write. 
+ */ + ret = vf_prepare_runtime_info(iov, size, chunk); + if (unlikely(ret < 0)) + goto failed; + vf_regs = iov->vf.runtime.regs; + size_up = ret; + GEM_BUG_ON(!size_up); + + for (i = 0; i < size; i++) + vf_regs[i].offset = i915_mmio_reg_offset(regs[i]); + + for (i = 0; i < size_up; i += chunk) { + + request[0] = mmio_relay_header(IOV_OPCODE_VF2PF_MMIO_GET_RUNTIME, 0); + + for (n = 0; n < chunk; n++) + request[1 + n] = vf_regs[i + n].offset; + + /* we will use few bits from crc32 as magic */ + u32p_replace_bits(request, crc32_le(0, (void *)request, sizeof(request)), + VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_0_MAGIC); + + ret = guc_send_mmio_relay(iov_to_guc(iov), request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + if (unlikely(ret < 0)) + goto failed; + GEM_BUG_ON(ret != ARRAY_SIZE(response)); + + for (n = 0; n < chunk; n++) + vf_regs[i + n].value = response[1 + n]; + } + + return 0; + +failed: + vf_cleanup_runtime_info(iov); + return ret; +} + +static int vf_get_runtime_info_relay(struct intel_iov *iov) +{ + struct drm_i915_private *i915 = iov_to_i915(iov); + u32 request[VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN]; + u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN]; + u32 limit = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2; + u32 start = 0; + u32 count, remaining, num, i; + int ret; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + GEM_BUG_ON(!limit); + assert_rpm_wakelock_held(&i915->runtime_pm); + + request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, IOV_ACTION_VF2PF_QUERY_RUNTIME) | + FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, limit); + +repeat: + request[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, start); + ret = intel_iov_relay_send_to_pf(&iov->relay, + request, ARRAY_SIZE(request), + response, ARRAY_SIZE(response)); + if (unlikely(ret < 0)) + goto failed; + + if (unlikely(ret < VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN)) { + ret = -EPROTO; + goto failed; + } + if (unlikely((ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) % 2)) { + ret = -EPROTO; + goto failed; + } + + num = (ret - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2; + count = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, response[0]); + remaining = FIELD_GET(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING, response[1]); + + IOV_DEBUG(iov, "count=%u num=%u ret=%d start=%u remaining=%u\n", + count, num, ret, start, remaining); + + if (unlikely(count != num)) { + ret = -EPROTO; + goto failed; + } + + if (start == 0) { + ret = vf_prepare_runtime_info(iov, num + remaining, 1); + if (unlikely(ret < 0)) + goto failed; + } else if (unlikely(start + num > iov->vf.runtime.regs_size)) { + ret = -EPROTO; + goto failed; + } + + for (i = 0; i < num; ++i) { + struct vf_runtime_reg *reg = &iov->vf.runtime.regs[start + i]; + + reg->offset = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i]; + reg->value = response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN + 2 * i + 1]; + } + + if (remaining) { + start += num; + goto repeat; + } + + return 0; + +failed: + vf_cleanup_runtime_info(iov); + return ret; +} + +/** + * intel_iov_query_runtime - Query IOV runtime data. + * @iov: the IOV struct + * @early: use early MMIO access + * + * This function is for VF use only. + * + * Return: 0 on success or a negative error code on failure. 
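vf_get_runtime_info_relay() above pages through the PF's register list: each request carries a start index and a limit, each response reports how many entries it holds and how many remain, and the VF keeps asking until nothing remains. A minimal stand-alone model of that loop, with the PF side faked by an array; the names are illustrative:

  #include <stdio.h>

  #define LIMIT 4                 /* entries per response, like the relay buffer allows */

  static const unsigned int pf_regs[] = { 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20 };
  #define PF_COUNT (sizeof(pf_regs) / sizeof(pf_regs[0]))

  /* fake PF handler: copy up to LIMIT entries starting at 'start' */
  static unsigned int pf_query(unsigned int start, unsigned int *out,
                               unsigned int *remaining)
  {
          unsigned int num = PF_COUNT - start;

          if (num > LIMIT)
                  num = LIMIT;
          for (unsigned int i = 0; i < num; i++)
                  out[i] = pf_regs[start + i];
          *remaining = PF_COUNT - start - num;
          return num;
  }

  int main(void)
  {
          unsigned int buf[LIMIT], start = 0, remaining;

          do {
                  unsigned int num = pf_query(start, buf, &remaining);

                  for (unsigned int i = 0; i < num; i++)
                          printf("reg[%u] = %u\n", start + i, buf[i]);
                  start += num;
          } while (remaining);

          return 0;
  }
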
+ */ +int intel_iov_query_runtime(struct intel_iov *iov, bool early) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + if (early) { + err = vf_handshake_with_pf_mmio(iov); + if (unlikely(err)) + goto failed; + } + + if (early) + err = vf_get_runtime_info_mmio(iov); + else + err = vf_get_runtime_info_relay(iov); + if (unlikely(err)) + goto failed; + + vf_show_runtime_info(iov); + return 0; + +failed: + IOV_PROBE_ERROR(iov, "Failed to get runtime info (%pe)\n", + ERR_PTR(err)); + return err; +} + +/** + * intel_iov_query_fini - Cleanup all queried IOV data. + * @iov: the IOV struct + * + * This function is for VF use only. + */ +void intel_iov_query_fini(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + vf_cleanup_runtime_info(iov); +} + +/** + * intel_iov_query_print_config - Print queried VF config. + * @iov: the IOV struct + * @p: the DRM printer + * + * This function is for VF use only. + */ +void intel_iov_query_print_config(struct intel_iov *iov, struct drm_printer *p) +{ + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + drm_printf(p, "GGTT range:\t%#08llx-%#08llx\n", + iov->vf.config.ggtt_base, + iov->vf.config.ggtt_base + iov->vf.config.ggtt_size - 1); + drm_printf(p, "GGTT size:\t%lluK\n", iov->vf.config.ggtt_size / SZ_1K); + + drm_printf(p, "contexts:\t%hu\n", iov->vf.config.num_ctxs); + drm_printf(p, "doorbells:\t%hu\n", iov->vf.config.num_dbs); +} diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_query.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_query.h new file mode 100644 index 0000000000000..527d64490714d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_query.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_IOV_QUERY_H__ +#define __INTEL_IOV_QUERY_H__ + +#include + +struct drm_printer; +struct intel_iov; + +int intel_iov_query_bootstrap(struct intel_iov *iov); +int intel_iov_query_config(struct intel_iov *iov); +int intel_iov_query_version(struct intel_iov *iov); +int intel_iov_query_runtime(struct intel_iov *iov, bool early); +void intel_iov_query_fini(struct intel_iov *iov); + +void intel_iov_query_print_config(struct intel_iov *iov, struct drm_printer *p); + +#endif /* __INTEL_IOV_QUERY_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_relay.c b/drivers/gpu/drm/i915/gt/iov/intel_iov_relay.c new file mode 100644 index 0000000000000..338192fc7724f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_relay.c @@ -0,0 +1,663 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include + +#include "abi/iov_actions_abi.h" +#include "abi/iov_errors_abi.h" +#include "abi/iov_messages_abi.h" +#include "gt/intel_gt.h" +#include "intel_iov.h" +#include "intel_iov_relay.h" +#include "intel_iov_service.h" +#include "intel_iov_utils.h" +#include "intel_runtime_pm.h" +#include "i915_drv.h" +#include "i915_gem.h" + +static struct intel_iov *relay_to_iov(struct intel_iov_relay *relay) +{ + return container_of(relay, struct intel_iov, relay); +} + +static struct intel_gt *relay_to_gt(struct intel_iov_relay *relay) +{ + return iov_to_gt(relay_to_iov(relay)); +} + +static struct intel_guc *relay_to_guc(struct intel_iov_relay *relay) +{ + return &relay_to_gt(relay)->uc.guc; +} + +static struct drm_i915_private *relay_to_i915(struct intel_iov_relay *relay) +{ + return relay_to_gt(relay)->i915; +} + +__maybe_unused +static struct device *relay_to_dev(struct intel_iov_relay *relay) +{ + return relay_to_i915(relay)->drm.dev; +} + +#define RELAY_DEBUG(_r, _f, 
...) \ + IOV_DEBUG(relay_to_iov(_r), "relay: " _f, ##__VA_ARGS__) +#define RELAY_ERROR(_r, _f, ...) \ + IOV_ERROR(relay_to_iov(_r), "relay: " _f, ##__VA_ARGS__) +#define RELAY_PROBE_ERROR(_r, _f, ...) \ + IOV_PROBE_ERROR(relay_to_iov(_r), "relay: " _f, ##__VA_ARGS__) + +/* + * How long should we wait for the response? + * For default timeout use CONFIG_DRM_I915_HEARTBEAT_INTERVAL like CTB does. + * If hearbeat interval is not enabled then wait forever. + */ +#define RELAY_TIMEOUT (CONFIG_DRM_I915_HEARTBEAT_INTERVAL ?: MAX_SCHEDULE_TIMEOUT) + +static u32 relay_get_next_fence(struct intel_iov_relay *relay) +{ + u32 fence; + + spin_lock(&relay->lock); + fence = ++relay->last_fence; + if (unlikely(!fence)) + fence = relay->last_fence = 1; + spin_unlock(&relay->lock); + return fence; +} + +struct pending_relay { + struct list_head link; + struct completion done; + u32 target; + u32 fence; + int reply; + u32 *response; /* can't be null */ + u32 response_size; +}; + +static int pf_relay_send(struct intel_iov_relay *relay, u32 target, + u32 relay_id, const u32 *msg, u32 len) +{ + u32 request[PF2GUC_RELAY_TO_VF_REQUEST_MSG_MAX_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_RELAY_TO_VF), + FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID, target), + FIELD_PREP(PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID, relay_id), + }; + int err; + + GEM_BUG_ON(!IS_SRIOV_PF(relay_to_i915(relay))); + GEM_BUG_ON(!target); + GEM_BUG_ON(!len); + GEM_BUG_ON(len + 4 > PF2GUC_RELAY_TO_VF_REQUEST_MSG_MAX_LEN); + + memcpy(&request[3], msg, 4 * len); + +retry: + err = intel_guc_send_nb(relay_to_guc(relay), request, 3 + len, 0); + if (unlikely(err == -EBUSY)) + goto retry; + + return err; +} + +static int vf_relay_send(struct intel_iov_relay *relay, + u32 relay_id, const u32 *msg, u32 len) +{ + u32 request[VF2GUC_RELAY_TO_PF_REQUEST_MSG_MAX_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_RELAY_TO_PF), + FIELD_PREP(VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID, relay_id), + }; + int err; + + GEM_BUG_ON(!IS_SRIOV_VF(relay_to_i915(relay))); + GEM_BUG_ON(!len); + GEM_BUG_ON(len + VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN > + VF2GUC_RELAY_TO_PF_REQUEST_MSG_MAX_LEN); + + memcpy(&request[VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN], msg, 4 * len); + +retry: + err = intel_guc_send_nb(relay_to_guc(relay), request, + VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN + len, 0); + if (unlikely(err == -EBUSY)) + goto retry; + + return err; +} + +static int relay_send(struct intel_iov_relay *relay, u32 target, + u32 relay_id, const u32 *msg, u32 len) +{ + int err; + + GEM_BUG_ON(!len); + RELAY_DEBUG(relay, "sending %s.%u to %u = %*ph\n", + hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])), + relay_id, target, 4 * len, msg); + + if (target) + err = pf_relay_send(relay, target, relay_id, msg, len); + else + err = vf_relay_send(relay, relay_id, msg, len); + + if (unlikely(err < 0)) + RELAY_PROBE_ERROR(relay, "Failed to send %s.%u to %u (%pe) %*ph\n", + hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])), + relay_id, target, ERR_PTR(err), 4 * len, msg); + return err; +} + +/** + * intel_iov_relay_reply_to_vf - Send reply message to VF. 
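relay_get_next_fence() above hands out the relay ID used to pair responses with requests: it increments a counter under the relay lock and skips the value 0 on wrap-around, so a fence of 0 is never handed out. The same idea as a self-contained helper (locking omitted here; the driver holds relay->lock):

  #include <stdio.h>
  #include <stdint.h>

  static uint32_t last_fence = 0xfffffffe;        /* start near the wrap for the demo */

  /* the driver does this under relay->lock; locking is omitted here */
  static uint32_t next_fence(void)
  {
          uint32_t fence = ++last_fence;

          if (!fence)                             /* 0 is never used as a relay ID */
                  fence = last_fence = 1;
          return fence;
  }

  int main(void)
  {
          for (int i = 0; i < 4; i++)
                  printf("%#x\n", (unsigned int)next_fence());
          /* 0xffffffff, then 1 (0 skipped), 2, 3 */
          return 0;
  }
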
+ * @relay: the Relay struct + * @target: target VF number + * @relay_id: relay message ID (must match message ID from the request) + * @msg: response message (can't be NULL) + * @len: length of the response message (in dwords, can't be 0) + * + * This function will embed and send provided `IOV Message`_ to the GuC. + * + * This function can only be used by driver running in SR-IOV PF mode. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_relay_reply_to_vf(struct intel_iov_relay *relay, u32 target, + u32 relay_id, const u32 *msg, u32 len) +{ + GEM_BUG_ON(!IS_SRIOV_PF(relay_to_i915(relay))); + GEM_BUG_ON(!target); + GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_REQUEST); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT); + + return relay_send(relay, target, relay_id, msg, len); +} + +static int relay_send_success(struct intel_iov_relay *relay, u32 target, + u32 relay_id, u32 data) +{ + u32 msg[] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) | + FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, data), + }; + + GEM_WARN_ON(!FIELD_FIT(GUC_HXG_RESPONSE_MSG_0_DATA0, data)); + + return relay_send(relay, target, relay_id, msg, ARRAY_SIZE(msg)); +} + +/** + * intel_iov_relay_reply_ack_to_vf - Send simple success response to VF. + * @relay: the Relay struct + * @target: target VF number (can't be 0) + * @relay_id: relay message ID (must match message ID from the request) + * @data: optional data + * + * This utility function will prepare success response message based on + * given return data and and embed it in relay message for the GuC. + * + * This function can only be used by driver running in SR-IOV PF mode. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_relay_reply_ack_to_vf(struct intel_iov_relay *relay, u32 target, + u32 relay_id, u32 data) +{ + GEM_BUG_ON(!IS_SRIOV_PF(relay_to_i915(relay))); + GEM_BUG_ON(!target); + + return relay_send_success(relay, target, relay_id, data); +} + +static u32 from_err_to_iov_error(int err) +{ + GEM_BUG_ON(err >= 0); + return -err; +} + +static u32 sanitize_iov_error(u32 error) +{ + /* XXX TBD if generic error codes will be allowed */ + if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) + error = IOV_ERROR_UNDISCLOSED; + return error; +} + +static u32 sanitize_iov_error_hint(u32 hint) +{ + /* XXX TBD if generic error codes will be allowed */ + if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) + hint = 0; + return hint; +} + +static int relay_send_failure(struct intel_iov_relay *relay, u32 target, + u32 relay_id, u32 error, u32 hint) +{ + u32 msg[] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_FAILURE) | + FIELD_PREP(GUC_HXG_FAILURE_MSG_0_HINT, hint) | + FIELD_PREP(GUC_HXG_FAILURE_MSG_0_ERROR, error), + }; + + GEM_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_ERROR, error)); + GEM_WARN_ON(!FIELD_FIT(GUC_HXG_FAILURE_MSG_0_HINT, hint)); + + return relay_send(relay, target, relay_id, msg, ARRAY_SIZE(msg)); +} + +/** + * intel_iov_relay_reply_err_to_vf - Send failure response to VF. 
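When the PF rejects a VF request, the negative errno is converted into a positive IOV error code and, outside of selftest builds, collapsed to IOV_ERROR_UNDISCLOSED so a VF cannot probe PF internals through error codes. A tiny sketch of that conversion; the UNDISCLOSED value below is a placeholder, not the ABI constant:

  #include <stdio.h>
  #include <errno.h>
  #include <stdbool.h>

  #define UNDISCLOSED 0xffffu     /* placeholder for IOV_ERROR_UNDISCLOSED */

  static unsigned int to_wire_error(int err, bool selftest)
  {
          unsigned int error = (unsigned int)(-err);      /* negative errno -> positive code */

          if (!selftest)
                  error = UNDISCLOSED;                    /* hide the real reason from the VF */
          return error;
  }

  int main(void)
  {
          printf("selftest build: %#x, production build: %#x\n",
                 to_wire_error(-EOPNOTSUPP, true),
                 to_wire_error(-EOPNOTSUPP, false));
          return 0;
  }
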
+ * @relay: the Relay struct + * @target: target VF number (can't be 0) + * @relay_id: relay message ID (must match message ID from the request) + * @err: errno code (must be < 0) + * + * This utility function will prepare failure response message based on + * given error and hint and and embed it in relay message for the GuC. + * + * This function can only be used by driver running in SR-IOV PF mode. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_relay_reply_err_to_vf(struct intel_iov_relay *relay, u32 target, + u32 relay_id, int err) +{ + u32 error = from_err_to_iov_error(err); + + GEM_BUG_ON(!IS_SRIOV_PF(relay_to_i915(relay))); + GEM_BUG_ON(!target); + + return relay_send_failure(relay, target, relay_id, + sanitize_iov_error(error), 0); +} + +/** + * intel_iov_relay_reply_error_to_vf - Reply with error and hint to VF. + * @relay: the Relay struct + * @target: target VF number (can't be 0) + * @relay_id: relay message ID (must match message ID from the request) + * @error: error code + * @hint: additional optional hint + * + * This utility function will prepare failure response message based on + * given error and hint and and embed it in relay message for the GuC. + * + * This function can only be used by driver running in SR-IOV PF mode. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_relay_reply_error_to_vf(struct intel_iov_relay *relay, u32 target, + u32 relay_id, u16 error, u16 hint) +{ + GEM_BUG_ON(!IS_SRIOV_PF(relay_to_i915(relay))); + GEM_BUG_ON(!target); + + return relay_send_failure(relay, target, relay_id, + sanitize_iov_error(error), + sanitize_iov_error_hint(hint)); +} + +static int relay_send_and_wait(struct intel_iov_relay *relay, u32 target, + u32 relay_id, const u32 *msg, u32 len, + u32 *buf, u32 buf_size) +{ + unsigned long timeout = msecs_to_jiffies(RELAY_TIMEOUT); + u32 action; + u32 data0; + struct pending_relay pending; + int ret; + long n; + + GEM_BUG_ON(!len); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_HOST); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) != GUC_HXG_TYPE_REQUEST); + + action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]); + data0 = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]); + RELAY_DEBUG(relay, "%s.%u to %u action %#x:%u\n", + hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])), + relay_id, target, action, data0); + + init_completion(&pending.done); + pending.target = target; + pending.fence = relay_id; + pending.reply = -ENOMSG; + pending.response = buf; + pending.response_size = buf_size; + + /* list ordering does not need to match fence ordering */ + spin_lock(&relay->lock); + list_add_tail(&pending.link, &relay->pending_relays); + spin_unlock(&relay->lock); + +retry: + ret = relay_send(relay, target, relay_id, msg, len); + if (unlikely(ret < 0)) + goto unlink; + +wait: + n = wait_for_completion_timeout(&pending.done, timeout); + RELAY_DEBUG(relay, "%u.%u wait n=%ld\n", target, relay_id, n); + if (unlikely(n == 0)) { + ret = -ETIME; + goto unlink; + } + + RELAY_DEBUG(relay, "%u.%u reply=%d\n", target, relay_id, pending.reply); + if (unlikely(pending.reply != 0)) { + reinit_completion(&pending.done); + ret = pending.reply; + if (ret == -EAGAIN) + goto retry; + if (ret == -EBUSY) + goto wait; + if (ret > 0) + ret = -ret; + goto unlink; + } + + GEM_BUG_ON(pending.response_size > buf_size); + ret = pending.response_size; + RELAY_DEBUG(relay, "%u.%u response %*ph\n", target, relay_id, 4 * ret, buf); + +unlink: + 
spin_lock(&relay->lock); + list_del(&pending.link); + spin_unlock(&relay->lock); + + if (unlikely(ret < 0)) { + RELAY_PROBE_ERROR(relay, "Unsuccessful %s.%u %#x:%u to %u (%pe) %*ph\n", + hxg_type_to_string(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0])), + relay_id, action, data0, target, ERR_PTR(ret), 4 * len, msg); + } + + return ret; +} + +/** + * intel_iov_relay_send_to_vf - Send message to VF. + * @relay: the Relay struct + * @target: target VF number + * @data: request payload data + * @dat_len: length of the payload data (in dwords, can be 0) + * @buf: placeholder for the response message + * @buf_size: size of the response message placeholder (in dwords) + * + * This function embed provided `IOV Message`_ into GuC relay. + * + * This function can only be used by driver running in SR-IOV PF mode. + * + * Return: Non-negative response length (in dwords) or + * a negative error code on failure. + */ +int intel_iov_relay_send_to_vf(struct intel_iov_relay *relay, u32 target, + const u32 *msg, u32 len, u32 *buf, u32 buf_size) +{ + u32 relay_type; + u32 relay_id; + + GEM_BUG_ON(!IS_SRIOV_PF(relay_to_i915(relay))); + GEM_BUG_ON(!target); + GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_HOST); + + relay_type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]); + relay_id = relay_get_next_fence(relay); + + if (relay_type == GUC_HXG_TYPE_EVENT) + return relay_send(relay, target, relay_id, msg, len); + + GEM_BUG_ON(relay_type != GUC_HXG_TYPE_REQUEST); + return relay_send_and_wait(relay, target, relay_id, msg, len, buf, buf_size); +} + +/** + * intel_iov_relay_send_to_pf - Send message to PF. + * @relay: the Relay struct + * @msg: message to be sent + * @len: length of the message (in dwords, can't be 0) + * @buf: placeholder for the response message + * @buf_size: size of the response message placeholder (in dwords) + * + * This function embed provided `IOV Message`_ into GuC relay. + * + * This function can only be used by driver running in SR-IOV VF mode. + * + * Return: Non-negative response length (in dwords) or + * a negative error code on failure. 
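Every in-flight request registered by relay_send_and_wait() is identified by the pair (target, fence); when a response arrives, relay_handle_reply() below walks the pending list, copies the payload into the waiter's buffer and completes it, while replies that match nobody are reported as -ESRCH. A single-threaded sketch of that correlation, with a plain array standing in for the locked list and the completion:

  #include <stdio.h>
  #include <string.h>
  #include <errno.h>
  #include <stdbool.h>

  struct pending {
          unsigned int target, fence;
          bool done;
          int reply;
          unsigned int response[8];
          unsigned int response_size;
  };

  static struct pending pendings[4];

  static int handle_reply(unsigned int origin, unsigned int fence,
                          const unsigned int *msg, unsigned int len)
  {
          for (unsigned int i = 0; i < 4; i++) {
                  struct pending *p = &pendings[i];

                  if (p->target != origin || p->fence != fence)
                          continue;               /* belongs to some other waiter */
                  if (len > 8)
                          return -ENOBUFS;
                  memcpy(p->response, msg, len * sizeof(*msg));
                  p->response_size = len;
                  p->reply = 0;
                  p->done = true;                 /* complete_all() in the driver */
                  return 0;
          }
          return -ESRCH;                          /* nobody is waiting for this reply */
  }

  int main(void)
  {
          unsigned int payload[2] = { 0xdead, 0xbeef };

          pendings[0] = (struct pending){ .target = 1, .fence = 7, .reply = -ENOMSG };

          handle_reply(2, 7, payload, 2);         /* wrong VF: -ESRCH, waiter untouched */
          handle_reply(1, 7, payload, 2);         /* matches: waiter completed */

          printf("done=%d size=%u data=%#x %#x\n", (int)pendings[0].done,
                 pendings[0].response_size, pendings[0].response[0], pendings[0].response[1]);
          return 0;
  }
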
+ */ +int intel_iov_relay_send_to_pf(struct intel_iov_relay *relay, + const u32 *msg, u32 len, u32 *buf, u32 buf_size) +{ + u32 relay_type; + u32 relay_id; + + GEM_BUG_ON(!IS_SRIOV_VF(relay_to_i915(relay))); + GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_HOST); + + relay_type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]); + relay_id = relay_get_next_fence(relay); + + if (relay_type == GUC_HXG_TYPE_EVENT) + return relay_send(relay, 0, relay_id, msg, len); + + GEM_BUG_ON(relay_type != GUC_HXG_TYPE_REQUEST); + return relay_send_and_wait(relay, 0, relay_id, msg, len, buf, buf_size); +} +ALLOW_ERROR_INJECTION(intel_iov_relay_send_to_pf, ERRNO); + +static int relay_handle_reply(struct intel_iov_relay *relay, u32 origin, + u32 relay_id, int reply, const u32 *msg, u32 len) +{ + struct pending_relay *pending; + int err = -ESRCH; + + spin_lock(&relay->lock); + list_for_each_entry(pending, &relay->pending_relays, link) { + if (pending->target != origin || pending->fence != relay_id) { + RELAY_DEBUG(relay, "%u.%u still awaits response\n", + pending->target, pending->fence); + continue; + } + err = 0; + if (reply == 0) { + if (unlikely(len > pending->response_size)) { + reply = -ENOBUFS; + err = -ENOBUFS; + } else { + pending->response[0] = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, msg[0]); + memcpy(pending->response + 1, msg + 1, 4 * (len - 1)); + pending->response_size = len; + } + } + pending->reply = reply; + complete_all(&pending->done); + break; + } + spin_unlock(&relay->lock); + + return err; +} + +static int relay_handle_failure(struct intel_iov_relay *relay, u32 origin, + u32 relay_id, const u32 *msg, u32 len) +{ + u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[0]); + u32 hint __maybe_unused = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[0]); + + GEM_BUG_ON(!len); + RELAY_DEBUG(relay, "%u.%u error %#x (%pe) hint %u debug %*ph\n", + origin, relay_id, error, ERR_PTR(error), hint, 4 * (len - 1), msg + 1); + + return relay_handle_reply(relay, origin, relay_id, error ?: -ERFKILL, NULL, 0); +} + +static int relay_handle_request(struct intel_iov_relay *relay, u32 origin, + u32 relay_id, const u32 *msg, u32 len) +{ + struct intel_iov *iov = relay_to_iov(relay); + struct drm_i915_private *i915 = relay_to_i915(relay); + struct intel_runtime_pm *rpm = &i915->runtime_pm; + intel_wakeref_t wakeref = intel_runtime_pm_get(rpm); + int err = -EOPNOTSUPP; + + if (intel_iov_is_pf(iov)) + err = intel_iov_service_process_msg(iov, origin, + relay_id, msg, len); + + if (unlikely(err < 0)) { + u32 error = from_err_to_iov_error(err); + + RELAY_ERROR(relay, "Failed to handle %s.%u from %u (%pe) %*ph\n", + hxg_type_to_string(GUC_HXG_TYPE_REQUEST), relay_id, + origin, ERR_PTR(err), 4 * len, msg); + err = relay_send_failure(relay, origin, relay_id, + origin ? 
sanitize_iov_error(error) : error, 0); + } + + intel_runtime_pm_put(rpm, wakeref); + return err; +} + +static int relay_handle_event(struct intel_iov_relay *relay, u32 origin, + u32 relay_id, const u32 *msg, u32 len) +{ + return -EOPNOTSUPP; +} + +static int relay_process_msg(struct intel_iov_relay *relay, u32 origin, + u32 relay_id, const u32 *relay_msg, u32 relay_len) +{ + u32 relay_type; + int err; + + if (unlikely(relay_len < GUC_HXG_MSG_MIN_LEN)) + return -EPROTO; + + if (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, relay_msg[0]) != GUC_HXG_ORIGIN_HOST) + return -EPROTO; + + relay_type = FIELD_GET(GUC_HXG_MSG_0_TYPE, relay_msg[0]); + RELAY_DEBUG(relay, "received %s.%u from %u = %*ph\n", + hxg_type_to_string(relay_type), relay_id, origin, + 4 * relay_len, relay_msg); + + switch (relay_type) { + case GUC_HXG_TYPE_REQUEST: + err = relay_handle_request(relay, origin, relay_id, relay_msg, relay_len); + break; + case GUC_HXG_TYPE_EVENT: + err = relay_handle_event(relay, origin, relay_id, relay_msg, relay_len); + break; + case GUC_HXG_TYPE_RESPONSE_SUCCESS: + err = relay_handle_reply(relay, origin, relay_id, 0, relay_msg, relay_len); + break; + case GUC_HXG_TYPE_NO_RESPONSE_BUSY: + err = relay_handle_reply(relay, origin, relay_id, -EBUSY, NULL, 0); + break; + case GUC_HXG_TYPE_NO_RESPONSE_RETRY: + err = relay_handle_reply(relay, origin, relay_id, -EAGAIN, NULL, 0); + break; + case GUC_HXG_TYPE_RESPONSE_FAILURE: + err = relay_handle_failure(relay, origin, relay_id, relay_msg, relay_len); + break; + default: + err = -EBADRQC; + } + + if (unlikely(err)) + RELAY_ERROR(relay, "Failed to process %s.%u from %u (%pe) %*ph\n", + hxg_type_to_string(relay_type), relay_id, origin, + ERR_PTR(err), 4 * relay_len, relay_msg); + + return err; +} + +/** + * intel_iov_relay_process_guc2pf - Handle relay notification message from the GuC. + * @relay: the Relay struct + * @msg: message to be handled + * @len: length of the message (in dwords) + * + * This function will handle RELAY messages received from the GuC. + * + * This function can only be used if driver is running in SR-IOV PF mode. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_relay_process_guc2pf(struct intel_iov_relay *relay, const u32 *msg, u32 len) +{ + u32 origin, relay_id; + + if (unlikely(!IS_SRIOV_PF(relay_to_i915(relay)))) + return -EPERM; + + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_GUC); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) != GUC_HXG_TYPE_EVENT); + GEM_BUG_ON(FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) != GUC_ACTION_GUC2PF_RELAY_FROM_VF); + + if (unlikely(len < GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN)) + return -EPROTO; + + if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0]))) + return -EPFNOSUPPORT; + + origin = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID, msg[1]); + relay_id = FIELD_GET(GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID, msg[2]); + + if (unlikely(!origin)) + return -EPROTO; + + return relay_process_msg(relay, origin, relay_id, + msg + GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN, + len - GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN); +} + +/** + * intel_iov_relay_process_guc2vf - Handle relay notification message from the GuC. + * @relay: the Relay struct + * @msg: message to be handled + * @len: length of the message (in dwords) + * + * This function will handle RELAY messages received from the GuC. + * + * This function can only be used if driver is running in SR-IOV VF mode. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int intel_iov_relay_process_guc2vf(struct intel_iov_relay *relay, const u32 *msg, u32 len) +{ + struct drm_i915_private *i915 = relay_to_i915(relay); + u32 relay_id; + + if (unlikely(!IS_SRIOV_VF(i915))) + return -EPERM; + + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_GUC); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) != GUC_HXG_TYPE_EVENT); + GEM_BUG_ON(FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) != GUC_ACTION_GUC2VF_RELAY_FROM_PF); + + if (unlikely(len < GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN)) + return -EPROTO; + + if (unlikely(FIELD_GET(GUC_HXG_EVENT_MSG_0_DATA0, msg[0]))) + return -EPFNOSUPPORT; + + relay_id = FIELD_GET(GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID, msg[1]); + + return relay_process_msg(relay, 0, relay_id, + msg + GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN, + len - GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN); +} diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_relay.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_relay.h new file mode 100644 index 0000000000000..ccd08fb35484b --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_relay.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_RELAY_H__ +#define __INTEL_IOV_RELAY_H__ + +#include "intel_iov_types.h" + +static inline void intel_iov_relay_init_early(struct intel_iov_relay *relay) +{ + spin_lock_init(&relay->lock); + INIT_LIST_HEAD(&relay->pending_relays); +} + +int intel_iov_relay_send_to_vf(struct intel_iov_relay *relay, u32 target, + const u32 *msg, u32 len, u32 *buf, u32 buf_size); +int intel_iov_relay_reply_to_vf(struct intel_iov_relay *relay, u32 target, + u32 relay_id, const u32 *msg, u32 len); +int intel_iov_relay_reply_ack_to_vf(struct intel_iov_relay *relay, u32 target, + u32 relay_id, u32 data); +int intel_iov_relay_reply_err_to_vf(struct intel_iov_relay *relay, u32 target, + u32 relay_id, int err); +int intel_iov_relay_reply_error_to_vf(struct intel_iov_relay *relay, u32 target, + u32 relay_id, u16 error, u16 hint); + +int intel_iov_relay_send_to_pf(struct intel_iov_relay *relay, + const u32 *msg, u32 len, u32 *buf, u32 buf_size); + +int intel_iov_relay_process_guc2pf(struct intel_iov_relay *relay, + const u32 *msg, u32 len); +int intel_iov_relay_process_guc2vf(struct intel_iov_relay *relay, + const u32 *msg, u32 len); + +#endif /* __INTEL_IOV_RELAY_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_service.c b/drivers/gpu/drm/i915/gt/iov/intel_iov_service.c new file mode 100644 index 0000000000000..310c17f272182 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_service.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include +#include + +#include "abi/iov_actions_abi.h" +#include "abi/iov_actions_mmio_abi.h" +#include "abi/iov_actions_selftest_abi.h" +#include "abi/iov_errors_abi.h" +#include "abi/iov_messages_abi.h" +#include "abi/iov_version_abi.h" + +#include "intel_iov_relay.h" +#include "intel_iov_service.h" +#include "intel_iov_types.h" +#include "intel_iov_utils.h" + +#include "selftests/iov_selftest_actions.h" + +static void __uncore_read_many(struct intel_uncore *uncore, unsigned int count, + const i915_reg_t *regs, u32 *values) +{ + while (count--) { + *values++ = intel_uncore_read(uncore, *regs++); + } +} + +static const i915_reg_t tgl_runtime_regs[] = { + RPM_CONFIG0, /* _MMIO(0x0D00) */ + GEN10_MIRROR_FUSE3, /* _MMIO(0x9118) */ + GEN11_EU_DISABLE, /* _MMIO(0x9134) */ + GEN11_GT_SLICE_ENABLE, /* _MMIO(0x9138) */ + 
GEN12_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913C) */ + GEN11_GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ + CTC_MODE, /* _MMIO(0xA26C) */ + GEN11_HUC_KERNEL_LOAD_INFO, /* _MMIO(0xC1DC) */ + GEN9_TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */ +}; + +static const i915_reg_t *get_runtime_regs(struct drm_i915_private *i915, + unsigned int *size) +{ + const i915_reg_t *regs; + + if (IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) { + regs = tgl_runtime_regs; + *size = ARRAY_SIZE(tgl_runtime_regs); + } else { + MISSING_CASE(GRAPHICS_VER(i915)); + regs = ERR_PTR(-ENODEV); + *size = 0; + } + + return regs; +} + +static bool regs_selftest(const i915_reg_t *regs, unsigned int count) +{ + u32 offset = 0; + + while (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && count--) { + if (i915_mmio_reg_offset(*regs) < offset) { + pr_err("invalid register order: %#x < %#x\n", + i915_mmio_reg_offset(*regs), offset); + return false; + } + offset = i915_mmio_reg_offset(*regs++); + } + + return true; +} + +static int pf_alloc_runtime_info(struct intel_iov *iov) +{ + const i915_reg_t *regs; + unsigned int size; + u32 *values; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(iov->pf.service.runtime.size); + GEM_BUG_ON(iov->pf.service.runtime.regs); + GEM_BUG_ON(iov->pf.service.runtime.values); + + regs = get_runtime_regs(iov_to_i915(iov), &size); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + if (unlikely(!size)) + return 0; + + if (unlikely(!regs_selftest(regs, size))) + return -EBADSLT; + + values = kcalloc(size, sizeof(u32), GFP_KERNEL); + if (!values) + return -ENOMEM; + + iov->pf.service.runtime.size = size; + iov->pf.service.runtime.regs = regs; + iov->pf.service.runtime.values = values; + + return 0; +} + +static void pf_release_runtime_info(struct intel_iov *iov) +{ + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + kfree(iov->pf.service.runtime.values); + iov->pf.service.runtime.values = NULL; + iov->pf.service.runtime.regs = NULL; + iov->pf.service.runtime.size = 0; +} + +static void pf_prepare_runtime_info(struct intel_iov *iov) +{ + const i915_reg_t *regs; + unsigned int size; + u32 *values; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (!iov->pf.service.runtime.size) + return; + + size = iov->pf.service.runtime.size; + regs = iov->pf.service.runtime.regs; + values = iov->pf.service.runtime.values; + + __uncore_read_many(iov_to_gt(iov)->uncore, size, regs, values); + + while (size--) { + IOV_DEBUG(iov, "reg[%#x] = %#x\n", + i915_mmio_reg_offset(*regs++), *values++); + } +} + +static void pf_reset_runtime_info(struct intel_iov *iov) +{ + unsigned int size; + u32 *values; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (!iov->pf.service.runtime.size) + return; + + size = iov->pf.service.runtime.size; + values = iov->pf.service.runtime.values; + + while (size--) + *values++ = 0; +} + +/** + * intel_iov_service_init_early - Early initialization of the PF IOV services. + * @iov: the IOV struct + * + * Performs early initialization of the IOV PF services, including preparation + * of the runtime info that will be shared with VFs. + * + * This function can only be called on PF. + */ +void intel_iov_service_init_early(struct intel_iov *iov) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + err = pf_alloc_runtime_info(iov); + if (unlikely(err)) + pf_update_status(iov, err, "runtime"); +} + +/** + * intel_iov_service_release - Cleanup PF IOV services. + * @iov: the IOV struct + * + * Releases any data allocated during initialization. + * + * This function can only be called on PF. 
+ */
+void intel_iov_service_release(struct intel_iov *iov)
+{
+	GEM_BUG_ON(!intel_iov_is_pf(iov));
+
+	pf_release_runtime_info(iov);
+}
+
+/**
+ * intel_iov_service_update - Update PF IOV services.
+ * @iov: the IOV struct
+ *
+ * Updates runtime data shared with VFs.
+ *
+ * This function can be called more than once.
+ * This function can only be called on PF.
+ */
+void intel_iov_service_update(struct intel_iov *iov)
+{
+	GEM_BUG_ON(!intel_iov_is_pf(iov));
+
+	pf_prepare_runtime_info(iov);
+}
+
+/**
+ * intel_iov_service_reset - Reset PF IOV services.
+ * @iov: the IOV struct
+ *
+ * Resets runtime data to avoid sharing stale info with VFs.
+ *
+ * This function can be called more than once.
+ * This function can only be called on PF.
+ */
+void intel_iov_service_reset(struct intel_iov *iov)
+{
+	GEM_BUG_ON(!intel_iov_is_pf(iov));
+
+	pf_reset_runtime_info(iov);
+}
+
+static int reply_handshake(struct intel_iov *iov, u32 origin,
+			   u32 relay_id, const u32 *msg, u32 len)
+{
+	struct intel_iov_relay *relay = &iov->relay;
+	u32 response[VF2PF_HANDSHAKE_RESPONSE_MSG_LEN];
+	u32 major, minor, mbz;
+
+	GEM_BUG_ON(!origin);
+
+	if (unlikely(len > VF2PF_HANDSHAKE_REQUEST_MSG_LEN))
+		return -EMSGSIZE;
+
+	mbz = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_0_MBZ, msg[0]);
+	if (unlikely(mbz))
+		return -EINVAL;
+
+	major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, msg[1]);
+	if (major && major != IOV_VERSION_LATEST_MAJOR)
+		return -ENODATA;
+
+	minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, msg[1]);
+	if (unlikely(!major && minor))
+		return -EINVAL;
+	if (minor > IOV_VERSION_LATEST_MINOR)
+		return -ENODATA;
+
+	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
+		      FIELD_PREP(GUC_HXG_RESPONSE_MSG_0_DATA0, 0);
+
+	response[1] = FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MAJOR,
+				 IOV_VERSION_LATEST_MAJOR) |
+		      FIELD_PREP(VF2PF_HANDSHAKE_RESPONSE_MSG_1_MINOR,
+				 IOV_VERSION_LATEST_MINOR);
+
+	return intel_iov_relay_reply_to_vf(relay, origin, relay_id,
+					   response, ARRAY_SIZE(response));
+}
+
+static int pf_reply_runtime_query(struct intel_iov *iov, u32 origin,
+				  u32 relay_id, const u32 *msg, u32 len)
+{
+	struct intel_iov_runtime_regs *runtime = &iov->pf.service.runtime;
+	u32 response[VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MAX_LEN];
+	u32 max_chunk = (ARRAY_SIZE(response) - VF2PF_QUERY_RUNTIME_RESPONSE_MSG_MIN_LEN) / 2;
+	u32 limit, start, chunk, i;
+
+	GEM_BUG_ON(!intel_iov_is_pf(iov));
+
+	if (unlikely(len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
+		return -EMSGSIZE;
+	if (unlikely(len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN))
+		return -EPROTO;
+
+	limit = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_0_LIMIT, msg[0]);
+	start = FIELD_GET(VF2PF_QUERY_RUNTIME_REQUEST_MSG_1_START, msg[1]);
+	if (unlikely(start > runtime->size))
+		return -EINVAL;
+
+	chunk = min_t(u32, runtime->size - start, max_chunk);
+	if (limit)
+		chunk = min_t(u32, chunk, limit);
+
+	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
+		      FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_0_COUNT, chunk);
+
+	response[1] = FIELD_PREP(VF2PF_QUERY_RUNTIME_RESPONSE_MSG_1_REMAINING,
+				 runtime->size - start - chunk);
+
+	for (i = 0; i < chunk; ++i) {
+		i915_reg_t reg = runtime->regs[start + i];
+		u32 offset = i915_mmio_reg_offset(reg);
+		u32 value = runtime->values[start + i];
+
+		response[2 + 2 * i] = offset;
+		response[2 + 2 * i + 1] = value;
+	}
+
+	return
intel_iov_relay_reply_to_vf(&iov->relay, origin, relay_id, + response, 2 + 2 * chunk); +} + +/** + * intel_iov_service_process_msg - Service request message from VF. + * @iov: the IOV struct + * @origin: origin VF number + * @relay_id: message ID + * @msg: request message + * @len: length of the message (in dwords) + * + * This function processes `IOV Message`_ from the VF. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_service_process_msg(struct intel_iov *iov, u32 origin, + u32 relay_id, const u32 *msg, u32 len) +{ + int err = -EOPNOTSUPP; + u32 action; + u32 data; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) != GUC_HXG_TYPE_REQUEST); + + action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]); + data = FIELD_GET(GUC_HXG_REQUEST_MSG_0_DATA0, msg[0]); + IOV_DEBUG(iov, "servicing action %#x:%u from %u\n", action, data, origin); + + if (!origin) + return -EPROTO; + + switch (action) { + case IOV_ACTION_VF2PF_HANDSHAKE: + err = reply_handshake(iov, origin, relay_id, msg, len); + break; + case IOV_ACTION_VF2PF_QUERY_RUNTIME: + err = pf_reply_runtime_query(iov, origin, relay_id, msg, len); + break; + case IOV_ACTION_VF2PF_PF_ST_ACTION: + err = intel_iov_service_perform_selftest_action(iov, origin, relay_id, msg, len); + break; + default: + break; + } + + return err; +} + +static int send_mmio_relay_error(struct intel_iov *iov, + u32 vfid, u32 magic, int fault) +{ + u32 request[PF2GUC_MMIO_RELAY_FAILURE_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_MMIO_RELAY_FAILURE), + FIELD_PREP(PF2GUC_MMIO_RELAY_FAILURE_REQUEST_MSG_1_VFID, vfid), + FIELD_PREP(PF2GUC_MMIO_RELAY_FAILURE_REQUEST_MSG_2_MAGIC, magic) | + FIELD_PREP(PF2GUC_MMIO_RELAY_FAILURE_REQUEST_MSG_2_FAULT, fault), + }; + + return intel_guc_send(iov_to_guc(iov), request, ARRAY_SIZE(request)); +} + +static int send_mmio_relay_reply(struct intel_iov *iov, + u32 vfid, u32 magic, u32 data[4]) +{ + u32 request[PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_MMIO_RELAY_SUCCESS), + FIELD_PREP(PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_1_VFID, vfid), + FIELD_PREP(PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_2_MAGIC, magic) | + FIELD_PREP(PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_2_DATA0, data[0]), + FIELD_PREP(PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_n_DATAx, data[1]), + FIELD_PREP(PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_n_DATAx, data[2]), + FIELD_PREP(PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_n_DATAx, data[3]), + }; + + return intel_guc_send(iov_to_guc(iov), request, ARRAY_SIZE(request)); +} + +static int reply_mmio_relay_handshake(struct intel_iov *iov, + u32 vfid, u32 magic, const u32 *msg) +{ + u32 data[PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_NUM_DATA + 1] = { }; + u32 wanted_major = FIELD_GET(VF2PF_MMIO_HANDSHAKE_REQUEST_MSG_1_MAJOR, msg[1]); + u32 wanted_minor = FIELD_GET(VF2PF_MMIO_HANDSHAKE_REQUEST_MSG_1_MINOR, msg[1]); + u32 major = 0, minor = 0; + int fault = 0; + + IOV_DEBUG(iov, "VF%u wants ABI version %u.%02u\n", vfid, wanted_major, wanted_minor); + + /* XXX for now we only support single major version (latest) */ + + if (!wanted_major && !wanted_minor) { + major = IOV_VERSION_LATEST_MAJOR; + minor = 
IOV_VERSION_LATEST_MINOR; + } else if (wanted_major > IOV_VERSION_LATEST_MAJOR) { + major = IOV_VERSION_LATEST_MAJOR; + minor = IOV_VERSION_LATEST_MINOR; + } else if (wanted_major < IOV_VERSION_LATEST_MAJOR) { + fault = ENOPKG; + } else { + GEM_BUG_ON(wanted_major != IOV_VERSION_LATEST_MAJOR); + GEM_BUG_ON(IOV_VERSION_LATEST_MAJOR != 1); + + if (unlikely(!msg[0] || msg[2] || msg[3])) { + fault = EPROTO; + } else { + major = wanted_major; + minor = min_t(u32, IOV_VERSION_LATEST_MINOR, wanted_minor); + } + } + + if (fault) + return send_mmio_relay_error(iov, vfid, magic, fault); + + IOV_DEBUG(iov, "VF%u will use ABI version %u.%02u\n", vfid, major, minor); + + data[1] = FIELD_PREP(VF2PF_MMIO_HANDSHAKE_RESPONSE_MSG_1_MAJOR, major) | + FIELD_PREP(VF2PF_MMIO_HANDSHAKE_RESPONSE_MSG_1_MINOR, minor); + + return send_mmio_relay_reply(iov, vfid, magic, data); +} + +static int __i915_reg_cmp(const void *a, const void *b) +{ + return i915_mmio_reg_offset(*(const i915_reg_t *)a) - + i915_mmio_reg_offset(*(const i915_reg_t *)b); +} + +static int lookup_reg_index(struct intel_iov *iov, u32 offset) +{ + i915_reg_t key = _MMIO(offset); + i915_reg_t *found = bsearch(&key, iov->pf.service.runtime.regs, + iov->pf.service.runtime.size, sizeof(key), + __i915_reg_cmp); + + return found ? found - iov->pf.service.runtime.regs : -ENODATA; +} + +static int reply_mmio_relay_get_reg(struct intel_iov *iov, + u32 vfid, u32 magic, const u32 *msg) +{ + u32 data[PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_NUM_DATA + 1] = { }; + unsigned int i; + int found; + + BUILD_BUG_ON(VF2PF_MMIO_GET_RUNTIME_REQUEST_MSG_NUM_OFFSET > + GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_NUM_DATA); + BUILD_BUG_ON(VF2PF_MMIO_GET_RUNTIME_REQUEST_MSG_NUM_OFFSET != + PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_NUM_DATA); + + if (unlikely(!msg[0])) + return -EPROTO; + if (unlikely(!msg[1])) + return -EINVAL; + + for (i = 0; i < VF2PF_MMIO_GET_RUNTIME_REQUEST_MSG_NUM_OFFSET; i++) { + u32 offset = msg[i + 1]; + + if (unlikely(!offset)) + continue; + found = lookup_reg_index(iov, offset); + if (found < 0) + return -EACCES; + data[i + 1] = iov->pf.service.runtime.values[found]; + } + + return send_mmio_relay_reply(iov, vfid, magic, data); +} + +/** + * intel_iov_service_process_mmio_relay - Process MMIO Relay notification. + * @iov: the IOV struct + * @msg: mmio relay notification data + * @len: length of the message data (in dwords) + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int intel_iov_service_process_mmio_relay(struct intel_iov *iov, const u32 *msg, + u32 len) +{ + struct drm_i915_private *i915 = iov_to_i915(iov); + struct intel_runtime_pm *rpm = &i915->runtime_pm; + intel_wakeref_t wakeref; + u32 vfid, magic, opcode; + int err = -EPROTO; + + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_GUC); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) != GUC_HXG_TYPE_EVENT); + GEM_BUG_ON(FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) != + GUC_ACTION_GUC2PF_MMIO_RELAY_SERVICE); + + if (unlikely(!IS_SRIOV_PF(i915))) + return -EPERM; + if (unlikely(len != GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_LEN)) + return -EPROTO; + + vfid = FIELD_GET(GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_1_VFID, msg[1]); + magic = FIELD_GET(GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_2_MAGIC, msg[2]); + opcode = FIELD_GET(GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_2_OPCODE, msg[2]); + + if (unlikely(!vfid)) + return -EPROTO; + + wakeref = intel_runtime_pm_get(rpm); + + switch (opcode) { + case IOV_OPCODE_VF2PF_MMIO_HANDSHAKE: + err = reply_mmio_relay_handshake(iov, vfid, magic, msg + 2); + break; + case IOV_OPCODE_VF2PF_MMIO_GET_RUNTIME: + err = reply_mmio_relay_get_reg(iov, vfid, magic, msg + 2); + break; + default: + IOV_DEBUG(iov, "unsupported request %#x from VF%u\n", + opcode, vfid); + err = -EOPNOTSUPP; + } + + if (unlikely(err < 0)) + send_mmio_relay_error(iov, vfid, magic, -err); + + intel_runtime_pm_put(rpm, wakeref); + return err; +} diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_service.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_service.h new file mode 100644 index 0000000000000..3fa3f8f3e5cd5 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_service.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_SERVICE_H__ +#define __INTEL_IOV_SERVICE_H__ + +#include +#include + +struct intel_iov; + +void intel_iov_service_init_early(struct intel_iov *iov); +void intel_iov_service_update(struct intel_iov *iov); +void intel_iov_service_reset(struct intel_iov *iov); +void intel_iov_service_release(struct intel_iov *iov); + +int intel_iov_service_process_msg(struct intel_iov *iov, u32 origin, + u32 relay_id, const u32 *msg, u32 len); + +int intel_iov_service_process_mmio_relay(struct intel_iov *iov, const u32 *msg, + u32 len); + +#endif /* __INTEL_IOV_SERVICE_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_state.c b/drivers/gpu/drm/i915/gt/iov/intel_iov_state.c new file mode 100644 index 0000000000000..4858cf53d8275 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_state.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "intel_iov.h" +#include "intel_iov_state.h" +#include "intel_iov_utils.h" +#include "gt/uc/abi/guc_actions_pf_abi.h" + +static int guc_action_vf_control_cmd(struct intel_guc *guc, u32 vfid, u32 cmd) +{ + u32 request[PF2GUC_VF_CONTROL_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_VF_CONTROL), + FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID, vfid), + FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND, cmd), + }; + + return intel_guc_send(guc, request, ARRAY_SIZE(request)); +} + +static int pf_control_vf(struct intel_iov *iov, u32 vfid, u32 cmd) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + 
GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(vfid > pf_get_totalvfs(iov)); + GEM_BUG_ON(!vfid); + + with_intel_runtime_pm(rpm, wakeref) + err = guc_action_vf_control_cmd(iov_to_guc(iov), vfid, cmd); + + return err; +} + +static void pf_trigger_vf_flr_start(struct intel_iov *iov, u32 vfid) +{ + int ret; + + ret = pf_control_vf(iov, vfid, GUC_PF_TRIGGER_VF_FLR_START); + if (unlikely(ret < 0)) + IOV_ERROR(iov, "Failed to start FLR for VF%u (%pe)\n", + vfid, ERR_PTR(ret)); +} + +static void pf_confirm_vf_flr_done(struct intel_iov *iov, u32 vfid) +{ + int ret; + + ret = pf_control_vf(iov, vfid, GUC_PF_TRIGGER_VF_FLR_FINISH); + if (unlikely(ret < 0)) + IOV_ERROR(iov, "Failed to confirm FLR for VF%u (%pe)\n", + vfid, ERR_PTR(ret)); +} + +static bool pf_vfs_flr_enabled(struct intel_iov *iov, u32 vfid) +{ + return iov_to_i915(iov)->params.vfs_flr_mask & BIT(vfid); +} + +static void pf_handle_vf_flr(struct intel_iov *iov, u32 vfid) +{ + struct device *dev = iov_to_dev(iov); + + dev_info(dev, "VF%u FLR\n", vfid); + pf_trigger_vf_flr_start(iov, vfid); +} + +static void pf_clear_vf_ggtt_entries(struct intel_iov *iov, u32 vfid) +{ + struct intel_iov_config *config = &iov->pf.provisioning.configs[vfid]; + struct intel_gt *gt = iov_to_gt(iov); + + GEM_BUG_ON(vfid > pf_get_totalvfs(iov)); + + if (!drm_mm_node_allocated(&config->ggtt_region)) + return; + + i915_ggtt_set_space_owner(gt->ggtt, vfid, &config->ggtt_region); +} + +static void pf_handle_vf_flr_done(struct intel_iov *iov, u32 vfid) +{ + if (!pf_vfs_flr_enabled(iov, vfid)) { + IOV_DEBUG(iov, "VF%u FLR processing skipped\n", vfid); + goto confirm; + } + + IOV_DEBUG(iov, "processing VF%u FLR\n", vfid); + + pf_clear_vf_ggtt_entries(iov, vfid); + +confirm: + pf_confirm_vf_flr_done(iov, vfid); +} + +static void pf_handle_vf_pause_done(struct intel_iov *iov, u32 vfid) +{ + struct device *dev = iov_to_dev(iov); + + dev_info(dev, "VF%u %s\n", vfid, "paused"); +} + +static void pf_handle_vf_fixup_done(struct intel_iov *iov, u32 vfid) +{ + struct device *dev = iov_to_dev(iov); + + dev_info(dev, "VF%u %s\n", vfid, "has completed migration"); +} + +static int pf_handle_vf_event(struct intel_iov *iov, u32 vfid, u32 eventid) +{ + switch (eventid) { + case GUC_PF_NOTIFY_VF_FLR: + pf_handle_vf_flr(iov, vfid); + break; + case GUC_PF_NOTIFY_VF_FLR_DONE: + pf_handle_vf_flr_done(iov, vfid); + break; + case GUC_PF_NOTIFY_VF_PAUSE_DONE: + pf_handle_vf_pause_done(iov, vfid); + break; + case GUC_PF_NOTIFY_VF_FIXUP_DONE: + pf_handle_vf_fixup_done(iov, vfid); + break; + default: + return -ENOPKG; + } + + return 0; +} + +static int pf_handle_pf_event(struct intel_iov *iov, u32 eventid) +{ + switch (eventid) { + case GUC_PF_NOTIFY_VF_ENABLE: + IOV_DEBUG(iov, "VFs %s/%s\n", enableddisabled(true), enableddisabled(false)); + break; + default: + return -ENOPKG; + } + + return 0; +} + +/** + * intel_iov_state_process_guc2pf - Handle VF state notification from GuC. + * @iov: the IOV struct + * @msg: message from the GuC + * @len: length of the message + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int intel_iov_state_process_guc2pf(struct intel_iov *iov, + const u32 *msg, u32 len) +{ + u32 vfid; + u32 eventid; + + GEM_BUG_ON(!len); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) != GUC_HXG_ORIGIN_GUC); + GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) != GUC_HXG_TYPE_EVENT); + GEM_BUG_ON(FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) != GUC_ACTION_GUC2PF_VF_STATE_NOTIFY); + + if (unlikely(!intel_iov_is_pf(iov))) + return -EPROTO; + + if (unlikely(FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ, msg[0]))) + return -EPFNOSUPPORT; + + if (unlikely(len != GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN)) + return -EPROTO; + + vfid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID, msg[1]); + eventid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT, msg[2]); + + if (unlikely(vfid > pf_get_totalvfs(iov))) + return -EINVAL; + + return vfid ? pf_handle_vf_event(iov, vfid, eventid) : pf_handle_pf_event(iov, eventid); +} + +/** + * intel_iov_state_pause_vf - Pause VF. + * @iov: the IOV struct + * @vfid: VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_state_pause_vf(struct intel_iov *iov, u32 vfid) +{ + return pf_control_vf(iov, vfid, GUC_PF_TRIGGER_VF_PAUSE); +} + +/** + * intel_iov_state_resume_vf - Resume VF. + * @iov: the IOV struct + * @vfid: VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_state_resume_vf(struct intel_iov *iov, u32 vfid) +{ + return pf_control_vf(iov, vfid, GUC_PF_TRIGGER_VF_RESUME); +} + +/** + * intel_iov_state_stop_vf - Stop VF. + * @iov: the IOV struct + * @vfid: VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_state_stop_vf(struct intel_iov *iov, u32 vfid) +{ + return pf_control_vf(iov, vfid, GUC_PF_TRIGGER_VF_STOP); +} + +static int guc_action_save_restore_vf(struct intel_guc *guc, u32 vfid, u32 opcode, u64 offset) +{ + u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = { + FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) | + FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) | + FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_SAVE_RESTORE_VF) | + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE, opcode), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID, vfid), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_BUFF_LO, lower_32_bits(offset)), + FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_BUFF_HI, upper_32_bits(offset)), + }; + int ret; + + ret = intel_guc_send(guc, request, ARRAY_SIZE(request)); + + return ret > SZ_4K ? 
-EPROTO : ret; +} + +static int pf_save_vf(struct intel_iov *iov, u32 vfid, void *buf) +{ + struct intel_guc *guc = iov_to_guc(iov); + struct i915_vma *vma; + void *blob; + int ret; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(vfid > pf_get_totalvfs(iov)); + GEM_BUG_ON(!vfid); + + ret = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, (void **)&blob); + if (unlikely(ret)) + goto failed; + + ret = guc_action_save_restore_vf(guc, vfid, GUC_PF_OPCODE_VF_SAVE, + intel_guc_ggtt_offset(guc, vma)); + + if (likely(ret > 0)) { + memcpy(buf, blob, SZ_4K); + + if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && + memchr_inv(buf + ret, 0, SZ_4K - ret)) { + pr_err("non-zero state found beyond offset %d!\n", ret); + } + } + + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); + + if (unlikely(ret < 0)) + goto failed; + + IOV_DEBUG(iov, "VF%u: state saved (%d bytes) %*ph ..\n", + vfid, ret, min_t(int, 16, ret), buf); + return 0; + +failed: + IOV_ERROR(iov, "Failed to save VF%u state (%pe)\n", vfid, ERR_PTR(ret)); + return ret; +} + +/** + * intel_iov_state_save_vf - Save VF state. + * @iov: the IOV struct + * @vfid: VF identifier + * @buf: buffer to save VF state (must be at least 4K) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int intel_iov_state_save_vf(struct intel_iov *iov, u32 vfid, void *buf) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + with_intel_runtime_pm(rpm, wakeref) + err = pf_save_vf(iov, vfid, buf); + + return err; +} + +static int pf_restore_vf(struct intel_iov *iov, u32 vfid, const void *buf) +{ + struct intel_guc *guc = iov_to_guc(iov); + struct i915_vma *vma; + void *blob; + int ret; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + GEM_BUG_ON(vfid > pf_get_totalvfs(iov)); + GEM_BUG_ON(!vfid); + + ret = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, (void **)&blob); + if (unlikely(ret < 0)) + goto failed; + + memcpy(blob, buf, SZ_4K); + + ret = guc_action_save_restore_vf(guc, vfid, GUC_PF_OPCODE_VF_RESTORE, + intel_guc_ggtt_offset(guc, vma)); + + i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP); + + if (unlikely(ret < 0)) + goto failed; + + IOV_DEBUG(iov, "VF%u: state restored (%u bytes) %*ph\n", + vfid, ret, min_t(int, 16, ret), buf); + return 0; + +failed: + IOV_ERROR(iov, "Failed to restore VF%u state (%pe) %*ph\n", + vfid, ERR_PTR(ret), 16, buf); + return ret; +} + +/** + * intel_iov_state_restore_vf - Restore VF state. + * @iov: the IOV struct + * @vfid: VF identifier + * @buf: buffer with VF state to restore (must be 4K) + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int intel_iov_state_restore_vf(struct intel_iov *iov, u32 vfid, const void *buf) +{ + struct intel_runtime_pm *rpm = iov_to_gt(iov)->uncore->rpm; + intel_wakeref_t wakeref; + int err = -ENONET; + + with_intel_runtime_pm(rpm, wakeref) + err = pf_restore_vf(iov, vfid, buf); + + return err; +} diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_state.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_state.h new file mode 100644 index 0000000000000..605a6ff3f4287 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_state.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_STATE_H__ +#define __INTEL_IOV_STATE_H__ + +#include + +struct intel_iov; + +int intel_iov_state_pause_vf(struct intel_iov *iov, u32 vfid); +int intel_iov_state_resume_vf(struct intel_iov *iov, u32 vfid); +int intel_iov_state_stop_vf(struct intel_iov *iov, u32 vfid); +int intel_iov_state_save_vf(struct intel_iov *iov, u32 vfid, void *buf); +int intel_iov_state_restore_vf(struct intel_iov *iov, u32 vfid, const void *buf); + +int intel_iov_state_process_guc2pf(struct intel_iov *iov, + const u32 *msg, u32 len); + +#endif /* __INTEL_IOV_STATE_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_sysfs.c b/drivers/gpu/drm/i915/gt/iov/intel_iov_sysfs.c new file mode 100644 index 0000000000000..92f6834a228a9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_sysfs.c @@ -0,0 +1,652 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "intel_iov_provisioning.h" +#include "intel_iov_state.h" +#include "intel_iov_sysfs.h" +#include "intel_iov_types.h" +#include "intel_iov_utils.h" + +/* + * /sys/class/drm/card* + * └── iov + *    ├── pf/ + *    │   └── gt/ + *    │      └── ... + *    ├── vf1/ + *    │   └── gt/ + *    │      └── ... 
+ */ + +#define IOV_KOBJ_GT_NAME "gt" + +struct iov_kobj { + struct kobject base; + struct intel_iov *iov; +}; +#define to_iov_kobj(x) container_of(x, struct iov_kobj, base) + +static struct intel_iov *kobj_to_iov(struct kobject *kobj) +{ + return to_iov_kobj(kobj)->iov; +} + +static unsigned int kobj_to_id(struct kobject *kobj) +{ + return to_sriov_ext_kobj(kobj->parent)->id; +} + +struct iov_attr { + struct attribute attr; + ssize_t (*show)(struct intel_iov *iov, unsigned int id, char *buf); + ssize_t (*store)(struct intel_iov *iov, unsigned int id, + const char *buf, size_t count); +}; +#define to_iov_attr(x) container_of(x, struct iov_attr, attr) + +#define IOV_ATTR(name) \ +static struct iov_attr name##_iov_attr = \ + __ATTR(name, 0644, name##_iov_attr_show, name##_iov_attr_store) + +#define IOV_ATTR_RO(name) \ +static struct iov_attr name##_iov_attr = \ + __ATTR(name, 0444, name##_iov_attr_show, NULL) + +/* common attributes */ + +static ssize_t contexts_quota_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + u16 num_ctxs = intel_iov_provisioning_get_ctxs(iov, id); + + return sysfs_emit(buf, "%hu\n", num_ctxs); +} + +static ssize_t contexts_quota_iov_attr_store(struct intel_iov *iov, + unsigned int id, + const char *buf, size_t count) +{ + u16 num_ctxs; + int err; + + err = kstrtou16(buf, 0, &num_ctxs); + if (err) + return err; + + err = intel_iov_provisioning_set_ctxs(iov, id, num_ctxs); + return err ?: count; +} + +static ssize_t doorbells_quota_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + u16 num_dbs = intel_iov_provisioning_get_dbs(iov, id); + + return sysfs_emit(buf, "%hu\n", num_dbs); +} + +static ssize_t doorbells_quota_iov_attr_store(struct intel_iov *iov, + unsigned int id, + const char *buf, size_t count) +{ + u16 num_dbs; + int err; + + err = kstrtou16(buf, 0, &num_dbs); + if (err) + return err; + + err = intel_iov_provisioning_set_dbs(iov, id, num_dbs); + return err ?: count; +} + +static ssize_t exec_quantum_ms_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + u32 exec_quantum = intel_iov_provisioning_get_exec_quantum(iov, id); + + return sysfs_emit(buf, "%u\n", exec_quantum); +} + +static ssize_t exec_quantum_ms_iov_attr_store(struct intel_iov *iov, + unsigned int id, + const char *buf, size_t count) +{ + u32 exec_quantum; + int err; + + err = kstrtou32(buf, 0, &exec_quantum); + if (err) + return err; + + err = intel_iov_provisioning_set_exec_quantum(iov, id, exec_quantum); + return err ?: count; +} + +static ssize_t preempt_timeout_us_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + u32 preempt_timeout = intel_iov_provisioning_get_preempt_timeout(iov, id); + + return sysfs_emit(buf, "%u\n", preempt_timeout); +} + +static ssize_t preempt_timeout_us_iov_attr_store(struct intel_iov *iov, + unsigned int id, + const char *buf, size_t count) +{ + u32 preempt_timeout; + int err; + + err = kstrtou32(buf, 0, &preempt_timeout); + if (err) + return err; + + err = intel_iov_provisioning_set_preempt_timeout(iov, id, preempt_timeout); + return err ?: count; +} + +IOV_ATTR(contexts_quota); +IOV_ATTR(doorbells_quota); +IOV_ATTR(exec_quantum_ms); +IOV_ATTR(preempt_timeout_us); + +static struct attribute *iov_attrs[] = { + &contexts_quota_iov_attr.attr, + &doorbells_quota_iov_attr.attr, + &exec_quantum_ms_iov_attr.attr, + &preempt_timeout_us_iov_attr.attr, + NULL +}; + +static const struct attribute_group iov_attr_group = { + .attrs = iov_attrs, +}; + +static const struct attribute_group 
*default_iov_attr_groups[] = { + &iov_attr_group, + NULL +}; + +/* PF only attributes */ + +static ssize_t ggtt_free_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + GEM_WARN_ON(id); + return sysfs_emit(buf, "%llu\n", intel_iov_provisioning_query_free_ggtt(iov)); +} + +static ssize_t ggtt_max_quota_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + GEM_WARN_ON(id); + return sysfs_emit(buf, "%llu\n", intel_iov_provisioning_query_max_ggtt(iov)); +} + +static ssize_t contexts_free_iov_attr_show(struct intel_iov *iov, unsigned int id, char *buf) +{ + GEM_WARN_ON(id); + return sysfs_emit(buf, "%hu\n", intel_iov_provisioning_query_free_ctxs(iov)); +} + +static ssize_t contexts_max_quota_iov_attr_show(struct intel_iov *iov, unsigned int id, char *buf) +{ + GEM_WARN_ON(id); + return sysfs_emit(buf, "%hu\n", intel_iov_provisioning_query_max_ctxs(iov)); +} + +static ssize_t doorbells_free_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + GEM_WARN_ON(id); + return sysfs_emit(buf, "%hu\n", intel_iov_provisioning_query_free_dbs(iov)); +} + +static ssize_t doorbells_max_quota_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + GEM_WARN_ON(id); + return sysfs_emit(buf, "%hu\n", intel_iov_provisioning_query_max_dbs(iov)); +} + +static ssize_t sched_if_idle_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + u32 value = intel_iov_provisioning_get_sched_if_idle(iov); + + return sysfs_emit(buf, "%u\n", value); +} + +static ssize_t sched_if_idle_iov_attr_store(struct intel_iov *iov, + unsigned int id, + const char *buf, size_t count) +{ + bool value; + int err; + + err = kstrtobool(buf, &value); + if (err) + return err; + + err = intel_iov_provisioning_set_sched_if_idle(iov, value); + return err ?: count; +} + +static ssize_t engine_reset_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + u32 value = intel_iov_provisioning_get_reset_engine(iov); + + return sysfs_emit(buf, "%u\n", value); +} + +static ssize_t engine_reset_iov_attr_store(struct intel_iov *iov, + unsigned int id, + const char *buf, size_t count) +{ + bool value; + int err; + + err = kstrtobool(buf, &value); + if (err) + return err; + + err = intel_iov_provisioning_set_reset_engine(iov, value); + return err ?: count; +} + +static ssize_t sample_period_ms_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + u32 value = intel_iov_provisioning_get_sample_period(iov); + + return sysfs_emit(buf, "%u\n", value); +} + +static ssize_t sample_period_ms_iov_attr_store(struct intel_iov *iov, + unsigned int id, + const char *buf, size_t count) +{ + u32 value; + int err; + + err = kstrtou32(buf, 0, &value); + if (err) + return err; + + err = intel_iov_provisioning_set_sample_period(iov, value); + return err ?: count; +} + +IOV_ATTR_RO(ggtt_free); +IOV_ATTR_RO(ggtt_max_quota); +IOV_ATTR_RO(contexts_free); +IOV_ATTR_RO(contexts_max_quota); +IOV_ATTR_RO(doorbells_free); +IOV_ATTR_RO(doorbells_max_quota); + +IOV_ATTR(sched_if_idle); +IOV_ATTR(engine_reset); +IOV_ATTR(sample_period_ms); + +static struct attribute *pf_attrs[] = { + NULL +}; + +static const struct attribute_group pf_attr_group = { + .attrs = pf_attrs, +}; + +static struct attribute *pf_available_attrs[] = { + &ggtt_free_iov_attr.attr, + &ggtt_max_quota_iov_attr.attr, + &contexts_free_iov_attr.attr, + &contexts_max_quota_iov_attr.attr, + &doorbells_free_iov_attr.attr, + &doorbells_max_quota_iov_attr.attr, + NULL +}; + +static const struct attribute_group 
pf_available_attr_group = { + .name = "available", + .attrs = pf_available_attrs, +}; + +static struct attribute *pf_policies_attrs[] = { + &sched_if_idle_iov_attr.attr, + &engine_reset_iov_attr.attr, + &sample_period_ms_iov_attr.attr, + NULL +}; + +static const struct attribute_group pf_policies_attr_group = { + .name = "policies", + .attrs = pf_policies_attrs, +}; + +static const struct attribute_group *pf_attr_groups[] = { + &pf_attr_group, + &pf_available_attr_group, + &pf_policies_attr_group, + NULL +}; + +/* VFs only attributes */ + +static ssize_t ggtt_quota_iov_attr_show(struct intel_iov *iov, + unsigned int id, char *buf) +{ + u64 size = intel_iov_provisioning_get_ggtt(iov, id); + + return sysfs_emit(buf, "%llu\n", size); +} + +static ssize_t ggtt_quota_iov_attr_store(struct intel_iov *iov, + unsigned int id, + const char *buf, size_t count) +{ + u64 size; + int err; + + err = kstrtou64(buf, 0, &size); + if (err) + return err; + + err = intel_iov_provisioning_set_ggtt(iov, id, size); + return err ?: count; +} + +IOV_ATTR(ggtt_quota); + +static struct attribute *vf_attrs[] = { + &ggtt_quota_iov_attr.attr, + NULL +}; + +#define __iov_threshold_to_attr_impl(N, K) \ +static ssize_t N##_iov_attr_show(struct intel_iov *iov, unsigned int id, char *buf) \ +{ \ + u32 value = intel_iov_provisioning_get_threshold(iov, id, IOV_THRESHOLD_##K); \ + \ + return sysfs_emit(buf, "%u\n", value); \ +} \ + \ +static ssize_t N##_iov_attr_store(struct intel_iov *iov, unsigned int id, \ + const char *buf, size_t count) \ +{ \ + u32 value; \ + int err; \ + \ + err = kstrtou32(buf, 0, &value); \ + if (err) \ + return err; \ + \ + err = intel_iov_provisioning_set_threshold(iov, id, IOV_THRESHOLD_##K, value); \ + return err ?: count; \ +} \ + \ +IOV_ATTR(N); + +IOV_THRESHOLDS(__iov_threshold_to_attr_impl) +#undef __iov_threshold_to_attr_impl + +static struct attribute *vf_threshold_attrs[] = { +#define __iov_threshold_to_attr_list(N, K) \ + &N##_iov_attr.attr, + IOV_THRESHOLDS(__iov_threshold_to_attr_list) +#undef __iov_threshold_to_attr_list + NULL +}; + +static ssize_t bin_attr_state_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct intel_iov *iov = kobj_to_iov(kobj); + unsigned int id = kobj_to_id(kobj); + int err; + + if (off > 0 || count < SZ_4K) + return -EINVAL; + + err = intel_iov_state_save_vf(iov, id, buf); + if (unlikely(err)) + return err; + + return SZ_4K; +} + +static ssize_t bin_attr_state_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct intel_iov *iov = kobj_to_iov(kobj); + unsigned int id = kobj_to_id(kobj); + int err; + + if (off > 0 || count < SZ_4K) + return -EINVAL; + + err = intel_iov_state_restore_vf(iov, id, buf); + if (unlikely(err)) + return err; + + return count; +} + +static BIN_ATTR(state, 0600, bin_attr_state_read, bin_attr_state_write, SZ_4K); + +static struct bin_attribute *vf_bin_attrs[] = { + &bin_attr_state, + NULL +}; + +static const struct attribute_group vf_attr_group = { + .attrs = vf_attrs, + .bin_attrs = vf_bin_attrs, +}; + +static const struct attribute_group vf_threshold_attr_group = { + .name = "threshold", + .attrs = vf_threshold_attrs, +}; + +static const struct attribute_group *vf_attr_groups[] = { + &vf_attr_group, + &vf_threshold_attr_group, + NULL +}; + +static const struct attribute_group **iov_attr_groups(unsigned int id) +{ + return id ? 
vf_attr_groups : pf_attr_groups; +} + +/* no user serviceable parts below */ + +static ssize_t iov_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct iov_attr *iov_attr = to_iov_attr(attr); + struct intel_iov *iov = kobj_to_iov(kobj); + unsigned int id = kobj_to_id(kobj); + + return iov_attr->show ? iov_attr->show(iov, id, buf) : -EIO; +} + +static ssize_t iov_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t count) +{ + struct iov_attr *iov_attr = to_iov_attr(attr); + struct intel_iov *iov = kobj_to_iov(kobj); + unsigned int id = kobj_to_id(kobj); + + return iov_attr->store ? iov_attr->store(iov, id, buf, count) : -EIO; +} + +static const struct sysfs_ops iov_sysfs_ops = { + .show = iov_attr_show, + .store = iov_attr_store, +}; + +static struct kobject *iov_kobj_alloc(struct intel_iov *iov) +{ + struct iov_kobj *iov_kobj; + + iov_kobj = kzalloc(sizeof(*iov_kobj), GFP_KERNEL); + if (!iov_kobj) + return NULL; + + iov_kobj->iov = iov; + + return &iov_kobj->base; +} + +static void iov_kobj_release(struct kobject *kobj) +{ + struct iov_kobj *iov_kobj = to_iov_kobj(kobj); + + kfree(iov_kobj); +} + +static struct kobj_type iov_ktype = { + .release = iov_kobj_release, + .sysfs_ops = &iov_sysfs_ops, + .default_groups = default_iov_attr_groups, +}; + +static int pf_setup_provisioning(struct intel_iov *iov) +{ + struct i915_sriov_ext_kobj **parents = iov_to_i915(iov)->sriov.pf.sysfs.kobjs; + struct kobject **kobjs; + struct kobject *kobj; + unsigned int count = 1 + pf_get_totalvfs(iov); + unsigned int n; + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + if (!parents) { + err = -ENODEV; + goto failed; + } + + err = i915_inject_probe_error(iov_to_i915(iov), -ENOMEM); + if (unlikely(err)) + goto failed; + + kobjs = kcalloc(count, sizeof(*kobjs), GFP_KERNEL); + if (unlikely(!kobjs)) { + err = -ENOMEM; + goto failed; + } + + for (n = 0; n < count; n++) { + struct kobject *parent; + + err = i915_inject_probe_error(iov_to_i915(iov), -ENOMEM); + if (unlikely(err)) { + kobj = NULL; + goto failed_kobj_n; + } + + kobj = iov_kobj_alloc(iov); + if (unlikely(!kobj)) { + err = -ENOMEM; + goto failed_kobj_n; + } + + parent = &parents[n]->base; + + err = kobject_init_and_add(kobj, &iov_ktype, parent, IOV_KOBJ_GT_NAME); + if (unlikely(err)) + goto failed_kobj_n; + + err = i915_inject_probe_error(iov_to_i915(iov), -EEXIST); + if (unlikely(err)) + goto failed_kobj_n; + + err = sysfs_create_groups(kobj, iov_attr_groups(n)); + if (unlikely(err)) + goto failed_kobj_n; + + kobjs[n] = kobj; + } + + GEM_BUG_ON(iov->pf.sysfs.entries); + iov->pf.sysfs.entries = kobjs; + return 0; + +failed_kobj_n: + if (kobj) + kobject_put(kobj); + while (n--) { + sysfs_remove_groups(kobjs[n], iov_attr_groups(n)); + kobject_put(kobjs[n]); + } + kfree(kobjs); +failed: + return err; +} + +static void pf_teardown_provisioning(struct intel_iov *iov) +{ + struct kobject **kobjs; + unsigned int count = 1 + pf_get_totalvfs(iov); + unsigned int n; + + kobjs = fetch_and_zero(&iov->pf.sysfs.entries); + if (!kobjs) + return; + + for (n = 0; n < count; n++) { + sysfs_remove_groups(kobjs[n], iov_attr_groups(n)); + kobject_put(kobjs[n]); + } + + kfree(kobjs); +} + +/** + * intel_iov_sysfs_setup - Setup GT IOV sysfs. + * @iov: the IOV struct + * + * Setup GT IOV provisioning sysfs. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int intel_iov_sysfs_setup(struct intel_iov *iov) +{ + int err; + + if (!intel_iov_is_pf(iov)) + return 0; + + err = pf_setup_provisioning(iov); + if (unlikely(err)) + goto failed; + + return 0; + +failed: + IOV_PROBE_ERROR(iov, "Failed to setup sysfs (%pe)\n", ERR_PTR(err)); + return err; +} + +/** + * intel_iov_sysfs_teardown - Cleanup GT IOV sysfs. + * @iov: the IOV struct + * + * Remove GT IOV provisioning sysfs. + */ +void intel_iov_sysfs_teardown(struct intel_iov *iov) +{ + if (!intel_iov_is_pf(iov)) + return; + + pf_teardown_provisioning(iov); +} diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_sysfs.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_sysfs.h new file mode 100644 index 0000000000000..8aa0661313535 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_sysfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_SYSFS_H__ +#define __INTEL_IOV_SYSFS_H__ + +struct intel_iov; + +int intel_iov_sysfs_setup(struct intel_iov *iov); +void intel_iov_sysfs_teardown(struct intel_iov *iov); + +#endif /* __INTEL_IOV_SYSFS_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_types.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_types.h new file mode 100644 index 0000000000000..8eb06056b6685 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_types.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_TYPES_H__ +#define __INTEL_IOV_TYPES_H__ + +#include +#include +#include +#include "i915_reg.h" +#include "intel_wakeref.h" + +/* threshold(name, klv key) */ +#define IOV_THRESHOLDS(threshold) \ + threshold(cat_error_count, CAT_ERR) \ + threshold(engine_reset_count, ENGINE_RESET) \ + threshold(page_fault_count, PAGE_FAULT) \ + threshold(h2g_time_us, H2G_STORM) \ + threshold(irq_time_us, IRQ_STORM) \ + threshold(doorbell_time_us, DOORBELL_STORM) \ + /*end*/ + +enum intel_iov_threshold { +#define __to_intel_iov_threshold_enum(N, K) IOV_THRESHOLD_##K, +IOV_THRESHOLDS(__to_intel_iov_threshold_enum) +#undef __to_intel_iov_threshold_enum +}; + +#define __count_iov_thresholds(N, K) + 1 +#define IOV_THRESHOLD_MAX IOV_THRESHOLDS(__count_iov_thresholds) + +/** + * struct intel_iov_config - IOV configuration data. + * @ggtt_region: GGTT region. + * @num_ctxs: number of GuC submission contexts. + * @begin_ctx: start index of GuC contexts. + * @num_dbs: number of GuC doorbells. + * @begin_db: start index of GuC doorbells. + * @exec_quantum: execution-quantum in milliseconds. + * @preempt_timeout: preemption timeout in microseconds. + */ +struct intel_iov_config { + struct drm_mm_node ggtt_region; + u16 num_ctxs; + u16 begin_ctx; + u16 num_dbs; + u16 begin_db; + u32 exec_quantum; + u32 preempt_timeout; + u32 thresholds[IOV_THRESHOLD_MAX]; +}; + +/** + * struct intel_iov_sysfs - IOV sysfs data. + * @entries: array with kobjects that represent PF and VFs. + */ +struct intel_iov_sysfs { + struct kobject **entries; +}; + +/** + * struct intel_iov_policies - IOV policies. + * @sched_if_idle: controls strict scheduling. + * @reset_engine: controls engines reset on VF switch. + * @sample_period: sample period of adverse events in milliseconds. + */ +struct intel_iov_policies { + bool sched_if_idle; + bool reset_engine; + u32 sample_period; +}; + +/** + * struct intel_iov_provisioning - IOV provisioning data. + * @auto_mode: indicates manual or automatic provisioning mode. + * @policies: provisioning policies. 
+ * @configs: flexible array with configuration data for PF and VFs. + */ +struct intel_iov_provisioning { + bool auto_mode; + unsigned int num_pushed; + struct work_struct worker; + struct intel_iov_policies policies; + struct intel_iov_config *configs; +}; + +#define VFID(n) (n) +#define PFID VFID(0) + +/** + * struct intel_iov_runtime_regs - Register runtime info shared with VFs. + * @size: size of the regs and value arrays. + * @regs: pointer to static array with register offsets. + * @values: pointer to array with captured register values. + */ +struct intel_iov_runtime_regs { + u32 size; + const i915_reg_t *regs; + u32 *values; +}; + +/** + * struct intel_iov_service - Placeholder for service data shared with VFs. + * @runtime: register runtime info shared with VFs. + */ +struct intel_iov_service { + struct intel_iov_runtime_regs runtime; +}; + +/** + * struct intel_iov_vf_runtime - Placeholder for the VF runtime data. + * @regs_size: size of runtime register array. + * @regs: pointer to array of register offset/value pairs. + */ +struct intel_iov_vf_runtime { + u32 regs_size; + struct vf_runtime_reg { + u32 offset; + u32 value; + } *regs; +}; + +/** + * struct intel_iov_relay - IOV Relay Communication data. + * @lock: protects #pending_relays and #last_fence. + * @pending_relays: list of relay requests that await a response. + * @last_fence: fence used with last message. + */ +struct intel_iov_relay { + spinlock_t lock; + struct list_head pending_relays; + u32 last_fence; +}; + +/** + * struct intel_iov_vf_config - VF configuration data. + * @ggtt_base: base of GGTT region. + * @ggtt_size: size of GGTT region. + * @num_ctxs: number of GuC submission contexts. + * @num_dbs: number of GuC doorbells. + */ +struct intel_iov_vf_config { + u64 ggtt_base; + u64 ggtt_size; + u16 num_ctxs; + u16 num_dbs; +}; + +/** + * struct intel_iov - I/O Virtualization related data. + * @pf.sysfs: sysfs data. + * @pf.provisioning: provisioning data. + * @pf.service: placeholder for service data. + * @vf.config: configuration of the resources assigned to VF. + * @vf.runtime: retrieved runtime info. + * @relay: data related to VF/PF communication based on GuC Relay messages. 
+ */ +struct intel_iov { + union { + struct { + struct intel_iov_sysfs sysfs; + struct intel_iov_provisioning provisioning; + struct intel_iov_service service; + } pf; + + struct { + struct intel_iov_vf_config config; + struct intel_iov_vf_runtime runtime; + struct drm_mm_node ggtt_balloon[2]; + } vf; + }; + + struct intel_iov_relay relay; +}; + +#endif /* __INTEL_IOV_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/intel_iov_utils.h b/drivers/gpu/drm/i915/gt/iov/intel_iov_utils.h new file mode 100644 index 0000000000000..37d78ee24b000 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/intel_iov_utils.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_IOV_UTILS_H__ +#define __INTEL_IOV_UTILS_H__ + +#include "i915_drv.h" + +static inline struct intel_gt *iov_to_gt(struct intel_iov *iov) +{ + return container_of(iov, struct intel_gt, iov); +} + +static inline struct intel_guc *iov_to_guc(struct intel_iov *iov) +{ + return &iov_to_gt(iov)->uc.guc; +} + +static inline struct drm_i915_private *iov_to_i915(struct intel_iov *iov) +{ + return iov_to_gt(iov)->i915; +} + +static inline struct device *iov_to_dev(struct intel_iov *iov) +{ + return iov_to_i915(iov)->drm.dev; +} + +static inline bool intel_iov_is_pf(struct intel_iov *iov) +{ + return IS_SRIOV_PF(iov_to_i915(iov)); +} + +static inline bool intel_iov_is_vf(struct intel_iov *iov) +{ + return IS_SRIOV_VF(iov_to_i915(iov)); +} + +static inline bool intel_iov_is_enabled(struct intel_iov *iov) +{ + return intel_iov_is_pf(iov) || intel_iov_is_vf(iov); +} + +static inline u16 pf_get_totalvfs(struct intel_iov *iov) +{ + return i915_sriov_pf_get_totalvfs(iov_to_i915(iov)); +} + +static inline u16 pf_get_numvfs(struct intel_iov *iov) +{ + return pci_num_vf(to_pci_dev(iov_to_dev(iov))); +} + +static inline bool pf_in_error(struct intel_iov *iov) +{ + return i915_sriov_pf_aborted(iov_to_i915(iov)); +} + +static inline int pf_get_status(struct intel_iov *iov) +{ + return i915_sriov_pf_status(iov_to_i915(iov)); +} + +#define IOV_ERROR(_iov, _fmt, ...) \ + drm_notice(&iov_to_i915(_iov)->drm, "IOV: " _fmt, ##__VA_ARGS__) +#define IOV_PROBE_ERROR(_iov, _fmt, ...) \ + i915_probe_error(iov_to_i915(_iov), "IOV: " _fmt, ##__VA_ARGS__) + +#ifdef CONFIG_DRM_I915_DEBUG_IOV +#define IOV_DEBUG(_iov, _fmt, ...) \ + drm_dbg(&iov_to_i915(_iov)->drm, "IOV: " _fmt, ##__VA_ARGS__) +#else +#define IOV_DEBUG(_iov, _fmt, ...) typecheck(struct intel_iov *, _iov) +#endif + +static inline void pf_update_status(struct intel_iov *iov, int status, const char *reason) +{ + GEM_BUG_ON(status >= 0); + IOV_PROBE_ERROR(iov, "Initialization failed (%pe) %s\n", ERR_PTR(status), reason); + i915_sriov_pf_abort(iov_to_i915(iov), status); +} + +static inline void pf_mark_manual_provisioning(struct intel_iov *iov) +{ + i915_sriov_pf_set_auto_provisioning(iov_to_i915(iov), false); +} + +#endif /* __INTEL_IOV_UTILS_H__ */ diff --git a/drivers/gpu/drm/i915/gt/iov/selftests/iov_live_selftest_ggtt.c b/drivers/gpu/drm/i915/gt/iov/selftests/iov_live_selftest_ggtt.c new file mode 100644 index 0000000000000..446f8bdc778d0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/selftests/iov_live_selftest_ggtt.c @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2021 Intel Corporation. All rights reserved. 
+ */ + +#include "i915_utils.h" +#include "gt/intel_gt.h" +#include "iov_selftest_actions.h" +#include "../abi/iov_actions_selftest_abi.h" +#include "../intel_iov_relay.h" + +struct pte_testcase { + bool (*test)(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, gen8_pte_t *out); +}; + +static void gen8_set_masked_pte_val(void __iomem *pte_addr, const u64 mask_size, + const u8 mask_shift, u64 val) +{ + gen8_pte_t old_pte = gen8_get_pte(pte_addr) & ~(mask_size << mask_shift); + gen8_pte_t pte = old_pte | (val << mask_shift); + + gen8_set_pte(pte_addr, pte); +} + +static bool +vf_pte_is_value_not_modifiable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + const u64 mask_size, const u8 mask_shift, gen8_pte_t *out) +{ + const u64 mask = mask_size << mask_shift; + int err; + u64 new_val; + u64 val; + + err = intel_iov_selftest_send_vfpf_get_ggtt_pte(iov, ggtt_addr, &val); + if (err < 0) + return false; + + val = (val & mask) >> mask_shift; + + new_val = val + 1; + if (new_val > mask_size) + new_val = 0; + + gen8_set_masked_pte_val(pte_addr, mask_size, mask_shift, new_val); + + err = intel_iov_selftest_send_vfpf_get_ggtt_pte(iov, ggtt_addr, &val); + if (err < 0) + return false; + + val = (val & mask) >> mask_shift; + + *out = gen8_get_pte(pte_addr); + + return val != new_val; +} + +static bool pte_not_accessible(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + gen8_pte_t *out) +{ + *out = gen8_get_pte(pte_addr); + + return *out == 0; +} + +static bool +pte_is_value_modifiable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + const u64 mask, gen8_pte_t *out) +{ + gen8_pte_t original_pte; + bool ret_val = true; + gen8_pte_t read_pte; + gen8_pte_t write_pte; + + original_pte = gen8_get_pte(pte_addr); + + write_pte = original_pte ^ mask; + gen8_set_pte(pte_addr, write_pte); + read_pte = gen8_get_pte(pte_addr); + + *out = read_pte; + + if ((read_pte & mask) != (write_pte & mask)) + ret_val = false; + + gen8_set_pte(pte_addr, original_pte); + + return ret_val; +} + +static bool +pte_gpa_modifiable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, gen8_pte_t *out) +{ + return pte_is_value_modifiable(iov, pte_addr, ggtt_addr, GEN12_GGTT_PTE_ADDR_MASK, out); +} + +static bool +pte_gpa_not_modifiable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + gen8_pte_t *out) +{ + return !pte_gpa_modifiable(iov, pte_addr, ggtt_addr, out); +} + +static bool +pte_valid_modifiable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, gen8_pte_t *out) +{ + return pte_is_value_modifiable(iov, pte_addr, ggtt_addr, GEN6_PTE_VALID, out); +} + +static bool +pte_valid_not_modifiable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + gen8_pte_t *out) +{ + const u64 mask = GEN6_PTE_VALID; + bool ret = false; + gen8_pte_t original_pte; + gen8_pte_t read_pte; + gen8_pte_t write_pte; + + original_pte = gen8_get_pte(pte_addr); + + write_pte = original_pte ^ FIELD_MAX(mask); + gen8_set_pte(pte_addr, write_pte); + read_pte = gen8_get_pte(pte_addr); + + *out = read_pte; + + if ((read_pte & mask) == (original_pte & mask)) + ret = true; + + gen8_set_pte(pte_addr, original_pte); + + return ret; +} + +static bool +pte_vfid_modifiable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, gen8_pte_t *out) +{ + return pte_is_value_modifiable(iov, pte_addr, ggtt_addr, TGL_GGTT_PTE_VFID_MASK, out); +} + +static bool +pte_vfid_not_modifiable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + gen8_pte_t *out) +{ + return 
!pte_vfid_modifiable(iov, pte_addr, ggtt_addr, out); +} + +static bool +pte_vfid_not_readable(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, u64 *out) +{ + *out = gen8_get_pte(pte_addr); + + return u64_get_bits(*out, TGL_GGTT_PTE_VFID_MASK) == 0; +} + +static bool +pte_gpa_not_modifiable_check_via_pf(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + gen8_pte_t *out) +{ + const u64 mask = GEN12_GGTT_PTE_ADDR_MASK; + + return vf_pte_is_value_not_modifiable(iov, pte_addr, ggtt_addr, FIELD_MAX(mask), + __bf_shf(mask), out); +} + +static bool +pte_vfid_not_modifiable_check_via_pf(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + gen8_pte_t *out) +{ + const u64 mask = TGL_GGTT_PTE_VFID_MASK; + + return vf_pte_is_value_not_modifiable(iov, pte_addr, ggtt_addr, FIELD_MAX(mask), + __bf_shf(mask), out); +} + +static bool +pte_valid_not_modifiable_check_via_pf(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + gen8_pte_t *out) +{ + const u64 mask = GEN6_PTE_VALID; + + return vf_pte_is_value_not_modifiable(iov, pte_addr, ggtt_addr, FIELD_MAX(mask), + __bf_shf(mask), out); +} + +static bool run_test_on_pte(struct intel_iov *iov, void __iomem *pte_addr, u64 ggtt_addr, + const struct pte_testcase *tc, u16 vfid) +{ + gen8_pte_t read_val; + + if (!tc->test(iov, pte_addr, ggtt_addr, &read_val)) { + IOV_ERROR(iov, "%ps.%u failed at GGTT address %#llx. PTE is: %#llx\n", + tc->test, vfid, ggtt_addr, read_val); + return false; + } + + return true; +} + +#define for_each_pte(pte_addr__, ggtt_addr__, gsm__, ggtt_block__, step__) \ + for ((ggtt_addr__) = ((ggtt_block__)->start), \ + (pte_addr__) = (gsm__) + (ggtt_addr_to_pte_offset((ggtt_addr__))); \ + (ggtt_addr__) < ((ggtt_block__)->start + (ggtt_block__)->size); \ + (ggtt_addr__) += (step__), \ + (pte_addr__) = (gsm__) + (ggtt_addr_to_pte_offset((ggtt_addr__)))) + +static bool +run_test_on_ggtt_block(struct intel_iov *iov, void __iomem *gsm, struct drm_mm_node *ggtt_block, + const struct pte_testcase *tc, u16 vfid, bool sanitycheck) +{ + int mul = 1; + void __iomem *pte_addr; + u64 ggtt_addr; + + GEM_BUG_ON(!IS_ALIGNED(ggtt_block->start, I915_GTT_PAGE_SIZE_4K)); + + for_each_pte(pte_addr, ggtt_addr, gsm, ggtt_block, I915_GTT_PAGE_SIZE_4K * mul) { + if (!run_test_on_pte(iov, pte_addr, ggtt_addr, tc, vfid)) + return false; + cond_resched(); + + /* + * Sanity check is done during driver probe, so we want to do it quickly. + * Therefore, we'll check only some entries that are a multiple of 2. + */ + if (sanitycheck) + mul *= 2; + } + + /* + * During sanity check we want to check the last PTE in the range. To be sure, + * we will perform this test explicitly outside the main checking loop. + */ + if (sanitycheck) { + ggtt_addr = ggtt_block->start + ggtt_block->size - I915_GTT_PAGE_SIZE_4K; + pte_addr = gsm + ggtt_addr_to_pte_offset(ggtt_addr); + if (!run_test_on_pte(iov, pte_addr, ggtt_addr, tc, vfid)) + return false; + } + + return true; +} + +#define for_each_pte_test(tc__, testcases__) \ + for ((tc__) = (testcases__); (tc__)->test; (tc__)++) + +/* + * We want to check state of GGTT entries of VF's. 
+ * PF has the right to modify the GGTT PTE in the whole range, + * so any problem in writing an entry will be reported as an error + */ +static int igt_pf_iov_ggtt(struct intel_iov *iov) +{ + const u64 size_ggtt_block = SZ_2M; + struct i915_ggtt *ggtt = iov_to_gt(iov)->ggtt; + struct drm_mm_node ggtt_block = {}; + static struct pte_testcase pte_testcases[] = { + { pte_gpa_modifiable }, + { pte_vfid_modifiable }, + { pte_valid_modifiable }, + { }, + }; + int failed = 0; + int err; + u16 vfid; + struct pte_testcase *tc; + + BUILD_BUG_ON(!IS_ALIGNED(size_ggtt_block, I915_GTT_PAGE_SIZE_4K)); + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + mutex_lock(&ggtt->vm.mutex); + err = i915_gem_gtt_insert(&ggtt->vm, &ggtt_block, size_ggtt_block, 0, + I915_COLOR_UNEVICTABLE, 0, U64_MAX, PIN_HIGH); + mutex_unlock(&ggtt->vm.mutex); + + if (err < 0) + goto out; + + for (vfid = 1; vfid <= pf_get_totalvfs(iov); vfid++) { + IOV_DEBUG(iov, "Checking VF%u range [%#llx-%#llx]", vfid, ggtt_block.start, + ggtt_block.start + ggtt_block.size); + i915_ggtt_set_space_owner(ggtt, vfid, &ggtt_block); + for_each_pte_test(tc, pte_testcases) { + IOV_DEBUG(iov, "Run '%ps' check\n", tc->test); + if (!run_test_on_ggtt_block(iov, ggtt->gsm, &ggtt_block, tc, vfid, false)) + failed++; + } + + i915_ggtt_set_space_owner(ggtt, 0, &ggtt_block); + } + + drm_mm_remove_node(&ggtt_block); + + if (failed) + IOV_ERROR(iov, "%s: Count of failed test cases: %d", __func__, failed); + + return failed ? -EPERM : 0; +out: + return err; +} + +static int igt_pf_ggtt(void *arg) +{ + struct drm_i915_private *i915 = arg; + + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + + return igt_pf_iov_ggtt(&to_gt(i915)->iov); +} + +static int igt_vf_iov_own_ggtt(struct intel_iov *iov, bool sanitycheck) +{ + gen8_pte_t __iomem *gsm = iov_to_gt(iov)->ggtt->gsm; + static struct pte_testcase pte_testcases[] = { + { pte_gpa_modifiable }, + { pte_vfid_not_readable }, + { pte_vfid_not_modifiable }, + { pte_valid_not_modifiable }, + { }, + }; + int failed = 0; + struct drm_mm_node ggtt_block; + struct pte_testcase *tc; + + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + ggtt_block.start = iov->vf.config.ggtt_base; + ggtt_block.size = iov->vf.config.ggtt_size; + + GEM_BUG_ON(!IS_ALIGNED(ggtt_block.start, I915_GTT_PAGE_SIZE_4K)); + GEM_BUG_ON(!IS_ALIGNED(ggtt_block.size, I915_GTT_PAGE_SIZE_4K)); + + IOV_DEBUG(iov, "Subtest %s, gsm: %#llx base: %#llx size: %#llx\n", + __func__, ptr_to_u64(gsm), ggtt_block.start, ggtt_block.size); + + for_each_pte_test(tc, pte_testcases) { + IOV_DEBUG(iov, "Run '%ps' check\n", tc->test); + if (!run_test_on_ggtt_block(iov, gsm, &ggtt_block, tc, 0, sanitycheck)) + failed++; + } + + if (failed) + IOV_ERROR(iov, "%s: Count of failed test cases: %d", __func__, failed); + + return failed ? 
-EPERM : 0; +} + +static int igt_vf_own_ggtt(void *arg) +{ + struct drm_i915_private *i915 = arg; + + GEM_BUG_ON(!IS_SRIOV_VF(i915)); + + return igt_vf_iov_own_ggtt(&to_gt(i915)->iov, false); +} + +static int igt_vf_iov_own_ggtt_via_pf(struct intel_iov *iov) +{ + const u64 size_ggtt_block = SZ_64K; + struct drm_mm_node ggtt_block = {}; + struct i915_ggtt *ggtt = iov_to_gt(iov)->ggtt; + gen8_pte_t __iomem *gsm = ggtt->gsm; + static struct pte_testcase pte_testcases[] = { + { pte_vfid_not_modifiable_check_via_pf }, + { pte_valid_not_modifiable_check_via_pf }, + { }, + }; + int failed = 0, err; + struct pte_testcase *tc; + + BUILD_BUG_ON(!IS_ALIGNED(size_ggtt_block, I915_GTT_PAGE_SIZE_4K)); + GEM_BUG_ON(!intel_iov_is_vf(iov)); + + mutex_lock(&ggtt->vm.mutex); + err = i915_gem_gtt_insert(&ggtt->vm, &ggtt_block, size_ggtt_block, + 0, I915_COLOR_UNEVICTABLE, 0, U64_MAX, + PIN_HIGH); + mutex_unlock(&ggtt->vm.mutex); + if (err < 0) + goto out; + + IOV_DEBUG(iov, "Subtest %s, gsm: %#llx base: %#llx size: %#llx\n", + __func__, ptr_to_u64(gsm), ggtt_block.start, ggtt_block.size); + + for_each_pte_test(tc, pte_testcases) { + IOV_DEBUG(iov, "Run '%ps' check \n", tc->test); + if (!run_test_on_ggtt_block(iov, gsm, &ggtt_block, tc, 0, false)) + failed++; + } + + drm_mm_remove_node(&ggtt_block); + + if (failed) + IOV_ERROR(iov, "%s: Count of failed test cases: %d", __func__, failed); + + return failed ? -EPERM : 0; +out: + return err; +} + +static int igt_vf_own_ggtt_via_pf(void *arg) +{ + struct drm_i915_private *i915 = arg; + + GEM_BUG_ON(!IS_SRIOV_VF(i915)); + + return igt_vf_iov_own_ggtt_via_pf(&to_gt(i915)->iov); +} + +static int +_test_other_ggtt_region(struct intel_iov *iov, gen8_pte_t __iomem *gsm, + struct drm_mm_node *ggtt_region) +{ + static struct pte_testcase pte_testcases[] = { + { pte_not_accessible }, + { pte_gpa_not_modifiable }, + { pte_vfid_not_modifiable }, + { pte_valid_not_modifiable }, + { }, + }; + int failed = 0; + struct pte_testcase *tc; + + IOV_DEBUG(iov, "Subtest %s, gsm: %#llx base: %#llx size: %#llx\n", + __func__, ptr_to_u64(gsm), ggtt_region->start, + ggtt_region->size); + + for_each_pte_test(tc, pte_testcases) { + IOV_DEBUG(iov, "Run '%ps' check\n", tc->test); + if (!run_test_on_ggtt_block(iov, gsm, ggtt_region, tc, 0, false)) + failed++; + } + + return failed ? -EPERM : 0; +} + +static int +_test_other_ggtt_region_via_pf(struct intel_iov *iov, gen8_pte_t __iomem *gsm, + struct drm_mm_node *ggtt_region) +{ + static struct pte_testcase pte_testcases[] = { + { pte_gpa_not_modifiable_check_via_pf }, + { pte_vfid_not_modifiable_check_via_pf }, + { pte_valid_not_modifiable_check_via_pf }, + { }, + }; + int failed = 0; + struct pte_testcase *tc; + + IOV_DEBUG(iov, "Subtest %s, gsm: %#llx base: %#llx size: %#llx\n", + __func__, ptr_to_u64(gsm), ggtt_region->start, + ggtt_region->size); + + for_each_pte_test(tc, pte_testcases) { + IOV_DEBUG(iov, "Run '%ps' check\n", tc->test); + if (!run_test_on_ggtt_block(iov, gsm, ggtt_region, tc, 0, false)) + failed++; + } + + return failed ? -EPERM : 0; +} + +static int +test_other_ggtt_region(struct intel_iov *iov, gen8_pte_t __iomem *gsm, + struct drm_mm_node *ggtt_region, bool check_via_pf) +{ + return check_via_pf ? 
+		_test_other_ggtt_region_via_pf(iov, gsm, ggtt_region) :
+		_test_other_ggtt_region(iov, gsm, ggtt_region);
+}
+
+static void *map_gsm(struct intel_gt *gt, u64 ggtt_size)
+{
+	struct pci_dev *pdev = gt->i915->drm.pdev;
+	struct device *dev = gt->i915->drm.dev;
+	u64 gsm_ggtt_size = (ggtt_size / I915_GTT_PAGE_SIZE_4K) *
+			    sizeof(gen8_pte_t);
+	phys_addr_t phys_addr;
+	u32 gttaddr;
+	void *gsm;
+
+	/*
+	 * Since GEN8, the GGTT page table is exposed at an 8MB offset within
+	 * the GTTMMADR BAR (BAR0).
+	 */
+	gttaddr = SZ_8M;
+	phys_addr = pci_resource_start(pdev, 0) + gttaddr;
+
+	gsm = ioremap(phys_addr, gsm_ggtt_size);
+	if (!gsm) {
+		dev_err(dev, "Failed to map the GGTT page table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return gsm;
+}
+
+static int igt_vf_iov_other_ggtt(struct intel_iov *iov, bool check_via_pf)
+{
+	u64 offset_vf = iov->vf.config.ggtt_base;
+	u64 size_vf = iov->vf.config.ggtt_size;
+	int failed = 0;
+	gen8_pte_t __iomem *gsm;
+	struct drm_mm_node test_region;
+
+	GEM_BUG_ON(!IS_ALIGNED(offset_vf, I915_GTT_PAGE_SIZE_4K));
+	GEM_BUG_ON(!IS_ALIGNED(size_vf, I915_GTT_PAGE_SIZE_4K));
+
+	/*
+	 * We want to test GGTT blocks that are not assigned to the current VF.
+	 * There are two regions which we can test:
+	 * - before the current VF range,
+	 * - after the current VF range.
+	 *
+	 *	|<---------------- Total GGTT size -------------->|
+	 *
+	 *	+-------------------------------------------------+
+	 *	| WOPCM |    available for PF and VFs   | GUC_TOP |
+	 *	+-----------------+---------------+---------------+
+	 *	|//// before /////|  current VF   |//// after ////|
+	 *	+-----------------+---------------+---------------+
+	 *
+	 *	|<-- offset_vf -->|<-- size_vf -->|
+	 *
+	 * The current driver implementation allows us to test at least
+	 * one page of GGTT before and after the VF's GGTT range.
+	 *
+	 *	+------------------+------------+-----------------+
+	 *	| before GGTT page | current VF | after GGTT page |
+	 *	+------------------+------------+-----------------+
+	 *
+	 *	|<-- 4K -->|                    |<-- 4K -->|
+	 *
+	 * We will run two tests to check these two areas, before and after the
+	 * VF, referred to as "other regions".
+	 * Before the tests, we must additionally map the GGTT up to the last
+	 * GGTT address used in the test.
+	 */
+	gsm = map_gsm(iov_to_gt(iov), offset_vf + size_vf +
+		      I915_GTT_PAGE_SIZE_4K);
+	if (IS_ERR(gsm))
+		return PTR_ERR(gsm);
+
+	test_region.size = I915_GTT_PAGE_SIZE_4K;
+
+	test_region.start = offset_vf - I915_GTT_PAGE_SIZE_4K;
+
+	if (test_other_ggtt_region(iov, gsm, &test_region, check_via_pf) < 0)
+		failed++;
+
+	test_region.start = offset_vf + size_vf;
+	if (test_other_ggtt_region(iov, gsm, &test_region, check_via_pf) < 0)
+		failed++;
+
+	iounmap(gsm);
+
+	return failed ?
-EPERM : 0; +} + +static int igt_vf_other_ggtt(void *arg) +{ + struct drm_i915_private *i915 = arg; + + GEM_BUG_ON(!IS_SRIOV_VF(i915)); + + return igt_vf_iov_other_ggtt(&to_gt(i915)->iov, false); +} + +static int igt_vf_other_ggtt_via_pf(void *arg) +{ + struct drm_i915_private *i915 = arg; + + GEM_BUG_ON(!IS_SRIOV_VF(i915)); + + return igt_vf_iov_other_ggtt(&to_gt(i915)->iov, true); +} + +int intel_iov_ggtt_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest pf_tests[] = { + SUBTEST(igt_pf_ggtt), + }; + static const struct i915_subtest vf_tests[] = { + SUBTEST(igt_vf_own_ggtt), + SUBTEST(igt_vf_own_ggtt_via_pf), + SUBTEST(igt_vf_other_ggtt), + SUBTEST(igt_vf_other_ggtt_via_pf), + }; + intel_wakeref_t wakeref; + int ret = 0; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + if (IS_SRIOV_PF(i915)) + ret = i915_subtests(pf_tests, i915); + else if (IS_SRIOV_VF(i915)) + ret = i915_subtests(vf_tests, i915); + + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + + return ret; +} diff --git a/drivers/gpu/drm/i915/gt/iov/selftests/iov_selftest_actions.c b/drivers/gpu/drm/i915/gt/iov/selftests/iov_selftest_actions.c new file mode 100644 index 0000000000000..abda45ec9447e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/iov/selftests/iov_selftest_actions.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "gt/iov/abi/iov_actions_selftest_abi.h" +#include "gt/iov/intel_iov_utils.h" +#include "gt/iov/intel_iov_relay.h" +#include "iov_selftest_actions.h" + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +/** + * intel_iov_selftest_send_vfpf_get_ggtt_pte - Get the PTE value from PF. + * @iov: the IOV struct + * @ggtt_addr: GGTT address + * @pte: pointer to PTE value + * + * The function will get the PTE value from PF using VFPF debug communication. + * + * This function can only be called on VF. + * + * Return: 0 on success or a negative error code on failure. 
+ */
+int intel_iov_selftest_send_vfpf_get_ggtt_pte(struct intel_iov *iov, u64 ggtt_addr, u64 *pte)
+{
+	u32 request[VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_LEN];
+	u32 response[VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_LEN];
+	u32 pte_lo, pte_hi;
+	int err;
+
+	GEM_BUG_ON(!intel_iov_is_vf(iov));
+
+	request[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+		     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+		     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, IOV_ACTION_VF2PF_PF_ST_ACTION) |
+		     FIELD_PREP(VF2PF_PF_ST_ACTION_REQUEST_MSG_0_OPCODE,
+				IOV_OPCODE_ST_GET_GGTT_PTE);
+	request[1] = FIELD_PREP(VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_1_ADDRESS_LO,
+				lower_32_bits(ggtt_addr));
+	request[2] = FIELD_PREP(VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_2_ADDRESS_HI,
+				upper_32_bits(ggtt_addr));
+
+	err = intel_iov_relay_send_to_pf(&iov->relay,
+					 request, ARRAY_SIZE(request),
+					 response, ARRAY_SIZE(response));
+
+	if (err < 0) {
+		IOV_ERROR(iov, "ST: failed to get PTE value for %#llx, %d\n", ggtt_addr, err);
+		return err;
+	}
+
+	pte_lo = FIELD_GET(VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_1_PTE_LO, response[1]);
+	pte_hi = FIELD_GET(VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_2_PTE_HI, response[2]);
+
+	*pte = make_u64(pte_hi, pte_lo);
+
+	return 0;
+}
+
+/*
+ * PF-side handler for IOV_OPCODE_ST_GET_GGTT_PTE: read the PTE at the
+ * requested GGTT address and relay its value back to the VF.
+ */
+static int pf_handle_action_get_ggtt_pte(struct intel_iov *iov, u32 origin, u32 relay_id,
+					 const u32 *msg, u32 len)
+{
+	u32 response[VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_LEN];
+	u32 addr_lo, addr_hi;
+	u64 ggtt_addr;
+	void __iomem *pte_addr;
+	gen8_pte_t pte;
+
+	GEM_BUG_ON(!intel_iov_is_pf(iov));
+
+	if (unlikely(len != VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_LEN))
+		return -EPROTO;
+
+	addr_lo = FIELD_GET(VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_1_ADDRESS_LO, msg[1]);
+	addr_hi = FIELD_GET(VF2PF_ST_GET_GGTT_PTE_REQUEST_MSG_2_ADDRESS_HI, msg[2]);
+
+	ggtt_addr = make_u64(addr_hi, addr_lo);
+
+	if (!IS_ALIGNED(ggtt_addr, I915_GTT_PAGE_SIZE_4K))
+		return -EINVAL;
+
+	pte_addr = iov_to_gt(iov)->ggtt->gsm + ggtt_addr_to_pte_offset(ggtt_addr);
+
+	pte = gen8_get_pte(pte_addr);
+
+	response[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+		      FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_RESPONSE_SUCCESS) |
+		      FIELD_PREP(VF2PF_PF_ST_ACTION_RESPONSE_MSG_0_MBZ, 0);
+	response[1] = FIELD_PREP(VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_1_PTE_LO, lower_32_bits(pte));
+	response[2] = FIELD_PREP(VF2PF_ST_GET_GGTT_PTE_RESPONSE_MSG_2_PTE_HI, upper_32_bits(pte));
+
+	return intel_iov_relay_reply_to_vf(&iov->relay, origin, relay_id, response,
+					   ARRAY_SIZE(response));
+}
+
+int intel_iov_service_perform_selftest_action(struct intel_iov *iov, u32 origin, u32 relay_id,
+					      const u32 *msg, u32 len)
+{
+	u32 opcode;
+
+	GEM_BUG_ON(!intel_iov_is_pf(iov));
+
+	if (unlikely(len < VF2PF_PF_ST_ACTION_REQUEST_MSG_MIN_LEN ||
+		     len > VF2PF_PF_ST_ACTION_REQUEST_MSG_MAX_LEN))
+		return -EPROTO;
+
+	opcode = FIELD_GET(VF2PF_PF_ST_ACTION_REQUEST_MSG_0_OPCODE, msg[0]);
+
+	switch (opcode) {
+	case IOV_OPCODE_ST_GET_GGTT_PTE:
+		return pf_handle_action_get_ggtt_pte(iov, origin, relay_id, msg, len);
+	default:
+		IOV_ERROR(iov, "Unsupported selftest opcode %#x from VF%u\n", opcode, origin);
+		return -EBADRQC;
+	}
+}
+#endif /* IS_ENABLED(CONFIG_DRM_I915_SELFTEST) */
diff --git a/drivers/gpu/drm/i915/gt/iov/selftests/iov_selftest_actions.h b/drivers/gpu/drm/i915/gt/iov/selftests/iov_selftest_actions.h
new file mode 100644
index 0000000000000..29e6512429287
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/iov/selftests/iov_selftest_actions.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _IOV_SELFTEST_ACTIONS_H_
+#define _IOV_SELFTEST_ACTIONS_H_
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct intel_iov;
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+int intel_iov_service_perform_selftest_action(struct intel_iov *iov, u32 origin, u32 relay_id,
+					      const u32 *msg, u32 len);
+int intel_iov_selftest_send_vfpf_get_ggtt_pte(struct intel_iov *iov, u64 ggtt_addr, u64 *pte);
+#else
+static inline int intel_iov_service_perform_selftest_action(struct intel_iov *iov, u32 origin,
+							     u32 relay_id, const u32 *msg, u32 len)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* IS_ENABLED(CONFIG_DRM_I915_SELFTEST) */
+
+#endif /* _IOV_SELFTEST_ACTIONS_H_ */
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_pf_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_pf_abi.h
new file mode 100644
index 0000000000000..a6be57bdbf44a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_pf_abi.h
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __GUC_ACTIONS_PF_ABI_H__
+#define __GUC_ACTIONS_PF_ABI_H__
+
+#include "guc_communication_ctb_abi.h"
+
+/**
+ * DOC: PF2GUC_UPDATE_VGT_POLICY
+ *
+ * This message is optionally used by the PF to set `GuC VGT Policy KLVs`_.
+ *
+ * This message must be sent as `CTB HXG Message`_.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_HOST_                                |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 27:16 | MBZ                                                          |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  15:0 | ACTION = _`GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY` = 0x5502     |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 | **CFG_ADDR_LO** - dword aligned GGTT offset that             |
+ *  |   |       | represents the start of `GuC VGT Policy KLVs`_ list.         |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 2 |  31:0 | **CFG_ADDR_HI** - upper 32 bits of above offset.
| + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **CFG_SIZE** - size (in dwords) of the config buffer | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | **COUNT** - number of KLVs successfully applied | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY 0x5502 + +#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u) +#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_1_CFG_ADDR_LO GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_2_CFG_ADDR_HI GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_UPDATE_VGT_POLICY_REQUEST_MSG_3_CFG_SIZE GUC_HXG_REQUEST_MSG_n_DATAn + +#define PF2GUC_UPDATE_VGT_POLICY_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN +#define PF2GUC_UPDATE_VGT_POLICY_RESPONSE_MSG_0_COUNT GUC_HXG_RESPONSE_MSG_0_DATA0 + +/** + * DOC: PF2GUC_UPDATE_VF_CFG + * + * The `PF2GUC_UPDATE_VF_CFG`_ message is used by PF to provision single VF in GuC. + * + * This message must be sent as `CTB HXG Message`_. + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_UPDATE_VF_CFG` = 0x5503 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VFID** - identifier of the VF that the KLV | + * | | | configurations are being applied to | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **CFG_ADDR_LO** - dword aligned GGTT offset that represents | + * | | | the start of a list of virtualization related KLV configs | + * | | | that are to be applied to the VF. | + * | | | If this parameter is zero, the list is not parsed. | + * | | | If full configs address parameter is zero and configs_size is| + * | | | zero associated VF config shall be reset to its default state| + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **CFG_ADDR_HI** - upper 32 bits of configs address. 
|
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 4 |  31:0 | **CFG_SIZE** - size (in dwords) of the config buffer         |
+ *  +---+-------+--------------------------------------------------------------+
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_GUC_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_                        |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  27:0 | **COUNT** - number of KLVs successfully applied              |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_UPDATE_VF_CFG			0x5503
+
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_LEN		(GUC_HXG_REQUEST_MSG_MIN_LEN + 4u)
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_0_MBZ		GUC_HXG_REQUEST_MSG_0_DATA0
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_1_VFID		GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_2_CFG_ADDR_LO	GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_3_CFG_ADDR_HI	GUC_HXG_REQUEST_MSG_n_DATAn
+#define PF2GUC_UPDATE_VF_CFG_REQUEST_MSG_4_CFG_SIZE	GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define PF2GUC_UPDATE_VF_CFG_RESPONSE_MSG_LEN		GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define PF2GUC_UPDATE_VF_CFG_RESPONSE_MSG_0_COUNT	GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/**
+ * DOC: GUC2PF_RELAY_FROM_VF
+ *
+ * The `GUC2PF_RELAY_FROM_VF`_ message is used by the GuC to forward VF/PF IOV
+ * messages received from the VF to the PF.
+ *
+ * This G2H message must be sent as `CTB HXG Message`_.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_GUC_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_                                   |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 27:16 | MBZ                                                          |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  15:0 | ACTION = _`GUC_ACTION_GUC2PF_RELAY_FROM_VF` = 0x5100         |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 | **VFID** - source VF identifier                              |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 2 |  31:0 | **RELAY_ID** - VF/PF message ID                              |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 3 |  31:0 | **RELAY_DATA1** - VF/PF message payload data                 |
+ *  +---+-------+--------------------------------------------------------------+
+ *  |...|       |                                                              |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | n |  31:0 | **RELAY_DATAx** - VF/PF message payload data                 |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_GUC2PF_RELAY_FROM_VF			0x5100
+
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN		(GUC_HXG_EVENT_MSG_MIN_LEN + 2u)
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_MAX_LEN		(GUC2PF_RELAY_FROM_VF_EVENT_MSG_MIN_LEN + 60u)
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_0_MBZ		GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_1_VFID		GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_2_RELAY_ID
GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_3_RELAY_DATA1 GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_RELAY_FROM_VF_EVENT_MSG_n_RELAY_DATAx GUC_HXG_EVENT_MSG_n_DATAn + +/** + * DOC: PF2GUC_RELAY_TO_VF + * + * The `PF2GUC_RELAY_TO_VF`_ message is used by the PF to send VF/PF IOV messages + * to the VF. + * + * This action message must be sent over CTB as `CTB HXG Message`_. + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = `GUC_HXG_TYPE_FAST_REQUEST`_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_RELAY_TO_VF` = 0x5101 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VFID** - target VF identifier | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **RELAY_ID** - VF/PF message ID | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **RELAY_DATA1** - VF/PF message payload data | + * +---+-------+--------------------------------------------------------------+ + * |...| | | + * +---+-------+--------------------------------------------------------------+ + * | n | 31:0 | **RELAY_DATAx** - VF/PF message payload data | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_PF2GUC_RELAY_TO_VF 0x5101 + +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 2u) +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_MAX_LEN (PF2GUC_RELAY_TO_VF_REQUEST_MSG_MIN_LEN + 60u) +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_1_VFID GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_2_RELAY_ID GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_3_RELAY_DATA1 GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_RELAY_TO_VF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn + +/** + * DOC: GUC2PF_MMIO_RELAY_SERVICE + * + * The `GUC2PF_MMIO_RELAY_SERVICE`_ message is used by the GuC to forward data + * from `VF2GUC_MMIO_RELAY_SERVICE`_ request message that was sent by the VF. + * + * To reply to `VF2GUC_MMIO_RELAY_SERVICE`_ request message PF must be either + * `PF2GUC_MMIO_RELAY_SUCCESS`_ or `PF2GUC_MMIO_RELAY_FAILURE`_. + * + * This G2H message must be sent as `CTB HXG Message`_. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_GUC2PF_MMIO_RELAY_SERVICE` = 0x5006 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VFID** - identifier of the VF which sent this message | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:28 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 27:24 | **MAGIC** - see `VF2GUC_MMIO_RELAY_SERVICE`_ request | + * | +-------+--------------------------------------------------------------+ + * | | 23:16 | **OPCODE** - see `VF2GUC_MMIO_RELAY_SERVICE`_ request | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **DATA1** - see `VF2GUC_MMIO_RELAY_SERVICE`_ request | + * +---+-------+--------------------------------------------------------------+ + * | 4 | 31:0 | **DATA2** - see `VF2GUC_MMIO_RELAY_SERVICE`_ request | + * +---+-------+--------------------------------------------------------------+ + * | 5 | 31:0 | **DATA3** - see `VF2GUC_MMIO_RELAY_SERVICE`_ request | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_GUC2PF_MMIO_RELAY_SERVICE 0x5006 + +#define GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 5u) +#define GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_2_MAGIC (0xf << 24) +#define GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_2_OPCODE (0xff << 16) +#define GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_n_DATAx GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_MMIO_RELAY_SERVICE_EVENT_MSG_NUM_DATA 3u + +/** + * DOC: PF2GUC_MMIO_RELAY_SUCCESS + * + * The `PF2GUC_MMIO_RELAY_SUCCESS`_ message is used by the PF to send success + * response data related to `VF2GUC_MMIO_RELAY_SERVICE`_ request message that + * was received in `GUC2PF_MMIO_RELAY_SERVICE`_. + * + * This message must be sent as `CTB HXG Message`_. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_FAST_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_MMIO_RELAY_SUCCESS` = 0x5007 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VFID** - identifier of the VF where to send this reply | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:28 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 27:24 | **MAGIC** - see `VF2GUC_MMIO_RELAY_SERVICE`_ response | + * | +-------+--------------------------------------------------------------+ + * | | 23:0 | **DATA0** - see `VF2GUC_MMIO_RELAY_SERVICE`_ response | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **DATA1** - see `VF2GUC_MMIO_RELAY_SERVICE`_ response | + * +---+-------+--------------------------------------------------------------+ + * | 4 | 31:0 | **DATA2** - see `VF2GUC_MMIO_RELAY_SERVICE`_ response | + * +---+-------+--------------------------------------------------------------+ + * | 5 | 31:0 | **DATA3** - see `VF2GUC_MMIO_RELAY_SERVICE`_ response | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_PF2GUC_MMIO_RELAY_SUCCESS 0x5007 + +#define PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 5u) +#define PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_1_VFID GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_2_MAGIC (0xf << 24) +#define PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_2_DATA0 (0xffffff << 0) +#define PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_n_DATAx GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_MMIO_RELAY_SUCCESS_REQUEST_MSG_NUM_DATA 3u + +/** + * DOC: PF2GUC_MMIO_RELAY_FAILURE + * + * The `PF2GUC_MMIO_RELAY_FAILURE`_ message is used by PF to send error response + * data related to `VF2GUC_MMIO_RELAY_SERVICE`_ request message that + * was received in `GUC2PF_MMIO_RELAY_SERVICE`_. + * + * This message must be sent as `CTB HXG Message`_. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_FAST_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_MMIO_RELAY_FAILURE` = 0x5008 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VFID** - identifier of the VF where to send reply | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:28 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 27:24 | **MAGIC** - see `VF2GUC_MMIO_RELAY_SERVICE`_ request | + * | +-------+--------------------------------------------------------------+ + * | | 23:8 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 7:0 | **FAULT** - see `IOV Error Codes`_ | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_PF2GUC_MMIO_RELAY_FAILURE 0x5008 + +#define PF2GUC_MMIO_RELAY_FAILURE_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 2u) +#define PF2GUC_MMIO_RELAY_FAILURE_REQUEST_MSG_1_VFID GUC_HXG_REQUEST_MSG_n_DATAn +#define PF2GUC_MMIO_RELAY_FAILURE_REQUEST_MSG_2_MAGIC (0xf << 24) +#define PF2GUC_MMIO_RELAY_FAILURE_REQUEST_MSG_2_FAULT (0xff << 0) + +/** + * DOC: GUC2PF_ADVERSE_EVENT + * + * This message is used by the GuC to notify PF about adverse events. + * + * This G2H message must be sent as `CTB HXG Message`_. + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_GUC2PF_ADVERSE_EVENT` = 0x5104 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **VFID** - VF identifier | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | DATA2 = **THRESHOLD** - key of the exceeded threshold | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_GUC2PF_ADVERSE_EVENT 0x5104 + +#define GUC2PF_ADVERSE_EVENT_EVENT_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u) +#define GUC2PF_ADVERSE_EVENT_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0 +#define GUC2PF_ADVERSE_EVENT_EVENT_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_ADVERSE_EVENT_EVENT_MSG_2_THRESHOLD GUC_HXG_EVENT_MSG_n_DATAn + +/** + * DOC: GUC2PF_VF_STATE_NOTIFY + * + * The GUC2PF_VF_STATE_NOTIFY message is used by the GuC to notify PF about change + * of the VF state. + * + * This G2H message must be sent as `CTB HXG Message`_. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_GUC2PF_VF_STATE_NOTIFY` = 0x5106 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **VFID** - VF identifier | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | DATA2 = **EVENT** - notification event: | + * | | | | + * | | | - _`GUC_PF_NOTIFY_VF_ENABLE` = 1 (only if VFID = 0) | + * | | | - _`GUC_PF_NOTIFY_VF_FLR` = 1 | + * | | | - _`GUC_PF_NOTIFY_VF_FLR_DONE` = 2 | + * | | | - _`GUC_PF_NOTIFY_VF_PAUSE_DONE` = 3 | + * | | | - _`GUC_PF_NOTIFY_VF_FIXUP_DONE` = 4 | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_GUC2PF_VF_STATE_NOTIFY 0x5106 + +#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u) +#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0 +#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn +#define GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT GUC_HXG_EVENT_MSG_n_DATAn +#define GUC_PF_NOTIFY_VF_ENABLE 1 +#define GUC_PF_NOTIFY_VF_FLR 1 +#define GUC_PF_NOTIFY_VF_FLR_DONE 2 +#define GUC_PF_NOTIFY_VF_PAUSE_DONE 3 +#define GUC_PF_NOTIFY_VF_FIXUP_DONE 4 + +/** + * DOC: PF2GUC_VF_CONTROL + * + * The PF2GUC_VF_CONTROL message is used by the PF to trigger VF state change + * maintained by the GuC. + * + * This H2G message must be sent as `CTB HXG Message`_. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_VF_CONTROL_CMD` = 0x5506 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **VFID** - VF identifier | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | DATA2 = **COMMAND** - control command: | + * | | | | + * | | | - _`GUC_PF_TRIGGER_VF_PAUSE` = 1 | + * | | | - _`GUC_PF_TRIGGER_VF_RESUME` = 2 | + * | | | - _`GUC_PF_TRIGGER_VF_STOP` = 3 | + * | | | - _`GUC_PF_TRIGGER_VF_FLR_START` = 4 | + * | | | - _`GUC_PF_TRIGGER_VF_FLR_FINISH` = 5 | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = MBZ | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_PF2GUC_VF_CONTROL 0x5506 + +#define PF2GUC_VF_CONTROL_REQUEST_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 2u) +#define PF2GUC_VF_CONTROL_REQUEST_MSG_0_MBZ GUC_HXG_EVENT_MSG_0_DATA0 +#define PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn +#define PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND GUC_HXG_EVENT_MSG_n_DATAn +#define GUC_PF_TRIGGER_VF_PAUSE 1 +#define GUC_PF_TRIGGER_VF_RESUME 2 +#define GUC_PF_TRIGGER_VF_STOP 3 +#define GUC_PF_TRIGGER_VF_FLR_START 4 +#define GUC_PF_TRIGGER_VF_FLR_FINISH 5 + +/** + * DOC: PF2GUC_SAVE_RESTORE_VF + * + * This message is used by the PF to migrate VF info state maintained by the GuC. + * + * This message must be sent as `CTB HXG Message`_. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = **OPCODE** - operation to take: | + * | | | | + * | | | - _`GUC_PF_OPCODE_VF_SAVE` = 0 | + * | | | - _`GUC_PF_OPCODE_VF_RESTORE` = 1 | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_SAVE_RESTORE_VF` = 0x550B | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | DATA1 = **VFID** - VF identifier | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | DATA2 = **BUFF_LO** - lower 32-bits of GGTT offset to the 4K | + * | | | buffer where the VF info will be save to or restored from. | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | DATA3 = **BUFF_HI** - upper 32-bits of GGTT offset to the 4K | + * | | | buffer where the VF info will be save to or restored from. | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = **USED** - size of buffer used (in bytes) | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_PF2GUC_SAVE_RESTORE_VF 0x550B + +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 3u) +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE GUC_HXG_EVENT_MSG_0_DATA0 +#define GUC_PF_OPCODE_VF_SAVE 0 +#define GUC_PF_OPCODE_VF_RESTORE 1 +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_BUFF_LO GUC_HXG_EVENT_MSG_n_DATAn +#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_BUFF_HI GUC_HXG_EVENT_MSG_n_DATAn + +#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN +#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_0_USED GUC_HXG_RESPONSE_MSG_0_DATA0 + +#endif /* __GUC_ACTIONS_PF_ABI_H__ */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_vf_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_vf_abi.h new file mode 100644 index 0000000000000..59887a3bae9b5 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_vf_abi.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _ABI_GUC_ACTIONS_VF_ABI_H +#define _ABI_GUC_ACTIONS_VF_ABI_H + +#include "guc_communication_mmio_abi.h" +#include "guc_communication_ctb_abi.h" + +/** + * DOC: VF2GUC_MATCH_VERSION + * + * This action is used to match VF interface version used by VF and GuC. + * + * This action must be sent over MMIO. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_MATCH_VERSION` = 0x5500 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:24 | **BRANCH** - branch ID of the VF interface | + * | +-------+--------------------------------------------------------------+ + * | | 23:16 | **MAJOR** - major version of the VF interface | + * | +-------+--------------------------------------------------------------+ + * | | 15:8 | **MINOR** - minor version of the VF interface | + * | +-------+--------------------------------------------------------------+ + * | | 7:0 | **MBZ** | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = MBZ | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:24 | **BRANCH** - branch ID of the VF interface | + * | +-------+--------------------------------------------------------------+ + * | | 23:16 | **MAJOR** - major version of the VF interface | + * | +-------+--------------------------------------------------------------+ + * | | 15:8 | **MINOR** - minor version of the VF interface | + * | +-------+--------------------------------------------------------------+ + * | | 7:0 | **PATCH** - patch version of the VF interface | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_VF2GUC_MATCH_VERSION 0x5500 + +#define VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u) +#define VF2GUC_MATCH_VERSION_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH (0xff << 24) +#define GUC_VERSION_BRANCH_ANY 0 +#define VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR (0xff << 16) +#define GUC_VERSION_MAJOR_ANY 0 +#define VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR (0xff << 8) +#define GUC_VERSION_MINOR_ANY 0 +#define VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MBZ (0xff << 0) + +#define VF2GUC_MATCH_VERSION_RESPONSE_MSG_LEN (GUC_HXG_RESPONSE_MSG_MIN_LEN + 1u) +#define VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0 +#define VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH (0xff << 24) +#define VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR (0xff << 16) +#define VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR (0xff << 8) +#define VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH (0xff << 0) + +/** + * DOC: VF2GUC_VF_RESET + * + * This action is used by VF to reset GuC's VF state. + * + * This message must be sent as `MMIO HXG Message`_. 
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | DATA0 = MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_VF_RESET` = 0x5507 | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:0 | DATA0 = MBZ | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_VF2GUC_VF_RESET 0x5507 + +#define VF2GUC_VF_RESET_REQUEST_MSG_LEN GUC_HXG_REQUEST_MSG_MIN_LEN +#define VF2GUC_VF_RESET_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 + +#define VF2GUC_VF_RESET_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN +#define VF2GUC_VF_RESET_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0 + +/** + * DOC: VF2GUC_QUERY_SINGLE_KLV + * + * This action is used by VF to query value of the single KLV data. + * + * This action must be sent over MMIO. + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV` = 0x5509 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | **KEY** - key for which value is requested | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | **LENGTH** - length of data in dwords | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **VALUE32** - bits 31:0 of value if **LENGTH** >= 1 | + * +---+-------+--------------------------------------------------------------+ + * 
| 2 | 31:0 | **VALUE64** - bits 63:32 of value if **LENGTH** >= 2 | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **VALUE96** - bits 95:64 of value if **LENGTH** >= 3 | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_VF2GUC_QUERY_SINGLE_KLV 0x5509 + +#define VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u) +#define VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_MBZ (0xffff << 16) +#define VF2GUC_QUERY_SINGLE_KLV_REQUEST_MSG_1_KEY (0xffff << 0) + +#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MIN_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN +#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_MAX_LEN (GUC_HXG_RESPONSE_MSG_MIN_LEN + 3u) +#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_MBZ (0xfff << 16) +#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_0_LENGTH (0xffff << 0) +#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_1_VALUE32 GUC_HXG_REQUEST_MSG_n_DATAn +#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn +#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96 GUC_HXG_REQUEST_MSG_n_DATAn + +/** + * DOC: VF2GUC_RELAY_TO_PF + * + * The `VF2GUC_RELAY_TO_PF`_ message is used to send VF/PF messages to the PF. + * + * This message must be sent over CTB. + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ or GUC_HXG_TYPE_FAST_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:16 | MBZ | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_RELAY_TO_PF` = 0x5103 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **RELAY_ID** - VF/PF message ID | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **RELAY_DATA1** - VF/PF message payload data | + * +---+-------+--------------------------------------------------------------+ + * |...| | | + * +---+-------+--------------------------------------------------------------+ + * | n | 31:0 | **RELAY_DATAx** - VF/PF message payload data | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_VF2GUC_RELAY_TO_PF 0x5103 + +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u) +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_MAX_LEN (VF2GUC_RELAY_TO_PF_REQUEST_MSG_MIN_LEN + 60u) +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0 +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_1_RELAY_ID GUC_HXG_REQUEST_MSG_n_DATAn +#define VF2GUC_RELAY_TO_PF_REQUEST_MSG_n_RELAY_DATAx GUC_HXG_REQUEST_MSG_n_DATAn + +/** + * DOC: GUC2VF_RELAY_FROM_PF + * + * The `GUC2VF_RELAY_FROM_PF`_ message is used by GuC to forward VF/PF messages + * received from the PF. + * + * This message must be sent over CTB. 
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * |   | Bits  | Description                                                  |
+ * +===+=======+==============================================================+
+ * | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_GUC_                                 |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_                                   |
+ * |   +-------+--------------------------------------------------------------+
+ * |   | 27:16 | MBZ                                                          |
+ * |   +-------+--------------------------------------------------------------+
+ * |   |  15:0 | ACTION = _`GUC_ACTION_GUC2VF_RELAY_FROM_PF` = 0x5102         |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 |  31:0 | **RELAY_ID** - VF/PF message ID                              |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 |  31:0 | **RELAY_DATA1** - VF/PF message payload data                 |
+ * +---+-------+--------------------------------------------------------------+
+ * |...|       |                                                              |
+ * +---+-------+--------------------------------------------------------------+
+ * | n |  31:0 | **RELAY_DATAx** - VF/PF message payload data                 |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_GUC2VF_RELAY_FROM_PF			0x5102
+
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN		(GUC_HXG_EVENT_MSG_MIN_LEN + 1u)
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_MAX_LEN		(GUC2VF_RELAY_FROM_PF_EVENT_MSG_MIN_LEN + 60u)
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_0_MBZ		GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_1_RELAY_ID	GUC_HXG_EVENT_MSG_n_DATAn
+#define GUC2VF_RELAY_FROM_PF_EVENT_MSG_n_RELAY_DATAx	GUC_HXG_EVENT_MSG_n_DATAn
+
+/**
+ * DOC: VF2GUC_MMIO_RELAY_SERVICE
+ *
+ * The VF2GUC_MMIO_RELAY_SERVICE action allows the VF to send early MMIO VF/PF
+ * messages to the PF.
+ *
+ * Note that support for sending such messages to the PF is not guaranteed
+ * and might be disabled or blocked in future releases.
+ *
+ * The value of **MAGIC** used in the GUC_HXG_TYPE_REQUEST_ shall be generated
+ * by the VF, and the value of **MAGIC** included in GUC_HXG_TYPE_RESPONSE_SUCCESS_
+ * shall be the same.
+ *
+ * In case of GUC_HXG_TYPE_RESPONSE_FAILURE_, **MAGIC** shall be encoded in the
+ * upper bits of the **HINT** field.
+ *
+ * This action may take a longer time to complete and VFs should expect an
+ * intermediate `HXG Busy`_ response message.
+ *
+ * This action is only available over MMIO.
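+ *
+ * For illustration only, the first request dword could be assembled with the
+ * FIELD_PREP() helper and the masks defined below; here ``magic``, ``opcode``
+ * and ``data1`` are placeholder variables, not part of this ABI::
+ *
+ *	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ *		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ *		 FIELD_PREP(VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_0_MAGIC, magic) |
+ *		 FIELD_PREP(VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_0_OPCODE, opcode) |
+ *		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
+ *			    GUC_ACTION_VF2GUC_MMIO_RELAY_SERVICE);
+ *	msg[1] = data1;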
+ * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:24 | **MAGIC** - MMIO VF/PF message magic number (like CRC) | + * | +-------+--------------------------------------------------------------+ + * | | 23:16 | **OPCODE** - MMIO VF/PF message opcode | + * | +-------+--------------------------------------------------------------+ + * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_MMIO_RELAY_SERVICE` = 0x5005 | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **DATA1** - optional MMIO VF/PF payload data (or zero) | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **DATA2** - optional MMIO VF/PF payload data (or zero) | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **DATA3** - optional MMIO VF/PF payload data (or zero) | + * +---+-------+--------------------------------------------------------------+ + * + * +---+-------+--------------------------------------------------------------+ + * | | Bits | Description | + * +===+=======+==============================================================+ + * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ | + * | +-------+--------------------------------------------------------------+ + * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ | + * | +-------+--------------------------------------------------------------+ + * | | 27:24 | **MAGIC** - must match value from the REQUEST | + * | +-------+--------------------------------------------------------------+ + * | | 23:0 | **DATA0** - MMIO VF/PF response data | + * +---+-------+--------------------------------------------------------------+ + * | 1 | 31:0 | **DATA1** - MMIO VF/PF response data (or zero) | + * +---+-------+--------------------------------------------------------------+ + * | 2 | 31:0 | **DATA2** - MMIO VF/PF response data (or zero) | + * +---+-------+--------------------------------------------------------------+ + * | 3 | 31:0 | **DATA3** - MMIO VF/PF response data (or zero) | + * +---+-------+--------------------------------------------------------------+ + */ +#define GUC_ACTION_VF2GUC_MMIO_RELAY_SERVICE 0x5005 + +#define VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_MIN_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN) +#define VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_MAX_LEN (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u) +#define VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_0_MAGIC (0xf << 24) +#define VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_0_OPCODE (0xff << 16) +#define VF2GUC_MMIO_RELAY_SERVICE_REQUEST_MSG_n_DATAn GUC_HXG_REQUEST_MSG_n_DATAn + +#define VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_MIN_LEN (GUC_HXG_RESPONSE_MSG_MIN_LEN) +#define VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_MAX_LEN (GUC_HXG_RESPONSE_MSG_MIN_LEN + 3u) +#define VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_0_MAGIC (0xf << 24) +#define VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_0_DATA0 (0xffffff << 0) +#define VF2GUC_MMIO_RELAY_SERVICE_RESPONSE_MSG_n_DATAn GUC_HXG_RESPONSE_MSG_n_DATAn + +#endif /* _ABI_GUC_ACTIONS_VF_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h 
b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
index 1158f343c1044..a58d5e8b64f7a 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -38,6 +38,7 @@
  * |   |       |   - _`GUC_CTB_STATUS_UNDERFLOW` = 2 (truncated message)      |
  * |   |       |   - _`GUC_CTB_STATUS_MISMATCH` = 4 (head/tail modified)      |
  * |   |       |   - _`GUC_CTB_STATUS_UNUSED` = 8 (CTB is not in use)         |
+ * |   |       |   - _`GUC_CTB_STATUS_MIGRATED` = 16 (VF was migrated)        |
  * +---+-------+--------------------------------------------------------------+
  * |...|       | RESERVED = MBZ                                               |
  * +---+-------+--------------------------------------------------------------+
@@ -54,6 +55,7 @@ struct guc_ct_buffer_desc {
 #define GUC_CTB_STATUS_UNDERFLOW	(1 << 1)
 #define GUC_CTB_STATUS_MISMATCH		(1 << 2)
 #define GUC_CTB_STATUS_UNUSED		(1 << 3)
+#define GUC_CTB_STATUS_MIGRATED		(1 << 4)
 	u32 reserved[13];
 } __packed;
 static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index 8085fb1812748..721384a7c39ee 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -9,6 +9,7 @@
 enum intel_guc_response_status {
 	INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
 	INTEL_GUC_RESPONSE_NOT_SUPPORTED = 0x20,
+	INTEL_GUC_RESPONSE_VF_MIGRATED = 0x107,
 	INTEL_GUC_RESPONSE_NO_ATTRIBUTE_TABLE = 0x201,
 	INTEL_GUC_RESPONSE_NO_DECRYPTION_KEY = 0x202,
 	INTEL_GUC_RESPONSE_DECRYPTION_FAILED = 0x204,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index f0814a57c191e..4c96c39b0631a 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -14,6 +14,8 @@
  * +===+=======+==============================================================+
  * | 0 | 31:16 | **KEY** - KLV key identifier                                 |
  * |   |       |   - `GuC Self Config KLVs`_                                  |
+ * |   |       |   - `GuC VGT Policy KLVs`_                                   |
+ * |   |       |   - `GuC VF Configuration KLVs`_                             |
  * |   |       |                                                              |
  * |   +-------+--------------------------------------------------------------+
  * |   |  15:0 | **LEN** - length of VALUE (in 32bit dwords)                  |
@@ -79,4 +81,195 @@
 #define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY		0x0907
 #define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN		1u
 
+/**
+ * DOC: GuC VGT Policy KLVs
+ *
+ * `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
+ *
+ * _`GUC_KLV_VGT_POLICY_SCHED_IF_IDLE` : 0x8001
+ *      This config sets whether strict scheduling is enabled whereby any VF
+ *      that doesn't have work to submit is still allocated a fixed execution
+ *      time-slice to ensure active VFs' execution is always consistent even
+ *      during other VF reprovisioning / rebooting events. Changing this KLV
+ *      impacts all VFs and takes effect on the next VF-Switch event.
+ *
+ *      :0: don't schedule idle (default)
+ *      :1: schedule if idle
+ *
+ * _`GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD` : 0x8002
+ *      This config sets the sample period for tracking adverse event counters.
+ *      A sample period is the period in milliseconds during which events are counted.
+ *      This is applicable for all the VFs.
+ *
+ *      :0: adverse events are not counted (default)
+ *      :n: sample period in milliseconds
+ *
+ * _`GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH` : 0x8D00
+ *      This config controls whether utilized HW engines are reset after a VF
+ *      switch, i.e. to clean up stale HW register state left behind by the
+ *      previous VF.
+ *
+ *      :0: don't reset (default)
+ *      :1: reset
+ */
+
+#define GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY		0x8001
+#define GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_LEN		1u
+
+#define GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY	0x8002
+#define GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_LEN	1u
+
+#define GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY	0x8D00
+#define GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_LEN	1u
+
+/**
+ * DOC: GuC VF Configuration KLVs
+ *
+ * `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VF_CFG.
+ *
+ * _`GUC_KLV_VF_CFG_GGTT_START` : 0x0001
+ *      A 4K aligned start GTT address/offset assigned to VF.
+ *      Value is 64 bits.
+ *
+ * _`GUC_KLV_VF_CFG_GGTT_SIZE` : 0x0002
+ *      A 4K aligned size of GGTT assigned to VF.
+ *      Value is 64 bits.
+ *
+ * _`GUC_KLV_VF_CFG_NUM_CONTEXTS` : 0x0004
+ *      Refers to the number of contexts allocated to this VF.
+ *
+ *      :0: no contexts (default)
+ *      :1-65535: number of contexts (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_NUM_DOORBELLS` : 0x0006
+ *      Refers to the number of doorbells allocated to this VF.
+ *
+ *      :0: no doorbells (default)
+ *      :1-255: number of doorbells (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_EXEC_QUANTUM` : 0x8A01
+ *      This config sets the VF execution quantum in milliseconds.
+ *      GuC will attempt to obey the maximum values as much as HW is capable
+ *      of, and this will never be perfectly exact (accumulated nano-second
+ *      granularity) since the GPU's clock time runs off a different crystal
+ *      from the CPU's clock. Changing this KLV on a VF that is currently
+ *      running a context won't take effect until a new context is scheduled in.
+ *      That said, when the PF is changing this value from 0xFFFFFFFF to
+ *      something else, it might never take effect if the VF is running an
+ *      infinitely long compute or shader kernel. In such a scenario, the
+ *      PF would need to trigger a VM PAUSE and then change the KLV to force
+ *      it to take effect. Such cases might typically happen on a 1PF+1VF
+ *      virtualization config enabled for heavier workloads like AI/ML.
+ *
+ *      :0: infinite exec quantum (default)
+ *
+ * _`GUC_KLV_VF_CFG_PREEMPT_TIMEOUT` : 0x8A02
+ *      This config sets the VF preemption timeout in microseconds.
+ *      GuC will attempt to obey the minimum and maximum values as much as
+ *      HW is capable of, and this will never be perfectly exact (accumulated
+ *      nano-second granularity) since the GPU's clock time runs off a
+ *      different crystal from the CPU's clock. Changing this KLV on a VF
+ *      that is currently running a context won't take effect until a new
+ *      context is scheduled in.
+ *      That said, when the PF is changing this value from 0xFFFFFFFF to
+ *      something else, it might never take effect if the VF is running an
+ *      infinitely long compute or shader kernel.
+ *      In this case, the PF would need to trigger a VM PAUSE and then change
+ *      the KLV to force it to take effect. Such cases might typically happen
+ *      on a 1PF+1VF virtualization config enabled for heavier workloads like
+ *      AI/ML.
+ *
+ *      :0: no preemption timeout (default)
+ *
+ * _`GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR` : 0x8A03
+ *      This config sets threshold for CAT errors caused by the VF.
+ * + * :0: adverse events or error will not be reported (default) + * :n: event occurrence count per sampling interval + * + * _`GUC_KLV_VF_CFG_THRESHOLD_ENGINE_RESET` : 0x8A04 + * This config sets threshold for engine reset caused by the VF. + * + * :0: adverse events or error will not be reported (default) + * :n: event occurrence count per sampling interval + * + * _`GUC_KLV_VF_CFG_THRESHOLD_PAGE_FAULT` : 0x8A05 + * This config sets threshold for page fault errors caused by the VF. + * + * :0: adverse events or error will not be reported (default) + * :n: event occurrence count per sampling interval + * + * _`GUC_KLV_VF_CFG_THRESHOLD_H2G_STORM` : 0x8A06 + * This config sets threshold for H2G interrupts triggered by the VF. + * + * :0: adverse events or error will not be reported (default) + * :n: time (us) per sampling interval + * + * _`GUC_KLV_VF_CFG_THRESHOLD_IRQ_STORM` : 0x8A07 + * This config sets threshold for GT interrupts triggered by the VF's + * workloads. + * + * :0: adverse events or error will not be reported (default) + * :n: time (us) per sampling interval + * + * _`GUC_KLV_VF_CFG_THRESHOLD_DOORBELL_STORM` : 0x8A08 + * This config sets threshold for doorbell's ring triggered by the VF. + * + * :0: adverse events or error will not be reported (default) + * :n: time (us) per sampling interval + * + * _`GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID` : 0x8A0A + * Refers to the start index of doorbell assigned to this VF. + * + * :0: (default) + * :1-255: number of doorbells (Gen12) + * + * _`GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID` : 0x8A0B + * Refers to the start index in context array allocated to this VF’s use. + * + * :0: (default) + * :1-65535: number of contexts (Gen12) + */ + +#define GUC_KLV_VF_CFG_GGTT_START_KEY 0x0001 +#define GUC_KLV_VF_CFG_GGTT_START_LEN 2u + +#define GUC_KLV_VF_CFG_GGTT_SIZE_KEY 0x0002 +#define GUC_KLV_VF_CFG_GGTT_SIZE_LEN 2u + +#define GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY 0x0004 +#define GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN 1u + +#define GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY 0x0006 +#define GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN 1u + +#define GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY 0x8a01 +#define GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN 1u + +#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY 0x8a02 +#define GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN 1u + +#define GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR_KEY 0x8a03 +#define GUC_KLV_VF_CFG_THRESHOLD_CAT_ERR_LEN 1u + +#define GUC_KLV_VF_CFG_THRESHOLD_ENGINE_RESET_KEY 0x8a04 +#define GUC_KLV_VF_CFG_THRESHOLD_ENGINE_RESET_LEN 1u + +#define GUC_KLV_VF_CFG_THRESHOLD_PAGE_FAULT_KEY 0x8a05 +#define GUC_KLV_VF_CFG_THRESHOLD_PAGE_FAULT_LEN 1u + +#define GUC_KLV_VF_CFG_THRESHOLD_H2G_STORM_KEY 0x8a06 +#define GUC_KLV_VF_CFG_THRESHOLD_H2G_STORM_LEN 1u + +#define GUC_KLV_VF_CFG_THRESHOLD_IRQ_STORM_KEY 0x8a07 +#define GUC_KLV_VF_CFG_THRESHOLD_IRQ_STORM_LEN 1u + +#define GUC_KLV_VF_CFG_THRESHOLD_DOORBELL_STORM_KEY 0x8a08 +#define GUC_KLV_VF_CFG_THRESHOLD_DOORBELL_STORM_LEN 1u + +#define GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID_KEY 0x8a0a +#define GUC_KLV_VF_CFG_BEGIN_DOORBELL_ID_LEN 1u + +#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY 0x8a0b +#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_LEN 1u + #endif /* _ABI_GUC_KLVS_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_version_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_version_abi.h new file mode 100644 index 0000000000000..351f8a943f639 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_version_abi.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _ABI_GUC_VERSION_ABI_H +#define 
_ABI_GUC_VERSION_ABI_H + +/* XXX: preliminary VF version is 0.10 */ +#define GUC_VF_VERSION_LATEST_MAJOR 0 +#define GUC_VF_VERSION_LATEST_MINOR 10 + +#endif /* _ABI_GUC_VERSION_ABI_H */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index a774b88c4aee2..010a6f90b0fe1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -20,6 +20,9 @@ #define GUC_DEBUG(_guc, _fmt, ...) typecheck(struct intel_guc *, _guc) #endif +static const struct intel_guc_ops guc_ops_default; +static const struct intel_guc_ops guc_ops_vf; + /** * DOC: GuC * @@ -78,6 +81,10 @@ void intel_guc_init_send_regs(struct intel_guc *guc) FW_REG_READ | FW_REG_WRITE); } guc->send_regs.fw_domains = fw_domains; + + /* XXX: move to init_early when safe to call IS_SRIOV_VF */ + if (IS_SRIOV_VF(guc_to_gt(guc)->i915)) + guc->ops = &guc_ops_vf; } static void gen9_reset_guc_interrupts(struct intel_guc *guc) @@ -192,6 +199,8 @@ void intel_guc_init_early(struct intel_guc *guc) intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION | INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); + + guc->ops = &guc_ops_default; } void intel_guc_init_late(struct intel_guc *guc) @@ -223,6 +232,8 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) if (intel_guc_slpc_is_used(guc)) flags |= GUC_CTL_ENABLE_SLPC; + flags |= i915_modparams.guc_feature_flags; + return flags; } @@ -354,7 +365,7 @@ void intel_guc_write_params(struct intel_guc *guc) intel_uncore_forcewake_put(uncore, FORCEWAKE_GT); } -int intel_guc_init(struct intel_guc *guc) +static int __guc_init(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); int ret; @@ -417,7 +428,7 @@ int intel_guc_init(struct intel_guc *guc) return ret; } -void intel_guc_fini(struct intel_guc *guc) +static void __guc_fini(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); @@ -439,6 +450,50 @@ void intel_guc_fini(struct intel_guc *guc) intel_uc_fw_fini(&guc->fw); } +static int __vf_guc_init(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + int err; + + GEM_BUG_ON(!IS_SRIOV_VF(gt->i915)); + + err = intel_guc_ct_init(&guc->ct); + if (err) + return err; + + /* GuC submission is mandatory for VFs */ + err = intel_guc_submission_init(guc); + if (err) + goto err_ct; + + /* + * Disable slpc controls for VF. This cannot be done in + * __guc_slpc_selected since the VF probe is not complete + * at that point. + */ + guc->slpc.supported = false; + guc->slpc.selected = false; + + /* Disable GUCRC for VF */ + guc->rc_supported = false; + + return 0; + +err_ct: + intel_guc_ct_fini(&guc->ct); + return err; +} + +static void __vf_guc_fini(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + GEM_BUG_ON(!IS_SRIOV_VF(gt->i915)); + + intel_guc_submission_fini(guc); + intel_guc_ct_fini(&guc->ct); +} + /* * This function implements the MMIO based host to GuC interface. */ @@ -492,11 +547,19 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len, } if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) { + int loop = IS_SRIOV_VF(i915) ? 
20 : 1; + #define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \ FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \ FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; }) +busy_loop: ret = wait_for(done, 1000); + if (unlikely(ret && --loop)) { + drm_dbg(&i915->drm, "mmio request %#x: still busy, countdown %u\n", + request[0], loop); + goto busy_loop; + } if (unlikely(ret)) goto timeout; if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != @@ -517,6 +580,13 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len, u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header); u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header); + if (error == INTEL_GUC_RESPONSE_VF_MIGRATED) { + drm_dbg(&i915->drm, "mmio request %#x: migrated!\n", request[0]); + i915_sriov_vf_start_migration_recovery(i915); + ret = -EREMOTEIO; + goto out; + } + drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n", request[0], error, hint); ret = -ENXIO; @@ -558,6 +628,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len, return ret; } +ALLOW_ERROR_INJECTION(intel_guc_send_mmio, ERRNO); int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, const u32 *payload, u32 len) @@ -964,3 +1035,13 @@ void intel_guc_write_barrier(struct intel_guc *guc) wmb(); } } + +static const struct intel_guc_ops guc_ops_default = { + .init = __guc_init, + .fini = __guc_fini, +}; + +static const struct intel_guc_ops guc_ops_vf = { + .init = __vf_guc_init, + .fini = __vf_guc_fini, +}; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 9b5a86bff15bc..6841078ff1ac2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -22,6 +22,12 @@ #include "i915_vma.h" struct __guc_ads_blob; +struct intel_guc; + +struct intel_guc_ops { + int (*init)(struct intel_guc *guc); + void (*fini)(struct intel_guc *guc); +}; /** * struct intel_guc - Top level structure of GuC. @@ -30,6 +36,8 @@ struct __guc_ads_blob; * i915_sched_engine for submission. */ struct intel_guc { + /** @ops: Operations to init / fini the GuC */ + struct intel_guc_ops const *ops; /** @fw: the GuC firmware */ struct intel_uc_fw fw; /** @log: sub-structure containing GuC log related data and objects */ @@ -101,7 +109,8 @@ struct intel_guc { struct ida guc_ids; /** * @num_guc_ids: Number of guc_ids, selftest feature to be able - * to reduce this number while testing. + * to reduce this number while testing. Also used on VFs to + * reduce the pool of guc_ids. 
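+	 * See intel_guc_submission_limit_ids().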
*/ int num_guc_ids; /** @@ -326,6 +335,19 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, return offset; } +static inline int intel_guc_init(struct intel_guc *guc) +{ + if (guc->ops->init) + return guc->ops->init(guc); + return 0; +} + +static inline void intel_guc_fini(struct intel_guc *guc) +{ + if (guc->ops->fini) + guc->ops->fini(guc); +} + void intel_guc_init_early(struct intel_guc *guc); void intel_guc_init_late(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc); @@ -362,7 +384,8 @@ static inline bool intel_guc_is_wanted(struct intel_guc *guc) static inline bool intel_guc_is_used(struct intel_guc *guc) { GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED); - return intel_uc_fw_is_available(&guc->fw); + return intel_uc_fw_is_available(&guc->fw) || + intel_uc_fw_is_preloaded(&guc->fw); } static inline bool intel_guc_is_fw_running(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index baa54498e8783..0db7740c0d24e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -480,6 +480,9 @@ static void guc_init_golden_context(struct intel_guc *guc) if (!intel_uc_uses_guc_submission(>->uc)) return; + if (IS_SRIOV_VF(gt->i915)) + return; + GEM_BUG_ON(!blob); /* diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c index eeafff0e7b0e7..a222200a07e4d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -12,6 +12,10 @@ #include "intel_guc_ct.h" #include "intel_pagefault.h" #include "gt/intel_gt.h" +#include "gt/iov/intel_iov_event.h" +#include "gt/iov/intel_iov_relay.h" +#include "gt/iov/intel_iov_service.h" +#include "gt/iov/intel_iov_state.h" enum { CT_DEAD_ALIVE = 0, @@ -398,8 +402,19 @@ static int ct_write(struct intel_guc_ct *ct, u32 *cmds = ctb->cmds; unsigned int i; - if (unlikely(desc->status)) - goto corrupted; + if (unlikely(desc->status)) { + /* + * after VF migration H2G is not usable any more + * start recovery procedure and let caller retry + * any other non-migration status is still fatal + */ + if (desc->status & ~GUC_CTB_STATUS_MIGRATED) + goto corrupted; + if (!IS_SRIOV_VF(ct_to_i915(ct))) + goto corrupted; + i915_sriov_vf_start_migration_recovery(ct_to_i915(ct)); + return -EBUSY; + } GEM_BUG_ON(tail > size); @@ -812,6 +827,7 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len, return ret; } +ALLOW_ERROR_INJECTION(intel_guc_ct_send, ERRNO); static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords) { @@ -861,8 +877,18 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg) status &= ~GUC_CTB_STATUS_UNUSED; } - if (status) - goto corrupted; + if (status) { + /* + * after VF migration G2H shall be still usable + * only any other non-migration status is fatal + */ + if (status & ~GUC_CTB_STATUS_MIGRATED) + goto corrupted; + if (!IS_SRIOV_VF(ct_to_i915(ct))) + goto corrupted; + desc->status &= ~GUC_CTB_STATUS_MIGRATED; + i915_sriov_vf_start_migration_recovery(ct_to_i915(ct)); + } } GEM_BUG_ON(head > size); @@ -1008,6 +1034,8 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request) { struct intel_guc *guc = ct_to_guc(ct); + struct intel_gt *gt = guc_to_gt(guc); + struct intel_iov *iov = >->iov; const u32 *hxg; const u32 *payload; u32 
hxg_len, action, len; @@ -1050,6 +1078,21 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r case INTEL_GUC_ACTION_PAGE_FAULT_NOTIFICATION: ret = intel_pagefault_process_page_fault_msg(guc, payload, len); break; + case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY: + ret = intel_iov_state_process_guc2pf(iov, hxg, hxg_len); + break; + case GUC_ACTION_GUC2PF_ADVERSE_EVENT: + ret = intel_iov_event_process_guc2pf(iov, hxg, hxg_len); + break; + case GUC_ACTION_GUC2PF_RELAY_FROM_VF: + ret = intel_iov_relay_process_guc2pf(&iov->relay, hxg, hxg_len); + break; + case GUC_ACTION_GUC2VF_RELAY_FROM_PF: + ret = intel_iov_relay_process_guc2vf(&iov->relay, hxg, hxg_len); + break; + case GUC_ACTION_GUC2PF_MMIO_RELAY_SERVICE: + ret = intel_iov_service_process_mmio_relay(iov, hxg, hxg_len); + break; case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE: intel_guc_log_handle_flush_event(&guc->log); ret = 0; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c index 25f09a420561b..fd24cbcb49526 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c @@ -5,6 +5,7 @@ #include +#include "gt/intel_gt.h" #include "gt/intel_gt_debugfs.h" #include "gt/uc/intel_guc_ads.h" #include "gt/uc/intel_guc_ct.h" @@ -13,6 +14,7 @@ #include "intel_guc.h" #include "intel_guc_debugfs.h" #include "intel_guc_log_debugfs.h" +#include "intel_runtime_pm.h" static int guc_info_show(struct seq_file *m, void *data) { @@ -64,6 +66,69 @@ static int guc_slpc_info_show(struct seq_file *m, void *unused) } DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC) +static ssize_t guc_send_mmio_write(struct file *file, const char __user *user, + size_t count, loff_t *ppos) +{ + struct intel_guc *guc = file->private_data; + struct intel_runtime_pm *rpm = guc_to_gt(guc)->uncore->rpm; + u32 request[GUC_MAX_MMIO_MSG_LEN]; + u32 response[GUC_MAX_MMIO_MSG_LEN]; + intel_wakeref_t wakeref; + int ret; + + if (*ppos) + return 0; + + ret = from_user_to_u32array(user, count, request, ARRAY_SIZE(request)); + if (ret < 0) + return ret; + + with_intel_runtime_pm(rpm, wakeref) + ret = intel_guc_send_mmio(guc, request, ret, response, ARRAY_SIZE(response)); + if (ret < 0) + return ret; + + return count; +} + +static const struct file_operations guc_send_mmio_fops = { + .write = guc_send_mmio_write, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t guc_send_ctb_write(struct file *file, const char __user *user, + size_t count, loff_t *ppos) +{ + struct intel_guc *guc = file->private_data; + struct intel_runtime_pm *rpm = guc_to_gt(guc)->uncore->rpm; + u32 request[32], response[8]; /* reasonable limits */ + intel_wakeref_t wakeref; + int ret; + + if (*ppos) + return 0; + + ret = from_user_to_u32array(user, count, request, ARRAY_SIZE(request)); + if (ret < 0) + return ret; + + with_intel_runtime_pm(rpm, wakeref) + ret = intel_guc_send_and_receive(guc, request, ret, response, ARRAY_SIZE(response)); + if (ret < 0) + return ret; + + return count; +} + +static const struct file_operations guc_send_ctb_fops = { + .write = guc_send_ctb_write, + .open = simple_open, + .llseek = default_llseek, +}; +#endif + static bool intel_eval_slpc_support(void *data) { struct intel_guc *guc = (struct intel_guc *)data; @@ -77,6 +142,10 @@ void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root) { "guc_info", &guc_info_fops, NULL }, { "guc_registered_contexts", 
&guc_registered_contexts_fops, NULL }, { "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support}, +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC) + { "guc_send_mmio", &guc_send_mmio_fops, NULL }, + { "guc_send_ctb", &guc_send_ctb_fops, NULL }, +#endif }; if (!intel_guc_is_supported(guc)) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index bdb447c98dcb6..f63df3af46163 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -12,6 +12,8 @@ #include "gt/intel_engine_types.h" #include "abi/guc_actions_abi.h" +#include "abi/guc_actions_pf_abi.h" +#include "abi/guc_actions_vf_abi.h" #include "abi/guc_actions_slpc_abi.h" #include "abi/guc_errors_abi.h" #include "abi/guc_communication_mmio_abi.h" @@ -19,6 +21,26 @@ #include "abi/guc_klvs_abi.h" #include "abi/guc_messages_abi.h" +static inline const char *hxg_type_to_string(u32 type) +{ + switch (type) { + case GUC_HXG_TYPE_REQUEST: + return "request"; + case GUC_HXG_TYPE_EVENT: + return "event"; + case GUC_HXG_TYPE_NO_RESPONSE_BUSY: + return "busy"; + case GUC_HXG_TYPE_NO_RESPONSE_RETRY: + return "retry"; + case GUC_HXG_TYPE_RESPONSE_FAILURE: + return "failure"; + case GUC_HXG_TYPE_RESPONSE_SUCCESS: + return "response"; + default: + return ""; + } +} + /* Payload length only i.e. don't include G2H header length */ #define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 2 #define G2H_LEN_DW_DEREGISTER_CONTEXT 1 diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 0f763a111e9ae..0f1ce27a7f966 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1275,7 +1275,8 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) * start_gt_clk is derived from GuC state. To get a consistent * view of activity, we query the GuC state only if gt is awake. 
*/ - if (!in_reset && (wakeref = intel_gt_pm_get_if_awake(gt))) { + if (!in_reset && !IS_SRIOV_VF(gt->i915) && + (wakeref = intel_gt_pm_get_if_awake(gt))) { stats_saved = *stats; gt_stamp_saved = guc->timestamp.gt_stamp; /* @@ -1400,6 +1401,9 @@ void intel_guc_busyness_park(struct intel_gt *gt) { struct intel_guc *guc = >->uc.guc; + if (IS_SRIOV_VF(gt->i915)) + return; + if (!guc_submission_initialized(guc)) return; @@ -1413,6 +1417,9 @@ void intel_guc_busyness_unpark(struct intel_gt *gt) unsigned long flags; ktime_t unused; + if (IS_SRIOV_VF(gt->i915)) + return; + if (!guc_submission_initialized(guc)) return; @@ -1484,7 +1491,9 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) intel_gt_park_heartbeats(guc_to_gt(guc)); disable_submission(guc); guc->interrupts.disable(guc); - __reset_guc_busyness_stats(guc); + + if (!IS_SRIOV_VF(guc_to_gt(guc)->i915)) + __reset_guc_busyness_stats(guc); /* Flush IRQ handler */ spin_lock_irq(&guc_to_gt(guc)->irq_lock); @@ -1945,6 +1954,18 @@ static void guc_submit_request(struct i915_request *rq) spin_unlock_irqrestore(&sched_engine->lock, flags); } +int intel_guc_submission_limit_ids(struct intel_guc *guc, u32 limit) +{ + if (limit > GUC_MAX_LRC_DESCRIPTORS) + return -E2BIG; + + if (!ida_is_empty(&guc->submission_state.guc_ids)) + return -ETXTBSY; + + guc->submission_state.num_guc_ids = limit; + return 0; +} + static int new_guc_id(struct intel_guc *guc, struct intel_context *ce) { int ret; @@ -3654,6 +3675,12 @@ static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine) return !sched_engine->tasklet.callback; } +static int vf_guc_resume(struct intel_engine_cs *engine) +{ + intel_breadcrumbs_reset(engine->breadcrumbs); + return 0; +} + static int gen12_rcs_resume(struct intel_engine_cs *engine) { int ret; @@ -3846,6 +3873,9 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) rcs_submission_override(engine); + if (IS_SRIOV_VF(engine->i915)) + engine->resume = vf_guc_resume; + lrc_init_wa_ctx(engine); /* Finally, take ownership and responsibility for cleanup! 
*/ @@ -3858,7 +3888,9 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) void intel_guc_submission_enable(struct intel_guc *guc) { guc_init_lrc_mapping(guc); - guc_init_engine_stats(guc); + + if (!IS_SRIOV_VF(gt->i915)) + guc_init_engine_stats(guc); } void intel_guc_submission_disable(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index 5a95a9f0a8e31..094d6e7fcd890 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -14,6 +14,7 @@ struct drm_printer; struct intel_engine_cs; void intel_guc_submission_init_early(struct intel_guc *guc); +int intel_guc_submission_limit_ids(struct intel_guc *guc, u32 limit); int intel_guc_submission_init(struct intel_guc *guc); void intel_guc_submission_enable(struct intel_guc *guc); void intel_guc_submission_disable(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 556829de9c172..1ff3667376c6d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -170,6 +170,10 @@ int intel_huc_check_status(struct intel_huc *huc) with_intel_runtime_pm(gt->uncore->rpm, wakeref) status = intel_uncore_read(gt->uncore, huc->status.reg); + /* if status is suspicious, VFs must trust PF that HuC was loaded */ + if ((!status || !~status) && IS_SRIOV_VF(gt->i915)) + return 1; + return (status & huc->status.mask) == huc->status.value; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index ae8c8a6c8cc85..0063f0a809b25 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -47,7 +47,8 @@ static inline bool intel_huc_is_wanted(struct intel_huc *huc) static inline bool intel_huc_is_used(struct intel_huc *huc) { GEM_BUG_ON(__intel_uc_fw_status(&huc->fw) == INTEL_UC_FIRMWARE_SELECTED); - return intel_uc_fw_is_available(&huc->fw); + return intel_uc_fw_is_available(&huc->fw) || + intel_uc_fw_is_preloaded(&huc->fw); } static inline bool intel_huc_is_authenticated(struct intel_huc *huc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 3e9716138a0d2..edfa11470300a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -5,6 +5,7 @@ #include "gt/intel_gt.h" #include "gt/intel_reset.h" +#include "gt/iov/intel_iov_query.h" #include "intel_guc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" @@ -15,6 +16,7 @@ static const struct intel_uc_ops uc_ops_off; static const struct intel_uc_ops uc_ops_on; +static const struct intel_uc_ops uc_ops_vf; static void uc_expand_default_options(struct intel_uc *uc) { @@ -148,6 +150,10 @@ void intel_uc_driver_late_release(struct intel_uc *uc) void intel_uc_init_mmio(struct intel_uc *uc) { intel_guc_init_send_regs(&uc->guc); + + /* XXX can't do it in intel_uc_init_early, it's too early */ + if (IS_SRIOV_VF(uc_to_gt(uc)->i915)) + uc->ops = &uc_ops_vf; } static void __uc_capture_load_err_log(struct intel_uc *uc) @@ -577,6 +583,95 @@ static void __uc_fini_hw(struct intel_uc *uc) __uc_sanitize(uc); } +static int __vf_uc_sanitize(struct intel_uc *uc) +{ + intel_huc_sanitize(&uc->huc); + intel_guc_sanitize(&uc->guc); + + return 0; +} + +static int __vf_uc_init(struct intel_uc *uc) +{ + return intel_guc_init(&uc->guc); +} + +static void __vf_uc_fini(struct intel_uc *uc) +{ + 
intel_guc_fini(&uc->guc); +} + +static int __vf_uc_init_hw(struct intel_uc *uc) +{ + struct intel_gt *gt = uc_to_gt(uc); + struct drm_i915_private *i915 = gt->i915; + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; + int err; + + GEM_BUG_ON(!HAS_GT_UC(i915)); + GEM_BUG_ON(!IS_SRIOV_VF(i915)); + GEM_BUG_ON(!intel_uc_uses_guc_submission(>->uc)); + + err = intel_iov_query_bootstrap(>->iov); + if (unlikely(err)) + goto err_out; + + if (!intel_uc_fw_is_running(&guc->fw)) { + err = intel_uc_fw_status_to_error(guc->fw.status); + goto err_out; + } + + intel_guc_reset_interrupts(guc); + + err = guc_enable_communication(guc); + if (unlikely(err)) + goto err_out; + + err = intel_iov_query_version(>->iov); + if (unlikely(err)) + goto err_out; + + /* + * pretend that HuC is running if it is supported + * for status rely on runtime reg shared by PF + */ + if (intel_uc_fw_is_supported(&huc->fw)) { + /* XXX: We don't know how to get the HuC version yet */ + intel_uc_fw_set_preloaded(&huc->fw, 0, 0); + } + + intel_guc_submission_enable(guc); + + dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n", + intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path, + guc->fw.major_ver_found, guc->fw.minor_ver_found, + "submission", i915_iov_mode_to_string(IOV_MODE(i915))); + + dev_info(i915->drm.dev, "%s firmware %s\n", + intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC), + intel_uc_fw_status_repr(__intel_uc_fw_status(&huc->fw))); + + return 0; + +err_out: + __vf_uc_sanitize(uc); + i915_probe_error(i915, "GuC initialization failed (%pe)\n", ERR_PTR(err)); + return -EIO; +} + +static void __vf_uc_fini_hw(struct intel_uc *uc) +{ + struct intel_guc *guc = &uc->guc; + + intel_guc_submission_disable(guc); + + if (intel_guc_ct_enabled(&guc->ct)) + guc_disable_communication(guc); + + __vf_uc_sanitize(uc); +} + /** * intel_uc_reset_prepare - Prepare for reset * @uc: the intel_uc structure @@ -601,7 +696,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc) intel_guc_submission_reset_prepare(guc); sanitize: - __uc_sanitize(uc); + intel_uc_sanitize(uc); } void intel_uc_reset(struct intel_uc *uc, bool stalled) @@ -733,3 +828,11 @@ static const struct intel_uc_ops uc_ops_on = { .init_hw = __uc_init_hw, .fini_hw = __uc_fini_hw, }; + +static const struct intel_uc_ops uc_ops_vf = { + .sanitize = __vf_uc_sanitize, + .init = __vf_uc_init, + .fini = __vf_uc_fini, + .init_hw = __vf_uc_init_hw, + .fini_hw = __vf_uc_fini_hw, +}; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 12917421b864d..1d4d4e5790263 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -7,6 +7,7 @@ #include #include +#include "abi/guc_version_abi.h" #include "gem/i915_gem_lmem.h" #include "intel_uc_fw.h" #include "intel_uc_fw_abi.h" @@ -167,6 +168,9 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) uc_fw->path = blob->path; uc_fw->major_ver_wanted = blob->major; uc_fw->minor_ver_wanted = blob->minor; + /* XXX for now, all platforms use same latest version */ + uc_fw->major_vf_ver_wanted = GUC_VF_VERSION_LATEST_MAJOR; + uc_fw->minor_vf_ver_wanted = GUC_VF_VERSION_LATEST_MINOR; break; } } @@ -259,6 +263,46 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED); } +/** + * intel_uc_fw_set_preloaded() - set uC firmware as pre-loaded + * @uc_fw: uC firmware structure + * @major: major version of the pre-loaded firmware + * @minor: minor version of the pre-loaded firmware + 
* + * If the uC firmware was loaded to h/w by other entity, just + * mark it as loaded. + * + * Return: 0 on success or a negative error code on version mismatch. + */ +int intel_uc_fw_set_preloaded(struct intel_uc_fw *uc_fw, u16 major, u16 minor) +{ + struct device *dev = __uc_fw_to_gt(uc_fw)->i915->drm.dev; + + uc_fw->path = "PRELOADED"; + uc_fw->major_ver_found = major; + uc_fw->minor_ver_found = minor; + + if (!major && !minor) + goto done; + + if (uc_fw->major_ver_found != uc_fw->major_vf_ver_wanted || + uc_fw->minor_ver_found < uc_fw->minor_vf_ver_wanted) { + dev_notice(dev, "%s firmware %s: unexpected version: %u.%u != %u.%u\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + uc_fw->major_ver_found, uc_fw->minor_ver_found, + uc_fw->major_vf_ver_wanted, uc_fw->minor_vf_ver_wanted); + goto mismatch; + } + +done: + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_PRELOADED); + return 0; + +mismatch: + intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_ERROR); + return -ENOEXEC; +} + static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e) { struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index d9d1dc0b4cbb8..2835fa22572a4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -55,7 +55,8 @@ enum intel_uc_fw_status { INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */ INTEL_UC_FIRMWARE_LOAD_FAIL, /* failed to xfer or init/auth the fw */ INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */ - INTEL_UC_FIRMWARE_RUNNING /* init/auth done */ + INTEL_UC_FIRMWARE_RUNNING, /* init/auth done */ + INTEL_UC_FIRMWARE_PRELOADED, /* already pre-loaded */ }; enum intel_uc_fw_type { @@ -95,6 +96,8 @@ struct intel_uc_fw { */ u16 major_ver_wanted; u16 minor_ver_wanted; + u16 major_vf_ver_wanted; + u16 minor_vf_ver_wanted; u16 major_ver_found; u16 minor_ver_found; @@ -143,6 +146,8 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) return "TRANSFERRED"; case INTEL_UC_FIRMWARE_RUNNING: return "RUNNING"; + case INTEL_UC_FIRMWARE_PRELOADED: + return "PRELOADED"; } return ""; } @@ -169,6 +174,7 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status) case INTEL_UC_FIRMWARE_LOADABLE: case INTEL_UC_FIRMWARE_TRANSFERRED: case INTEL_UC_FIRMWARE_RUNNING: + case INTEL_UC_FIRMWARE_PRELOADED: return 0; } return -EINVAL; @@ -205,12 +211,14 @@ static inline bool intel_uc_fw_is_enabled(struct intel_uc_fw *uc_fw) static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw) { - return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE; + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE && + __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_PRELOADED; } static inline bool intel_uc_fw_is_loadable(struct intel_uc_fw *uc_fw) { - return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE; + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE && + __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_PRELOADED; } static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) @@ -220,7 +228,12 @@ static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw) { - return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING; + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_RUNNING; +} + +static inline bool intel_uc_fw_is_preloaded(struct intel_uc_fw *uc_fw) +{ + 
return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_PRELOADED; } static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw) @@ -230,7 +243,7 @@ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw) static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) { - if (intel_uc_fw_is_loaded(uc_fw)) + if (intel_uc_fw_is_loadable(uc_fw)) intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOADABLE); } @@ -257,6 +270,7 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type); +int intel_uc_fw_set_preloaded(struct intel_uc_fw *uc_fw, u16 major, u16 minor); int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw); void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e0e052cdf8b82..a09227d5c1d26 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -75,6 +75,16 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } +static int sriov_info(struct seq_file *m, void *data) +{ + struct drm_i915_private *i915 = node_to_i915(m->private); + struct drm_printer p = drm_seq_file_printer(m); + + i915_sriov_print_info(i915, &p); + + return 0; +} + static char get_tiling_flag(struct drm_i915_gem_object *obj) { switch (i915_gem_object_get_tiling(obj)) { @@ -740,6 +750,7 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_wa_registers", i915_wa_registers, 0}, {"i915_sseu_status", i915_sseu_status, 0}, {"i915_rps_boost_info", i915_rps_boost_info, 0}, + {"i915_sriov_info", sriov_info, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c index 783c8676eee21..4b65eaa904ce1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs_params.c +++ b/drivers/gpu/drm/i915/i915_debugfs_params.c @@ -40,6 +40,17 @@ static int notify_guc(struct drm_i915_private *i915) { int ret = 0; + /* + * FIXME: This needs to return -EPERM to userland to indicate + * that a VF is not allowed to change the scheduling policies. + * However, doing so will currently 'break' a whole bunch of IGT + * tests that rely on disabling engine reset. Although, they are + * already broken as they will not correctly detect hang failures + * and are potentially returning false successes. 
+ */ + if (IS_SRIOV_VF(i915)) + return 0; + if (intel_uc_uses_guc_submission(&to_gt(i915)->uc)) ret = intel_guc_global_policies_update(&to_gt(i915)->uc.guc); diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 3a2b1610f8efb..68de2608cee11 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -68,6 +68,7 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_rc6.h" +#include "gt/iov/intel_iov.h" #include "pxp/intel_pxp_pm.h" @@ -435,6 +436,8 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) if (ret) goto err_uncore; + intel_power_domains_prune(dev_priv); + /* As early as possible, scrub existing GPU state before clobbering */ sanitize_gpu(dev_priv); @@ -602,6 +605,10 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) pci_set_master(pdev); + /* Assume that VF is up, otherwise we may end with unknown state */ + if (IS_SRIOV_VF(dev_priv)) + ret = pci_set_power_state(pdev, PCI_D0); + /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there * according to the published specs. It doesn't appear to function @@ -767,6 +774,8 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); i915_print_iommu_status(dev_priv, &p); intel_gt_info_print(&to_gt(dev_priv)->info, &p); + + drm_printf(&p, "mode: %s\n", i915_iov_mode_to_string(IOV_MODE(dev_priv))); } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) @@ -804,6 +813,29 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) return i915; } +static void i915_virtualization_probe(struct drm_i915_private *i915) +{ + GEM_BUG_ON(i915->__mode); + + intel_vgpu_detect(i915); + if (intel_vgpu_active(i915)) + i915->__mode = I915_IOV_MODE_GVT_VGPU; + else + i915->__mode = i915_sriov_probe(i915); + + GEM_BUG_ON(!i915->__mode); + + if (IS_IOV_ACTIVE(i915)) + dev_info(i915->drm.dev, "Running in %s mode\n", + i915_iov_mode_to_string(IOV_MODE(i915))); +} + +static void i915_virtualization_commit(struct drm_i915_private *i915) +{ + if (IS_SRIOV_PF(i915)) + i915_sriov_pf_confirm(i915); +} + /** * i915_driver_probe - setup chip and create an initial config * @pdev: PCI device @@ -870,7 +902,15 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) disable_rpm_wakeref_asserts(&i915->runtime_pm); - intel_vgpu_detect(i915); + /* This must be called before any calls to IS/IOV_MODE() macros */ + i915_virtualization_probe(i915); + + ret = i915_sriov_early_tweaks(i915); + if (ret < 0) + goto out_pci_disable; + + /* XXX find better place */ + intel_iov_init_early(&to_gt(i915)->iov); ret = i915_driver_mmio_probe(i915); if (ret < 0) @@ -904,6 +944,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) enable_rpm_wakeref_asserts(&i915->runtime_pm); + i915_virtualization_commit(i915); + i915_welcome_messages(i915); i915->do_release = true; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index aaecb5f6e08d5..e34823663e431 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -102,7 +102,10 @@ #include "i915_perf_types.h" #include "i915_request.h" #include "i915_scheduler.h" +#include "i915_sriov.h" +#include "i915_sriov_types.h" #include "gt/intel_timeline.h" +#include "i915_virtualization.h" #include "i915_vma.h" #include "i915_irq.h" @@ -668,6 +671,14 @@ struct drm_i915_private 
{ /* i915 device parameters */ struct i915_params params; + /* i915 virtualization mode, use IOV_MODE() to access */ + enum i915_iov_mode __mode; +#define IOV_MODE(i915) ({ \ + BUILD_BUG_ON(!I915_IOV_MODE_NONE); \ + GEM_BUG_ON(!(i915)->__mode); \ + (i915)->__mode; \ +}) + const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ struct intel_driver_caps caps; @@ -699,6 +710,7 @@ struct drm_i915_private { struct intel_uncore uncore; struct intel_uncore_mmio_debug mmio_debug; + struct i915_sriov sriov; struct i915_virtual_gpu vgpu; struct intel_gvt *gvt; @@ -1543,6 +1555,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) +#define HAS_SRIOV(dev_priv) (INTEL_INFO(dev_priv)->has_sriov) + #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) @@ -1606,6 +1620,8 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) return IS_BROXTON(i915) && intel_vtd_active(i915); } +bool __pci_resource_valid(struct pci_dev *pdev, int bar); + static inline bool intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 525ae832aa9a2..de5372f1e2336 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -111,6 +111,9 @@ i915_param_named_unsafe(force_probe, charp, 0400, "Force probe the driver for specified devices. " "See CONFIG_DRM_I915_FORCE_PROBE for details."); +i915_param_named_unsafe(enable_secure_batch, bool, 0400, + "Enable for legacy tests I915_EXEC_SECURE. (default: 0)"); + i915_param_named_unsafe(disable_power_well, int, 0400, "Disable display power wells when possible " "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); @@ -163,7 +166,11 @@ i915_param_named_unsafe(edp_vswing, int, 0400, i915_param_named_unsafe(enable_guc, int, 0400, "Enable GuC load for GuC submission and/or HuC load. " "Required functionality can be selected using bitmask values. " - "(-1=auto [default], 0=disable, 1=GuC submission, 2=HuC load)"); + "(-1=auto [default], 0=disable, 1=GuC submission, 2=HuC load, " + "4=SR-IOV PF)"); + +i915_param_named_unsafe(guc_feature_flags, uint, 0400, + "GuC feature flags. Requires GuC to be loaded. (0=none [default])"); i915_param_named(guc_log_level, int, 0400, "GuC firmware logging level. Requires GuC to be loaded. " @@ -205,6 +212,17 @@ i915_param_named_unsafe(request_timeout_ms, uint, 0600, "Default request/fence/batch buffer expiration timeout."); #endif +i915_param_named(max_vfs, uint, 0400, + "Limit number of virtual functions to allocate. " + "(default: no limit; N=limit to N, 0=no VFs)"); + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_IOV) +i915_param_named_unsafe(vfs_flr_mask, ulong, 0600, + "Bitmask to enable (1) or disable (0) cleaning by PF VF's resources " + "(GGTT and LMEM) after FLR (default: ~0 - cleaning enable for all VFs) " + "Bit number indicates VF number, e.g. 
bit 1 indicates VF1"); +#endif + static __always_inline void _print_param(struct drm_printer *p, const char *name, const char *type, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index c9d53ff910a0e..f95e7be668740 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -32,7 +32,8 @@ struct drm_printer; #define ENABLE_GUC_SUBMISSION BIT(0) #define ENABLE_GUC_LOAD_HUC BIT(1) -#define ENABLE_GUC_MASK GENMASK(1, 0) +#define ENABLE_GUC_SRIOV_PF BIT(2) +#define ENABLE_GUC_MASK GENMASK(2, 0) /* * Invoke param, a function-like macro, for each i915 param, with arguments: @@ -60,6 +61,7 @@ struct drm_printer; param(int, enable_ips, 1, 0600) \ param(int, invert_brightness, 0, 0600) \ param(int, enable_guc, -1, 0400) \ + param(unsigned int, guc_feature_flags, 0, 0400) \ param(int, guc_log_level, -1, 0400) \ param(char *, guc_firmware_path, NULL, 0400) \ param(char *, huc_firmware_path, NULL, 0400) \ @@ -74,6 +76,9 @@ struct drm_printer; param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \ param(unsigned long, fake_lmem_start, 0, IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM) ? 0400 : 0) \ param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \ + param(unsigned int, max_vfs, ~0, 0400) \ + param(unsigned long, vfs_flr_mask, ~0, IS_ENABLED(CONFIG_DRM_I915_DEBUG_IOV) ? 0600 : 0) \ + param(bool, enable_secure_batch, false, 0400) \ /* leave bools at the end to not create holes */ \ param(bool, enable_hangcheck, true, 0600) \ param(bool, load_detect_test, false, 0600) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index b888bb2ea7971..9cf0e5e440a60 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -879,6 +879,7 @@ static const struct intel_device_info tgl_info = { .display.has_modular_fia = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), + .has_sriov = 1, }; static const struct intel_device_info rkl_info = { @@ -924,6 +925,7 @@ static const struct intel_device_info adl_s_info = { .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .dma_mask_size = 39, + .has_sriov = 1, }; #define XE_LPD_CURSOR_OFFSETS \ @@ -988,6 +990,7 @@ static const struct intel_device_info adl_p_info = { BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .ppgtt_size = 48, .dma_mask_size = 39, + .has_sriov = 1, }; #undef GEN @@ -1144,6 +1147,9 @@ static void i915_pci_remove(struct pci_dev *pdev) if (!i915) /* driver load aborted, nothing to cleanup */ return; + if (IS_SRIOV_PF(i915)) + i915_sriov_pf_disable_vfs(i915); + i915_driver_remove(i915); pci_set_drvdata(pdev, NULL); } @@ -1179,7 +1185,7 @@ static bool force_probe(u16 device_id, const char *devices) return ret; } -static bool __pci_resource_valid(struct pci_dev *pdev, int bar) +bool __pci_resource_valid(struct pci_dev *pdev, int bar) { if (!pci_resource_flags(pdev, bar)) return false; @@ -1224,12 +1230,13 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } - /* Only bind to function 0 of the device. Early generations - * used function 1 as a placeholder for multi-head. This causes - * us confusion instead, especially on the systems where both - * functions have the same PCI-ID! + /* + * Don't bind to non-zero function, unless it is a virtual function. + * Early generations used function 1 as a placeholder for multi-head. 
+ * This causes us confusion instead, especially on the systems where + * both functions have the same PCI-ID! */ - if (PCI_FUNC(pdev->devfn)) + if (PCI_FUNC(pdev->devfn) && !pdev->is_virtfn) return -ENODEV; if (!intel_bars_valid(pdev, intel_info)) @@ -1267,9 +1274,47 @@ static void i915_pci_shutdown(struct pci_dev *pdev) { struct drm_i915_private *i915 = pci_get_drvdata(pdev); + if (IS_SRIOV_PF(i915)) + i915_sriov_pf_disable_vfs(i915); + i915_driver_shutdown(i915); } +/** + * i915_pci_sriov_configure - Configure SR-IOV (enable/disable VFs). + * @pdev: pci_dev struct + * @num_vfs: number of VFs to enable (or zero to disable all) + * + * This function will be called when user requests SR-IOV configuration via the + * sysfs interface. Note that VFs configuration can be done only on the PF and + * after successful PF initialization. + * + * Return: number of configured VFs or a negative error code on failure. + */ +static int i915_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_i915_private *i915 = to_i915(dev); + int ret; + + /* handled in drivers/pci/pci-sysfs.c */ + GEM_BUG_ON(num_vfs < 0); + GEM_BUG_ON(num_vfs > U16_MAX); + GEM_BUG_ON(num_vfs > pci_sriov_get_totalvfs(pdev)); + GEM_BUG_ON(num_vfs && pci_num_vf(pdev)); + GEM_BUG_ON(!num_vfs && !pci_num_vf(pdev)); + + if (!IS_SRIOV_PF(i915)) + return -ENODEV; + + if (num_vfs > 0) + ret = i915_sriov_pf_enable_vfs(i915, num_vfs); + else + ret = i915_sriov_pf_disable_vfs(i915); + + return ret; +} + static struct pci_driver i915_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, @@ -1277,6 +1322,7 @@ static struct pci_driver i915_pci_driver = { .remove = i915_pci_remove, .shutdown = i915_pci_shutdown, .driver.pm = &i915_pm_ops, + .sriov_configure = i915_pci_sriov_configure, }; int i915_pci_register_driver(void) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 170bba913c30c..472e239b2fb85 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -4352,6 +4352,9 @@ void i915_perf_init(struct drm_i915_private *i915) /* XXX const struct i915_perf_ops! 
*/ + if (IS_SRIOV_VF(i915)) + return; + perf->oa_formats = oa_formats; if (IS_HASWELL(i915)) { perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 9e23f844873a2..b8e7fba5004af 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -1149,7 +1149,7 @@ void i915_pmu_register(struct drm_i915_private *i915) int ret = -ENOMEM; - if (GRAPHICS_VER(i915) <= 2) { + if (GRAPHICS_VER(i915) <= 2 || IS_SRIOV_VF(i915)) { drm_info(&i915->drm, "PMU not supported for this GPU."); return; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ad353de72d591..c4909641f1c17 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -289,6 +289,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GTTMMADR_BAR (0) #define GEN2_GTTMMADR_BAR (1) #define GFXMEM_BAR (2) +#define GTT_APERTURE_BAR GFXMEM_BAR +#define GEN12_LMEM_BAR GFXMEM_BAR + +#ifdef CONFIG_PCI_IOV +#define GEN12_VF_GTTMMADR_BAR (PCI_IOV_RESOURCES + GTTMMADR_BAR) +#define GEN12_VF_LMEM_BAR (PCI_IOV_RESOURCES + GEN12_LMEM_BAR) +#endif /* BSM in include/drm/i915_drm.h */ @@ -8287,6 +8294,14 @@ enum { #define ENGINE1_MASK REG_GENMASK(31, 16) #define ENGINE0_MASK REG_GENMASK(15, 0) +/* VF_CAPABILITY_REGISTER */ +#define GEN12_VF_CAP_REG _MMIO(0x1901f8) +#define GEN12_VF REG_BIT(0) + +/* VIRTUALIZATION CONTROL REGISTER */ +#define GEN12_VIRTUAL_CTRL_REG _MMIO(0x10108C) +#define GEN12_GUEST_GTT_UPDATE_EN REG_BIT(8) + #define ILK_DISPLAY_CHICKEN2 _MMIO(0x42004) /* Required on all Ironlake and Sandybridge according to the B-Spec. */ #define ILK_ELPIN_409_SELECT (1 << 25) diff --git a/drivers/gpu/drm/i915/i915_sriov.c b/drivers/gpu/drm/i915/i915_sriov.c new file mode 100644 index 0000000000000..f06bd1ef5fa41 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_sriov.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "i915_sriov.h" +#include "i915_sriov_sysfs.h" +#include "i915_drv.h" + +#include "gt/intel_gt.h" +#include "gt/intel_gt_pm.h" +#include "gt/iov/intel_iov_provisioning.h" +#include "gt/iov/intel_iov_utils.h" + +/* safe for use before register access via uncore is completed */ +static u32 pci_peek_mmio_read32(struct pci_dev *pdev, i915_reg_t reg) +{ + unsigned long offset = i915_mmio_reg_offset(reg); + void __iomem *addr; + u32 value; + + addr = pci_iomap_range(pdev, 0, offset, sizeof(u32)); + if (WARN(!addr, "Failed to map MMIO at %#lx\n", offset)) + return 0; + + value = readl(addr); + pci_iounmap(pdev, addr); + + return value; +} + +static bool gen12_pci_capability_is_vf(struct pci_dev *pdev) +{ + u32 value = pci_peek_mmio_read32(pdev, GEN12_VF_CAP_REG); + + /* + * Bugs in PCI programming (or failing hardware) can occasionally cause + * lost access to the MMIO BAR. When this happens, register reads will + * come back with 0xFFFFFFFF for every register, including VF_CAP, and + * then we may wrongly claim that we are running on the VF device. + * Since VF_CAP has only one bit valid, make sure no other bits are set. 
+ */ + if (WARN(value & ~GEN12_VF, "MMIO BAR malfunction, %#x returned %#x\n", + i915_mmio_reg_offset(GEN12_VF_CAP_REG), value)) + return false; + + return value & GEN12_VF; +} + +#ifdef CONFIG_PCI_IOV + +static bool wants_pf(struct drm_i915_private *i915) +{ + return i915->params.enable_guc & ENABLE_GUC_SRIOV_PF; +} + +static unsigned int wanted_max_vfs(struct drm_i915_private *i915) +{ + return i915->params.max_vfs; +} + +static int pf_reduce_totalvfs(struct drm_i915_private *i915, int limit) +{ + int err; + + err = pci_sriov_set_totalvfs(to_pci_dev(i915->drm.dev), limit); + drm_WARN(&i915->drm, err, "Failed to set number of VFs to %d (%pe)\n", + limit, ERR_PTR(err)); + return err; +} + +static bool pf_has_valid_vf_bars(struct drm_i915_private *i915) +{ + struct device *dev = i915->drm.dev; + struct pci_dev *pdev = to_pci_dev(dev); + + return __pci_resource_valid(pdev, GEN12_VF_GTTMMADR_BAR) && + __pci_resource_valid(pdev, GEN12_VF_LMEM_BAR); +} + +static bool pf_continue_as_native(struct drm_i915_private *i915, const char *why) +{ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) + drm_dbg(&i915->drm, "PF: %s, continuing as native\n", why); +#endif + pf_reduce_totalvfs(i915, 0); + return false; +} + +static bool pf_verify_readiness(struct drm_i915_private *i915) +{ + struct device *dev = i915->drm.dev; + struct pci_dev *pdev = to_pci_dev(dev); + int totalvfs = pci_sriov_get_totalvfs(pdev); + int newlimit = min_t(u16, wanted_max_vfs(i915), totalvfs); + + GEM_BUG_ON(!dev_is_pf(dev)); + GEM_WARN_ON(totalvfs > U16_MAX); + + if (!newlimit) + return pf_continue_as_native(i915, "all VFs disabled"); + + if (!wants_pf(i915)) + return pf_continue_as_native(i915, "GuC virtualization disabled"); + + if (!intel_uc_wants_guc_submission(&to_gt(i915)->uc)) + return pf_continue_as_native(i915, "GuC submission disabled"); + + if (!pf_has_valid_vf_bars(i915)) + return pf_continue_as_native(i915, "VFs BAR not ready"); + + pf_reduce_totalvfs(i915, newlimit); + + i915->sriov.pf.device_vfs = totalvfs; + i915->sriov.pf.driver_vfs = newlimit; + + return true; +} + +#else + +static int pf_reduce_totalvfs(struct drm_i915_private *i915, int limit) +{ + return 0; +} + +#endif + +/** + * i915_sriov_probe - Probe I/O Virtualization mode. + * @i915: the i915 struct + * + * This function should be called once and as soon as possible during + * driver probe to detect whether we are driving a PF or a VF device. + * SR-IOV PF mode detection is based on PCI @dev_is_pf() function. + * SR-IOV VF mode detection is based on MMIO register read. 
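+ *
+ * A minimal illustrative sketch (not part of this patch) of an early probe
+ * caller; i915->__mode is assumed here to be the field backing IOV_MODE():
+ *
+ *	i915->__mode = i915_sriov_probe(i915);
+ *	if (IS_IOV_ACTIVE(i915))
+ *		drm_dbg(&i915->drm, "running in %s mode\n",
+ *			i915_iov_mode_to_string(IOV_MODE(i915)));
+ *
+ * Return: I915_IOV_MODE_SRIOV_PF, I915_IOV_MODE_SRIOV_VF or I915_IOV_MODE_NONE.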
+ */ +enum i915_iov_mode i915_sriov_probe(struct drm_i915_private *i915) +{ + struct device *dev = i915->drm.dev; + struct pci_dev *pdev = to_pci_dev(dev); + + if (!HAS_SRIOV(i915)) + return I915_IOV_MODE_NONE; + + if (gen12_pci_capability_is_vf(pdev)) + return I915_IOV_MODE_SRIOV_VF; + +#ifdef CONFIG_PCI_IOV + if (dev_is_pf(dev) && pf_verify_readiness(i915)) + return I915_IOV_MODE_SRIOV_PF; +#endif + + return I915_IOV_MODE_NONE; +} + +static void migration_worker_func(struct work_struct *w); + +static void vf_init_early(struct drm_i915_private *i915) +{ + INIT_WORK(&i915->sriov.vf.migration_worker, migration_worker_func); +} + +static int vf_check_guc_submission_support(struct drm_i915_private *i915) +{ + if (!intel_guc_submission_is_wanted(&to_gt(i915)->uc.guc)) { + drm_err(&i915->drm, "GuC submission disabled\n"); + return -ENODEV; + } + + return 0; +} + +static void vf_tweak_device_info(struct drm_i915_private *i915) +{ + struct intel_device_info *info = mkwrite_device_info(i915); + + /* Force PCH_NOOP. We have no access to display */ + i915->pch_type = PCH_NOP; + memset(&info->display, 0, sizeof(info->display)); + info->memory_regions &= ~(REGION_STOLEN_SMEM | + REGION_STOLEN_LMEM); +} + +/** + * i915_sriov_early_tweaks - Perform early tweaks needed for SR-IOV. + * @i915: the i915 struct + * + * This function should be called once and as soon as possible during + * driver probe to perform early checks and required tweaks to + * the driver data. + */ +int i915_sriov_early_tweaks(struct drm_i915_private *i915) +{ + int err; + + if (IS_SRIOV_VF(i915)) { + vf_init_early(i915); + err = vf_check_guc_submission_support(i915); + if (unlikely(err)) + return err; + vf_tweak_device_info(i915); + } + + return 0; +} + +int i915_sriov_pf_get_device_totalvfs(struct drm_i915_private *i915) +{ + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + return i915->sriov.pf.device_vfs; +} + +int i915_sriov_pf_get_totalvfs(struct drm_i915_private *i915) +{ + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + return i915->sriov.pf.driver_vfs; +} + +static void pf_set_status(struct drm_i915_private *i915, int status) +{ + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + GEM_BUG_ON(!status); + GEM_WARN_ON(i915->sriov.pf.__status); + + i915->sriov.pf.__status = status; +} + +static bool pf_checklist(struct drm_i915_private *i915) +{ + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + + if (intel_gt_has_unrecoverable_error(to_gt(i915))) { + pf_update_status(&to_gt(i915)->iov, -EIO, "GT wedged"); + return false; + } + + return true; +} + +/** + * i915_sriov_pf_confirm - Confirm that PF is ready to enable VFs. + * @i915: the i915 struct + * + * This function shall be called by the PF when all necessary + * initialization steps were successfully completed and PF is + * ready to enable VFs. + */ +void i915_sriov_pf_confirm(struct drm_i915_private *i915) +{ + struct device *dev = i915->drm.dev; + int totalvfs = i915_sriov_pf_get_totalvfs(i915); + + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + + if (i915_sriov_pf_aborted(i915) || !pf_checklist(i915)) { + dev_notice(dev, "No VFs could be associated with this PF!\n"); + pf_reduce_totalvfs(i915, 0); + return; + } + + dev_info(dev, "%d VFs could be associated with this PF\n", totalvfs); + pf_set_status(i915, totalvfs); +} + +/** + * i915_sriov_pf_abort - Abort PF initialization. + * @i915: the i915 struct + * + * This function should be called by the PF when some of the necessary + * initialization steps failed and PF won't be able to manage VFs. 
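+ *
+ * A minimal usage sketch (illustrative only; pf_init_step() is a hypothetical
+ * stand-in for any real PF initialization step):
+ *
+ *	int err = pf_init_step(i915);
+ *
+ *	if (err < 0)
+ *		i915_sriov_pf_abort(i915, err);
+ *
+ * Note that @err must be a negative error code.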
+ */ +void i915_sriov_pf_abort(struct drm_i915_private *i915, int err) +{ + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + GEM_BUG_ON(err >= 0); + + __i915_printk(i915, KERN_NOTICE, "PF aborted (%pe) %pS\n", + ERR_PTR(err), (void *)_RET_IP_); + + pf_set_status(i915, err); +} + +/** + * i915_sriov_pf_aborted - Check if PF initialization was aborted. + * @i915: the i915 struct + * + * This function may be called by the PF to check if any previous + * initialization step has failed. + * + * Return: true if already aborted + */ +bool i915_sriov_pf_aborted(struct drm_i915_private *i915) +{ + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + GEM_WARN_ON(i915->sriov.pf.__status > 0); + + return i915->sriov.pf.__status < 0; +} + +/** + * i915_sriov_pf_status - Status of the PF initialization. + * @i915: the i915 struct + * + * This function may be called by the PF to get its status. + * + * Return: number of supported VFs if PF is ready or + * a negative error code on failure (-EBUSY if + * PF initialization is still in progress). + */ +int i915_sriov_pf_status(struct drm_i915_private *i915) +{ + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + + return i915->sriov.pf.__status ?: -EBUSY; +} + +bool i915_sriov_pf_is_auto_provisioning_enabled(struct drm_i915_private *i915) +{ + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + + return !i915->sriov.pf.disable_auto_provisioning; +} + +int i915_sriov_pf_set_auto_provisioning(struct drm_i915_private *i915, bool enable) +{ + u16 num_vfs = i915_sriov_pf_get_totalvfs(i915); + int err; + + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + + if (enable == i915_sriov_pf_is_auto_provisioning_enabled(i915)) + return 0; + + /* disabling is always allowed */ + if (!enable) + goto set; + + /* enabling is only allowed if all provisioning is empty */ + err = intel_iov_provisioning_verify(&to_gt(i915)->iov, num_vfs); + if (err != -ENODATA) + return -ESTALE; + +set: + dev_info(i915->drm.dev, "VFs auto-provisioning was turned %s\n", + onoff(enable)); + + i915->sriov.pf.disable_auto_provisioning = !enable; + return 0; +} + +/** + * i915_sriov_print_info - Print SR-IOV information. + * @iov: the i915 struct + * @p: the DRM printer + * + * Print SR-IOV related info into provided DRM printer. + */ +void i915_sriov_print_info(struct drm_i915_private *i915, struct drm_printer *p) +{ + struct device *dev = i915->drm.dev; + struct pci_dev *pdev = to_pci_dev(dev); + + drm_printf(p, "supported: %s\n", yesno(HAS_SRIOV(i915))); + drm_printf(p, "enabled: %s\n", yesno(IS_SRIOV(i915))); + + if (!IS_SRIOV(i915)) + return; + + drm_printf(p, "mode: %s\n", i915_iov_mode_to_string(IOV_MODE(i915))); + + if (IS_SRIOV_PF(i915)) { + int status = i915_sriov_pf_status(i915); + + drm_printf(p, "status: %s\n", onoff(status > 0)); + if (status < 0) + drm_printf(p, "error: %d (%pe)\n", + status, ERR_PTR(status)); + + drm_printf(p, "device vfs: %u\n", i915_sriov_pf_get_device_totalvfs(i915)); + drm_printf(p, "driver vfs: %u\n", i915_sriov_pf_get_totalvfs(i915)); + drm_printf(p, "supported vfs: %u\n", pci_sriov_get_totalvfs(pdev)); + drm_printf(p, "enabled vfs: %u\n", pci_num_vf(pdev)); + + /* XXX legacy igt */ + drm_printf(p, "total_vfs: %d\n", pci_sriov_get_totalvfs(pdev)); + } + + /*XXX legacy igt */ + drm_printf(p, "virtualization: %s\n", enableddisabled(true)); +} + +static int pf_update_guc_clients(struct intel_iov *iov, unsigned int num_vfs) +{ + int err; + + GEM_BUG_ON(!intel_iov_is_pf(iov)); + + err = intel_iov_provisioning_push(iov, num_vfs); + if (unlikely(err)) + IOV_DEBUG(iov, "err=%d", err); + + return err; +} + +/** + * i915_sriov_pf_enable_vfs - Enable VFs. 
+ * @i915: the i915 struct + * @num_vfs: number of VFs to enable (shall not be zero) + * + * This function will enable specified number of VFs. Note that VFs can be + * enabled only after successful PF initialization. + * This function shall be called only on PF. + * + * Return: number of configured VFs or a negative error code on failure. + */ +int i915_sriov_pf_enable_vfs(struct drm_i915_private *i915, int num_vfs) +{ + bool auto_provisioning = i915_sriov_pf_is_auto_provisioning_enabled(i915); + struct device *dev = i915->drm.dev; + struct pci_dev *pdev = to_pci_dev(dev); + int err; + + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + GEM_BUG_ON(num_vfs < 0); + drm_dbg(&i915->drm, "enabling %d VFs\n", num_vfs); + + /* verify that all initialization was successfully completed */ + err = i915_sriov_pf_status(i915); + if (err < 0) + goto fail; + + /* hold the reference to runtime pm as long as VFs are enabled */ + intel_gt_pm_get_untracked(to_gt(i915)); + + err = intel_iov_provisioning_verify(&to_gt(i915)->iov, num_vfs); + if (err == -ENODATA) { + if (auto_provisioning) + err = intel_iov_provisioning_auto(&to_gt(i915)->iov, num_vfs); + else + err = 0; /* trust late provisioning */ + } + if (unlikely(err)) + goto fail_pm; + + err = pf_update_guc_clients(&to_gt(i915)->iov, num_vfs); + if (unlikely(err < 0)) + goto fail_pm; + + err = pci_enable_sriov(pdev, num_vfs); + if (err < 0) + goto fail_guc; + + i915_sriov_sysfs_update_links(i915, true); + + dev_info(dev, "Enabled %u VFs\n", num_vfs); + return num_vfs; + +fail_guc: + pf_update_guc_clients(&to_gt(i915)->iov, 0); +fail_pm: + intel_iov_provisioning_auto(&to_gt(i915)->iov, 0); + intel_gt_pm_put_untracked(to_gt(i915)); +fail: + drm_err(&i915->drm, "Failed to enable %u VFs (%pe)\n", + num_vfs, ERR_PTR(err)); + return err; +} + +/** + * i915_sriov_pf_disable_vfs - Disable VFs. + * @i915: the i915 struct + * + * This function will disable all previously enabled VFs. + * This function shall be called only on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int i915_sriov_pf_disable_vfs(struct drm_i915_private *i915) +{ + struct device *dev = i915->drm.dev; + struct pci_dev *pdev = to_pci_dev(dev); + u16 num_vfs = pci_num_vf(pdev); + u16 vfs_assigned = pci_vfs_assigned(pdev); + + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + drm_dbg(&i915->drm, "disabling %u VFs\n", num_vfs); + + if (vfs_assigned) { + dev_warn(dev, "Can't disable %u VFs, %u are still assigned\n", + num_vfs, vfs_assigned); + return -EPERM; + } + + if (!num_vfs) + return 0; + + i915_sriov_sysfs_update_links(i915, false); + + pci_disable_sriov(pdev); + + pf_update_guc_clients(&to_gt(i915)->iov, 0); + intel_iov_provisioning_auto(&to_gt(i915)->iov, 0); + intel_gt_pm_put_untracked(to_gt(i915)); + + dev_info(dev, "Disabled %u VFs\n", num_vfs); + return 0; +} + +static void vf_migration_recovery(struct drm_i915_private *i915) +{ + struct intel_gt *gt = to_gt(i915); + + drm_dbg(&i915->drm, "migration recovery in progress\n"); + + intel_gt_set_wedged(gt); + intel_gt_handle_error(gt, ALL_ENGINES, 0, "migration"); + + drm_dbg(&i915->drm, "migration recovery completed\n"); +} + +static void migration_worker_func(struct work_struct *w) +{ + struct drm_i915_private *i915 = container_of(w, struct drm_i915_private, + sriov.vf.migration_worker); + + vf_migration_recovery(i915); +} + +/** + * i915_sriov_vf_start_migration_recovery - Start VF migration recovery. + * @i915: the i915 struct + * + * This function shall be called only by VF. 
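+ *
+ * A minimal usage sketch (illustrative only; vf_handle_migration_notification()
+ * is a hypothetical handler for whatever event reports that this VF has just
+ * been migrated):
+ *
+ *	static void vf_handle_migration_notification(struct drm_i915_private *i915)
+ *	{
+ *		if (IS_SRIOV_VF(i915))
+ *			i915_sriov_vf_start_migration_recovery(i915);
+ *	}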
+ */ +void i915_sriov_vf_start_migration_recovery(struct drm_i915_private *i915) +{ + bool started; + + GEM_BUG_ON(!IS_SRIOV_VF(i915)); + + started = queue_work(system_unbound_wq, &i915->sriov.vf.migration_worker); + dev_info(i915->drm.dev, "VF migration recovery %s\n", started ? + "scheduled" : "already in progress"); +} diff --git a/drivers/gpu/drm/i915/i915_sriov.h b/drivers/gpu/drm/i915/i915_sriov.h new file mode 100644 index 0000000000000..0fc9f7e27bf70 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_sriov.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_SRIOV_H__ +#define __I915_SRIOV_H__ + +#include "i915_drv.h" +#include "i915_virtualization.h" + +struct drm_i915_private; +struct drm_printer; + +#ifdef CONFIG_PCI_IOV +#define IS_SRIOV_PF(i915) (IOV_MODE(i915) == I915_IOV_MODE_SRIOV_PF) +#else +#define IS_SRIOV_PF(i915) false +#endif +#define IS_SRIOV_VF(i915) (IOV_MODE(i915) == I915_IOV_MODE_SRIOV_VF) + +#define IS_SRIOV(i915) (IS_SRIOV_PF(i915) || IS_SRIOV_VF(i915)) + +enum i915_iov_mode i915_sriov_probe(struct drm_i915_private *i915); +int i915_sriov_early_tweaks(struct drm_i915_private *i915); +void i915_sriov_print_info(struct drm_i915_private *i915, struct drm_printer *p); + +/* PF only */ +void i915_sriov_pf_confirm(struct drm_i915_private *i915); +void i915_sriov_pf_abort(struct drm_i915_private *i915, int err); +bool i915_sriov_pf_aborted(struct drm_i915_private *i915); +int i915_sriov_pf_status(struct drm_i915_private *i915); +int i915_sriov_pf_get_device_totalvfs(struct drm_i915_private *i915); +int i915_sriov_pf_get_totalvfs(struct drm_i915_private *i915); +int i915_sriov_pf_enable_vfs(struct drm_i915_private *i915, int numvfs); +int i915_sriov_pf_disable_vfs(struct drm_i915_private *i915); + +bool i915_sriov_pf_is_auto_provisioning_enabled(struct drm_i915_private *i915); +int i915_sriov_pf_set_auto_provisioning(struct drm_i915_private *i915, bool enable); + +/* VF only */ +void i915_sriov_vf_start_migration_recovery(struct drm_i915_private *i915); + +#endif /* __I915_SRIOV_H__ */ diff --git a/drivers/gpu/drm/i915/i915_sriov_sysfs.c b/drivers/gpu/drm/i915/i915_sriov_sysfs.c new file mode 100644 index 0000000000000..3012c6559b7f1 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_sriov_sysfs.c @@ -0,0 +1,609 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_sriov_sysfs.h" +#include "i915_sriov_sysfs_types.h" +#include "i915_sysfs.h" + +#include "gt/iov/intel_iov_provisioning.h" +#include "gt/iov/intel_iov_state.h" + +/* + * /sys/class/drm/card* + * └── iov/ + * ├── ... + * ├── pf/ + * │   └── ... + * ├── vf1/ + * │   └── ... 
+ */ + +#define SRIOV_KOBJ_HOME_NAME "iov" +#define SRIOV_EXT_KOBJ_PF_NAME "pf" +#define SRIOV_EXT_KOBJ_VFn_NAME "vf%u" +#define SRIOV_DEVICE_LINK_NAME "device" + +struct drm_i915_private *sriov_kobj_to_i915(struct i915_sriov_kobj *kobj) +{ + struct device *kdev = kobj_to_dev(kobj->base.parent); + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); + + return i915; +} + +struct drm_i915_private *sriov_ext_kobj_to_i915(struct i915_sriov_ext_kobj *kobj) +{ + return sriov_kobj_to_i915(to_sriov_kobj(kobj->base.parent)); +} + +static inline bool sriov_ext_kobj_is_pf(struct i915_sriov_ext_kobj *kobj) +{ + return !kobj->id; +} + +/* core SR-IOV attributes */ + +static ssize_t mode_sriov_attr_show(struct drm_i915_private *i915, char *buf) +{ + return sysfs_emit(buf, "%s\n", i915_iov_mode_to_string(IOV_MODE(i915))); +} + +I915_SRIOV_ATTR_RO(mode); + +static struct attribute *sriov_attrs[] = { + &mode_sriov_attr.attr, + NULL +}; + +static const struct attribute_group sriov_attr_group = { + .attrs = sriov_attrs, +}; + +static const struct attribute_group *default_sriov_attr_groups[] = { + &sriov_attr_group, + NULL +}; + +/* extended (PF and VFs) SR-IOV attributes */ + +static ssize_t auto_provisioning_sriov_ext_attr_show(struct drm_i915_private *i915, + unsigned int id, char *buf) +{ + int value = i915_sriov_pf_is_auto_provisioning_enabled(i915); + + return sysfs_emit(buf, "%d\n", value); +} + +static ssize_t auto_provisioning_sriov_ext_attr_store(struct drm_i915_private *i915, + unsigned int id, + const char *buf, size_t count) +{ + bool value; + int err; + + err = kstrtobool(buf, &value); + if (err) + return err; + + err = i915_sriov_pf_set_auto_provisioning(i915, value); + return err ?: count; +} + +I915_SRIOV_EXT_ATTR(auto_provisioning); + +static ssize_t id_sriov_ext_attr_show(struct drm_i915_private *i915, + unsigned int id, char *buf) +{ + return sysfs_emit(buf, "%u\n", id); +} + +#define CONTROL_STOP "stop" +#define CONTROL_PAUSE "pause" +#define CONTROL_RESUME "resume" +#define CONTROL_CLEAR "clear" + +static ssize_t control_sriov_ext_attr_store(struct drm_i915_private *i915, + unsigned int id, + const char *buf, size_t count) +{ + struct intel_iov *iov = &to_gt(i915)->iov; + int err = -EPERM; + + if (sysfs_streq(buf, CONTROL_STOP)) { + err = intel_iov_state_stop_vf(iov, id); + } else if (sysfs_streq(buf, CONTROL_PAUSE)) { + err = intel_iov_state_pause_vf(iov, id); + } else if (sysfs_streq(buf, CONTROL_RESUME)) { + err = intel_iov_state_resume_vf(iov, id); + } else if (sysfs_streq(buf, CONTROL_CLEAR)) { + err = intel_iov_provisioning_clear(iov, id); + } else { + err = -EINVAL; + } + + return err ?: count; +} + +I915_SRIOV_EXT_ATTR_RO(id); +I915_SRIOV_EXT_ATTR_WO(control); + +static struct attribute *sriov_ext_attrs[] = { + NULL +}; + +static const struct attribute_group sriov_ext_attr_group = { + .attrs = sriov_ext_attrs, +}; + +static struct attribute *pf_ext_attrs[] = { + &auto_provisioning_sriov_ext_attr.attr, + NULL +}; + +static umode_t pf_ext_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct i915_sriov_ext_kobj *sriov_kobj = to_sriov_ext_kobj(kobj); + + if (!sriov_ext_kobj_is_pf(sriov_kobj)) + return 0; + + return attr->mode; +} + +static const struct attribute_group pf_ext_attr_group = { + .attrs = pf_ext_attrs, + .is_visible = pf_ext_attr_is_visible, +}; + +static struct attribute *vf_ext_attrs[] = { + &id_sriov_ext_attr.attr, + &control_sriov_ext_attr.attr, + NULL +}; + +static umode_t vf_ext_attr_is_visible(struct kobject *kobj, + struct 
attribute *attr, int index) +{ + struct i915_sriov_ext_kobj *sriov_kobj = to_sriov_ext_kobj(kobj); + + if (sriov_ext_kobj_is_pf(sriov_kobj)) + return 0; + + return attr->mode; +} + +static const struct attribute_group vf_ext_attr_group = { + .attrs = vf_ext_attrs, + .is_visible = vf_ext_attr_is_visible, +}; + +static const struct attribute_group *default_sriov_ext_attr_groups[] = { + &sriov_ext_attr_group, + &pf_ext_attr_group, + &vf_ext_attr_group, + NULL, +}; + +/* no user serviceable parts below */ + +static ssize_t sriov_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct drm_i915_private *i915 = sriov_kobj_to_i915(to_sriov_kobj(kobj)); + struct i915_sriov_attr *sriov_attr = to_sriov_attr(attr); + + return sriov_attr->show ? sriov_attr->show(i915, buf) : -EIO; +} + +static ssize_t sriov_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct drm_i915_private *i915 = sriov_kobj_to_i915(to_sriov_kobj(kobj)); + struct i915_sriov_attr *sriov_attr = to_sriov_attr(attr); + + return sriov_attr->store ? sriov_attr->store(i915, buf, count) : -EIO; +} + +static const struct sysfs_ops sriov_sysfs_ops = { + .show = sriov_attr_show, + .store = sriov_attr_store, +}; + +static void sriov_kobj_release(struct kobject *kobj) +{ + struct i915_sriov_kobj *sriov_kobj = to_sriov_kobj(kobj); + + kfree(sriov_kobj); +} + +static struct kobj_type sriov_ktype = { + .release = sriov_kobj_release, + .sysfs_ops = &sriov_sysfs_ops, + .default_groups = default_sriov_attr_groups, +}; + +static ssize_t sriov_ext_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct i915_sriov_ext_kobj *sriov_kobj = to_sriov_ext_kobj(kobj); + struct i915_sriov_ext_attr *sriov_attr = to_sriov_ext_attr(attr); + struct drm_i915_private *i915 = sriov_ext_kobj_to_i915(sriov_kobj); + unsigned int id = sriov_kobj->id; + + return sriov_attr->show ? sriov_attr->show(i915, id, buf) : -EIO; +} + +static ssize_t sriov_ext_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct i915_sriov_ext_kobj *sriov_kobj = to_sriov_ext_kobj(kobj); + struct i915_sriov_ext_attr *sriov_attr = to_sriov_ext_attr(attr); + struct drm_i915_private *i915 = sriov_ext_kobj_to_i915(sriov_kobj); + unsigned int id = sriov_kobj->id; + + return sriov_attr->store ? 
sriov_attr->store(i915, id, buf, count) : -EIO; +} + +static const struct sysfs_ops sriov_ext_sysfs_ops = { + .show = sriov_ext_attr_show, + .store = sriov_ext_attr_store, +}; + +static void sriov_ext_kobj_release(struct kobject *kobj) +{ + struct i915_sriov_ext_kobj *sriov_kobj = to_sriov_ext_kobj(kobj); + + kfree(sriov_kobj); +} + +static struct kobj_type sriov_ext_ktype = { + .release = sriov_ext_kobj_release, + .sysfs_ops = &sriov_ext_sysfs_ops, + .default_groups = default_sriov_ext_attr_groups, +}; + +static unsigned int pf_nodes_count(struct drm_i915_private *i915) +{ + /* 1 x PF + n x VFs */ + return 1 + i915_sriov_pf_get_totalvfs(i915); +} + +static int pf_setup_failed(struct drm_i915_private *i915, int err, const char *what) +{ + i915_probe_error(i915, "Failed to setup SR-IOV sysfs %s (%pe)\n", + what, ERR_PTR(err)); + return err; +} + +static int pf_setup_home(struct drm_i915_private *i915) +{ + struct device *kdev = i915->drm.primary->kdev; + struct i915_sriov_pf *pf = &i915->sriov.pf; + struct i915_sriov_kobj *home = pf->sysfs.home; + int err; + + GEM_BUG_ON(!IS_SRIOV_PF(i915)); + GEM_BUG_ON(home); + + err = i915_inject_probe_error(i915, -ENOMEM); + if (unlikely(err)) + goto failed; + + home = kzalloc(sizeof(*home), GFP_KERNEL); + if (unlikely(!home)) { + err = -ENOMEM; + goto failed; + } + + err = kobject_init_and_add(&home->base, &sriov_ktype, &kdev->kobj, SRIOV_KOBJ_HOME_NAME); + if (unlikely(err)) { + goto failed_init; + } + + GEM_BUG_ON(pf->sysfs.home); + pf->sysfs.home = home; + return 0; + +failed_init: + kobject_put(&home->base); +failed: + return pf_setup_failed(i915, err, "home"); +} + +static void pf_teardown_home(struct drm_i915_private *i915) +{ + struct i915_sriov_pf *pf = &i915->sriov.pf; + struct i915_sriov_kobj *home = fetch_and_zero(&pf->sysfs.home); + + if (home) + kobject_put(&home->base); +} + +static int pf_setup_tree(struct drm_i915_private *i915) +{ + struct i915_sriov_pf *pf = &i915->sriov.pf; + struct i915_sriov_kobj *home = pf->sysfs.home; + struct i915_sriov_ext_kobj **kobjs; + struct i915_sriov_ext_kobj *kobj; + unsigned int count = pf_nodes_count(i915); + unsigned int n; + int err; + + err = i915_inject_probe_error(i915, -ENOMEM); + if (unlikely(err)) + goto failed; + + kobjs = kcalloc(count, sizeof(*kobjs), GFP_KERNEL); + if (unlikely(!kobjs)) { + err = -ENOMEM; + goto failed; + } + + for (n = 0; n < count; n++) { + kobj = kzalloc(sizeof(*kobj), GFP_KERNEL); + if (!kobj) { + err = -ENOMEM; + goto failed_kobj_n; + } + + kobj->id = n; + if (n) { + err = kobject_init_and_add(&kobj->base, &sriov_ext_ktype, + &home->base, SRIOV_EXT_KOBJ_VFn_NAME, n); + } else { + err = kobject_init_and_add(&kobj->base, &sriov_ext_ktype, + &home->base, SRIOV_EXT_KOBJ_PF_NAME); + } + if (unlikely(err)) + goto failed_kobj_n; + + err = i915_inject_probe_error(i915, -EEXIST); + if (unlikely(err)) + goto failed_kobj_n; + + kobjs[n] = kobj; + } + + GEM_BUG_ON(pf->sysfs.kobjs); + pf->sysfs.kobjs = kobjs; + return 0; + +failed_kobj_n: + if (kobj) + kobject_put(&kobj->base); + while (n--) + kobject_put(&kobjs[n]->base); +failed: + return pf_setup_failed(i915, err, "tree"); +} + +static void pf_teardown_tree(struct drm_i915_private *i915) +{ + struct i915_sriov_pf *pf = &i915->sriov.pf; + struct i915_sriov_ext_kobj **kobjs = fetch_and_zero(&pf->sysfs.kobjs); + unsigned int count = pf_nodes_count(i915); + unsigned int n; + + if (!kobjs) + return; + + for (n = 0; n < count; n++) + kobject_put(&kobjs[n]->base); + + kfree(kobjs); +} + +static int pf_setup_device_link(struct 
drm_i915_private *i915) +{ + struct i915_sriov_pf *pf = &i915->sriov.pf; + struct i915_sriov_ext_kobj **kobjs = pf->sysfs.kobjs; + int err; + + err = i915_inject_probe_error(i915, -EEXIST); + if (unlikely(err)) + goto failed; + + err = sysfs_create_link(&kobjs[0]->base, &i915->drm.dev->kobj, SRIOV_DEVICE_LINK_NAME); + if (unlikely(err)) + goto failed; + + return 0; + +failed: + return pf_setup_failed(i915, err, "link"); +} + +static void pf_teardown_device_link(struct drm_i915_private *i915) +{ + struct i915_sriov_pf *pf = &i915->sriov.pf; + struct i915_sriov_ext_kobj **kobjs = pf->sysfs.kobjs; + + sysfs_remove_link(&kobjs[0]->base, SRIOV_DEVICE_LINK_NAME); +} + +static void pf_welcome(struct drm_i915_private *i915) +{ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) + struct i915_sriov_pf *pf = &i915->sriov.pf; + const char *path = kobject_get_path(&pf->sysfs.home->base, GFP_KERNEL); + + drm_dbg(&i915->drm, "SR-IOV sysfs available at /sys%s\n", path); + kfree(path); +#endif + GEM_BUG_ON(!i915->sriov.pf.sysfs.kobjs); +} + +static void pf_goodbye(struct drm_i915_private *i915) +{ + GEM_WARN_ON(i915->sriov.pf.sysfs.kobjs); + GEM_WARN_ON(i915->sriov.pf.sysfs.home); +} + +/** + * i915_sriov_sysfs_setup - Setup SR-IOV sysfs tree. + * @i915: the i915 struct + * + * On SR-IOV PF this function will setup dedicated sysfs tree + * with PF and VFs attributes. + * + * Return: 0 on success or a negative error code on failure. + */ +int i915_sriov_sysfs_setup(struct drm_i915_private *i915) +{ + int err; + + if (!IS_SRIOV_PF(i915)) + return 0; + + if (i915_sriov_pf_aborted(i915)) + return 0; + + err = pf_setup_home(i915); + if (unlikely(err)) + goto failed; + + err = pf_setup_tree(i915); + if (unlikely(err)) + goto failed_tree; + + err = pf_setup_device_link(i915); + if (unlikely(err)) + goto failed_link; + + pf_welcome(i915); + return 0; + +failed_link: + pf_teardown_tree(i915); +failed_tree: + pf_teardown_home(i915); +failed: + return pf_setup_failed(i915, err, ""); +} + +/** + * i915_sriov_sysfs_teardown - Cleanup SR-IOV sysfs tree. + * @i915: the i915 struct + * + * Cleanup data initialized by @i915_sriov_sysfs_setup. 
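+ *
+ * A minimal pairing sketch (illustrative only), mirroring how this helper and
+ * i915_sriov_sysfs_setup() are wired into i915_setup_sysfs() and
+ * i915_teardown_sysfs():
+ *
+ *	i915_sriov_sysfs_setup(i915);
+ *	...
+ *	i915_sriov_sysfs_teardown(i915);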
+ */ +void i915_sriov_sysfs_teardown(struct drm_i915_private *i915) +{ + if (!IS_SRIOV_PF(i915)) + return; + + pf_teardown_device_link(i915); + pf_teardown_tree(i915); + pf_teardown_home(i915); + pf_goodbye(i915); +} + +/* our Gen12 SR-IOV platforms are simple */ +#define GEN12_VF_OFFSET 1 +#define GEN12_VF_STRIDE 1 +#define GEN12_VF_ROUTING_OFFSET(id) (GEN12_VF_OFFSET + ((id) - 1) * GEN12_VF_STRIDE) + +static struct pci_dev *pf_get_vf_pci_dev(struct drm_i915_private *i915, unsigned int id) +{ + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); + u16 vf_devid = pci_dev_id(pdev) + GEN12_VF_ROUTING_OFFSET(id); + + GEM_BUG_ON(!dev_is_pf(&pdev->dev)); + GEM_BUG_ON(!id); + + /* caller must use pci_dev_put() */ + return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + PCI_BUS_NUM(vf_devid), + PCI_DEVFN(PCI_SLOT(vf_devid), + PCI_FUNC(vf_devid))); +} + +static int pf_add_vfs_device_links(struct drm_i915_private *i915) +{ + struct i915_sriov_pf *pf = &i915->sriov.pf; + struct i915_sriov_ext_kobj **kobjs = pf->sysfs.kobjs; + struct pci_dev *pf_pdev = to_pci_dev(i915->drm.dev); + struct pci_dev *vf_pdev = NULL; + unsigned int numvfs = pci_num_vf(pf_pdev); + unsigned int n; + int err; + + if (!kobjs) + return 0; + + GEM_BUG_ON(numvfs > pf_nodes_count(i915)); + + for (n = 1; n <= numvfs; n++) { + + err = i915_inject_probe_error(i915, -ENODEV); + if (unlikely(err)) { + vf_pdev = NULL; + goto failed_n; + } + + vf_pdev = pf_get_vf_pci_dev(i915, n); + if (unlikely(!vf_pdev)) { + err = -ENODEV; + goto failed_n; + } + + err = i915_inject_probe_error(i915, -EEXIST); + if (unlikely(err)) + goto failed_n; + + err = sysfs_create_link(&kobjs[n]->base, &vf_pdev->dev.kobj, + SRIOV_DEVICE_LINK_NAME); + if (unlikely(err)) + goto failed_n; + + /* balance pf_get_vf_pci_dev() */ + pci_dev_put(vf_pdev); + } + + return 0; + +failed_n: + if (vf_pdev) + pci_dev_put(vf_pdev); + while (n-- > 1) + sysfs_remove_link(&kobjs[n]->base, SRIOV_DEVICE_LINK_NAME); + + return pf_setup_failed(i915, err, "links"); +} + +static void pf_remove_vfs_device_links(struct drm_i915_private *i915) +{ + struct i915_sriov_pf *pf = &i915->sriov.pf; + struct i915_sriov_ext_kobj **kobjs = pf->sysfs.kobjs; + struct pci_dev *pf_pdev = to_pci_dev(i915->drm.dev); + unsigned int numvfs = pci_num_vf(pf_pdev); + unsigned int n; + + if (!kobjs) + return; + + GEM_BUG_ON(numvfs > pf_nodes_count(i915)); + + for (n = 1; n <= numvfs; n++) + sysfs_remove_link(&kobjs[n]->base, SRIOV_DEVICE_LINK_NAME); +} + +/** + * i915_sriov_sysfs_update_links - Update links in SR-IOV sysfs tree. + * @i915: the i915 struct + * + * On PF this function will add or remove PCI device links from VFs. 
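+ * @add: true to create the "device" links, false to remove them
+ *
+ * A minimal usage sketch (illustrative only; pdev and num_vfs are assumed to
+ * be local variables), mirroring the VF enable and disable paths in
+ * i915_sriov.c:
+ *
+ *	if (!pci_enable_sriov(pdev, num_vfs))
+ *		i915_sriov_sysfs_update_links(i915, true);
+ *	...
+ *	i915_sriov_sysfs_update_links(i915, false);
+ *	pci_disable_sriov(pdev);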
+ */ +void i915_sriov_sysfs_update_links(struct drm_i915_private *i915, bool add) +{ + if (!IS_SRIOV_PF(i915)) + return; + + if (add) + pf_add_vfs_device_links(i915); + else + pf_remove_vfs_device_links(i915); +} diff --git a/drivers/gpu/drm/i915/i915_sriov_sysfs.h b/drivers/gpu/drm/i915/i915_sriov_sysfs.h new file mode 100644 index 0000000000000..7fab9f795fba5 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_sriov_sysfs.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_SRIOV_SYSFS_H__ +#define __I915_SRIOV_SYSFS_H__ + +#include "i915_sriov_sysfs_types.h" + +int i915_sriov_sysfs_setup(struct drm_i915_private *i915); +void i915_sriov_sysfs_teardown(struct drm_i915_private *i915); +void i915_sriov_sysfs_update_links(struct drm_i915_private *i915, bool add); + +struct drm_i915_private *sriov_kobj_to_i915(struct i915_sriov_kobj *kobj); +struct drm_i915_private *sriov_ext_kobj_to_i915(struct i915_sriov_ext_kobj *kobj); + +#endif /* __I915_SRIOV_SYSFS_H__ */ diff --git a/drivers/gpu/drm/i915/i915_sriov_sysfs_types.h b/drivers/gpu/drm/i915/i915_sriov_sysfs_types.h new file mode 100644 index 0000000000000..aabfadd3c3740 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_sriov_sysfs_types.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_SRIOV_SYSFS_TYPES_H__ +#define __I915_SRIOV_SYSFS_TYPES_H__ + +#include + +struct drm_i915_private; + +struct i915_sriov_kobj { + struct kobject base; +}; +#define to_sriov_kobj(x) container_of(x, struct i915_sriov_kobj, base) + +struct i915_sriov_attr { + struct attribute attr; + ssize_t (*show)(struct drm_i915_private *i915, char *buf); + ssize_t (*store)(struct drm_i915_private *i915, const char *buf, size_t count); +}; +#define to_sriov_attr(x) container_of(x, struct i915_sriov_attr, attr) + +#define I915_SRIOV_ATTR(name) \ +static struct i915_sriov_attr name##_sriov_attr = \ + __ATTR(name, 0644, name##_sriov_attr_show, name##_sriov_attr_store) + +#define I915_SRIOV_ATTR_RO(name) \ +static struct i915_sriov_attr name##_sriov_attr = \ + __ATTR(name, 0444, name##_sriov_attr_show, NULL) + +struct i915_sriov_ext_kobj { + struct kobject base; + unsigned int id; +}; +#define to_sriov_ext_kobj(x) container_of(x, struct i915_sriov_ext_kobj, base) + +struct i915_sriov_ext_attr { + struct attribute attr; + ssize_t (*show)(struct drm_i915_private *i915, unsigned int id, char *buf); + ssize_t (*store)(struct drm_i915_private *i915, unsigned int id, + const char *buf, size_t count); +}; +#define to_sriov_ext_attr(x) container_of(x, struct i915_sriov_ext_attr, attr) + +#define I915_SRIOV_EXT_ATTR(name) \ +static struct i915_sriov_ext_attr name##_sriov_ext_attr = \ + __ATTR(name, 0644, name##_sriov_ext_attr_show, name##_sriov_ext_attr_store) + +#define I915_SRIOV_EXT_ATTR_RO(name) \ +static struct i915_sriov_ext_attr name##_sriov_ext_attr = \ + __ATTR(name, 0644, name##_sriov_ext_attr_show, NULL) + +#define I915_SRIOV_EXT_ATTR_WO(name) \ +static struct i915_sriov_ext_attr name##_sriov_ext_attr = \ + __ATTR(name, 0644, NULL, name##_sriov_ext_attr_store) + +#endif /* __I915_SRIOV_SYSFS_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_sriov_types.h b/drivers/gpu/drm/i915/i915_sriov_types.h new file mode 100644 index 0000000000000..167e8248fa9a8 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_sriov_types.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_SRIOV_TYPES_H__ +#define 
__I915_SRIOV_TYPES_H__ + +#include +#include "i915_sriov_sysfs_types.h" + +/** + * struct i915_sriov_pf - i915 SR-IOV PF data. + * @__status: Status of the PF. Don't access directly! + * @device_vfs: Number of VFs supported by the device. + * @driver_vfs: Number of VFs supported by the driver. + * @sysfs.home: Home object for all entries in sysfs. + * @sysfs.kobjs: Array with PF and VFs objects exposed in sysfs. + */ +struct i915_sriov_pf { + int __status; + u16 device_vfs; + u16 driver_vfs; + struct { + struct i915_sriov_kobj *home; + struct i915_sriov_ext_kobj **kobjs; + } sysfs; + + /** @disable_auto_provisioning: flag to control VFs auto-provisioning */ + bool disable_auto_provisioning; +}; + +/** + * struct i915_sriov_vf - i915 SR-IOV VF data. + */ +struct i915_sriov_vf { + + /** @migration_worker: migration recovery worker */ + struct work_struct migration_worker; +}; + +/** + * struct i915_sriov - i915 SR-IOV data. + * @pf: PF only data. + */ +struct i915_sriov { + union { + struct i915_sriov_pf pf; + struct i915_sriov_vf vf; + }; +}; + +#endif /* __I915_SRIOV_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index fae4d1f4f275a..834296edb1286 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -33,12 +33,14 @@ #include "gt/intel_rc6.h" #include "gt/intel_rps.h" #include "gt/sysfs_engines.h" +#include "gt/iov/intel_iov_sysfs.h" #include "i915_drv.h" +#include "i915_sriov_sysfs.h" #include "i915_sysfs.h" #include "intel_pm.h" -static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) +struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) { struct drm_minor *minor = dev_get_drvdata(kdev); return to_i915(minor->dev); @@ -532,25 +534,32 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv) ret = 0; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) ret = sysfs_create_files(&kdev->kobj, vlv_attrs); - else if (GRAPHICS_VER(dev_priv) >= 6) + else if (GRAPHICS_VER(dev_priv) >= 6 && !IS_SRIOV_VF(dev_priv)) ret = sysfs_create_files(&kdev->kobj, gen6_attrs); if (ret) drm_err(&dev_priv->drm, "RPS sysfs setup failed\n"); + i915_sriov_sysfs_setup(dev_priv); + i915_setup_error_capture(kdev); intel_engines_add_sysfs(dev_priv); + + intel_iov_sysfs_setup(&to_gt(dev_priv)->iov); } void i915_teardown_sysfs(struct drm_i915_private *dev_priv) { struct device *kdev = dev_priv->drm.primary->kdev; + intel_iov_sysfs_teardown(&to_gt(dev_priv)->iov); i915_teardown_error_capture(kdev); + i915_sriov_sysfs_teardown(dev_priv); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) sysfs_remove_files(&kdev->kobj, vlv_attrs); - else + else if (GRAPHICS_VER(dev_priv) >= 6 && !IS_SRIOV_VF(dev_priv)) sysfs_remove_files(&kdev->kobj, gen6_attrs); device_remove_bin_file(kdev, &dpf_attrs_1); device_remove_bin_file(kdev, &dpf_attrs); diff --git a/drivers/gpu/drm/i915/i915_sysfs.h b/drivers/gpu/drm/i915/i915_sysfs.h index 41afd4366416a..243a17741e3f1 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.h +++ b/drivers/gpu/drm/i915/i915_sysfs.h @@ -6,8 +6,11 @@ #ifndef __I915_SYSFS_H__ #define __I915_SYSFS_H__ +struct device; struct drm_i915_private; +struct drm_i915_private *kdev_minor_to_i915(struct device *kdev); + void i915_setup_sysfs(struct drm_i915_private *i915); void i915_teardown_sysfs(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index f9e780dee9dec..72a09ef6bd769 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ 
b/drivers/gpu/drm/i915/i915_utils.c @@ -114,3 +114,61 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout) /* Keep t->expires = 0 reserved to indicate a canceled timer. */ mod_timer(t, jiffies + timeout ?: 1); } + +/** + * from_user_to_u32array - convert user input into array of u32 + * @from: user input + * @count: number of characters to read + * @array: array with results + * @size: size of the array + * + * We expect input formatted as comma-separated list of integer values. + * + * Returns number of entries parsed or negative errno on failure. + */ +int from_user_to_u32array(const char __user *from, size_t count, + u32 *array, unsigned int size) +{ + unsigned int num = 0; + char *buf, *p, save; + int ret; + + /* [(sign + longest representation) + comma] + newline + terminator */ + if (count > (1 + sizeof(u32) * 8 + 1) * size + 1 + 1) + return -EFBIG; + + p = buf = kzalloc(count + 1, GFP_USER); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, from, count)) { + ret = -EFAULT; + goto out_free; + } + + do { + int len; + + if (num == size) { + ret = -EINVAL; + goto out_free; + } + len = strcspn(p, ","); + + /* nul-terminate and parse */ + save = p[len]; + p[len] = '\0'; + + ret = kstrtou32(p, 0, &array[num]); + if (ret) + goto out_free; + + p += len + 1; + num++; + } while (save == ','); + + ret = num; +out_free: + kfree(buf); + return ret; +} diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 7a5925072466a..e11513fc54fba 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -459,4 +459,9 @@ static inline bool timer_expired(const struct timer_list *t) return timer_active(t) && !timer_pending(t); } +#define make_u64(hi__, low__) ((u64)(hi__) << 32 | (u64)(low__)) + +int from_user_to_u32array(const char __user *from, size_t count, + u32 *array, unsigned int size); + #endif /* !__I915_UTILS_H */ diff --git a/drivers/gpu/drm/i915/i915_virtualization.h b/drivers/gpu/drm/i915/i915_virtualization.h new file mode 100644 index 0000000000000..1e80e66eec167 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_virtualization.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_VIRTUALIZATION_H__ +#define __I915_VIRTUALIZATION_H__ + +#include + +#include "i915_gem.h" +#include "i915_virtualization_types.h" + +static inline const char *i915_iov_mode_to_string(enum i915_iov_mode mode) +{ + switch (mode) { + case I915_IOV_MODE_NONE: + return "non virtualized"; + case I915_IOV_MODE_GVT_VGPU: + return "GVT VGPU"; + case I915_IOV_MODE_SRIOV_PF: + return "SR-IOV PF"; + case I915_IOV_MODE_SRIOV_VF: + return "SR-IOV VF"; + default: + return ""; + } +} + +#define IS_IOV_ACTIVE(i915) (IOV_MODE(i915) != I915_IOV_MODE_NONE) + +#endif /* __I915_VIRTUALIZATION_H__ */ diff --git a/drivers/gpu/drm/i915/i915_virtualization_types.h b/drivers/gpu/drm/i915/i915_virtualization_types.h new file mode 100644 index 0000000000000..243a24b7c9afe --- /dev/null +++ b/drivers/gpu/drm/i915/i915_virtualization_types.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_VIRTUALIZATION_TYPES_H__ +#define __I915_VIRTUALIZATION_TYPES_H__ + +/** + * enum i915_iov_mode - I/O Virtualization mode. 
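+ * @I915_IOV_MODE_NONE: non virtualized (native) mode, I/O virtualization disabled.
+ * @I915_IOV_MODE_GVT_VGPU: running as a GVT vGPU.
+ * @I915_IOV_MODE_SRIOV_PF: running as the SR-IOV Physical Function.
+ * @I915_IOV_MODE_SRIOV_VF: running as an SR-IOV Virtual Function.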
+ */ +enum i915_iov_mode { + I915_IOV_MODE_NONE = 1, + I915_IOV_MODE_GVT_VGPU, + I915_IOV_MODE_SRIOV_PF, + I915_IOV_MODE_SRIOV_VF, +}; + +#endif /* __I915_VIRTUALIZATION_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index fd1f15f8ff773..6adaddd73e7b0 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -144,6 +144,7 @@ enum intel_ppgtt_type { func(has_rps); \ func(has_runtime_pm); \ func(has_snoop); \ + func(has_sriov); \ func(has_coherent_ggtt); \ func(unfenced_needs_alignment); \ func(hws_needs_physical); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index fae4f7818d28b..8dc993a04e6fc 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -8166,6 +8166,12 @@ static const struct drm_i915_wm_disp_funcs nop_funcs = { /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_i915_private *dev_priv) { + if (IS_SRIOV_VF(dev_priv)) { + /* XXX */ + dev_priv->wm_disp = &skl_wm_funcs; + return; + } + /* For cxsr */ if (IS_PINEVIEW(dev_priv)) pnv_get_mem_freq(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 778da3179b3cc..02a23c599874e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1751,6 +1751,43 @@ __vgpu_write(8) __vgpu_write(16) __vgpu_write(32) +static int __vf_runtime_reg_cmp(u32 key, const struct vf_runtime_reg *reg) +{ + u32 offset = reg->offset; + + if (key < offset) + return -1; + else if (key > offset) + return 1; + else + return 0; +} + +static const struct vf_runtime_reg * +__vf_runtime_reg_find(struct drm_i915_private *i915, u32 offset) +{ + const struct vf_runtime_reg *regs = to_gt(i915)->iov.vf.runtime.regs; + u32 regs_num = to_gt(i915)->iov.vf.runtime.regs_size; + + return BSEARCH(offset, regs, regs_num, __vf_runtime_reg_cmp); +} + +#define __vf_read(x) \ +static u##x vf_read##x(struct intel_uncore *uncore, \ + i915_reg_t reg, bool trace) \ +{ \ + u32 offset = i915_mmio_reg_offset(reg); \ + const struct vf_runtime_reg *vf_reg = __vf_runtime_reg_find(uncore->i915, offset); \ + if (vf_reg) \ + return vf_reg->value; \ + return gen2_read##x(uncore, reg, trace); \ +} + +__vf_read(8) +__vf_read(16) +__vf_read(32) +#define vf_read64 gen2_read64 /* no support for 64 */ + #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \ do { \ (uncore)->funcs.mmio_writeb = x##_write8; \ @@ -2098,6 +2135,9 @@ static void uncore_raw_init(struct intel_uncore *uncore) } else if (GRAPHICS_VER(uncore->i915) == 5) { ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); + } else if (IS_SRIOV_VF(uncore->i915)) { + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vf); } else { ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); @@ -2171,13 +2211,15 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) * keep the GT powered down; we won't be able to communicate with it * and we should not continue with driver initialization. 
*/ - if (IS_DGFX(i915) && + if (IS_DGFX(i915) && !IS_SRIOV_VF(i915) && !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) { drm_err(&i915->drm, "LMEM not initialized by firmware\n"); return -ENODEV; } - if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915)) + if (GRAPHICS_VER(i915) > 5 && + !IS_SRIOV_VF(i915) && + !intel_vgpu_active(i915)) uncore->flags |= UNCORE_HAS_FORCEWAKE; if (!intel_uncore_has_forcewake(uncore)) { diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index bdd290f2bf3cd..d8f5c248af9cf 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -49,5 +49,6 @@ selftest(perf, i915_perf_live_selftests) selftest(slpc, intel_slpc_live_selftests) selftest(guc, intel_guc_live_selftests) selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests) +selftest(iov_ggtt, intel_iov_ggtt_live_selftests) /* Here be dragons: keep last to run last! */ selftest(late_gt_pm, intel_gt_pm_late_selftests) diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 8aa7b1d338659..cda2d59865337 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -177,6 +177,7 @@ struct drm_i915_private *mock_gem_device(void) spin_lock_init(&i915->gpu_error.lock); + i915->__mode = I915_IOV_MODE_NONE; i915_gem_init__mm(i915); intel_gt_init_early(to_gt(i915), i915); __intel_gt_init_early(to_gt(i915), i915);