Merge tag 'drm-intel-gt-next-2023-08-11' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
Cross-subsystem Changes:
- Backmerge of drm-next

Driver Changes:
- Apply workaround 22016122933 correctly (Jonathan, Matt R)
- Simplify shmem_create_from_object map_type selection (Jonathan, Tvrtko)
- Make i915_coherent_map_type GT-centric (Jonathan, Matt R)
- Selftest improvements (John)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZNYR3bKFquGc7u9w@jlahtine-mobl.ger.corp.intel.com
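The central driver change in this pull is the GT-centric map-type helper, intel_gt_coherent_map_type(), which replaces i915_coherent_map_type() throughout the diff below. As a rough standalone illustration of that selection logic only (not driver code), here is a minimal C sketch: the boolean parameters stand in for i915_gem_object_is_lmem(), intel_gt_needs_wa_22016122933(), HAS_LLC() and the always_coherent argument, and the main() harness is purely an assumption for the example.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's i915_map_type values. */
enum map_type { MAP_WB, MAP_WC };

/*
 * Mirrors the selection order of intel_gt_coherent_map_type() in the diff:
 * device-local memory or Wa_22016122933 forces a WC mapping, an LLC platform
 * or an explicit coherency request gives WB, everything else falls back to WC.
 */
static enum map_type coherent_map_type(bool obj_is_lmem, bool needs_wa_22016122933,
				       bool has_llc, bool always_coherent)
{
	if (obj_is_lmem || needs_wa_22016122933)
		return MAP_WC;
	if (has_llc || always_coherent)
		return MAP_WB;
	return MAP_WC;
}

int main(void)
{
	/* System-memory object on an LLC platform -> write-back mapping. */
	printf("%s\n", coherent_map_type(false, false, true, false) == MAP_WB ? "WB" : "WC");
	return 0;
}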
@@ -6,6 +6,7 @@
 #include <drm/i915_hdcp_interface.h>
 
 #include "gem/i915_gem_region.h"
+#include "gt/intel_gt.h"
 #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
 #include "i915_drv.h"
 #include "i915_utils.h"

@@ -632,7 +633,7 @@ static int intel_hdcp_gsc_initialize_message(struct drm_i915_private *i915,
 		return PTR_ERR(obj);
 	}
 
-	cmd_in = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+	cmd_in = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
 	if (IS_ERR(cmd_in)) {
 		drm_err(&i915->drm, "Failed to map gsc message page!\n");
 		err = PTR_ERR(cmd_in);

@@ -716,10 +716,6 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
 						     enum i915_map_type type);
 
-enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
-					  struct drm_i915_gem_object *obj,
-					  bool always_coherent);
-
 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
 				 unsigned long offset,
 				 unsigned long size);

@@ -468,21 +468,6 @@ void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
 	return ret;
 }
 
-enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
-					  struct drm_i915_gem_object *obj,
-					  bool always_coherent)
-{
-	/*
-	 * Wa_22016122933: always return I915_MAP_WC for MTL
-	 */
-	if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915))
-		return I915_MAP_WC;
-	if (HAS_LLC(i915) || always_coherent)
-		return I915_MAP_WB;
-	else
-		return I915_MAP_WC;
-}
-
 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
 				 unsigned long offset,
 				 unsigned long size)

@@ -13,12 +13,12 @@
 #include "selftests/igt_spinner.h"
 
 static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
+				 struct intel_gt *gt,
 				 bool fill)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	unsigned int i, count = obj->base.size / sizeof(u32);
 	enum i915_map_type map_type =
-		i915_coherent_map_type(i915, obj, false);
+		intel_gt_coherent_map_type(gt, obj, false);
 	u32 *cur;
 	int err = 0;
 

@@ -66,7 +66,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
 		if (err)
 			continue;
 
-		err = igt_fill_check_buffer(obj, true);
+		err = igt_fill_check_buffer(obj, gt, true);
 		if (err)
 			continue;
 

@@ -86,7 +86,7 @@ static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
 		if (err)
 			continue;
 
-		err = igt_fill_check_buffer(obj, false);
+		err = igt_fill_check_buffer(obj, gt, false);
 	}
 	i915_gem_object_put(obj);
 

@@ -233,7 +233,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
 			continue;
 
 		if (!vma) {
-			err = igt_fill_check_buffer(obj, true);
+			err = igt_fill_check_buffer(obj, gt, true);
 			if (err)
 				continue;
 		}

@@ -276,7 +276,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
 		if (err)
 			goto out_unlock;
 	} else {
-		err = igt_fill_check_buffer(obj, false);
+		err = igt_fill_check_buffer(obj, gt, false);
 	}
 
 out_unlock:

@@ -39,7 +39,7 @@ static void dbg_poison_ce(struct intel_context *ce)
 
 	if (ce->state) {
 		struct drm_i915_gem_object *obj = ce->state->obj;
-		int type = i915_coherent_map_type(ce->engine->i915, obj, true);
+		int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
 		void *map;
 
 		if (!i915_gem_object_trylock(obj, NULL))

@@ -1003,3 +1003,19 @@ void intel_gt_info_print(const struct intel_gt_info *info,
 
 	intel_sseu_dump(&info->sseu, p);
 }
+
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+					      struct drm_i915_gem_object *obj,
+					      bool always_coherent)
+{
+	/*
+	 * Wa_22016122933: always return I915_MAP_WC for Media
+	 * version 13.0 when the object is on the Media GT
+	 */
+	if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
+		return I915_MAP_WC;
+	if (HAS_LLC(gt->i915) || always_coherent)
+		return I915_MAP_WB;
+	else
+		return I915_MAP_WC;
+}

@@ -6,6 +6,7 @@
 #ifndef __INTEL_GT__
 #define __INTEL_GT__
 
+#include "i915_drv.h"
 #include "intel_engine_types.h"
 #include "intel_gt_types.h"
 #include "intel_reset.h"

@@ -24,6 +25,11 @@ static inline bool gt_is_root(struct intel_gt *gt)
 	return !gt->info.id;
 }
 
+static inline bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
+{
+	return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
+}
+
 static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
 {
 	return container_of(uc, struct intel_gt, uc);

@@ -107,4 +113,8 @@ void intel_gt_info_print(const struct intel_gt_info *info,
 
 void intel_gt_watchdog_work(struct work_struct *work);
 
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+					      struct drm_i915_gem_object *obj,
+					      bool always_coherent);
+
 #endif /* __INTEL_GT_H__ */

@@ -89,7 +89,7 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
 	enum i915_map_type type;
 	void *vaddr;
 
-	type = i915_coherent_map_type(vm->i915, obj, true);
+	type = intel_gt_coherent_map_type(vm->gt, obj, true);
 	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);

@@ -103,7 +103,7 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
 	enum i915_map_type type;
 	void *vaddr;
 
-	type = i915_coherent_map_type(vm->i915, obj, true);
+	type = intel_gt_coherent_map_type(vm->gt, obj, true);
 	vaddr = i915_gem_object_pin_map(obj, type);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);

@@ -1095,10 +1095,11 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
 	if (IS_ERR(obj)) {
 		obj = i915_gem_object_create_shmem(engine->i915, context_size);
 		/*
-		 * Wa_22016122933: For MTL the shared memory needs to be mapped
-		 * as WC on CPU side and UC (PAT index 2) on GPU side
+		 * Wa_22016122933: For Media version 13.0, all Media GT shared
+		 * memory needs to be mapped as WC on CPU side and UC (PAT
+		 * index 2) on GPU side.
 		 */
-		if (IS_METEORLAKE(engine->i915))
+		if (intel_gt_needs_wa_22016122933(engine->gt))
 			i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 	}
 	if (IS_ERR(obj))

@@ -1191,9 +1192,9 @@ lrc_pre_pin(struct intel_context *ce,
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	*vaddr = i915_gem_object_pin_map(ce->state->obj,
-					 i915_coherent_map_type(ce->engine->i915,
-								ce->state->obj,
-								false) |
+					 intel_gt_coherent_map_type(ce->engine->gt,
+								    ce->state->obj,
+								    false) |
 					 I915_MAP_OVERRIDE);
 
 	return PTR_ERR_OR_ZERO(*vaddr);

@@ -13,6 +13,7 @@
 #include "intel_engine_regs.h"
 #include "intel_gpu_commands.h"
 #include "intel_ring.h"
+#include "intel_gt.h"
 #include "intel_timeline.h"
 
 unsigned int intel_ring_update_space(struct intel_ring *ring)

@@ -56,7 +57,7 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
 	if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
 		addr = (void __force *)i915_vma_pin_iomap(vma);
 	} else {
-		int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+		int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);
 
 		addr = i915_gem_object_pin_map(vma->obj, type);
 	}

@@ -88,8 +88,9 @@ static int __live_context_size(struct intel_engine_cs *engine)
 		goto err;
 
 	vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
-						 i915_coherent_map_type(engine->i915,
-									ce->state->obj, false));
+						 intel_gt_coherent_map_type(engine->gt,
+									    ce->state->obj,
+									    false));
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		intel_context_unpin(ce);

@@ -73,7 +73,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
 	h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
 
 	vaddr = i915_gem_object_pin_map_unlocked(h->obj,
-						 i915_coherent_map_type(gt->i915, h->obj, false));
+						 intel_gt_coherent_map_type(gt, h->obj, false));
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		goto err_unpin_hws;

@@ -119,7 +119,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 		return ERR_CAST(obj);
 	}
 
-	vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
+	vaddr = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, false));
 	if (IS_ERR(vaddr)) {
 		i915_gem_object_put(obj);
 		i915_vm_put(vm);

@@ -1292,9 +1292,9 @@ static int compare_isolation(struct intel_engine_cs *engine,
 	}
 
 	lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
-					       i915_coherent_map_type(engine->i915,
-								      ce->state->obj,
-								      false));
+					       intel_gt_coherent_map_type(engine->gt,
+									  ce->state->obj,
+									  false));
 	if (IS_ERR(lrc)) {
 		err = PTR_ERR(lrc);
 		goto err_B1;

@@ -33,7 +33,6 @@ struct file *shmem_create_from_data(const char *name, void *data, size_t len)
 
 struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	enum i915_map_type map_type;
 	struct file *file;
 	void *ptr;

@@ -44,7 +43,7 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
 		return file;
 	}
 
-	map_type = i915_coherent_map_type(i915, obj, true);
+	map_type = i915_gem_object_is_lmem(obj) ? I915_MAP_WC : I915_MAP_WB;
 	ptr = i915_gem_object_pin_map_unlocked(obj, map_type);
 	if (IS_ERR(ptr))
 		return ERR_CAST(ptr);

@@ -282,7 +282,6 @@ out_rq:
 static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
 {
 	struct intel_gt *gt = gsc_uc_to_gt(gsc);
-	struct drm_i915_private *i915 = gt->i915;
 	void *src;
 
 	if (!gsc->local)

@@ -292,17 +291,13 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
 		return -ENOSPC;
 
 	src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
-					       i915_coherent_map_type(i915, gsc->fw.obj, true));
+					       intel_gt_coherent_map_type(gt, gsc->fw.obj, true));
 	if (IS_ERR(src))
 		return PTR_ERR(src);
 
 	memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
 	memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
 
-	/*
-	 * Wa_22016122933: Making sure the data in dst is
-	 * visible to GSC right away
-	 */
 	intel_guc_write_barrier(&gt->uc.guc);
 
 	i915_gem_object_unpin_map(gsc->fw.obj);

@@ -745,10 +745,11 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 		return ERR_CAST(obj);
 
 	/*
-	 * Wa_22016122933: For MTL the shared memory needs to be mapped
-	 * as WC on CPU side and UC (PAT index 2) on GPU side
+	 * Wa_22016122933: For Media version 13.0, all Media GT shared
+	 * memory needs to be mapped as WC on CPU side and UC (PAT
+	 * index 2) on GPU side.
 	 */
-	if (IS_METEORLAKE(gt->i915))
+	if (intel_gt_needs_wa_22016122933(gt))
 		i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 
 	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);

@@ -792,8 +793,8 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
 		return PTR_ERR(vma);
 
 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
-						 i915_coherent_map_type(guc_to_gt(guc)->i915,
-									vma->obj, true));
+						 intel_gt_coherent_map_type(guc_to_gt(guc),
+									    vma->obj, true));
 	if (IS_ERR(vaddr)) {
 		i915_vma_unpin_and_release(&vma, 0);
 		return PTR_ERR(vaddr);

@@ -960,10 +960,6 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
 	/* now update descriptor */
 	WRITE_ONCE(desc->head, head);
 
-	/*
-	 * Wa_22016122933: Making sure the head update is
-	 * visible to GuC right away
-	 */
 	intel_guc_write_barrier(ct_to_guc(ct));
 
 	return available - len;

@@ -27,7 +27,6 @@ struct mtl_huc_auth_msg_out {
 int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
 {
 	struct intel_gt *gt = huc_to_gt(huc);
-	struct drm_i915_private *i915 = gt->i915;
 	struct drm_i915_gem_object *obj;
 	struct mtl_huc_auth_msg_in *msg_in;
 	struct mtl_huc_auth_msg_out *msg_out;

@@ -43,7 +42,7 @@ int intel_huc_fw_auth_via_gsccs(struct intel_huc *huc)
 	pkt_offset = i915_ggtt_offset(huc->heci_pkt);
 
 	pkt_vaddr = i915_gem_object_pin_map_unlocked(obj,
-						     i915_coherent_map_type(i915, obj, true));
+						     intel_gt_coherent_map_type(gt, obj, true));
 	if (IS_ERR(pkt_vaddr))
 		return PTR_ERR(pkt_vaddr);
 

@@ -11,6 +11,7 @@
 #include <drm/drm_print.h>
 
 #include "gem/i915_gem_lmem.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_print.h"
 #include "intel_gsc_binary_headers.h"
 #include "intel_gsc_fw.h"

@@ -1197,7 +1198,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
 		return PTR_ERR(vma);
 
 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
-						 i915_coherent_map_type(gt->i915, vma->obj, true));
+						 intel_gt_coherent_map_type(gt, vma->obj, true));
 	if (IS_ERR(vaddr)) {
 		i915_vma_unpin_and_release(&vma, 0);
 		err = PTR_ERR(vaddr);

@@ -204,9 +204,9 @@ static int intel_guc_steal_guc_ids(void *arg)
 		if (IS_ERR(rq)) {
 			ret = PTR_ERR(rq);
 			rq = NULL;
-			if (ret != -EAGAIN) {
-				guc_err(guc, "Failed to create request %d: %pe\n",
-					context_index, ERR_PTR(ret));
+			if ((ret != -EAGAIN) || !last) {
+				guc_err(guc, "Failed to create %srequest %d: %pe\n",
+					last ? "" : "first ", context_index, ERR_PTR(ret));
 				goto err_spin_rq;
 			}
 		} else {

@@ -6,6 +6,7 @@
 #include "gem/i915_gem_internal.h"
 
 #include "gt/intel_context.h"
+#include "gt/intel_gt.h"
 #include "gt/uc/intel_gsc_fw.h"
 #include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
 

@@ -336,7 +337,7 @@ gsccs_create_buffer(struct intel_gt *gt,
 	}
 
 	/* return a virtual pointer */
-	*map = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+	*map = i915_gem_object_pin_map_unlocked(obj, intel_gt_coherent_map_type(gt, obj, true));
 	if (IS_ERR(*map)) {
 		drm_err(&i915->drm, "Failed to map gsccs backend %s.\n", bufname);
 		err = PTR_ERR(*map);

@@ -11,6 +11,7 @@
 #include "gem/i915_gem_lmem.h"
 
 #include "i915_drv.h"
+#include "gt/intel_gt.h"
 
 #include "intel_pxp.h"
 #include "intel_pxp_cmd_interface_42.h"

@@ -245,7 +246,9 @@ static int alloc_streaming_command(struct intel_pxp *pxp)
 	}
 
 	/* map the lmem into the virtual memory pointer */
-	cmd = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(i915, obj, true));
+	cmd = i915_gem_object_pin_map_unlocked(obj,
+					       intel_gt_coherent_map_type(pxp->ctrl_gt,
+									  obj, true));
 	if (IS_ERR(cmd)) {
 		drm_err(&i915->drm, "Failed to map gsc message page!\n");
 		err = PTR_ERR(cmd);

@@ -97,7 +97,7 @@ int igt_spinner_pin(struct igt_spinner *spin,
 	if (!spin->batch) {
 		unsigned int mode;
 
-		mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
+		mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false);
 		vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
 		if (IS_ERR(vaddr))
 			return PTR_ERR(vaddr);