drm/scheduler: properly forward fence errors
When a hw fence is signaled with an error, properly forward that error to the finished fence.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230420115752.31470-1-christian.koenig@amd.com
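For context, the error being forwarded typically originates in the driver's completion path: the backend flags it on the hardware fence before signaling. A minimal sketch, assuming a hypothetical completion handler (example_ring_complete and hw_status are invented names; dma_fence_set_error() and dma_fence_signal() are the real dma-fence APIs):

#include <linux/dma-fence.h>

/* Hypothetical driver completion handler: record the hw error on the
 * fence *before* signaling it; dma_fence_set_error() must not be
 * called on an already-signaled fence.
 */
static void example_ring_complete(struct dma_fence *hw_fence, int hw_status)
{
	if (hw_status)			/* e.g. -ETIMEDOUT after a GPU hang */
		dma_fence_set_error(hw_fence, hw_status);
	dma_fence_signal(hw_fence);	/* ends up in drm_sched_job_done_cb() */
}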
@@ -144,7 +144,7 @@ static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
 {
 	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
 
-	drm_sched_fence_finished(job->s_fence);
+	drm_sched_fence_finished(job->s_fence, -ESRCH);
 	WARN_ON(job->s_fence->parent);
 	job->sched->ops->free_job(job);
 }
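Jobs flushed out with their entity now complete with -ESRCH ("no such process"). Passing the code through drm_sched_fence_finished() sets the error at the one place where the finished fence is signaled, which is what lets the explicit dma_fence_set_error() in drm_sched_entity_kill() below be dropped.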
@@ -195,8 +195,6 @@ static void drm_sched_entity_kill(struct drm_sched_entity *entity)
 	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
 		struct drm_sched_fence *s_fence = job->s_fence;
 
-		dma_fence_set_error(&s_fence->finished, -ESRCH);
-
 		dma_fence_get(&s_fence->finished);
 		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
 						    drm_sched_entity_kill_jobs_cb))
@@ -53,8 +53,10 @@ void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
 	dma_fence_signal(&fence->scheduled);
 }
 
-void drm_sched_fence_finished(struct drm_sched_fence *fence)
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
 {
+	if (result)
+		dma_fence_set_error(&fence->finished, result);
 	dma_fence_signal(&fence->finished);
 }
 
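With the error now set centrally here, anyone waiting on the finished fence can observe it once the fence signals. A hedged sketch of a consumer (example_wait_job is a hypothetical name; dma_fence_wait() and dma_fence_get_status() are existing dma-fence APIs):

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical waiter: after this patch, the hw error (or -ESRCH for a
 * killed entity) is visible on the job's finished fence.
 */
static int example_wait_job(struct drm_sched_fence *s_fence)
{
	long r = dma_fence_wait(&s_fence->finished, true);

	if (r)
		return r;	/* wait interrupted */

	/* 1 = signaled without error, <0 = the forwarded error */
	return dma_fence_get_status(&s_fence->finished);
}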
@@ -262,7 +262,7 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
  *
  * Finish the job's fence and wake up the worker thread.
  */
-static void drm_sched_job_done(struct drm_sched_job *s_job)
+static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 {
 	struct drm_sched_fence *s_fence = s_job->s_fence;
 	struct drm_gpu_scheduler *sched = s_fence->sched;
@@ -273,7 +273,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job)
 	trace_drm_sched_process_job(s_fence);
 
 	dma_fence_get(&s_fence->finished);
-	drm_sched_fence_finished(s_fence);
+	drm_sched_fence_finished(s_fence, result);
 	dma_fence_put(&s_fence->finished);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
@@ -287,7 +287,7 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 {
 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
 
-	drm_sched_job_done(s_job);
+	drm_sched_job_done(s_job, f->error);
}
 
 /**
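Reading f->error here is safe: the dma-fence contract requires dma_fence_set_error() to be called before dma_fence_signal(), and this callback only runs after the hw fence has signaled, so the field is stable by then. A fence that completed successfully simply carries error == 0, which drm_sched_fence_finished() treats as "no error".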
@@ -537,12 +537,12 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 			r = dma_fence_add_callback(fence, &s_job->cb,
 						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_job_done(s_job);
+				drm_sched_job_done(s_job, fence->error);
 			else if (r)
 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
 					      r);
 		} else
-			drm_sched_job_done(s_job);
+			drm_sched_job_done(s_job, 0);
 	}
 
 	if (full_recovery) {
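The two call sites in drm_sched_start() differ deliberately: -ENOENT from dma_fence_add_callback() means the parent fence has already signaled, so whatever error it carries is forwarded on the spot, while a job restarted without a parent fence has no hardware result to propagate and is completed with 0.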
@@ -1059,15 +1059,13 @@ static int drm_sched_main(void *param)
 			r = dma_fence_add_callback(fence, &sched_job->cb,
 						   drm_sched_job_done_cb);
 			if (r == -ENOENT)
-				drm_sched_job_done(sched_job);
+				drm_sched_job_done(sched_job, fence->error);
 			else if (r)
 				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
 					      r);
 		} else {
-			if (IS_ERR(fence))
-				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
-
-			drm_sched_job_done(sched_job);
+			drm_sched_job_done(sched_job, IS_ERR(fence) ?
+					   PTR_ERR(fence) : 0);
 		}
 
 		wake_up(&sched->job_scheduled);
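In the main loop, run_job() can hand back a hardware fence, NULL, or an ERR_PTR; the ERR_PTR case no longer pokes the finished fence directly but is folded into the single drm_sched_job_done() call. A hedged sketch of a backend relying on this (example_run_job and example_submit_to_ring are hypothetical names; the run_job signature matches struct drm_sched_backend_ops):

#include <linux/err.h>
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

struct dma_fence *example_submit_to_ring(struct drm_sched_job *job); /* hypothetical */

/* Hypothetical backend: returning an ERR_PTR from run_job() now makes
 * the scheduler forward PTR_ERR() straight to the finished fence.
 */
static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
{
	struct dma_fence *hw_fence = example_submit_to_ring(sched_job);

	if (!hw_fence)
		return ERR_PTR(-ENOMEM);	/* forwarded as the job's error */
	return hw_fence;
}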
@@ -598,7 +598,7 @@ void drm_sched_fence_init(struct drm_sched_fence *fence,
 void drm_sched_fence_free(struct drm_sched_fence *fence);
 
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
-void drm_sched_fence_finished(struct drm_sched_fence *fence);
+void drm_sched_fence_finished(struct drm_sched_fence *fence, int result);
 
 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,