drm/i915/gvt: Move request alloc to dispatch_workload path only
Previously, performance was improved by auditing and shadowing the workload ahead of vGPU scheduling. However, there is a case in which more requests are allocated in submit_context before the previous request has been added; each allocation advances the timeline, so the timeline ends up holding a seqno later than that of the request about to be added.

This patch moves the request allocation into the dispatch_workload path, the same place where the request is added.

This fixes the kernel BUG triggered by the (timeline->seqno != request->fence.seqno) check in add_request.
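
The failure mode can be modeled in a few lines of plain C (hypothetical, simplified types; not the actual i915 code): allocating a request reserves the next seqno from the engine timeline, and adding a request expects the timeline not to have moved past it, mirroring the (timeline->seqno != request->fence.seqno) check. Allocating a second request before the first is added violates that expectation:

    #include <assert.h>

    /* Simplified stand-ins for the i915 timeline and request (hypothetical). */
    struct timeline { unsigned int seqno; };
    struct request { unsigned int fence_seqno; };

    static void request_alloc(struct timeline *tl, struct request *rq)
    {
            rq->fence_seqno = ++tl->seqno;  /* allocation reserves the next seqno */
    }

    static void add_request(struct timeline *tl, struct request *rq)
    {
            /* models the (timeline->seqno != request->fence.seqno) BUG check */
            assert(tl->seqno == rq->fence_seqno);
    }

    int main(void)
    {
            struct timeline tl = { 0 };
            struct request rq1, rq2;

            request_alloc(&tl, &rq1);       /* rq1.fence_seqno = 1 */
            request_alloc(&tl, &rq2);       /* rq2.fence_seqno = 2, before rq1 is added */
            add_request(&tl, &rq1);         /* fires: tl.seqno is 2, rq1 expects 1 */
            return 0;
    }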

Fixes: 89ea20b ("drm/i915/gvt: Factor out scan and shadow from workload dispatch")
Signed-off-by: Chuanxiao Dong <[email protected]>
Signed-off-by: fred gao <[email protected]>
Signed-off-by: Zhenyu Wang <[email protected]>
fred1gao authored and zhenyw committed Nov 16, 2017
1 parent b2d6ef7 commit f2880e0
Showing 1 changed file with 27 additions and 4 deletions.
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -270,7 +270,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ring_id = workload->ring_id;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
 	struct intel_ring *ring;
 	int ret;
 
@@ -315,6 +314,27 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -329,14 +349,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
 	return ret;
 }
 
@@ -496,6 +513,12 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		goto err_unpin_mm;
 	}
 
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
 	ret = prepare_shadow_batch_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
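
With this change, the request is allocated and added back-to-back on the dispatch path, so no other allocation can slip in between and advance the timeline. A rough sketch of the resulting call order, inferred from this diff alone rather than the full source:

    intel_gvt_scan_and_shadow_workload()    /* audit + shadow; sets workload->shadowed, no request alloc */
    dispatch path:
            prepare_workload()
                    ...                             /* earlier steps, unwound via err_unpin_mm on failure */
                    intel_gvt_generate_request()    /* i915_gem_request_alloc() + copy_workload_to_ring_buffer() */
                    prepare_shadow_batch_buffer()
                    ...
            /* the request is then added with the timeline seqno still matching */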
