-
Notifications
You must be signed in to change notification settings - Fork 122
VE2 drm scheduler enable #1276
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
VE2 drm scheduler enable #1276
Changes from 1 commit
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -263,7 +263,7 @@ static void ve2_job_release(struct kref *ref) | |||||
| amdxdna_sched_job_cleanup(job); | ||||||
| } | ||||||
|
|
||||||
| static void ve2_job_put(struct amdxdna_sched_job *job) | ||||||
| void ve2_job_put(struct amdxdna_sched_job *job) | ||||||
| { | ||||||
| kref_put(&job->refcnt, ve2_job_release); | ||||||
| } | ||||||
|
|
@@ -343,7 +343,11 @@ static inline void ve2_hwctx_job_release_locked(struct amdxdna_ctx *hwctx, | |||||
| */ | ||||||
| // Reset the pending list | ||||||
| priv_ctx->pending[get_job_idx(job->seq)] = NULL; | ||||||
| ve2_job_put(job); | ||||||
|
|
||||||
| /* Signal DRM scheduler fence */ | ||||||
| if (job->fence) | ||||||
| dma_fence_signal(job->fence); | ||||||
|
|
||||||
| mutex_unlock(&priv_ctx->privctx_lock); | ||||||
| } | ||||||
|
|
||||||
|
|
@@ -975,8 +979,21 @@ int ve2_cmd_submit(struct amdxdna_sched_job *job, u32 *syncobj_hdls, | |||||
|
|
||||||
| XDNA_DBG(xdna, "hwctx %p cmd submitted: seq=%llu, total_submitted=%llu", | ||||||
| hwctx, *seq, hwctx->submitted); | ||||||
| /* command_index = read_index when this job completes (last_slot + 1) */ | ||||||
| ve2_mgmt_schedule_cmd(xdna, hwctx, *seq + 1); | ||||||
|
|
||||||
| /* DRM scheduler - initialize and push job */ | ||||||
| ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, NULL); | ||||||
| if (ret) { | ||||||
| return ret; | ||||||
| } | ||||||
|
||||||
|
|
||||||
| /* Arm job - this creates the scheduler fence */ | ||||||
| drm_sched_job_arm(&job->base); | ||||||
|
|
||||||
| /* Set out_fence to the scheduler's finished fence */ | ||||||
| job->out_fence = dma_fence_get(&job->base.s_fence->finished); | ||||||
|
|
||||||
| /* Push to DRM scheduler - run_job callback will handle scheduling */ | ||||||
| drm_sched_entity_push_job(&job->base); | ||||||
|
|
||||||
| trace_amdxdna_trace_point("XRT_PROFILING_TRACE_EXIT", | ||||||
| hwctx->client->pid, hwctx->priv->start_col, | ||||||
|
|
@@ -1403,6 +1420,7 @@ int ve2_hwctx_init(struct amdxdna_ctx *hwctx) | |||||
| struct amdxdna_client *client = hwctx->client; | ||||||
| struct amdxdna_dev *xdna = client->xdna; | ||||||
| struct amdxdna_ctx_priv *priv = NULL; | ||||||
| u32 hwctx_id; | ||||||
| int ret; | ||||||
|
|
||||||
| priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||||||
|
|
@@ -1411,19 +1429,15 @@ int ve2_hwctx_init(struct amdxdna_ctx *hwctx) | |||||
|
|
||||||
| hwctx->priv = priv; | ||||||
|
|
||||||
| /* Allocate unique ID using XArray (thread-safe, supports ID recycling) */ | ||||||
| { | ||||||
| u32 hwctx_id; | ||||||
|
|
||||||
| ret = xa_alloc_cyclic(&xdna->dev_handle->hwctx_ids, &hwctx_id, priv, | ||||||
| XA_LIMIT(1, U32_MAX), | ||||||
| &xdna->dev_handle->next_hwctx_id, GFP_KERNEL); | ||||||
| if (ret < 0) { | ||||||
| XDNA_ERR(xdna, "Failed to allocate hwctx ID, ret=%d", ret); | ||||||
| goto cleanup_priv; | ||||||
| } | ||||||
| priv->id = hwctx_id; | ||||||
| } | ||||||
| /* Allocate unique ID using XArray (thread-safe, supports ID recycling) */ | ||||||
| ret = xa_alloc_cyclic(&xdna->dev_handle->hwctx_ids, &hwctx_id, priv, | ||||||
| XA_LIMIT(1, U32_MAX), | ||||||
| &xdna->dev_handle->next_hwctx_id, GFP_KERNEL); | ||||||
| if (ret < 0) { | ||||||
| XDNA_ERR(xdna, "Failed to allocate hwctx ID, ret=%d", ret); | ||||||
| goto cleanup_priv; | ||||||
| } | ||||||
| priv->id = hwctx_id; | ||||||
|
Comment on lines
1466
to
+1483
|
||||||
|
|
||||||
| trace_amdxdna_trace_point("XRT_PROFILING_TRACE_ENTER", | ||||||
| client->pid, 0, priv->id, 0); | ||||||
|
|
@@ -1460,6 +1474,26 @@ int ve2_hwctx_init(struct amdxdna_ctx *hwctx) | |||||
| mutex_init(&priv->privctx_lock); | ||||||
| priv->state = AMDXDNA_HWCTX_STATE_IDLE; | ||||||
|
|
||||||
| /* Initialize DRM scheduler entity */ | ||||||
| { | ||||||
| struct amdxdna_mgmtctx *mgmtctx = &xdna->dev_handle->ve2_mgmtctx[hwctx->start_col]; | ||||||
|
|
||||||
| if (!mgmtctx) | ||||||
| goto cleanup_xrs; | ||||||
|
|
||||||
|
||||||
| if (!mgmtctx) | |
| goto cleanup_xrs; |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The job completion fence is being signaled here, but this helper is only called from the synchronous wait path (
ve2_cmd_wait) and cleanup paths—not from the IRQ handler. With DRM scheduler enabled, this means jobs may never complete from the scheduler’s perspective unless userspace calls cmd_wait, leading to scheduler stalls/timeouts. Fence signaling (and any job-done notification required by drm_sched) should happen from the actual completion event path (IRQ / polling work) independent of userspace waiting.