| Field | Value |
|---|---|
| Message ID | 20250224164849.3746751-7-anshuman.gupta@intel.com |
| State | New |
| Headers | show |
| Series | VRAM Self Refresh \| expand |
On 24-02-2025 22:18, Anshuman Gupta wrote: > Enabling VRSR in runtime suspend and also in System wide suspend. > Also fix couple of typo in xe_pm.c. > > Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com> > Signed-off-by: Badal Nilawar <badal.nilawar@intel.com> > --- > drivers/gpu/drm/xe/xe_pci.c | 4 +-- > drivers/gpu/drm/xe/xe_pm.c | 49 +++++++++++++++++++++++++++---------- > 2 files changed, 38 insertions(+), 15 deletions(-) > > diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c > index 70b697fde5b9..55b42b3a10d2 100644 > --- a/drivers/gpu/drm/xe/xe_pci.c > +++ b/drivers/gpu/drm/xe/xe_pci.c > @@ -967,7 +967,7 @@ static int xe_pci_suspend(struct device *dev) > > /* > * Enabling D3Cold is needed for S2Idle/S0ix. > - * It is save to allow here since xe_pm_suspend has evicted > + * It is safe to allow here since xe_pm_suspend has evicted > * the local memory and the direct complete optimization is disabled. > */ > d3cold_toggle(pdev, D3COLD_ENABLE); > @@ -983,7 +983,7 @@ static int xe_pci_resume(struct device *dev) > struct pci_dev *pdev = to_pci_dev(dev); > int err; > > - /* Give back the D3Cold decision to the runtime P M*/ > + /* Give back the D3Cold decision to the runtime PM */ > d3cold_toggle(pdev, D3COLD_DISABLE); > > err = pci_set_power_state(pdev, PCI_D0); > diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c > index 6d28aedcb062..5c96f8629a87 100644 > --- a/drivers/gpu/drm/xe/xe_pm.c > +++ b/drivers/gpu/drm/xe/xe_pm.c > @@ -232,10 +232,12 @@ int xe_pm_suspend(struct xe_device *xe) > > xe_display_pm_suspend(xe); > > - /* FIXME: Super racey... */ > - err = xe_bo_evict_all(xe); > - if (err) > - goto err_pxp; > + if (xe->d3cold.allowed == XE_D3COLD_OFF) { > + /* FIXME: Super racey... 
*/ > + err = xe_bo_evict_all(xe); > + if (err) > + goto err_pxp; > + } > > for_each_gt(gt, xe, id) { > err = xe_gt_suspend(gt); > @@ -247,6 +249,12 @@ int xe_pm_suspend(struct xe_device *xe) > > xe_display_pm_suspend_late(xe); > > + if (xe->d3cold.allowed == XE_D3COLD_VRSR) { > + err = xe_pm_enable_vrsr(xe, true); > + if (err) > + goto err_display; > + } > + > drm_dbg(&xe->drm, "Device suspended\n"); > return 0; > > @@ -288,9 +296,11 @@ int xe_pm_resume(struct xe_device *xe) > * This only restores pinned memory which is the memory required for the > * GT(s) to resume. > */ > - err = xe_bo_restore_kernel(xe); > - if (err) > - goto err; > + if (xe->d3cold.allowed == XE_D3COLD_OFF) { > + err = xe_bo_restore_kernel(xe); > + if (err) > + goto err; > + } > > xe_irq_resume(xe); > > @@ -299,9 +309,11 @@ int xe_pm_resume(struct xe_device *xe) > > xe_display_pm_resume(xe); > > - err = xe_bo_restore_user(xe); > - if (err) > - goto err; > + if (xe->d3cold.allowed == XE_D3COLD_OFF) { > + err = xe_bo_restore_user(xe); > + if (err) > + goto err; > + } > > xe_pxp_pm_resume(xe->pxp); > > @@ -543,7 +555,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe) > > xe_display_pm_runtime_suspend(xe); > > - if (xe->d3cold.allowed) { > + if (xe->d3cold.allowed == XE_D3COLD_OFF) { > err = xe_bo_evict_all(xe); > if (err) > goto out_resume; > @@ -559,6 +571,14 @@ int xe_pm_runtime_suspend(struct xe_device *xe) > > xe_display_pm_runtime_suspend_late(xe); > > + if (xe->d3cold.allowed == XE_D3COLD_VRSR) { > + err = xe_pm_enable_vrsr(xe, true); > + if (err) { > + drm_err(&xe->drm, "Failed to enable VRSR: %d\n", err); > + goto out_resume; > + } > + } > + > xe_rpm_lockmap_release(xe); > xe_pm_write_callback_task(xe, NULL); > return 0; > @@ -590,7 +610,7 @@ int xe_pm_runtime_resume(struct xe_device *xe) > > xe_rpm_lockmap_acquire(xe); > > - if (xe->d3cold.allowed) { > + if (xe->d3cold.allowed == XE_D3COLD_OFF) { > err = xe_pcode_ready(xe, true); > if (err) > goto out; > @@ -606,6 +626,9 @@ int 
xe_pm_runtime_resume(struct xe_device *xe) > goto out; > } > > + if (xe->d3cold.allowed == XE_D3COLD_VRSR) > + xe_display_pm_resume_early(xe); > + > xe_irq_resume(xe); > > for_each_gt(gt, xe, id) > @@ -613,7 +636,7 @@ int xe_pm_runtime_resume(struct xe_device *xe) > > xe_display_pm_runtime_resume(xe); > > - if (xe->d3cold.allowed) { > + if (xe->d3cold.allowed == XE_D3COLD_OFF) { > err = xe_bo_restore_user(xe); > if (err) > goto out; LGTM. Acked-by: Karthik Poosa <karthik.poosa@intel.com>
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 70b697fde5b9..55b42b3a10d2 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -967,7 +967,7 @@ static int xe_pci_suspend(struct device *dev) /* * Enabling D3Cold is needed for S2Idle/S0ix. - * It is save to allow here since xe_pm_suspend has evicted + * It is safe to allow here since xe_pm_suspend has evicted * the local memory and the direct complete optimization is disabled. */ d3cold_toggle(pdev, D3COLD_ENABLE); @@ -983,7 +983,7 @@ static int xe_pci_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); int err; - /* Give back the D3Cold decision to the runtime P M*/ + /* Give back the D3Cold decision to the runtime PM */ d3cold_toggle(pdev, D3COLD_DISABLE); err = pci_set_power_state(pdev, PCI_D0); diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 6d28aedcb062..5c96f8629a87 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -232,10 +232,12 @@ int xe_pm_suspend(struct xe_device *xe) xe_display_pm_suspend(xe); - /* FIXME: Super racey... */ - err = xe_bo_evict_all(xe); - if (err) - goto err_pxp; + if (xe->d3cold.allowed == XE_D3COLD_OFF) { + /* FIXME: Super racey... */ + err = xe_bo_evict_all(xe); + if (err) + goto err_pxp; + } for_each_gt(gt, xe, id) { err = xe_gt_suspend(gt); @@ -247,6 +249,12 @@ int xe_pm_suspend(struct xe_device *xe) xe_display_pm_suspend_late(xe); + if (xe->d3cold.allowed == XE_D3COLD_VRSR) { + err = xe_pm_enable_vrsr(xe, true); + if (err) + goto err_display; + } + drm_dbg(&xe->drm, "Device suspended\n"); return 0; @@ -288,9 +296,11 @@ int xe_pm_resume(struct xe_device *xe) * This only restores pinned memory which is the memory required for the * GT(s) to resume. 
*/ - err = xe_bo_restore_kernel(xe); - if (err) - goto err; + if (xe->d3cold.allowed == XE_D3COLD_OFF) { + err = xe_bo_restore_kernel(xe); + if (err) + goto err; + } xe_irq_resume(xe); @@ -299,9 +309,11 @@ int xe_pm_resume(struct xe_device *xe) xe_display_pm_resume(xe); - err = xe_bo_restore_user(xe); - if (err) - goto err; + if (xe->d3cold.allowed == XE_D3COLD_OFF) { + err = xe_bo_restore_user(xe); + if (err) + goto err; + } xe_pxp_pm_resume(xe->pxp); @@ -543,7 +555,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe) xe_display_pm_runtime_suspend(xe); - if (xe->d3cold.allowed) { + if (xe->d3cold.allowed == XE_D3COLD_OFF) { err = xe_bo_evict_all(xe); if (err) goto out_resume; @@ -559,6 +571,14 @@ int xe_pm_runtime_suspend(struct xe_device *xe) xe_display_pm_runtime_suspend_late(xe); + if (xe->d3cold.allowed == XE_D3COLD_VRSR) { + err = xe_pm_enable_vrsr(xe, true); + if (err) { + drm_err(&xe->drm, "Failed to enable VRSR: %d\n", err); + goto out_resume; + } + } + xe_rpm_lockmap_release(xe); xe_pm_write_callback_task(xe, NULL); return 0; @@ -590,7 +610,7 @@ int xe_pm_runtime_resume(struct xe_device *xe) xe_rpm_lockmap_acquire(xe); - if (xe->d3cold.allowed) { + if (xe->d3cold.allowed == XE_D3COLD_OFF) { err = xe_pcode_ready(xe, true); if (err) goto out; @@ -606,6 +626,9 @@ int xe_pm_runtime_resume(struct xe_device *xe) goto out; } + if (xe->d3cold.allowed == XE_D3COLD_VRSR) + xe_display_pm_resume_early(xe); + xe_irq_resume(xe); for_each_gt(gt, xe, id) @@ -613,7 +636,7 @@ int xe_pm_runtime_resume(struct xe_device *xe) xe_display_pm_runtime_resume(xe); - if (xe->d3cold.allowed) { + if (xe->d3cold.allowed == XE_D3COLD_OFF) { err = xe_bo_restore_user(xe); if (err) goto out;