diff mbox series

[v2,1/2] scsi: ufs: core: Introduce a new clock_gating lock

Message ID 20241029102938.685835-2-avri.altman@wdc.com
State Superseded
Headers show
Series Untie the host lock entanglement - part 2 | expand

Commit Message

Avri Altman Oct. 29, 2024, 10:29 a.m. UTC
Introduce a new clock gating lock to serialize access to some of the
clock gating members instead of the host_lock.

While at it, simplify the code with the guard() macro and co for
automatic cleanup of the new lock. There are some explicit
spin_lock_irqsave/spin_unlock_irqrestore snaking instances I left behind
because I couldn't make heads or tails of them.

Signed-off-by: Avri Altman <avri.altman@wdc.com>
---
 drivers/ufs/core/ufshcd.c | 97 ++++++++++++++++-----------------------
 include/ufs/ufshcd.h      |  8 +++-
 2 files changed, 46 insertions(+), 59 deletions(-)

Comments

Bart Van Assche Oct. 29, 2024, 6:11 p.m. UTC | #1
On 10/29/24 3:29 AM, Avri Altman wrote:
> +	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
> +		/*
> +		 * In case you are here to cancel this work the gating state
> +		 * would be marked as REQ_CLKS_ON. In this case save time by
> +		 * skipping the gating work and exit after changing the clock
> +		 * state to CLKS_ON.
> +		 */
> +		if (hba->clk_gating.is_suspended || (hba->clk_gating.state != REQ_CLKS_OFF)) {
> +			hba->clk_gating.state = CLKS_ON;
> +			trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
> +			return;
> +		}
> +		if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
> +			return;
>   	}

Please remove the superfluous parentheses from around the REQ_CLKS_OFF 
test and do not exceed the 80 column limit. git clang-format HEAD^ can
help with restricting code to the 80 column limit.

> @@ -2072,18 +2055,18 @@ static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
>   
>   	value = !!value;
>   
> -	spin_lock_irqsave(hba->host->host_lock, flags);
> -	if (value == hba->clk_gating.is_enabled)
> -		goto out;
> +	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
> +		if (value == hba->clk_gating.is_enabled)
> +			goto out;
>   
> -	if (value)
> -		__ufshcd_release(hba);
> -	else
> -		hba->clk_gating.active_reqs++;
> +		if (value)
> +			__ufshcd_release(hba);
> +		else
> +			hba->clk_gating.active_reqs++;
>   
> -	hba->clk_gating.is_enabled = value;
> +		hba->clk_gating.is_enabled = value;
> +	}
>   out:
> -	spin_unlock_irqrestore(hba->host->host_lock, flags);
>   	return count;
>   }

Please use guard() instead of scoped_guard() and remove the "out:"
label.

> @@ -9173,11 +9157,10 @@ static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
>   				clk_disable_unprepare(clki->clk);
>   		}
>   	} else if (!ret && on) {
> -		spin_lock_irqsave(hba->host->host_lock, flags);
> -		hba->clk_gating.state = CLKS_ON;
> +		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
> +			hba->clk_gating.state = CLKS_ON;
>   		trace_ufshcd_clk_gating(dev_name(hba->dev),
>   					hba->clk_gating.state);
> -		spin_unlock_irqrestore(hba->host->host_lock, flags);
>   	}

The above change moves the trace_ufshcd_clk_gating() call from inside
the region protected by the host lock to outside the region protected
by clk_gating.lock. If this is intentional, shouldn't this be mentioned
in the patch description?

Thanks,

Bart.
Avri Altman Oct. 29, 2024, 6:39 p.m. UTC | #2
> On 10/29/24 3:29 AM, Avri Altman wrote:
> > +     scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
> > +             /*
> > +              * In case you are here to cancel this work the gating state
> > +              * would be marked as REQ_CLKS_ON. In this case save time by
> > +              * skipping the gating work and exit after changing the clock
> > +              * state to CLKS_ON.
> > +              */
> > +             if (hba->clk_gating.is_suspended || (hba->clk_gating.state !=
> REQ_CLKS_OFF)) {
> > +                     hba->clk_gating.state = CLKS_ON;
> > +                     trace_ufshcd_clk_gating(dev_name(hba->dev), hba-
> >clk_gating.state);
> > +                     return;
> > +             }
> > +             if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state !=
> UFSHCD_STATE_OPERATIONAL)
> > +                     return;
> >       }
> 
> Please remove the superfluous parentheses from around the REQ_CLKS_OFF
> test 
OK.
But this is a formatting change made alongside a functional change.

> and do not exceed the 80 column limit. git clang-format HEAD^ can help
> with restricting code to the 80 column limit.
Wasn't the 80-character restriction changed long ago to 100 characters?
I always use strict checkpatch and don't get any warning about this.

> 
> > @@ -2072,18 +2055,18 @@ static ssize_t
> > ufshcd_clkgate_enable_store(struct device *dev,
> >
> >       value = !!value;
> >
> > -     spin_lock_irqsave(hba->host->host_lock, flags);
> > -     if (value == hba->clk_gating.is_enabled)
> > -             goto out;
> > +     scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
> > +             if (value == hba->clk_gating.is_enabled)
> > +                     goto out;
> >
> > -     if (value)
> > -             __ufshcd_release(hba);
> > -     else
> > -             hba->clk_gating.active_reqs++;
> > +             if (value)
> > +                     __ufshcd_release(hba);
> > +             else
> > +                     hba->clk_gating.active_reqs++;
> >
> > -     hba->clk_gating.is_enabled = value;
> > +             hba->clk_gating.is_enabled = value;
> > +     }
> >   out:
> > -     spin_unlock_irqrestore(hba->host->host_lock, flags);
> >       return count;
> >   }
> 
> Please use guard() instead of scoped_guard() and remove the "out:"
> label.
Done.

> 
> > @@ -9173,11 +9157,10 @@ static int ufshcd_setup_clocks(struct ufs_hba
> *hba, bool on)
> >                               clk_disable_unprepare(clki->clk);
> >               }
> >       } else if (!ret && on) {
> > -             spin_lock_irqsave(hba->host->host_lock, flags);
> > -             hba->clk_gating.state = CLKS_ON;
> > +             scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
> > +                     hba->clk_gating.state = CLKS_ON;
> >               trace_ufshcd_clk_gating(dev_name(hba->dev),
> >                                       hba->clk_gating.state);
> > -             spin_unlock_irqrestore(hba->host->host_lock, flags);
> >       }
> 
> The above change moves the trace_ufshcd_clk_gating() call from inside the
> region protected by the host lock to outside the region protected by
> clk_gating.lock. If this is intentional, shouldn't this be mentioned in the patch
> description?
Yes. Intentional.
Done.

Thanks,
Avri

> 
> Thanks,
> 
> Bart.
Bart Van Assche Oct. 29, 2024, 7:12 p.m. UTC | #3
On 10/29/24 11:39 AM, Avri Altman wrote:
>> On 10/29/24 3:29 AM, Avri Altman wrote:
>> and do not exceed the 80 column limit. git clang-format HEAD^ can help
>> with restricting code to the 80 column limit.
> Isn't the 80 characters restriction was changed long ago to 100 characters?
> I always use strict checkpatch and doesn't get any warning about this.

80 columns is the limit currently used in the UFS driver so I think we
should stick to this limit to keep formatting of the UFS driver
consistent. Here are two additional arguments:
* From Documentation/process/coding-style.rst:

   The preferred limit on the length of a single line is 80 columns.

* From .clang-format:

   ColumnLimit: 80


Thanks,

Bart.
Bart Van Assche Oct. 29, 2024, 7:20 p.m. UTC | #4
On 10/29/24 11:39 AM, Avri Altman wrote:
> But this is a format change while making functional change.

I think it's common to fix code style issues if code has to be modified
for one reason or another, in this case the indentation level?

Thanks,

Bart.
Avri Altman Oct. 29, 2024, 9:32 p.m. UTC | #5
> On 10/29/24 11:39 AM, Avri Altman wrote:
> > But this is a format change while making functional change.
> 
> I think it's common to fix code style issues if code has to be modified for one
> reason or another, in this case the indentation level?
Done.

Thanks,
Avri

> 
> Thanks,
> 
> Bart.
Avri Altman Oct. 29, 2024, 9:33 p.m. UTC | #6
> On 10/29/24 11:39 AM, Avri Altman wrote:
> >> On 10/29/24 3:29 AM, Avri Altman wrote:
> >> and do not exceed the 80 column limit. git clang-format HEAD^ can
> >> help with restricting code to the 80 column limit.
> > Isn't the 80 characters restriction was changed long ago to 100 characters?
> > I always use strict checkpatch and doesn't get any warning about this.
> 
> 80 columns is the limit currently used in the UFS driver so I think we should stick
> to this limit to keep formatting of the UFS driver consistent. Here are two
> additional arguments:
> * From Documentation/process/coding-style.rst:
> 
>    The preferred limit on the length of a single line is 80 columns.
> 
> * From .clang-format:
> 
>    ColumnLimit: 80
Done.

Thanks,
Avri

> 
> 
> Thanks,
> 
> Bart.
diff mbox series

Patch

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 099373a25017..3373debe7b94 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1811,19 +1811,16 @@  static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
-	unsigned long flags;
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.ungate_work);
 
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.state == CLKS_ON) {
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-		return;
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		if (hba->clk_gating.state == CLKS_ON)
+			return;
 	}
 
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_setup_clocks(hba, true);
 
@@ -1858,7 +1855,7 @@  void ufshcd_hold(struct ufs_hba *hba)
 	if (!ufshcd_is_clkgating_allowed(hba) ||
 	    !hba->clk_gating.is_initialized)
 		return;
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	spin_lock_irqsave(&hba->clk_gating.lock, flags);
 	hba->clk_gating.active_reqs++;
 
 start:
@@ -1874,11 +1871,11 @@  void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		if (ufshcd_can_hibern8_during_gating(hba) &&
 		    ufshcd_is_link_hibern8(hba)) {
-			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 			flush_result = flush_work(&hba->clk_gating.ungate_work);
 			if (hba->clk_gating.is_suspended && !flush_result)
 				return;
-			spin_lock_irqsave(hba->host->host_lock, flags);
+			spin_lock_irqsave(&hba->clk_gating.lock, flags);
 			goto start;
 		}
 		break;
@@ -1907,17 +1904,17 @@  void ufshcd_hold(struct ufs_hba *hba)
 		 */
 		fallthrough;
 	case REQ_CLKS_ON:
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 		flush_work(&hba->clk_gating.ungate_work);
 		/* Make sure state is CLKS_ON before returning */
-		spin_lock_irqsave(hba->host->host_lock, flags);
+		spin_lock_irqsave(&hba->clk_gating.lock, flags);
 		goto start;
 	default:
 		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
 				__func__, hba->clk_gating.state);
 		break;
 	}
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	spin_unlock_irqrestore(&hba->clk_gating.lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_hold);
 
@@ -1925,29 +1922,24 @@  static void ufshcd_gate_work(struct work_struct *work)
 {
 	struct ufs_hba *hba = container_of(work, struct ufs_hba,
 			clk_gating.gate_work.work);
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	/*
-	 * In case you are here to cancel this work the gating state
-	 * would be marked as REQ_CLKS_ON. In this case save time by
-	 * skipping the gating work and exit after changing the clock
-	 * state to CLKS_ON.
-	 */
-	if (hba->clk_gating.is_suspended ||
-		(hba->clk_gating.state != REQ_CLKS_OFF)) {
-		hba->clk_gating.state = CLKS_ON;
-		trace_ufshcd_clk_gating(dev_name(hba->dev),
-					hba->clk_gating.state);
-		goto rel_lock;
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		/*
+		 * In case you are here to cancel this work the gating state
+		 * would be marked as REQ_CLKS_ON. In this case save time by
+		 * skipping the gating work and exit after changing the clock
+		 * state to CLKS_ON.
+		 */
+		if (hba->clk_gating.is_suspended || (hba->clk_gating.state != REQ_CLKS_OFF)) {
+			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+			return;
+		}
+		if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+			return;
 	}
 
-	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
-		goto rel_lock;
-
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		ret = ufshcd_uic_hibern8_enter(hba);
@@ -1957,7 +1949,7 @@  static void ufshcd_gate_work(struct work_struct *work)
 					__func__, ret);
 			trace_ufshcd_clk_gating(dev_name(hba->dev),
 						hba->clk_gating.state);
-			goto out;
+			return;
 		}
 		ufshcd_set_link_hibern8(hba);
 	}
@@ -1977,16 +1969,12 @@  static void ufshcd_gate_work(struct work_struct *work)
 	 * prevent from doing cancel work multiple times when there are
 	 * new requests arriving before the current cancel work is done.
 	 */
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
 	}
-rel_lock:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
-	return;
 }
 
 /* host lock must be held before calling this variant */
@@ -2013,11 +2001,9 @@  static void __ufshcd_release(struct ufs_hba *hba)
 
 void ufshcd_release(struct ufs_hba *hba)
 {
-	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	__ufshcd_release(hba);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_release);
 
@@ -2032,11 +2018,9 @@  static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
 void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
+	guard(spinlock_irqsave)(&hba->clk_gating.lock);
 	hba->clk_gating.delay_ms = value;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
 
@@ -2064,7 +2048,6 @@  static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
-	unsigned long flags;
 	u32 value;
 
 	if (kstrtou32(buf, 0, &value))
@@ -2072,18 +2055,18 @@  static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
 
 	value = !!value;
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (value == hba->clk_gating.is_enabled)
-		goto out;
+	scoped_guard(spinlock_irqsave, &hba->clk_gating.lock) {
+		if (value == hba->clk_gating.is_enabled)
+			goto out;
 
-	if (value)
-		__ufshcd_release(hba);
-	else
-		hba->clk_gating.active_reqs++;
+		if (value)
+			__ufshcd_release(hba);
+		else
+			hba->clk_gating.active_reqs++;
 
-	hba->clk_gating.is_enabled = value;
+		hba->clk_gating.is_enabled = value;
+	}
 out:
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	return count;
 }
 
@@ -2125,6 +2108,8 @@  static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
 	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
 
+	spin_lock_init(&hba->clk_gating.lock);
+
 	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
 		"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
 		hba->host->host_no);
@@ -9122,7 +9107,6 @@  static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
-	unsigned long flags;
 	ktime_t start = ktime_get();
 	bool clk_state_changed = false;
 
@@ -9173,11 +9157,10 @@  static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
 				clk_disable_unprepare(clki->clk);
 		}
 	} else if (!ret && on) {
-		spin_lock_irqsave(hba->host->host_lock, flags);
-		hba->clk_gating.state = CLKS_ON;
+		scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
+			hba->clk_gating.state = CLKS_ON;
 		trace_ufshcd_clk_gating(dev_name(hba->dev),
 					hba->clk_gating.state);
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
 	}
 
 	if (clk_state_changed)
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 9ea2a7411bb5..3d42532efbd1 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -402,6 +402,8 @@  enum clk_gating_state {
  * delay_ms
  * @ungate_work: worker to turn on clocks that will be used in case of
  * interrupt context
+ * @clk_gating_workq: workqueue for clock gating work.
+ * @lock: serialize access to some struct ufs_clk_gating members
  * @state: the current clocks state
  * @delay_ms: gating delay in ms
  * @is_suspended: clk gating is suspended when set to 1 which can be used
@@ -412,11 +414,14 @@  enum clk_gating_state {
  * @is_initialized: Indicates whether clock gating is initialized or not
  * @active_reqs: number of requests that are pending and should be waited for
  * completion before gating clocks.
- * @clk_gating_workq: workqueue for clock gating work.
  */
 struct ufs_clk_gating {
 	struct delayed_work gate_work;
 	struct work_struct ungate_work;
+	struct workqueue_struct *clk_gating_workq;
+
+	spinlock_t lock;
+
 	enum clk_gating_state state;
 	unsigned long delay_ms;
 	bool is_suspended;
@@ -425,7 +430,6 @@  struct ufs_clk_gating {
 	bool is_enabled;
 	bool is_initialized;
 	int active_reqs;
-	struct workqueue_struct *clk_gating_workq;
 };
 
 /**