@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
-#include <linux/seqlock.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/linkmode.h>
@@ -34,7 +33,6 @@ struct fixed_mdio_bus {
struct fixed_phy {
int addr;
struct phy_device *phydev;
- seqcount_t seqcount;
struct fixed_phy_status status;
bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
@@ -80,19 +78,17 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
list_for_each_entry(fp, &fmb->phys, node) {
if (fp->addr == phy_addr) {
struct fixed_phy_status state;
- int s;
-
- do {
- s = read_seqcount_begin(&fp->seqcount);
- fp->status.link = !fp->no_carrier;
- /* Issue callback if user registered it. */
- if (fp->link_update)
- fp->link_update(fp->phydev->attached_dev,
- &fp->status);
- /* Check the GPIO for change in status */
- fixed_phy_update(fp);
- state = fp->status;
- } while (read_seqcount_retry(&fp->seqcount, s));
+
+ fp->status.link = !fp->no_carrier;
+
+ /* Issue callback if user registered it. */
+ if (fp->link_update)
+ fp->link_update(fp->phydev->attached_dev,
+ &fp->status);
+
+ /* Check the GPIO for change in status */
+ fixed_phy_update(fp);
+ state = fp->status;
return swphy_read_reg(reg_num, &state);
}
@@ -150,8 +146,6 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
if (!fp)
return -ENOMEM;
- seqcount_init(&fp->seqcount);
-
if (irq != PHY_POLL)
fmb->mii_bus->irq[phy_addr] = irq;
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -299,18 +299,4 @@ do { \
__ret; \
})
-#define __swait_event_lock_irq(wq, condition, lock, cmd) \
- ___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
- raw_spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- raw_spin_lock_irq(&lock))
-
-#define swait_event_lock_irq(wq_head, condition, lock) \
- do { \
- if (condition) \
- break; \
- __swait_event_lock_irq(wq_head, condition, lock, ); \
- } while (0)
-
#endif /* _LINUX_SWAIT_H */
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -1616,15 +1616,15 @@ void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
}
EXPORT_SYMBOL(_down_write_nest_lock);
-#ifndef CONFIG_PREEMPT_RT
void down_read_non_owner(struct rw_semaphore *sem)
{
might_sleep();
__down_read(sem);
+#ifndef CONFIG_PREEMPT_RT
__rwsem_set_reader_owned(sem, NULL);
+#endif
}
EXPORT_SYMBOL(down_read_non_owner);
-#endif
void down_write_nested(struct rw_semaphore *sem, int subclass)
{
@@ -1649,13 +1649,13 @@ int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
}
EXPORT_SYMBOL(down_write_killable_nested);
-#ifndef CONFIG_PREEMPT_RT
void up_read_non_owner(struct rw_semaphore *sem)
{
+#ifndef CONFIG_PREEMPT_RT
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+#endif
__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);
-#endif
#endif
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -494,8 +494,8 @@ static void sigqueue_free_current(struct sigqueue *q)
up = q->user;
if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
- atomic_dec(&up->sigpending);
- free_uid(up);
+ if (atomic_dec_and_test(&up->sigpending))
+ free_uid(up);
} else
__sigqueue_free(q);
}
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -50,7 +50,6 @@
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>
-#include <linux/swait.h>
#include "workqueue_internal.h"
@@ -302,7 +301,8 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_RAW_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
-static DECLARE_SWAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+/* wait for manager to go away */
+static struct rcuwait manager_wait = __RCUWAIT_INITIALIZER(manager_wait);
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -1615,11 +1615,9 @@ EXPORT_SYMBOL_GPL(queue_work_node);
void delayed_work_timer_fn(struct timer_list *t)
{
struct delayed_work *dwork = from_timer(dwork, t, timer);
- unsigned long flags;
- local_irq_save(flags);
+ /* should have been called from irqsafe timer with irq already off */
__queue_work(dwork->cpu, dwork->wq, &dwork->work);
- local_irq_restore(flags);
}
EXPORT_SYMBOL(delayed_work_timer_fn);
@@ -2147,7 +2145,7 @@ static bool manage_workers(struct worker *worker)
pool->manager = NULL;
pool->flags &= ~POOL_MANAGER_ACTIVE;
- swake_up_one(&wq_manager_wait);
+ rcuwait_wake_up(&manager_wait);
return true;
}
@@ -3511,6 +3509,18 @@ static void rcu_free_pool(struct rcu_head *rcu)
kfree(pool);
}
+/* This returns with the lock held on success (pool manager is inactive). */
+static bool wq_manager_inactive(struct worker_pool *pool)
+{
+ raw_spin_lock_irq(&pool->lock);
+
+ if (pool->flags & POOL_MANAGER_ACTIVE) {
+ raw_spin_unlock_irq(&pool->lock);
+ return false;
+ }
+ return true;
+}
+
/**
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
@@ -3546,10 +3556,10 @@ static void put_unbound_pool(struct worker_pool *pool)
* Become the manager and destroy all workers. This prevents
* @pool's workers from blocking on attach_mutex. We're the last
* manager and @pool gets freed with the flag set.
+	 * Because of how wq_manager_inactive() works, we will hold the
+	 * spinlock after a successful wait.
*/
- raw_spin_lock_irq(&pool->lock);
- swait_event_lock_irq(wq_manager_wait,
- !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ rcuwait_wait_event(&manager_wait, wq_manager_inactive(pool));
pool->flags |= POOL_MANAGER_ACTIVE;
while ((worker = first_idle_worker(pool)))
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@
--rt32
+-rt33
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -64,15 +64,13 @@ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
{
struct sock *sk = d->owner, *parent;
- unsigned long flags;
if (!sk)
return;
BT_DBG("dlc %p state %ld err %d", d, d->state, err);
- local_irq_save(flags);
- bh_lock_sock(sk);
+ spin_lock_bh(&sk->sk_lock.slock);
if (err)
sk->sk_err = err;
@@ -93,8 +91,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
sk->sk_state_change(sk);
}
- bh_unlock_sock(sk);
- local_irq_restore(flags);
+ spin_unlock_bh(&sk->sk_lock.slock);
if (parent && sock_flag(sk, SOCK_ZAPPED)) {
/* We have to drop DLC lock here, otherwise