--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -93,8 +93,10 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
struct inet_timewait_death_row *dr,
const int state);
-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo);
+void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
+ struct sock *sk,
+ struct inet_hashinfo *hashinfo,
+ int timeo);
void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
bool rearm);
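
For reference, a minimal sketch of how a caller's sequence changes with this
prototype (distilled from the dccp and tcp hunks below, not an exact copy of
either call site; hashinfo stands for the protocol's hash table, e.g.
&dccp_hashinfo):

	/* Before: the expiry timer is armed first, then the hashdance
	 * publishes tw and consumes the caller's implicit reference.
	 */
	local_bh_disable();
	inet_twsk_schedule(tw, timeo);
	inet_twsk_hashdance(tw, sk, hashinfo);
	local_bh_enable();

	/* After: a single call publishes tw and arms the expiry under the
	 * ehash chain lock; the caller keeps an explicit reference and must
	 * drop it itself.
	 */
	local_bh_disable();
	inet_twsk_hashdance_schedule(tw, sk, hashinfo, timeo);
	inet_twsk_put(tw);
	local_bh_enable();
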
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -58,11 +58,10 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
* we complete the initialization.
*/
local_bh_disable();
- inet_twsk_schedule(tw, timeo);
- /* Linkage updates.
- * Note that access to tw after this point is illegal.
- */
- inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
+ /* Linkage updates. */
+ inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo);
+ /* Access to tw after this point is illegal. */
+ inet_twsk_put(tw);
local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -44,14 +44,14 @@ void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
__sock_put((struct sock *)tw);
}
-/* Must be called with locally disabled BHs. */
-static void inet_twsk_kill(struct inet_timewait_sock *tw)
+static void __inet_twsk_kill(struct inet_timewait_sock *tw, spinlock_t *lock)
+__releases(lock)
{
struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
- spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
struct inet_bind_hashbucket *bhead, *bhead2;
- spin_lock(lock);
+ lockdep_assert_held(lock);
+
sk_nulls_del_node_init_rcu((struct sock *)tw);
spin_unlock(lock);
@@ -71,6 +71,16 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
inet_twsk_put(tw);
}
+/* Must be called with locally disabled BHs. */
+static void inet_twsk_kill(struct inet_timewait_sock *tw)
+{
+ struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+
+ spin_lock(lock);
+ __inet_twsk_kill(tw, lock);
+}
+
void inet_twsk_free(struct inet_timewait_sock *tw)
{
struct module *owner = tw->tw_prot->owner;
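
The split above is the usual conditional lock hand-off pattern: the
__-prefixed helper is entered with the ehash chain lock held and is the one
that releases it, which lets inet_twsk_deschedule_put() further down reuse it
from within its own critical section. A generic sketch of the pattern
(illustration only, not code from this patch):

	static void consume_and_unlock(spinlock_t *lock)
	__releases(lock)
	{
		lockdep_assert_held(lock);
		/* ... work that must stay serialized with other lock holders ... */
		spin_unlock(lock);
	}

	static void caller(spinlock_t *lock, bool do_work)
	{
		spin_lock(lock);
		if (do_work)
			consume_and_unlock(lock);	/* returns with lock released */
		else
			spin_unlock(lock);
	}
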
@@ -97,8 +107,10 @@ static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
* Essentially we whip up a timewait bucket, copy the relevant info into it
* from the SK, and mess with hash chains and list linkage.
*/
-void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
- struct inet_hashinfo *hashinfo)
+void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw,
+ struct sock *sk,
+ struct inet_hashinfo *hashinfo,
+ int timeo)
{
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -129,26 +141,33 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
spin_lock(lock);
+ /* Step 2: Hash TW into tcp ehash chain */
inet_twsk_add_node_rcu(tw, &ehead->chain);
/* Step 3: Remove SK from hash chain */
if (__sk_nulls_del_node_init_rcu(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- spin_unlock(lock);
- /* tw_refcnt is set to 3 because we have :
+ /* Ensure above writes are committed into memory before updating the
+ * refcount.
+ * Provides ordering vs later refcount_inc().
+ */
+ smp_wmb();
+ /* tw_refcnt is set to 4 because we have :
* - one reference for bhash chain.
* - one reference for ehash chain.
* - one reference for timer.
- * We can use atomic_set() because prior spin_lock()/spin_unlock()
- * committed into memory all tw fields.
- * Also note that after this point, we lost our implicit reference
- * so we are not allowed to use tw anymore.
+ * - one reference for ourself (our caller will release it).
*/
- refcount_set(&tw->tw_refcnt, 3);
+ refcount_set(&tw->tw_refcnt, 4);
+
+ inet_twsk_schedule(tw, timeo);
+
+ spin_unlock(lock);
+
}
-EXPORT_SYMBOL_GPL(inet_twsk_hashdance);
+EXPORT_SYMBOL_GPL(inet_twsk_hashdance_schedule);
static void tw_expiry_workfn(struct work_struct *work)
{
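
tw_expiry_workfn(), whose signature appears as context here, is the
delayed_work handler that takes over from the old timer callback
(tw_timer_handler() in mainline). Its body is not part of this excerpt;
assuming it simply defers to inet_twsk_kill(), it would look roughly like
this (hedged sketch, details may differ):

	static void tw_expiry_workfn(struct work_struct *work)
	{
		struct inet_timewait_sock *tw =
			container_of(work, struct inet_timewait_sock,
				     tw_expiry_work.work);

		/* inet_twsk_kill() wants BHs disabled (see its comment above);
		 * from workqueue (process) context that must be done explicitly.
		 */
		local_bh_disable();
		inet_twsk_kill(tw);
		local_bh_enable();
	}
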
@@ -219,8 +238,16 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
*/
void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
{
- if (cancel_delayed_work_sync(&tw->tw_expiry_work))
- inet_twsk_kill(tw);
+ struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
+ spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+
+ spin_lock(lock);
+ if (cancel_delayed_work_sync(&tw->tw_expiry_work)) {
+ /* releases @lock */
+ __inet_twsk_kill(tw, lock);
+ } else {
+ spin_unlock(lock);
+ }
inet_twsk_put(tw);
}
EXPORT_SYMBOL(inet_twsk_deschedule_put);
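
With inet_twsk_hashdance_schedule() and inet_twsk_deschedule_put() now
contending on the same ehash chain lock, arming and cancelling the expiry
work are serialized. A sketch of the intended ordering for the race called
out in the changelog below:

	CPU A: tcp_time_wait()                  CPU B: inet_twsk_deschedule_put()
	  spin_lock(ehash chain lock)             spin_lock(ehash chain lock)  /* spins */
	  hash tw, refcount_set(&tw_refcnt, 4)
	  inet_twsk_schedule(tw, timeo)
	  spin_unlock(ehash chain lock)           /* lock acquired, armed work is visible */
	                                          cancel_delayed_work_sync(&tw->tw_expiry_work)
	                                          __inet_twsk_kill(tw, lock)   /* releases lock */
	                                          inet_twsk_put(tw)
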
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -343,11 +343,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
* we complete the initialization.
*/
local_bh_disable();
- inet_twsk_schedule(tw, timeo);
- /* Linkage updates.
- * Note that access to tw after this point is illegal.
- */
- inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
+ /* Linkage updates. */
+ inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
+ /* Access to tw after this point is illegal. */
+ inet_twsk_put(tw);
local_bh_enable();
} else {
/* Sorry, if we're out of memory, just CLOSE this
Context
=======

The TCP timewait timer is proving to be problematic for setups where
scheduler CPU isolation is achieved at runtime via cpusets (as opposed to
statically via isolcpus=domains).

What happens there is a CPU goes through tcp_time_wait(), arming the
time_wait timer, then gets isolated. TCP_TIMEWAIT_LEN later, the timer
fires, causing interference for the now-isolated CPU. This is conceptually
similar to the issue described in
  e02b93124855 ("workqueue: Unbind kworkers before sending them to exit()")

Changes
=======

Move inet_twsk_schedule() to within inet_twsk_hashdance(), with the ehash
lock held. Expand the lock's critical section from inet_twsk_kill() to
inet_twsk_deschedule_put(), serializing the scheduling vs descheduling of
the timer. IOW, this prevents the following race:

  tcp_time_wait()
    inet_twsk_hashdance()
                                 inet_twsk_deschedule_put()
                                   del_timer_sync()
    inet_twsk_schedule()

Lastly, to prevent the ehash lock from being taken both in timer context
and in timer deletion context (which leads to deadlocks), move the
inet_twsk_kill() from timer expiry context to workqueue context by
changing the tw_timer to a delayed_work.

Thanks to Paolo for suggesting to leverage the ehash lock.

Note on partial reverts
=======================

This partially reverts
  ec94c2696f0b ("tcp/dccp: avoid one atomic operation for timewait hashdance")

This also reinstates a comment removed by
  ec94c2696f0b ("tcp/dccp: avoid one atomic operation for timewait hashdance")
as inet_twsk_hashdance() had a "Step 1" and a "Step 3" comment, but the
"Step 2" had gone missing.

Link: https://lore.kernel.org/all/ZPhpfMjSiHVjQkTk@localhost.localdomain/
Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 include/net/inet_timewait_sock.h |  6 ++--
 net/dccp/minisocks.c             |  9 +++--
 net/ipv4/inet_timewait_sock.c    | 59 +++++++++++++++++++++++---------
 net/ipv4/tcp_minisocks.c         |  9 +++--
 4 files changed, 55 insertions(+), 28 deletions(-)
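
For illustration, the tw_timer to delayed_work conversion referred to above
could take roughly the following shape. The field and handler names
(tw_expiry_work, tw_expiry_workfn) are the ones visible in the diff, but the
choice of workqueue (system_unbound_wq here, which keeps the expiry off
isolated CPUs) and the rearm handling are assumptions, not necessarily what
this series does; reference counting for the work's reference is elided.

	/* In inet_twsk_alloc(), in place of the old timer_setup(): */
	INIT_DELAYED_WORK(&tw->tw_expiry_work, tw_expiry_workfn);

	/* And __inet_twsk_schedule() queues that work instead of arming a timer. */
	void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
	{
		if (!rearm)
			queue_delayed_work(system_unbound_wq, &tw->tw_expiry_work, timeo);
		else
			mod_delayed_work(system_unbound_wq, &tw->tw_expiry_work, timeo);
	}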