@@ -607,22 +607,12 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
#endif

/* The TLS context is valid until sk_destruct is called */
-#define RESYNC_REQ (1 << 0)
-#define RESYNC_REQ_FORCE (1 << 1)
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

- atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
-}
-
-static inline void tls_offload_rx_force_resync_request(struct sock *sk)
-{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
- atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE);
+ atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}

static inline void
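For context on the header change above: resync_req is a single 64-bit word whose upper 32 bits carry the TCP sequence number the driver wants to resynchronize at (already converted with ntohl()), while bit 0 marks the request as pending; the removed force variant carried no sequence and wrote only the two flag bits, and with it gone the setter simply ORs in the literal 1. A minimal user-space sketch of that packing, using a plain uint64_t as a stand-in for the kernel's atomic64_t (the constant and variable names below are illustrative, not kernel API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl(), htonl() */

#define RESYNC_REQ_PENDING	1ULL	/* bit 0 of the packed word */

int main(void)
{
	uint32_t seq_be = htonl(0x12345678);	/* sequence as delivered by the NIC (big endian) */

	/* Same packing as tls_offload_rx_resync_request(): sequence in
	 * the high 32 bits, "request pending" in bit 0. */
	uint64_t word = ((uint64_t)ntohl(seq_be) << 32) | RESYNC_REQ_PENDING;

	printf("packed word : 0x%016" PRIx64 "\n", word);
	printf("req_seq     : 0x%08" PRIx32 "\n", (uint32_t)(word >> 32));
	printf("pending     : %u\n", (unsigned)(word & RESYNC_REQ_PENDING));
	return 0;
}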
@@ -694,11 +694,10 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
- bool is_req_pending, is_force_resync;
u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+ u32 sock_data, is_req_pending;
struct tls_prot_info *prot;
s64 resync_req;
- u32 sock_data;
u32 req_seq;

if (tls_ctx->rx_conf != TLS_HW)
@@ -713,11 +712,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
resync_req = atomic64_read(&rx_ctx->resync_req);
req_seq = resync_req >> 32;
seq += TLS_HEADER_SIZE - 1;
- is_req_pending = resync_req & RESYNC_REQ;
- is_force_resync = resync_req & RESYNC_REQ_FORCE;
+ is_req_pending = resync_req;

- if (likely(!is_req_pending) ||
- (!is_force_resync && req_seq != seq) ||
+ if (likely(!is_req_pending) || req_seq != seq ||
!atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
return;
break;
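The receive-path change above reduces the driver-request branch to three conditions: a request is pending (is_req_pending now just holds the low word of resync_req), the new record starts at the requested sequence, and the cmpxchg that clears resync_req succeeds, so only one record can retire a given request even if the driver re-arms it concurrently. A self-contained C11-atomics model of that handshake, with user-space stand-ins for atomic64_set()/atomic64_try_cmpxchg(); byte-order conversion and the TLS_HEADER_SIZE - 1 adjustment are left out, and the helper names are illustrative only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESYNC_REQ_PENDING	1ULL		/* bit 0 of the packed word */

static _Atomic uint64_t resync_req;		/* stand-in for rx_ctx->resync_req */

/* Driver side, cf. tls_offload_rx_resync_request(): publish "resync at
 * TCP sequence 'seq'" as one atomic 64-bit store. */
static void resync_request(uint32_t seq)
{
	atomic_store(&resync_req, ((uint64_t)seq << 32) | RESYNC_REQ_PENDING);
}

/* Stack side, cf. the TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ branch of
 * tls_device_rx_resync_new_rec(): act only when a request is pending,
 * the record starts at the requested sequence, and we win the
 * compare-and-swap that retires the request. */
static bool resync_consume(uint32_t seq)
{
	uint64_t req = atomic_load(&resync_req);
	uint32_t req_seq = (uint32_t)(req >> 32);

	if (!(req & RESYNC_REQ_PENDING) || req_seq != seq)
		return false;
	return atomic_compare_exchange_strong(&resync_req, &req, 0);
}

int main(void)
{
	resync_request(1000);
	printf("seq 2000 consumed: %d (sequence mismatch)\n", resync_consume(2000));
	printf("seq 1000 consumed: %d\n", resync_consume(1000));
	printf("seq 1000 consumed: %d (already retired)\n", resync_consume(1000));
	return 0;
}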