@@ -4486,12 +4486,20 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
if (!tail) {
- rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
+ /* Stores to cb->fn and cb->param should complete before
+ * cb is visible to data plane.
+ */
+ __atomic_store_n(
+ &rte_eth_devices[port_id].post_rx_burst_cbs[queue_id],
+ cb, __ATOMIC_RELEASE);
} else {
while (tail->next)
tail = tail->next;
- tail->next = cb;
+ /* Stores to cb->fn and cb->param should complete before
+ * cb is visible to data plane.
+ */
+ __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
}
rte_spinlock_unlock(&rte_eth_rx_cb_lock);
@@ -4576,12 +4584,20 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
if (!tail) {
- rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
+ /* Stores to cb->fn and cb->param should complete before
+ * cb is visible to data plane.
+ */
+ __atomic_store_n(
+ &rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id],
+ cb, __ATOMIC_RELEASE);
} else {
while (tail->next)
tail = tail->next;
- tail->next = cb;
+ /* Stores to cb->fn and cb->param should complete before
+ * cb is visible to data plane.
+ */
+ __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
}
rte_spinlock_unlock(&rte_eth_tx_cb_lock);
@@ -4612,7 +4628,7 @@ rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
cb = *prev_cb;
if (cb == user_cb) {
/* Remove the user cb from the callback list. */
- *prev_cb = cb->next;
+ __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
ret = 0;
break;
}
@@ -4646,7 +4662,7 @@ rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
cb = *prev_cb;
if (cb == user_cb) {
/* Remove the user cb from the callback list. */
- *prev_cb = cb->next;
+ __atomic_store_n(prev_cb, cb->next, __ATOMIC_RELAXED);
ret = 0;
break;
}
@@ -3734,7 +3734,8 @@ struct rte_eth_rxtx_callback;
* The callback function
* @param user_param
* A generic pointer parameter which will be passed to each invocation of the
- * callback function on this port and queue.
+ * callback function on this port and queue. Inter-thread synchronization
+ * of any user data changes is the responsibility of the user.
*
* @return
* NULL on error.
@@ -3763,7 +3764,8 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
* The callback function
* @param user_param
* A generic pointer parameter which will be passed to each invocation of the
- * callback function on this port and queue.
+ * callback function on this port and queue. Inter-thread synchronization
+ * of any user data changes is the responsibility of the user.
*
* @return
* NULL on error.
@@ -3791,7 +3793,8 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
* The callback function
* @param user_param
* A generic pointer parameter which will be passed to each invocation of the
- * callback function on this port and queue.
+ * callback function on this port and queue. Inter-thread synchronization
+ * of any user data changes is the responsibility of the user.
*
* @return
* NULL on error.
@@ -3816,7 +3819,9 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
* on that queue.
*
* - After a short delay - where the delay is sufficient to allow any
- * in-flight callbacks to complete.
+ * in-flight callbacks to complete. Alternatively, the RCU mechanism can be
+ * used to detect when data plane threads have ceased referencing the
+ * callback memory.
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -3849,7 +3854,9 @@ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
* on that queue.
*
* - After a short delay - where the delay is sufficient to allow any
- * in-flight callbacks to complete.
+ * in-flight callbacks to complete. Alternatively, the RCU mechanism can be
+ * used to detect when data plane threads have ceased referencing the
+ * callback memory.
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -4510,10 +4517,18 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
rx_pkts, nb_pkts);
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
- struct rte_eth_rxtx_callback *cb =
- dev->post_rx_burst_cbs[queue_id];
+ struct rte_eth_rxtx_callback *cb;
+ /* __ATOMIC_RELEASE memory order was used when the
+ * callback was inserted into the list.
+ * Since there is a data dependency between loading
+ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * not required.
+ */
+ cb = __atomic_load_n(&dev->post_rx_burst_cbs[queue_id],
+ __ATOMIC_RELAXED);
+
+ if (unlikely(cb != NULL)) {
do {
nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
nb_pkts, cb->param);
@@ -4775,7 +4790,16 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
#endif
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- struct rte_eth_rxtx_callback *cb = dev->pre_tx_burst_cbs[queue_id];
+ struct rte_eth_rxtx_callback *cb;
+
+ /* __ATOMIC_RELEASE memory order was used when the
+ * callback was inserted into the list.
+ * Since there is a data dependency between loading
+ * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
+ * not required.
+ */
+ cb = __atomic_load_n(&dev->pre_tx_burst_cbs[queue_id],
+ __ATOMIC_RELAXED);
if (unlikely(cb != NULL)) {
do {