@@ -461,7 +461,7 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
__skb_queue_tail(&dev->done, skb);
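+ /* only the empty->non-empty transition needs to kick bh_work;
+ * one run of the handler drains the whole done list
+ */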
if (dev->done.qlen == 1)
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
spin_unlock(&dev->done.lock);
spin_unlock_irqrestore(&list->lock, flags);
return old_state;
@@ -549,7 +549,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
default:
netif_dbg(dev, rx_err, dev->net,
"rx submit, %d\n", retval);
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
break;
case 0:
__usbnet_queue_skb(&dev->rxq, skb, rx_start);
@@ -709,7 +709,7 @@ void usbnet_resume_rx(struct usbnet *dev)
num++;
}
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
netif_dbg(dev, rx_status, dev->net,
"paused rx queue disabled, %d skbs requeued\n", num);
@@ -778,7 +778,7 @@ void usbnet_unlink_rx_urbs(struct usbnet *dev)
{
if (netif_running(dev->net)) {
(void) unlink_urbs (dev, &dev->rxq);
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -861,14 +861,14 @@ int usbnet_stop (struct net_device *net)
/* deferred work (timer, softirq, task) must also stop */
dev->flags = 0;
timer_delete_sync(&dev->delay);
- tasklet_kill(&dev->bh);
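+ /* like tasklet_kill(), cancel_work_sync() cancels a pending
+ * run and waits for a running one to finish
+ */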
+ cancel_work_sync(&dev->bh_work);
cancel_work_sync(&dev->kevent);
/* We have cyclic dependencies. Those calls are needed
* to break a cycle. We cannot fall into the gaps because
* we have a flag
*/
- tasklet_kill(&dev->bh);
+ cancel_work_sync(&dev->bh_work);
timer_delete_sync(&dev->delay);
cancel_work_sync(&dev->kevent);
@@ -955,7 +955,7 @@ int usbnet_open (struct net_device *net)
clear_bit(EVENT_RX_KILL, &dev->flags);
// delay posting reads until we're fully open
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
if (info->manage_power) {
retval = info->manage_power(dev, 1);
if (retval < 0) {
@@ -1123,7 +1123,7 @@ static void __handle_link_change(struct usbnet *dev)
*/
} else {
/* submitting URBs for reading packets */
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
/* hard_mtu or rx_urb_size may change during link change */
@@ -1198,11 +1198,11 @@ usbnet_deferred_kevent (struct work_struct *work)
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
if (!usbnet_going_away(dev))
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
- /* tasklet could resubmit itself forever if memory is tight */
+ /* the bh_work item could resubmit itself forever if memory is tight */
if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
struct urb *urb = NULL;
int resched = 1;
@@ -1224,7 +1224,7 @@ usbnet_deferred_kevent (struct work_struct *work)
fail_lowmem:
if (resched)
if (!usbnet_going_away(dev))
- tasklet_schedule(&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
@@ -1325,7 +1325,7 @@ void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
struct usbnet *dev = netdev_priv(net);
unlink_urbs (dev, &dev->txq);
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
/* this needs to be handled individually because the generic layer
* doesn't know what is sufficient and could not restore private
* information if a remedy of an unconditional reset were used.
@@ -1547,7 +1547,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
/*-------------------------------------------------------------------------*/
-// tasklet (work deferred from completions, in_irq) or timer
+// bottom half work (deferred from completions, in_irq) or timer
static void usbnet_bh (struct timer_list *t)
{
@@ -1594,23 +1594,23 @@ static void usbnet_bh (struct timer_list *t)
int temp = dev->rxq.qlen;
if (temp < RX_QLEN(dev)) {
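+ /* both the timer and system_bh_wq run this handler in
+ * softirq context, so the allocation must stay atomic
+ */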
if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
return;
if (temp != dev->rxq.qlen)
netif_dbg(dev, link, dev->net,
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
if (dev->rxq.qlen < RX_QLEN(dev))
- tasklet_schedule (&dev->bh);
+ queue_work(system_bh_wq, &dev->bh_work);
}
if (dev->txq.qlen < TX_QLEN (dev))
netif_wake_queue (dev->net);
}
}
-static void usbnet_bh_tasklet(struct tasklet_struct *t)
+static void usbnet_bh_work(struct work_struct *work)
{
- struct usbnet *dev = from_tasklet(dev, t, bh);
+ struct usbnet *dev = from_work(dev, work, bh_work);
usbnet_bh(&dev->delay);
}
@@ -1742,7 +1742,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
skb_queue_head_init(&dev->rxq_pause);
- tasklet_setup(&dev->bh, usbnet_bh_tasklet);
+ INIT_WORK(&dev->bh_work, usbnet_bh_work);
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
init_usb_anchor(&dev->deferred);
timer_setup(&dev->delay, usbnet_bh, 0);
@@ -1971,7 +1971,7 @@ int usbnet_resume (struct usb_interface *intf)
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
- tasklet_schedule (&dev->bh);
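+ /* kick bh_work to repost rx urbs now that the device is awake */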
+ queue_work(system_bh_wq, &dev->bh_work);
}
}
@@ -58,7 +58,7 @@ struct usbnet {
unsigned interrupt_count;
struct mutex interrupt_mutex;
struct usb_anchor deferred;
- struct tasklet_struct bh;
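+ /* rx/tx bottom half; queued on system_bh_wq, runs in softirq context */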
+ struct work_struct bh_work;
struct work_struct kevent;
unsigned long flags;