author    David Howells <dhowells@redhat.com>    2006-11-22 14:57:56 +0000
committer David Howells <dhowells@redhat.com>    2006-11-22 14:57:56 +0000
commit    c4028958b6ecad064b1a6303a6a5906d4fe48d73 (patch)
tree      1c4c89652c62a75da09f9b9442012007e4ac6250 /net
parent    65f27f38446e1976cc98fd3004b110fedcddd189 (diff)
WorkStruct: make allyesconfig
Fix up for make allyesconfig.

Signed-Off-By: David Howells <dhowells@redhat.com>
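The patch mechanically converts work-queue callbacks from the old void-pointer style to the prototype introduced by the parent commit (65f27f38446e): the handler now receives the struct work_struct itself and recovers its context with container_of(). A minimal sketch of that pattern, using hypothetical foo_ctx/foo_handler names that are not part of this patch:

#include <linux/workqueue.h>

struct foo_ctx {
	struct work_struct work;
	int value;
};

static void foo_handler(struct work_struct *work)
{
	/* The work item is embedded in its owner, so the owner is found
	 * from the work pointer instead of a separate void *data argument. */
	struct foo_ctx *ctx = container_of(work, struct foo_ctx, work);

	pr_debug("foo value %d\n", ctx->value);
}

static void foo_start(struct foo_ctx *ctx)
{
	INIT_WORK(&ctx->work, foo_handler);	/* no context argument any more */
	schedule_work(&ctx->work);
}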
Diffstat (limited to 'net')
-rw-r--r--  net/atm/lec.c | 9
-rw-r--r--  net/atm/lec.h | 2
-rw-r--r--  net/bluetooth/hci_sysfs.c | 12
-rw-r--r--  net/bridge/br_if.c | 10
-rw-r--r--  net/bridge/br_private.h | 2
-rw-r--r--  net/core/netpoll.c | 4
-rw-r--r--  net/dccp/minisocks.c | 3
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_assoc.c | 18
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_auth.c | 23
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_event.c | 12
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_module.c | 4
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_priv.h | 13
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_scan.c | 13
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 6
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 6
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 11
-rw-r--r--  net/sctp/associola.c | 11
-rw-r--r--  net/sctp/endpointola.c | 10
-rw-r--r--  net/sctp/inqueue.c | 9
-rw-r--r--  net/xfrm/xfrm_policy.c | 8
-rw-r--r--  net/xfrm/xfrm_state.c | 8
21 files changed, 107 insertions(+), 87 deletions(-)
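Several of the files below also switch from struct work_struct to struct delayed_work, since the new API gives delayed work its own type; those handlers reach their container through the embedded .work member, and a zero delay stands in for the old schedule_work() call. A sketch of that variant, again with hypothetical names not taken from this patch:

#include <linux/workqueue.h>

struct bar_ctx {
	struct delayed_work dwork;
};

static void bar_handler(struct work_struct *work)
{
	/* delayed_work embeds a work_struct as ".work", so container_of()
	 * goes through dwork.work to find the owning structure. */
	struct bar_ctx *ctx = container_of(work, struct bar_ctx, dwork.work);

	(void)ctx;	/* use ctx here */
}

static void bar_start(struct bar_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->dwork, bar_handler);
	schedule_delayed_work(&ctx->dwork, 0);	/* 0 jiffies: run as soon as possible */
}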
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 66c57c1091a8..e801fff69dc0 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1458,7 +1458,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr,
#define LEC_ARP_REFRESH_INTERVAL (3*HZ)
-static void lec_arp_check_expire(void *data);
+static void lec_arp_check_expire(struct work_struct *work);
static void lec_arp_expire_arp(unsigned long data);
/*
@@ -1481,7 +1481,7 @@ static void lec_arp_init(struct lec_priv *priv)
INIT_HLIST_HEAD(&priv->lec_no_forward);
INIT_HLIST_HEAD(&priv->mcast_fwds);
spin_lock_init(&priv->lec_arp_lock);
- INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv);
+ INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire);
schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL);
}
@@ -1879,10 +1879,11 @@ static void lec_arp_expire_vcc(unsigned long data)
* to ESI_FORWARD_DIRECT. This causes the flush period to end
* regardless of the progress of the flush protocol.
*/
-static void lec_arp_check_expire(void *data)
+static void lec_arp_check_expire(struct work_struct *work)
{
unsigned long flags;
- struct lec_priv *priv = data;
+ struct lec_priv *priv =
+ container_of(work, struct lec_priv, lec_arp_work.work);
struct hlist_node *node, *next;
struct lec_arp_table *entry;
unsigned long now;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 877f50939696..984e8e6e083a 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -92,7 +92,7 @@ struct lec_priv {
spinlock_t lec_arp_lock;
struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */
struct atm_vcc *lecd;
- struct work_struct lec_arp_work; /* C10 */
+ struct delayed_work lec_arp_work; /* C10 */
unsigned int maximum_unknown_frame_count;
/*
* Within the period of time defined by this variable, the client will send
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3eeeb7a86e75..d4c935692ccf 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev)
kfree(data);
}
-static void add_conn(void *data)
+static void add_conn(struct work_struct *work)
{
- struct hci_conn *conn = data;
+ struct hci_conn *conn = container_of(work, struct hci_conn, work);
int i;
if (device_register(&conn->dev) < 0) {
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
dev_set_drvdata(&conn->dev, conn);
- INIT_WORK(&conn->work, add_conn, (void *) conn);
+ INIT_WORK(&conn->work, add_conn);
schedule_work(&conn->work);
}
-static void del_conn(void *data)
+static void del_conn(struct work_struct *work)
{
- struct hci_conn *conn = data;
+ struct hci_conn *conn = container_of(work, struct hci_conn, work);
device_del(&conn->dev);
}
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
- INIT_WORK(&conn->work, del_conn, (void *) conn);
+ INIT_WORK(&conn->work, del_conn);
schedule_work(&conn->work);
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f753c40c11d2..55bb2634c088 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev)
* Called from work queue to allow for calling functions that
* might sleep (such as speed check), and to debounce.
*/
-static void port_carrier_check(void *arg)
+static void port_carrier_check(struct work_struct *work)
{
- struct net_device *dev = arg;
struct net_bridge_port *p;
+ struct net_device *dev;
struct net_bridge *br;
+ dev = container_of(work, struct net_bridge_port,
+ carrier_check.work)->dev;
+ work_release(work);
+
rtnl_lock();
p = dev->br_port;
if (!p)
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
p->port_no = index;
br_init_port(p);
p->state = BR_STATE_DISABLED;
- INIT_WORK(&p->carrier_check, port_carrier_check, dev);
+ INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check);
br_stp_port_timer_init(p);
kobject_init(&p->kobj);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 74258d86f256..3a534e94c7f3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct net_bridge_port
struct timer_list hold_timer;
struct timer_list message_age_timer;
struct kobject kobj;
- struct work_struct carrier_check;
+ struct delayed_work carrier_check;
struct rcu_head rcu;
};
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6589adb14cbf..63f24c914ddb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -56,7 +56,7 @@ static atomic_t trapped;
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
-static void queue_process(void *p)
+static void queue_process(struct work_struct *work)
{
unsigned long flags;
struct sk_buff *skb;
@@ -77,7 +77,7 @@ static void queue_process(void *p)
}
}
-static DECLARE_WORK(send_queue, queue_process, NULL);
+static DECLARE_WORK(send_queue, queue_process);
void netpoll_queue(struct sk_buff *skb)
{
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 9045438d6b36..36db5be2a9e9 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -31,8 +31,7 @@ struct inet_timewait_death_row dccp_death_row = {
.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
(unsigned long)&dccp_death_row),
.twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
- inet_twdr_twkill_work,
- &dccp_death_row),
+ inet_twdr_twkill_work),
/* Short-time timewait calendar */
.twcal_hand = -1,
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index cf51c87a971d..08386c102954 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft
}
void
-ieee80211softmac_assoc_timeout(void *d)
+ieee80211softmac_assoc_timeout(struct work_struct *work)
{
- struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+ struct ieee80211softmac_device *mac =
+ container_of(work, struct ieee80211softmac_device,
+ associnfo.timeout.work);
struct ieee80211softmac_network *n;
mutex_lock(&mac->associnfo.mutex);
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void
/* This function is called to handle userspace requests (asynchronously) */
void
-ieee80211softmac_assoc_work(void *d)
+ieee80211softmac_assoc_work(struct work_struct *work)
{
- struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d;
+ struct ieee80211softmac_device *mac =
+ container_of(work, struct ieee80211softmac_device,
+ associnfo.work.work);
struct ieee80211softmac_network *found = NULL;
struct ieee80211_network *net = NULL, *best = NULL;
int bssvalid;
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
network->authenticated = 0;
/* we don't want to do this more than once ... */
network->auth_desynced_once = 1;
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
break;
}
default:
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev,
ieee80211softmac_disassoc(mac);
/* try to reassociate */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev,
dprintkl(KERN_INFO PFX "reassoc request from unknown network\n");
return 0;
}
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c
index 4cef39e171d0..2ae1833b657a 100644
--- a/net/ieee80211/softmac/ieee80211softmac_auth.c
+++ b/net/ieee80211/softmac/ieee80211softmac_auth.c
@@ -26,7 +26,7 @@
#include "ieee80211softmac_priv.h"
-static void ieee80211softmac_auth_queue(void *data);
+static void ieee80211softmac_auth_queue(struct work_struct *work);
/* Queues an auth request to the desired AP */
int
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
auth->mac = mac;
auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT;
auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST;
- INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth);
+ INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue);
/* Lock (for list) */
spin_lock_irqsave(&mac->lock, flags);
/* add to list */
list_add_tail(&auth->list, &mac->auth_queue);
- schedule_work(&auth->work);
+ schedule_delayed_work(&auth->work, 0);
spin_unlock_irqrestore(&mac->lock, flags);
return 0;
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac,
/* Sends an auth request to the desired AP and handles timeouts */
static void
-ieee80211softmac_auth_queue(void *data)
+ieee80211softmac_auth_queue(struct work_struct *work)
{
struct ieee80211softmac_device *mac;
struct ieee80211softmac_auth_queue_item *auth;
struct ieee80211softmac_network *net;
unsigned long flags;
- auth = (struct ieee80211softmac_auth_queue_item *)data;
+ auth = container_of(work, struct ieee80211softmac_auth_queue_item,
+ work.work);
net = auth->net;
mac = auth->mac;
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data)
/* Sends a response to an auth challenge (for shared key auth). */
static void
-ieee80211softmac_auth_challenge_response(void *_aq)
+ieee80211softmac_auth_challenge_response(struct work_struct *work)
{
- struct ieee80211softmac_auth_queue_item *aq = _aq;
+ struct ieee80211softmac_auth_queue_item *aq =
+ container_of(work, struct ieee80211softmac_auth_queue_item,
+ work.work);
/* Send our response */
ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state);
@@ -228,8 +231,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth)
* we have obviously already sent the initial auth
* request. */
cancel_delayed_work(&aq->work);
- INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq);
- schedule_work(&aq->work);
+ INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response);
+ schedule_delayed_work(&aq->work, 0);
spin_unlock_irqrestore(&mac->lock, flags);
return 0;
case IEEE80211SOFTMAC_AUTH_SHARED_PASS:
@@ -392,6 +395,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de
ieee80211softmac_deauth_from_net(mac, net);
/* let's try to re-associate */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c
index f34fa2ef666b..b9015656cfb3 100644
--- a/net/ieee80211/softmac/ieee80211softmac_event.c
+++ b/net/ieee80211/softmac/ieee80211softmac_event.c
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = {
static void
-ieee80211softmac_notify_callback(void *d)
+ieee80211softmac_notify_callback(struct work_struct *work)
{
- struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d;
- kfree(d);
+ struct ieee80211softmac_event *pevent =
+ container_of(work, struct ieee80211softmac_event, work.work);
+ struct ieee80211softmac_event event = *pevent;
+ kfree(pevent);
event.fun(event.mac->dev, event.event_type, event.context);
}
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac,
return -ENOMEM;
eventptr->event_type = event;
- INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr);
+ INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback);
eventptr->fun = fun;
eventptr->context = context;
eventptr->mac = mac;
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve
/* User may have subscribed to ANY event, so
* we tell them which event triggered it. */
eventptr->event_type = event;
- schedule_work(&eventptr->work);
+ schedule_delayed_work(&eventptr->work, 0);
}
}
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 33aff4f4a471..256207b71dc9 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
INIT_LIST_HEAD(&softmac->events);
mutex_init(&softmac->associnfo.mutex);
- INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac);
- INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac);
+ INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work);
+ INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout);
softmac->start_scan = ieee80211softmac_start_scan_implementation;
softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation;
softmac->stop_scan = ieee80211softmac_stop_scan_implementation;
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index 0642e090b8a7..c0dbe070e548 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -78,7 +78,7 @@
/* private definitions and prototypes */
/*** prototypes from _scan.c */
-void ieee80211softmac_scan(void *sm);
+void ieee80211softmac_scan(struct work_struct *work);
/* for internal use if scanning is needed */
int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac);
void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac);
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au
int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth);
/*** prototypes from _assoc.c */
-void ieee80211softmac_assoc_work(void *d);
+void ieee80211softmac_assoc_work(struct work_struct *work);
int ieee80211softmac_handle_assoc_response(struct net_device * dev,
struct ieee80211_assoc_response * resp,
struct ieee80211_network * network);
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev,
struct ieee80211_disassoc * disassoc);
int ieee80211softmac_handle_reassoc_req(struct net_device * dev,
struct ieee80211_reassoc_request * reassoc);
-void ieee80211softmac_assoc_timeout(void *d);
+void ieee80211softmac_assoc_timeout(struct work_struct *work);
void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason);
void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac);
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item {
struct ieee80211softmac_device *mac; /* SoftMAC device */
u8 retry; /* Retry limit */
u8 state; /* Auth State */
- struct work_struct work; /* Work queue */
+ struct delayed_work work; /* Work queue */
};
/* scanning information */
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo {
stop:1;
u8 skip_flags;
struct completion finished;
- struct work_struct softmac_scan;
+ struct delayed_work softmac_scan;
+ struct ieee80211softmac_device *mac;
};
/* private event struct */
@@ -227,7 +228,7 @@ struct ieee80211softmac_event {
struct list_head list;
int event_type;
void *event_context;
- struct work_struct work;
+ struct delayed_work work;
notify_function_ptr fun;
void *context;
struct ieee80211softmac_device *mac;
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c
index d31cf77498c4..a8326076581a 100644
--- a/net/ieee80211/softmac/ieee80211softmac_scan.c
+++ b/net/ieee80211/softmac/ieee80211softmac_scan.c
@@ -91,12 +91,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm)
/* internal scanning implementation follows */
-void ieee80211softmac_scan(void *d)
+void ieee80211softmac_scan(struct work_struct *work)
{
int invalid_channel;
u8 current_channel_idx;
- struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d;
- struct ieee80211softmac_scaninfo *si = sm->scaninfo;
+ struct ieee80211softmac_scaninfo *si =
+ container_of(work, struct ieee80211softmac_scaninfo,
+ softmac_scan.work);
+ struct ieee80211softmac_device *sm = si->mac;
unsigned long flags;
while (!(si->stop) && (si->current_channel_idx < si->number_channels)) {
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802
struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC);
if (unlikely(!info))
return NULL;
- INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac);
+ INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan);
+ info->mac = mac;
init_completion(&info->finished);
return info;
}
@@ -189,7 +192,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev)
sm->scaninfo->started = 1;
sm->scaninfo->stop = 0;
INIT_COMPLETION(sm->scaninfo->finished);
- schedule_work(&sm->scaninfo->softmac_scan);
+ schedule_delayed_work(&sm->scaninfo->softmac_scan, 0);
spin_unlock_irqrestore(&sm->lock, flags);
return 0;
}
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index 23068a830f7d..2ffaebd21c53 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev,
sm->associnfo.associating = 1;
/* queue lower level code to do work (if necessary) */
- schedule_work(&sm->associnfo.work);
+ schedule_delayed_work(&sm->associnfo.work, 0);
out:
mutex_unlock(&sm->associnfo.mutex);
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
/* force reassociation */
mac->associnfo.bssvalid = 0;
if (mac->associnfo.associated)
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
} else if (is_zero_ether_addr(data->ap_addr.sa_data)) {
/* the bssid we have is no longer fixed */
mac->associnfo.bssfixed = 0;
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev,
/* tell the other code that this bssid should be used no matter what */
mac->associnfo.bssfixed = 1;
/* queue associate if new bssid or (old one again and not associated) */
- schedule_work(&mac->associnfo.work);
+ schedule_delayed_work(&mac->associnfo.work, 0);
}
out:
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index f261616e4602..9b933381ebbe 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -221,10 +221,10 @@ static void update_defense_level(void)
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
-static void defense_work_handler(void *data);
-static DECLARE_WORK(defense_work, defense_work_handler, NULL);
+static void defense_work_handler(struct work_struct *work);
+static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
-static void defense_work_handler(void *data)
+static void defense_work_handler(struct work_struct *work)
{
update_defense_level();
if (atomic_read(&ip_vs_dropentry))
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index d50a02030ad7..262bda808d96 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty);
static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch);
static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout);
static void ircomm_tty_hangup(struct tty_struct *tty);
-static void ircomm_tty_do_softint(void *private_);
+static void ircomm_tty_do_softint(struct work_struct *work);
static void ircomm_tty_shutdown(struct ircomm_tty_cb *self);
static void ircomm_tty_stop(struct tty_struct *tty);
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
self->flow = FLOW_STOP;
self->line = line;
- INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
+ INIT_WORK(&self->tqueue, ircomm_tty_do_softint);
self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
self->close_delay = 5*HZ/10;
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty)
}
/*
- * Function ircomm_tty_do_softint (private_)
+ * Function ircomm_tty_do_softint (work)
*
* We use this routine to give the write wakeup to the user at at a
* safe time (as fast as possible after write have completed). This
* can be compared to the Tx interrupt.
*/
-static void ircomm_tty_do_softint(void *private_)
+static void ircomm_tty_do_softint(struct work_struct *work)
{
- struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_;
+ struct ircomm_tty_cb *self =
+ container_of(work, struct ircomm_tty_cb, tqueue);
struct tty_struct *tty;
unsigned long flags;
struct sk_buff *skb, *ctrl_skb;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index ed0445fe85e7..88124696ba60 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -61,7 +61,7 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal functions. */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
/* 1st Level Abstractions. */
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
- sctp_inq_set_th_handler(&asoc->base.inqueue,
- (void (*)(void *))sctp_assoc_bh_rcv,
- asoc);
+ sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
/* Create an output queue. */
sctp_outq_init(asoc, &asoc->outqueue);
@@ -944,8 +942,11 @@ out:
}
/* Do delayed input processing. This is scheduled by sctp_rcv(). */
-static void sctp_assoc_bh_rcv(struct sctp_association *asoc)
+static void sctp_assoc_bh_rcv(struct work_struct *work)
{
+ struct sctp_association *asoc =
+ container_of(work, struct sctp_association,
+ base.inqueue.immediate);
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sock *sk;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 9b6b394b66f6..a2b553721514 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -61,7 +61,7 @@
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep);
+static void sctp_endpoint_bh_rcv(struct work_struct *work);
/*
* Initialize the base fields of the endpoint structure.
@@ -85,8 +85,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
sctp_inq_init(&ep->base.inqueue);
/* Set its top-half handler */
- sctp_inq_set_th_handler(&ep->base.inqueue,
- (void (*)(void *))sctp_endpoint_bh_rcv, ep);
+ sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);
/* Initialize the bind addr area */
sctp_bind_addr_init(&ep->base.bind_addr, 0);
@@ -311,8 +310,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep,
/* Do delayed input processing. This is scheduled by sctp_rcv().
* This may be called on BH or task time.
*/
-static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep)
+static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
+ struct sctp_endpoint *ep =
+ container_of(work, struct sctp_endpoint,
+ base.inqueue.immediate);
struct sctp_association *asoc;
struct sock *sk;
struct sctp_transport *transport;
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cf6deed7e849..71b07466e880 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue)
queue->in_progress = NULL;
/* Create a task for delivering data. */
- INIT_WORK(&queue->immediate, NULL, NULL);
+ INIT_WORK(&queue->immediate, NULL);
queue->malloced = 0;
}
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
* on the BH related data structures.
*/
list_add_tail(&chunk->list, &q->in_chunk_list);
- q->immediate.func(q->immediate.data);
+ q->immediate.func(&q->immediate);
}
/* Extract a chunk from an SCTP inqueue.
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
* The intent is that this routine will pull stuff out of the
* inqueue and process it.
*/
-void sctp_inq_set_th_handler(struct sctp_inq *q,
- void (*callback)(void *), void *arg)
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
- INIT_WORK(&q->immediate, callback, arg);
+ INIT_WORK(&q->immediate, callback);
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 7736b23c3f03..ba924d40df7d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -358,7 +358,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
xfrm_pol_put(policy);
}
-static void xfrm_policy_gc_task(void *data)
+static void xfrm_policy_gc_task(struct work_struct *work)
{
struct xfrm_policy *policy;
struct hlist_node *entry, *tmp;
@@ -546,7 +546,7 @@ static inline int xfrm_byidx_should_resize(int total)
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
{
int dir, total;
@@ -563,7 +563,7 @@ static void xfrm_hash_resize(void *__unused)
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
/* Generate new index... KAME seems to generate them ordered by cost
* of an absolute inpredictability of ordering of rules. This will not pass. */
@@ -2080,7 +2080,7 @@ static void __init xfrm_policy_init(void)
panic("XFRM: failed to allocate bydst hash\n");
}
- INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
+ INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task);
register_netdevice_notifier(&xfrm_dev_notifier);
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 899de9ed22a6..40c527179843 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void)
static DEFINE_MUTEX(hash_resize_mutex);
-static void xfrm_hash_resize(void *__unused)
+static void xfrm_hash_resize(struct work_struct *__unused)
{
struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
unsigned long nsize, osize;
@@ -168,7 +168,7 @@ out_unlock:
mutex_unlock(&hash_resize_mutex);
}
-static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL);
+static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize);
DECLARE_WAIT_QUEUE_HEAD(km_waitq);
EXPORT_SYMBOL(km_waitq);
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
kfree(x);
}
-static void xfrm_state_gc_task(void *data)
+static void xfrm_state_gc_task(struct work_struct *data)
{
struct xfrm_state *x;
struct hlist_node *entry, *tmp;
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void)
panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes.");
xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
- INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL);
+ INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task);
}