author	Patrick McHardy <kaber@trash.net>	2011-03-28 08:39:36 +0000
committer	David S. Miller <davem@davemloft.net>	2011-03-30 17:14:33 -0700
commit	04f482faf50535229a5a5c8d629cf963899f857c (patch)
tree	698d5c8e22e68f9a621c03972556e3a73c525465 /drivers/connector/connector.c
parent	e2666f84958adb3a034b98e99699b55705117e01 (diff)
connector: convert to synchronous netlink message processing
Commits 01a16b21 (netlink: kill eff_cap from struct netlink_skb_parms) and c53fa1ed (netlink: kill loginuid/sessionid/sid members from struct netlink_skb_parms) removed some members from struct netlink_skb_parms that depend on the current context; all netlink users are now required to do synchronous message processing.

connector, however, queues received messages and processes them in a work queue, which is no longer valid. This patch converts connector to do synchronous message processing by invoking the registered callback handler directly from the netlink receive function.

In order to avoid invoking the callback with connector locks held, a reference count is added to struct cn_callback_entry; the reference is taken when a matching callback entry is found on the device's queue_list, and released after the callback handler has been invoked.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Acked-by: Evgeniy Polyakov <zbr@ioremap.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
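The core of the conversion is a classic pin-then-call pattern: find the matching entry while holding queue_lock, take a reference so the entry cannot be freed, drop the lock, invoke the callback, and only then release the reference. Below is a minimal userspace sketch of that pattern; entry_list, entry_put() and dispatch() are illustrative names for this sketch only, not connector APIs.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
	int refcnt;                 /* protected by list_lock in this sketch */
	void (*callback)(int id);
	struct entry *next;
};

static struct entry *entry_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Drop a reference; free the entry when the last one goes away. */
static void entry_put(struct entry *e)
{
	int last;

	pthread_mutex_lock(&list_lock);
	last = (--e->refcnt == 0);
	pthread_mutex_unlock(&list_lock);
	if (last)
		free(e);
}

/* Synchronous dispatch: find and pin the entry under the lock,
 * invoke the callback with the lock dropped, then unpin. */
static int dispatch(int id)
{
	struct entry *e, *found = NULL;

	pthread_mutex_lock(&list_lock);
	for (e = entry_list; e; e = e->next) {
		if (e->id == id) {
			e->refcnt++;    /* pin before unlocking */
			found = e;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	if (!found)
		return -1;              /* no handler registered: -ENODEV in the kernel */

	found->callback(id);            /* runs with no list lock held */
	entry_put(found);
	return 0;
}

static void hello(int id)
{
	printf("callback invoked for id %d\n", id);
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	e->id = 1;
	e->refcnt = 1;                  /* reference owned by the list itself */
	e->callback = hello;
	entry_list = e;

	return dispatch(1) ? 1 : 0;
}

The point of the reference shows up in entry_put(): an unregister path may drop the list's own reference while a dispatch is still in flight, and the entry is only freed once the in-flight caller unpins it.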
Diffstat (limited to 'drivers/connector/connector.c')
-rw-r--r--	drivers/connector/connector.c	47
1 file changed, 12 insertions(+), 35 deletions(-)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f7554de3be5e..d77005849af8 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -122,51 +122,28 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
  */
 static int cn_call_callback(struct sk_buff *skb)
 {
-	struct cn_callback_entry *__cbq, *__new_cbq;
+	struct cn_callback_entry *i, *cbq = NULL;
 	struct cn_dev *dev = &cdev;
 	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
+	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
 	int err = -ENODEV;
 
 	spin_lock_bh(&dev->cbdev->queue_lock);
-	list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
-		if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-			if (likely(!work_pending(&__cbq->work) &&
-					__cbq->data.skb == NULL)) {
-				__cbq->data.skb = skb;
-
-				if (queue_work(dev->cbdev->cn_queue,
-					       &__cbq->work))
-					err = 0;
-				else
-					err = -EINVAL;
-			} else {
-				struct cn_callback_data *d;
-
-				err = -ENOMEM;
-				__new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
-				if (__new_cbq) {
-					d = &__new_cbq->data;
-					d->skb = skb;
-					d->callback = __cbq->data.callback;
-					d->free = __new_cbq;
-
-					INIT_WORK(&__new_cbq->work,
-						  &cn_queue_wrapper);
-
-					if (queue_work(dev->cbdev->cn_queue,
						       &__new_cbq->work))
-						err = 0;
-					else {
-						kfree(__new_cbq);
-						err = -EINVAL;
-					}
-				}
-			}
+	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
+		if (cn_cb_equal(&i->id.id, &msg->id)) {
+			atomic_inc(&i->refcnt);
+			cbq = i;
 			break;
 		}
 	}
 	spin_unlock_bh(&dev->cbdev->queue_lock);
 
+	if (cbq != NULL) {
+		cbq->callback(msg, nsp);
+		kfree_skb(skb);
+		cn_queue_release_callback(cbq);
+		err = 0;
+	}
 
 	return err;
 }
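The reference taken above is dropped through cn_queue_release_callback(), which is defined in drivers/connector/cn_queue.c and therefore does not appear in this file's diff. Its contract follows from the changelog: decrement the refcount added to struct cn_callback_entry and free the entry once the last reference is gone. A hedged sketch of that contract only (illustrative, not the verbatim cn_queue.c code):

/* Illustrative release side of the refcount introduced by this patch.
 * The real cn_queue_release_callback() may also drop a reference on the
 * parent device; this sketch covers only the entry's own lifetime. */
static void example_release_callback(struct cn_callback_entry *cbq)
{
	if (atomic_dec_and_test(&cbq->refcnt))
		kfree(cbq);
}

Because the dispatch path pins the entry before dropping queue_lock, a concurrent cn_del_callback() can remove the entry from queue_list and drop the list's reference at any time; the kfree() only happens after the in-flight callback invocation has released its pin.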