// SPDX-License-Identifier: GPL-2.0
/*
* BlueZ - Bluetooth protocol stack for Linux
*
* Copyright (C) 2021 Intel Corporation
*/
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>
#include "hci_request.h"
#include "smp.h"
static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	/* Stash the response so __hci_cmd_sync_sk() can return it */
	if (skb)
		hdev->req_skb = skb_get(skb);

	wake_up_interruptible(&hdev->req_wait_q);
}
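/* Allocate an skb carrying the HCI command header plus parameters, ready to
 * be queued on the command queue.
 */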
static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
u32 plen, const void *param,
struct sock *sk)
{
int len = HCI_COMMAND_HDR_SIZE + plen;
struct hci_command_hdr *hdr;
struct sk_buff *skb;
skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb)
return NULL;
hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
hdr->opcode = cpu_to_le16(opcode);
hdr->plen = plen;
if (plen)
skb_put_data(skb, param, plen);
bt_dev_dbg(hdev, "skb len %d", skb->len);
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
hci_skb_opcode(skb) = opcode;
return skb;
}
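/* Append a single HCI command to the request's command queue, marking the
 * first queued entry as the start of the request.
 */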
static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
const void *param, u8 event, struct sock *sk)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
/* If an error occurred during request building, there is no point in
* queueing the HCI command. We can simply return.
*/
if (req->err)
return;
skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
if (!skb) {
bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
opcode);
req->err = -ENOMEM;
return;
}
if (skb_queue_empty(&req->cmd_q))
bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
bt_cb(skb)->hci.req_event = event;
skb_queue_tail(&req->cmd_q, skb);
}
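/* Splice the request's commands onto hdev->cmd_q and kick the command work so
 * they get sent to the controller. The completion callback is attached to the
 * last command, so the waiter is only woken once the whole request is done.
 */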
static int hci_cmd_sync_run(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
unsigned long flags;
bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
/* If an error occurred during request building, remove all HCI
* commands queued on the HCI request queue.
*/
if (req->err) {
skb_queue_purge(&req->cmd_q);
return req->err;
}
/* Do not allow empty requests */
if (skb_queue_empty(&req->cmd_q))
return -ENODATA;
skb = skb_peek_tail(&req->cmd_q);
bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
spin_lock_irqsave(&hdev->cmd_q.lock, flags);
skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
queue_work(hdev->workqueue, &hdev->cmd_work);
return 0;
}
/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
struct sock *sk)
{
struct hci_request req;
struct sk_buff *skb;
int err = 0;
bt_dev_dbg(hdev, "");
hci_req_init(&req, hdev);
hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
hdev->req_status = HCI_REQ_PEND;
err = hci_cmd_sync_run(&req);
if (err < 0)
return ERR_PTR(err);
err = wait_event_interruptible_timeout(hdev->req_wait_q,
hdev->req_status != HCI_REQ_PEND,
timeout);
if (err == -ERESTARTSYS)
return ERR_PTR(-EINTR);
switch (hdev->req_status) {
case HCI_REQ_DONE:
err = -bt_to_errno(hdev->req_result);
break;
case HCI_REQ_CANCELED:
err = -hdev->req_result;
break;
default:
err = -ETIMEDOUT;
break;
}
hdev->req_status = 0;
hdev->req_result = 0;
skb = hdev->req_skb;
hdev->req_skb = NULL;
bt_dev_dbg(hdev, "end: err %d", err);
if (err < 0) {
kfree_skb(skb);
return ERR_PTR(err);
}
if (!skb)
return ERR_PTR(-ENODATA);
return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);
/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout)
{
return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);
/* Send HCI command and wait for command complete event */
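/* For illustration only (hypothetical caller, not part of this file): read
 * the local version information and release the returned event skb.
 *
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */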
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout)
{
struct sk_buff *skb;
if (!test_bit(HCI_UP, &hdev->flags))
return ERR_PTR(-ENETDOWN);
bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
hci_req_sync_lock(hdev);
skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
hci_req_sync_unlock(hdev);
return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout)
{
return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
struct sock *sk)
{
struct sk_buff *skb;
u8 status;
skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
if (IS_ERR_OR_NULL(skb)) {
bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
PTR_ERR(skb));
return PTR_ERR(skb);
}
status = skb->data[0];
kfree_skb(skb);
return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout)
{
return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);
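/* Work callback for hdev->cmd_sync_work: pop the next queued entry and run
 * its function with the request lock held, then hand the result to the
 * entry's destroy callback. The work is queued once per entry by
 * hci_cmd_sync_queue().
 */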
static void hci_cmd_sync_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
struct hci_cmd_sync_work_entry *entry;
hci_cmd_sync_work_func_t func;
hci_cmd_sync_work_destroy_t destroy;
void *data;
bt_dev_dbg(hdev, "");
mutex_lock(&hdev->cmd_sync_work_lock);
entry = list_first_entry(&hdev->cmd_sync_work_list,
struct hci_cmd_sync_work_entry, list);
if (entry) {
list_del(&entry->list);
func = entry->func;
data = entry->data;
destroy = entry->destroy;
kfree(entry);
} else {
func = NULL;
data = NULL;
destroy = NULL;
}
mutex_unlock(&hdev->cmd_sync_work_lock);
if (func) {
int err;
hci_req_sync_lock(hdev);
err = func(hdev, data);
if (destroy)
destroy(hdev, data, err);
hci_req_sync_unlock(hdev);
}
}
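/* Initialize the synchronous command machinery for an hci_dev: the work item,
 * the pending-entry list and its lock.
 */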
void hci_cmd_sync_init(struct hci_dev *hdev)
{
INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
mutex_init(&hdev->cmd_sync_work_lock);
}
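/* Cancel the pending work and flush any queued entries, notifying their
 * destroy callbacks with -ECANCELED.
 */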
void hci_cmd_sync_clear(struct hci_dev *hdev)
{
struct hci_cmd_sync_work_entry *entry, *tmp;
cancel_work_sync(&hdev->cmd_sync_work);
list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
if (entry->destroy)
entry->destroy(hdev, entry->data, -ECANCELED);
list_del(&entry->list);
kfree(entry);
}
}
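/* Queue a function to be run by hci_cmd_sync_work() on the req_workqueue.
 * The function runs with the request lock held, so it can use the
 * __hci_cmd_sync*() helpers directly. A sketch of a caller (hypothetical
 * names, for illustration only):
 *
 *	static int reset_sync(struct hci_dev *hdev, void *data)
 *	{
 *		return __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
 *					     HCI_CMD_TIMEOUT);
 *	}
 *
 *	hci_cmd_sync_queue(hdev, reset_sync, NULL, NULL);
 */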
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy)
{
struct hci_cmd_sync_work_entry *entry;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->func = func;
entry->data = data;
entry->destroy = destroy;
mutex_lock(&hdev->cmd_sync_work_lock);
list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
mutex_unlock(&hdev->cmd_sync_work_lock);
queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
return 0;
}
EXPORT_SYMBOL(hci_cmd_sync_queue);