mirror of https://gitee.com/openkylin/linux.git
Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
Johan Hedberg says:

====================
pull request: bluetooth-next 2015-11-23

Here's the first bluetooth-next pull request for the 4.5 kernel.

 - Add new Get Advertising Size Information management command
 - Add support for new system note message type on monitor channel
 - Refactor LE scan changes behind separate workqueue to avoid races
 - Fix issue with privacy feature when powering on adapter
 - Various minor fixes & cleanups here and there

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 54f1aa2e57
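Most of the driver churn below is a mechanical conversion from touching bt_cb(skb)->pkt_type and ->expect directly to new wrapper macros added in include/net/bluetooth/bluetooth.h (see the bluetooth.h hunk further down). As a quick orientation, this is the pattern applied throughout the drivers; the macro definitions are copied verbatim from the diff, while the surrounding fragment is an illustrative sketch only, not a complete function:

    #define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
    #define hci_skb_expect(skb)   bt_cb((skb))->expect
    #define hci_skb_opcode(skb)   bt_cb((skb))->hci.opcode

    /* Typical before/after inside a driver's send_frame() callback: */
    switch (hci_skb_pkt_type(skb)) {   /* was: switch (bt_cb(skb)->pkt_type) */
    case HCI_COMMAND_PKT:
            hdev->stat.cmd_tx++;
            break;
    }

    /* Prepending the HCI frame-type byte before queueing for transmit: */
    memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);  /* was: &bt_cb(skb)->pkt_type */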
@@ -324,7 +324,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
 return -ENOMEM;
 }

-bt_cb(skb)->pkt_type = pkt_type;
+hci_skb_pkt_type(skb) = pkt_type;

 data->reassembly = skb;
 } else {

@@ -469,9 +469,10 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 unsigned char buf[3];
 int sent = 0, size, count;

-BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
+BT_DBG("hdev %p skb %p type %d len %d", hdev, skb,
+hci_skb_pkt_type(skb), skb->len);

-switch (bt_cb(skb)->pkt_type) {
+switch (hci_skb_pkt_type(skb)) {
 case HCI_COMMAND_PKT:
 hdev->stat.cmd_tx++;
 break;

@@ -484,7 +485,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 }

 /* Prepend skb with frame type */
-memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

 count = skb->len;

@ -261,7 +261,7 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
|
|||
if (!skb)
|
||||
break;
|
||||
|
||||
if (bt_cb(skb)->pkt_type & 0x80) {
|
||||
if (hci_skb_pkt_type(skb) & 0x80) {
|
||||
/* Disable RTS */
|
||||
info->ctrl_reg |= REG_CONTROL_RTS;
|
||||
outb(info->ctrl_reg, iobase + REG_CONTROL);
|
||||
|
@ -279,13 +279,13 @@ static void bluecard_write_wakeup(struct bluecard_info *info)
|
|||
/* Mark the buffer as dirty */
|
||||
clear_bit(ready_bit, &(info->tx_state));
|
||||
|
||||
if (bt_cb(skb)->pkt_type & 0x80) {
|
||||
if (hci_skb_pkt_type(skb) & 0x80) {
|
||||
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
|
||||
DEFINE_WAIT(wait);
|
||||
|
||||
unsigned char baud_reg;
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case PKT_BAUD_RATE_460800:
|
||||
baud_reg = REG_CONTROL_BAUD_RATE_460800;
|
||||
break;
|
||||
|
@ -402,9 +402,9 @@ static void bluecard_receive(struct bluecard_info *info,
|
|||
|
||||
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
|
||||
|
||||
bt_cb(info->rx_skb)->pkt_type = buf[i];
|
||||
hci_skb_pkt_type(info->rx_skb) = buf[i];
|
||||
|
||||
switch (bt_cb(info->rx_skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(info->rx_skb)) {
|
||||
|
||||
case 0x00:
|
||||
/* init packet */
|
||||
|
@ -436,7 +436,8 @@ static void bluecard_receive(struct bluecard_info *info,
|
|||
|
||||
default:
|
||||
/* unknown packet */
|
||||
BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
|
||||
BT_ERR("Unknown HCI packet with type 0x%02x received",
|
||||
hci_skb_pkt_type(info->rx_skb));
|
||||
info->hdev->stat.err_rx++;
|
||||
|
||||
kfree_skb(info->rx_skb);
|
||||
|
@ -578,21 +579,21 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
|
|||
switch (baud) {
|
||||
case 460800:
|
||||
cmd[4] = 0x00;
|
||||
bt_cb(skb)->pkt_type = PKT_BAUD_RATE_460800;
|
||||
hci_skb_pkt_type(skb) = PKT_BAUD_RATE_460800;
|
||||
break;
|
||||
case 230400:
|
||||
cmd[4] = 0x01;
|
||||
bt_cb(skb)->pkt_type = PKT_BAUD_RATE_230400;
|
||||
hci_skb_pkt_type(skb) = PKT_BAUD_RATE_230400;
|
||||
break;
|
||||
case 115200:
|
||||
cmd[4] = 0x02;
|
||||
bt_cb(skb)->pkt_type = PKT_BAUD_RATE_115200;
|
||||
hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200;
|
||||
break;
|
||||
case 57600:
|
||||
/* Fall through... */
|
||||
default:
|
||||
cmd[4] = 0x03;
|
||||
bt_cb(skb)->pkt_type = PKT_BAUD_RATE_57600;
|
||||
hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -660,7 +661,7 @@ static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
{
|
||||
struct bluecard_info *info = hci_get_drvdata(hdev);
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
hdev->stat.cmd_tx++;
|
||||
break;
|
||||
|
@ -673,7 +674,7 @@ static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
}
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
skb_queue_tail(&(info->txq), skb);
|
||||
|
||||
bluecard_write_wakeup(info);
|
||||
|
|
|
@@ -295,9 +295,9 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 return -ENOMEM;

 /* Prepend skb with frame type */
-*skb_push(skb, 1) = bt_cb(skb)->pkt_type;
+*skb_push(skb, 1) = hci_skb_pkt_type(skb);

-switch (bt_cb(skb)->pkt_type) {
+switch (hci_skb_pkt_type(skb)) {
 case HCI_COMMAND_PKT:
 dr = kmalloc(sizeof(*dr), GFP_ATOMIC);
 if (!dr) {

@ -246,10 +246,10 @@ static void bt3c_receive(struct bt3c_info *info)
|
|||
|
||||
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
|
||||
|
||||
bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
|
||||
hci_skb_pkt_type(info->rx_skb) = inb(iobase + DATA_L);
|
||||
inb(iobase + DATA_H);
|
||||
|
||||
switch (bt_cb(info->rx_skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(info->rx_skb)) {
|
||||
|
||||
case HCI_EVENT_PKT:
|
||||
info->rx_state = RECV_WAIT_EVENT_HEADER;
|
||||
|
@ -268,7 +268,8 @@ static void bt3c_receive(struct bt3c_info *info)
|
|||
|
||||
default:
|
||||
/* Unknown packet */
|
||||
BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
|
||||
BT_ERR("Unknown HCI packet with type 0x%02x received",
|
||||
hci_skb_pkt_type(info->rx_skb));
|
||||
info->hdev->stat.err_rx++;
|
||||
|
||||
kfree_skb(info->rx_skb);
|
||||
|
@ -411,7 +412,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
struct bt3c_info *info = hci_get_drvdata(hdev);
|
||||
unsigned long flags;
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
hdev->stat.cmd_tx++;
|
||||
break;
|
||||
|
@ -424,7 +425,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
}
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
skb_queue_tail(&(info->txq), skb);
|
||||
|
||||
spin_lock_irqsave(&(info->lock), flags);
|
||||
|
|
|
@ -196,7 +196,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
|
|||
if (len)
|
||||
memcpy(skb_put(skb, len), param, len);
|
||||
|
||||
bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
|
||||
hci_skb_pkt_type(skb) = MRVL_VENDOR_PKT;
|
||||
|
||||
skb_queue_head(&priv->adapter->tx_queue, skb);
|
||||
|
||||
|
@ -387,7 +387,7 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
|
|||
skb->data[0] = (skb->len & 0x0000ff);
|
||||
skb->data[1] = (skb->len & 0x00ff00) >> 8;
|
||||
skb->data[2] = (skb->len & 0xff0000) >> 16;
|
||||
skb->data[3] = bt_cb(skb)->pkt_type;
|
||||
skb->data[3] = hci_skb_pkt_type(skb);
|
||||
|
||||
if (priv->hw_host_to_card)
|
||||
ret = priv->hw_host_to_card(priv, skb->data, skb->len);
|
||||
|
@ -434,9 +434,9 @@ static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
{
|
||||
struct btmrvl_private *priv = hci_get_drvdata(hdev);
|
||||
|
||||
BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
|
||||
BT_DBG("type=%d, len=%d", hci_skb_pkt_type(skb), skb->len);
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
hdev->stat.cmd_tx++;
|
||||
break;
|
||||
|
|
|
@ -698,7 +698,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
|
|||
case HCI_ACLDATA_PKT:
|
||||
case HCI_SCODATA_PKT:
|
||||
case HCI_EVENT_PKT:
|
||||
bt_cb(skb)->pkt_type = type;
|
||||
hci_skb_pkt_type(skb) = type;
|
||||
skb_put(skb, buf_len);
|
||||
skb_pull(skb, SDIO_HEADER_LEN);
|
||||
|
||||
|
@ -713,7 +713,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
|
|||
break;
|
||||
|
||||
case MRVL_VENDOR_PKT:
|
||||
bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
|
||||
skb_put(skb, buf_len);
|
||||
skb_pull(skb, SDIO_HEADER_LEN);
|
||||
|
||||
|
|
|
@ -86,7 +86,7 @@ static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb)
|
|||
skb->data[0] = (skb->len & 0x0000ff);
|
||||
skb->data[1] = (skb->len & 0x00ff00) >> 8;
|
||||
skb->data[2] = (skb->len & 0xff0000) >> 16;
|
||||
skb->data[3] = bt_cb(skb)->pkt_type;
|
||||
skb->data[3] = hci_skb_pkt_type(skb);
|
||||
|
||||
err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len);
|
||||
if (err < 0) {
|
||||
|
@ -158,7 +158,7 @@ static int btsdio_rx_packet(struct btsdio_data *data)
|
|||
|
||||
data->hdev->stat.byte_rx += len;
|
||||
|
||||
bt_cb(skb)->pkt_type = hdr[3];
|
||||
hci_skb_pkt_type(skb) = hdr[3];
|
||||
|
||||
err = hci_recv_frame(data->hdev, skb);
|
||||
if (err < 0)
|
||||
|
@ -252,7 +252,7 @@ static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
hdev->stat.cmd_tx++;
|
||||
break;
|
||||
|
|
|
@ -200,9 +200,9 @@ static void btuart_receive(struct btuart_info *info)
|
|||
|
||||
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
|
||||
|
||||
bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
|
||||
hci_skb_pkt_type(info->rx_skb) = inb(iobase + UART_RX);
|
||||
|
||||
switch (bt_cb(info->rx_skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(info->rx_skb)) {
|
||||
|
||||
case HCI_EVENT_PKT:
|
||||
info->rx_state = RECV_WAIT_EVENT_HEADER;
|
||||
|
@ -221,7 +221,8 @@ static void btuart_receive(struct btuart_info *info)
|
|||
|
||||
default:
|
||||
/* Unknown packet */
|
||||
BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
|
||||
BT_ERR("Unknown HCI packet with type 0x%02x received",
|
||||
hci_skb_pkt_type(info->rx_skb));
|
||||
info->hdev->stat.err_rx++;
|
||||
|
||||
kfree_skb(info->rx_skb);
|
||||
|
@ -424,7 +425,7 @@ static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
{
|
||||
struct btuart_info *info = hci_get_drvdata(hdev);
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
hdev->stat.cmd_tx++;
|
||||
break;
|
||||
|
@ -437,7 +438,7 @@ static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
}
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
skb_queue_tail(&(info->txq), skb);
|
||||
|
||||
btuart_write_wakeup(info);
|
||||
|
|
|
@ -437,22 +437,22 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
|
|||
break;
|
||||
}
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
|
||||
bt_cb(skb)->expect = HCI_EVENT_HDR_SIZE;
|
||||
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
|
||||
hci_skb_expect(skb) = HCI_EVENT_HDR_SIZE;
|
||||
}
|
||||
|
||||
len = min_t(uint, bt_cb(skb)->expect, count);
|
||||
len = min_t(uint, hci_skb_expect(skb), count);
|
||||
memcpy(skb_put(skb, len), buffer, len);
|
||||
|
||||
count -= len;
|
||||
buffer += len;
|
||||
bt_cb(skb)->expect -= len;
|
||||
hci_skb_expect(skb) -= len;
|
||||
|
||||
if (skb->len == HCI_EVENT_HDR_SIZE) {
|
||||
/* Complete event header */
|
||||
bt_cb(skb)->expect = hci_event_hdr(skb)->plen;
|
||||
hci_skb_expect(skb) = hci_event_hdr(skb)->plen;
|
||||
|
||||
if (skb_tailroom(skb) < bt_cb(skb)->expect) {
|
||||
if (skb_tailroom(skb) < hci_skb_expect(skb)) {
|
||||
kfree_skb(skb);
|
||||
skb = NULL;
|
||||
|
||||
|
@ -461,7 +461,7 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
|
|||
}
|
||||
}
|
||||
|
||||
if (bt_cb(skb)->expect == 0) {
|
||||
if (!hci_skb_expect(skb)) {
|
||||
/* Complete frame */
|
||||
data->recv_event(data->hdev, skb);
|
||||
skb = NULL;
|
||||
|
@ -492,24 +492,24 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
|
|||
break;
|
||||
}
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
|
||||
bt_cb(skb)->expect = HCI_ACL_HDR_SIZE;
|
||||
hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
|
||||
hci_skb_expect(skb) = HCI_ACL_HDR_SIZE;
|
||||
}
|
||||
|
||||
len = min_t(uint, bt_cb(skb)->expect, count);
|
||||
len = min_t(uint, hci_skb_expect(skb), count);
|
||||
memcpy(skb_put(skb, len), buffer, len);
|
||||
|
||||
count -= len;
|
||||
buffer += len;
|
||||
bt_cb(skb)->expect -= len;
|
||||
hci_skb_expect(skb) -= len;
|
||||
|
||||
if (skb->len == HCI_ACL_HDR_SIZE) {
|
||||
__le16 dlen = hci_acl_hdr(skb)->dlen;
|
||||
|
||||
/* Complete ACL header */
|
||||
bt_cb(skb)->expect = __le16_to_cpu(dlen);
|
||||
hci_skb_expect(skb) = __le16_to_cpu(dlen);
|
||||
|
||||
if (skb_tailroom(skb) < bt_cb(skb)->expect) {
|
||||
if (skb_tailroom(skb) < hci_skb_expect(skb)) {
|
||||
kfree_skb(skb);
|
||||
skb = NULL;
|
||||
|
||||
|
@ -518,7 +518,7 @@ static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
|
|||
}
|
||||
}
|
||||
|
||||
if (bt_cb(skb)->expect == 0) {
|
||||
if (!hci_skb_expect(skb)) {
|
||||
/* Complete frame */
|
||||
hci_recv_frame(data->hdev, skb);
|
||||
skb = NULL;
|
||||
|
@ -549,22 +549,22 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
|
|||
break;
|
||||
}
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
|
||||
bt_cb(skb)->expect = HCI_SCO_HDR_SIZE;
|
||||
hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
|
||||
hci_skb_expect(skb) = HCI_SCO_HDR_SIZE;
|
||||
}
|
||||
|
||||
len = min_t(uint, bt_cb(skb)->expect, count);
|
||||
len = min_t(uint, hci_skb_expect(skb), count);
|
||||
memcpy(skb_put(skb, len), buffer, len);
|
||||
|
||||
count -= len;
|
||||
buffer += len;
|
||||
bt_cb(skb)->expect -= len;
|
||||
hci_skb_expect(skb) -= len;
|
||||
|
||||
if (skb->len == HCI_SCO_HDR_SIZE) {
|
||||
/* Complete SCO header */
|
||||
bt_cb(skb)->expect = hci_sco_hdr(skb)->dlen;
|
||||
hci_skb_expect(skb) = hci_sco_hdr(skb)->dlen;
|
||||
|
||||
if (skb_tailroom(skb) < bt_cb(skb)->expect) {
|
||||
if (skb_tailroom(skb) < hci_skb_expect(skb)) {
|
||||
kfree_skb(skb);
|
||||
skb = NULL;
|
||||
|
||||
|
@ -573,7 +573,7 @@ static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count)
|
|||
}
|
||||
}
|
||||
|
||||
if (bt_cb(skb)->expect == 0) {
|
||||
if (!hci_skb_expect(skb)) {
|
||||
/* Complete frame */
|
||||
hci_recv_frame(data->hdev, skb);
|
||||
skb = NULL;
|
||||
|
@ -1257,7 +1257,7 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
urb = alloc_ctrl_urb(hdev, skb);
|
||||
if (IS_ERR(urb))
|
||||
|
@ -1853,7 +1853,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
|
|||
|
||||
*skb_put(skb, 1) = 0x00;
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
|
||||
|
||||
return hci_recv_frame(hdev, skb);
|
||||
}
|
||||
|
@ -1945,7 +1945,7 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
if (test_bit(BTUSB_BOOTLOADER, &data->flags)) {
|
||||
struct hci_command_hdr *cmd = (void *)skb->data;
|
||||
|
|
|
@ -249,10 +249,10 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
hst = hci_get_drvdata(hdev);
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
|
||||
BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type,
|
||||
skb->len);
|
||||
BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
|
||||
skb->len);
|
||||
|
||||
/* Insert skb to shared transport layer's transmit queue.
|
||||
* Freeing skb memory is taken care in shared transport layer,
|
||||
|
@ -268,7 +268,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
|
||||
/* ST accepted our skb. So, Go ahead and do rest */
|
||||
hdev->stat.byte_tx += len;
|
||||
ti_st_tx_complete(hst, bt_cb(skb)->pkt_type);
|
||||
ti_st_tx_complete(hst, hci_skb_pkt_type(skb));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -239,7 +239,7 @@ static void dtl1_receive(struct dtl1_info *info)
|
|||
info->rx_count = nsh->len + (nsh->len & 0x0001);
|
||||
break;
|
||||
case RECV_WAIT_DATA:
|
||||
bt_cb(info->rx_skb)->pkt_type = nsh->type;
|
||||
hci_skb_pkt_type(info->rx_skb) = nsh->type;
|
||||
|
||||
/* remove PAD byte if it exists */
|
||||
if (nsh->len & 0x0001) {
|
||||
|
@ -250,7 +250,7 @@ static void dtl1_receive(struct dtl1_info *info)
|
|||
/* remove NSH */
|
||||
skb_pull(info->rx_skb, NSHL);
|
||||
|
||||
switch (bt_cb(info->rx_skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(info->rx_skb)) {
|
||||
case 0x80:
|
||||
/* control data for the Nokia Card */
|
||||
dtl1_control(info, info->rx_skb);
|
||||
|
@ -259,12 +259,13 @@ static void dtl1_receive(struct dtl1_info *info)
|
|||
case 0x83:
|
||||
case 0x84:
|
||||
/* send frame to the HCI layer */
|
||||
bt_cb(info->rx_skb)->pkt_type &= 0x0f;
|
||||
hci_skb_pkt_type(info->rx_skb) &= 0x0f;
|
||||
hci_recv_frame(info->hdev, info->rx_skb);
|
||||
break;
|
||||
default:
|
||||
/* unknown packet */
|
||||
BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type);
|
||||
BT_ERR("Unknown HCI packet with type 0x%02x received",
|
||||
hci_skb_pkt_type(info->rx_skb));
|
||||
kfree_skb(info->rx_skb);
|
||||
break;
|
||||
}
|
||||
|
@ -386,7 +387,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
struct sk_buff *s;
|
||||
struct nsh nsh;
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
hdev->stat.cmd_tx++;
|
||||
nsh.type = 0x81;
|
||||
|
|
|
@ -205,7 +205,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
{
|
||||
struct ath_struct *ath = hu->priv;
|
||||
|
||||
if (bt_cb(skb)->pkt_type == HCI_SCODATA_PKT) {
|
||||
if (hci_skb_pkt_type(skb) == HCI_SCODATA_PKT) {
|
||||
kfree_skb(skb);
|
||||
return 0;
|
||||
}
|
||||
|
@ -213,7 +213,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
/* Update power management enable flag with parameters of
|
||||
* HCI sleep enable vendor specific HCI command.
|
||||
*/
|
||||
if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
|
||||
if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
|
||||
struct hci_command_hdr *hdr = (void *)skb->data;
|
||||
|
||||
if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP)
|
||||
|
@ -223,7 +223,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
BT_DBG("hu %p skb %p", hu, skb);
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
|
||||
skb_queue_tail(&ath->txq, skb);
|
||||
set_bit(HCI_UART_SENDING, &hu->tx_state);
|
||||
|
|
|
@@ -472,7 +472,7 @@ static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 bt_dev_dbg(hu->hdev, "hu %p skb %p", hu, skb);

 /* Prepend skb with frame type */
-memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
 skb_queue_tail(&bcm->txq, skb);

 return 0;

@ -155,7 +155,7 @@ static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
return 0;
|
||||
}
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_ACLDATA_PKT:
|
||||
case HCI_COMMAND_PKT:
|
||||
skb_queue_tail(&bcsp->rel, skb);
|
||||
|
@ -231,7 +231,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
|
|||
if (!nskb)
|
||||
return NULL;
|
||||
|
||||
bt_cb(nskb)->pkt_type = pkt_type;
|
||||
hci_skb_pkt_type(nskb) = pkt_type;
|
||||
|
||||
bcsp_slip_msgdelim(nskb);
|
||||
|
||||
|
@ -291,7 +291,10 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
|
|||
|
||||
skb = skb_dequeue(&bcsp->unrel);
|
||||
if (skb != NULL) {
|
||||
struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
|
||||
struct sk_buff *nskb;
|
||||
|
||||
nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
|
||||
hci_skb_pkt_type(skb));
|
||||
if (nskb) {
|
||||
kfree_skb(skb);
|
||||
return nskb;
|
||||
|
@ -310,8 +313,10 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
|
|||
if (bcsp->unack.qlen < BCSP_TXWINSIZE) {
|
||||
skb = skb_dequeue(&bcsp->rel);
|
||||
if (skb != NULL) {
|
||||
struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
|
||||
bt_cb(skb)->pkt_type);
|
||||
struct sk_buff *nskb;
|
||||
|
||||
nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
|
||||
hci_skb_pkt_type(skb));
|
||||
if (nskb) {
|
||||
__skb_queue_tail(&bcsp->unack, skb);
|
||||
mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
|
||||
|
@ -412,7 +417,7 @@ static void bcsp_handle_le_pkt(struct hci_uart *hu)
|
|||
if (!nskb)
|
||||
return;
|
||||
memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4);
|
||||
bt_cb(nskb)->pkt_type = BCSP_LE_PKT;
|
||||
hci_skb_pkt_type(nskb) = BCSP_LE_PKT;
|
||||
|
||||
skb_queue_head(&bcsp->unrel, nskb);
|
||||
hci_uart_tx_wakeup(hu);
|
||||
|
@ -494,14 +499,14 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
|
|||
bcsp_pkt_cull(bcsp);
|
||||
if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
|
||||
bcsp->rx_skb->data[0] & 0x80) {
|
||||
bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT;
|
||||
hci_skb_pkt_type(bcsp->rx_skb) = HCI_ACLDATA_PKT;
|
||||
pass_up = 1;
|
||||
} else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
|
||||
bcsp->rx_skb->data[0] & 0x80) {
|
||||
bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
|
||||
hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT;
|
||||
pass_up = 1;
|
||||
} else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
|
||||
bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT;
|
||||
hci_skb_pkt_type(bcsp->rx_skb) = HCI_SCODATA_PKT;
|
||||
pass_up = 1;
|
||||
} else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
|
||||
!(bcsp->rx_skb->data[0] & 0x80)) {
|
||||
|
@ -523,7 +528,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
|
|||
hdr.evt = 0xff;
|
||||
hdr.plen = bcsp->rx_skb->len;
|
||||
memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
|
||||
bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
|
||||
hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT;
|
||||
|
||||
hci_recv_frame(hu->hdev, bcsp->rx_skb);
|
||||
} else {
|
||||
|
|
|
@ -108,7 +108,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
BT_DBG("hu %p skb %p", hu, skb);
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
skb_queue_tail(&h4->txq, skb);
|
||||
|
||||
return 0;
|
||||
|
@ -184,8 +184,8 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
|
|||
if (!skb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
bt_cb(skb)->pkt_type = (&pkts[i])->type;
|
||||
bt_cb(skb)->expect = (&pkts[i])->hlen;
|
||||
hci_skb_pkt_type(skb) = (&pkts[i])->type;
|
||||
hci_skb_expect(skb) = (&pkts[i])->hlen;
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -197,18 +197,18 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
|
|||
buffer += 1;
|
||||
}
|
||||
|
||||
len = min_t(uint, bt_cb(skb)->expect - skb->len, count);
|
||||
len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
|
||||
memcpy(skb_put(skb, len), buffer, len);
|
||||
|
||||
count -= len;
|
||||
buffer += len;
|
||||
|
||||
/* Check for partial packet */
|
||||
if (skb->len < bt_cb(skb)->expect)
|
||||
if (skb->len < hci_skb_expect(skb))
|
||||
continue;
|
||||
|
||||
for (i = 0; i < pkts_count; i++) {
|
||||
if (bt_cb(skb)->pkt_type == (&pkts[i])->type)
|
||||
if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -228,7 +228,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
|
|||
case 1:
|
||||
/* Single octet variable length */
|
||||
dlen = skb->data[(&pkts[i])->loff];
|
||||
bt_cb(skb)->expect += dlen;
|
||||
hci_skb_expect(skb) += dlen;
|
||||
|
||||
if (skb_tailroom(skb) < dlen) {
|
||||
kfree_skb(skb);
|
||||
|
@ -239,7 +239,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
|
|||
/* Double octet variable length */
|
||||
dlen = get_unaligned_le16(skb->data +
|
||||
(&pkts[i])->loff);
|
||||
bt_cb(skb)->expect += dlen;
|
||||
hci_skb_expect(skb) += dlen;
|
||||
|
||||
if (skb_tailroom(skb) < dlen) {
|
||||
kfree_skb(skb);
|
||||
|
|
|
@ -51,7 +51,7 @@
|
|||
#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
|
||||
#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
|
||||
#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
|
||||
#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))
|
||||
#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))
|
||||
|
||||
#define SLIP_DELIMITER 0xc0
|
||||
#define SLIP_ESC 0xdb
|
||||
|
@ -107,7 +107,7 @@ static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
|
|||
if (!nskb)
|
||||
return;
|
||||
|
||||
bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT;
|
||||
hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;
|
||||
|
||||
memcpy(skb_put(nskb, len), data, len);
|
||||
|
||||
|
@ -119,7 +119,7 @@ static u8 h5_cfg_field(struct h5 *h5)
|
|||
u8 field = 0;
|
||||
|
||||
/* Sliding window size (first 3 bits) */
|
||||
field |= (h5->tx_win & 7);
|
||||
field |= (h5->tx_win & 0x07);
|
||||
|
||||
return field;
|
||||
}
|
||||
|
@ -360,7 +360,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
|
|||
case HCI_EVENT_PKT:
|
||||
case HCI_ACLDATA_PKT:
|
||||
case HCI_SCODATA_PKT:
|
||||
bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);
|
||||
hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);
|
||||
|
||||
/* Remove Three-wire header */
|
||||
skb_pull(h5->rx_skb, 4);
|
||||
|
@ -562,7 +562,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
return 0;
|
||||
}
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_ACLDATA_PKT:
|
||||
case HCI_COMMAND_PKT:
|
||||
skb_queue_tail(&h5->rel, skb);
|
||||
|
@ -573,7 +573,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
break;
|
||||
|
||||
default:
|
||||
BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
|
||||
BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
|
||||
kfree_skb(skb);
|
||||
break;
|
||||
}
|
||||
|
@ -642,7 +642,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
|
|||
if (!nskb)
|
||||
return NULL;
|
||||
|
||||
bt_cb(nskb)->pkt_type = pkt_type;
|
||||
hci_skb_pkt_type(nskb) = pkt_type;
|
||||
|
||||
h5_slip_delim(nskb);
|
||||
|
||||
|
@ -697,7 +697,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
|
|||
|
||||
skb = skb_dequeue(&h5->unrel);
|
||||
if (skb) {
|
||||
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
|
||||
nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
|
||||
skb->data, skb->len);
|
||||
if (nskb) {
|
||||
kfree_skb(skb);
|
||||
|
@ -715,7 +715,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
|
|||
|
||||
skb = skb_dequeue(&h5->rel);
|
||||
if (skb) {
|
||||
nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
|
||||
nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
|
||||
skb->data, skb->len);
|
||||
if (nskb) {
|
||||
__skb_queue_tail(&h5->unack, skb);
|
||||
|
|
|
@ -186,7 +186,7 @@ static int intel_lpm_suspend(struct hci_uart *hu)
|
|||
}
|
||||
|
||||
memcpy(skb_put(skb, sizeof(suspend)), suspend, sizeof(suspend));
|
||||
bt_cb(skb)->pkt_type = HCI_LPM_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_LPM_PKT;
|
||||
|
||||
set_bit(STATE_LPM_TRANSACTION, &intel->flags);
|
||||
|
||||
|
@ -230,7 +230,7 @@ static int intel_lpm_resume(struct hci_uart *hu)
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_LPM_WAKE_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_LPM_WAKE_PKT;
|
||||
|
||||
set_bit(STATE_LPM_TRANSACTION, &intel->flags);
|
||||
|
||||
|
@ -272,7 +272,7 @@ static int intel_lpm_host_wake(struct hci_uart *hu)
|
|||
|
||||
memcpy(skb_put(skb, sizeof(lpm_resume_ack)), lpm_resume_ack,
|
||||
sizeof(lpm_resume_ack));
|
||||
bt_cb(skb)->pkt_type = HCI_LPM_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_LPM_PKT;
|
||||
|
||||
/* LPM flow is a priority, enqueue packet at list head */
|
||||
skb_queue_head(&intel->txq, skb);
|
||||
|
@ -467,7 +467,7 @@ static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
|
|||
|
||||
*skb_put(skb, 1) = 0x00;
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
|
||||
|
||||
return hci_recv_frame(hdev, skb);
|
||||
}
|
||||
|
@ -517,7 +517,7 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
|
|||
}
|
||||
|
||||
memcpy(skb_put(skb, sizeof(speed_cmd)), speed_cmd, sizeof(speed_cmd));
|
||||
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
|
||||
|
||||
hci_uart_set_flow_control(hu, true);
|
||||
|
||||
|
@ -1126,7 +1126,7 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
|
|||
return skb;
|
||||
|
||||
if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
|
||||
(bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) {
|
||||
(hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)) {
|
||||
struct hci_command_hdr *cmd = (void *)skb->data;
|
||||
__u16 opcode = le16_to_cpu(cmd->opcode);
|
||||
|
||||
|
@ -1140,7 +1140,7 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
|
|||
}
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
|
|
@ -162,7 +162,7 @@ static void hci_uart_write_work(struct work_struct *work)
|
|||
break;
|
||||
}
|
||||
|
||||
hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type);
|
||||
hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
|
@ -248,7 +248,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
{
|
||||
struct hci_uart *hu = hci_get_drvdata(hdev);
|
||||
|
||||
BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
|
||||
BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb),
|
||||
skb->len);
|
||||
|
||||
hu->proto->enqueue(hu, skb);
|
||||
|
||||
|
|
|
@ -307,7 +307,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
BT_DBG("hu %p skb %p", hu, skb);
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
|
||||
/* lock hcill state */
|
||||
spin_lock_irqsave(&ll->hcill_lock, flags);
|
||||
|
@ -493,7 +493,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
bt_cb(ll->rx_skb)->pkt_type = type;
|
||||
hci_skb_pkt_type(ll->rx_skb) = type;
|
||||
}
|
||||
|
||||
return count;
|
||||
|
|
|
@ -678,7 +678,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
|||
qca->tx_ibs_state);
|
||||
|
||||
/* Prepend skb with frame type */
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
|
||||
/* Don't go to sleep in middle of patch download or
|
||||
* Out-Of-Band(GPIOs control) sleep is selected.
|
||||
|
@ -873,7 +873,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
|
|||
|
||||
/* Assign commands to change baudrate and packet type. */
|
||||
memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
|
||||
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
|
||||
|
||||
skb_queue_tail(&qca->txq, skb);
|
||||
hci_uart_tx_wakeup(hu);
|
||||
|
|
|
@ -80,7 +80,7 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
{
|
||||
struct vhci_data *data = hci_get_drvdata(hdev);
|
||||
|
||||
memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
|
||||
skb_queue_tail(&data->readq, skb);
|
||||
|
||||
wake_up_interruptible(&data->read_wait);
|
||||
|
@ -140,7 +140,7 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
|
|||
return -EBUSY;
|
||||
}
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
|
||||
|
||||
*skb_put(skb, 1) = 0xff;
|
||||
*skb_put(skb, 1) = opcode;
|
||||
|
@ -183,7 +183,7 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
bt_cb(skb)->pkt_type = pkt_type;
|
||||
hci_skb_pkt_type(skb) = pkt_type;
|
||||
|
||||
ret = hci_recv_frame(data->hdev, skb);
|
||||
break;
|
||||
|
@ -234,7 +234,7 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
|
|||
|
||||
data->hdev->stat.byte_tx += len;
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
data->hdev->stat.cmd_tx++;
|
||||
break;
|
||||
|
|
|
@@ -29,6 +29,8 @@
 #include <net/sock.h>
 #include <linux/seq_file.h>

+#define BT_SUBSYS_VERSION "2.21"
+
 #ifndef AF_BLUETOOTH
 #define AF_BLUETOOTH 31
 #define PF_BLUETOOTH AF_BLUETOOTH

@@ -296,12 +298,17 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode);
 typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
 u16 opcode, struct sk_buff *skb);

+#define HCI_REQ_START BIT(0)
+#define HCI_REQ_SKB BIT(1)
+
 struct hci_ctrl {
 __u16 opcode;
-bool req_start;
+u8 req_flags;
 u8 req_event;
-hci_req_complete_t req_complete;
-hci_req_complete_skb_t req_complete_skb;
+union {
+hci_req_complete_t req_complete;
+hci_req_complete_skb_t req_complete_skb;
+};
 };

 struct bt_skb_cb {

@@ -316,15 +323,17 @@ struct bt_skb_cb {
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))

+#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
+#define hci_skb_expect(skb) bt_cb((skb))->expect
+#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
+
 static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
 {
 struct sk_buff *skb;

 skb = alloc_skb(len + BT_SKB_RESERVE, how);
-if (skb) {
+if (skb)
 skb_reserve(skb, BT_SKB_RESERVE);
-bt_cb(skb)->incoming = 0;
-}
 return skb;
 }

@@ -334,10 +343,8 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
 struct sk_buff *skb;

 skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
-if (skb) {
+if (skb)
 skb_reserve(skb, BT_SKB_RESERVE);
-bt_cb(skb)->incoming = 0;
-}

 if (!skb && *err)
 return NULL;

@@ -452,7 +452,8 @@ enum {
 #define HCI_ERROR_REMOTE_POWER_OFF 0x15
 #define HCI_ERROR_LOCAL_HOST_TERM 0x16
 #define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18
-#define HCI_ERROR_INVALID_LL_PARAMS 0x1E
+#define HCI_ERROR_INVALID_LL_PARAMS 0x1e
 #define HCI_ERROR_UNSPECIFIED 0x1f
 #define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c

 /* Flow control modes */

@@ -327,6 +327,11 @@ struct hci_dev {
 struct work_struct cmd_work;
 struct work_struct tx_work;

+struct work_struct discov_update;
+struct work_struct bg_scan_update;
+struct delayed_work le_scan_disable;
+struct delayed_work le_scan_restart;
+
 struct sk_buff_head rx_q;
 struct sk_buff_head raw_q;
 struct sk_buff_head cmd_q;

@@ -370,9 +375,6 @@ struct hci_dev {
 DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);

-struct delayed_work le_scan_disable;
-struct delayed_work le_scan_restart;
-
 __s8 adv_tx_power;
 __u8 adv_data[HCI_MAX_AD_LENGTH];
 __u8 adv_data_len;

@@ -875,7 +877,7 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);

 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
 u8 dst_type, u8 sec_level,
-u16 conn_timeout, u8 role);
+u16 conn_timeout);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 u8 dst_type, u8 sec_level, u16 conn_timeout,
 u8 role);

@@ -1036,7 +1038,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 bdaddr_t *addr, u8 addr_type);
 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
-void hci_conn_params_clear_all(struct hci_dev *hdev);
 void hci_conn_params_clear_disabled(struct hci_dev *hdev);

 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,

@@ -1473,6 +1474,8 @@ void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
 u8 status);
 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);
+void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);
 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);

@@ -43,6 +43,8 @@ struct hci_mon_hdr {
 #define HCI_MON_CLOSE_INDEX 9
 #define HCI_MON_INDEX_INFO 10
 #define HCI_MON_VENDOR_DIAG 11
+#define HCI_MON_SYSTEM_NOTE 12
+#define HCI_MON_USER_LOGGING 13

 struct hci_mon_new_index {
 __u8 type;

@@ -45,6 +45,7 @@ struct sockaddr_hci {
 #define HCI_CHANNEL_USER 1
 #define HCI_CHANNEL_MONITOR 2
 #define HCI_CHANNEL_CONTROL 3
+#define HCI_CHANNEL_LOGGING 4

 struct hci_filter {
 unsigned long type_mask;

@@ -571,6 +571,19 @@ struct mgmt_rp_remove_advertising {
 __u8 instance;
 } __packed;

+#define MGMT_OP_GET_ADV_SIZE_INFO 0x0040
+struct mgmt_cp_get_adv_size_info {
+__u8 instance;
+__le32 flags;
+} __packed;
+#define MGMT_GET_ADV_SIZE_INFO_SIZE 5
+struct mgmt_rp_get_adv_size_info {
+__u8 instance;
+__le32 flags;
+__u8 max_adv_data_len;
+__u8 max_scan_rsp_len;
+} __packed;
+
 #define MGMT_EV_CMD_COMPLETE 0x0001
 struct mgmt_ev_cmd_complete {
 __le16 opcode;

@@ -33,8 +33,6 @@
 #include "selftest.h"

-#define VERSION "2.21"
-
 /* Bluetooth sockets */
 #define BT_MAX_PROTO 8
 static const struct net_proto_family *bt_proto[BT_MAX_PROTO];

@@ -671,7 +669,7 @@ static const struct file_operations bt_fops = {
 };

 int bt_procfs_init(struct net *net, const char *name,
-struct bt_sock_list* sk_list,
+struct bt_sock_list *sk_list,
 int (* seq_show)(struct seq_file *, void *))
 {
 sk_list->custom_seq_show = seq_show;

@@ -687,7 +685,7 @@ void bt_procfs_cleanup(struct net *net, const char *name)
 }
 #else
 int bt_procfs_init(struct net *net, const char *name,
-struct bt_sock_list* sk_list,
+struct bt_sock_list *sk_list,
 int (* seq_show)(struct seq_file *, void *))
 {
 return 0;

@@ -715,7 +713,7 @@ static int __init bt_init(void)

 sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

-BT_INFO("Core ver %s", VERSION);
+BT_INFO("Core ver %s", BT_SUBSYS_VERSION);

 err = bt_selftest();
 if (err < 0)

@@ -789,7 +787,7 @@ subsys_initcall(bt_init);
 module_exit(bt_exit);

 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
-MODULE_VERSION(VERSION);
+MODULE_DESCRIPTION("Bluetooth Core ver " BT_SUBSYS_VERSION);
+MODULE_VERSION(BT_SUBSYS_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);

@@ -178,8 +178,7 @@ static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *
 cmtp_add_msgpart(session, id, skb->data + hdrlen, len);
 break;
 default:
-if (session->reassembly[id] != NULL)
-kfree_skb(session->reassembly[id]);
+kfree_skb(session->reassembly[id]);
 session->reassembly[id] = NULL;
 break;
 }

@ -178,6 +178,10 @@ static void hci_connect_le_scan_remove(struct hci_conn *conn)
|
|||
hci_dev_hold(conn->hdev);
|
||||
hci_conn_get(conn);
|
||||
|
||||
/* Even though we hold a reference to the hdev, many other
|
||||
* things might get cleaned up meanwhile, including the hdev's
|
||||
* own workqueue, so we can't use that for scheduling.
|
||||
*/
|
||||
schedule_work(&conn->le_scan_cleanup);
|
||||
}
|
||||
|
||||
|
@ -781,7 +785,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
|||
u8 role)
|
||||
{
|
||||
struct hci_conn_params *params;
|
||||
struct hci_conn *conn, *conn_unfinished;
|
||||
struct hci_conn *conn;
|
||||
struct smp_irk *irk;
|
||||
struct hci_request req;
|
||||
int err;
|
||||
|
@ -794,35 +798,22 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
|||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
/* Some devices send ATT messages as soon as the physical link is
|
||||
* established. To be able to handle these ATT messages, the user-
|
||||
* space first establishes the connection and then starts the pairing
|
||||
* process.
|
||||
*
|
||||
* So if a hci_conn object already exists for the following connection
|
||||
* attempt, we simply update pending_sec_level and auth_type fields
|
||||
* and return the object found.
|
||||
*/
|
||||
conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
|
||||
conn_unfinished = NULL;
|
||||
if (conn) {
|
||||
if (conn->state == BT_CONNECT &&
|
||||
test_bit(HCI_CONN_SCANNING, &conn->flags)) {
|
||||
BT_DBG("will continue unfinished conn %pMR", dst);
|
||||
conn_unfinished = conn;
|
||||
} else {
|
||||
if (conn->pending_sec_level < sec_level)
|
||||
conn->pending_sec_level = sec_level;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
/* Since the controller supports only one LE connection attempt at a
|
||||
* time, we return -EBUSY if there is any connection attempt running.
|
||||
*/
|
||||
if (hci_lookup_le_connect(hdev))
|
||||
return ERR_PTR(-EBUSY);
|
||||
|
||||
/* If there's already a connection object but it's not in
|
||||
* scanning state it means it must already be established, in
|
||||
* which case we can't do anything else except report a failure
|
||||
* to connect.
|
||||
*/
|
||||
conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
|
||||
if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) {
|
||||
return ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
/* When given an identity address with existing identity
|
||||
* resolving key, the connection needs to be established
|
||||
* to a resolvable random address.
|
||||
|
@ -838,23 +829,20 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
|||
dst_type = ADDR_LE_DEV_RANDOM;
|
||||
}
|
||||
|
||||
if (conn_unfinished) {
|
||||
conn = conn_unfinished;
|
||||
if (conn) {
|
||||
bacpy(&conn->dst, dst);
|
||||
} else {
|
||||
conn = hci_conn_add(hdev, LE_LINK, dst, role);
|
||||
if (!conn)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
hci_conn_hold(conn);
|
||||
conn->pending_sec_level = sec_level;
|
||||
}
|
||||
|
||||
if (!conn)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
conn->dst_type = dst_type;
|
||||
conn->sec_level = BT_SECURITY_LOW;
|
||||
conn->conn_timeout = conn_timeout;
|
||||
|
||||
if (!conn_unfinished)
|
||||
conn->pending_sec_level = sec_level;
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
/* Disable advertising if we're active. For master role
|
||||
|
@ -918,37 +906,9 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
|||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
done:
|
||||
/* If this is continuation of connect started by hci_connect_le_scan,
|
||||
* it already called hci_conn_hold and calling it again would mess the
|
||||
* counter.
|
||||
*/
|
||||
if (!conn_unfinished)
|
||||
hci_conn_hold(conn);
|
||||
|
||||
return conn;
|
||||
}
|
||||
|
||||
static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
|
||||
u16 opcode)
|
||||
{
|
||||
struct hci_conn *conn;
|
||||
|
||||
if (!status)
|
||||
return;
|
||||
|
||||
BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
|
||||
status);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
||||
if (conn)
|
||||
hci_le_conn_failed(conn, status);
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
|
||||
{
|
||||
struct hci_conn *conn;
|
||||
|
@ -964,10 +924,9 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
|
|||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
static int hci_explicit_conn_params_set(struct hci_request *req,
|
||||
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
|
||||
bdaddr_t *addr, u8 addr_type)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_conn_params *params;
|
||||
|
||||
if (is_connected(hdev, addr, addr_type))
|
||||
|
@ -995,7 +954,6 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
|
|||
}
|
||||
|
||||
params->explicit_connect = true;
|
||||
__hci_update_background_scan(req);
|
||||
|
||||
BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
|
||||
params->auto_connect);
|
||||
|
@ -1006,11 +964,9 @@ static int hci_explicit_conn_params_set(struct hci_request *req,
|
|||
/* This function requires the caller holds hdev->lock */
|
||||
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
|
||||
u8 dst_type, u8 sec_level,
|
||||
u16 conn_timeout, u8 role)
|
||||
u16 conn_timeout)
|
||||
{
|
||||
struct hci_conn *conn;
|
||||
struct hci_request req;
|
||||
int err;
|
||||
|
||||
/* Let's make sure that le is enabled.*/
|
||||
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
|
||||
|
@ -1038,29 +994,22 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
|
|||
|
||||
BT_DBG("requesting refresh of dst_addr");
|
||||
|
||||
conn = hci_conn_add(hdev, LE_LINK, dst, role);
|
||||
conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
|
||||
if (!conn)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
|
||||
if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0)
|
||||
return ERR_PTR(-EBUSY);
|
||||
|
||||
conn->state = BT_CONNECT;
|
||||
set_bit(HCI_CONN_SCANNING, &conn->flags);
|
||||
|
||||
err = hci_req_run(&req, hci_connect_le_scan_complete);
|
||||
if (err && err != -ENODATA) {
|
||||
hci_conn_del(conn);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
conn->dst_type = dst_type;
|
||||
conn->sec_level = BT_SECURITY_LOW;
|
||||
conn->pending_sec_level = sec_level;
|
||||
conn->conn_timeout = conn_timeout;
|
||||
|
||||
hci_update_background_scan(hdev);
|
||||
|
||||
done:
|
||||
hci_conn_hold(conn);
|
||||
return conn;
|
||||
|
|
(File diff suppressed because it is too large.)
|
@ -27,6 +27,10 @@
|
|||
#include "smp.h"
|
||||
#include "hci_request.h"
|
||||
|
||||
#define HCI_REQ_DONE 0
|
||||
#define HCI_REQ_PEND 1
|
||||
#define HCI_REQ_CANCELED 2
|
||||
|
||||
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
|
||||
{
|
||||
skb_queue_head_init(&req->cmd_q);
|
||||
|
@ -56,8 +60,12 @@ static int req_run(struct hci_request *req, hci_req_complete_t complete,
|
|||
return -ENODATA;
|
||||
|
||||
skb = skb_peek_tail(&req->cmd_q);
|
||||
bt_cb(skb)->hci.req_complete = complete;
|
||||
bt_cb(skb)->hci.req_complete_skb = complete_skb;
|
||||
if (complete) {
|
||||
bt_cb(skb)->hci.req_complete = complete;
|
||||
} else if (complete_skb) {
|
||||
bt_cb(skb)->hci.req_complete_skb = complete_skb;
|
||||
bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&hdev->cmd_q.lock, flags);
|
||||
skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
|
||||
|
@ -78,6 +86,203 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
|
|||
return req_run(req, NULL, complete);
|
||||
}
|
||||
|
||||
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
BT_DBG("%s result 0x%2.2x", hdev->name, result);
|
||||
|
||||
if (hdev->req_status == HCI_REQ_PEND) {
|
||||
hdev->req_result = result;
|
||||
hdev->req_status = HCI_REQ_DONE;
|
||||
if (skb)
|
||||
hdev->req_skb = skb_get(skb);
|
||||
wake_up_interruptible(&hdev->req_wait_q);
|
||||
}
|
||||
}
|
||||
|
||||
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
|
||||
{
|
||||
BT_DBG("%s err 0x%2.2x", hdev->name, err);
|
||||
|
||||
if (hdev->req_status == HCI_REQ_PEND) {
|
||||
hdev->req_result = err;
|
||||
hdev->req_status = HCI_REQ_CANCELED;
|
||||
wake_up_interruptible(&hdev->req_wait_q);
|
||||
}
|
||||
}
|
||||
|
||||
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
|
||||
const void *param, u8 event, u32 timeout)
|
||||
{
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
struct hci_request req;
|
||||
struct sk_buff *skb;
|
||||
int err = 0;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
hci_req_add_ev(&req, opcode, plen, param, event);
|
||||
|
||||
hdev->req_status = HCI_REQ_PEND;
|
||||
|
||||
add_wait_queue(&hdev->req_wait_q, &wait);
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
err = hci_req_run_skb(&req, hci_req_sync_complete);
|
||||
if (err < 0) {
|
||||
remove_wait_queue(&hdev->req_wait_q, &wait);
|
||||
set_current_state(TASK_RUNNING);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
schedule_timeout(timeout);
|
||||
|
||||
remove_wait_queue(&hdev->req_wait_q, &wait);
|
||||
|
||||
if (signal_pending(current))
|
||||
return ERR_PTR(-EINTR);
|
||||
|
||||
switch (hdev->req_status) {
|
||||
case HCI_REQ_DONE:
|
||||
err = -bt_to_errno(hdev->req_result);
|
||||
break;
|
||||
|
||||
case HCI_REQ_CANCELED:
|
||||
err = -hdev->req_result;
|
||||
break;
|
||||
|
||||
default:
|
||||
err = -ETIMEDOUT;
|
||||
break;
|
||||
}
|
||||
|
||||
hdev->req_status = hdev->req_result = 0;
|
||||
skb = hdev->req_skb;
|
||||
hdev->req_skb = NULL;
|
||||
|
||||
BT_DBG("%s end: err %d", hdev->name, err);
|
||||
|
||||
if (err < 0) {
|
||||
kfree_skb(skb);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
if (!skb)
|
||||
return ERR_PTR(-ENODATA);
|
||||
|
||||
return skb;
|
||||
}
|
||||
EXPORT_SYMBOL(__hci_cmd_sync_ev);
|
||||
|
||||
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
|
||||
const void *param, u32 timeout)
|
||||
{
|
||||
return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
|
||||
}
|
||||
EXPORT_SYMBOL(__hci_cmd_sync);
|
||||
|
||||
/* Execute request and wait for completion. */
|
||||
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
|
||||
unsigned long opt),
|
||||
unsigned long opt, u32 timeout, u8 *hci_status)
|
||||
{
|
||||
struct hci_request req;
|
||||
DECLARE_WAITQUEUE(wait, current);
|
||||
int err = 0;
|
||||
|
||||
BT_DBG("%s start", hdev->name);
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
hdev->req_status = HCI_REQ_PEND;
|
||||
|
||||
err = func(&req, opt);
|
||||
if (err) {
|
||||
if (hci_status)
|
||||
*hci_status = HCI_ERROR_UNSPECIFIED;
|
||||
return err;
|
||||
}
|
||||
|
||||
add_wait_queue(&hdev->req_wait_q, &wait);
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
err = hci_req_run_skb(&req, hci_req_sync_complete);
|
||||
if (err < 0) {
|
||||
hdev->req_status = 0;
|
||||
|
||||
remove_wait_queue(&hdev->req_wait_q, &wait);
|
||||
set_current_state(TASK_RUNNING);
|
||||
|
||||
/* ENODATA means the HCI request command queue is empty.
|
||||
* This can happen when a request with conditionals doesn't
|
||||
* trigger any commands to be sent. This is normal behavior
|
||||
* and should not trigger an error return.
|
||||
*/
|
||||
if (err == -ENODATA) {
|
||||
if (hci_status)
|
||||
*hci_status = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (hci_status)
|
||||
*hci_status = HCI_ERROR_UNSPECIFIED;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
schedule_timeout(timeout);
|
||||
|
||||
remove_wait_queue(&hdev->req_wait_q, &wait);
|
||||
|
||||
if (signal_pending(current))
|
||||
return -EINTR;
|
||||
|
||||
switch (hdev->req_status) {
|
||||
case HCI_REQ_DONE:
|
||||
err = -bt_to_errno(hdev->req_result);
|
||||
if (hci_status)
|
||||
*hci_status = hdev->req_result;
|
||||
break;
|
||||
|
||||
case HCI_REQ_CANCELED:
|
||||
err = -hdev->req_result;
|
||||
if (hci_status)
|
||||
*hci_status = HCI_ERROR_UNSPECIFIED;
|
||||
break;
|
||||
|
||||
default:
|
||||
err = -ETIMEDOUT;
|
||||
if (hci_status)
|
||||
*hci_status = HCI_ERROR_UNSPECIFIED;
|
||||
break;
|
||||
}
|
||||
|
||||
hdev->req_status = hdev->req_result = 0;
|
||||
|
||||
BT_DBG("%s end: err %d", hdev->name, err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
|
||||
unsigned long opt),
|
||||
unsigned long opt, u32 timeout, u8 *hci_status)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!test_bit(HCI_UP, &hdev->flags))
|
||||
return -ENETDOWN;
|
||||
|
||||
/* Serialize all requests */
|
||||
hci_req_sync_lock(hdev);
|
||||
ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
|
||||
hci_req_sync_unlock(hdev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
|
||||
const void *param)
|
||||
{
|
||||
|
@ -98,8 +303,8 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
|
|||
|
||||
BT_DBG("skb len %d", skb->len);
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
|
||||
bt_cb(skb)->hci.opcode = opcode;
|
||||
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
|
||||
hci_skb_opcode(skb) = opcode;
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
@ -128,7 +333,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
|
|||
}
|
||||
|
||||
if (skb_queue_empty(&req->cmd_q))
|
||||
bt_cb(skb)->hci.req_start = true;
|
||||
bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
|
||||
|
||||
bt_cb(skb)->hci.req_event = event;
|
||||
|
||||
|
@ -476,7 +681,7 @@ void hci_update_page_scan(struct hci_dev *hdev)
|
|||
*
|
||||
* This function requires the caller holds hdev->lock.
|
||||
*/
|
||||
void __hci_update_background_scan(struct hci_request *req)
|
||||
static void __hci_update_background_scan(struct hci_request *req)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
|
||||
|
@ -543,28 +748,6 @@ void __hci_update_background_scan(struct hci_request *req)
|
|||
}
|
||||
}
|
||||
|
||||
static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
|
||||
u16 opcode)
|
||||
{
|
||||
if (status)
|
||||
BT_DBG("HCI request failed to update background scanning: "
|
||||
"status 0x%2.2x", status);
|
||||
}
|
||||
|
||||
void hci_update_background_scan(struct hci_dev *hdev)
|
||||
{
|
||||
int err;
|
||||
struct hci_request req;
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
__hci_update_background_scan(&req);
|
||||
|
||||
err = hci_req_run(&req, update_background_scan_complete);
|
||||
if (err && err != -ENODATA)
|
||||
BT_ERR("Failed to run HCI request: err %d", err);
|
||||
}
|
||||
|
||||
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
|
||||
u8 reason)
|
||||
{
|
||||
|
@ -657,3 +840,446 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int update_bg_scan(struct hci_request *req, unsigned long opt)
|
||||
{
|
||||
hci_dev_lock(req->hdev);
|
||||
__hci_update_background_scan(req);
|
||||
hci_dev_unlock(req->hdev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bg_scan_update(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev,
|
||||
bg_scan_update);
|
||||
struct hci_conn *conn;
|
||||
u8 status;
|
||||
int err;
|
||||
|
||||
err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
|
||||
if (!err)
|
||||
return;
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
||||
if (conn)
|
||||
hci_le_conn_failed(conn, status);
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static int le_scan_disable(struct hci_request *req, unsigned long opt)
|
||||
{
|
||||
hci_req_add_le_scan_disable(req);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
|
||||
{
|
||||
u8 length = opt;
|
||||
/* General inquiry access code (GIAC) */
|
||||
u8 lap[3] = { 0x33, 0x8b, 0x9e };
|
||||
struct hci_cp_inquiry cp;
|
||||
|
||||
BT_DBG("%s", req->hdev->name);
|
||||
|
||||
hci_dev_lock(req->hdev);
|
||||
hci_inquiry_cache_flush(req->hdev);
|
||||
hci_dev_unlock(req->hdev);
|
||||
|
||||
memset(&cp, 0, sizeof(cp));
|
||||
memcpy(&cp.lap, lap, sizeof(cp.lap));
|
||||
cp.length = length;
|
||||
|
||||
hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void le_scan_disable_work(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev,
|
||||
le_scan_disable.work);
|
||||
u8 status;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
|
||||
return;
|
||||
|
||||
cancel_delayed_work(&hdev->le_scan_restart);
|
||||
|
||||
hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
|
||||
if (status) {
|
||||
BT_ERR("Failed to disable LE scan: status 0x%02x", status);
|
||||
return;
|
||||
}
|
||||
|
||||
hdev->discovery.scan_start = 0;
|
||||
|
||||
/* If we were running LE only scan, change discovery state. If
|
||||
* we were running both LE and BR/EDR inquiry simultaneously,
|
||||
* and BR/EDR inquiry is already finished, stop discovery,
|
||||
* otherwise BR/EDR inquiry will stop discovery when finished.
|
||||
* If we are resolving a remote device name, do not change
|
||||
* discovery state.
|
||||
*/
|
||||
|
||||
if (hdev->discovery.type == DISCOV_TYPE_LE)
|
||||
goto discov_stopped;
|
||||
|
||||
if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
|
||||
return;
|
||||
|
||||
if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
|
||||
if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
|
||||
hdev->discovery.state != DISCOVERY_RESOLVING)
|
||||
goto discov_stopped;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
|
||||
HCI_CMD_TIMEOUT, &status);
|
||||
if (status) {
|
||||
BT_ERR("Inquiry failed: status 0x%02x", status);
|
||||
goto discov_stopped;
|
||||
}
|
||||
|
||||
return;
|
||||
|
||||
discov_stopped:
|
||||
hci_dev_lock(hdev);
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static int le_scan_restart(struct hci_request *req, unsigned long opt)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_cp_le_set_scan_enable cp;
|
||||
|
||||
/* If controller is not scanning we are done. */
|
||||
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
|
||||
return 0;
|
||||
|
||||
hci_req_add_le_scan_disable(req);
|
||||
|
||||
memset(&cp, 0, sizeof(cp));
|
||||
cp.enable = LE_SCAN_ENABLE;
|
||||
cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
|
||||
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void le_scan_restart_work(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev,
|
||||
le_scan_restart.work);
|
||||
unsigned long timeout, duration, scan_start, now;
|
||||
u8 status;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
|
||||
if (status) {
|
||||
BT_ERR("Failed to restart LE scan: status %d", status);
|
||||
return;
|
||||
}
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
|
||||
!hdev->discovery.scan_start)
|
||||
goto unlock;
|
||||
|
||||
/* When the scan was started, hdev->le_scan_disable has been queued
|
||||
* after duration from scan_start. During scan restart this job
|
||||
* has been canceled, and we need to queue it again with the remaining
|
||||
* timeout, to make sure that the scan does not run indefinitely.
|
||||
*/
|
||||
duration = hdev->discovery.scan_duration;
|
||||
scan_start = hdev->discovery.scan_start;
|
||||
now = jiffies;
|
||||
if (now - scan_start <= duration) {
|
||||
int elapsed;
|
||||
|
||||
if (now >= scan_start)
|
||||
elapsed = now - scan_start;
|
||||
else
|
||||
elapsed = ULONG_MAX - scan_start + now;
|
||||
|
||||
timeout = duration - elapsed;
|
||||
} else {
|
||||
timeout = 0;
|
||||
}
|
||||
|
||||
queue_delayed_work(hdev->req_workqueue,
|
||||
&hdev->le_scan_disable, timeout);
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
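/* Worked example of the rescheduling arithmetic above (illustrative
 * numbers only): with scan_duration = 10240 jiffies and scan_start =
 * ULONG_MAX - 100, a current jiffies value of 200 takes the wrapped
 * branch, so elapsed = ULONG_MAX - scan_start + now = 100 + 200 = 300
 * and le_scan_disable is re-queued after 10240 - 300 = 9940 jiffies
 * instead of a full scan_duration.
 */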
static void cancel_adv_timeout(struct hci_dev *hdev)
|
||||
{
|
||||
if (hdev->adv_instance_timeout) {
|
||||
hdev->adv_instance_timeout = 0;
|
||||
cancel_delayed_work(&hdev->adv_instance_expire);
|
||||
}
|
||||
}
|
||||
|
||||
static void disable_advertising(struct hci_request *req)
|
||||
{
|
||||
u8 enable = 0x00;
|
||||
|
||||
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
|
||||
}
|
||||
|
||||
static int active_scan(struct hci_request *req, unsigned long opt)
|
||||
{
|
||||
uint16_t interval = opt;
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_cp_le_set_scan_param param_cp;
|
||||
struct hci_cp_le_set_scan_enable enable_cp;
|
||||
u8 own_addr_type;
|
||||
int err;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
/* Don't let discovery abort an outgoing connection attempt
|
||||
* that's using directed advertising.
|
||||
*/
|
||||
if (hci_lookup_le_connect(hdev)) {
|
||||
hci_dev_unlock(hdev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
cancel_adv_timeout(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
disable_advertising(req);
|
||||
}
|
||||
|
||||
/* If controller is scanning, it means the background scanning is
|
||||
* running. Thus, we should temporarily stop it in order to set the
|
||||
* discovery scanning parameters.
|
||||
*/
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
|
||||
hci_req_add_le_scan_disable(req);
|
||||
|
||||
/* All active scans will be done with either a resolvable private
|
||||
* address (when privacy feature has been enabled) or non-resolvable
|
||||
* private address.
|
||||
*/
|
||||
err = hci_update_random_address(req, true, &own_addr_type);
|
||||
if (err < 0)
|
||||
own_addr_type = ADDR_LE_DEV_PUBLIC;
|
||||
|
||||
memset(&param_cp, 0, sizeof(param_cp));
|
||||
param_cp.type = LE_SCAN_ACTIVE;
|
||||
param_cp.interval = cpu_to_le16(interval);
|
||||
param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
|
||||
param_cp.own_address_type = own_addr_type;
|
||||
|
||||
hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
|
||||
&param_cp);
|
||||
|
||||
memset(&enable_cp, 0, sizeof(enable_cp));
|
||||
enable_cp.enable = LE_SCAN_ENABLE;
|
||||
enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
|
||||
|
||||
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
|
||||
&enable_cp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int interleaved_discov(struct hci_request *req, unsigned long opt)
|
||||
{
|
||||
int err;
|
||||
|
||||
BT_DBG("%s", req->hdev->name);
|
||||
|
||||
err = active_scan(req, opt);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
|
||||
}
|
||||
|
||||
static void start_discovery(struct hci_dev *hdev, u8 *status)
|
||||
{
|
||||
unsigned long timeout;
|
||||
|
||||
BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
|
||||
|
||||
switch (hdev->discovery.type) {
|
||||
case DISCOV_TYPE_BREDR:
|
||||
if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
|
||||
hci_req_sync(hdev, bredr_inquiry,
|
||||
DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
|
||||
status);
|
||||
return;
|
||||
case DISCOV_TYPE_INTERLEAVED:
|
||||
/* When running simultaneous discovery, the LE scanning time
|
||||
* should occupy the whole discovery time since BR/EDR inquiry
|
||||
* and LE scanning are scheduled by the controller.
|
||||
*
|
||||
* For interleaving discovery in comparison, BR/EDR inquiry
|
||||
* and LE scanning are done sequentially with separate
|
||||
* timeouts.
|
||||
*/
|
||||
if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
|
||||
&hdev->quirks)) {
|
||||
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
|
||||
/* During simultaneous discovery, we double LE scan
|
||||
* interval. We must leave some time for the controller
|
||||
* to do BR/EDR inquiry.
|
||||
*/
|
||||
hci_req_sync(hdev, interleaved_discov,
|
||||
DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
|
||||
status);
|
||||
break;
|
||||
}
|
||||
|
||||
timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
|
||||
hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
|
||||
HCI_CMD_TIMEOUT, status);
|
||||
break;
|
||||
case DISCOV_TYPE_LE:
|
||||
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
|
||||
hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
|
||||
HCI_CMD_TIMEOUT, status);
|
||||
break;
|
||||
default:
|
||||
*status = HCI_ERROR_UNSPECIFIED;
|
||||
return;
|
||||
}
|
||||
|
||||
if (*status)
|
||||
return;
|
||||
|
||||
BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
|
||||
|
||||
/* When service discovery is used and the controller has a
|
||||
* strict duplicate filter, it is important to remember the
|
||||
* start and duration of the scan. This is required for
|
||||
* restarting scanning during the discovery phase.
|
||||
*/
|
||||
if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
|
||||
hdev->discovery.result_filtering) {
|
||||
hdev->discovery.scan_start = jiffies;
|
||||
hdev->discovery.scan_duration = timeout;
|
||||
}
|
||||
|
||||
queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
|
||||
timeout);
|
||||
}
|
||||
|
||||
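/* Example of the timeout selection above (summary, not part of this
 * change): an interleaved discovery on a controller with
 * HCI_QUIRK_SIMULTANEOUS_DISCOVERY runs interleaved_discov() and keeps
 * scanning for DISCOV_LE_TIMEOUT msecs, while a controller without the
 * quirk runs active_scan() for hdev->discov_interleaved_timeout msecs
 * and lets le_scan_disable_work() kick off bredr_inquiry() once the LE
 * scan has been stopped.
 */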
bool hci_req_stop_discovery(struct hci_request *req)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct discovery_state *d = &hdev->discovery;
|
||||
struct hci_cp_remote_name_req_cancel cp;
|
||||
struct inquiry_entry *e;
|
||||
bool ret = false;
|
||||
|
||||
BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
|
||||
|
||||
if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
|
||||
if (test_bit(HCI_INQUIRY, &hdev->flags))
|
||||
hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
|
||||
cancel_delayed_work(&hdev->le_scan_disable);
|
||||
hci_req_add_le_scan_disable(req);
|
||||
}
|
||||
|
||||
ret = true;
|
||||
} else {
|
||||
/* Passive scanning */
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
|
||||
hci_req_add_le_scan_disable(req);
|
||||
ret = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* No further actions needed for LE-only discovery */
|
||||
if (d->type == DISCOV_TYPE_LE)
|
||||
return ret;
|
||||
|
||||
if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
|
||||
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
|
||||
NAME_PENDING);
|
||||
if (!e)
|
||||
return ret;
|
||||
|
||||
bacpy(&cp.bdaddr, &e->data.bdaddr);
|
||||
hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
|
||||
&cp);
|
||||
ret = true;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int stop_discovery(struct hci_request *req, unsigned long opt)
|
||||
{
|
||||
hci_dev_lock(req->hdev);
|
||||
hci_req_stop_discovery(req);
|
||||
hci_dev_unlock(req->hdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void discov_update(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev = container_of(work, struct hci_dev,
|
||||
discov_update);
|
||||
u8 status = 0;
|
||||
|
||||
switch (hdev->discovery.state) {
|
||||
case DISCOVERY_STARTING:
|
||||
start_discovery(hdev, &status);
|
||||
mgmt_start_discovery_complete(hdev, status);
|
||||
if (status)
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
|
||||
else
|
||||
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
|
||||
break;
|
||||
case DISCOVERY_STOPPING:
|
||||
hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
|
||||
mgmt_stop_discovery_complete(hdev, status);
|
||||
if (!status)
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
|
||||
break;
|
||||
case DISCOVERY_STOPPED:
|
||||
default:
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void hci_request_setup(struct hci_dev *hdev)
|
||||
{
|
||||
INIT_WORK(&hdev->discov_update, discov_update);
|
||||
INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
|
||||
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
|
||||
INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
|
||||
}
|
||||
|
||||
void hci_request_cancel_all(struct hci_dev *hdev)
|
||||
{
|
||||
hci_req_sync_cancel(hdev, ENODEV);
|
||||
|
||||
cancel_work_sync(&hdev->discov_update);
|
||||
cancel_work_sync(&hdev->bg_scan_update);
|
||||
cancel_delayed_work_sync(&hdev->le_scan_disable);
|
||||
cancel_delayed_work_sync(&hdev->le_scan_restart);
|
||||
}
|
||||
|
|
|
@ -20,6 +20,9 @@
|
|||
SOFTWARE IS DISCLAIMED.
|
||||
*/
|
||||
|
||||
#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
|
||||
#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
|
||||
|
||||
struct hci_request {
|
||||
struct hci_dev *hdev;
|
||||
struct sk_buff_head cmd_q;
|
||||
|
@ -41,21 +44,37 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
|
|||
hci_req_complete_t *req_complete,
|
||||
hci_req_complete_skb_t *req_complete_skb);
|
||||
|
||||
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
|
||||
unsigned long opt),
|
||||
unsigned long opt, u32 timeout, u8 *hci_status);
|
||||
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
|
||||
unsigned long opt),
|
||||
unsigned long opt, u32 timeout, u8 *hci_status);
|
||||
void hci_req_sync_cancel(struct hci_dev *hdev, int err);
|
||||
|
||||
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
|
||||
const void *param);
|
||||
|
||||
void hci_req_add_le_scan_disable(struct hci_request *req);
|
||||
void hci_req_add_le_passive_scan(struct hci_request *req);
|
||||
|
||||
/* Returns true if HCI commands were queued */
|
||||
bool hci_req_stop_discovery(struct hci_request *req);
|
||||
|
||||
void hci_update_page_scan(struct hci_dev *hdev);
|
||||
void __hci_update_page_scan(struct hci_request *req);
|
||||
|
||||
int hci_update_random_address(struct hci_request *req, bool require_privacy,
|
||||
u8 *own_addr_type);
|
||||
|
||||
void hci_update_background_scan(struct hci_dev *hdev);
|
||||
void __hci_update_background_scan(struct hci_request *req);
|
||||
|
||||
int hci_abort_conn(struct hci_conn *conn, u8 reason);
|
||||
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
|
||||
u8 reason);
|
||||
|
||||
static inline void hci_update_background_scan(struct hci_dev *hdev)
|
||||
{
|
||||
queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
|
||||
}
|
||||
|
||||
void hci_request_setup(struct hci_dev *hdev);
|
||||
void hci_request_cancel_all(struct hci_dev *hdev);
|
||||
|
|
|
@ -26,6 +26,8 @@
|
|||
|
||||
#include <linux/export.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <generated/compile.h>
|
||||
#include <generated/utsrelease.h>
|
||||
|
||||
#include <net/bluetooth/bluetooth.h>
|
||||
#include <net/bluetooth/hci_core.h>
|
||||
|
@ -120,13 +122,13 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
|
|||
/* Apply filter */
|
||||
flt = &hci_pi(sk)->filter;
|
||||
|
||||
flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
|
||||
flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
|
||||
|
||||
if (!test_bit(flt_type, &flt->type_mask))
|
||||
return true;
|
||||
|
||||
/* Extra filter for event packets only */
|
||||
if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
|
||||
if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
|
||||
return false;
|
||||
|
||||
flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
|
||||
|
@ -170,19 +172,19 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
continue;
|
||||
|
||||
if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
|
||||
if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
|
||||
if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
|
||||
continue;
|
||||
if (is_filtered_packet(sk, skb))
|
||||
continue;
|
||||
} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
|
||||
if (!bt_cb(skb)->incoming)
|
||||
continue;
|
||||
if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
|
||||
if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
|
||||
continue;
|
||||
} else {
|
||||
/* Don't send frame to other channel types */
|
||||
|
@ -196,7 +198,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
continue;
|
||||
|
||||
/* Put type byte before the data */
|
||||
memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
|
||||
memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
|
||||
}
|
||||
|
||||
nskb = skb_clone(skb_copy, GFP_ATOMIC);
|
||||
|
@ -262,7 +264,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
|
||||
BT_DBG("hdev %p len %d", hdev, skb->len);
|
||||
|
||||
switch (bt_cb(skb)->pkt_type) {
|
||||
switch (hci_skb_pkt_type(skb)) {
|
||||
case HCI_COMMAND_PKT:
|
||||
opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
|
||||
break;
|
||||
|
@ -294,7 +296,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
return;
|
||||
|
||||
/* Put header before the data */
|
||||
hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
|
||||
hdr = (void *)skb_push(skb_copy, HCI_MON_HDR_SIZE);
|
||||
hdr->opcode = opcode;
|
||||
hdr->index = cpu_to_le16(hdev->id);
|
||||
hdr->len = cpu_to_le16(skb->len);
|
||||
|
@ -375,7 +377,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
|
|||
|
||||
__net_timestamp(skb);
|
||||
|
||||
hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
|
||||
hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
|
||||
hdr->opcode = opcode;
|
||||
hdr->index = cpu_to_le16(hdev->id);
|
||||
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
|
||||
|
@ -383,6 +385,29 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
|
|||
return skb;
|
||||
}
|
||||
|
||||
static void send_monitor_note(struct sock *sk, const char *text)
|
||||
{
|
||||
size_t len = strlen(text);
|
||||
struct hci_mon_hdr *hdr;
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
strcpy(skb_put(skb, len + 1), text);
|
||||
|
||||
__net_timestamp(skb);
|
||||
|
||||
hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
|
||||
hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
|
||||
hdr->index = cpu_to_le16(HCI_DEV_NONE);
|
||||
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
|
||||
|
||||
if (sock_queue_rcv_skb(sk, skb))
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
static void send_monitor_replay(struct sock *sk)
|
||||
{
|
||||
struct hci_dev *hdev;
|
||||
|
@ -436,18 +461,18 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
|
|||
if (!skb)
|
||||
return;
|
||||
|
||||
hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
|
||||
hdr = (void *)skb_put(skb, HCI_EVENT_HDR_SIZE);
|
||||
hdr->evt = HCI_EV_STACK_INTERNAL;
|
||||
hdr->plen = sizeof(*ev) + dlen;
|
||||
|
||||
ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
|
||||
ev = (void *)skb_put(skb, sizeof(*ev) + dlen);
|
||||
ev->type = type;
|
||||
memcpy(ev->data, data, dlen);
|
||||
|
||||
bt_cb(skb)->incoming = 1;
|
||||
__net_timestamp(skb);
|
||||
|
||||
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
|
||||
hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
|
||||
hci_send_to_sock(hdev, skb);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
@ -653,20 +678,20 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
|
|||
return -EOPNOTSUPP;
|
||||
|
||||
case HCIGETCONNINFO:
|
||||
return hci_get_conn_info(hdev, (void __user *) arg);
|
||||
return hci_get_conn_info(hdev, (void __user *)arg);
|
||||
|
||||
case HCIGETAUTHINFO:
|
||||
return hci_get_auth_info(hdev, (void __user *) arg);
|
||||
return hci_get_auth_info(hdev, (void __user *)arg);
|
||||
|
||||
case HCIBLOCKADDR:
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
return hci_sock_blacklist_add(hdev, (void __user *) arg);
|
||||
return hci_sock_blacklist_add(hdev, (void __user *)arg);
|
||||
|
||||
case HCIUNBLOCKADDR:
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
return hci_sock_blacklist_del(hdev, (void __user *) arg);
|
||||
return hci_sock_blacklist_del(hdev, (void __user *)arg);
|
||||
}
|
||||
|
||||
return -ENOIOCTLCMD;
|
||||
|
@ -675,7 +700,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
|
|||
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
void __user *argp = (void __user *) arg;
|
||||
void __user *argp = (void __user *)arg;
|
||||
struct sock *sk = sock->sk;
|
||||
int err;
|
||||
|
||||
|
@ -872,11 +897,27 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
|
|||
*/
|
||||
hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
|
||||
|
||||
send_monitor_note(sk, "Linux version " UTS_RELEASE
|
||||
" (" UTS_MACHINE ")");
|
||||
send_monitor_note(sk, "Bluetooth subsystem version "
|
||||
BT_SUBSYS_VERSION);
|
||||
send_monitor_replay(sk);
|
||||
|
||||
atomic_inc(&monitor_promisc);
|
||||
break;
|
||||
|
||||
case HCI_CHANNEL_LOGGING:
|
||||
if (haddr.hci_dev != HCI_DEV_NONE) {
|
||||
err = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (!capable(CAP_NET_ADMIN)) {
|
||||
err = -EPERM;
|
||||
goto done;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
if (!hci_mgmt_chan_find(haddr.hci_channel)) {
|
||||
err = -EINVAL;
|
||||
|
@ -926,7 +967,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
|
|||
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
|
||||
int *addr_len, int peer)
|
||||
{
|
||||
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
|
||||
struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
|
||||
struct sock *sk = sock->sk;
|
||||
struct hci_dev *hdev;
|
||||
int err = 0;
|
||||
|
@ -991,8 +1032,8 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
|
|||
}
|
||||
}
|
||||
|
||||
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
|
||||
int flags)
|
||||
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
size_t len, int flags)
|
||||
{
|
||||
int noblock = flags & MSG_DONTWAIT;
|
||||
struct sock *sk = sock->sk;
|
||||
|
@ -1004,6 +1045,9 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
|
|||
if (flags & MSG_OOB)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (sk->sk_state == BT_CLOSED)
|
||||
return 0;
|
||||
|
||||
|
@ -1150,6 +1194,90 @@ static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
|
|||
return err;
|
||||
}
|
||||
|
||||
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
|
||||
{
|
||||
struct hci_mon_hdr *hdr;
|
||||
struct sk_buff *skb;
|
||||
struct hci_dev *hdev;
|
||||
u16 index;
|
||||
int err;
|
||||
|
||||
/* The logging frame consists at minimum of the standard header,
|
||||
* the priority byte, the ident length byte and at least one string
|
||||
* terminator NUL byte. Anything shorter is an invalid packet.
|
||||
*/
|
||||
if (len < sizeof(*hdr) + 3)
|
||||
return -EINVAL;
|
||||
|
||||
skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
|
||||
if (!skb)
|
||||
return err;
|
||||
|
||||
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
|
||||
err = -EFAULT;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
hdr = (void *)skb->data;
|
||||
|
||||
if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
if (__le16_to_cpu(hdr->opcode) == 0x0000) {
|
||||
__u8 priority = skb->data[sizeof(*hdr)];
|
||||
__u8 ident_len = skb->data[sizeof(*hdr) + 1];
|
||||
|
||||
/* Only the priorities 0-7 are valid and with that any other
|
||||
* value results in an invalid packet.
|
||||
*
|
||||
* The priority byte is followed by an ident length byte and
|
||||
* the NUL terminated ident string. Check that the ident
|
||||
* length is not overflowing the packet and also that the
|
||||
* ident string itself is NUL terminated. If the ident
|
||||
* length is zero, the length byte itself doubles as the
|
||||
* NUL terminator.
|
||||
*
|
||||
* The message follows the ident string (if present) and
|
||||
* must be NUL terminated. Otherwise it is not a valid packet.
|
||||
*/
|
||||
if (priority > 7 || skb->data[len - 1] != 0x00 ||
|
||||
ident_len > len - sizeof(*hdr) - 3 ||
|
||||
skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
} else {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
index = __le16_to_cpu(hdr->index);
|
||||
|
||||
if (index != MGMT_INDEX_NONE) {
|
||||
hdev = hci_dev_get(index);
|
||||
if (!hdev) {
|
||||
err = -ENODEV;
|
||||
goto drop;
|
||||
}
|
||||
} else {
|
||||
hdev = NULL;
|
||||
}
|
||||
|
||||
hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
|
||||
|
||||
hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
|
||||
err = len;
|
||||
|
||||
if (hdev)
|
||||
hci_dev_put(hdev);
|
||||
|
||||
drop:
|
||||
kfree_skb(skb);
|
||||
return err;
|
||||
}
|
||||
|
||||
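/* Illustrative payload (assumed example, not part of this change) that
 * satisfies the checks in hci_logging_frame() above.  The bytes follow
 * the hci_mon_hdr, whose len field must equal their count (13 here) and
 * whose opcode must be 0x0000:
 *
 *   0x06                               priority (0-7)
 *   0x08                               ident length, counting its NUL
 *   'e' 'x' 'a' 'm' 'p' 'l' 'e' 0x00   ident string
 *   'h' 'i' 0x00                       NUL-terminated message
 */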
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
||||
size_t len)
|
||||
{
|
||||
|
@ -1179,6 +1307,9 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||
case HCI_CHANNEL_MONITOR:
|
||||
err = -EOPNOTSUPP;
|
||||
goto done;
|
||||
case HCI_CHANNEL_LOGGING:
|
||||
err = hci_logging_frame(sk, msg, len);
|
||||
goto done;
|
||||
default:
|
||||
mutex_lock(&mgmt_chan_list_lock);
|
||||
chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
|
||||
|
@ -1211,7 +1342,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||
goto drop;
|
||||
}
|
||||
|
||||
bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
|
||||
hci_skb_pkt_type(skb) = skb->data[0];
|
||||
skb_pull(skb, 1);
|
||||
|
||||
if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
|
||||
|
@ -1220,16 +1351,16 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||
*
|
||||
* However check that the packet type is valid.
|
||||
*/
|
||||
if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
|
||||
if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
skb_queue_tail(&hdev->raw_q, skb);
|
||||
queue_work(hdev->workqueue, &hdev->tx_work);
|
||||
} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
|
||||
} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
|
||||
u16 opcode = get_unaligned_le16(skb->data);
|
||||
u16 ogf = hci_opcode_ogf(opcode);
|
||||
u16 ocf = hci_opcode_ocf(opcode);
|
||||
|
@ -1242,6 +1373,11 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||
goto drop;
|
||||
}
|
||||
|
||||
/* Since the opcode has already been extracted here, store
|
||||
* a copy of the value for later use by the drivers.
|
||||
*/
|
||||
hci_skb_opcode(skb) = opcode;
|
||||
|
||||
if (ogf == 0x3f) {
|
||||
skb_queue_tail(&hdev->raw_q, skb);
|
||||
queue_work(hdev->workqueue, &hdev->tx_work);
|
||||
|
@ -1249,7 +1385,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||
/* Stand-alone HCI commands must be flagged as
|
||||
* single-command requests.
|
||||
*/
|
||||
bt_cb(skb)->hci.req_start = true;
|
||||
bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
|
||||
|
||||
skb_queue_tail(&hdev->cmd_q, skb);
|
||||
queue_work(hdev->workqueue, &hdev->cmd_work);
|
||||
|
@ -1260,8 +1396,8 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||
goto drop;
|
||||
}
|
||||
|
||||
if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
|
||||
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
|
||||
if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
|
||||
hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
|
||||
err = -EINVAL;
|
||||
goto drop;
|
||||
}
|
||||
|
|
|
@ -6538,8 +6538,6 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
|
|||
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
|
||||
chan->rx_state);
|
||||
|
||||
|
@ -6570,7 +6568,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
|
|||
chan->last_acked_seq = control->txseq;
|
||||
chan->expected_tx_seq = __next_seq(chan, control->txseq);
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
|
||||
|
@ -7113,8 +7111,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
|
|||
chan->dcid = cid;
|
||||
|
||||
if (bdaddr_type_is_le(dst_type)) {
|
||||
u8 role;
|
||||
|
||||
/* Convert from L2CAP channel address type to HCI address type
|
||||
*/
|
||||
if (dst_type == BDADDR_LE_PUBLIC)
|
||||
|
@ -7123,14 +7119,15 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
|
|||
dst_type = ADDR_LE_DEV_RANDOM;
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
|
||||
role = HCI_ROLE_SLAVE;
|
||||
hcon = hci_connect_le(hdev, dst, dst_type,
|
||||
chan->sec_level,
|
||||
HCI_LE_CONN_TIMEOUT,
|
||||
HCI_ROLE_SLAVE);
|
||||
else
|
||||
role = HCI_ROLE_MASTER;
|
||||
hcon = hci_connect_le_scan(hdev, dst, dst_type,
|
||||
chan->sec_level,
|
||||
HCI_LE_CONN_TIMEOUT);
|
||||
|
||||
hcon = hci_connect_le_scan(hdev, dst, dst_type,
|
||||
chan->sec_level,
|
||||
HCI_LE_CONN_TIMEOUT,
|
||||
role);
|
||||
} else {
|
||||
u8 auth_type = l2cap_get_auth_type(chan);
|
||||
hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
#include "mgmt_util.h"
|
||||
|
||||
#define MGMT_VERSION 1
|
||||
#define MGMT_REVISION 10
|
||||
#define MGMT_REVISION 11
|
||||
|
||||
static const u16 mgmt_commands[] = {
|
||||
MGMT_OP_READ_INDEX_LIST,
|
||||
|
@ -102,6 +102,7 @@ static const u16 mgmt_commands[] = {
|
|||
MGMT_OP_READ_ADV_FEATURES,
|
||||
MGMT_OP_ADD_ADVERTISING,
|
||||
MGMT_OP_REMOVE_ADVERTISING,
|
||||
MGMT_OP_GET_ADV_SIZE_INFO,
|
||||
};
|
||||
|
||||
static const u16 mgmt_events[] = {
|
||||
|
@ -1416,49 +1417,6 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
|||
}
|
||||
}
|
||||
|
||||
static bool hci_stop_discovery(struct hci_request *req)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_cp_remote_name_req_cancel cp;
|
||||
struct inquiry_entry *e;
|
||||
|
||||
switch (hdev->discovery.state) {
|
||||
case DISCOVERY_FINDING:
|
||||
if (test_bit(HCI_INQUIRY, &hdev->flags))
|
||||
hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
|
||||
cancel_delayed_work(&hdev->le_scan_disable);
|
||||
hci_req_add_le_scan_disable(req);
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
case DISCOVERY_RESOLVING:
|
||||
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
|
||||
NAME_PENDING);
|
||||
if (!e)
|
||||
break;
|
||||
|
||||
bacpy(&cp.bdaddr, &e->data.bdaddr);
|
||||
hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
|
||||
&cp);
|
||||
|
||||
return true;
|
||||
|
||||
default:
|
||||
/* Passive scanning */
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
|
||||
hci_req_add_le_scan_disable(req);
|
||||
return true;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void advertising_added(struct sock *sk, struct hci_dev *hdev,
|
||||
u8 instance)
|
||||
{
|
||||
|
@ -1636,7 +1594,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
|
|||
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
|
||||
disable_advertising(&req);
|
||||
|
||||
discov_stopped = hci_stop_discovery(&req);
|
||||
discov_stopped = hci_req_stop_discovery(&req);
|
||||
|
||||
list_for_each_entry(conn, &hdev->conn_hash.list, list) {
|
||||
/* 0x15 == Terminated due to Power Off */
|
||||
|
@ -2510,8 +2468,8 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
|||
hci_req_init(&req, hdev);
|
||||
update_adv_data(&req);
|
||||
update_scan_rsp_data(&req);
|
||||
__hci_update_background_scan(&req);
|
||||
hci_req_run(&req, NULL);
|
||||
hci_update_background_scan(hdev);
|
||||
}
|
||||
|
||||
unlock:
|
||||
|
@ -3561,8 +3519,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
|
|||
|
||||
conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
|
||||
addr_type, sec_level,
|
||||
HCI_LE_CONN_TIMEOUT,
|
||||
HCI_ROLE_MASTER);
|
||||
HCI_LE_CONN_TIMEOUT);
|
||||
}
|
||||
|
||||
if (IS_ERR(conn)) {
|
||||
|
@ -4164,145 +4121,9 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
|
|||
return err;
|
||||
}
|
||||
|
||||
static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_cp_inquiry cp;
|
||||
/* General inquiry access code (GIAC) */
|
||||
u8 lap[3] = { 0x33, 0x8b, 0x9e };
|
||||
|
||||
*status = mgmt_bredr_support(hdev);
|
||||
if (*status)
|
||||
return false;
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
|
||||
*status = MGMT_STATUS_BUSY;
|
||||
return false;
|
||||
}
|
||||
|
||||
hci_inquiry_cache_flush(hdev);
|
||||
|
||||
memset(&cp, 0, sizeof(cp));
|
||||
memcpy(&cp.lap, lap, sizeof(cp.lap));
|
||||
cp.length = DISCOV_BREDR_INQUIRY_LEN;
|
||||
|
||||
hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_cp_le_set_scan_param param_cp;
|
||||
struct hci_cp_le_set_scan_enable enable_cp;
|
||||
u8 own_addr_type;
|
||||
int err;
|
||||
|
||||
*status = mgmt_le_support(hdev);
|
||||
if (*status)
|
||||
return false;
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
|
||||
/* Don't let discovery abort an outgoing connection attempt
|
||||
* that's using directed advertising.
|
||||
*/
|
||||
if (hci_lookup_le_connect(hdev)) {
|
||||
*status = MGMT_STATUS_REJECTED;
|
||||
return false;
|
||||
}
|
||||
|
||||
cancel_adv_timeout(hdev);
|
||||
disable_advertising(req);
|
||||
}
|
||||
|
||||
/* If controller is scanning, it means the background scanning is
|
||||
* running. Thus, we should temporarily stop it in order to set the
|
||||
* discovery scanning parameters.
|
||||
*/
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
|
||||
hci_req_add_le_scan_disable(req);
|
||||
|
||||
/* All active scans will be done with either a resolvable private
|
||||
* address (when privacy feature has been enabled) or non-resolvable
|
||||
* private address.
|
||||
*/
|
||||
err = hci_update_random_address(req, true, &own_addr_type);
|
||||
if (err < 0) {
|
||||
*status = MGMT_STATUS_FAILED;
|
||||
return false;
|
||||
}
|
||||
|
||||
memset(&param_cp, 0, sizeof(param_cp));
|
||||
param_cp.type = LE_SCAN_ACTIVE;
|
||||
param_cp.interval = cpu_to_le16(interval);
|
||||
param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
|
||||
param_cp.own_address_type = own_addr_type;
|
||||
|
||||
hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
|
||||
&param_cp);
|
||||
|
||||
memset(&enable_cp, 0, sizeof(enable_cp));
|
||||
enable_cp.enable = LE_SCAN_ENABLE;
|
||||
enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
|
||||
|
||||
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
|
||||
&enable_cp);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool trigger_discovery(struct hci_request *req, u8 *status)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
|
||||
switch (hdev->discovery.type) {
|
||||
case DISCOV_TYPE_BREDR:
|
||||
if (!trigger_bredr_inquiry(req, status))
|
||||
return false;
|
||||
break;
|
||||
|
||||
case DISCOV_TYPE_INTERLEAVED:
|
||||
if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
|
||||
&hdev->quirks)) {
|
||||
/* During simultaneous discovery, we double LE scan
|
||||
* interval. We must leave some time for the controller
|
||||
* to do BR/EDR inquiry.
|
||||
*/
|
||||
if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
|
||||
status))
|
||||
return false;
|
||||
|
||||
if (!trigger_bredr_inquiry(req, status))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
|
||||
*status = MGMT_STATUS_NOT_SUPPORTED;
|
||||
return false;
|
||||
}
|
||||
/* fall through */
|
||||
|
||||
case DISCOV_TYPE_LE:
|
||||
if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
|
||||
return false;
|
||||
break;
|
||||
|
||||
default:
|
||||
*status = MGMT_STATUS_INVALID_PARAMS;
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
|
||||
u16 opcode)
|
||||
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
|
||||
{
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
unsigned long timeout;
|
||||
|
||||
BT_DBG("status %d", status);
|
||||
|
||||
|
@ -4317,62 +4138,34 @@ static void start_discovery_complete(struct hci_dev *hdev, u8 status,
|
|||
mgmt_pending_remove(cmd);
|
||||
}
|
||||
|
||||
if (status) {
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
|
||||
goto unlock;
|
||||
}
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
hci_discovery_set_state(hdev, DISCOVERY_FINDING);
|
||||
|
||||
/* If the scan involves LE scan, pick proper timeout to schedule
|
||||
* hdev->le_scan_disable that will stop it.
|
||||
*/
|
||||
switch (hdev->discovery.type) {
|
||||
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
|
||||
uint8_t *mgmt_status)
|
||||
{
|
||||
switch (type) {
|
||||
case DISCOV_TYPE_LE:
|
||||
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
|
||||
*mgmt_status = mgmt_le_support(hdev);
|
||||
if (*mgmt_status)
|
||||
return false;
|
||||
break;
|
||||
case DISCOV_TYPE_INTERLEAVED:
|
||||
/* When running simultaneous discovery, the LE scanning time
|
||||
* should occupy the whole discovery time since BR/EDR inquiry
|
||||
* and LE scanning are scheduled by the controller.
|
||||
*
|
||||
* For interleaving discovery in comparison, BR/EDR inquiry
|
||||
* and LE scanning are done sequentially with separate
|
||||
* timeouts.
|
||||
*/
|
||||
if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
|
||||
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
|
||||
else
|
||||
timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
|
||||
break;
|
||||
*mgmt_status = mgmt_le_support(hdev);
|
||||
if (*mgmt_status)
|
||||
return false;
|
||||
/* Intentional fall-through */
|
||||
case DISCOV_TYPE_BREDR:
|
||||
timeout = 0;
|
||||
*mgmt_status = mgmt_bredr_support(hdev);
|
||||
if (*mgmt_status)
|
||||
return false;
|
||||
break;
|
||||
default:
|
||||
BT_ERR("Invalid discovery type %d", hdev->discovery.type);
|
||||
timeout = 0;
|
||||
break;
|
||||
*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (timeout) {
|
||||
/* When service discovery is used and the controller has
|
||||
* a strict duplicate filter, it is important to remember
|
||||
* the start and duration of the scan. This is required
|
||||
* for restarting scanning during the discovery phase.
|
||||
*/
|
||||
if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
|
||||
&hdev->quirks) &&
|
||||
hdev->discovery.result_filtering) {
|
||||
hdev->discovery.scan_start = jiffies;
|
||||
hdev->discovery.scan_duration = timeout;
|
||||
}
|
||||
|
||||
queue_delayed_work(hdev->workqueue,
|
||||
&hdev->le_scan_disable, timeout);
|
||||
}
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
return true;
|
||||
}
|
||||
|
||||
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
|
||||
|
@ -4380,7 +4173,6 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
|
|||
{
|
||||
struct mgmt_cp_start_discovery *cp = data;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
struct hci_request req;
|
||||
u8 status;
|
||||
int err;
|
||||
|
||||
|
@ -4403,14 +4195,12 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
|
|||
goto failed;
|
||||
}
|
||||
|
||||
cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
|
||||
if (!cmd) {
|
||||
err = -ENOMEM;
|
||||
if (!discovery_type_is_valid(hdev, cp->type, &status)) {
|
||||
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
|
||||
status, &cp->type, sizeof(cp->type));
|
||||
goto failed;
|
||||
}
|
||||
|
||||
cmd->cmd_complete = generic_cmd_complete;
|
||||
|
||||
/* Clear the discovery filter first to free any previously
|
||||
* allocated memory for the UUID list.
|
||||
*/
|
||||
|
@ -4419,22 +4209,17 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
|
|||
hdev->discovery.type = cp->type;
|
||||
hdev->discovery.report_invalid_rssi = false;
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
if (!trigger_discovery(&req, &status)) {
|
||||
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
|
||||
status, &cp->type, sizeof(cp->type));
|
||||
mgmt_pending_remove(cmd);
|
||||
cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
|
||||
if (!cmd) {
|
||||
err = -ENOMEM;
|
||||
goto failed;
|
||||
}
|
||||
|
||||
err = hci_req_run(&req, start_discovery_complete);
|
||||
if (err < 0) {
|
||||
mgmt_pending_remove(cmd);
|
||||
goto failed;
|
||||
}
|
||||
cmd->cmd_complete = generic_cmd_complete;
|
||||
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
|
||||
queue_work(hdev->req_workqueue, &hdev->discov_update);
|
||||
err = 0;
|
||||
|
||||
failed:
|
||||
hci_dev_unlock(hdev);
|
||||
|
@ -4453,7 +4238,6 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
|
|||
{
|
||||
struct mgmt_cp_start_service_discovery *cp = data;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
struct hci_request req;
|
||||
const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
|
||||
u16 uuid_count, expected_len;
|
||||
u8 status;
|
||||
|
@ -4502,6 +4286,13 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
|
|||
goto failed;
|
||||
}
|
||||
|
||||
if (!discovery_type_is_valid(hdev, cp->type, &status)) {
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_START_SERVICE_DISCOVERY,
|
||||
status, &cp->type, sizeof(cp->type));
|
||||
goto failed;
|
||||
}
|
||||
|
||||
cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
|
||||
hdev, data, len);
|
||||
if (!cmd) {
|
||||
|
@ -4534,30 +4325,16 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
|
|||
}
|
||||
}
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
if (!trigger_discovery(&req, &status)) {
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_START_SERVICE_DISCOVERY,
|
||||
status, &cp->type, sizeof(cp->type));
|
||||
mgmt_pending_remove(cmd);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
err = hci_req_run(&req, start_discovery_complete);
|
||||
if (err < 0) {
|
||||
mgmt_pending_remove(cmd);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
|
||||
queue_work(hdev->req_workqueue, &hdev->discov_update);
|
||||
err = 0;
|
||||
|
||||
failed:
|
||||
hci_dev_unlock(hdev);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
||||
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
|
||||
{
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
|
||||
|
@ -4571,9 +4348,6 @@ static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
|||
mgmt_pending_remove(cmd);
|
||||
}
|
||||
|
||||
if (!status)
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
|
@ -4582,7 +4356,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
|
|||
{
|
||||
struct mgmt_cp_stop_discovery *mgmt_cp = data;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
struct hci_request req;
|
||||
int err;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
@ -4611,24 +4384,9 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
|
|||
|
||||
cmd->cmd_complete = generic_cmd_complete;
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
hci_stop_discovery(&req);
|
||||
|
||||
err = hci_req_run(&req, stop_discovery_complete);
|
||||
if (!err) {
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
mgmt_pending_remove(cmd);
|
||||
|
||||
/* If no HCI commands were sent we're done */
|
||||
if (err == -ENODATA) {
|
||||
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
|
||||
&mgmt_cp->type, sizeof(mgmt_cp->type));
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
|
||||
}
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
|
||||
queue_work(hdev->req_workqueue, &hdev->discov_update);
|
||||
err = 0;
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
|
@ -6076,10 +5834,9 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
|
|||
}
|
||||
|
||||
/* This function requires the caller holds hdev->lock */
|
||||
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
|
||||
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
|
||||
u8 addr_type, u8 auto_connect)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_conn_params *params;
|
||||
|
||||
params = hci_conn_params_add(hdev, addr, addr_type);
|
||||
|
@ -6099,26 +5856,17 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
|
|||
*/
|
||||
if (params->explicit_connect)
|
||||
list_add(¶ms->action, &hdev->pend_le_conns);
|
||||
|
||||
__hci_update_background_scan(req);
|
||||
break;
|
||||
case HCI_AUTO_CONN_REPORT:
|
||||
if (params->explicit_connect)
|
||||
list_add(¶ms->action, &hdev->pend_le_conns);
|
||||
else
|
||||
list_add(¶ms->action, &hdev->pend_le_reports);
|
||||
__hci_update_background_scan(req);
|
||||
break;
|
||||
case HCI_AUTO_CONN_DIRECT:
|
||||
case HCI_AUTO_CONN_ALWAYS:
|
||||
if (!is_connected(hdev, addr, addr_type)) {
|
||||
if (!is_connected(hdev, addr, addr_type))
|
||||
list_add(¶ms->action, &hdev->pend_le_conns);
|
||||
/* If we are in scan phase of connecting, we were
|
||||
* already added to pend_le_conns and scanning.
|
||||
*/
|
||||
if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT)
|
||||
__hci_update_background_scan(req);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -6142,31 +5890,10 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
|
|||
mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
|
||||
}
|
||||
|
||||
static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
||||
{
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
|
||||
BT_DBG("status 0x%02x", status);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
|
||||
if (!cmd)
|
||||
goto unlock;
|
||||
|
||||
cmd->cmd_complete(cmd, mgmt_status(status));
|
||||
mgmt_pending_remove(cmd);
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static int add_device(struct sock *sk, struct hci_dev *hdev,
|
||||
void *data, u16 len)
|
||||
{
|
||||
struct mgmt_cp_add_device *cp = data;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
struct hci_request req;
|
||||
u8 auto_conn, addr_type;
|
||||
int err;
|
||||
|
||||
|
@ -6183,24 +5910,15 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
|
|||
MGMT_STATUS_INVALID_PARAMS,
|
||||
&cp->addr, sizeof(cp->addr));
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
|
||||
if (!cmd) {
|
||||
err = -ENOMEM;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
cmd->cmd_complete = addr_cmd_complete;
|
||||
|
||||
if (cp->addr.type == BDADDR_BREDR) {
|
||||
/* Only incoming connections action is supported for now */
|
||||
if (cp->action != 0x01) {
|
||||
err = cmd->cmd_complete(cmd,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
mgmt_pending_remove(cmd);
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_ADD_DEVICE,
|
||||
MGMT_STATUS_INVALID_PARAMS,
|
||||
&cp->addr, sizeof(cp->addr));
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
|
@ -6209,7 +5927,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
|
|||
if (err)
|
||||
goto unlock;
|
||||
|
||||
__hci_update_page_scan(&req);
|
||||
hci_update_page_scan(hdev);
|
||||
|
||||
goto added;
|
||||
}
|
||||
|
@ -6229,33 +5947,31 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
|
|||
* hci_conn_params_lookup.
|
||||
*/
|
||||
if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
|
||||
err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
|
||||
mgmt_pending_remove(cmd);
|
||||
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
|
||||
MGMT_STATUS_INVALID_PARAMS,
|
||||
&cp->addr, sizeof(cp->addr));
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* If the connection parameters don't exist for this device,
|
||||
* they will be created and configured with defaults.
|
||||
*/
|
||||
if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
|
||||
if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
|
||||
auto_conn) < 0) {
|
||||
err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
|
||||
mgmt_pending_remove(cmd);
|
||||
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
|
||||
MGMT_STATUS_FAILED, &cp->addr,
|
||||
sizeof(cp->addr));
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
hci_update_background_scan(hdev);
|
||||
|
||||
added:
|
||||
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
|
||||
|
||||
err = hci_req_run(&req, add_device_complete);
|
||||
if (err < 0) {
|
||||
/* ENODATA means no HCI commands were needed (e.g. if
|
||||
* the adapter is powered off).
|
||||
*/
|
||||
if (err == -ENODATA)
|
||||
err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
|
||||
mgmt_pending_remove(cmd);
|
||||
}
|
||||
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
|
||||
MGMT_STATUS_SUCCESS, &cp->addr,
|
||||
sizeof(cp->addr));
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
|
@ -6273,55 +5989,25 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
|
|||
mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
|
||||
}
|
||||
|
||||
static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
||||
{
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
|
||||
BT_DBG("status 0x%02x", status);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
|
||||
if (!cmd)
|
||||
goto unlock;
|
||||
|
||||
cmd->cmd_complete(cmd, mgmt_status(status));
|
||||
mgmt_pending_remove(cmd);
|
||||
|
||||
unlock:
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static int remove_device(struct sock *sk, struct hci_dev *hdev,
|
||||
void *data, u16 len)
|
||||
{
|
||||
struct mgmt_cp_remove_device *cp = data;
|
||||
struct mgmt_pending_cmd *cmd;
|
||||
struct hci_request req;
|
||||
int err;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
|
||||
if (!cmd) {
|
||||
err = -ENOMEM;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
cmd->cmd_complete = addr_cmd_complete;
|
||||
|
||||
if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
|
||||
struct hci_conn_params *params;
|
||||
u8 addr_type;
|
||||
|
||||
if (!bdaddr_type_is_valid(cp->addr.type)) {
|
||||
err = cmd->cmd_complete(cmd,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
mgmt_pending_remove(cmd);
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_REMOVE_DEVICE,
|
||||
MGMT_STATUS_INVALID_PARAMS,
|
||||
&cp->addr, sizeof(cp->addr));
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
|
@ -6330,13 +6016,15 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
|
|||
&cp->addr.bdaddr,
|
||||
cp->addr.type);
|
||||
if (err) {
|
||||
err = cmd->cmd_complete(cmd,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
mgmt_pending_remove(cmd);
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_REMOVE_DEVICE,
|
||||
MGMT_STATUS_INVALID_PARAMS,
|
||||
&cp->addr,
|
||||
sizeof(cp->addr));
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
__hci_update_page_scan(&req);
|
||||
hci_update_page_scan(hdev);
|
||||
|
||||
device_removed(sk, hdev, &cp->addr.bdaddr,
|
||||
cp->addr.type);
|
||||
|
@ -6351,33 +6039,36 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
|
|||
* hci_conn_params_lookup.
|
||||
*/
|
||||
if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
|
||||
err = cmd->cmd_complete(cmd,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
mgmt_pending_remove(cmd);
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_REMOVE_DEVICE,
|
||||
MGMT_STATUS_INVALID_PARAMS,
|
||||
&cp->addr, sizeof(cp->addr));
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
|
||||
addr_type);
|
||||
if (!params) {
|
||||
err = cmd->cmd_complete(cmd,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
mgmt_pending_remove(cmd);
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_REMOVE_DEVICE,
|
||||
MGMT_STATUS_INVALID_PARAMS,
|
||||
&cp->addr, sizeof(cp->addr));
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
|
||||
params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
|
||||
err = cmd->cmd_complete(cmd,
|
||||
MGMT_STATUS_INVALID_PARAMS);
|
||||
mgmt_pending_remove(cmd);
|
||||
err = mgmt_cmd_complete(sk, hdev->id,
|
||||
MGMT_OP_REMOVE_DEVICE,
|
||||
MGMT_STATUS_INVALID_PARAMS,
|
||||
&cp->addr, sizeof(cp->addr));
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
list_del(¶ms->action);
|
||||
list_del(¶ms->list);
|
||||
kfree(params);
|
||||
__hci_update_background_scan(&req);
|
||||
hci_update_background_scan(hdev);
|
||||
|
||||
device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
|
||||
} else {
|
||||
|
@@ -6385,9 +6076,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

@@ -6397,7 +6089,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
			kfree(b);
		}

		__hci_update_page_scan(&req);
		hci_update_page_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)

@@ -6414,20 +6106,13 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
		hci_update_background_scan(hdev);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;

@@ -7016,17 +6701,19 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
	int i, cur_len;
	bool flags_managed = false;
	bool tx_power_managed = false;
	u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
			   MGMT_ADV_FLAG_MANAGED_FLAGS;

	if (is_adv_data && (adv_flags & flags_params)) {
		flags_managed = true;
		max_len -= 3;
	}
	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS)) {
			flags_managed = true;
			max_len -= 3;
		}

	if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
		tx_power_managed = true;
		max_len -= 3;
		if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
			tx_power_managed = true;
			max_len -= 3;
		}
	}

	if (len > max_len)

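The hunk above reserves 3 bytes each for the kernel-managed Flags and TX Power entries before checking the caller's payload against max_len; the payload itself is a chain of length-prefixed AD structures. A minimal standalone sketch of that framing check, assuming the standard one-byte length field that covers the type byte plus its payload (the helper name is illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Minimal sketch, not the kernel helper: each AD structure starts with a
 * one-byte length that covers the type byte and its payload, so the buffer
 * is well formed when the structures chain exactly to the end of the data.
 * A zero length is treated as malformed for simplicity.
 */
static bool ad_structs_well_formed(const uint8_t *data, size_t len)
{
	size_t i = 0;

	while (i < len) {
		uint8_t cur_len = data[i];

		if (cur_len == 0 || i + 1 + cur_len > len)
			return false;

		i += 1 + cur_len;
	}

	return true;
}
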
@@ -7155,6 +6842,10 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

@@ -7369,6 +7060,62 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
	return err;
}

static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	}

	return max_len;
}

static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(flags, false);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	return err;
}
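get_adv_size_info() answers the new Get Advertising Size Information command by reporting how much of the 31-byte advertising payload remains once kernel-managed entries are reserved, reusing tlv_data_max_len() for both the advertising data and the scan response. A client could mirror the same arithmetic before issuing Add Advertising; the sketch below assumes the 3-byte reservations shown above, and its names and flag bit values are illustrative placeholders rather than the mgmt API constants:

#include <stdint.h>

/* Placeholder bit positions for the sketch only. */
#define SKETCH_ADV_FLAG_DISCOV		0x02
#define SKETCH_ADV_FLAG_LIMITED_DISCOV	0x04
#define SKETCH_ADV_FLAG_MANAGED_FLAGS	0x08
#define SKETCH_ADV_FLAG_TX_POWER	0x10

#define SKETCH_MAX_AD_LENGTH		31	/* legacy advertising payload */

/* Mirror of the budget arithmetic: 3 bytes for a managed Flags entry and
 * 3 bytes for a managed TX Power entry, charged only against the
 * advertising data (scan responses carry neither).
 */
static uint8_t sketch_max_len(uint32_t adv_flags, int is_adv_data)
{
	uint8_t max_len = SKETCH_MAX_AD_LENGTH;

	if (!is_adv_data)
		return max_len;

	if (adv_flags & (SKETCH_ADV_FLAG_DISCOV |
			 SKETCH_ADV_FLAG_LIMITED_DISCOV |
			 SKETCH_ADV_FLAG_MANAGED_FLAGS))
		max_len -= 3;

	if (adv_flags & SKETCH_ADV_FLAG_TX_POWER)
		max_len -= 3;

	return max_len;
}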

static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, MGMT_READ_VERSION_SIZE,

@@ -7456,6 +7203,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
};

void mgmt_index_added(struct hci_dev *hdev)

@@ -7526,9 +7274,8 @@ void mgmt_index_removed(struct hci_dev *hdev)
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_request *req)
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {

@@ -7549,8 +7296,6 @@ static void restart_le_actions(struct hci_request *req)
			break;
		}
	}

	__hci_update_background_scan(req);
}

static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)

@@ -7560,12 +7305,8 @@ static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	hci_dev_lock(hdev);

@@ -7644,8 +7385,6 @@ static int powered_update_hci(struct hci_dev *hdev)
		    hdev->cur_adv_instance)
			schedule_adv_instance(&req, hdev->cur_adv_instance,
					      true);

		restart_le_actions(&req);
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);

@@ -7677,6 +7416,13 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
		return 0;

	if (powered) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);

		if (powered_update_hci(hdev) == 0)
			return 0;

@@ -8452,7 +8198,7 @@ static void restart_le_scan(struct hci_dev *hdev)
			       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

@@ -217,8 +217,7 @@ __ieee802154_rx_handle_packet(struct ieee802154_local *local,
		break;
	}

	if (skb)
		kfree_skb(skb);
	kfree_skb(skb);
}

static void
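The mac802154 hunk above drops the NULL check because kfree_skb() already returns early for a NULL pointer, so the guarded call collapses to a bare call. A short sketch of the resulting pattern (the wrapper name is illustrative):

#include <linux/skbuff.h>

/* Illustrative wrapper: an skb that may or may not still be owned here can
 * be dropped unconditionally, since kfree_skb(NULL) is a no-op.
 */
static void sketch_drop_leftover(struct sk_buff *skb)
{
	kfree_skb(skb);		/* no "if (skb)" needed */
}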