Merge tag 'for-net-next-2021-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Luiz Augusto von Dentz says:

====================
bluetooth-next pull request for net-next:

 - Add support for AOSP Bluetooth Quality Report
 - Enable the AOSP extension for MediaTek chips (MT7921 & MT7922)
 - Rework of HCI command execution serialization
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2021-11-17 14:52:44 +00:00
38 changed files with 7688 additions and 3860 deletions
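The MediaTek portion of this pull moves the WMT firmware-download helpers into a new shared btmtk module; each transport driver (SDIO, USB) now passes its own WMT command callback into the common helpers. A minimal sketch of how a transport driver is expected to hook into the new API follows; the driver name and the empty wmt-sync body are hypothetical, while the callback type and helper signatures are taken from the btmtk.h hunk in this commit:

/* Hypothetical transport-driver glue for the shared btmtk helpers. */
#include <net/bluetooth/hci_core.h>
#include "btmtk.h"

/* Forward a WMT command over the bus and wait for the vendor event,
 * filling params->status when the event arrives (bus code omitted).
 */
static int mydrv_hci_wmt_sync(struct hci_dev *hdev,
			      struct btmtk_hci_wmt_params *params)
{
	return 0;
}

static int mydrv_setup(struct hci_dev *hdev)
{
	/* The shared helper drives the whole patch download through the
	 * callback supplied by the transport driver.
	 */
	return btmtk_setup_firmware(hdev, FIRMWARE_MT7668,
				    mydrv_hci_wmt_sync);
}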


@@ -19,6 +19,10 @@ config BT_QCA
 	tristate
 	select FW_LOADER
 
+config BT_MTK
+	tristate
+	select FW_LOADER
+
 config BT_HCIBTUSB
 	tristate "HCI USB driver"
 	depends on USB
@@ -55,6 +59,7 @@ config BT_HCIBTUSB_BCM
 config BT_HCIBTUSB_MTK
 	bool "MediaTek protocol support"
 	depends on BT_HCIBTUSB
+	select BT_MTK
 	default n
 	help
 	  The MediaTek protocol support enables firmware download
@@ -383,6 +388,7 @@ config BT_ATH3K
 config BT_MTKSDIO
 	tristate "MediaTek HCI SDIO driver"
 	depends on MMC
+	select BT_MTK
 	help
 	  MediaTek Bluetooth HCI SDIO driver.
 	  This driver is required if you want to use MediaTek Bluetooth


@@ -25,6 +25,7 @@ obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
 obj-$(CONFIG_BT_BCM)		+= btbcm.o
 obj-$(CONFIG_BT_RTL)		+= btrtl.o
 obj-$(CONFIG_BT_QCA)		+= btqca.o
+obj-$(CONFIG_BT_MTK)		+= btmtk.o
 obj-$(CONFIG_BT_VIRTIO)	+= virtio_bt.o


@@ -628,6 +628,9 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
 	data->bulk_out_ep   = bulk_out_ep->desc.bEndpointAddress;
 	data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize);
 
+	if (!data->bulk_pkt_size)
+		goto done;
+
 	rwlock_init(&data->lock);
 	data->reassembly = NULL;


@@ -2081,15 +2081,17 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
 	if (ver->img_type == 0x03) {
 		btintel_clear_flag(hdev, INTEL_BOOTLOADER);
 		btintel_check_bdaddr(hdev);
-	}
-
-	/* If the OTP has no valid Bluetooth device address, then there will
-	 * also be no valid address for the operational firmware.
-	 */
-	if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
-		bt_dev_info(hdev, "No device address configured");
-		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+	} else {
+		/*
+		 * Check for valid bd address in boot loader mode. Device
+		 * will be marked as unconfigured if empty bd address is
+		 * found.
+		 */
+		if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
+			bt_dev_info(hdev, "No device address configured");
+			set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+		}
 	}
 
 	btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi");
 	err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
@@ -2466,6 +2468,10 @@ static int btintel_setup_combined(struct hci_dev *hdev)
 		goto exit_error;
 	}
 
+	/* memset ver_tlv to start with clean state as few fields are exclusive
+	 * to bootloader mode and are not populated in operational mode
+	 */
+	memset(&ver_tlv, 0, sizeof(ver_tlv));
 	/* For TLV type device, parse the tlv data */
 	err = btintel_parse_version_tlv(hdev, &ver_tlv, skb);
 	if (err) {


@@ -1,4 +1,4 @@
-/**
+/*
  * Marvell Bluetooth driver
  *
  * Copyright (C) 2009, Marvell International Ltd.

drivers/bluetooth/btmtk.c (new file, 289 lines)

@@ -0,0 +1,289 @@
// SPDX-License-Identifier: ISC
/* Copyright (C) 2021 MediaTek Inc.
*
*/
#include <linux/module.h>
#include <linux/firmware.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "btmtk.h"
#define VERSION "0.1"
/* It is for mt79xx download rom patch*/
#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
#define MTK_FW_ROM_PATCH_GD_SIZE 64
#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
#define MTK_SEC_MAP_COMMON_SIZE 12
#define MTK_SEC_MAP_NEED_SEND_SIZE 52
struct btmtk_patch_header {
u8 datetime[16];
u8 platform[4];
__le16 hwver;
__le16 swver;
__le32 magicnum;
} __packed;
struct btmtk_global_desc {
__le32 patch_ver;
__le32 sub_sys;
__le32 feature_opt;
__le32 section_num;
} __packed;
struct btmtk_section_map {
__le32 sectype;
__le32 secoffset;
__le32 secsize;
union {
__le32 u4SecSpec[13];
struct {
__le32 dlAddr;
__le32 dlsize;
__le32 seckeyidx;
__le32 alignlen;
__le32 sectype;
__le32 dlmodecrctype;
__le32 crc;
__le32 reserved[6];
} bin_info_spec;
};
} __packed;
int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
struct btmtk_hci_wmt_params wmt_params;
struct btmtk_global_desc *globaldesc = NULL;
struct btmtk_section_map *sectionmap;
const struct firmware *fw;
const u8 *fw_ptr;
const u8 *fw_bin_ptr;
int err, dlen, i, status;
u8 flag, first_block, retry;
u32 section_num, dl_size, section_offset;
u8 cmd[64];
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
return err;
}
fw_ptr = fw->data;
fw_bin_ptr = fw_ptr;
globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
section_num = le32_to_cpu(globaldesc->section_num);
for (i = 0; i < section_num; i++) {
first_block = 1;
fw_ptr = fw_bin_ptr;
sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
section_offset = le32_to_cpu(sectionmap->secoffset);
dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize);
if (dl_size > 0) {
retry = 20;
while (retry > 0) {
cmd[0] = 0; /* 0 means legacy dl mode. */
memcpy(cmd + 1,
fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
MTK_FW_ROM_PATCH_GD_SIZE +
MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
MTK_SEC_MAP_COMMON_SIZE,
MTK_SEC_MAP_NEED_SEND_SIZE + 1);
wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
wmt_params.status = &status;
wmt_params.flag = 0;
wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
wmt_params.data = &cmd;
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
err);
goto err_release_fw;
}
if (status == BTMTK_WMT_PATCH_UNDONE) {
break;
} else if (status == BTMTK_WMT_PATCH_PROGRESS) {
msleep(100);
retry--;
} else if (status == BTMTK_WMT_PATCH_DONE) {
goto next_section;
} else {
bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
status);
goto err_release_fw;
}
}
fw_ptr += section_offset;
wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
wmt_params.status = NULL;
while (dl_size > 0) {
dlen = min_t(int, 250, dl_size);
if (first_block == 1) {
flag = 1;
first_block = 0;
} else if (dl_size - dlen <= 0) {
flag = 3;
} else {
flag = 2;
}
wmt_params.flag = flag;
wmt_params.dlen = dlen;
wmt_params.data = fw_ptr;
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
err);
goto err_release_fw;
}
dl_size -= dlen;
fw_ptr += dlen;
}
}
next_section:
continue;
}
/* Wait a few moments for firmware activation done */
usleep_range(100000, 120000);
err_release_fw:
release_firmware(fw);
return err;
}
EXPORT_SYMBOL_GPL(btmtk_setup_firmware_79xx);
int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
struct btmtk_hci_wmt_params wmt_params;
const struct firmware *fw;
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
u8 flag, param;
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
return err;
}
/* Power on data RAM the firmware relies on. */
param = 1;
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 3;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
goto err_release_fw;
}
fw_ptr = fw->data;
fw_size = fw->size;
/* The size of patch header is 30 bytes, should be skip */
if (fw_size < 30) {
err = -EINVAL;
goto err_release_fw;
}
fw_size -= 30;
fw_ptr += 30;
flag = 1;
wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
wmt_params.status = NULL;
while (fw_size > 0) {
dlen = min_t(int, 250, fw_size);
/* Tell device the position in sequence */
if (fw_size - dlen <= 0)
flag = 3;
else if (fw_size < fw->size - 30)
flag = 2;
wmt_params.flag = flag;
wmt_params.dlen = dlen;
wmt_params.data = fw_ptr;
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
err);
goto err_release_fw;
}
fw_size -= dlen;
fw_ptr += dlen;
}
wmt_params.op = BTMTK_WMT_RST;
wmt_params.flag = 4;
wmt_params.dlen = 0;
wmt_params.data = NULL;
wmt_params.status = NULL;
/* Activate funciton the firmware providing to */
err = wmt_cmd_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
goto err_release_fw;
}
/* Wait a few moments for firmware activation done */
usleep_range(10000, 12000);
err_release_fw:
release_firmware(fw);
return err;
}
EXPORT_SYMBOL_GPL(btmtk_setup_firmware);
int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
long ret;
skb = __hci_cmd_sync(hdev, 0xfc1a, 6, bdaddr, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
ret);
return ret;
}
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(btmtk_set_bdaddr);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Mark Chen <mark-yw.chen@mediatek.com>");
MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
MODULE_FIRMWARE(FIRMWARE_MT7961);

drivers/bluetooth/btmtk.h (new file, 111 lines)

@@ -0,0 +1,111 @@
/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2021 MediaTek Inc. */
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
#define HCI_WMT_MAX_EVENT_SIZE 64
#define BTMTK_WMT_REG_READ 0x2
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
BTMTK_WMT_TEST = 0x2,
BTMTK_WMT_WAKEUP = 0x3,
BTMTK_WMT_HIF = 0x4,
BTMTK_WMT_FUNC_CTRL = 0x6,
BTMTK_WMT_RST = 0x7,
BTMTK_WMT_REGISTER = 0x8,
BTMTK_WMT_SEMAPHORE = 0x17,
};
enum {
BTMTK_WMT_INVALID,
BTMTK_WMT_PATCH_UNDONE,
BTMTK_WMT_PATCH_PROGRESS,
BTMTK_WMT_PATCH_DONE,
BTMTK_WMT_ON_UNDONE,
BTMTK_WMT_ON_DONE,
BTMTK_WMT_ON_PROGRESS,
};
struct btmtk_wmt_hdr {
u8 dir;
u8 op;
__le16 dlen;
u8 flag;
} __packed;
struct btmtk_hci_wmt_cmd {
struct btmtk_wmt_hdr hdr;
u8 data[];
} __packed;
struct btmtk_hci_wmt_evt {
struct hci_event_hdr hhdr;
struct btmtk_wmt_hdr whdr;
} __packed;
struct btmtk_hci_wmt_evt_funcc {
struct btmtk_hci_wmt_evt hwhdr;
__be16 status;
} __packed;
struct btmtk_hci_wmt_evt_reg {
struct btmtk_hci_wmt_evt hwhdr;
u8 rsv[2];
u8 num;
__le32 addr;
__le32 val;
} __packed;
struct btmtk_tci_sleep {
u8 mode;
__le16 duration;
__le16 host_duration;
u8 host_wakeup_pin;
u8 time_compensation;
} __packed;
struct btmtk_hci_wmt_params {
u8 op;
u8 flag;
u16 dlen;
const void *data;
u32 *status;
};
typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *,
struct btmtk_hci_wmt_params *);
#if IS_ENABLED(CONFIG_BT_MTK)
int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync);
int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync);
#else
static inline int btmtk_set_bdaddr(struct hci_dev *hdev,
const bdaddr_t *bdaddr)
{
return -EOPNOTSUPP;
}
static inline int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
return -EOPNOTSUPP;
}
static inline int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname,
wmt_cmd_sync_func_t wmt_cmd_sync)
{
return -EOPNOTSUPP;
}
#endif


@@ -12,7 +12,6 @@
 #include <asm/unaligned.h>
 #include <linux/atomic.h>
-#include <linux/firmware.h>
 #include <linux/init.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
@@ -28,26 +27,32 @@
 #include <net/bluetooth/hci_core.h>
 
 #include "h4_recv.h"
+#include "btmtk.h"
 
 #define VERSION "0.1"
 
-#define FIRMWARE_MT7663		"mediatek/mt7663pr2h.bin"
-#define FIRMWARE_MT7668		"mediatek/mt7668pr2h.bin"
-
 #define MTKBTSDIO_AUTOSUSPEND_DELAY	8000
 
 static bool enable_autosuspend;
 
 struct btmtksdio_data {
 	const char *fwname;
+	u16 chipid;
 };
 
 static const struct btmtksdio_data mt7663_data = {
 	.fwname = FIRMWARE_MT7663,
+	.chipid = 0x7663,
 };
 
 static const struct btmtksdio_data mt7668_data = {
 	.fwname = FIRMWARE_MT7668,
+	.chipid = 0x7668,
+};
+
+static const struct btmtksdio_data mt7921_data = {
+	.fwname = FIRMWARE_MT7961,
+	.chipid = 0x7921,
 };
 
 static const struct sdio_device_id btmtksdio_table[] = {
static const struct sdio_device_id btmtksdio_table[] = { static const struct sdio_device_id btmtksdio_table[] = {
@@ -55,6 +60,8 @@ static const struct sdio_device_id btmtksdio_table[] = {
 	 .driver_data = (kernel_ulong_t)&mt7663_data },
 	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668),
 	 .driver_data = (kernel_ulong_t)&mt7668_data },
+	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7961),
+	 .driver_data = (kernel_ulong_t)&mt7921_data },
 	{ }	/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
@@ -86,81 +93,27 @@ MODULE_DEVICE_TABLE(sdio, btmtksdio_table);
#define MTK_REG_CRDR 0x1c #define MTK_REG_CRDR 0x1c
#define MTK_REG_CRPLR 0x24
#define MTK_SDIO_BLOCK_SIZE 256 #define MTK_SDIO_BLOCK_SIZE 256
#define BTMTKSDIO_TX_WAIT_VND_EVT 1 #define BTMTKSDIO_TX_WAIT_VND_EVT 1
enum {
MTK_WMT_PATCH_DWNLD = 0x1,
MTK_WMT_TEST = 0x2,
MTK_WMT_WAKEUP = 0x3,
MTK_WMT_HIF = 0x4,
MTK_WMT_FUNC_CTRL = 0x6,
MTK_WMT_RST = 0x7,
MTK_WMT_SEMAPHORE = 0x17,
};
enum {
BTMTK_WMT_INVALID,
BTMTK_WMT_PATCH_UNDONE,
BTMTK_WMT_PATCH_DONE,
BTMTK_WMT_ON_UNDONE,
BTMTK_WMT_ON_DONE,
BTMTK_WMT_ON_PROGRESS,
};
struct mtkbtsdio_hdr { struct mtkbtsdio_hdr {
__le16 len; __le16 len;
__le16 reserved; __le16 reserved;
u8 bt_type; u8 bt_type;
} __packed; } __packed;
struct mtk_wmt_hdr {
u8 dir;
u8 op;
__le16 dlen;
u8 flag;
} __packed;
struct mtk_hci_wmt_cmd {
struct mtk_wmt_hdr hdr;
u8 data[256];
} __packed;
struct btmtk_hci_wmt_evt {
struct hci_event_hdr hhdr;
struct mtk_wmt_hdr whdr;
} __packed;
struct btmtk_hci_wmt_evt_funcc {
struct btmtk_hci_wmt_evt hwhdr;
__be16 status;
} __packed;
struct btmtk_tci_sleep {
u8 mode;
__le16 duration;
__le16 host_duration;
u8 host_wakeup_pin;
u8 time_compensation;
} __packed;
struct btmtk_hci_wmt_params {
u8 op;
u8 flag;
u16 dlen;
const void *data;
u32 *status;
};
struct btmtksdio_dev { struct btmtksdio_dev {
struct hci_dev *hdev; struct hci_dev *hdev;
struct sdio_func *func; struct sdio_func *func;
struct device *dev; struct device *dev;
struct work_struct tx_work; struct work_struct txrx_work;
unsigned long tx_state; unsigned long tx_state;
struct sk_buff_head txq; struct sk_buff_head txq;
bool hw_tx_ready;
struct sk_buff *evt_skb; struct sk_buff *evt_skb;
@@ -172,29 +125,35 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
 {
 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
 	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+	struct btmtk_hci_wmt_evt_reg *wmt_evt_reg;
 	u32 hlen, status = BTMTK_WMT_INVALID;
 	struct btmtk_hci_wmt_evt *wmt_evt;
-	struct mtk_hci_wmt_cmd wc;
-	struct mtk_wmt_hdr *hdr;
+	struct btmtk_hci_wmt_cmd *wc;
+	struct btmtk_wmt_hdr *hdr;
 	int err;
 
+	/* Send the WMT command and wait until the WMT event returns */
 	hlen = sizeof(*hdr) + wmt_params->dlen;
 	if (hlen > 255)
 		return -EINVAL;
 
-	hdr = (struct mtk_wmt_hdr *)&wc;
+	wc = kzalloc(hlen, GFP_KERNEL);
+	if (!wc)
+		return -ENOMEM;
+
+	hdr = &wc->hdr;
 	hdr->dir = 1;
 	hdr->op = wmt_params->op;
 	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
 	hdr->flag = wmt_params->flag;
-	memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+	memcpy(wc->data, wmt_params->data, wmt_params->dlen);
 
 	set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
 
-	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+	err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
 	if (err < 0) {
 		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
-		return err;
+		goto err_free_wc;
 	}
 
 	/* The vendor specific WMT commands are all answered by a vendor
@@ -211,13 +170,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
 	if (err == -EINTR) {
 		bt_dev_err(hdev, "Execution of wmt command interrupted");
 		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
-		return err;
+		goto err_free_wc;
 	}
 
 	if (err) {
 		bt_dev_err(hdev, "Execution of wmt command timed out");
 		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
-		return -ETIMEDOUT;
+		err = -ETIMEDOUT;
+		goto err_free_wc;
 	}
/* Parse and handle the return WMT event */ /* Parse and handle the return WMT event */
@@ -230,13 +190,13 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
 	}
 
 	switch (wmt_evt->whdr.op) {
-	case MTK_WMT_SEMAPHORE:
+	case BTMTK_WMT_SEMAPHORE:
 		if (wmt_evt->whdr.flag == 2)
 			status = BTMTK_WMT_PATCH_UNDONE;
 		else
 			status = BTMTK_WMT_PATCH_DONE;
 		break;
-	case MTK_WMT_FUNC_CTRL:
+	case BTMTK_WMT_FUNC_CTRL:
 		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
 		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
 			status = BTMTK_WMT_ON_DONE;
@@ -245,6 +205,19 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
 		else
 			status = BTMTK_WMT_ON_UNDONE;
 		break;
+	case BTMTK_WMT_PATCH_DWNLD:
+		if (wmt_evt->whdr.flag == 2)
+			status = BTMTK_WMT_PATCH_DONE;
+		else if (wmt_evt->whdr.flag == 1)
+			status = BTMTK_WMT_PATCH_PROGRESS;
+		else
+			status = BTMTK_WMT_PATCH_UNDONE;
+		break;
+	case BTMTK_WMT_REGISTER:
+		wmt_evt_reg = (struct btmtk_hci_wmt_evt_reg *)wmt_evt;
+		if (le16_to_cpu(wmt_evt->whdr.dlen) == 12)
+			status = le32_to_cpu(wmt_evt_reg->val);
+		break;
 	}
 
 	if (wmt_params->status)
@@ -253,6 +226,8 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
 err_free_skb:
 	kfree_skb(bdev->evt_skb);
 	bdev->evt_skb = NULL;
+err_free_wc:
+	kfree(wc);
 
 	return err;
 }
@@ -279,6 +254,7 @@ static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
 	sdio_hdr->reserved = cpu_to_le16(0);
 	sdio_hdr->bt_type = hci_skb_pkt_type(skb);
 
+	bdev->hw_tx_ready = false;
 	err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
 			   round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
 	if (err < 0)
@@ -301,32 +277,6 @@ static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
 	return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
 }
 
-static void btmtksdio_tx_work(struct work_struct *work)
-{
-	struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
-						  tx_work);
-	struct sk_buff *skb;
-	int err;
-
-	pm_runtime_get_sync(bdev->dev);
-
-	sdio_claim_host(bdev->func);
-
-	while ((skb = skb_dequeue(&bdev->txq))) {
-		err = btmtksdio_tx_packet(bdev, skb);
-		if (err < 0) {
-			bdev->hdev->stat.err_tx++;
-			skb_queue_head(&bdev->txq, skb);
-			break;
-		}
-	}
-
-	sdio_release_host(bdev->func);
-
-	pm_runtime_mark_last_busy(bdev->dev);
-	pm_runtime_put_autosuspend(bdev->dev);
-}
-
 static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
@@ -477,28 +427,26 @@ err_kfree_skb:
return err; return err;
} }
static void btmtksdio_interrupt(struct sdio_func *func) static void btmtksdio_txrx_work(struct work_struct *work)
{ {
struct btmtksdio_dev *bdev = sdio_get_drvdata(func); struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
u32 int_status; txrx_work);
u16 rx_size; unsigned long txrx_timeout;
u32 int_status, rx_size;
/* It is required that the host gets ownership from the device before struct sk_buff *skb;
* accessing any register, however, if SDIO host is not being released, int err;
* a potential deadlock probably happens in a circular wait between SDIO
* IRQ work and PM runtime work. So, we have to explicitly release SDIO
* host here and claim again after the PM runtime work is all done.
*/
sdio_release_host(bdev->func);
pm_runtime_get_sync(bdev->dev); pm_runtime_get_sync(bdev->dev);
sdio_claim_host(bdev->func); sdio_claim_host(bdev->func);
/* Disable interrupt */ /* Disable interrupt */
sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
int_status = sdio_readl(func, MTK_REG_CHISR, NULL); txrx_timeout = jiffies + 5 * HZ;
do {
int_status = sdio_readl(bdev->func, MTK_REG_CHISR, NULL);
/* Ack an interrupt as soon as possible before any operation on /* Ack an interrupt as soon as possible before any operation on
* hardware. * hardware.
@@ -509,38 +457,59 @@ static void btmtksdio_interrupt(struct sdio_func *func)
* not be raised again but there is still pending data in the hardware * not be raised again but there is still pending data in the hardware
* FIFO. * FIFO.
*/ */
sdio_writel(func, int_status, MTK_REG_CHISR, NULL); sdio_writel(bdev->func, int_status, MTK_REG_CHISR, NULL);
if (unlikely(!int_status))
bt_dev_err(bdev->hdev, "CHISR is 0");
if (int_status & FW_OWN_BACK_INT) if (int_status & FW_OWN_BACK_INT)
bt_dev_dbg(bdev->hdev, "Get fw own back"); bt_dev_dbg(bdev->hdev, "Get fw own back");
if (int_status & TX_EMPTY) if (int_status & TX_EMPTY)
schedule_work(&bdev->tx_work); bdev->hw_tx_ready = true;
else if (unlikely(int_status & TX_FIFO_OVERFLOW)) else if (unlikely(int_status & TX_FIFO_OVERFLOW))
bt_dev_warn(bdev->hdev, "Tx fifo overflow"); bt_dev_warn(bdev->hdev, "Tx fifo overflow");
if (int_status & RX_DONE_INT) { if (bdev->hw_tx_ready) {
rx_size = (int_status & RX_PKT_LEN) >> 16; skb = skb_dequeue(&bdev->txq);
if (skb) {
err = btmtksdio_tx_packet(bdev, skb);
if (err < 0) {
bdev->hdev->stat.err_tx++;
skb_queue_head(&bdev->txq, skb);
}
}
}
if (int_status & RX_DONE_INT) {
rx_size = sdio_readl(bdev->func, MTK_REG_CRPLR, NULL);
rx_size = (rx_size & RX_PKT_LEN) >> 16;
if (btmtksdio_rx_packet(bdev, rx_size) < 0) if (btmtksdio_rx_packet(bdev, rx_size) < 0)
bdev->hdev->stat.err_rx++; bdev->hdev->stat.err_rx++;
} }
} while (int_status || time_is_before_jiffies(txrx_timeout));
/* Enable interrupt */ /* Enable interrupt */
sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL); sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);
sdio_release_host(bdev->func);
pm_runtime_mark_last_busy(bdev->dev); pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev); pm_runtime_put_autosuspend(bdev->dev);
} }
static void btmtksdio_interrupt(struct sdio_func *func)
{
struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
/* Disable interrupt */
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
schedule_work(&bdev->txrx_work);
}
static int btmtksdio_open(struct hci_dev *hdev) static int btmtksdio_open(struct hci_dev *hdev)
{ {
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
u32 status, val;
int err; int err;
u32 status;
sdio_claim_host(bdev->func); sdio_claim_host(bdev->func);
@@ -580,13 +549,22 @@ static int btmtksdio_open(struct hci_dev *hdev)
 	/* SDIO CMD 5 allows the SDIO device back to idle state an
 	 * synchronous interrupt is supported in SDIO 4-bit mode
 	 */
-	sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
-		    MTK_REG_CSDIOCSR, &err);
+	val = sdio_readl(bdev->func, MTK_REG_CSDIOCSR, &err);
 	if (err < 0)
 		goto err_release_irq;
 
-	/* Setup write-1-clear for CHISR register */
-	sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
+	val |= SDIO_INT_CTL;
+	sdio_writel(bdev->func, val, MTK_REG_CSDIOCSR, &err);
+	if (err < 0)
+		goto err_release_irq;
+
+	/* Explitly set write-1-clear method */
+	val = sdio_readl(bdev->func, MTK_REG_CHCR, &err);
+	if (err < 0)
+		goto err_release_irq;
+
+	val |= C_INT_CLR_CTRL;
+	sdio_writel(bdev->func, val, MTK_REG_CHCR, &err);
 	if (err < 0)
 		goto err_release_irq;
@@ -630,6 +608,8 @@ static int btmtksdio_close(struct hci_dev *hdev)
 	sdio_release_irq(bdev->func);
 
+	cancel_work_sync(&bdev->txrx_work);
+
 	/* Return ownership to the device */
 	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
@@ -651,7 +631,7 @@ static int btmtksdio_flush(struct hci_dev *hdev)
 	skb_queue_purge(&bdev->txq);
 
-	cancel_work_sync(&bdev->tx_work);
+	cancel_work_sync(&bdev->txrx_work);
 
 	return 0;
 }
@@ -663,7 +643,7 @@ static int btmtksdio_func_query(struct hci_dev *hdev)
 	u8 param = 0;
 
 	/* Query whether the function is enabled */
-	wmt_params.op = MTK_WMT_FUNC_CTRL;
+	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
 	wmt_params.flag = 4;
 	wmt_params.dlen = sizeof(param);
 	wmt_params.data = &param;
@@ -678,111 +658,16 @@ static int btmtksdio_func_query(struct hci_dev *hdev)
return status; return status;
} }
static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) static int mt76xx_setup(struct hci_dev *hdev, const char *fwname)
{ {
struct btmtk_hci_wmt_params wmt_params; struct btmtk_hci_wmt_params wmt_params;
const struct firmware *fw;
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
u8 flag, param;
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
return err;
}
/* Power on data RAM the firmware relies on. */
param = 1;
wmt_params.op = MTK_WMT_FUNC_CTRL;
wmt_params.flag = 3;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
goto free_fw;
}
fw_ptr = fw->data;
fw_size = fw->size;
/* The size of patch header is 30 bytes, should be skip */
if (fw_size < 30) {
err = -EINVAL;
goto free_fw;
}
fw_size -= 30;
fw_ptr += 30;
flag = 1;
wmt_params.op = MTK_WMT_PATCH_DWNLD;
wmt_params.status = NULL;
while (fw_size > 0) {
dlen = min_t(int, 250, fw_size);
/* Tell device the position in sequence */
if (fw_size - dlen <= 0)
flag = 3;
else if (fw_size < fw->size - 30)
flag = 2;
wmt_params.flag = flag;
wmt_params.dlen = dlen;
wmt_params.data = fw_ptr;
err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
err);
goto free_fw;
}
fw_size -= dlen;
fw_ptr += dlen;
}
wmt_params.op = MTK_WMT_RST;
wmt_params.flag = 4;
wmt_params.dlen = 0;
wmt_params.data = NULL;
wmt_params.status = NULL;
/* Activate funciton the firmware providing to */
err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
goto free_fw;
}
/* Wait a few moments for firmware activation done */
usleep_range(10000, 12000);
free_fw:
release_firmware(fw);
return err;
}
static int btmtksdio_setup(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_params wmt_params;
ktime_t calltime, delta, rettime;
struct btmtk_tci_sleep tci_sleep; struct btmtk_tci_sleep tci_sleep;
unsigned long long duration;
struct sk_buff *skb; struct sk_buff *skb;
int err, status; int err, status;
u8 param = 0x1; u8 param = 0x1;
calltime = ktime_get();
/* Query whether the firmware is already download */ /* Query whether the firmware is already download */
wmt_params.op = MTK_WMT_SEMAPHORE; wmt_params.op = BTMTK_WMT_SEMAPHORE;
wmt_params.flag = 1; wmt_params.flag = 1;
wmt_params.dlen = 0; wmt_params.dlen = 0;
wmt_params.data = NULL; wmt_params.data = NULL;
@@ -800,7 +685,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
 	}
 
 	/* Setup a firmware which the device definitely requires */
-	err = mtk_setup_firmware(hdev, bdev->data->fwname);
+	err = btmtk_setup_firmware(hdev, fwname, mtk_hci_wmt_sync);
 	if (err < 0)
 		return err;
@@ -823,7 +708,7 @@ ignore_setup_fw:
 	}
 
 	/* Enable Bluetooth protocol */
-	wmt_params.op = MTK_WMT_FUNC_CTRL;
+	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
 	wmt_params.flag = 0;
 	wmt_params.dlen = sizeof(param);
 	wmt_params.data = &param;
@@ -852,6 +737,113 @@ ignore_func_on:
} }
kfree_skb(skb); kfree_skb(skb);
return 0;
}
static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
{
struct btmtk_hci_wmt_params wmt_params;
u8 param = 0x1;
int err;
err = btmtk_setup_firmware_79xx(hdev, fwname, mtk_hci_wmt_sync);
if (err < 0) {
bt_dev_err(hdev, "Failed to setup 79xx firmware (%d)", err);
return err;
}
/* Enable Bluetooth protocol */
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 0;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
return err;
}
return err;
}
static int btsdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
{
struct btmtk_hci_wmt_params wmt_params;
struct reg_read_cmd {
u8 type;
u8 rsv;
u8 num;
__le32 addr;
} __packed reg_read = {
.type = 1,
.num = 1,
};
u32 status;
int err;
reg_read.addr = cpu_to_le32(reg);
wmt_params.op = BTMTK_WMT_REGISTER;
wmt_params.flag = BTMTK_WMT_REG_READ;
wmt_params.dlen = sizeof(reg_read);
wmt_params.data = &reg_read;
wmt_params.status = &status;
err = mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to read reg(%d)", err);
return err;
}
*val = status;
return err;
}
static int btmtksdio_setup(struct hci_dev *hdev)
{
struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
ktime_t calltime, delta, rettime;
unsigned long long duration;
char fwname[64];
int err, dev_id;
u32 fw_version = 0;
calltime = ktime_get();
bdev->hw_tx_ready = true;
switch (bdev->data->chipid) {
case 0x7921:
err = btsdio_mtk_reg_read(hdev, 0x70010200, &dev_id);
if (err < 0) {
bt_dev_err(hdev, "Failed to get device id (%d)", err);
return err;
}
err = btsdio_mtk_reg_read(hdev, 0x80021004, &fw_version);
if (err < 0) {
bt_dev_err(hdev, "Failed to get fw version (%d)", err);
return err;
}
snprintf(fwname, sizeof(fwname),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
dev_id & 0xffff, (fw_version & 0xff) + 1);
err = mt79xx_setup(hdev, fwname);
if (err < 0)
return err;
break;
case 0x7663:
case 0x7668:
err = mt76xx_setup(hdev, bdev->data->fwname);
if (err < 0)
return err;
break;
default:
return -ENODEV;
}
rettime = ktime_get(); rettime = ktime_get();
delta = ktime_sub(rettime, calltime); delta = ktime_sub(rettime, calltime);
duration = (unsigned long long)ktime_to_ns(delta) >> 10; duration = (unsigned long long)ktime_to_ns(delta) >> 10;
@@ -891,7 +883,7 @@ static int btmtksdio_shutdown(struct hci_dev *hdev)
 	pm_runtime_get_sync(bdev->dev);
 
 	/* Disable the device */
-	wmt_params.op = MTK_WMT_FUNC_CTRL;
+	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
 	wmt_params.flag = 0;
 	wmt_params.dlen = sizeof(param);
 	wmt_params.data = &param;
@@ -932,7 +924,7 @@ static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 	skb_queue_tail(&bdev->txq, skb);
 
-	schedule_work(&bdev->tx_work);
+	schedule_work(&bdev->txrx_work);
 
 	return 0;
 }
@@ -955,7 +947,7 @@ static int btmtksdio_probe(struct sdio_func *func,
 	bdev->dev = &func->dev;
 	bdev->func = func;
 
-	INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
+	INIT_WORK(&bdev->txrx_work, btmtksdio_txrx_work);
 	skb_queue_head_init(&bdev->txq);
 
 	/* Initialize and register HCI device */
@@ -976,6 +968,8 @@ static int btmtksdio_probe(struct sdio_func *func,
 	hdev->setup    = btmtksdio_setup;
 	hdev->shutdown = btmtksdio_shutdown;
 	hdev->send     = btmtksdio_send_frame;
+	hdev->set_bdaddr = btmtk_set_bdaddr;
+
 	SET_HCIDEV_DEV(hdev, &func->dev);
 
 	hdev->manufacturer = 70;
@@ -1112,5 +1106,3 @@ MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
 MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE_MT7663);
-MODULE_FIRMWARE(FIRMWARE_MT7668);


@@ -295,6 +295,8 @@ static int btsdio_probe(struct sdio_func *func,
 	switch (func->device) {
 	case SDIO_DEVICE_ID_BROADCOM_43341:
 	case SDIO_DEVICE_ID_BROADCOM_43430:
+	case SDIO_DEVICE_ID_BROADCOM_4345:
+	case SDIO_DEVICE_ID_BROADCOM_43455:
 	case SDIO_DEVICE_ID_BROADCOM_4356:
 		return -ENODEV;
 	}


@@ -24,6 +24,7 @@
#include "btintel.h" #include "btintel.h"
#include "btbcm.h" #include "btbcm.h"
#include "btrtl.h" #include "btrtl.h"
#include "btmtk.h"
#define VERSION "0.8" #define VERSION "0.8"
@@ -2131,122 +2132,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
#define MTK_BT_RST_DONE 0x00000100 #define MTK_BT_RST_DONE 0x00000100
#define MTK_BT_RESET_WAIT_MS 100 #define MTK_BT_RESET_WAIT_MS 100
#define MTK_BT_RESET_NUM_TRIES 10 #define MTK_BT_RESET_NUM_TRIES 10
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
#define HCI_WMT_MAX_EVENT_SIZE 64
/* It is for mt79xx download rom patch*/
#define MTK_FW_ROM_PATCH_HEADER_SIZE 32
#define MTK_FW_ROM_PATCH_GD_SIZE 64
#define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64
#define MTK_SEC_MAP_COMMON_SIZE 12
#define MTK_SEC_MAP_NEED_SEND_SIZE 52
enum {
BTMTK_WMT_PATCH_DWNLD = 0x1,
BTMTK_WMT_FUNC_CTRL = 0x6,
BTMTK_WMT_RST = 0x7,
BTMTK_WMT_SEMAPHORE = 0x17,
};
enum {
BTMTK_WMT_INVALID,
BTMTK_WMT_PATCH_UNDONE,
BTMTK_WMT_PATCH_PROGRESS,
BTMTK_WMT_PATCH_DONE,
BTMTK_WMT_ON_UNDONE,
BTMTK_WMT_ON_DONE,
BTMTK_WMT_ON_PROGRESS,
};
struct btmtk_wmt_hdr {
u8 dir;
u8 op;
__le16 dlen;
u8 flag;
} __packed;
struct btmtk_hci_wmt_cmd {
struct btmtk_wmt_hdr hdr;
u8 data[];
} __packed;
struct btmtk_hci_wmt_evt {
struct hci_event_hdr hhdr;
struct btmtk_wmt_hdr whdr;
} __packed;
struct btmtk_hci_wmt_evt_funcc {
struct btmtk_hci_wmt_evt hwhdr;
__be16 status;
} __packed;
struct btmtk_tci_sleep {
u8 mode;
__le16 duration;
__le16 host_duration;
u8 host_wakeup_pin;
u8 time_compensation;
} __packed;
struct btmtk_hci_wmt_params {
u8 op;
u8 flag;
u16 dlen;
const void *data;
u32 *status;
};
struct btmtk_patch_header {
u8 datetime[16];
u8 platform[4];
__le16 hwver;
__le16 swver;
__le32 magicnum;
} __packed;
struct btmtk_global_desc {
__le32 patch_ver;
__le32 sub_sys;
__le32 feature_opt;
__le32 section_num;
} __packed;
struct btmtk_section_map {
__le32 sectype;
__le32 secoffset;
__le32 secsize;
union {
__le32 u4SecSpec[13];
struct {
__le32 dlAddr;
__le32 dlsize;
__le32 seckeyidx;
__le32 alignlen;
__le32 sectype;
__le32 dlmodecrctype;
__le32 crc;
__le32 reserved[6];
} bin_info_spec;
};
} __packed;
static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
struct sk_buff *skb;
long ret;
skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
ret = PTR_ERR(skb);
bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
ret);
return ret;
}
kfree_skb(skb);
return 0;
}
static void btusb_mtk_wmt_recv(struct urb *urb) static void btusb_mtk_wmt_recv(struct urb *urb)
{ {
@@ -2265,6 +2150,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 	skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
 	if (!skb) {
 		hdev->stat.err_rx++;
+		kfree(urb->setup_packet);
 		return;
 	}
@@ -2285,6 +2171,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 		data->evt_skb = skb_clone(skb, GFP_ATOMIC);
 		if (!data->evt_skb) {
 			kfree_skb(skb);
+			kfree(urb->setup_packet);
 			return;
 		}
 	}
@@ -2293,6 +2180,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 	if (err < 0) {
 		kfree_skb(data->evt_skb);
 		data->evt_skb = NULL;
+		kfree(urb->setup_packet);
 		return;
 	}
@@ -2303,6 +2191,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 			wake_up_bit(&data->flags,
 				    BTUSB_TX_WAIT_VND_EVT);
 		}
+		kfree(urb->setup_packet);
 		return;
 	} else if (urb->status == -ENOENT) {
 		/* Avoid suspend failed when usb_kill_urb */
@@ -2323,6 +2212,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
 	usb_anchor_urb(urb, &data->ctrl_anchor);
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (err < 0) {
+		kfree(urb->setup_packet);
 		/* -EPERM: urb is being killed;
 		 * -ENODEV: device got disconnected
 		 */
@@ -2497,209 +2387,6 @@ err_free_wc:
return err; return err;
} }
static int btusb_mtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname)
{
struct btmtk_hci_wmt_params wmt_params;
struct btmtk_global_desc *globaldesc = NULL;
struct btmtk_section_map *sectionmap;
const struct firmware *fw;
const u8 *fw_ptr;
const u8 *fw_bin_ptr;
int err, dlen, i, status;
u8 flag, first_block, retry;
u32 section_num, dl_size, section_offset;
u8 cmd[64];
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
return err;
}
fw_ptr = fw->data;
fw_bin_ptr = fw_ptr;
globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE);
section_num = le32_to_cpu(globaldesc->section_num);
for (i = 0; i < section_num; i++) {
first_block = 1;
fw_ptr = fw_bin_ptr;
sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i);
section_offset = le32_to_cpu(sectionmap->secoffset);
dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize);
if (dl_size > 0) {
retry = 20;
while (retry > 0) {
cmd[0] = 0; /* 0 means legacy dl mode. */
memcpy(cmd + 1,
fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE +
MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i +
MTK_SEC_MAP_COMMON_SIZE,
MTK_SEC_MAP_NEED_SEND_SIZE + 1);
wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
wmt_params.status = &status;
wmt_params.flag = 0;
wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1;
wmt_params.data = &cmd;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
err);
goto err_release_fw;
}
if (status == BTMTK_WMT_PATCH_UNDONE) {
break;
} else if (status == BTMTK_WMT_PATCH_PROGRESS) {
msleep(100);
retry--;
} else if (status == BTMTK_WMT_PATCH_DONE) {
goto next_section;
} else {
bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)",
status);
goto err_release_fw;
}
}
fw_ptr += section_offset;
wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
wmt_params.status = NULL;
while (dl_size > 0) {
dlen = min_t(int, 250, dl_size);
if (first_block == 1) {
flag = 1;
first_block = 0;
} else if (dl_size - dlen <= 0) {
flag = 3;
} else {
flag = 2;
}
wmt_params.flag = flag;
wmt_params.dlen = dlen;
wmt_params.data = fw_ptr;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
err);
goto err_release_fw;
}
dl_size -= dlen;
fw_ptr += dlen;
}
}
next_section:
continue;
}
/* Wait a few moments for firmware activation done */
usleep_range(100000, 120000);
err_release_fw:
release_firmware(fw);
return err;
}
static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
{
struct btmtk_hci_wmt_params wmt_params;
const struct firmware *fw;
const u8 *fw_ptr;
size_t fw_size;
int err, dlen;
u8 flag, param;
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
return err;
}
/* Power on data RAM the firmware relies on. */
param = 1;
wmt_params.op = BTMTK_WMT_FUNC_CTRL;
wmt_params.flag = 3;
wmt_params.dlen = sizeof(param);
wmt_params.data = &param;
wmt_params.status = NULL;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
goto err_release_fw;
}
fw_ptr = fw->data;
fw_size = fw->size;
/* The size of patch header is 30 bytes, should be skip */
if (fw_size < 30) {
err = -EINVAL;
goto err_release_fw;
}
fw_size -= 30;
fw_ptr += 30;
flag = 1;
wmt_params.op = BTMTK_WMT_PATCH_DWNLD;
wmt_params.status = NULL;
while (fw_size > 0) {
dlen = min_t(int, 250, fw_size);
/* Tell device the position in sequence */
if (fw_size - dlen <= 0)
flag = 3;
else if (fw_size < fw->size - 30)
flag = 2;
wmt_params.flag = flag;
wmt_params.dlen = dlen;
wmt_params.data = fw_ptr;
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
err);
goto err_release_fw;
}
fw_size -= dlen;
fw_ptr += dlen;
}
wmt_params.op = BTMTK_WMT_RST;
wmt_params.flag = 4;
wmt_params.dlen = 0;
wmt_params.data = NULL;
wmt_params.status = NULL;
/* Activate funciton the firmware providing to */
err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
if (err < 0) {
bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
goto err_release_fw;
}
/* Wait a few moments for firmware activation done */
usleep_range(10000, 12000);
err_release_fw:
release_firmware(fw);
return err;
}
static int btusb_mtk_func_query(struct hci_dev *hdev) static int btusb_mtk_func_query(struct hci_dev *hdev)
{ {
struct btmtk_hci_wmt_params wmt_params; struct btmtk_hci_wmt_params wmt_params;
@@ -2857,7 +2544,8 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
 	snprintf(fw_bin_name, sizeof(fw_bin_name),
 		 "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
 		 dev_id & 0xffff, (fw_version & 0xff) + 1);
-	err = btusb_mtk_setup_firmware_79xx(hdev, fw_bin_name);
+	err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
+					btusb_mtk_hci_wmt_sync);
 
 	/* It's Device EndPoint Reset Option Register */
 	btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT);
@@ -2877,6 +2565,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
 		}
 
 		hci_set_msft_opcode(hdev, 0xFD30);
+		hci_set_aosp_capable(hdev);
 		goto done;
 	default:
 		bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
@@ -2903,7 +2592,8 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
 	}
 
 	/* Setup a firmware which the device definitely requires */
-	err = btusb_mtk_setup_firmware(hdev, fwname);
+	err = btmtk_setup_firmware(hdev, fwname,
+				   btusb_mtk_hci_wmt_sync);
 	if (err < 0)
 		return err;
@@ -3064,9 +2754,6 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
 	return hci_recv_frame(hdev, skb);
 }
 
-MODULE_FIRMWARE(FIRMWARE_MT7663);
-MODULE_FIRMWARE(FIRMWARE_MT7668);
-
 #ifdef CONFIG_PM
 /* Configure an out-of-band gpio as wake-up pin, if specified in device tree */
 static int marvell_config_oob_wake(struct hci_dev *hdev)
@@ -3190,6 +2877,9 @@ static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev,
 #define QCA_DFU_TIMEOUT		3000
 #define QCA_FLAG_MULTI_NVM	0x80
 
+#define WCN6855_2_0_RAM_VERSION_GF	0x400c1200
+#define WCN6855_2_1_RAM_VERSION_GF	0x400c1211
+
 struct qca_version {
 	__le32	rom_version;
 	__le32	patch_version;
@@ -3221,6 +2911,7 @@ static const struct qca_device_info qca_devices_table[] = {
 	{ 0x00000302, 28, 4, 16 }, /* Rome 3.2 */
 	{ 0x00130100, 40, 4, 16 }, /* WCN6855 1.0 */
 	{ 0x00130200, 40, 4, 16 }, /* WCN6855 2.0 */
+	{ 0x00130201, 40, 4, 16 }, /* WCN6855 2.1 */
 };
 
 static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
@@ -3375,6 +3066,40 @@ done:
return err; return err;
} }
static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
const struct qca_version *ver)
{
u32 rom_version = le32_to_cpu(ver->rom_version);
u16 flag = le16_to_cpu(ver->flag);
if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
u16 board_id = le16_to_cpu(ver->board_id);
const char *variant;
switch (le32_to_cpu(ver->ram_version)) {
case WCN6855_2_0_RAM_VERSION_GF:
case WCN6855_2_1_RAM_VERSION_GF:
variant = "_gf";
break;
default:
variant = "";
break;
}
if (board_id == 0) {
snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
rom_version, variant);
} else {
snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
rom_version, variant, board_id);
}
} else {
snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
rom_version);
}
}
static int btusb_setup_qca_load_nvm(struct hci_dev *hdev, static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
struct qca_version *ver, struct qca_version *ver,
const struct qca_device_info *info) const struct qca_device_info *info)
@@ -3383,20 +3108,7 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
 	char fwname[64];
 	int err;
 
-	if (((ver->flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
-		/* if boardid equal 0, use default nvm without surfix */
-		if (le16_to_cpu(ver->board_id) == 0x0) {
-			snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
-				 le32_to_cpu(ver->rom_version));
-		} else {
-			snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x_%04x.bin",
-				 le32_to_cpu(ver->rom_version),
-				 le16_to_cpu(ver->board_id));
-		}
-	} else {
-		snprintf(fwname, sizeof(fwname), "qca/nvm_usb_%08x.bin",
-			 le32_to_cpu(ver->rom_version));
-	}
+	btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver);
 
 	err = request_firmware(&fw, fwname, &hdev->dev);
 	if (err) {
@@ -3868,7 +3580,7 @@ static int btusb_probe(struct usb_interface *intf,
 		hdev->shutdown = btusb_mtk_shutdown;
 		hdev->manufacturer = 70;
 		hdev->cmd_timeout = btusb_mtk_cmd_timeout;
-		hdev->set_bdaddr = btusb_set_bdaddr_mtk;
+		hdev->set_bdaddr = btmtk_set_bdaddr;
 		set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
 		data->recv_acl = btusb_recv_acl_mtk;
 	}


@@ -1508,7 +1508,6 @@ static const struct of_device_id bcm_bluetooth_of_match[] = {
{ .compatible = "brcm,bcm4330-bt" }, { .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm4334-bt" }, { .compatible = "brcm,bcm4334-bt" },
{ .compatible = "brcm,bcm4345c5" }, { .compatible = "brcm,bcm4345c5" },
{ .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data }, { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data },
{ .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data }, { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data },
{ .compatible = "brcm,bcm4335a0" }, { .compatible = "brcm,bcm4335a0" },


@@ -252,7 +252,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
 		}
 
 		if (!dlen) {
-			hu->padding = (skb->len - 1) % alignment;
+			hu->padding = (skb->len + 1) % alignment;
 			hu->padding = (alignment - hu->padding) % alignment;
 
 			/* No more data, complete frame */
@@ -260,7 +260,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
 			skb = NULL;
 		}
 	} else {
-		hu->padding = (skb->len - 1) % alignment;
+		hu->padding = (skb->len + 1) % alignment;
 		hu->padding = (alignment - hu->padding) % alignment;
 
 		/* Complete frame */


@@ -38,9 +38,12 @@ struct vhci_data {
 	struct mutex open_mutex;
 	struct delayed_work open_timeout;
+	struct work_struct suspend_work;
 
 	bool suspended;
 	bool wakeup;
+	__u16 msft_opcode;
+	bool aosp_capable;
 };
 
 static int vhci_open_dev(struct hci_dev *hdev)
@@ -114,6 +117,17 @@ static ssize_t force_suspend_read(struct file *file, char __user *user_buf,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 }
 
+static void vhci_suspend_work(struct work_struct *work)
+{
+	struct vhci_data *data = container_of(work, struct vhci_data,
+					      suspend_work);
+
+	if (data->suspended)
+		hci_suspend_dev(data->hdev);
+	else
+		hci_resume_dev(data->hdev);
+}
+
 static ssize_t force_suspend_write(struct file *file,
 				   const char __user *user_buf,
 				   size_t count, loff_t *ppos)
@@ -129,16 +143,10 @@ static ssize_t force_suspend_write(struct file *file,
 	if (data->suspended == enable)
 		return -EALREADY;
 
-	if (enable)
-		err = hci_suspend_dev(data->hdev);
-	else
-		err = hci_resume_dev(data->hdev);
-
-	if (err)
-		return err;
-
 	data->suspended = enable;
 
+	schedule_work(&data->suspend_work);
+
 	return count;
 }
@@ -176,6 +184,8 @@ static ssize_t force_wakeup_write(struct file *file,
 	if (data->wakeup == enable)
 		return -EALREADY;
 
+	data->wakeup = enable;
+
 	return count;
 }
@@ -186,6 +196,88 @@ static const struct file_operations force_wakeup_fops = {
.llseek = default_llseek, .llseek = default_llseek,
}; };
static int msft_opcode_set(void *data, u64 val)
{
struct vhci_data *vhci = data;
if (val > 0xffff || hci_opcode_ogf(val) != 0x3f)
return -EINVAL;
if (vhci->msft_opcode)
return -EALREADY;
vhci->msft_opcode = val;
return 0;
}
static int msft_opcode_get(void *data, u64 *val)
{
struct vhci_data *vhci = data;
*val = vhci->msft_opcode;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set,
"%llu\n");
static ssize_t aosp_capable_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct vhci_data *vhci = file->private_data;
char buf[3];
buf[0] = vhci->aosp_capable ? 'Y' : 'N';
buf[1] = '\n';
buf[2] = '\0';
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t aosp_capable_write(struct file *file,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
struct vhci_data *vhci = file->private_data;
bool enable;
int err;
err = kstrtobool_from_user(user_buf, count, &enable);
if (err)
return err;
if (!enable)
return -EINVAL;
if (vhci->aosp_capable)
return -EALREADY;
vhci->aosp_capable = enable;
return count;
}
static const struct file_operations aosp_capable_fops = {
.open = simple_open,
.read = aosp_capable_read,
.write = aosp_capable_write,
.llseek = default_llseek,
};
static int vhci_setup(struct hci_dev *hdev)
{
struct vhci_data *vhci = hci_get_drvdata(hdev);
if (vhci->msft_opcode)
hci_set_msft_opcode(hdev, vhci->msft_opcode);
if (vhci->aosp_capable)
hci_set_aosp_capable(hdev);
return 0;
}
static int __vhci_create_device(struct vhci_data *data, __u8 opcode) static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{ {
struct hci_dev *hdev; struct hci_dev *hdev;
@@ -228,6 +320,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
 	hdev->get_data_path_id = vhci_get_data_path_id;
 	hdev->get_codec_config_data = vhci_get_codec_config_data;
 	hdev->wakeup = vhci_wakeup;
+	hdev->setup = vhci_setup;
+	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
 
 	/* bit 6 is for external configuration */
 	if (opcode & 0x40)
@@ -251,6 +345,14 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
&force_wakeup_fops); &force_wakeup_fops);
if (IS_ENABLED(CONFIG_BT_MSFTEXT))
debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data,
&msft_opcode_fops);
if (IS_ENABLED(CONFIG_BT_AOSPEXT))
debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data,
&aosp_capable_fops);
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb_put_u8(skb, 0xff); skb_put_u8(skb, 0xff);
@@ -440,6 +542,7 @@ static int vhci_open(struct inode *inode, struct file *file)
 	mutex_init(&data->open_mutex);
 	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
+	INIT_WORK(&data->suspend_work, vhci_suspend_work);
 
 	file->private_data = data;
 	nonseekable_open(inode, file);
@@ -455,6 +558,7 @@ static int vhci_release(struct inode *inode, struct file *file)
 	struct hci_dev *hdev;
 
 	cancel_delayed_work_sync(&data->open_timeout);
+	flush_work(&data->suspend_work);
 
 	hdev = data->hdev;

View File

@@ -202,6 +202,9 @@ static void virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb)
hci_skb_pkt_type(skb) = pkt_type; hci_skb_pkt_type(skb) = pkt_type;
hci_recv_frame(vbt->hdev, skb); hci_recv_frame(vbt->hdev, skb);
break; break;
default:
kfree_skb(skb);
break;
} }
} }

View File

@@ -105,6 +105,7 @@
#define SDIO_VENDOR_ID_MEDIATEK 0x037a #define SDIO_VENDOR_ID_MEDIATEK 0x037a
#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663 #define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663
#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668 #define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668
#define SDIO_DEVICE_ID_MEDIATEK_MT7961 0x7961
#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296 #define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347 #define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347

View File

@@ -380,6 +380,7 @@ typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status,
#define HCI_REQ_SKB BIT(1) #define HCI_REQ_SKB BIT(1)
struct hci_ctrl { struct hci_ctrl {
struct sock *sk;
u16 opcode; u16 opcode;
u8 req_flags; u8 req_flags;
u8 req_event; u8 req_event;
@@ -405,6 +406,7 @@ struct bt_skb_cb {
#define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type #define hci_skb_pkt_type(skb) bt_cb((skb))->pkt_type
#define hci_skb_expect(skb) bt_cb((skb))->expect #define hci_skb_expect(skb) bt_cb((skb))->expect
#define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode #define hci_skb_opcode(skb) bt_cb((skb))->hci.opcode
#define hci_skb_sk(skb) bt_cb((skb))->hci.sk
static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
{ {

View File

@@ -566,6 +566,7 @@ enum {
#define HCI_ERROR_INVALID_LL_PARAMS 0x1e #define HCI_ERROR_INVALID_LL_PARAMS 0x1e
#define HCI_ERROR_UNSPECIFIED 0x1f #define HCI_ERROR_UNSPECIFIED 0x1f
#define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c #define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c
#define HCI_ERROR_CANCELLED_BY_HOST 0x44
/* Flow control modes */ /* Flow control modes */
#define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00 #define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00

View File

@@ -30,6 +30,7 @@
#include <linux/rculist.h> #include <linux/rculist.h>
#include <net/bluetooth/hci.h> #include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_sync.h>
#include <net/bluetooth/hci_sock.h> #include <net/bluetooth/hci_sock.h>
/* HCI priority */ /* HCI priority */
@@ -475,6 +476,9 @@ struct hci_dev {
struct work_struct power_on; struct work_struct power_on;
struct delayed_work power_off; struct delayed_work power_off;
struct work_struct error_reset; struct work_struct error_reset;
struct work_struct cmd_sync_work;
struct list_head cmd_sync_work_list;
struct mutex cmd_sync_work_lock;
__u16 discov_timeout; __u16 discov_timeout;
struct delayed_work discov_off; struct delayed_work discov_off;
@@ -489,10 +493,7 @@ struct hci_dev {
struct work_struct tx_work; struct work_struct tx_work;
struct work_struct discov_update; struct work_struct discov_update;
struct work_struct bg_scan_update;
struct work_struct scan_update; struct work_struct scan_update;
struct work_struct connectable_update;
struct work_struct discoverable_update;
struct delayed_work le_scan_disable; struct delayed_work le_scan_disable;
struct delayed_work le_scan_restart; struct delayed_work le_scan_restart;
@@ -519,7 +520,6 @@ struct hci_dev {
bool advertising_paused; bool advertising_paused;
struct notifier_block suspend_notifier; struct notifier_block suspend_notifier;
struct work_struct suspend_prepare;
enum suspended_state suspend_state_next; enum suspended_state suspend_state_next;
enum suspended_state suspend_state; enum suspended_state suspend_state;
bool scanning_paused; bool scanning_paused;
@@ -528,9 +528,6 @@ struct hci_dev {
bdaddr_t wake_addr; bdaddr_t wake_addr;
u8 wake_addr_type; u8 wake_addr_type;
wait_queue_head_t suspend_wait_q;
DECLARE_BITMAP(suspend_tasks, __SUSPEND_NUM_TASKS);
struct hci_conn_hash conn_hash; struct hci_conn_hash conn_hash;
struct list_head mgmt_pending; struct list_head mgmt_pending;
@@ -603,6 +600,7 @@ struct hci_dev {
#if IS_ENABLED(CONFIG_BT_AOSPEXT) #if IS_ENABLED(CONFIG_BT_AOSPEXT)
bool aosp_capable; bool aosp_capable;
bool aosp_quality_report;
#endif #endif
int (*open)(struct hci_dev *hdev); int (*open)(struct hci_dev *hdev);
@@ -1461,8 +1459,11 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
#define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
/* Use LL Privacy based address resolution if supported */ /* Use LL Privacy based address resolution if supported */
#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) #define use_ll_privacy(dev) (ll_privacy_capable(dev) && \
hci_dev_test_flag(dev, HCI_ENABLE_LL_PRIVACY))
/* Use enhanced synchronous connection if command is supported */ /* Use enhanced synchronous connection if command is supported */
#define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08) #define enhanced_sco_capable(dev) ((dev)->commands[29] & 0x08)
@@ -1690,10 +1691,6 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
int hci_register_cb(struct hci_cb *hcb); int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb); int hci_unregister_cb(struct hci_cb *hcb);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout);
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param); const void *param);
@@ -1704,9 +1701,6 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
u32 hci_conn_get_phy(struct hci_conn *conn); u32 hci_conn_get_phy(struct hci_conn *conn);
/* ----- HCI Sockets ----- */ /* ----- HCI Sockets ----- */
@@ -1806,7 +1800,6 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 entered); u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status); void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
u8 status); u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
@@ -1831,8 +1824,6 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
u16 max_interval, u16 latency, u16 timeout); u16 max_interval, u16 latency, u16 timeout);
void mgmt_smp_complete(struct hci_conn *conn, bool complete); void mgmt_smp_complete(struct hci_conn *conn, bool complete);
bool mgmt_get_connectable(struct hci_dev *hdev); bool mgmt_get_connectable(struct hci_dev *hdev);
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status);
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status);
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev); u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev);
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance); u8 instance);

View File

@@ -0,0 +1,102 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* BlueZ - Bluetooth protocol stack for Linux
*
* Copyright (C) 2021 Intel Corporation
*/
typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data);
typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data,
int err);
struct hci_cmd_sync_work_entry {
struct list_head list;
hci_cmd_sync_work_func_t func;
void *data;
hci_cmd_sync_work_destroy_t destroy;
};
/* Functions with the sync suffix shall not be called with hdev->lock held, as
 * they wait for the command to complete and in the meantime an event could be
 * received which could attempt to acquire hdev->lock, causing a deadlock.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout);
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
struct sock *sk);
int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout);
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout,
struct sock *sk);
void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev);
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
int hci_update_eir_sync(struct hci_dev *hdev);
int hci_update_class_sync(struct hci_dev *hdev);
int hci_update_name_sync(struct hci_dev *hdev);
int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode);
int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
bool rpa, u8 *own_addr_type);
int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance);
int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance);
int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
bool force);
int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance);
int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance);
int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance);
int hci_enable_advertising_sync(struct hci_dev *hdev);
int hci_enable_advertising(struct hci_dev *hdev);
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
u8 instance, bool force);
int hci_disable_advertising_sync(struct hci_dev *hdev);
int hci_update_passive_scan_sync(struct hci_dev *hdev);
int hci_update_passive_scan(struct hci_dev *hdev);
int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle);
int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type);
int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val);
int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp);
int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable);
int hci_update_scan_sync(struct hci_dev *hdev);
int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul);
int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
struct sock *sk);
struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext,
struct sock *sk);
int hci_reset_sync(struct hci_dev *hdev);
int hci_dev_open_sync(struct hci_dev *hdev);
int hci_dev_close_sync(struct hci_dev *hdev);
int hci_powered_update_sync(struct hci_dev *hdev);
int hci_set_powered_sync(struct hci_dev *hdev, u8 val);
int hci_update_discoverable_sync(struct hci_dev *hdev);
int hci_update_discoverable(struct hci_dev *hdev);
int hci_update_connectable_sync(struct hci_dev *hdev);
int hci_start_discovery_sync(struct hci_dev *hdev);
int hci_stop_discovery_sync(struct hci_dev *hdev);
int hci_suspend_sync(struct hci_dev *hdev);
int hci_resume_sync(struct hci_dev *hdev);
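This header is the core of the new command-execution serialization: callers queue a work function with hci_cmd_sync_queue(), and the entries run one at a time from the hdev->cmd_sync_work context, where the blocking __hci_cmd_sync*() helpers may be used safely. A minimal sketch of a caller, under the assumption that it runs in process context without hdev->lock held (the function and data names are invented for illustration):

	static int set_example_mode_sync(struct hci_dev *hdev, void *data)
	{
		u8 mode = *(u8 *)data;

		/* Runs on the cmd_sync workqueue; may block on the controller */
		return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
					     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
	}

	static void set_example_mode_complete(struct hci_dev *hdev, void *data, int err)
	{
		bt_dev_dbg(hdev, "example mode set, err %d", err);
		kfree(data);
	}

	static int queue_example_mode(struct hci_dev *hdev, u8 mode)
	{
		u8 *data = kmemdup(&mode, sizeof(mode), GFP_KERNEL);

		if (!data)
			return -ENOMEM;

		return hci_cmd_sync_queue(hdev, set_example_mode_sync, data,
					  set_example_mode_complete);
	}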

View File

@@ -15,7 +15,7 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \
eir.o eir.o hci_sync.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o

View File

@@ -8,9 +8,43 @@
#include "aosp.h" #include "aosp.h"
/* Command complete parameters of LE_Get_Vendor_Capabilities_Command
* The parameters grow over time. The base version that declares the
* version_supported field is v0.95. Refer to
* https://cs.android.com/android/platform/superproject/+/master:system/
* bt/gd/hci/controller.cc;l=452?q=le_get_vendor_capabilities_handler
*/
struct aosp_rp_le_get_vendor_capa {
/* v0.95: 15 octets */
__u8 status;
__u8 max_advt_instances;
__u8 offloaded_resolution_of_private_address;
__le16 total_scan_results_storage;
__u8 max_irk_list_sz;
__u8 filtering_support;
__u8 max_filter;
__u8 activity_energy_info_support;
__le16 version_supported;
__le16 total_num_of_advt_tracked;
__u8 extended_scan_support;
__u8 debug_logging_supported;
/* v0.96: 16 octets */
__u8 le_address_generation_offloading_support;
/* v0.98: 21 octets */
__le32 a2dp_source_offload_capability_mask;
__u8 bluetooth_quality_report_support;
/* v1.00: 25 octets */
__le32 dynamic_audio_buffer_support;
} __packed;
#define VENDOR_CAPA_BASE_SIZE 15
#define VENDOR_CAPA_0_98_SIZE 21
void aosp_do_open(struct hci_dev *hdev) void aosp_do_open(struct hci_dev *hdev)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct aosp_rp_le_get_vendor_capa *rp;
u16 version_supported;
if (!hdev->aosp_capable) if (!hdev->aosp_capable)
return; return;
@@ -20,9 +54,54 @@ void aosp_do_open(struct hci_dev *hdev)
/* LE Get Vendor Capabilities Command */ /* LE Get Vendor Capabilities Command */
skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL, skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL,
HCI_CMD_TIMEOUT); HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) if (IS_ERR(skb)) {
bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)",
PTR_ERR(skb));
return; return;
}
/* A basic length check */
if (skb->len < VENDOR_CAPA_BASE_SIZE)
goto length_error;
rp = (struct aosp_rp_le_get_vendor_capa *)skb->data;
version_supported = le16_to_cpu(rp->version_supported);
/* AOSP displays the version number like v0.98, v1.00, etc. */
bt_dev_info(hdev, "AOSP extensions version v%u.%02u",
version_supported >> 8, version_supported & 0xff);
/* Do not support very old versions. */
if (version_supported < 95) {
bt_dev_warn(hdev, "AOSP capabilities version %u too old",
version_supported);
goto done;
}
if (version_supported < 98) {
bt_dev_warn(hdev, "AOSP quality report is not supported");
goto done;
}
if (skb->len < VENDOR_CAPA_0_98_SIZE)
goto length_error;
/* The bluetooth_quality_report_support is defined at version
* v0.98. Refer to
* https://cs.android.com/android/platform/superproject/+/
* master:system/bt/gd/hci/controller.cc;l=477
*/
if (rp->bluetooth_quality_report_support) {
hdev->aosp_quality_report = true;
bt_dev_info(hdev, "AOSP quality report is supported");
}
goto done;
length_error:
bt_dev_err(hdev, "AOSP capabilities length %d too short", skb->len);
done:
kfree_skb(skb); kfree_skb(skb);
} }
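A note on the version arithmetic above: version_supported is a little-endian 16-bit value whose high byte is the major and low byte the minor, so the raw value can be compared directly against 95 and 98. Worked values, matching the struct layout comments earlier in this hunk:

	version_supported = 95  (0x005f)  ->  printed as "v0.95"  (base layout, 15 octets)
	version_supported = 98  (0x0062)  ->  printed as "v0.98"  (adds bluetooth_quality_report_support)
	version_supported = 256 (0x0100)  ->  printed as "v1.00"  (adds dynamic_audio_buffer_support)

This is also why a v1.00 controller (raw value 256) passes both of the "too old" checks above.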
@@ -33,3 +112,90 @@ void aosp_do_close(struct hci_dev *hdev)
bt_dev_dbg(hdev, "Cleanup of AOSP extension"); bt_dev_dbg(hdev, "Cleanup of AOSP extension");
} }
/* BQR command */
#define BQR_OPCODE hci_opcode_pack(0x3f, 0x015e)
/* BQR report action */
#define REPORT_ACTION_ADD 0x00
#define REPORT_ACTION_DELETE 0x01
#define REPORT_ACTION_CLEAR 0x02
/* BQR event masks */
#define QUALITY_MONITORING BIT(0)
#define APPRAOCHING_LSTO BIT(1)
#define A2DP_AUDIO_CHOPPY BIT(2)
#define SCO_VOICE_CHOPPY BIT(3)
#define DEFAULT_BQR_EVENT_MASK (QUALITY_MONITORING | APPRAOCHING_LSTO | \
A2DP_AUDIO_CHOPPY | SCO_VOICE_CHOPPY)
/* Reporting interval is in milliseconds so as not to stress the controller
 * too much.
 * Range: 0 ~ 65535 ms
 */
#define DEFALUT_REPORT_INTERVAL_MS 5000
struct aosp_bqr_cp {
__u8 report_action;
__u32 event_mask;
__u16 min_report_interval;
} __packed;
static int enable_quality_report(struct hci_dev *hdev)
{
struct sk_buff *skb;
struct aosp_bqr_cp cp;
cp.report_action = REPORT_ACTION_ADD;
cp.event_mask = DEFAULT_BQR_EVENT_MASK;
cp.min_report_interval = DEFALUT_REPORT_INTERVAL_MS;
skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Enabling Android BQR failed (%ld)",
PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
return 0;
}
static int disable_quality_report(struct hci_dev *hdev)
{
struct sk_buff *skb;
struct aosp_bqr_cp cp = { 0 };
cp.report_action = REPORT_ACTION_CLEAR;
skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Disabling Android BQR failed (%ld)",
PTR_ERR(skb));
return PTR_ERR(skb);
}
kfree_skb(skb);
return 0;
}
bool aosp_has_quality_report(struct hci_dev *hdev)
{
return hdev->aosp_quality_report;
}
int aosp_set_quality_report(struct hci_dev *hdev, bool enable)
{
if (!aosp_has_quality_report(hdev))
return -EOPNOTSUPP;
bt_dev_dbg(hdev, "quality report enable %d", enable);
/* Enable or disable the quality report feature. */
if (enable)
return enable_quality_report(hdev);
else
return disable_quality_report(hdev);
}
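aosp_set_quality_report() is the entry point the rest of the stack is expected to use; where it is actually wired up (for instance a mgmt experimental-feature toggle) is outside this hunk, so the following caller is illustrative only:

	/* sketch: how code elsewhere in the stack might toggle BQR */
	static int example_toggle_bqr(struct hci_dev *hdev, bool enable)
	{
		int err;

		if (!aosp_has_quality_report(hdev))
			return -EOPNOTSUPP;

		err = aosp_set_quality_report(hdev, enable);
		if (err)
			bt_dev_err(hdev, "BQR %s failed (%d)",
				   enable ? "enable" : "disable", err);

		return err;
	}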

View File

@@ -8,9 +8,22 @@
void aosp_do_open(struct hci_dev *hdev); void aosp_do_open(struct hci_dev *hdev);
void aosp_do_close(struct hci_dev *hdev); void aosp_do_close(struct hci_dev *hdev);
bool aosp_has_quality_report(struct hci_dev *hdev);
int aosp_set_quality_report(struct hci_dev *hdev, bool enable);
#else #else
static inline void aosp_do_open(struct hci_dev *hdev) {} static inline void aosp_do_open(struct hci_dev *hdev) {}
static inline void aosp_do_close(struct hci_dev *hdev) {} static inline void aosp_do_close(struct hci_dev *hdev) {}
static inline bool aosp_has_quality_report(struct hci_dev *hdev)
{
return false;
}
static inline int aosp_set_quality_report(struct hci_dev *hdev, bool enable)
{
return -EOPNOTSUPP;
}
#endif #endif

View File

@@ -501,9 +501,7 @@ static int __init cmtp_init(void)
{ {
BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
cmtp_init_sockets(); return cmtp_init_sockets();
return 0;
} }
static void __exit cmtp_exit(void) static void __exit cmtp_exit(void)

View File

@@ -25,9 +25,11 @@ static int hci_codec_list_add(struct list_head *list,
} }
entry->transport = sent->transport; entry->transport = sent->transport;
entry->len = len; entry->len = len;
entry->num_caps = 0;
if (rp) {
entry->num_caps = rp->num_caps; entry->num_caps = rp->num_caps;
if (rp->num_caps)
memcpy(entry->caps, caps, len); memcpy(entry->caps, caps, len);
}
list_add(&entry->list, list); list_add(&entry->list, list);
return 0; return 0;
@@ -58,6 +60,18 @@ static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport,
__u32 len; __u32 len;
cmd->transport = i; cmd->transport = i;
/* If Read_Codec_Capabilities command is not supported
* then just add codec to the list without caps
*/
if (!(hdev->commands[45] & 0x08)) {
hci_dev_lock(hdev);
hci_codec_list_add(&hdev->local_codecs, cmd,
NULL, NULL, 0);
hci_dev_unlock(hdev);
continue;
}
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS, skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS,
sizeof(*cmd), cmd, sizeof(*cmd), cmd,
HCI_CMD_TIMEOUT); HCI_CMD_TIMEOUT);
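The hdev->commands[45] & 0x08 test added above follows the usual pattern for probing the controller's Supported_Commands bitmap; per this check, octet 45 bit 3 corresponds to Read_Local_Supported_Codec_Capabilities. A small equivalent helper, purely illustrative:

	#include <linux/bits.h>

	static bool can_read_codec_caps(struct hci_dev *hdev)
	{
		/* Same test as above: Supported_Commands octet 45, bit 3 (0x08) */
		return hdev->commands[45] & BIT(3);
	}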

View File

@@ -108,7 +108,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
break; break;
} }
hci_update_background_scan(hdev); hci_update_passive_scan(hdev);
} }
static void hci_conn_cleanup(struct hci_conn *conn) static void hci_conn_cleanup(struct hci_conn *conn)
@@ -900,25 +900,15 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
hci_conn_del(conn); hci_conn_del(conn);
/* The suspend notifier is waiting for all devices to disconnect and an
* LE connect cancel will result in an hci_le_conn_failed. Once the last
* connection is deleted, we should also wake the suspend queue to
* complete suspend operations.
*/
if (list_empty(&hdev->conn_hash.list) &&
test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
wake_up(&hdev->suspend_wait_q);
}
/* Since we may have temporarily stopped the background scanning in /* Since we may have temporarily stopped the background scanning in
* favor of connection establishment, we should restart it. * favor of connection establishment, we should restart it.
*/ */
hci_update_background_scan(hdev); hci_update_passive_scan(hdev);
/* Re-enable advertising in case this was a failed connection /* Enable advertising in case this was a failed connection
* attempt as a peripheral. * attempt as a peripheral.
*/ */
hci_req_reenable_advertising(hdev); hci_enable_advertising(hdev);
} }
static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
@@ -1411,7 +1401,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
conn->conn_timeout = conn_timeout; conn->conn_timeout = conn_timeout;
conn->conn_reason = conn_reason; conn->conn_reason = conn_reason;
hci_update_background_scan(hdev); hci_update_passive_scan(hdev);
done: done:
hci_conn_hold(conn); hci_conn_hold(conn);
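Throughout this file (and below in hci_event.c) the old hci_update_background_scan() and hci_req_reenable_advertising() helpers are replaced by hci_update_passive_scan() and hci_enable_advertising(), both declared in the new hci_sync.h. Their bodies live in the suppressed net/bluetooth/hci_sync.c, so the following is only a plausible sketch of the non-blocking wrapper, assuming it follows the hci_cmd_sync_queue() pattern shown earlier:

	/* sketch (assumption): queue the sync variant on the cmd_sync workqueue */
	static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
	{
		return hci_update_passive_scan_sync(hdev);
	}

	int hci_update_passive_scan(struct hci_dev *hdev)
	{
		/* Only touch scanning once the controller is fully up */
		if (!test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags))
			return 0;

		return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
	}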

File diff suppressed because it is too large

View File

@@ -545,9 +545,7 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
hdev->features[1][0] &= ~LMP_HOST_SSP; hdev->features[1][0] &= ~LMP_HOST_SSP;
} }
if (hci_dev_test_flag(hdev, HCI_MGMT)) if (!status) {
mgmt_ssp_enable_complete(hdev, sent->mode, status);
else if (!status) {
if (sent->mode) if (sent->mode)
hci_dev_set_flag(hdev, HCI_SSP_ENABLED); hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
else else
@@ -1239,6 +1237,55 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static void hci_cc_le_remove_adv_set(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *)skb->data);
u8 *instance;
int err;
if (status)
return;
instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
if (!instance)
return;
hci_dev_lock(hdev);
err = hci_remove_adv_instance(hdev, *instance);
if (!err)
mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
*instance);
hci_dev_unlock(hdev);
}
static void hci_cc_le_clear_adv_sets(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *)skb->data);
struct adv_info *adv, *n;
int err;
if (status)
return;
if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
return;
hci_dev_lock(hdev);
list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
u8 instance = adv->instance;
err = hci_remove_adv_instance(hdev, instance);
if (!err)
mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
hdev, instance);
}
hci_dev_unlock(hdev);
}
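Both handlers above recover the socket that issued the command via hci_skb_sk(hdev->sent_cmd), using the sk member newly added to struct hci_ctrl/bt_skb_cb earlier in this series, so mgmt_advertising_removed() can avoid notifying the originator. A sketch of the other half of that contract, i.e. how a command skb would be tagged when built; the helper name is invented and the real plumbing is in the suppressed hci_core.c/hci_sync.c changes:

	/* sketch (assumption): record the requesting socket in the command skb */
	static void hci_skb_set_sk(struct sk_buff *skb, struct sock *sk)
	{
		if (sk)
			sock_hold(sk);

		hci_skb_sk(skb) = sk;	/* dropped again once the request completes */
	}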
static void hci_cc_le_read_transmit_power(struct hci_dev *hdev, static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
struct sk_buff *skb) struct sk_buff *skb)
{ {
@@ -1326,8 +1373,10 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
&conn->le_conn_timeout, &conn->le_conn_timeout,
conn->conn_timeout); conn->conn_timeout);
} else { } else {
if (adv) { if (cp->num_of_sets) {
if (adv)
adv->enabled = false; adv->enabled = false;
/* If just one instance was disabled check if there are /* If just one instance was disabled check if there are
* any other instance enabled before clearing HCI_LE_ADV * any other instance enabled before clearing HCI_LE_ADV
*/ */
@@ -1463,16 +1512,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
* interrupted scanning due to a connect request. Mark * interrupted scanning due to a connect request. Mark
* therefore discovery as stopped. If this was not * therefore discovery as stopped.
* because of a connect request advertising might have
* been disabled because of active scanning, so
* re-enable it again if necessary.
*/ */
if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
hdev->discovery.state == DISCOVERY_FINDING)
hci_req_reenable_advertising(hdev);
break; break;
@@ -2371,9 +2414,14 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{ {
struct hci_cp_disconnect *cp; struct hci_cp_disconnect *cp;
struct hci_conn_params *params;
struct hci_conn *conn; struct hci_conn *conn;
bool mgmt_conn;
if (!status) /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
* otherwise cleanup the connection immediately.
*/
if (!status && !hdev->suspended)
return; return;
cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
@@ -2383,23 +2431,60 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev); hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
if (conn) { if (!conn)
goto unlock;
if (status) {
mgmt_disconnect_failed(hdev, &conn->dst, conn->type, mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status); conn->dst_type, status);
if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
hdev->cur_adv_instance = conn->adv_instance; hdev->cur_adv_instance = conn->adv_instance;
hci_req_reenable_advertising(hdev); hci_enable_advertising(hdev);
} }
goto done;
}
mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
if (conn->type == ACL_LINK) {
if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
hci_remove_link_key(hdev, &conn->dst);
}
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
if (params) {
switch (params->auto_connect) {
case HCI_AUTO_CONN_LINK_LOSS:
if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
break;
fallthrough;
case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
list_del_init(&params->action);
list_add(&params->action, &hdev->pend_le_conns);
break;
default:
break;
}
}
mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
cp->reason, mgmt_conn);
hci_disconn_cfm(conn, cp->reason);
done:
/* If the disconnection failed for any reason, the upper layer /* If the disconnection failed for any reason, the upper layer
* does not retry to disconnect in current implementation. * does not retry to disconnect in current implementation.
* Hence, we need to do some basic cleanup here and re-enable * Hence, we need to do some basic cleanup here and re-enable
* advertising if necessary. * advertising if necessary.
*/ */
hci_conn_del(conn); hci_conn_del(conn);
} unlock:
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
@@ -2977,7 +3062,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_AUTO_CONN_ALWAYS: case HCI_AUTO_CONN_ALWAYS:
list_del_init(&params->action); list_del_init(&params->action);
list_add(&params->action, &hdev->pend_le_conns); list_add(&params->action, &hdev->pend_le_conns);
hci_update_background_scan(hdev); hci_update_passive_scan(hdev);
break; break;
default: default:
@@ -2987,14 +3072,6 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_disconn_cfm(conn, ev->reason); hci_disconn_cfm(conn, ev->reason);
/* The suspend notifier is waiting for all devices to disconnect so
* clear the bit from pending tasks and inform the wait queue.
*/
if (list_empty(&hdev->conn_hash.list) &&
test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
wake_up(&hdev->suspend_wait_q);
}
/* Re-enable advertising if necessary, since it might /* Re-enable advertising if necessary, since it might
* have been disabled by the connection. From the * have been disabled by the connection. From the
* HCI_LE_Set_Advertise_Enable command description in * HCI_LE_Set_Advertise_Enable command description in
@@ -3007,7 +3084,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
*/ */
if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
hdev->cur_adv_instance = conn->adv_instance; hdev->cur_adv_instance = conn->adv_instance;
hci_req_reenable_advertising(hdev); hci_enable_advertising(hdev);
} }
hci_conn_del(conn); hci_conn_del(conn);
@@ -3723,6 +3800,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_le_set_adv_set_random_addr(hdev, skb); hci_cc_le_set_adv_set_random_addr(hdev, skb);
break; break;
case HCI_OP_LE_REMOVE_ADV_SET:
hci_cc_le_remove_adv_set(hdev, skb);
break;
case HCI_OP_LE_CLEAR_ADV_SETS:
hci_cc_le_clear_adv_sets(hdev, skb);
break;
case HCI_OP_LE_READ_TRANSMIT_POWER: case HCI_OP_LE_READ_TRANSMIT_POWER:
hci_cc_le_read_transmit_power(hdev, skb); hci_cc_le_read_transmit_power(hdev, skb);
break; break;
@@ -4445,7 +4530,6 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
{ {
struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn; struct hci_conn *conn;
unsigned int notify_evt;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
@@ -4517,22 +4601,18 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
} }
bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
switch (ev->air_mode) {
case 0x02:
notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD;
break;
case 0x03:
notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP;
break;
}
/* Notify only in case of SCO over HCI transport data path which /* Notify only in case of SCO over HCI transport data path which
* is zero and non-zero value shall be non-HCI transport data path * is zero and non-zero value shall be non-HCI transport data path
*/ */
if (conn->codec.data_path == 0) { if (conn->codec.data_path == 0 && hdev->notify) {
if (hdev->notify) switch (ev->air_mode) {
hdev->notify(hdev, notify_evt); case 0x02:
hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
break;
case 0x03:
hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
break;
}
} }
hci_connect_cfm(conn, ev->status); hci_connect_cfm(conn, ev->status);
@@ -5412,7 +5492,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
} }
unlock: unlock:
hci_update_background_scan(hdev); hci_update_passive_scan(hdev);
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
@@ -5441,23 +5521,30 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
le16_to_cpu(ev->interval), le16_to_cpu(ev->interval),
le16_to_cpu(ev->latency), le16_to_cpu(ev->latency),
le16_to_cpu(ev->supervision_timeout)); le16_to_cpu(ev->supervision_timeout));
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
hci_req_disable_address_resolution(hdev);
} }
static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
{ {
struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data; struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
struct hci_conn *conn; struct hci_conn *conn;
struct adv_info *adv; struct adv_info *adv, *n;
BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
adv = hci_find_adv_instance(hdev, ev->handle); adv = hci_find_adv_instance(hdev, ev->handle);
/* The Bluetooth Core 5.3 specification clearly states that this event
* shall not be sent when the Host disables the advertising set. So in
* case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
*
* When the Host disables an advertising set, all cleanup is done via
* its command callback and not needed to be duplicated here.
*/
if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
return;
}
if (ev->status) { if (ev->status) {
if (!adv) if (!adv)
return; return;
@@ -5466,6 +5553,13 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_remove_adv_instance(hdev, ev->handle); hci_remove_adv_instance(hdev, ev->handle);
mgmt_advertising_removed(NULL, hdev, ev->handle); mgmt_advertising_removed(NULL, hdev, ev->handle);
list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
if (adv->enabled)
return;
}
/* We are no longer advertising, clear HCI_LE_ADV */
hci_dev_clear_flag(hdev, HCI_LE_ADV);
return; return;
} }
@@ -5529,8 +5623,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
return NULL; return NULL;
/* Ignore if the device is blocked */ /* Ignore if the device is blocked or hdev is suspended */
if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type)) if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
hdev->suspended)
return NULL; return NULL;
/* Most controller will fail if we try to create new connections /* Most controller will fail if we try to create new connections
@@ -5825,7 +5920,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_ev_le_advertising_info *ev = ptr; struct hci_ev_le_advertising_info *ev = ptr;
s8 rssi; s8 rssi;
if (ev->length <= HCI_MAX_AD_LENGTH) { if (ev->length <= HCI_MAX_AD_LENGTH &&
ev->data + ev->length <= skb_tail_pointer(skb)) {
rssi = ev->data[ev->length]; rssi = ev->data[ev->length];
process_adv_report(hdev, ev->evt_type, &ev->bdaddr, process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
ev->bdaddr_type, NULL, 0, rssi, ev->bdaddr_type, NULL, 0, rssi,
@@ -5835,6 +5931,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
} }
ptr += sizeof(*ev) + ev->length + 1; ptr += sizeof(*ev) + ev->length + 1;
if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
break;
}
} }
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
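The two added checks harden the advertising-report parser against controllers that report a length field pointing past the end of the received skb: the per-report check ensures ev->length does not run past the buffer before the trailing RSSI byte is read, and the loop guard stops once the remaining bytes cannot hold another report header. The guard is equivalent to this small predicate (illustrative only):

	/* sketch: can 'ptr' still hold a full hci_ev_le_advertising_info header? */
	static bool another_report_fits(const struct sk_buff *skb, const void *ptr)
	{
		return ptr <= (void *)skb_tail_pointer(skb) -
			      sizeof(struct hci_ev_le_advertising_info);
	}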

View File

@@ -32,10 +32,6 @@
#include "msft.h" #include "msft.h"
#include "eir.h" #include "eir.h"
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2
void hci_req_init(struct hci_request *req, struct hci_dev *hdev) void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{ {
skb_queue_head_init(&req->cmd_q); skb_queue_head_init(&req->cmd_q);
@@ -101,7 +97,7 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
return req_run(req, NULL, complete); return req_run(req, NULL, complete);
} }
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
struct sk_buff *skb) struct sk_buff *skb)
{ {
bt_dev_dbg(hdev, "result 0x%2.2x", result); bt_dev_dbg(hdev, "result 0x%2.2x", result);
@@ -126,70 +122,6 @@ void hci_req_sync_cancel(struct hci_dev *hdev, int err)
} }
} }
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u8 event, u32 timeout)
{
struct hci_request req;
struct sk_buff *skb;
int err = 0;
bt_dev_dbg(hdev, "");
hci_req_init(&req, hdev);
hci_req_add_ev(&req, opcode, plen, param, event);
hdev->req_status = HCI_REQ_PEND;
err = hci_req_run_skb(&req, hci_req_sync_complete);
if (err < 0)
return ERR_PTR(err);
err = wait_event_interruptible_timeout(hdev->req_wait_q,
hdev->req_status != HCI_REQ_PEND, timeout);
if (err == -ERESTARTSYS)
return ERR_PTR(-EINTR);
switch (hdev->req_status) {
case HCI_REQ_DONE:
err = -bt_to_errno(hdev->req_result);
break;
case HCI_REQ_CANCELED:
err = -hdev->req_result;
break;
default:
err = -ETIMEDOUT;
break;
}
hdev->req_status = hdev->req_result = 0;
skb = hdev->req_skb;
hdev->req_skb = NULL;
bt_dev_dbg(hdev, "end: err %d", err);
if (err < 0) {
kfree_skb(skb);
return ERR_PTR(err);
}
if (!skb)
return ERR_PTR(-ENODATA);
return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param, u32 timeout)
{
return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
/* Execute request and wait for completion. */ /* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
unsigned long opt), unsigned long opt),
@@ -436,82 +368,6 @@ static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
return false; return false;
} }
/* This function controls the background scanning based on hdev->pend_le_conns
* list. If there are pending LE connection we start the background scanning,
* otherwise we stop it.
*
* This function requires the caller holds hdev->lock.
*/
static void __hci_update_background_scan(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
if (!test_bit(HCI_UP, &hdev->flags) ||
test_bit(HCI_INIT, &hdev->flags) ||
hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG) ||
hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
hci_dev_test_flag(hdev, HCI_UNREGISTER))
return;
/* No point in doing scanning if LE support hasn't been enabled */
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return;
/* If discovery is active don't interfere with it */
if (hdev->discovery.state != DISCOVERY_STOPPED)
return;
/* Reset RSSI and UUID filters when starting background scanning
* since these filters are meant for service discovery only.
*
* The Start Discovery and Start Service Discovery operations
* ensure to set proper values for RSSI threshold and UUID
* filter list. So it is safe to just reset them here.
*/
hci_discovery_filter_clear(hdev);
bt_dev_dbg(hdev, "ADV monitoring is %s",
hci_is_adv_monitoring(hdev) ? "on" : "off");
if (list_empty(&hdev->pend_le_conns) &&
list_empty(&hdev->pend_le_reports) &&
!hci_is_adv_monitoring(hdev)) {
/* If there is no pending LE connections or devices
* to be scanned for or no ADV monitors, we should stop the
* background scanning.
*/
/* If controller is not scanning we are done. */
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
hci_req_add_le_scan_disable(req, false);
bt_dev_dbg(hdev, "stopping background scanning");
} else {
/* If there is at least one pending LE connection, we should
* keep the background scan running.
*/
/* If controller is connecting, we should not start scanning
* since some controllers are not able to scan and connect at
* the same time.
*/
if (hci_lookup_le_connect(hdev))
return;
/* If controller is currently scanning, we stop it to ensure we
* don't miss any advertising (due to duplicates filter).
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(req, false);
hci_req_add_le_passive_scan(req);
bt_dev_dbg(hdev, "starting background scanning");
}
}
void __hci_req_update_name(struct hci_request *req) void __hci_req_update_name(struct hci_request *req)
{ {
struct hci_dev *hdev = req->hdev; struct hci_dev *hdev = req->hdev;
@@ -560,9 +416,6 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
return; return;
} }
if (hdev->suspended)
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
if (use_ext_scan(hdev)) { if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable cp; struct hci_cp_le_set_ext_scan_enable cp;
@@ -579,9 +432,7 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
} }
/* Disable address resolution */ /* Disable address resolution */
if (use_ll_privacy(hdev) && if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
__u8 enable = 0x00; __u8 enable = 0x00;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
@@ -600,8 +451,7 @@ static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
cp.bdaddr_type); cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
if (use_ll_privacy(req->hdev) && if (use_ll_privacy(req->hdev)) {
hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
struct smp_irk *irk; struct smp_irk *irk;
irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
@@ -654,8 +504,7 @@ static int add_to_accept_list(struct hci_request *req,
cp.bdaddr_type); cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
if (use_ll_privacy(hdev) && if (use_ll_privacy(hdev)) {
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
struct smp_irk *irk; struct smp_irk *irk;
irk = hci_find_irk_by_addr(hdev, &params->addr, irk = hci_find_irk_by_addr(hdev, &params->addr,
@@ -694,8 +543,7 @@ static u8 update_accept_list(struct hci_request *req)
*/ */
bool allow_rpa = hdev->suspended; bool allow_rpa = hdev->suspended;
if (use_ll_privacy(hdev) && if (use_ll_privacy(hdev))
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
allow_rpa = true; allow_rpa = true;
/* Go through the current accept list programmed into the /* Go through the current accept list programmed into the
@@ -784,9 +632,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
return; return;
} }
if (use_ll_privacy(hdev) && if (use_ll_privacy(hdev) && addr_resolv) {
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
addr_resolv) {
u8 enable = 0x01; u8 enable = 0x01;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
@@ -943,8 +789,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
if (hdev->suspended) { if (hdev->suspended) {
window = hdev->le_scan_window_suspend; window = hdev->le_scan_window_suspend;
interval = hdev->le_scan_int_suspend; interval = hdev->le_scan_int_suspend;
set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
} else if (hci_is_le_conn_scanning(hdev)) { } else if (hci_is_le_conn_scanning(hdev)) {
window = hdev->le_scan_window_connect; window = hdev->le_scan_window_connect;
interval = hdev->le_scan_int_connect; interval = hdev->le_scan_int_connect;
@@ -977,59 +821,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
addr_resolv); addr_resolv);
} }
static void hci_req_clear_event_filter(struct hci_request *req)
{
struct hci_cp_set_event_filter f;
if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
return;
if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
memset(&f, 0, sizeof(f));
f.flt_type = HCI_FLT_CLEAR_ALL;
hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
}
}
static void hci_req_set_event_filter(struct hci_request *req)
{
struct bdaddr_list_with_flags *b;
struct hci_cp_set_event_filter f;
struct hci_dev *hdev = req->hdev;
u8 scan = SCAN_DISABLED;
bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
return;
/* Always clear event filter when starting */
hci_req_clear_event_filter(req);
list_for_each_entry(b, &hdev->accept_list, list) {
if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
b->current_flags))
continue;
memset(&f, 0, sizeof(f));
bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
f.flt_type = HCI_FLT_CONN_SETUP;
f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
scan = SCAN_PAGE;
}
if (scan && !scanning) {
set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
} else if (!scan && scanning) {
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
}
static void cancel_adv_timeout(struct hci_dev *hdev) static void cancel_adv_timeout(struct hci_dev *hdev)
{ {
if (hdev->adv_instance_timeout) { if (hdev->adv_instance_timeout) {
@@ -1088,185 +879,6 @@ int hci_req_resume_adv_instances(struct hci_dev *hdev)
return hci_req_run(&req, NULL); return hci_req_run(&req, NULL);
} }
static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
status);
if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
wake_up(&hdev->suspend_wait_q);
}
if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
wake_up(&hdev->suspend_wait_q);
}
}
static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
bool suspending)
{
struct hci_dev *hdev = req->hdev;
switch (hci_get_adv_monitor_offload_ext(hdev)) {
case HCI_ADV_MONITOR_EXT_MSFT:
if (suspending)
msft_suspend(hdev);
else
msft_resume(hdev);
break;
default:
return;
}
/* No need to block when enabling since it's on resume path */
if (hdev->suspended && suspending)
set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}
/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
int old_state;
struct hci_conn *conn;
struct hci_request req;
u8 page_scan;
int disconnect_counter;
if (next == hdev->suspend_state) {
bt_dev_dbg(hdev, "Same state before and after: %d", next);
goto done;
}
hdev->suspend_state = next;
hci_req_init(&req, hdev);
if (next == BT_SUSPEND_DISCONNECT) {
/* Mark device as suspended */
hdev->suspended = true;
/* Pause discovery if not already stopped */
old_state = hdev->discovery.state;
if (old_state != DISCOVERY_STOPPED) {
set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
queue_work(hdev->req_workqueue, &hdev->discov_update);
}
hdev->discovery_paused = true;
hdev->discovery_old_state = old_state;
/* Stop directed advertising */
old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
if (old_state) {
set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
cancel_delayed_work(&hdev->discov_off);
queue_delayed_work(hdev->req_workqueue,
&hdev->discov_off, 0);
}
/* Pause other advertisements */
if (hdev->adv_instance_cnt)
__hci_req_pause_adv_instances(&req);
hdev->advertising_paused = true;
hdev->advertising_old_state = old_state;
/* Disable page scan if enabled */
if (test_bit(HCI_PSCAN, &hdev->flags)) {
page_scan = SCAN_DISABLED;
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
&page_scan);
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
}
/* Disable LE passive scan if enabled */
if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
cancel_interleave_scan(hdev);
hci_req_add_le_scan_disable(&req, false);
}
/* Disable advertisement filters */
hci_req_prepare_adv_monitor_suspend(&req, true);
/* Prevent disconnects from causing scanning to be re-enabled */
hdev->scanning_paused = true;
/* Run commands before disconnecting */
hci_req_run(&req, suspend_req_complete);
disconnect_counter = 0;
/* Soft disconnect everything (power off) */
list_for_each_entry(conn, &hdev->conn_hash.list, list) {
hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
disconnect_counter++;
}
if (disconnect_counter > 0) {
bt_dev_dbg(hdev,
"Had %d disconnects. Will wait on them",
disconnect_counter);
set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
}
} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
/* Unpause to take care of updating scanning params */
hdev->scanning_paused = false;
/* Enable event filter for paired devices */
hci_req_set_event_filter(&req);
/* Enable passive scan at lower duty cycle */
__hci_update_background_scan(&req);
/* Pause scan changes again. */
hdev->scanning_paused = true;
hci_req_run(&req, suspend_req_complete);
} else {
hdev->suspended = false;
hdev->scanning_paused = false;
/* Clear any event filters and restore scan state */
hci_req_clear_event_filter(&req);
__hci_req_update_scan(&req);
/* Reset passive/background scanning to normal */
__hci_update_background_scan(&req);
/* Enable all of the advertisement filters */
hci_req_prepare_adv_monitor_suspend(&req, false);
/* Unpause directed advertising */
hdev->advertising_paused = false;
if (hdev->advertising_old_state) {
set_bit(SUSPEND_UNPAUSE_ADVERTISING,
hdev->suspend_tasks);
hci_dev_set_flag(hdev, HCI_ADVERTISING);
queue_work(hdev->req_workqueue,
&hdev->discoverable_update);
hdev->advertising_old_state = 0;
}
/* Resume other advertisements */
if (hdev->adv_instance_cnt)
__hci_req_resume_adv_instances(&req);
/* Unpause discovery */
hdev->discovery_paused = false;
if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
hdev->discovery_old_state != DISCOVERY_STOPPING) {
set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
queue_work(hdev->req_workqueue, &hdev->discov_update);
}
hci_req_run(&req, suspend_req_complete);
}
hdev->suspend_state = next;
done:
clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
wake_up(&hdev->suspend_wait_q);
}
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{ {
return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
@@ -1548,8 +1160,7 @@ void hci_req_disable_address_resolution(struct hci_dev *hdev)
struct hci_request req; struct hci_request req;
__u8 enable = 0x00; __u8 enable = 0x00;
if (!use_ll_privacy(hdev) && if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
return; return;
hci_req_init(&req, hdev); hci_req_init(&req, hdev);
@@ -1692,8 +1303,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
/* If Controller supports LL Privacy use own address type is /* If Controller supports LL Privacy use own address type is
* 0x03 * 0x03
*/ */
if (use_ll_privacy(hdev) && if (use_ll_privacy(hdev))
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else else
*own_addr_type = ADDR_LE_DEV_RANDOM; *own_addr_type = ADDR_LE_DEV_RANDOM;
@@ -1871,7 +1481,8 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
if (own_addr_type == ADDR_LE_DEV_RANDOM && if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
bacmp(&random_addr, BDADDR_ANY)) { bacmp(&random_addr, BDADDR_ANY)) {
struct hci_cp_le_set_adv_set_rand_addr cp; struct hci_cp_le_set_adv_set_rand_addr cp;
@@ -2160,8 +1771,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
/* If Controller supports LL Privacy use own address type is /* If Controller supports LL Privacy use own address type is
* 0x03 * 0x03
*/ */
if (use_ll_privacy(hdev) && if (use_ll_privacy(hdev))
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else else
*own_addr_type = ADDR_LE_DEV_RANDOM; *own_addr_type = ADDR_LE_DEV_RANDOM;
@@ -2301,47 +1911,6 @@ static void scan_update_work(struct work_struct *work)
hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
} }
static int connectable_update(struct hci_request *req, unsigned long opt)
{
struct hci_dev *hdev = req->hdev;
hci_dev_lock(hdev);
__hci_req_update_scan(req);
/* If BR/EDR is not enabled and we disable advertising as a
* by-product of disabling connectable, we need to update the
* advertising flags.
*/
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
__hci_req_update_adv_data(req, hdev->cur_adv_instance);
/* Update the advertising parameters if necessary */
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
!list_empty(&hdev->adv_instances)) {
if (ext_adv_capable(hdev))
__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
else
__hci_req_enable_advertising(req);
}
__hci_update_background_scan(req);
hci_dev_unlock(hdev);
return 0;
}
static void connectable_update_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
connectable_update);
u8 status;
hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
mgmt_set_connectable_complete(hdev, status);
}
static u8 get_service_classes(struct hci_dev *hdev) static u8 get_service_classes(struct hci_dev *hdev)
{ {
struct bt_uuid *uuid; struct bt_uuid *uuid;
@@ -2445,16 +2014,6 @@ static int discoverable_update(struct hci_request *req, unsigned long opt)
return 0; return 0;
} }
static void discoverable_update_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
discoverable_update);
u8 status;
hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
mgmt_set_discoverable_complete(hdev, status);
}
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason) u8 reason)
{ {
@@ -2548,35 +2107,6 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
return 0; return 0;
} }
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
hci_dev_lock(req->hdev);
__hci_update_background_scan(req);
hci_dev_unlock(req->hdev);
return 0;
}
static void bg_scan_update(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
bg_scan_update);
struct hci_conn *conn;
u8 status;
int err;
err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
if (!err)
return;
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
if (conn)
hci_le_conn_failed(conn, status);
hci_dev_unlock(hdev);
}
static int le_scan_disable(struct hci_request *req, unsigned long opt) static int le_scan_disable(struct hci_request *req, unsigned long opt)
{ {
hci_req_add_le_scan_disable(req, false); hci_req_add_le_scan_disable(req, false);
@@ -3163,10 +2693,7 @@ int __hci_req_hci_power_on(struct hci_dev *hdev)
void hci_request_setup(struct hci_dev *hdev) void hci_request_setup(struct hci_dev *hdev)
{ {
INIT_WORK(&hdev->discov_update, discov_update); INIT_WORK(&hdev->discov_update, discov_update);
INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
INIT_WORK(&hdev->scan_update, scan_update_work); INIT_WORK(&hdev->scan_update, scan_update_work);
INIT_WORK(&hdev->connectable_update, connectable_update_work);
INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
INIT_DELAYED_WORK(&hdev->discov_off, discov_off); INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
@@ -3179,10 +2706,7 @@ void hci_request_cancel_all(struct hci_dev *hdev)
hci_req_sync_cancel(hdev, ENODEV); hci_req_sync_cancel(hdev, ENODEV);
cancel_work_sync(&hdev->discov_update); cancel_work_sync(&hdev->discov_update);
cancel_work_sync(&hdev->bg_scan_update);
cancel_work_sync(&hdev->scan_update); cancel_work_sync(&hdev->scan_update);
cancel_work_sync(&hdev->connectable_update);
cancel_work_sync(&hdev->discoverable_update);
cancel_delayed_work_sync(&hdev->discov_off); cancel_delayed_work_sync(&hdev->discov_off);
cancel_delayed_work_sync(&hdev->le_scan_disable); cancel_delayed_work_sync(&hdev->le_scan_disable);
cancel_delayed_work_sync(&hdev->le_scan_restart); cancel_delayed_work_sync(&hdev->le_scan_restart);
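The large removals in this file are the other half of the serialization rework: the SUSPEND_* task bitmap, suspend_wait_q handshake and hci_req_prepare_suspend() state machine, along with the bg_scan/connectable/discoverable deferred works, are replaced by the synchronous helpers declared in hci_sync.h (hci_suspend_sync(), hci_resume_sync(), hci_update_passive_scan_sync(), and friends) implemented in the suppressed net/bluetooth/hci_sync.c. One plausible shape of the new suspend call site, offered purely as an assumption since the replacement code is not part of this hunk:

	#include <linux/suspend.h>

	/* sketch (assumption): the PM notifier now just runs the sync helpers */
	static int hci_suspend_notifier(struct notifier_block *nb,
					unsigned long action, void *data)
	{
		struct hci_dev *hdev = container_of(nb, struct hci_dev,
						    suspend_notifier);
		int ret = 0;

		if (action == PM_SUSPEND_PREPARE)
			ret = hci_suspend_sync(hdev);
		else if (action == PM_POST_SUSPEND)
			ret = hci_resume_sync(hdev);

		return ret ? notifier_from_errno(ret) : NOTIFY_DONE;
	}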

View File

@@ -22,9 +22,17 @@
#include <asm/unaligned.h> #include <asm/unaligned.h>
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2
#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) #define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) #define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2
struct hci_request { struct hci_request {
struct hci_dev *hdev; struct hci_dev *hdev;
struct sk_buff_head cmd_q; struct sk_buff_head cmd_q;
@@ -40,6 +48,8 @@ void hci_req_purge(struct hci_request *req);
bool hci_req_status_pend(struct hci_dev *hdev); bool hci_req_status_pend(struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete); int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
struct sk_buff *skb);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
const void *param); const void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
@@ -117,10 +127,5 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason); u8 reason);
static inline void hci_update_background_scan(struct hci_dev *hdev)
{
queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
}
void hci_request_setup(struct hci_dev *hdev); void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev); void hci_request_cancel_all(struct hci_dev *hdev);
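
hci_req_sync_complete() is exported here so the new hci_sync.c code can reuse the existing request/wait plumbing. A rough sketch of that pairing, assuming it is combined with hci_req_run_skb() and hdev->req_wait_q in the same way hci_req_sync() already does; the function name is illustrative:

/* Sketch: issue one command and wait for its completion.
 * hci_req_sync_complete() records the status in hdev->req_status /
 * hdev->req_result, stashes the event skb in hdev->req_skb and wakes
 * hdev->req_wait_q.
 */
static struct sk_buff *example_cmd_sync(struct hci_dev *hdev, u16 opcode,
					u32 plen, const void *param,
					u32 timeout)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);
	hci_req_add(&req, opcode, plen, param);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	wait_event_interruptible_timeout(hdev->req_wait_q,
					 hdev->req_status != HCI_REQ_PEND,
					 timeout);

	hdev->req_status = 0;

	return hdev->req_skb ? hdev->req_skb : ERR_PTR(-ENODATA);
}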


@@ -889,10 +889,6 @@ static int hci_sock_release(struct socket *sock)
} }
sock_orphan(sk); sock_orphan(sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
release_sock(sk); release_sock(sk);
sock_put(sk); sock_put(sk);
return 0; return 0;
@@ -2058,6 +2054,12 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
return err; return err;
} }
static void hci_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
}
static const struct proto_ops hci_sock_ops = { static const struct proto_ops hci_sock_ops = {
.family = PF_BLUETOOTH, .family = PF_BLUETOOTH,
.owner = THIS_MODULE, .owner = THIS_MODULE,
@@ -2111,6 +2113,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
sock->state = SS_UNCONNECTED; sock->state = SS_UNCONNECTED;
sk->sk_state = BT_OPEN; sk->sk_state = BT_OPEN;
sk->sk_destruct = hci_sock_destruct;
bt_sock_link(&hci_sk_list, sk); bt_sock_link(&hci_sk_list, sk);
return 0; return 0;

net/bluetooth/hci_sync.c (new file, 4922 lines)

File diff suppressed because it is too large


@@ -86,6 +86,8 @@ static void bt_host_release(struct device *dev)
if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
hci_release_dev(hdev); hci_release_dev(hdev);
else
kfree(hdev);
module_put(THIS_MODULE); module_put(THIS_MODULE);
} }


@@ -172,6 +172,21 @@ done:
return err; return err;
} }
static void l2cap_sock_init_pid(struct sock *sk)
{
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
/* Only L2CAP_MODE_EXT_FLOWCTL ever need to access the PID in order to
* group the channels being requested.
*/
if (chan->mode != L2CAP_MODE_EXT_FLOWCTL)
return;
spin_lock(&sk->sk_peer_lock);
sk->sk_peer_pid = get_pid(task_tgid(current));
spin_unlock(&sk->sk_peer_lock);
}
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags) int alen, int flags)
{ {
@@ -243,6 +258,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode) if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode)
chan->mode = L2CAP_MODE_LE_FLOWCTL; chan->mode = L2CAP_MODE_LE_FLOWCTL;
l2cap_sock_init_pid(sk);
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr, la.l2_bdaddr_type); &la.l2_bdaddr, la.l2_bdaddr_type);
if (err) if (err)
@@ -298,6 +315,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
goto done; goto done;
} }
l2cap_sock_init_pid(sk);
sk->sk_max_ack_backlog = backlog; sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0; sk->sk_ack_backlog = 0;
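
l2cap_sock_init_pid() records the calling task's thread-group PID on the socket before connect() and listen(), so that L2CAP_MODE_EXT_FLOWCTL channels opened by the same process can later be grouped together. A hedged sketch of how that value can be read back; pid_vnr() is the usual accessor, and the function name here is illustrative:

/* Sketch: derive a per-process grouping key from the PID stashed by
 * l2cap_sock_init_pid(). sk_peer_lock guards sk_peer_pid, mirroring
 * the locking used when it was stored.
 */
static pid_t example_chan_owner_tgid(struct sock *sk)
{
	pid_t tgid = 0;

	spin_lock(&sk->sk_peer_lock);
	if (sk->sk_peer_pid)
		tgid = pid_vnr(sk->sk_peer_pid);
	spin_unlock(&sk->sk_peer_lock);

	return tgid;
}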

File diff suppressed because it is too large


@@ -227,7 +227,7 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
} }
} }
struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
struct hci_dev *hdev, struct hci_dev *hdev,
void *data, u16 len) void *data, u16 len)
{ {
@@ -251,6 +251,19 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
cmd->sk = sk; cmd->sk = sk;
sock_hold(sk); sock_hold(sk);
return cmd;
}
struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_pending_cmd *cmd;
cmd = mgmt_pending_new(sk, opcode, hdev, data, len);
if (!cmd)
return NULL;
list_add(&cmd->list, &hdev->mgmt_pending); list_add(&cmd->list, &hdev->mgmt_pending);
return cmd; return cmd;
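
mgmt_pending_add() is split so that callers can allocate a pending-command context without linking it onto hdev->mgmt_pending: mgmt_pending_new() only allocates and takes the socket reference, while mgmt_pending_add() additionally adds it to the list where mgmt_pending_find() and mgmt_pending_foreach() can see it. A sketch of the intended usage; the opcode and surrounding handler are only examples:

/* Sketch: a handler that completes its command through its own work
 * keeps the context private (mgmt_pending_new) and frees it itself,
 * instead of exposing it on hdev->mgmt_pending (mgmt_pending_add).
 */
static int example_mgmt_handler(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd)
		return -ENOMEM;

	/* ... queue the work that finishes the command; its completion
	 * path is expected to call mgmt_pending_free(cmd) ...
	 */

	return 0;
}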


@@ -27,6 +27,7 @@ struct mgmt_pending_cmd {
void *param; void *param;
size_t param_len; size_t param_len;
struct sock *sk; struct sock *sk;
struct sk_buff *skb;
void *user_data; void *user_data;
int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
}; };
@@ -49,5 +50,8 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
struct hci_dev *hdev, struct hci_dev *hdev,
void *data, u16 len); void *data, u16 len);
struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
struct hci_dev *hdev,
void *data, u16 len);
void mgmt_pending_free(struct mgmt_pending_cmd *cmd); void mgmt_pending_free(struct mgmt_pending_cmd *cmd);
void mgmt_pending_remove(struct mgmt_pending_cmd *cmd); void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);


@@ -93,7 +93,7 @@ struct msft_data {
struct list_head handle_map; struct list_head handle_map;
__u16 pending_add_handle; __u16 pending_add_handle;
__u16 pending_remove_handle; __u16 pending_remove_handle;
__u8 reregistering; __u8 resuming;
__u8 suspending; __u8 suspending;
__u8 filter_enabled; __u8 filter_enabled;
}; };
@@ -156,7 +156,6 @@ failed:
return false; return false;
} }
/* This function requires the caller holds hdev->lock */
static void reregister_monitor(struct hci_dev *hdev, int handle) static void reregister_monitor(struct hci_dev *hdev, int handle)
{ {
struct adv_monitor *monitor; struct adv_monitor *monitor;
@@ -166,9 +165,9 @@ static void reregister_monitor(struct hci_dev *hdev, int handle)
while (1) { while (1) {
monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
if (!monitor) { if (!monitor) {
/* All monitors have been reregistered */ /* All monitors have been resumed */
msft->reregistering = false; msft->resuming = false;
hci_update_background_scan(hdev); hci_update_passive_scan(hdev);
return; return;
} }
@@ -185,67 +184,317 @@ static void reregister_monitor(struct hci_dev *hdev, int handle)
} }
} }
/* This function requires the caller holds hdev->lock */ /* is_mgmt = true matches the handle exposed to userspace via mgmt.
static void remove_monitor_on_suspend(struct hci_dev *hdev, int handle) * is_mgmt = false matches the handle used by the msft controller.
* This function requires the caller holds hdev->lock
*/
static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
(struct hci_dev *hdev, u16 handle, bool is_mgmt)
{ {
struct msft_monitor_advertisement_handle_data *entry;
struct msft_data *msft = hdev->msft_data;
list_for_each_entry(entry, &msft->handle_map, list) {
if (is_mgmt && entry->mgmt_handle == handle)
return entry;
if (!is_mgmt && entry->msft_handle == handle)
return entry;
}
return NULL;
}
static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev,
u8 status, u16 opcode,
struct sk_buff *skb)
{
struct msft_rp_le_monitor_advertisement *rp;
struct adv_monitor *monitor; struct adv_monitor *monitor;
struct msft_monitor_advertisement_handle_data *handle_data;
struct msft_data *msft = hdev->msft_data;
hci_dev_lock(hdev);
monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle);
if (!monitor) {
bt_dev_err(hdev, "msft add advmon: monitor %u is not found!",
msft->pending_add_handle);
status = HCI_ERROR_UNSPECIFIED;
goto unlock;
}
if (status)
goto unlock;
rp = (struct msft_rp_le_monitor_advertisement *)skb->data;
if (skb->len < sizeof(*rp)) {
status = HCI_ERROR_UNSPECIFIED;
goto unlock;
}
handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL);
if (!handle_data) {
status = HCI_ERROR_UNSPECIFIED;
goto unlock;
}
handle_data->mgmt_handle = monitor->handle;
handle_data->msft_handle = rp->handle;
INIT_LIST_HEAD(&handle_data->list);
list_add(&handle_data->list, &msft->handle_map);
monitor->state = ADV_MONITOR_STATE_OFFLOADED;
unlock:
if (status && monitor)
hci_free_adv_monitor(hdev, monitor);
hci_dev_unlock(hdev);
if (!msft->resuming)
hci_add_adv_patterns_monitor_complete(hdev, status);
}
static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
u8 status, u16 opcode,
struct sk_buff *skb)
{
struct msft_cp_le_cancel_monitor_advertisement *cp;
struct msft_rp_le_cancel_monitor_advertisement *rp;
struct adv_monitor *monitor;
struct msft_monitor_advertisement_handle_data *handle_data;
struct msft_data *msft = hdev->msft_data; struct msft_data *msft = hdev->msft_data;
int err; int err;
bool pending;
if (status)
goto done;
rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
if (skb->len < sizeof(*rp)) {
status = HCI_ERROR_UNSPECIFIED;
goto done;
}
hci_dev_lock(hdev);
cp = hci_sent_cmd_data(hdev, hdev->msft_opcode);
handle_data = msft_find_handle_data(hdev, cp->handle, false);
if (handle_data) {
monitor = idr_find(&hdev->adv_monitors_idr,
handle_data->mgmt_handle);
if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED)
monitor->state = ADV_MONITOR_STATE_REGISTERED;
/* Do not free the monitor if it is being removed due to
* suspend. It will be re-monitored on resume.
*/
if (monitor && !msft->suspending)
hci_free_adv_monitor(hdev, monitor);
list_del(&handle_data->list);
kfree(handle_data);
}
/* If remove all monitors is required, we need to continue the process
* here because the earlier it was paused when waiting for the
* response from controller.
*/
if (msft->pending_remove_handle == 0) {
pending = hci_remove_all_adv_monitor(hdev, &err);
if (pending) {
hci_dev_unlock(hdev);
return;
}
if (err)
status = HCI_ERROR_UNSPECIFIED;
}
hci_dev_unlock(hdev);
done:
if (!msft->suspending)
hci_remove_adv_monitor_complete(hdev, status);
}
static int msft_remove_monitor_sync(struct hci_dev *hdev,
struct adv_monitor *monitor)
{
struct msft_cp_le_cancel_monitor_advertisement cp;
struct msft_monitor_advertisement_handle_data *handle_data;
struct sk_buff *skb;
u8 status;
handle_data = msft_find_handle_data(hdev, monitor->handle, true);
/* If no matched handle, just remove without telling controller */
if (!handle_data)
return -ENOENT;
cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
cp.handle = handle_data->msft_handle;
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
HCI_CMD_TIMEOUT);
if (IS_ERR(skb))
return PTR_ERR(skb);
status = skb->data[0];
skb_pull(skb, 1);
msft_le_cancel_monitor_advertisement_cb(hdev, status, hdev->msft_opcode,
skb);
return status;
}
/* This function requires the caller holds hci_req_sync_lock */
int msft_suspend_sync(struct hci_dev *hdev)
{
struct msft_data *msft = hdev->msft_data;
struct adv_monitor *monitor;
int handle = 0;
if (!msft || !msft_monitor_supported(hdev))
return 0;
msft->suspending = true;
while (1) { while (1) {
monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
if (!monitor) { if (!monitor)
/* All monitors have been removed */ break;
msft->suspending = false;
hci_update_background_scan(hdev);
return;
}
msft->pending_remove_handle = (u16)handle; msft_remove_monitor_sync(hdev, monitor);
err = __msft_remove_monitor(hdev, monitor, handle);
/* If success, return and wait for monitor removed callback */
if (!err)
return;
/* Otherwise free the monitor and keep removing */
hci_free_adv_monitor(hdev, monitor);
handle++; handle++;
} }
/* All monitors have been removed */
msft->suspending = false;
return 0;
} }
/* This function requires the caller holds hdev->lock */ static bool msft_monitor_rssi_valid(struct adv_monitor *monitor)
void msft_suspend(struct hci_dev *hdev)
{ {
struct msft_data *msft = hdev->msft_data; struct adv_rssi_thresholds *r = &monitor->rssi;
if (!msft) if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
return; r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX ||
r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX)
return false;
if (msft_monitor_supported(hdev)) { /* High_threshold_timeout is not supported,
msft->suspending = true; * once high_threshold is reached, events are immediately reported.
/* Quitely remove all monitors on suspend to avoid waking up
* the system.
*/ */
remove_monitor_on_suspend(hdev, 0); if (r->high_threshold_timeout != 0)
} return false;
if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX)
return false;
/* Sampling period from 0x00 to 0xFF are all allowed */
return true;
} }
/* This function requires the caller holds hdev->lock */ static bool msft_monitor_pattern_valid(struct adv_monitor *monitor)
void msft_resume(struct hci_dev *hdev) {
return msft_monitor_rssi_valid(monitor);
/* No additional check needed for pattern-based monitor */
}
static int msft_add_monitor_sync(struct hci_dev *hdev,
struct adv_monitor *monitor)
{
struct msft_cp_le_monitor_advertisement *cp;
struct msft_le_monitor_advertisement_pattern_data *pattern_data;
struct msft_le_monitor_advertisement_pattern *pattern;
struct adv_pattern *entry;
size_t total_size = sizeof(*cp) + sizeof(*pattern_data);
ptrdiff_t offset = 0;
u8 pattern_count = 0;
struct sk_buff *skb;
u8 status;
if (!msft_monitor_pattern_valid(monitor))
return -EINVAL;
list_for_each_entry(entry, &monitor->patterns, list) {
pattern_count++;
total_size += sizeof(*pattern) + entry->length;
}
cp = kmalloc(total_size, GFP_KERNEL);
if (!cp)
return -ENOMEM;
cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
cp->rssi_high = monitor->rssi.high_threshold;
cp->rssi_low = monitor->rssi.low_threshold;
cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout;
cp->rssi_sampling_period = monitor->rssi.sampling_period;
cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN;
pattern_data = (void *)cp->data;
pattern_data->count = pattern_count;
list_for_each_entry(entry, &monitor->patterns, list) {
pattern = (void *)(pattern_data->data + offset);
/* the length also includes data_type and offset */
pattern->length = entry->length + 2;
pattern->data_type = entry->ad_type;
pattern->start_byte = entry->offset;
memcpy(pattern->pattern, entry->value, entry->length);
offset += sizeof(*pattern) + entry->length;
}
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp,
HCI_CMD_TIMEOUT);
kfree(cp);
if (IS_ERR(skb))
return PTR_ERR(skb);
status = skb->data[0];
skb_pull(skb, 1);
msft_le_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, skb);
return status;
}
/* This function requires the caller holds hci_req_sync_lock */
int msft_resume_sync(struct hci_dev *hdev)
{ {
struct msft_data *msft = hdev->msft_data; struct msft_data *msft = hdev->msft_data;
struct adv_monitor *monitor;
int handle = 0;
if (!msft) if (!msft || !msft_monitor_supported(hdev))
return; return 0;
if (msft_monitor_supported(hdev)) { msft->resuming = true;
msft->reregistering = true;
/* Monitors are removed on suspend, so we need to add all while (1) {
* monitors on resume. monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
*/ if (!monitor)
reregister_monitor(hdev, 0); break;
msft_add_monitor_sync(hdev, monitor);
handle++;
} }
/* All monitors have been resumed */
msft->resuming = false;
return 0;
} }
void msft_do_open(struct hci_dev *hdev) void msft_do_open(struct hci_dev *hdev)
@@ -275,7 +524,7 @@ void msft_do_open(struct hci_dev *hdev)
} }
if (msft_monitor_supported(hdev)) { if (msft_monitor_supported(hdev)) {
msft->reregistering = true; msft->resuming = true;
msft_set_filter_enable(hdev, true); msft_set_filter_enable(hdev, true);
/* Monitors get removed on power off, so we need to explicitly /* Monitors get removed on power off, so we need to explicitly
* tell the controller to re-monitor. * tell the controller to re-monitor.
@@ -381,151 +630,6 @@ __u64 msft_get_features(struct hci_dev *hdev)
return msft ? msft->features : 0; return msft ? msft->features : 0;
} }
/* is_mgmt = true matches the handle exposed to userspace via mgmt.
* is_mgmt = false matches the handle used by the msft controller.
* This function requires the caller holds hdev->lock
*/
static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
(struct hci_dev *hdev, u16 handle, bool is_mgmt)
{
struct msft_monitor_advertisement_handle_data *entry;
struct msft_data *msft = hdev->msft_data;
list_for_each_entry(entry, &msft->handle_map, list) {
if (is_mgmt && entry->mgmt_handle == handle)
return entry;
if (!is_mgmt && entry->msft_handle == handle)
return entry;
}
return NULL;
}
static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev,
u8 status, u16 opcode,
struct sk_buff *skb)
{
struct msft_rp_le_monitor_advertisement *rp;
struct adv_monitor *monitor;
struct msft_monitor_advertisement_handle_data *handle_data;
struct msft_data *msft = hdev->msft_data;
hci_dev_lock(hdev);
monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle);
if (!monitor) {
bt_dev_err(hdev, "msft add advmon: monitor %u is not found!",
msft->pending_add_handle);
status = HCI_ERROR_UNSPECIFIED;
goto unlock;
}
if (status)
goto unlock;
rp = (struct msft_rp_le_monitor_advertisement *)skb->data;
if (skb->len < sizeof(*rp)) {
status = HCI_ERROR_UNSPECIFIED;
goto unlock;
}
handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL);
if (!handle_data) {
status = HCI_ERROR_UNSPECIFIED;
goto unlock;
}
handle_data->mgmt_handle = monitor->handle;
handle_data->msft_handle = rp->handle;
INIT_LIST_HEAD(&handle_data->list);
list_add(&handle_data->list, &msft->handle_map);
monitor->state = ADV_MONITOR_STATE_OFFLOADED;
unlock:
if (status && monitor)
hci_free_adv_monitor(hdev, monitor);
/* If in restart/reregister sequence, keep registering. */
if (msft->reregistering)
reregister_monitor(hdev, msft->pending_add_handle + 1);
hci_dev_unlock(hdev);
if (!msft->reregistering)
hci_add_adv_patterns_monitor_complete(hdev, status);
}
static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
u8 status, u16 opcode,
struct sk_buff *skb)
{
struct msft_cp_le_cancel_monitor_advertisement *cp;
struct msft_rp_le_cancel_monitor_advertisement *rp;
struct adv_monitor *monitor;
struct msft_monitor_advertisement_handle_data *handle_data;
struct msft_data *msft = hdev->msft_data;
int err;
bool pending;
if (status)
goto done;
rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
if (skb->len < sizeof(*rp)) {
status = HCI_ERROR_UNSPECIFIED;
goto done;
}
hci_dev_lock(hdev);
cp = hci_sent_cmd_data(hdev, hdev->msft_opcode);
handle_data = msft_find_handle_data(hdev, cp->handle, false);
if (handle_data) {
monitor = idr_find(&hdev->adv_monitors_idr,
handle_data->mgmt_handle);
if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED)
monitor->state = ADV_MONITOR_STATE_REGISTERED;
/* Do not free the monitor if it is being removed due to
* suspend. It will be re-monitored on resume.
*/
if (monitor && !msft->suspending)
hci_free_adv_monitor(hdev, monitor);
list_del(&handle_data->list);
kfree(handle_data);
}
/* If in suspend/remove sequence, keep removing. */
if (msft->suspending)
remove_monitor_on_suspend(hdev,
msft->pending_remove_handle + 1);
/* If remove all monitors is required, we need to continue the process
* here because the earlier it was paused when waiting for the
* response from controller.
*/
if (msft->pending_remove_handle == 0) {
pending = hci_remove_all_adv_monitor(hdev, &err);
if (pending) {
hci_dev_unlock(hdev);
return;
}
if (err)
status = HCI_ERROR_UNSPECIFIED;
}
hci_dev_unlock(hdev);
done:
if (!msft->suspending)
hci_remove_adv_monitor_complete(hdev, status);
}
static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
u8 status, u16 opcode, u8 status, u16 opcode,
struct sk_buff *skb) struct sk_buff *skb)
@@ -560,35 +664,6 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
hci_dev_unlock(hdev); hci_dev_unlock(hdev);
} }
static bool msft_monitor_rssi_valid(struct adv_monitor *monitor)
{
struct adv_rssi_thresholds *r = &monitor->rssi;
if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX ||
r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX)
return false;
/* High_threshold_timeout is not supported,
* once high_threshold is reached, events are immediately reported.
*/
if (r->high_threshold_timeout != 0)
return false;
if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX)
return false;
/* Sampling period from 0x00 to 0xFF are all allowed */
return true;
}
static bool msft_monitor_pattern_valid(struct adv_monitor *monitor)
{
return msft_monitor_rssi_valid(monitor);
/* No additional check needed for pattern-based monitor */
}
/* This function requires the caller holds hdev->lock */ /* This function requires the caller holds hdev->lock */
static int __msft_add_monitor_pattern(struct hci_dev *hdev, static int __msft_add_monitor_pattern(struct hci_dev *hdev,
struct adv_monitor *monitor) struct adv_monitor *monitor)
@@ -656,7 +731,7 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor)
if (!msft) if (!msft)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (msft->reregistering || msft->suspending) if (msft->resuming || msft->suspending)
return -EBUSY; return -EBUSY;
return __msft_add_monitor_pattern(hdev, monitor); return __msft_add_monitor_pattern(hdev, monitor);
@@ -700,7 +775,7 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
if (!msft) if (!msft)
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (msft->reregistering || msft->suspending) if (msft->resuming || msft->suspending)
return -EBUSY; return -EBUSY;
return __msft_remove_monitor(hdev, monitor, handle); return __msft_remove_monitor(hdev, monitor, handle);
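
msft_suspend()/msft_resume() become msft_suspend_sync()/msft_resume_sync(): rather than starting a chain of command-complete callbacks, every offloaded monitor is cancelled or re-registered with __hci_cmd_sync() while the caller holds the req_sync lock. A rough sketch of how a suspend path can consume this; the surrounding function is illustrative, since the real call sites live in the suppressed hci_sync.c:

/* Sketch: with the synchronous conversion, suspend handling can call
 * the MSFT helper in-line; when it returns, all offloaded monitors
 * have been cancelled and scanning can be reconfigured safely.
 */
static int example_suspend_sync(struct hci_dev *hdev)
{
	int err;

	err = msft_suspend_sync(hdev);
	if (err)
		return err;

	/* ... pause advertising, disable scanning, set the event mask ... */

	return 0;
}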


@@ -24,8 +24,8 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
u16 handle); u16 handle);
void msft_req_add_set_filter_enable(struct hci_request *req, bool enable); void msft_req_add_set_filter_enable(struct hci_request *req, bool enable);
int msft_set_filter_enable(struct hci_dev *hdev, bool enable); int msft_set_filter_enable(struct hci_dev *hdev, bool enable);
void msft_suspend(struct hci_dev *hdev); int msft_suspend_sync(struct hci_dev *hdev);
void msft_resume(struct hci_dev *hdev); int msft_resume_sync(struct hci_dev *hdev);
bool msft_curve_validity(struct hci_dev *hdev); bool msft_curve_validity(struct hci_dev *hdev);
#else #else
@@ -61,8 +61,15 @@ static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static inline void msft_suspend(struct hci_dev *hdev) {} static inline int msft_suspend_sync(struct hci_dev *hdev)
static inline void msft_resume(struct hci_dev *hdev) {} {
return -EOPNOTSUPP;
}
static inline int msft_resume_sync(struct hci_dev *hdev)
{
return -EOPNOTSUPP;
}
static inline bool msft_curve_validity(struct hci_dev *hdev) static inline bool msft_curve_validity(struct hci_dev *hdev)
{ {