Diffstat (limited to 'net/bluetooth/hci_request.c')
 net/bluetooth/hci_request.c | 410
 1 file changed, 313 insertions(+), 97 deletions(-)
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e0269192f2e5..e55976db4403 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -29,6 +29,7 @@
#include "smp.h"
#include "hci_request.h"
+#include "msft.h"
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
@@ -58,7 +59,7 @@ static int req_run(struct hci_request *req, hci_req_complete_t complete,
struct sk_buff *skb;
unsigned long flags;
- BT_DBG("length %u", skb_queue_len(&req->cmd_q));
+ bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
/* If an error occurred during request building, remove all HCI
* commands queued on the HCI request queue.
@@ -102,7 +103,7 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
struct sk_buff *skb)
{
- BT_DBG("%s result 0x%2.2x", hdev->name, result);
+ bt_dev_dbg(hdev, "result 0x%2.2x", result);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
@@ -115,7 +116,7 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
- BT_DBG("%s err 0x%2.2x", hdev->name, err);
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = err;
@@ -131,7 +132,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
struct sk_buff *skb;
int err = 0;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_req_init(&req, hdev);
@@ -167,7 +168,7 @@ struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
skb = hdev->req_skb;
hdev->req_skb = NULL;
- BT_DBG("%s end: err %d", hdev->name, err);
+ bt_dev_dbg(hdev, "end: err %d", err);
if (err < 0) {
kfree_skb(skb);
@@ -196,7 +197,7 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
struct hci_request req;
int err = 0;
- BT_DBG("%s start", hdev->name);
+ bt_dev_dbg(hdev, "start");
hci_req_init(&req, hdev);
@@ -260,7 +261,7 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
hdev->req_skb = NULL;
hdev->req_status = hdev->req_result = 0;
- BT_DBG("%s end: err %d", hdev->name, err);
+ bt_dev_dbg(hdev, "end: err %d", err);
return err;
}
@@ -300,7 +301,7 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
if (plen)
skb_put_data(skb, param, plen);
- BT_DBG("skb len %d", skb->len);
+ bt_dev_dbg(hdev, "skb len %d", skb->len);
hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
hci_skb_opcode(skb) = opcode;
@@ -315,7 +316,7 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
struct hci_dev *hdev = req->hdev;
struct sk_buff *skb;
- BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
+ bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
/* If an error occurred during request building, there is no point in
* queueing the HCI command. We can simply return.
@@ -378,6 +379,58 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
+static void start_interleave_scan(struct hci_dev *hdev)
+{
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->interleave_scan, 0);
+}
+
+static bool is_interleave_scanning(struct hci_dev *hdev)
+{
+ return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
+}
+
+static void cancel_interleave_scan(struct hci_dev *hdev)
+{
+ bt_dev_dbg(hdev, "cancelling interleave scan");
+
+ cancel_delayed_work_sync(&hdev->interleave_scan);
+
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
+}
+
+/* Return true if this call started the interleave scan (i.e. it was not
+ * already running); otherwise, return false.
+ */
+static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
+{
+ /* Do interleaved scan only if all of the following are true:
+ * - There is at least one ADV monitor
+ * - At least one pending LE connection or one device to be scanned for
+ * - Monitor offloading is not supported
+ * If so, we should alternate between allowlist scan and one without
+ * any filters to save power.
+ */
+ bool use_interleaving = hci_is_adv_monitoring(hdev) &&
+ !(list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports)) &&
+ hci_get_adv_monitor_offload_ext(hdev) ==
+ HCI_ADV_MONITOR_EXT_NONE;
+ bool is_interleaving = is_interleave_scanning(hdev);
+
+ if (use_interleaving && !is_interleaving) {
+ start_interleave_scan(hdev);
+ bt_dev_dbg(hdev, "starting interleave scan");
+ return true;
+ }
+
+ if (!use_interleaving && is_interleaving)
+ cancel_interleave_scan(hdev);
+
+ return false;
+}
+
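The helper above boils down to a three-way predicate. A minimal user-space sketch of that decision follows; the struct and field names are hypothetical stand-ins for the hdev state it reads, not kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    struct scan_state {
    	bool has_adv_monitor;       /* hci_is_adv_monitoring(hdev) */
    	bool has_pending_le_conns;  /* !list_empty(&hdev->pend_le_conns) */
    	bool has_pending_reports;   /* !list_empty(&hdev->pend_le_reports) */
    	bool monitor_offloaded;     /* offload ext != HCI_ADV_MONITOR_EXT_NONE */
    };

    static bool should_interleave(const struct scan_state *s)
    {
    	return s->has_adv_monitor &&
    	       (s->has_pending_le_conns || s->has_pending_reports) &&
    	       !s->monitor_offloaded;
    }

    int main(void)
    {
    	struct scan_state s = { true, true, false, false };

    	printf("interleave scan: %s\n", should_interleave(&s) ? "yes" : "no");
    	return 0;
    }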
/* This function controls the background scanning based on hdev->pend_le_conns
* list. If there are pending LE connection we start the background scanning,
* otherwise we stop it.
@@ -413,8 +466,8 @@ static void __hci_update_background_scan(struct hci_request *req)
*/
hci_discovery_filter_clear(hdev);
- BT_DBG("%s ADV monitoring is %s", hdev->name,
- hci_is_adv_monitoring(hdev) ? "on" : "off");
+ bt_dev_dbg(hdev, "ADV monitoring is %s",
+ hci_is_adv_monitoring(hdev) ? "on" : "off");
if (list_empty(&hdev->pend_le_conns) &&
list_empty(&hdev->pend_le_reports) &&
@@ -430,7 +483,7 @@ static void __hci_update_background_scan(struct hci_request *req)
hci_req_add_le_scan_disable(req, false);
- BT_DBG("%s stopping background scanning", hdev->name);
+ bt_dev_dbg(hdev, "stopping background scanning");
} else {
/* If there is at least one pending LE connection, we should
* keep the background scan running.
@@ -450,8 +503,7 @@ static void __hci_update_background_scan(struct hci_request *req)
hci_req_add_le_scan_disable(req, false);
hci_req_add_le_passive_scan(req);
-
- BT_DBG("%s starting background scanning", hdev->name);
+ bt_dev_dbg(hdev, "starting background scanning");
}
}
@@ -661,6 +713,9 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
return;
}
+ if (hdev->suspended)
+ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
+
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable cp;
@@ -698,7 +753,8 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
- if (use_ll_privacy(req->hdev)) {
+ if (use_ll_privacy(req->hdev) &&
+ hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
@@ -732,7 +788,8 @@ static int add_to_white_list(struct hci_request *req,
return -1;
/* White list can not be used with RPAs */
- if (!allow_rpa && !use_ll_privacy(hdev) &&
+ if (!allow_rpa &&
+ !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
return -1;
}
@@ -750,7 +807,8 @@ static int add_to_white_list(struct hci_request *req,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
- if (use_ll_privacy(hdev)) {
+ if (use_ll_privacy(hdev) &&
+ hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(hdev, &params->addr,
@@ -812,7 +870,8 @@ static u8 update_white_list(struct hci_request *req)
}
/* White list can not be used with RPAs */
- if (!allow_rpa && !use_ll_privacy(hdev) &&
+ if (!allow_rpa &&
+ !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
return 0x00;
}
@@ -844,12 +903,14 @@ static u8 update_white_list(struct hci_request *req)
return 0x00;
}
- /* Once the controller offloading of advertisement monitor is in place,
- * the if condition should include the support of MSFT extension
- * support. If suspend is ongoing, whitelist should be the default to
- * prevent waking by random advertisements.
+ /* Use the allowlist unless the following conditions are all true:
+ * - We are not currently suspending
+ * - At least one ADV monitor is registered and monitoring is not offloaded
+ * - Interleaved scanning is not currently using the allowlist
*/
- if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
+ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
+ hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
+ hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
return 0x00;
/* Select filter policy to use white list */
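A simplified sketch of the filter-policy choice the rewritten comment describes; field and enum names are illustrative, not kernel API, and the real update_white_list() has additional early-return cases (RPAs, full list).

    #include <stdbool.h>
    #include <stdio.h>

    enum iscan_state { ISCAN_NONE, ISCAN_ALLOWLIST, ISCAN_NO_FILTER };

    struct dev_state {
    	bool have_monitors;      /* !idr_is_empty(&hdev->adv_monitors_idr) */
    	bool suspended;
    	bool monitor_offloaded;  /* offload ext != HCI_ADV_MONITOR_EXT_NONE */
    	enum iscan_state interleave;
    };

    /* 0x00 = accept all advertisements, 0x01 = use the allow (white) list */
    static unsigned char filter_policy(const struct dev_state *d)
    {
    	if (d->have_monitors && !d->suspended && !d->monitor_offloaded &&
    	    d->interleave != ISCAN_ALLOWLIST)
    		return 0x00;

    	return 0x01;
    }

    int main(void)
    {
    	struct dev_state d = { .have_monitors = true, .suspended = true };

    	printf("filter_policy = 0x%02x\n", filter_policy(&d));
    	return 0;
    }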
@@ -1002,6 +1063,11 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
&own_addr_type))
return;
+ if (hdev->enable_advmon_interleave_scan &&
+ __hci_update_interleaved_scan(hdev))
+ return;
+
+ bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
/* Adding or removing entries from the white list must
* happen before enabling scanning. The controller does
* not allow white list modification while scanning.
@@ -1024,9 +1090,14 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
if (hdev->suspended) {
window = hdev->le_scan_window_suspend;
interval = hdev->le_scan_int_suspend;
+
+ set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
} else if (hci_is_le_conn_scanning(hdev)) {
window = hdev->le_scan_window_connect;
interval = hdev->le_scan_int_connect;
+ } else if (hci_is_adv_monitoring(hdev)) {
+ window = hdev->le_scan_window_adv_monitor;
+ interval = hdev->le_scan_int_adv_monitor;
} else {
window = hdev->le_scan_window;
interval = hdev->le_scan_interval;
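The window/interval selection above is a strict priority order: the suspend parameters win, then connection-establishment scanning, then the new ADV-monitor parameters, then the defaults. A sketch with placeholder values (the real numbers come from hdev tunables):

    #include <stdbool.h>
    #include <stdio.h>

    struct scan_params { unsigned int interval, window; }; /* 0.625 ms units */

    static struct scan_params pick_scan_params(bool suspended, bool conn_scanning,
    					   bool adv_monitoring)
    {
    	if (suspended)
    		return (struct scan_params){ 0x0800, 0x0012 }; /* low duty */
    	if (conn_scanning)
    		return (struct scan_params){ 0x0060, 0x0060 }; /* aggressive */
    	if (adv_monitoring)
    		return (struct scan_params){ 0x0060, 0x0030 };
    	return (struct scan_params){ 0x0060, 0x0030 };         /* defaults */
    }

    int main(void)
    {
    	struct scan_params p = pick_scan_params(true, false, false);

    	printf("interval=0x%04x window=0x%04x\n", p.interval, p.window);
    	return 0;
    }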
@@ -1037,22 +1108,23 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
own_addr_type, filter_policy, addr_resolv);
}
-static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
+static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
struct adv_info *adv_instance;
/* Instance 0x00 always set local name */
if (instance == 0x00)
- return 1;
+ return true;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
- return 0;
+ return false;
- /* TODO: Take into account the "appearance" and "local-name" flags here.
- * These are currently being ignored as they are not supported.
- */
- return adv_instance->scan_rsp_len;
+ if (adv_instance->flags & MGMT_ADV_FLAG_APPEARANCE ||
+ adv_instance->flags & MGMT_ADV_FLAG_LOCAL_NAME)
+ return true;
+
+ return adv_instance->scan_rsp_len ? true : false;
}
static void hci_req_clear_event_filter(struct hci_request *req)
@@ -1095,32 +1167,107 @@ static void hci_req_set_event_filter(struct hci_request *req)
scan = SCAN_PAGE;
}
+ if (scan)
+ set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
+ else
+ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
+
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
-static void hci_req_config_le_suspend_scan(struct hci_request *req)
+static void cancel_adv_timeout(struct hci_dev *hdev)
{
- /* Before changing params disable scan if enabled */
- if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
- hci_req_add_le_scan_disable(req, false);
+ if (hdev->adv_instance_timeout) {
+ hdev->adv_instance_timeout = 0;
+ cancel_delayed_work(&hdev->adv_instance_expire);
+ }
+}
- /* Configure params and enable scanning */
- hci_req_add_le_passive_scan(req);
+/* This function requires the caller holds hdev->lock */
+void __hci_req_pause_adv_instances(struct hci_request *req)
+{
+ bt_dev_dbg(req->hdev, "Pausing advertising instances");
- /* Block suspend notifier on response */
- set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
+ /* Call to disable any advertisements active on the controller.
+ * This will succeed even if no advertisements are configured.
+ */
+ __hci_req_disable_advertising(req);
+
+ /* If we are using software rotation, pause the loop */
+ if (!ext_adv_capable(req->hdev))
+ cancel_adv_timeout(req->hdev);
+}
+
+/* This function requires the caller holds hdev->lock */
+static void __hci_req_resume_adv_instances(struct hci_request *req)
+{
+ struct adv_info *adv;
+
+ bt_dev_dbg(req->hdev, "Resuming advertising instances");
+
+ if (ext_adv_capable(req->hdev)) {
+ /* Call for each tracked instance to be re-enabled */
+ list_for_each_entry(adv, &req->hdev->adv_instances, list) {
+ __hci_req_enable_ext_advertising(req,
+ adv->instance);
+ }
+
+ } else {
+ /* Schedule for most recent instance to be restarted and begin
+ * the software rotation loop
+ */
+ __hci_req_schedule_adv_instance(req,
+ req->hdev->cur_adv_instance,
+ true);
+ }
+}
+
+/* This function requires the caller holds hdev->lock */
+int hci_req_resume_adv_instances(struct hci_dev *hdev)
+{
+ struct hci_request req;
+
+ hci_req_init(&req, hdev);
+ __hci_req_resume_adv_instances(&req);
+
+ return hci_req_run(&req, NULL);
}
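The resume helper above has two shapes: with extended advertising every tracked set is re-enabled individually, while the legacy path simply restarts the software rotation at the current instance. A rough sketch of that branch, with placeholder types and output:

    #include <stdbool.h>
    #include <stdio.h>

    struct adv_set { int handle; };

    static void resume_advertising(bool ext_adv, const struct adv_set *sets,
    			       int count, int cur_instance)
    {
    	if (ext_adv) {
    		for (int i = 0; i < count; i++)
    			printf("re-enable ext adv set %d\n", sets[i].handle);
    	} else {
    		printf("reschedule legacy rotation at instance %d\n",
    		       cur_instance);
    	}
    }

    int main(void)
    {
    	const struct adv_set sets[] = { { 1 }, { 2 } };

    	resume_advertising(true, sets, 2, 1);
    	return 0;
    }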
static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
status);
- if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
- test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
+ if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
+ test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
+ clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
+ clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
+ wake_up(&hdev->suspend_wait_q);
+ }
+
+ if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
+ clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
wake_up(&hdev->suspend_wait_q);
}
}
+static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
+ bool enable)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ case HCI_ADV_MONITOR_EXT_MSFT:
+ msft_req_add_set_filter_enable(req, enable);
+ break;
+ default:
+ return;
+ }
+
+ /* No need to block when enabling since it's on resume path */
+ if (hdev->suspended && !enable)
+ set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
+}
+
/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
@@ -1153,7 +1300,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hdev->discovery_paused = true;
hdev->discovery_old_state = old_state;
- /* Stop advertising */
+ /* Stop directed advertising */
old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
if (old_state) {
set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
@@ -1162,6 +1309,10 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
&hdev->discov_off, 0);
}
+ /* Pause other advertisements */
+ if (hdev->adv_instance_cnt)
+ __hci_req_pause_adv_instances(&req);
+
hdev->advertising_paused = true;
hdev->advertising_old_state = old_state;
/* Disable page scan */
@@ -1169,8 +1320,13 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
/* Disable LE passive scan if enabled */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+ cancel_interleave_scan(hdev);
hci_req_add_le_scan_disable(&req, false);
+ }
+
+ /* Disable advertisement filters */
+ hci_req_add_set_adv_filter_enable(&req, false);
/* Mark task needing completion */
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
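Throughout this suspend path the driver keeps a bitmask of outstanding steps: each queued change sets a SUSPEND_* bit, the request-completion callback clears it, and the suspend notifier waits on hdev->suspend_wait_q until the mask is empty. A user-space sketch of that bookkeeping (names are illustrative, not kernel API):

    #include <stdio.h>

    enum {
    	TASK_SCAN_DISABLE      = 1 << 0,
    	TASK_SCAN_ENABLE       = 1 << 1,
    	TASK_PAUSE_ADVERTISING = 1 << 2,
    	TASK_SET_ADV_FILTER    = 1 << 3,
    };

    static unsigned int suspend_tasks;

    static void mark_pending(unsigned int task)
    {
    	suspend_tasks |= task;
    }

    /* Stands in for the HCI request completion handler clearing a bit. */
    static void task_complete(unsigned int task)
    {
    	suspend_tasks &= ~task;
    	if (!suspend_tasks)
    		printf("all suspend tasks done, wake the waiter\n");
    }

    int main(void)
    {
    	mark_pending(TASK_SCAN_DISABLE);
    	mark_pending(TASK_SET_ADV_FILTER);

    	task_complete(TASK_SCAN_DISABLE);
    	task_complete(TASK_SET_ADV_FILTER);
    	return 0;
    }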
@@ -1200,7 +1356,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
/* Enable event filter for paired devices */
hci_req_set_event_filter(&req);
/* Enable passive scan at lower duty cycle */
- hci_req_config_le_suspend_scan(&req);
+ __hci_update_background_scan(&req);
/* Pause scan changes again. */
hdev->scanning_paused = true;
hci_req_run(&req, suspend_req_complete);
@@ -1210,9 +1366,11 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hci_req_clear_event_filter(&req);
/* Reset passive/background scanning to normal */
- hci_req_config_le_suspend_scan(&req);
+ __hci_update_background_scan(&req);
+ /* Enable all of the advertisement filters */
+ hci_req_add_set_adv_filter_enable(&req, true);
- /* Unpause advertising */
+ /* Unpause directed advertising */
hdev->advertising_paused = false;
if (hdev->advertising_old_state) {
set_bit(SUSPEND_UNPAUSE_ADVERTISING,
@@ -1223,6 +1381,10 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hdev->advertising_old_state = 0;
}
+ /* Resume other advertisements */
+ if (hdev->adv_instance_cnt)
+ __hci_req_resume_adv_instances(&req);
+
/* Unpause discovery */
hdev->discovery_paused = false;
if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
@@ -1242,23 +1404,9 @@ done:
wake_up(&hdev->suspend_wait_q);
}
-static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
+static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
- u8 instance = hdev->cur_adv_instance;
- struct adv_info *adv_instance;
-
- /* Instance 0x00 always set local name */
- if (instance == 0x00)
- return 1;
-
- adv_instance = hci_find_adv_instance(hdev, instance);
- if (!adv_instance)
- return 0;
-
- /* TODO: Take into account the "appearance" and "local-name" flags here.
- * These are currently being ignored as they are not supported.
- */
- return adv_instance->scan_rsp_len;
+ return adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}
void __hci_req_disable_advertising(struct hci_request *req)
@@ -1370,6 +1518,7 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
void __hci_req_enable_advertising(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
+ struct adv_info *adv_instance;
struct hci_cp_le_set_adv_param cp;
u8 own_addr_type, enable = 0x01;
bool connectable;
@@ -1377,6 +1526,7 @@ void __hci_req_enable_advertising(struct hci_request *req)
u32 flags;
flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
+ adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
/* If the "connectable" instance flag was not set, then choose between
* ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
@@ -1408,13 +1558,18 @@ void __hci_req_enable_advertising(struct hci_request *req)
memset(&cp, 0, sizeof(cp));
- if (connectable) {
- cp.type = LE_ADV_IND;
-
+ if (adv_instance) {
+ adv_min_interval = adv_instance->min_interval;
+ adv_max_interval = adv_instance->max_interval;
+ } else {
adv_min_interval = hdev->le_adv_min_interval;
adv_max_interval = hdev->le_adv_max_interval;
+ }
+
+ if (connectable) {
+ cp.type = LE_ADV_IND;
} else {
- if (get_cur_adv_instance_scan_rsp_len(hdev))
+ if (adv_cur_instance_is_scannable(hdev))
cp.type = LE_ADV_SCAN_IND;
else
cp.type = LE_ADV_NONCONN_IND;
@@ -1423,9 +1578,6 @@ void __hci_req_enable_advertising(struct hci_request *req)
hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
- } else {
- adv_min_interval = hdev->le_adv_min_interval;
- adv_max_interval = hdev->le_adv_max_interval;
}
}
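With the interval handling factored out above, the legacy PDU type choice reduces to: connectable instances use ADV_IND, non-connectable but scannable ones (scan response data, or the local-name/appearance flags) use ADV_SCAN_IND, everything else ADV_NONCONN_IND. A sketch using the HCI spec values; the helper name is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define LE_ADV_IND         0x00
    #define LE_ADV_SCAN_IND    0x02
    #define LE_ADV_NONCONN_IND 0x03

    static unsigned char pick_adv_type(bool connectable, bool scannable)
    {
    	if (connectable)
    		return LE_ADV_IND;

    	return scannable ? LE_ADV_SCAN_IND : LE_ADV_NONCONN_IND;
    }

    int main(void)
    {
    	printf("adv type = 0x%02x\n", pick_adv_type(false, true));
    	return 0;
    }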
@@ -1750,7 +1902,7 @@ void hci_req_disable_address_resolution(struct hci_dev *hdev)
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
- BT_DBG("%s status %u", hdev->name, status);
+ bt_dev_dbg(hdev, "status %u", status);
}
void hci_req_reenable_advertising(struct hci_dev *hdev)
@@ -1787,7 +1939,7 @@ static void adv_timeout_expire(struct work_struct *work)
struct hci_request req;
u8 instance;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -1810,6 +1962,62 @@ unlock:
hci_dev_unlock(hdev);
}
+static int hci_req_add_le_interleaved_scan(struct hci_request *req,
+ unsigned long opt)
+{
+ struct hci_dev *hdev = req->hdev;
+ int ret = 0;
+
+ hci_dev_lock(hdev);
+
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ hci_req_add_le_scan_disable(req, false);
+ hci_req_add_le_passive_scan(req);
+
+ switch (hdev->interleave_scan_state) {
+ case INTERLEAVE_SCAN_ALLOWLIST:
+ bt_dev_dbg(hdev, "next state: allowlist");
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+ break;
+ case INTERLEAVE_SCAN_NO_FILTER:
+ bt_dev_dbg(hdev, "next state: no filter");
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
+ break;
+ case INTERLEAVE_SCAN_NONE:
+ BT_ERR("unexpected error");
+ ret = -1;
+ }
+
+ hci_dev_unlock(hdev);
+
+ return ret;
+}
+
+static void interleave_scan_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev,
+ interleave_scan.work);
+ u8 status;
+ unsigned long timeout;
+
+ if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
+ timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
+ } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
+ timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
+ } else {
+ bt_dev_err(hdev, "unexpected error");
+ return;
+ }
+
+ hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
+ HCI_CMD_TIMEOUT, &status);
+
+ /* Don't continue interleaving if it was canceled */
+ if (is_interleave_scanning(hdev))
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->interleave_scan, timeout);
+}
+
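interleave_scan_work() above drives a simple two-phase rotation: it picks the duration for the phase it is currently in, lets the synchronous request flip the state (and reprogram the filter policy), then re-arms the delayed work unless the scan was cancelled. A condensed sketch of that loop, with placeholder durations and names:

    #include <stdio.h>

    enum iscan_state { ISCAN_NONE, ISCAN_ALLOWLIST, ISCAN_NO_FILTER };

    struct dev_state {
    	enum iscan_state state;
    	unsigned int allowlist_ms;
    	unsigned int no_filter_ms;
    };

    /* Return the re-arm delay in ms and advance to the next phase; 0 = stop. */
    static unsigned int interleave_step(struct dev_state *d)
    {
    	switch (d->state) {
    	case ISCAN_ALLOWLIST:
    		d->state = ISCAN_NO_FILTER;
    		return d->allowlist_ms;
    	case ISCAN_NO_FILTER:
    		d->state = ISCAN_ALLOWLIST;
    		return d->no_filter_ms;
    	default:
    		return 0;	/* cancelled */
    	}
    }

    int main(void)
    {
    	struct dev_state d = { ISCAN_NO_FILTER, 300, 300 };

    	for (int i = 0; i < 4; i++)
    		printf("phase done, re-arm in %u ms\n", interleave_step(&d));
    	return 0;
    }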
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
bool use_rpa, struct adv_info *adv_instance,
u8 *own_addr_type, bdaddr_t *rand_addr)
@@ -1824,7 +2032,13 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
if (use_rpa) {
int to;
- *own_addr_type = ADDR_LE_DEV_RANDOM;
+ /* If the controller supports LL Privacy, use own address type
+ * 0x03 (RPA or random address).
+ */
+ if (use_ll_privacy(hdev))
+ *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+ else
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
if (adv_instance) {
if (!adv_instance->rpa_expired &&
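The change above requests own-address type 0x03 (resolvable private address, falling back to random) when the controller does LL Privacy, instead of plain 0x01 (random device address). A trivial sketch using the HCI spec values; the helper name is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    #define ADDR_LE_DEV_RANDOM          0x01
    #define ADDR_LE_DEV_RANDOM_RESOLVED 0x03

    static unsigned char own_addr_type_for_rpa(bool ll_privacy)
    {
    	return ll_privacy ? ADDR_LE_DEV_RANDOM_RESOLVED : ADDR_LE_DEV_RANDOM;
    }

    int main(void)
    {
    	printf("own_addr_type = 0x%02x\n", own_addr_type_for_rpa(true));
    	return 0;
    }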
@@ -1939,9 +2153,15 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
memset(&cp, 0, sizeof(cp));
- /* In ext adv set param interval is 3 octets */
- hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
- hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
+ if (adv_instance) {
+ hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
+ hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
+ cp.tx_power = adv_instance->tx_power;
+ } else {
+ hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
+ hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
+ cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
+ }
secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
@@ -1950,7 +2170,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
else
cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
- } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
+ } else if (adv_instance_is_scannable(hdev, instance)) {
if (secondary_adv)
cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
else
@@ -1964,7 +2184,6 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
- cp.tx_power = 127;
cp.handle = instance;
if (flags & MGMT_ADV_FLAG_SEC_2M) {
@@ -2183,14 +2402,6 @@ int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
return 0;
}
-static void cancel_adv_timeout(struct hci_dev *hdev)
-{
- if (hdev->adv_instance_timeout) {
- hdev->adv_instance_timeout = 0;
- cancel_delayed_work(&hdev->adv_instance_expire);
- }
-}
-
/* For a single instance:
* - force == true: The instance will be removed even when its remaining
* lifetime is not zero.
@@ -2273,7 +2484,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
*/
if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
hci_lookup_le_connect(hdev)) {
- BT_DBG("Deferring random address update");
+ bt_dev_dbg(hdev, "Deferring random address update");
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
return;
}
@@ -2498,7 +2709,7 @@ void __hci_req_update_class(struct hci_request *req)
struct hci_dev *hdev = req->hdev;
u8 cod[3];
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
if (!hdev_is_powered(hdev))
return;
@@ -2667,7 +2878,7 @@ void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
if (status)
- BT_DBG("Failed to abort connection: status 0x%2.2x", status);
+ bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
}
int hci_abort_conn(struct hci_conn *conn, u8 reason)
@@ -2730,7 +2941,7 @@ static int bredr_inquiry(struct hci_request *req, unsigned long opt)
const u8 liac[3] = { 0x00, 0x8b, 0x9e };
struct hci_cp_inquiry cp;
- BT_DBG("%s", req->hdev->name);
+ bt_dev_dbg(req->hdev, "");
hci_dev_lock(req->hdev);
hci_inquiry_cache_flush(req->hdev);
@@ -2756,7 +2967,7 @@ static void le_scan_disable_work(struct work_struct *work)
le_scan_disable.work);
u8 status;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
return;
@@ -2852,7 +3063,7 @@ static void le_scan_restart_work(struct work_struct *work)
unsigned long timeout, duration, scan_start, now;
u8 status;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
if (status) {
@@ -2906,14 +3117,16 @@ static int active_scan(struct hci_request *req, unsigned long opt)
bool addr_resolv = false;
int err;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
/* If controller is scanning, it means the background scanning is
* running. Thus, we should temporarily stop it in order to set the
* discovery scanning parameters.
*/
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
hci_req_add_le_scan_disable(req, false);
+ cancel_interleave_scan(hdev);
+ }
/* All active scans will be done with either a resolvable private
* address (when privacy feature has been enabled) or non-resolvable
@@ -2934,7 +3147,7 @@ static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
int err;
- BT_DBG("%s", req->hdev->name);
+ bt_dev_dbg(req->hdev, "");
err = active_scan(req, opt);
if (err)
@@ -2947,7 +3160,7 @@ static void start_discovery(struct hci_dev *hdev, u8 *status)
{
unsigned long timeout;
- BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
+ bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
switch (hdev->discovery.type) {
case DISCOV_TYPE_BREDR:
@@ -2995,7 +3208,7 @@ static void start_discovery(struct hci_dev *hdev, u8 *status)
if (*status)
return;
- BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
+ bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
/* When service discovery is used and the controller has a
* strict duplicate filter, it is important to remember the
@@ -3020,7 +3233,7 @@ bool hci_req_stop_discovery(struct hci_request *req)
struct inquiry_entry *e;
bool ret = false;
- BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
+ bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
if (test_bit(HCI_INQUIRY, &hdev->flags))
@@ -3100,7 +3313,7 @@ static void discov_off(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
discov_off.work);
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -3239,6 +3452,7 @@ void hci_request_setup(struct hci_dev *hdev)
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
+ INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
void hci_request_cancel_all(struct hci_dev *hdev)
@@ -3258,4 +3472,6 @@ void hci_request_cancel_all(struct hci_dev *hdev)
cancel_delayed_work_sync(&hdev->adv_instance_expire);
hdev->adv_instance_timeout = 0;
}
+
+ cancel_interleave_scan(hdev);
}