@@ -37,6 +37,8 @@
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

+#include "hci_request.h"
+#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
@@ -137,938 +139,6 @@ static const struct file_operations dut_mode_fops = {
.llseek = default_llseek,
};
|
|
|
|
|
|
-static int features_show(struct seq_file *f, void *ptr)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- u8 p;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
|
|
|
- seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
|
|
|
- "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
|
|
|
- hdev->features[p][0], hdev->features[p][1],
|
|
|
- hdev->features[p][2], hdev->features[p][3],
|
|
|
- hdev->features[p][4], hdev->features[p][5],
|
|
|
- hdev->features[p][6], hdev->features[p][7]);
|
|
|
- }
|
|
|
- if (lmp_le_capable(hdev))
|
|
|
- seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
|
|
|
- "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
|
|
|
- hdev->le_features[0], hdev->le_features[1],
|
|
|
- hdev->le_features[2], hdev->le_features[3],
|
|
|
- hdev->le_features[4], hdev->le_features[5],
|
|
|
- hdev->le_features[6], hdev->le_features[7]);
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int features_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, features_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations features_fops = {
|
|
|
- .open = features_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int blacklist_show(struct seq_file *f, void *p)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- struct bdaddr_list *b;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- list_for_each_entry(b, &hdev->blacklist, list)
|
|
|
- seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int blacklist_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, blacklist_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations blacklist_fops = {
|
|
|
- .open = blacklist_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int uuids_show(struct seq_file *f, void *p)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- struct bt_uuid *uuid;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- list_for_each_entry(uuid, &hdev->uuids, list) {
|
|
|
- u8 i, val[16];
|
|
|
-
|
|
|
- /* The Bluetooth UUID values are stored in big endian,
|
|
|
- * but with reversed byte order. So convert them into
|
|
|
- * the right order for the %pUb modifier.
|
|
|
- */
|
|
|
- for (i = 0; i < 16; i++)
|
|
|
- val[i] = uuid->uuid[15 - i];
|
|
|
-
|
|
|
- seq_printf(f, "%pUb\n", val);
|
|
|
- }
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int uuids_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, uuids_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations uuids_fops = {
|
|
|
- .open = uuids_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int inquiry_cache_show(struct seq_file *f, void *p)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- struct discovery_state *cache = &hdev->discovery;
|
|
|
- struct inquiry_entry *e;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
-
|
|
|
- list_for_each_entry(e, &cache->all, all) {
|
|
|
- struct inquiry_data *data = &e->data;
|
|
|
- seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
|
|
|
- &data->bdaddr,
|
|
|
- data->pscan_rep_mode, data->pscan_period_mode,
|
|
|
- data->pscan_mode, data->dev_class[2],
|
|
|
- data->dev_class[1], data->dev_class[0],
|
|
|
- __le16_to_cpu(data->clock_offset),
|
|
|
- data->rssi, data->ssp_mode, e->timestamp);
|
|
|
- }
|
|
|
-
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int inquiry_cache_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, inquiry_cache_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations inquiry_cache_fops = {
|
|
|
- .open = inquiry_cache_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int link_keys_show(struct seq_file *f, void *ptr)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- struct link_key *key;
|
|
|
-
|
|
|
- rcu_read_lock();
|
|
|
- list_for_each_entry_rcu(key, &hdev->link_keys, list)
|
|
|
- seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
|
|
|
- HCI_LINK_KEY_SIZE, key->val, key->pin_len);
|
|
|
- rcu_read_unlock();
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int link_keys_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, link_keys_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations link_keys_fops = {
|
|
|
- .open = link_keys_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int dev_class_show(struct seq_file *f, void *ptr)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
|
|
|
- hdev->dev_class[1], hdev->dev_class[0]);
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int dev_class_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, dev_class_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations dev_class_fops = {
|
|
|
- .open = dev_class_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int voice_setting_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->voice_setting;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
|
|
|
- NULL, "0x%4.4llx\n");
|
|
|
-
|
|
|
-static int auto_accept_delay_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->auto_accept_delay = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int auto_accept_delay_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->auto_accept_delay;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
|
|
|
- auto_accept_delay_set, "%llu\n");
|
|
|
-
|
|
|
-static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
|
|
|
- size_t count, loff_t *ppos)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = file->private_data;
|
|
|
- char buf[3];
|
|
|
-
|
|
|
- buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
|
|
|
- buf[1] = '\n';
|
|
|
- buf[2] = '\0';
|
|
|
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
|
|
|
-}
|
|
|
-
|
|
|
-static ssize_t force_sc_support_write(struct file *file,
|
|
|
- const char __user *user_buf,
|
|
|
- size_t count, loff_t *ppos)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = file->private_data;
|
|
|
- char buf[32];
|
|
|
- size_t buf_size = min(count, (sizeof(buf)-1));
|
|
|
- bool enable;
|
|
|
-
|
|
|
- if (test_bit(HCI_UP, &hdev->flags))
|
|
|
- return -EBUSY;
|
|
|
-
|
|
|
- if (copy_from_user(buf, user_buf, buf_size))
|
|
|
- return -EFAULT;
|
|
|
-
|
|
|
- buf[buf_size] = '\0';
|
|
|
- if (strtobool(buf, &enable))
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
|
|
|
- return -EALREADY;
|
|
|
-
|
|
|
- change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
|
|
|
-
|
|
|
- return count;
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations force_sc_support_fops = {
|
|
|
- .open = simple_open,
|
|
|
- .read = force_sc_support_read,
|
|
|
- .write = force_sc_support_write,
|
|
|
- .llseek = default_llseek,
|
|
|
-};
|
|
|
-
|
|
|
-static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
|
|
|
- size_t count, loff_t *ppos)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = file->private_data;
|
|
|
- char buf[3];
|
|
|
-
|
|
|
- buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
|
|
|
- buf[1] = '\n';
|
|
|
- buf[2] = '\0';
|
|
|
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
|
|
|
-}
|
|
|
-
|
|
|
-static ssize_t force_lesc_support_write(struct file *file,
|
|
|
- const char __user *user_buf,
|
|
|
- size_t count, loff_t *ppos)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = file->private_data;
|
|
|
- char buf[32];
|
|
|
- size_t buf_size = min(count, (sizeof(buf)-1));
|
|
|
- bool enable;
|
|
|
-
|
|
|
- if (copy_from_user(buf, user_buf, buf_size))
|
|
|
- return -EFAULT;
|
|
|
-
|
|
|
- buf[buf_size] = '\0';
|
|
|
- if (strtobool(buf, &enable))
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
|
|
|
- return -EALREADY;
|
|
|
-
|
|
|
- change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
|
|
|
-
|
|
|
- return count;
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations force_lesc_support_fops = {
|
|
|
- .open = simple_open,
|
|
|
- .read = force_lesc_support_read,
|
|
|
- .write = force_lesc_support_write,
|
|
|
- .llseek = default_llseek,
|
|
|
-};
|
|
|
-
|
|
|
-static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
|
|
|
- size_t count, loff_t *ppos)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = file->private_data;
|
|
|
- char buf[3];
|
|
|
-
|
|
|
- buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
|
|
|
- buf[1] = '\n';
|
|
|
- buf[2] = '\0';
|
|
|
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations sc_only_mode_fops = {
|
|
|
- .open = simple_open,
|
|
|
- .read = sc_only_mode_read,
|
|
|
- .llseek = default_llseek,
|
|
|
-};
|
|
|
-
|
|
|
-static int idle_timeout_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val != 0 && (val < 500 || val > 3600000))
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->idle_timeout = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int idle_timeout_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->idle_timeout;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
|
|
|
- idle_timeout_set, "%llu\n");
|
|
|
-
|
|
|
-static int rpa_timeout_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- /* Require the RPA timeout to be at least 30 seconds and at most
|
|
|
- * 24 hours.
|
|
|
- */
|
|
|
- if (val < 30 || val > (60 * 60 * 24))
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->rpa_timeout = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int rpa_timeout_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->rpa_timeout;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
|
|
|
- rpa_timeout_set, "%llu\n");
|
|
|
-
|
|
|
-static int sniff_min_interval_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->sniff_min_interval = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int sniff_min_interval_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->sniff_min_interval;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
|
|
|
- sniff_min_interval_set, "%llu\n");
|
|
|
-
|
|
|
-static int sniff_max_interval_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->sniff_max_interval = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int sniff_max_interval_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->sniff_max_interval;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
|
|
|
- sniff_max_interval_set, "%llu\n");
|
|
|
-
|
|
|
-static int conn_info_min_age_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val == 0 || val > hdev->conn_info_max_age)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->conn_info_min_age = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int conn_info_min_age_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->conn_info_min_age;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
|
|
|
- conn_info_min_age_set, "%llu\n");
|
|
|
-
|
|
|
-static int conn_info_max_age_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val == 0 || val < hdev->conn_info_min_age)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->conn_info_max_age = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int conn_info_max_age_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->conn_info_max_age;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
|
|
|
- conn_info_max_age_set, "%llu\n");
|
|
|
-
|
|
|
-static int identity_show(struct seq_file *f, void *p)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- bdaddr_t addr;
|
|
|
- u8 addr_type;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
-
|
|
|
- hci_copy_identity_address(hdev, &addr, &addr_type);
|
|
|
-
|
|
|
- seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
|
|
|
- 16, hdev->irk, &hdev->rpa);
|
|
|
-
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int identity_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, identity_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations identity_fops = {
|
|
|
- .open = identity_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int random_address_show(struct seq_file *f, void *p)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- seq_printf(f, "%pMR\n", &hdev->random_addr);
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int random_address_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, random_address_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations random_address_fops = {
|
|
|
- .open = random_address_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int static_address_show(struct seq_file *f, void *p)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- seq_printf(f, "%pMR\n", &hdev->static_addr);
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int static_address_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, static_address_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations static_address_fops = {
|
|
|
- .open = static_address_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static ssize_t force_static_address_read(struct file *file,
|
|
|
- char __user *user_buf,
|
|
|
- size_t count, loff_t *ppos)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = file->private_data;
|
|
|
- char buf[3];
|
|
|
-
|
|
|
- buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
|
|
|
- buf[1] = '\n';
|
|
|
- buf[2] = '\0';
|
|
|
- return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
|
|
|
-}
|
|
|
-
|
|
|
-static ssize_t force_static_address_write(struct file *file,
|
|
|
- const char __user *user_buf,
|
|
|
- size_t count, loff_t *ppos)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = file->private_data;
|
|
|
- char buf[32];
|
|
|
- size_t buf_size = min(count, (sizeof(buf)-1));
|
|
|
- bool enable;
|
|
|
-
|
|
|
- if (test_bit(HCI_UP, &hdev->flags))
|
|
|
- return -EBUSY;
|
|
|
-
|
|
|
- if (copy_from_user(buf, user_buf, buf_size))
|
|
|
- return -EFAULT;
|
|
|
-
|
|
|
- buf[buf_size] = '\0';
|
|
|
- if (strtobool(buf, &enable))
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
|
|
|
- return -EALREADY;
|
|
|
-
|
|
|
- change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
|
|
|
-
|
|
|
- return count;
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations force_static_address_fops = {
|
|
|
- .open = simple_open,
|
|
|
- .read = force_static_address_read,
|
|
|
- .write = force_static_address_write,
|
|
|
- .llseek = default_llseek,
|
|
|
-};
|
|
|
-
|
|
|
-static int white_list_show(struct seq_file *f, void *ptr)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- struct bdaddr_list *b;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- list_for_each_entry(b, &hdev->le_white_list, list)
|
|
|
- seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int white_list_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, white_list_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations white_list_fops = {
|
|
|
- .open = white_list_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- struct smp_irk *irk;
|
|
|
-
|
|
|
- rcu_read_lock();
|
|
|
- list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
|
|
|
- seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
|
|
|
- &irk->bdaddr, irk->addr_type,
|
|
|
- 16, irk->val, &irk->rpa);
|
|
|
- }
|
|
|
- rcu_read_unlock();
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int identity_resolving_keys_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, identity_resolving_keys_show,
|
|
|
- inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations identity_resolving_keys_fops = {
|
|
|
- .open = identity_resolving_keys_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int long_term_keys_show(struct seq_file *f, void *ptr)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- struct smp_ltk *ltk;
|
|
|
-
|
|
|
- rcu_read_lock();
|
|
|
- list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
|
|
|
- seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
|
|
|
- <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
|
|
|
- ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
|
|
|
- __le64_to_cpu(ltk->rand), 16, ltk->val);
|
|
|
- rcu_read_unlock();
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int long_term_keys_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, long_term_keys_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations long_term_keys_fops = {
|
|
|
- .open = long_term_keys_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
-static int conn_min_interval_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->le_conn_min_interval = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int conn_min_interval_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->le_conn_min_interval;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
|
|
|
- conn_min_interval_set, "%llu\n");
|
|
|
-
|
|
|
-static int conn_max_interval_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->le_conn_max_interval = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int conn_max_interval_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->le_conn_max_interval;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
|
|
|
- conn_max_interval_set, "%llu\n");
|
|
|
-
|
|
|
-static int conn_latency_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val > 0x01f3)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->le_conn_latency = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int conn_latency_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->le_conn_latency;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
|
|
|
- conn_latency_set, "%llu\n");
|
|
|
-
|
|
|
-static int supervision_timeout_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val < 0x000a || val > 0x0c80)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->le_supv_timeout = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int supervision_timeout_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->le_supv_timeout;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
|
|
|
- supervision_timeout_set, "%llu\n");
|
|
|
-
|
|
|
-static int adv_channel_map_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val < 0x01 || val > 0x07)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->le_adv_channel_map = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int adv_channel_map_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->le_adv_channel_map;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
|
|
|
- adv_channel_map_set, "%llu\n");
|
|
|
-
|
|
|
-static int adv_min_interval_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->le_adv_min_interval = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int adv_min_interval_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->le_adv_min_interval;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
|
|
|
- adv_min_interval_set, "%llu\n");
|
|
|
-
|
|
|
-static int adv_max_interval_set(void *data, u64 val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- hdev->le_adv_max_interval = val;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int adv_max_interval_get(void *data, u64 *val)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = data;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- *val = hdev->le_adv_max_interval;
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
|
|
|
- adv_max_interval_set, "%llu\n");
|
|
|
-
|
|
|
-static int device_list_show(struct seq_file *f, void *ptr)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = f->private;
|
|
|
- struct hci_conn_params *p;
|
|
|
- struct bdaddr_list *b;
|
|
|
-
|
|
|
- hci_dev_lock(hdev);
|
|
|
- list_for_each_entry(b, &hdev->whitelist, list)
|
|
|
- seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
|
|
|
- list_for_each_entry(p, &hdev->le_conn_params, list) {
|
|
|
- seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
|
|
|
- p->auto_connect);
|
|
|
- }
|
|
|
- hci_dev_unlock(hdev);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static int device_list_open(struct inode *inode, struct file *file)
|
|
|
-{
|
|
|
- return single_open(file, device_list_show, inode->i_private);
|
|
|
-}
|
|
|
-
|
|
|
-static const struct file_operations device_list_fops = {
|
|
|
- .open = device_list_open,
|
|
|
- .read = seq_read,
|
|
|
- .llseek = seq_lseek,
|
|
|
- .release = single_release,
|
|
|
-};
|
|
|
-
|
|
|
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
@@ -1553,10 +623,16 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
if (lmp_le_capable(hdev))
le_setup(req);

- /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
- * local supported commands HCI command.
+ /* All Bluetooth 1.2 and later controllers should support the
+ * HCI command for reading the local supported commands.
+ *
+ * Unfortunately some controllers indicate Bluetooth 1.2 support,
+ * but do not have support for this command. If that is the case,
+ * the driver can quirk the behavior and skip reading the local
+ * supported commands.
*/
- if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
+ if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
+ !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

if (lmp_ssp_capable(hdev)) {
@@ -1735,6 +811,12 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
* Parameter Request
*/

+ /* If the controller supports the Data Length Extension
+ * feature, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
+ events[0] |= 0x40; /* LE Data Length Change */
+
/* If the controller supports Extended Scanner Filter
* Policies, enable the correspondig event.
*/
@@ -1765,6 +847,14 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
}

+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
+ /* Read LE Maximum Data Length */
+ hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
+
+ /* Read LE Suggested Default Data Length */
+ hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
+ }
+
hci_set_le_support(req);
}
@@ -1847,102 +937,13 @@ static int __hci_init(struct hci_dev *hdev)
if (!test_bit(HCI_SETUP, &hdev->dev_flags))
return 0;

- debugfs_create_file("features", 0444, hdev->debugfs, hdev,
- &features_fops);
- debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
- &hdev->manufacturer);
- debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
- debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
- debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
- &device_list_fops);
- debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
- &blacklist_fops);
- debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
-
- debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
- &conn_info_min_age_fops);
- debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
- &conn_info_max_age_fops);
-
- if (lmp_bredr_capable(hdev)) {
- debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
- hdev, &inquiry_cache_fops);
- debugfs_create_file("link_keys", 0400, hdev->debugfs,
- hdev, &link_keys_fops);
- debugfs_create_file("dev_class", 0444, hdev->debugfs,
- hdev, &dev_class_fops);
- debugfs_create_file("voice_setting", 0444, hdev->debugfs,
- hdev, &voice_setting_fops);
- }
+ hci_debugfs_create_common(hdev);

- if (lmp_ssp_capable(hdev)) {
- debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
- hdev, &auto_accept_delay_fops);
- debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
- hdev, &force_sc_support_fops);
- debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
- hdev, &sc_only_mode_fops);
- if (lmp_le_capable(hdev))
- debugfs_create_file("force_lesc_support", 0644,
- hdev->debugfs, hdev,
- &force_lesc_support_fops);
- }
-
- if (lmp_sniff_capable(hdev)) {
- debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
- hdev, &idle_timeout_fops);
- debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
- hdev, &sniff_min_interval_fops);
- debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
- hdev, &sniff_max_interval_fops);
- }
+ if (lmp_bredr_capable(hdev))
+ hci_debugfs_create_bredr(hdev);

if (lmp_le_capable(hdev)) {
- debugfs_create_file("identity", 0400, hdev->debugfs,
- hdev, &identity_fops);
- debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
- hdev, &rpa_timeout_fops);
- debugfs_create_file("random_address", 0444, hdev->debugfs,
- hdev, &random_address_fops);
- debugfs_create_file("static_address", 0444, hdev->debugfs,
- hdev, &static_address_fops);
-
- /* For controllers with a public address, provide a debug
- * option to force the usage of the configured static
- * address. By default the public address is used.
- */
- if (bacmp(&hdev->bdaddr, BDADDR_ANY))
- debugfs_create_file("force_static_address", 0644,
- hdev->debugfs, hdev,
- &force_static_address_fops);
-
- debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
- &hdev->le_white_list_size);
- debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
- &white_list_fops);
- debugfs_create_file("identity_resolving_keys", 0400,
- hdev->debugfs, hdev,
- &identity_resolving_keys_fops);
- debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
- hdev, &long_term_keys_fops);
- debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
- hdev, &conn_min_interval_fops);
- debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
- hdev, &conn_max_interval_fops);
- debugfs_create_file("conn_latency", 0644, hdev->debugfs,
- hdev, &conn_latency_fops);
- debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
- hdev, &supervision_timeout_fops);
- debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
- hdev, &adv_channel_map_fops);
- debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
- hdev, &adv_min_interval_fops);
- debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
- hdev, &adv_max_interval_fops);
- debugfs_create_u16("discov_interleaved_timeout", 0644,
- hdev->debugfs,
- &hdev->discov_interleaved_timeout);
-
+ hci_debugfs_create_le(hdev);
smp_register(hdev);
}
@@ -3654,26 +2655,9 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
params->addr_type == addr_type) {
return params;
}
- }
-
- return NULL;
-}
-
-static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
-{
- struct hci_conn *conn;
-
- conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
- if (!conn)
- return false;
-
- if (conn->dst_type != type)
- return false;
-
- if (conn->state != BT_CONNECTED)
- return false;
+ }

- return true;
+ return NULL;
}

/* This function requires the caller holds hdev->lock */
@@ -3731,47 +2715,6 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
return params;
}
|
|
|
|
|
|
-/* This function requires the caller holds hdev->lock */
|
|
|
-int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
|
|
|
- u8 auto_connect)
|
|
|
-{
|
|
|
- struct hci_conn_params *params;
|
|
|
-
|
|
|
- params = hci_conn_params_add(hdev, addr, addr_type);
|
|
|
- if (!params)
|
|
|
- return -EIO;
|
|
|
-
|
|
|
- if (params->auto_connect == auto_connect)
|
|
|
- return 0;
|
|
|
-
|
|
|
- list_del_init(¶ms->action);
|
|
|
-
|
|
|
- switch (auto_connect) {
|
|
|
- case HCI_AUTO_CONN_DISABLED:
|
|
|
- case HCI_AUTO_CONN_LINK_LOSS:
|
|
|
- hci_update_background_scan(hdev);
|
|
|
- break;
|
|
|
- case HCI_AUTO_CONN_REPORT:
|
|
|
- list_add(¶ms->action, &hdev->pend_le_reports);
|
|
|
- hci_update_background_scan(hdev);
|
|
|
- break;
|
|
|
- case HCI_AUTO_CONN_DIRECT:
|
|
|
- case HCI_AUTO_CONN_ALWAYS:
|
|
|
- if (!is_connected(hdev, addr, addr_type)) {
|
|
|
- list_add(¶ms->action, &hdev->pend_le_conns);
|
|
|
- hci_update_background_scan(hdev);
|
|
|
- }
|
|
|
- break;
|
|
|
- }
|
|
|
-
|
|
|
- params->auto_connect = auto_connect;
|
|
|
-
|
|
|
- BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
|
|
|
- auto_connect);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
static void hci_conn_params_free(struct hci_conn_params *params)
{
if (params->conn) {
@@ -3901,112 +2844,6 @@ static void le_scan_disable_work(struct work_struct *work)
BT_ERR("Disable LE scanning request failed: err %d", err);
}
|
|
|
|
|
|
-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = req->hdev;
|
|
|
-
|
|
|
- /* If we're advertising or initiating an LE connection we can't
|
|
|
- * go ahead and change the random address at this time. This is
|
|
|
- * because the eventual initiator address used for the
|
|
|
- * subsequently created connection will be undefined (some
|
|
|
- * controllers use the new address and others the one we had
|
|
|
- * when the operation started).
|
|
|
- *
|
|
|
- * In this kind of scenario skip the update and let the random
|
|
|
- * address be updated at the next cycle.
|
|
|
- */
|
|
|
- if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
|
|
|
- hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
|
|
|
- BT_DBG("Deferring random address update");
|
|
|
- set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
|
|
|
-}
|
|
|
-
|
|
|
-int hci_update_random_address(struct hci_request *req, bool require_privacy,
|
|
|
- u8 *own_addr_type)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = req->hdev;
|
|
|
- int err;
|
|
|
-
|
|
|
- /* If privacy is enabled use a resolvable private address. If
|
|
|
- * current RPA has expired or there is something else than
|
|
|
- * the current RPA in use, then generate a new one.
|
|
|
- */
|
|
|
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
|
|
|
- int to;
|
|
|
-
|
|
|
- *own_addr_type = ADDR_LE_DEV_RANDOM;
|
|
|
-
|
|
|
- if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
|
|
|
- !bacmp(&hdev->random_addr, &hdev->rpa))
|
|
|
- return 0;
|
|
|
-
|
|
|
- err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
|
|
|
- if (err < 0) {
|
|
|
- BT_ERR("%s failed to generate new RPA", hdev->name);
|
|
|
- return err;
|
|
|
- }
|
|
|
-
|
|
|
- set_random_addr(req, &hdev->rpa);
|
|
|
-
|
|
|
- to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
|
|
|
- queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
|
|
|
-
|
|
|
- return 0;
|
|
|
- }
|
|
|
-
|
|
|
- /* In case of required privacy without resolvable private address,
|
|
|
- * use an non-resolvable private address. This is useful for active
|
|
|
- * scanning and non-connectable advertising.
|
|
|
- */
|
|
|
- if (require_privacy) {
|
|
|
- bdaddr_t nrpa;
|
|
|
-
|
|
|
- while (true) {
|
|
|
- /* The non-resolvable private address is generated
|
|
|
- * from random six bytes with the two most significant
|
|
|
- * bits cleared.
|
|
|
- */
|
|
|
- get_random_bytes(&nrpa, 6);
|
|
|
- nrpa.b[5] &= 0x3f;
|
|
|
-
|
|
|
- /* The non-resolvable private address shall not be
|
|
|
- * equal to the public address.
|
|
|
- */
|
|
|
- if (bacmp(&hdev->bdaddr, &nrpa))
|
|
|
- break;
|
|
|
- }
|
|
|
-
|
|
|
- *own_addr_type = ADDR_LE_DEV_RANDOM;
|
|
|
- set_random_addr(req, &nrpa);
|
|
|
- return 0;
|
|
|
- }
|
|
|
-
|
|
|
- /* If forcing static address is in use or there is no public
|
|
|
- * address use the static address as random address (but skip
|
|
|
- * the HCI command if the current random address is already the
|
|
|
- * static one.
|
|
|
- */
|
|
|
- if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
|
|
|
- !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
|
|
|
- *own_addr_type = ADDR_LE_DEV_RANDOM;
|
|
|
- if (bacmp(&hdev->static_addr, &hdev->random_addr))
|
|
|
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
|
|
|
- &hdev->static_addr);
|
|
|
- return 0;
|
|
|
- }
|
|
|
-
|
|
|
- /* Neither privacy nor static address is being used so use a
|
|
|
- * public address.
|
|
|
- */
|
|
|
- *own_addr_type = ADDR_LE_DEV_PUBLIC;
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
/* Copy the Identity Address of the controller.
*
* If the controller has a public BD_ADDR, then by default use that one.
@@ -4015,12 +2852,18 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
*
* For debugging purposes it is possible to force controllers with a
* public address to use the static random address instead.
+ *
+ * In case BR/EDR has been disabled on a dual-mode controller and
+ * userspace has configured a static address, then that address
+ * becomes the identity address instead of the public BR/EDR address.
*/
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 *bdaddr_type)
{
if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
- !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+ (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
+ bacmp(&hdev->static_addr, BDADDR_ANY))) {
bacpy(bdaddr, &hdev->static_addr);
*bdaddr_type = ADDR_LE_DEV_RANDOM;
} else {
@@ -4059,6 +2902,12 @@ struct hci_dev *hci_alloc_dev(void)
hdev->le_conn_max_interval = 0x0038;
hdev->le_conn_latency = 0x0000;
hdev->le_supv_timeout = 0x002a;
+ hdev->le_def_tx_len = 0x001b;
+ hdev->le_def_tx_time = 0x0148;
+ hdev->le_max_tx_len = 0x001b;
+ hdev->le_max_tx_time = 0x0148;
+ hdev->le_max_rx_len = 0x001b;
+ hdev->le_max_rx_time = 0x0148;

hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
@@ -4539,76 +3388,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
}
}
|
|
|
|
|
|
-void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
|
|
|
-{
|
|
|
- skb_queue_head_init(&req->cmd_q);
|
|
|
- req->hdev = hdev;
|
|
|
- req->err = 0;
|
|
|
-}
|
|
|
-
|
|
|
-int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = req->hdev;
|
|
|
- struct sk_buff *skb;
|
|
|
- unsigned long flags;
|
|
|
-
|
|
|
- BT_DBG("length %u", skb_queue_len(&req->cmd_q));
|
|
|
-
|
|
|
- /* If an error occurred during request building, remove all HCI
|
|
|
- * commands queued on the HCI request queue.
|
|
|
- */
|
|
|
- if (req->err) {
|
|
|
- skb_queue_purge(&req->cmd_q);
|
|
|
- return req->err;
|
|
|
- }
|
|
|
-
|
|
|
- /* Do not allow empty requests */
|
|
|
- if (skb_queue_empty(&req->cmd_q))
|
|
|
- return -ENODATA;
|
|
|
-
|
|
|
- skb = skb_peek_tail(&req->cmd_q);
|
|
|
- bt_cb(skb)->req.complete = complete;
|
|
|
-
|
|
|
- spin_lock_irqsave(&hdev->cmd_q.lock, flags);
|
|
|
- skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
|
|
|
- spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
|
|
|
-
|
|
|
- queue_work(hdev->workqueue, &hdev->cmd_work);
|
|
|
-
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
bool hci_req_pending(struct hci_dev *hdev)
{
return (hdev->req_status == HCI_REQ_PEND);
}
|
|
|
|
|
|
-static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
|
|
|
- u32 plen, const void *param)
|
|
|
-{
|
|
|
- int len = HCI_COMMAND_HDR_SIZE + plen;
|
|
|
- struct hci_command_hdr *hdr;
|
|
|
- struct sk_buff *skb;
|
|
|
-
|
|
|
- skb = bt_skb_alloc(len, GFP_ATOMIC);
|
|
|
- if (!skb)
|
|
|
- return NULL;
|
|
|
-
|
|
|
- hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
|
|
|
- hdr->opcode = cpu_to_le16(opcode);
|
|
|
- hdr->plen = plen;
|
|
|
-
|
|
|
- if (plen)
|
|
|
- memcpy(skb_put(skb, plen), param, plen);
|
|
|
-
|
|
|
- BT_DBG("skb len %d", skb->len);
|
|
|
-
|
|
|
- bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
|
|
|
- bt_cb(skb)->opcode = opcode;
|
|
|
-
|
|
|
- return skb;
|
|
|
-}
|
|
|
-
|
|
|
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
const void *param)
@@ -4634,43 +3418,6 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
return 0;
}
|
|
|
|
|
|
-/* Queue a command to an asynchronous HCI request */
|
|
|
-void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
|
|
|
- const void *param, u8 event)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = req->hdev;
|
|
|
- struct sk_buff *skb;
|
|
|
-
|
|
|
- BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
|
|
|
-
|
|
|
- /* If an error occurred during request building, there is no point in
|
|
|
- * queueing the HCI command. We can simply return.
|
|
|
- */
|
|
|
- if (req->err)
|
|
|
- return;
|
|
|
-
|
|
|
- skb = hci_prepare_cmd(hdev, opcode, plen, param);
|
|
|
- if (!skb) {
|
|
|
- BT_ERR("%s no memory for command (opcode 0x%4.4x)",
|
|
|
- hdev->name, opcode);
|
|
|
- req->err = -ENOMEM;
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- if (skb_queue_empty(&req->cmd_q))
|
|
|
- bt_cb(skb)->req.start = true;
|
|
|
-
|
|
|
- bt_cb(skb)->req.event = event;
|
|
|
-
|
|
|
- skb_queue_tail(&req->cmd_q, skb);
|
|
|
-}
|
|
|
-
|
|
|
-void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
|
|
|
- const void *param)
|
|
|
-{
|
|
|
- hci_req_add_ev(req, opcode, plen, param, 0);
|
|
|
-}
|
|
|
-
|
|
|
/* Get data from the previously sent command */
|
|
|
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
|
|
|
{
|
|
@@ -5518,302 +4265,3 @@ static void hci_cmd_work(struct work_struct *work)
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
-
|
|
|
-void hci_req_add_le_scan_disable(struct hci_request *req)
|
|
|
-{
|
|
|
- struct hci_cp_le_set_scan_enable cp;
|
|
|
-
|
|
|
- memset(&cp, 0, sizeof(cp));
|
|
|
- cp.enable = LE_SCAN_DISABLE;
|
|
|
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
|
|
|
-}
|
|
|
-
|
|
|
-static void add_to_white_list(struct hci_request *req,
|
|
|
- struct hci_conn_params *params)
|
|
|
-{
|
|
|
- struct hci_cp_le_add_to_white_list cp;
|
|
|
-
|
|
|
- cp.bdaddr_type = params->addr_type;
|
|
|
- bacpy(&cp.bdaddr, ¶ms->addr);
|
|
|
-
|
|
|
- hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
|
|
|
-}
|
|
|
-
|
|
|
-static u8 update_white_list(struct hci_request *req)
|
|
|
-{
|
|
|
- struct hci_dev *hdev = req->hdev;
|
|
|
- struct hci_conn_params *params;
|
|
|
- struct bdaddr_list *b;
|
|
|
- uint8_t white_list_entries = 0;
|
|
|
-
|
|
|
- /* Go through the current white list programmed into the
|
|
|
- * controller one by one and check if that address is still
|
|
|
- * in the list of pending connections or list of devices to
|
|
|
- * report. If not present in either list, then queue the
|
|
|
- * command to remove it from the controller.
|
|
|
- */
|
|
|
- list_for_each_entry(b, &hdev->le_white_list, list) {
|
|
|
- struct hci_cp_le_del_from_white_list cp;
|
|
|
-
|
|
|
- if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
|
|
|
- &b->bdaddr, b->bdaddr_type) ||
|
|
|
- hci_pend_le_action_lookup(&hdev->pend_le_reports,
|
|
|
- &b->bdaddr, b->bdaddr_type)) {
|
|
|
- white_list_entries++;
|
|
|
- continue;
|
|
|
- }
|
|
|
-
|
|
|
- cp.bdaddr_type = b->bdaddr_type;
|
|
|
- bacpy(&cp.bdaddr, &b->bdaddr);
|
|
|
-
|
|
|
- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
|
|
|
- sizeof(cp), &cp);
|
|
|
- }
|
|
|
-
|
|
|
- /* Since all no longer valid white list entries have been
|
|
|
- * removed, walk through the list of pending connections
|
|
|
- * and ensure that any new device gets programmed into
|
|
|
- * the controller.
|
|
|
- *
|
|
|
- * If the list of the devices is larger than the list of
|
|
|
- * available white list entries in the controller, then
|
|
|
- * just abort and return filer policy value to not use the
|
|
|
- * white list.
|
|
|
- */
|
|
|
- list_for_each_entry(params, &hdev->pend_le_conns, action) {
|
|
|
- if (hci_bdaddr_list_lookup(&hdev->le_white_list,
|
|
|
- ¶ms->addr, params->addr_type))
|
|
|
- continue;
|
|
|
-
|
|
|
- if (white_list_entries >= hdev->le_white_list_size) {
|
|
|
- /* Select filter policy to accept all advertising */
|
|
|
- return 0x00;
|
|
|
- }
|
|
|
-
|
|
|
- if (hci_find_irk_by_addr(hdev, ¶ms->addr,
|
|
|
- params->addr_type)) {
|
|
|
- /* White list can not be used with RPAs */
|
|
|
- return 0x00;
|
|
|
- }
|
|
|
-
|
|
|
- white_list_entries++;
|
|
|
- add_to_white_list(req, params);
|
|
|
- }
|
|
|
-
|
|
|
- /* After adding all new pending connections, walk through
|
|
|
- * the list of pending reports and also add these to the
|
|
|
- * white list if there is still space.
|
|
|
- */
|
|
|
- list_for_each_entry(params, &hdev->pend_le_reports, action) {
|
|
|
- if (hci_bdaddr_list_lookup(&hdev->le_white_list,
|
|
|
- ¶ms->addr, params->addr_type))
|
|
|
- continue;
|
|
|
-
|
|
|
- if (white_list_entries >= hdev->le_white_list_size) {
|
|
|
- /* Select filter policy to accept all advertising */
|
|
|
- return 0x00;
|
|
|
- }
|
|
|
-
|
|
|
- if (hci_find_irk_by_addr(hdev, ¶ms->addr,
|
|
|
- params->addr_type)) {
|
|
|
- /* White list can not be used with RPAs */
|
|
|
- return 0x00;
|
|
|
- }
|
|
|
-
|
|
|
- white_list_entries++;
|
|
|
- add_to_white_list(req, params);
|
|
|
- }
|
|
|
-
|
|
|
- /* Select filter policy to use white list */
|
|
|
- return 0x01;
|
|
|
-}
|
|
|
-
|
|
|
-void hci_req_add_le_passive_scan(struct hci_request *req)
|
|
|
-{
|
|
|
- struct hci_cp_le_set_scan_param param_cp;
|
|
|
- struct hci_cp_le_set_scan_enable enable_cp;
|
|
|
- struct hci_dev *hdev = req->hdev;
|
|
|
- u8 own_addr_type;
|
|
|
- u8 filter_policy;
|
|
|
-
|
|
|
- /* Set require_privacy to false since no SCAN_REQ are send
|
|
|
- * during passive scanning. Not using an non-resolvable address
|
|
|
- * here is important so that peer devices using direct
|
|
|
- * advertising with our address will be correctly reported
|
|
|
- * by the controller.
|
|
|
- */
|
|
|
- if (hci_update_random_address(req, false, &own_addr_type))
|
|
|
- return;
|
|
|
-
|
|
|
- /* Adding or removing entries from the white list must
|
|
|
- * happen before enabling scanning. The controller does
|
|
|
- * not allow white list modification while scanning.
|
|
|
- */
|
|
|
- filter_policy = update_white_list(req);
|
|
|
-
|
|
|
- /* When the controller is using random resolvable addresses and
|
|
|
- * with that having LE privacy enabled, then controllers with
|
|
|
- * Extended Scanner Filter Policies support can now enable support
|
|
|
- * for handling directed advertising.
|
|
|
- *
|
|
|
- * So instead of using filter polices 0x00 (no whitelist)
|
|
|
- * and 0x01 (whitelist enabled) use the new filter policies
|
|
|
- * 0x02 (no whitelist) and 0x03 (whitelist enabled).
|
|
|
- */
|
|
|
- if (test_bit(HCI_PRIVACY, &hdev->dev_flags) &&
|
|
|
- (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
|
|
|
- filter_policy |= 0x02;
|
|
|
-
|
|
|
- memset(¶m_cp, 0, sizeof(param_cp));
|
|
|
- param_cp.type = LE_SCAN_PASSIVE;
|
|
|
- param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
|
|
|
- param_cp.window = cpu_to_le16(hdev->le_scan_window);
|
|
|
- param_cp.own_address_type = own_addr_type;
|
|
|
- param_cp.filter_policy = filter_policy;
|
|
|
- hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
|
|
|
- ¶m_cp);
|
|
|
-
|
|
|
- memset(&enable_cp, 0, sizeof(enable_cp));
|
|
|
- enable_cp.enable = LE_SCAN_ENABLE;
|
|
|
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
|
|
|
- hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
|
|
|
- &enable_cp);
|
|
|
-}
|
|
|
-
|
|
|
-static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
|
|
|
-{
|
|
|
- if (status)
|
|
|
- BT_DBG("HCI request failed to update background scanning: "
|
|
|
- "status 0x%2.2x", status);
|
|
|
-}
|
|
|
-
|
|
|
-/* This function controls the background scanning based on hdev->pend_le_conns
|
|
|
- * list. If there are pending LE connection we start the background scanning,
|
|
|
- * otherwise we stop it.
|
|
|
- *
|
|
|
- * This function requires the caller holds hdev->lock.
|
|
|
- */
|
|
|
-void hci_update_background_scan(struct hci_dev *hdev)
|
|
|
-{
|
|
|
- struct hci_request req;
|
|
|
- struct hci_conn *conn;
|
|
|
- int err;
|
|
|
-
|
|
|
- if (!test_bit(HCI_UP, &hdev->flags) ||
|
|
|
- test_bit(HCI_INIT, &hdev->flags) ||
|
|
|
- test_bit(HCI_SETUP, &hdev->dev_flags) ||
|
|
|
- test_bit(HCI_CONFIG, &hdev->dev_flags) ||
|
|
|
- test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
|
|
|
- test_bit(HCI_UNREGISTER, &hdev->dev_flags))
|
|
|
- return;
|
|
|
-
|
|
|
- /* No point in doing scanning if LE support hasn't been enabled */
|
|
|
- if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
|
|
|
- return;
|
|
|
-
|
|
|
- /* If discovery is active don't interfere with it */
|
|
|
- if (hdev->discovery.state != DISCOVERY_STOPPED)
|
|
|
- return;
|
|
|
-
|
|
|
- /* Reset RSSI and UUID filters when starting background scanning
|
|
|
- * since these filters are meant for service discovery only.
|
|
|
- *
|
|
|
- * The Start Discovery and Start Service Discovery operations
|
|
|
- * ensure to set proper values for RSSI threshold and UUID
|
|
|
- * filter list. So it is safe to just reset them here.
|
|
|
- */
|
|
|
- hci_discovery_filter_clear(hdev);
|
|
|
-
|
|
|
- hci_req_init(&req, hdev);
|
|
|
-
|
|
|
- if (list_empty(&hdev->pend_le_conns) &&
|
|
|
- list_empty(&hdev->pend_le_reports)) {
|
|
|
- /* If there is no pending LE connections or devices
|
|
|
- * to be scanned for, we should stop the background
|
|
|
- * scanning.
|
|
|
- */
|
|
|
-
|
|
|
- /* If controller is not scanning we are done. */
|
|
|
- if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
|
|
|
- return;
|
|
|
-
|
|
|
- hci_req_add_le_scan_disable(&req);
|
|
|
-
|
|
|
- BT_DBG("%s stopping background scanning", hdev->name);
|
|
|
- } else {
|
|
|
- /* If there is at least one pending LE connection, we should
|
|
|
- * keep the background scan running.
|
|
|
- */
|
|
|
-
|
|
|
- /* If controller is connecting, we should not start scanning
|
|
|
- * since some controllers are not able to scan and connect at
|
|
|
- * the same time.
|
|
|
- */
|
|
|
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
|
|
|
- if (conn)
|
|
|
- return;
|
|
|
-
|
|
|
- /* If controller is currently scanning, we stop it to ensure we
|
|
|
- * don't miss any advertising (due to duplicates filter).
|
|
|
- */
|
|
|
- if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
|
|
|
- hci_req_add_le_scan_disable(&req);
|
|
|
-
|
|
|
- hci_req_add_le_passive_scan(&req);
|
|
|
-
|
|
|
- BT_DBG("%s starting background scanning", hdev->name);
|
|
|
- }
|
|
|
-
|
|
|
- err = hci_req_run(&req, update_background_scan_complete);
|
|
|
- if (err)
|
|
|
- BT_ERR("Failed to run HCI request: err %d", err);
|
|
|
-}
|
|
|
-
|
|
|
-static bool disconnected_whitelist_entries(struct hci_dev *hdev)
|
|
|
-{
|
|
|
- struct bdaddr_list *b;
|
|
|
-
|
|
|
- list_for_each_entry(b, &hdev->whitelist, list) {
|
|
|
- struct hci_conn *conn;
|
|
|
-
|
|
|
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
|
|
|
- if (!conn)
|
|
|
- return true;
|
|
|
-
|
|
|
- if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
|
|
|
- return true;
|
|
|
- }
|
|
|
-
|
|
|
- return false;
|
|
|
-}
|
|
|
-
|
|
|
-void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
|
|
|
-{
|
|
|
- u8 scan;
|
|
|
-
|
|
|
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
|
|
|
- return;
|
|
|
-
|
|
|
- if (!hdev_is_powered(hdev))
|
|
|
- return;
|
|
|
-
|
|
|
- if (mgmt_powering_down(hdev))
|
|
|
- return;
|
|
|
-
|
|
|
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
|
|
|
- disconnected_whitelist_entries(hdev))
|
|
|
- scan = SCAN_PAGE;
|
|
|
- else
|
|
|
- scan = SCAN_DISABLED;
|
|
|
-
|
|
|
- if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
|
|
|
- return;
|
|
|
-
|
|
|
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
|
|
|
- scan |= SCAN_INQUIRY;
|
|
|
-
|
|
|
- if (req)
|
|
|
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
|
|
|
- else
|
|
|
- hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
|
|
|
-}
|