Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2015-09-18

Here's the first bluetooth-next pull request for the 4.4 kernel:

 - ieee802154 cleanups & fixes
 - debugfs support for the at86rf230 driver
 - Support for quirky (seemingly counterfeit) CSR Bluetooth controllers
 - Power management and device config improvements for Intel controllers
 - Fix for devices with incorrect advertising data length
 - Fix for closing HCI user channel socket

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 10 years ago
parent commit 5dcd246107

+ 4 - 4
Documentation/networking/ieee802154.txt

@@ -7,11 +7,11 @@ Introduction
 The IEEE 802.15.4 working group focuses on standardization of bottom
 two layers: Medium Access Control (MAC) and Physical (PHY). And there
 are mainly two options available for upper layers:
- - ZigBee - proprietary protocol from ZigBee Alliance
- - 6LowPAN - IPv6 networking over low rate personal area networks
+ - ZigBee - proprietary protocol from the ZigBee Alliance
+ - 6LoWPAN - IPv6 networking over low rate personal area networks
 
-The Linux-ZigBee project goal is to provide complete implementation
-of IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack
+The linux-wpan project goal is to provide a complete implementation
+of the IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack
 of protocols for organizing Low-Rate Wireless Personal Area Networks.
 
 The stack is composed of three main parts:

+ 8 - 4
drivers/bluetooth/bt3c_cs.c

@@ -453,7 +453,8 @@ static int bt3c_load_firmware(struct bt3c_info *info,
 {
 	char *ptr = (char *) firmware;
 	char b[9];
-	unsigned int iobase, size, addr, fcs, tmp;
+	unsigned int iobase, tmp;
+	unsigned long size, addr, fcs;
 	int i, err = 0;
 
 	iobase = info->p_dev->resource[0]->start;
@@ -478,15 +479,18 @@ static int bt3c_load_firmware(struct bt3c_info *info,
 
 		memset(b, 0, sizeof(b));
 		memcpy(b, ptr + 2, 2);
-		size = simple_strtoul(b, NULL, 16);
+		if (kstrtoul(b, 16, &size) < 0)
+			return -EINVAL;
 
 		memset(b, 0, sizeof(b));
 		memcpy(b, ptr + 4, 8);
-		addr = simple_strtoul(b, NULL, 16);
+		if (kstrtoul(b, 16, &addr) < 0)
+			return -EINVAL;
 
 		memset(b, 0, sizeof(b));
 		memcpy(b, ptr + (size * 2) + 2, 2);
-		fcs = simple_strtoul(b, NULL, 16);
+		if (kstrtoul(b, 16, &fcs) < 0)
+			return -EINVAL;
 
 		memset(b, 0, sizeof(b));
 		for (tmp = 0, i = 0; i < size; i++) {
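
The bt3c_cs.c hunk above swaps simple_strtoul(), which cannot report parse failures, for kstrtoul(), which returns a negative errno on malformed input. A rough, hypothetical sketch of the same pattern in isolation (not part of the patch):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Hypothetical helper: parse a fixed-width hex field copied out of a
 * firmware image.  Unlike simple_strtoul(), which silently returns 0 on
 * garbage, kstrtoul() reports a negative errno the caller can act on.
 */
static int parse_hex_field(const char *src, size_t width, unsigned long *out)
{
	char buf[16] = { 0 };		/* zero-filled, so always NUL-terminated */

	if (width >= sizeof(buf))
		return -EINVAL;

	memcpy(buf, src, width);
	return kstrtoul(buf, 16, out);	/* 0 on success, -EINVAL/-ERANGE on error */
}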

+ 46 - 0
drivers/bluetooth/btintel.c

@@ -22,6 +22,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/firmware.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -169,6 +170,51 @@ int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
 }
 EXPORT_SYMBOL_GPL(btintel_secure_send);
 
+int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
+{
+	const struct firmware *fw;
+	struct sk_buff *skb;
+	const u8 *fw_ptr;
+	int err;
+
+	err = request_firmware_direct(&fw, ddc_name, &hdev->dev);
+	if (err < 0) {
+		bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)",
+			   ddc_name, err);
+		return err;
+	}
+
+	bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name);
+
+	fw_ptr = fw->data;
+
+	/* DDC file contains one or more DDC structure which has
+	 * Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2).
+	 */
+	while (fw->size > fw_ptr - fw->data) {
+		u8 cmd_plen = fw_ptr[0] + sizeof(u8);
+
+		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
+				     HCI_INIT_TIMEOUT);
+		if (IS_ERR(skb)) {
+			bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)",
+				   PTR_ERR(skb));
+			release_firmware(fw);
+			return PTR_ERR(skb);
+		}
+
+		fw_ptr += cmd_plen;
+		kfree_skb(skb);
+	}
+
+	release_firmware(fw);
+
+	bt_dev_info(hdev, "Applying Intel DDC parameters completed");
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
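
The comment in btintel_load_ddc_config() describes the DDC image layout: a sequence of entries, each a one-byte length, a two-byte DDC ID and (length - 2) bytes of value; each whole entry, length byte included, is sent as the payload of the 0xfc8b Intel_Write_DDC command. A hypothetical stand-alone walker over such a buffer could look like the sketch below; the little-endian ID decoding is an assumption, since the driver itself never inspects the entry contents.

#include <linux/types.h>
#include <linux/printk.h>

/* Hypothetical DDC walker, mirroring the loop in btintel_load_ddc_config():
 * entry = [length (1 byte)][DDC ID (2 bytes, assumed LE)][value (length - 2)].
 */
static void ddc_dump(const u8 *data, size_t size)
{
	const u8 *ptr = data;

	while (size > (size_t)(ptr - data)) {
		u8 len = ptr[0];
		u16 id;

		if (len < 2 || ptr + 1 + len > data + size)
			break;				/* malformed entry, stop */

		id = ptr[1] | (ptr[2] << 8);		/* assumed little-endian */
		pr_debug("DDC id 0x%4.4x, %d value byte(s)\n", id, len - 2);

		ptr += len + 1;				/* length byte + payload */
	}
}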

+ 9 - 1
drivers/bluetooth/btintel.h

@@ -78,6 +78,7 @@ void btintel_hw_error(struct hci_dev *hdev, u8 code);
 void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
 int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
 			const void *param);
+int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
 
 #else
 
@@ -95,7 +96,8 @@ static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
 {
 }
 
-static void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+static inline void btintel_version_info(struct hci_dev *hdev,
+					struct intel_version *ver)
 {
 }
 
@@ -105,4 +107,10 @@ static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
 	return -EOPNOTSUPP;
 }
 
+static inline int btintel_load_ddc_config(struct hci_dev *hdev,
+					  const char *ddc_name)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif

+ 0 - 14
drivers/bluetooth/btmrvl_main.c

@@ -377,20 +377,6 @@ static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb)
 		return -EINVAL;
 	}
 
-	if (skb_headroom(skb) < BTM_HEADER_LEN) {
-		struct sk_buff *tmp = skb;
-
-		skb = skb_realloc_headroom(skb, BTM_HEADER_LEN);
-		if (!skb) {
-			BT_ERR("Tx Error: realloc_headroom failed %d",
-				BTM_HEADER_LEN);
-			skb = tmp;
-			return -EINVAL;
-		}
-
-		kfree_skb(tmp);
-	}
-
 	skb_push(skb, BTM_HEADER_LEN);
 
 	/* header type: byte[3]

+ 19 - 32
drivers/bluetooth/btusb.c

@@ -1277,6 +1277,20 @@ static void btusb_work(struct work_struct *work)
 			clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
 			usb_kill_anchored_urbs(&data->isoc_anchor);
 
+			/* When isochronous alternate setting needs to be
+			 * changed, because SCO connection has been added
+			 * or removed, a packet fragment may be left in the
+			 * reassembling state. This could lead to wrongly
+			 * assembled fragments.
+			 *
+			 * Clear outstanding fragment when selecting a new
+			 * alternate setting.
+			 */
+			spin_lock(&data->rxlock);
+			kfree_skb(data->sco_skb);
+			data->sco_skb = NULL;
+			spin_unlock(&data->rxlock);
+
 			if (__set_isoc_interface(hdev, new_alts) < 0)
 				return;
 		}
@@ -1348,7 +1362,9 @@ static int btusb_setup_csr(struct hci_dev *hdev)
 
 	rp = (struct hci_rp_read_local_version *)skb->data;
 
-	if (le16_to_cpu(rp->manufacturer) != 10) {
+	/* Detect controllers which aren't real CSR ones. */
+	if (le16_to_cpu(rp->manufacturer) != 10 ||
+	    le16_to_cpu(rp->lmp_subver) == 0x0c5c) {
 		/* Clear the reset quirk since this is not an actual
 		 * early Bluetooth 1.1 device from CSR.
 		 */
@@ -2217,36 +2233,7 @@ done:
 	 * The device can work without DDC parameters, so even if it fails
 	 * to load the file, no need to fail the setup.
 	 */
-	err = request_firmware_direct(&fw, fwname, &hdev->dev);
-	if (err < 0)
-		return 0;
-
-	BT_INFO("%s: Found Intel DDC parameters: %s", hdev->name, fwname);
-
-	fw_ptr = fw->data;
-
-	/* DDC file contains one or more DDC structure which has
-	 * Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2).
-	 */
-	while (fw->size > fw_ptr - fw->data) {
-		u8 cmd_plen = fw_ptr[0] + sizeof(u8);
-
-		skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr,
-				     HCI_INIT_TIMEOUT);
-		if (IS_ERR(skb)) {
-			BT_ERR("%s: Failed to send Intel_Write_DDC (%ld)",
-			       hdev->name, PTR_ERR(skb));
-			release_firmware(fw);
-			return PTR_ERR(skb);
-		}
-
-		fw_ptr += cmd_plen;
-		kfree_skb(skb);
-	}
-
-	release_firmware(fw);
-
-	BT_INFO("%s: Applying Intel DDC parameters completed", hdev->name);
+	btintel_load_ddc_config(hdev, fwname);
 
 	return 0;
 }
@@ -2782,7 +2769,7 @@ static int btusb_probe(struct usb_interface *intf,
 			set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
 
 		/* Fake CSR devices with broken commands */
-		if (bcdDevice <= 0x100)
+		if (bcdDevice <= 0x100 || bcdDevice == 0x134)
 			hdev->setup = btusb_setup_csr;
 
 		set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);

+ 181 - 41
drivers/bluetooth/hci_bcm.c

@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/gpio/consumer.h>
 #include <linux/tty.h>
+#include <linux/interrupt.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -51,6 +52,8 @@ struct bcm_device {
 	bool			clk_enabled;
 
 	u32			init_speed;
+	int			irq;
+	u8			irq_polarity;
 
 #ifdef CONFIG_PM_SLEEP
 	struct hci_uart		*hu;
@@ -66,7 +69,7 @@ struct bcm_data {
 };
 
 /* List of BCM BT UART devices */
-static DEFINE_SPINLOCK(bcm_device_lock);
+static DEFINE_MUTEX(bcm_device_lock);
 static LIST_HEAD(bcm_device_list);
 
 static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
@@ -80,7 +83,7 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 
 		clock.type = BCM_UART_CLOCK_48MHZ;
 
-		BT_DBG("%s: Set Controller clock (%d)", hdev->name, clock.type);
+		bt_dev_dbg(hdev, "Set Controller clock (%d)", clock.type);
 
 		/* This Broadcom specific command changes the UART's controller
 		 * clock for baud rate > 3000000.
@@ -88,15 +91,15 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 		skb = __hci_cmd_sync(hdev, 0xfc45, 1, &clock, HCI_INIT_TIMEOUT);
 		if (IS_ERR(skb)) {
 			int err = PTR_ERR(skb);
-			BT_ERR("%s: BCM: failed to write clock command (%d)",
-			       hdev->name, err);
+			bt_dev_err(hdev, "BCM: failed to write clock (%d)",
+				   err);
 			return err;
 		}
 
 		kfree_skb(skb);
 	}
 
-	BT_DBG("%s: Set Controller UART speed to %d bit/s", hdev->name, speed);
+	bt_dev_dbg(hdev, "Set Controller UART speed to %d bit/s", speed);
 
 	param.zero = cpu_to_le16(0);
 	param.baud_rate = cpu_to_le32(speed);
@@ -108,8 +111,8 @@ static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 			     HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
 		int err = PTR_ERR(skb);
-		BT_ERR("%s: BCM: failed to write update baudrate command (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "BCM: failed to write update baudrate (%d)",
+			   err);
 		return err;
 	}
 
@@ -149,12 +152,92 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static irqreturn_t bcm_host_wake(int irq, void *data)
+{
+	struct bcm_device *bdev = data;
+
+	bt_dev_dbg(bdev, "Host wake IRQ");
+
+	return IRQ_HANDLED;
+}
+
+static int bcm_request_irq(struct bcm_data *bcm)
+{
+	struct bcm_device *bdev = bcm->dev;
+	int err = 0;
+
+	/* If this is not a platform device, do not enable PM functionalities */
+	mutex_lock(&bcm_device_lock);
+	if (!bcm_device_exists(bdev)) {
+		err = -ENODEV;
+		goto unlock;
+	}
+
+	if (bdev->irq > 0) {
+		err = devm_request_irq(&bdev->pdev->dev, bdev->irq,
+				       bcm_host_wake, IRQF_TRIGGER_RISING,
+				       "host_wake", bdev);
+		if (err)
+			goto unlock;
+
+		device_init_wakeup(&bdev->pdev->dev, true);
+	}
+
+unlock:
+	mutex_unlock(&bcm_device_lock);
+
+	return err;
+}
+
+static const struct bcm_set_sleep_mode default_sleep_params = {
+	.sleep_mode = 1,	/* 0=Disabled, 1=UART, 2=Reserved, 3=USB */
+	.idle_host = 2,		/* idle threshold HOST, in 300ms */
+	.idle_dev = 2,		/* idle threshold device, in 300ms */
+	.bt_wake_active = 1,	/* BT_WAKE active mode: 1 = high, 0 = low */
+	.host_wake_active = 0,	/* HOST_WAKE active mode: 1 = high, 0 = low */
+	.allow_host_sleep = 1,	/* Allow host sleep in SCO flag */
+	.combine_modes = 0,	/* Combine sleep and LPM flag */
+	.tristate_control = 0,	/* Allow tri-state control of UART tx flag */
+	/* Irrelevant USB flags */
+	.usb_auto_sleep = 0,
+	.usb_resume_timeout = 0,
+	.pulsed_host_wake = 0,
+	.break_to_host = 0
+};
+
+static int bcm_setup_sleep(struct hci_uart *hu)
+{
+	struct bcm_data *bcm = hu->priv;
+	struct sk_buff *skb;
+	struct bcm_set_sleep_mode sleep_params = default_sleep_params;
+
+	sleep_params.host_wake_active = !bcm->dev->irq_polarity;
+
+	skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params),
+			     &sleep_params, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		int err = PTR_ERR(skb);
+		bt_dev_err(hu->hdev, "Sleep VSC failed (%d)", err);
+		return err;
+	}
+	kfree_skb(skb);
+
+	bt_dev_dbg(hu->hdev, "Set Sleep Parameters VSC succeeded");
+
+	return 0;
+}
+#else
+static inline int bcm_request_irq(struct bcm_data *bcm) { return 0; }
+static inline int bcm_setup_sleep(struct hci_uart *hu) { return 0; }
+#endif
+
 static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
 	struct list_head *p;
 
-	BT_DBG("hu %p", hu);
+	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
 	bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
 	if (!bcm)
@@ -164,7 +247,7 @@ static int bcm_open(struct hci_uart *hu)
 
 	hu->priv = bcm;
 
-	spin_lock(&bcm_device_lock);
+	mutex_lock(&bcm_device_lock);
 	list_for_each(p, &bcm_device_list) {
 		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
 
@@ -178,14 +261,12 @@ static int bcm_open(struct hci_uart *hu)
 #ifdef CONFIG_PM_SLEEP
 			dev->hu = hu;
 #endif
+			bcm_gpio_set_power(bcm->dev, true);
 			break;
 		}
 	}
 
-	if (bcm->dev)
-		bcm_gpio_set_power(bcm->dev, true);
-
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	return 0;
 }
@@ -193,18 +274,24 @@ static int bcm_open(struct hci_uart *hu)
 static int bcm_close(struct hci_uart *hu)
 {
 	struct bcm_data *bcm = hu->priv;
+	struct bcm_device *bdev = bcm->dev;
 
-	BT_DBG("hu %p", hu);
+	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
 	/* Protect bcm->dev against removal of the device or driver */
-	spin_lock(&bcm_device_lock);
-	if (bcm_device_exists(bcm->dev)) {
-		bcm_gpio_set_power(bcm->dev, false);
+	mutex_lock(&bcm_device_lock);
+	if (bcm_device_exists(bdev)) {
+		bcm_gpio_set_power(bdev, false);
 #ifdef CONFIG_PM_SLEEP
-		bcm->dev->hu = NULL;
+		if (device_can_wakeup(&bdev->pdev->dev)) {
+			devm_free_irq(&bdev->pdev->dev, bdev->irq, bdev);
+			device_init_wakeup(&bdev->pdev->dev, false);
+		}
+
+		bdev->hu = NULL;
 #endif
 	}
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	skb_queue_purge(&bcm->txq);
 	kfree_skb(bcm->rx_skb);
@@ -218,7 +305,7 @@ static int bcm_flush(struct hci_uart *hu)
 {
 	struct bcm_data *bcm = hu->priv;
 
-	BT_DBG("hu %p", hu);
+	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
 	skb_queue_purge(&bcm->txq);
 
@@ -227,12 +314,13 @@ static int bcm_flush(struct hci_uart *hu)
 
 static int bcm_setup(struct hci_uart *hu)
 {
+	struct bcm_data *bcm = hu->priv;
 	char fw_name[64];
 	const struct firmware *fw;
 	unsigned int speed;
 	int err;
 
-	BT_DBG("hu %p", hu);
+	bt_dev_dbg(hu->hdev, "hu %p", hu);
 
 	hu->hdev->set_bdaddr = btbcm_set_bdaddr;
 
@@ -242,13 +330,13 @@ static int bcm_setup(struct hci_uart *hu)
 
 	err = request_firmware(&fw, fw_name, &hu->hdev->dev);
 	if (err < 0) {
-		BT_INFO("%s: BCM: Patch %s not found", hu->hdev->name, fw_name);
+		bt_dev_info(hu->hdev, "BCM: Patch %s not found", fw_name);
 		return 0;
 	}
 
 	err = btbcm_patchram(hu->hdev, fw);
 	if (err) {
-		BT_INFO("%s: BCM: Patch failed (%d)", hu->hdev->name, err);
+		bt_dev_info(hu->hdev, "BCM: Patch failed (%d)", err);
 		goto finalize;
 	}
 
@@ -281,6 +369,12 @@ finalize:
 	release_firmware(fw);
 
 	err = btbcm_finalize(hu->hdev);
+	if (err)
+		return err;
+
+	err = bcm_request_irq(bcm);
+	if (!err)
+		err = bcm_setup_sleep(hu);
 
 	return err;
 }
@@ -302,7 +396,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
 				  bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
 	if (IS_ERR(bcm->rx_skb)) {
 		int err = PTR_ERR(bcm->rx_skb);
-		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
 		bcm->rx_skb = NULL;
 		return err;
 	}
@@ -314,7 +408,7 @@ static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 {
 	struct bcm_data *bcm = hu->priv;
 
-	BT_DBG("hu %p skb %p", hu, skb);
+	bt_dev_dbg(hu->hdev, "hu %p skb %p", hu, skb);
 
 	/* Prepend skb with frame type */
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
@@ -335,10 +429,11 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
 static int bcm_suspend(struct device *dev)
 {
 	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+	int error;
 
-	BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);
+	bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended);
 
-	spin_lock(&bcm_device_lock);
+	mutex_lock(&bcm_device_lock);
 
 	if (!bdev->hu)
 		goto unlock;
@@ -353,12 +448,18 @@ static int bcm_suspend(struct device *dev)
 	/* Suspend the device */
 	if (bdev->device_wakeup) {
 		gpiod_set_value(bdev->device_wakeup, false);
-		BT_DBG("suspend, delaying 15 ms");
+		bt_dev_dbg(bdev, "suspend, delaying 15 ms");
 		mdelay(15);
 	}
 
+	if (device_may_wakeup(&bdev->pdev->dev)) {
+		error = enable_irq_wake(bdev->irq);
+		if (!error)
+			bt_dev_dbg(bdev, "BCM irq: enabled");
+	}
+
 unlock:
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	return 0;
 }
@@ -368,16 +469,21 @@ static int bcm_resume(struct device *dev)
 {
 	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
 
-	BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+	bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended);
 
-	spin_lock(&bcm_device_lock);
+	mutex_lock(&bcm_device_lock);
 
 	if (!bdev->hu)
 		goto unlock;
 
+	if (device_may_wakeup(&bdev->pdev->dev)) {
+		disable_irq_wake(bdev->irq);
+		bt_dev_dbg(bdev, "BCM irq: disabled");
+	}
+
 	if (bdev->device_wakeup) {
 		gpiod_set_value(bdev->device_wakeup, true);
-		BT_DBG("resume, delaying 15 ms");
+		bt_dev_dbg(bdev, "resume, delaying 15 ms");
 		mdelay(15);
 	}
 
@@ -389,7 +495,7 @@ static int bcm_resume(struct device *dev)
 	}
 
 unlock:
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	return 0;
 }
@@ -397,10 +503,12 @@ unlock:
 
 static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
 static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+static const struct acpi_gpio_params host_wakeup_gpios = { 2, 0, false };
 
 static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
 	{ "device-wakeup-gpios", &device_wakeup_gpios, 1 },
 	{ "shutdown-gpios", &shutdown_gpios, 1 },
+	{ "host-wakeup-gpios", &host_wakeup_gpios, 1 },
 	{ },
 };
 
@@ -408,13 +516,30 @@ static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
 static int bcm_resource(struct acpi_resource *ares, void *data)
 {
 	struct bcm_device *dev = data;
-
-	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
-		struct acpi_resource_uart_serialbus *sb;
-
+	struct acpi_resource_extended_irq *irq;
+	struct acpi_resource_gpio *gpio;
+	struct acpi_resource_uart_serialbus *sb;
+
+	switch (ares->type) {
+	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
+		irq = &ares->data.extended_irq;
+		dev->irq_polarity = irq->polarity;
+		break;
+
+	case ACPI_RESOURCE_TYPE_GPIO:
+		gpio = &ares->data.gpio;
+		if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
+			dev->irq_polarity = gpio->polarity;
+		break;
+
+	case ACPI_RESOURCE_TYPE_SERIAL_BUS:
 		sb = &ares->data.uart_serial_bus;
 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
 			dev->init_speed = sb->default_baud_rate;
+		break;
+
+	default:
+		break;
 	}
 
 	/* Always tell the ACPI core to skip this resource */
@@ -453,6 +578,21 @@ static int bcm_acpi_probe(struct bcm_device *dev)
 	if (IS_ERR(dev->shutdown))
 		return PTR_ERR(dev->shutdown);
 
+	/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
+	dev->irq = platform_get_irq(pdev, 0);
+	if (dev->irq <= 0) {
+		struct gpio_desc *gpio;
+
+		gpio = devm_gpiod_get_optional(&pdev->dev, "host-wakeup",
+					       GPIOD_IN);
+		if (IS_ERR(gpio))
+			return PTR_ERR(gpio);
+
+		dev->irq = gpiod_to_irq(gpio);
+	}
+
+	dev_info(&pdev->dev, "BCM irq: %d\n", dev->irq);
+
 	/* Make sure at-least one of the GPIO is defined and that
 	 * a name is specified for this instance
 	 */
@@ -504,9 +644,9 @@ static int bcm_probe(struct platform_device *pdev)
 	dev_info(&pdev->dev, "%s device registered.\n", dev->name);
 
 	/* Place this instance on the device list */
-	spin_lock(&bcm_device_lock);
+	mutex_lock(&bcm_device_lock);
 	list_add_tail(&dev->list, &bcm_device_list);
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	bcm_gpio_set_power(dev, false);
 
@@ -517,9 +657,9 @@ static int bcm_remove(struct platform_device *pdev)
 {
 	struct bcm_device *dev = platform_get_drvdata(pdev);
 
-	spin_lock(&bcm_device_lock);
+	mutex_lock(&bcm_device_lock);
 	list_del(&dev->list);
-	spin_unlock(&bcm_device_lock);
+	mutex_unlock(&bcm_device_lock);
 
 	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
 

+ 522 - 68
drivers/bluetooth/hci_intel.c

@@ -31,6 +31,8 @@
 #include <linux/platform_device.h>
 #include <linux/gpio/consumer.h>
 #include <linux/acpi.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -43,19 +45,45 @@
 #define STATE_FIRMWARE_LOADED	2
 #define STATE_FIRMWARE_FAILED	3
 #define STATE_BOOTING		4
+#define STATE_LPM_ENABLED	5
+#define STATE_TX_ACTIVE		6
+#define STATE_SUSPENDED		7
+#define STATE_LPM_TRANSACTION	8
+
+#define HCI_LPM_WAKE_PKT 0xf0
+#define HCI_LPM_PKT 0xf1
+#define HCI_LPM_MAX_SIZE 10
+#define HCI_LPM_HDR_SIZE HCI_EVENT_HDR_SIZE
+
+#define LPM_OP_TX_NOTIFY 0x00
+#define LPM_OP_SUSPEND_ACK 0x02
+#define LPM_OP_RESUME_ACK 0x03
+
+#define LPM_SUSPEND_DELAY_MS 1000
+
+struct hci_lpm_pkt {
+	__u8 opcode;
+	__u8 dlen;
+	__u8 data[0];
+} __packed;
 
 struct intel_device {
 	struct list_head list;
 	struct platform_device *pdev;
 	struct gpio_desc *reset;
+	struct hci_uart *hu;
+	struct mutex hu_lock;
+	int irq;
 };
 
 static LIST_HEAD(intel_device_list);
-static DEFINE_SPINLOCK(intel_device_list_lock);
+static DEFINE_MUTEX(intel_device_list_lock);
 
 struct intel_data {
 	struct sk_buff *rx_skb;
 	struct sk_buff_head txq;
+	struct work_struct busy_work;
+	struct hci_uart *hu;
 	unsigned long flags;
 };
 
@@ -101,24 +129,185 @@ static int intel_wait_booting(struct hci_uart *hu)
 				  msecs_to_jiffies(1000));
 
 	if (err == 1) {
-		BT_ERR("%s: Device boot interrupted", hu->hdev->name);
+		bt_dev_err(hu->hdev, "Device boot interrupted");
 		return -EINTR;
 	}
 
 	if (err) {
-		BT_ERR("%s: Device boot timeout", hu->hdev->name);
+		bt_dev_err(hu->hdev, "Device boot timeout");
 		return -ETIMEDOUT;
 	}
 
 	return err;
 }
 
+#ifdef CONFIG_PM
+static int intel_wait_lpm_transaction(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+	int err;
+
+	err = wait_on_bit_timeout(&intel->flags, STATE_LPM_TRANSACTION,
+				  TASK_INTERRUPTIBLE,
+				  msecs_to_jiffies(1000));
+
+	if (err == 1) {
+		bt_dev_err(hu->hdev, "LPM transaction interrupted");
+		return -EINTR;
+	}
+
+	if (err) {
+		bt_dev_err(hu->hdev, "LPM transaction timeout");
+		return -ETIMEDOUT;
+	}
+
+	return err;
+}
+
+static int intel_lpm_suspend(struct hci_uart *hu)
+{
+	static const u8 suspend[] = { 0x01, 0x01, 0x01 };
+	struct intel_data *intel = hu->priv;
+	struct sk_buff *skb;
+
+	if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
+	    test_bit(STATE_SUSPENDED, &intel->flags))
+		return 0;
+
+	if (test_bit(STATE_TX_ACTIVE, &intel->flags))
+		return -EAGAIN;
+
+	bt_dev_dbg(hu->hdev, "Suspending");
+
+	skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL);
+	if (!skb) {
+		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+		return -ENOMEM;
+	}
+
+	memcpy(skb_put(skb, sizeof(suspend)), suspend, sizeof(suspend));
+	bt_cb(skb)->pkt_type = HCI_LPM_PKT;
+
+	set_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+	/* LPM flow is a priority, enqueue packet at list head */
+	skb_queue_head(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	intel_wait_lpm_transaction(hu);
+	/* Even in case of failure, continue and test the suspended flag */
+
+	clear_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+	if (!test_bit(STATE_SUSPENDED, &intel->flags)) {
+		bt_dev_err(hu->hdev, "Device suspend error");
+		return -EINVAL;
+	}
+
+	bt_dev_dbg(hu->hdev, "Suspended");
+
+	hci_uart_set_flow_control(hu, true);
+
+	return 0;
+}
+
+static int intel_lpm_resume(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+	struct sk_buff *skb;
+
+	if (!test_bit(STATE_LPM_ENABLED, &intel->flags) ||
+	    !test_bit(STATE_SUSPENDED, &intel->flags))
+		return 0;
+
+	bt_dev_dbg(hu->hdev, "Resuming");
+
+	hci_uart_set_flow_control(hu, false);
+
+	skb = bt_skb_alloc(0, GFP_KERNEL);
+	if (!skb) {
+		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+		return -ENOMEM;
+	}
+
+	bt_cb(skb)->pkt_type = HCI_LPM_WAKE_PKT;
+
+	set_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+	/* LPM flow is a priority, enqueue packet at list head */
+	skb_queue_head(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	intel_wait_lpm_transaction(hu);
+	/* Even in case of failure, continue and test the suspended flag */
+
+	clear_bit(STATE_LPM_TRANSACTION, &intel->flags);
+
+	if (test_bit(STATE_SUSPENDED, &intel->flags)) {
+		bt_dev_err(hu->hdev, "Device resume error");
+		return -EINVAL;
+	}
+
+	bt_dev_dbg(hu->hdev, "Resumed");
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static int intel_lpm_host_wake(struct hci_uart *hu)
+{
+	static const u8 lpm_resume_ack[] = { LPM_OP_RESUME_ACK, 0x00 };
+	struct intel_data *intel = hu->priv;
+	struct sk_buff *skb;
+
+	hci_uart_set_flow_control(hu, false);
+
+	clear_bit(STATE_SUSPENDED, &intel->flags);
+
+	skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL);
+	if (!skb) {
+		bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet");
+		return -ENOMEM;
+	}
+
+	memcpy(skb_put(skb, sizeof(lpm_resume_ack)), lpm_resume_ack,
+	       sizeof(lpm_resume_ack));
+	bt_cb(skb)->pkt_type = HCI_LPM_PKT;
+
+	/* LPM flow is a priority, enqueue packet at list head */
+	skb_queue_head(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	bt_dev_dbg(hu->hdev, "Resumed by controller");
+
+	return 0;
+}
+
+static irqreturn_t intel_irq(int irq, void *dev_id)
+{
+	struct intel_device *idev = dev_id;
+
+	dev_info(&idev->pdev->dev, "hci_intel irq\n");
+
+	mutex_lock(&idev->hu_lock);
+	if (idev->hu)
+		intel_lpm_host_wake(idev->hu);
+	mutex_unlock(&idev->hu_lock);
+
+	/* Host/Controller are now LPM resumed, trigger a new delayed suspend */
+	pm_runtime_get(&idev->pdev->dev);
+	pm_runtime_mark_last_busy(&idev->pdev->dev);
+	pm_runtime_put_autosuspend(&idev->pdev->dev);
+
+	return IRQ_HANDLED;
+}
+
 static int intel_set_power(struct hci_uart *hu, bool powered)
 {
 	struct list_head *p;
 	int err = -ENODEV;
 
-	spin_lock(&intel_device_list_lock);
+	mutex_lock(&intel_device_list_lock);
 
 	list_for_each(p, &intel_device_list) {
 		struct intel_device *idev = list_entry(p, struct intel_device,
@@ -139,13 +328,73 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
 			hu, dev_name(&idev->pdev->dev), powered);
 
 		gpiod_set_value(idev->reset, powered);
+
+		/* Provide to idev a hu reference which is used to run LPM
+		 * transactions (lpm suspend/resume) from PM callbacks.
+		 * hu needs to be protected against concurrent removing during
+		 * these PM ops.
+		 */
+		mutex_lock(&idev->hu_lock);
+		idev->hu = powered ? hu : NULL;
+		mutex_unlock(&idev->hu_lock);
+
+		if (idev->irq < 0)
+			break;
+
+		if (powered && device_can_wakeup(&idev->pdev->dev)) {
+			err = devm_request_threaded_irq(&idev->pdev->dev,
+							idev->irq, NULL,
+							intel_irq,
+							IRQF_ONESHOT,
+							"bt-host-wake", idev);
+			if (err) {
+				BT_ERR("hu %p, unable to allocate irq-%d",
+				       hu, idev->irq);
+				break;
+			}
+
+			device_wakeup_enable(&idev->pdev->dev);
+
+			pm_runtime_set_active(&idev->pdev->dev);
+			pm_runtime_use_autosuspend(&idev->pdev->dev);
+			pm_runtime_set_autosuspend_delay(&idev->pdev->dev,
+							 LPM_SUSPEND_DELAY_MS);
+			pm_runtime_enable(&idev->pdev->dev);
+		} else if (!powered && device_may_wakeup(&idev->pdev->dev)) {
+			devm_free_irq(&idev->pdev->dev, idev->irq, idev);
+			device_wakeup_disable(&idev->pdev->dev);
+
+			pm_runtime_disable(&idev->pdev->dev);
+		}
 	}
 
-	spin_unlock(&intel_device_list_lock);
+	mutex_unlock(&intel_device_list_lock);
 
 	return err;
 }
 
+static void intel_busy_work(struct work_struct *work)
+{
+	struct list_head *p;
+	struct intel_data *intel = container_of(work, struct intel_data,
+						busy_work);
+
+	/* Link is busy, delay the suspend */
+	mutex_lock(&intel_device_list_lock);
+	list_for_each(p, &intel_device_list) {
+		struct intel_device *idev = list_entry(p, struct intel_device,
+						       list);
+
+		if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
+			pm_runtime_get(&idev->pdev->dev);
+			pm_runtime_mark_last_busy(&idev->pdev->dev);
+			pm_runtime_put_autosuspend(&idev->pdev->dev);
+			break;
+		}
+	}
+	mutex_unlock(&intel_device_list_lock);
+}
+
 static int intel_open(struct hci_uart *hu)
 {
 	struct intel_data *intel;
@@ -157,6 +406,9 @@ static int intel_open(struct hci_uart *hu)
 		return -ENOMEM;
 
 	skb_queue_head_init(&intel->txq);
+	INIT_WORK(&intel->busy_work, intel_busy_work);
+
+	intel->hu = hu;
 
 	hu->priv = intel;
 
@@ -172,6 +424,8 @@ static int intel_close(struct hci_uart *hu)
 
 	BT_DBG("hu %p", hu);
 
+	cancel_work_sync(&intel->busy_work);
+
 	intel_set_power(hu, false);
 
 	skb_queue_purge(&intel->txq);
@@ -237,11 +491,11 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
 	if (err && err != ETIMEDOUT)
 		return err;
 
-	BT_INFO("%s: Change controller speed to %d", hdev->name, speed);
+	bt_dev_info(hdev, "Change controller speed to %d", speed);
 
 	speed_cmd[3] = intel_convert_speed(speed);
 	if (speed_cmd[3] == 0xff) {
-		BT_ERR("%s: Unsupported speed", hdev->name);
+		bt_dev_err(hdev, "Unsupported speed");
 		return -EINVAL;
 	}
 
@@ -250,16 +504,15 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
 	 */
 	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reading Intel version information failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
+		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
+			   PTR_ERR(skb));
 		return PTR_ERR(skb);
 	}
 	kfree_skb(skb);
 
 	skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
 	if (!skb) {
-		BT_ERR("%s: Failed to allocate memory for baudrate packet",
-		       hdev->name);
+		bt_dev_err(hdev, "Failed to alloc memory for baudrate packet");
 		return -ENOMEM;
 	}
 
@@ -284,11 +537,14 @@ static int intel_setup(struct hci_uart *hu)
 {
 	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
 					  0x00, 0x08, 0x04, 0x00 };
+	static const u8 lpm_param[] = { 0x03, 0x07, 0x01, 0x0b };
 	struct intel_data *intel = hu->priv;
+	struct intel_device *idev = NULL;
 	struct hci_dev *hdev = hu->hdev;
 	struct sk_buff *skb;
 	struct intel_version *ver;
 	struct intel_boot_params *params;
+	struct list_head *p;
 	const struct firmware *fw;
 	const u8 *fw_ptr;
 	char fwname[64];
@@ -299,7 +555,7 @@ static int intel_setup(struct hci_uart *hu)
 	int speed_change = 0;
 	int err;
 
-	BT_DBG("%s", hdev->name);
+	bt_dev_dbg(hdev, "start intel_setup");
 
 	hu->hdev->set_bdaddr = btintel_set_bdaddr;
 
@@ -335,21 +591,21 @@ static int intel_setup(struct hci_uart *hu)
 	 */
 	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reading Intel version information failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
+		bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
+			   PTR_ERR(skb));
 		return PTR_ERR(skb);
 	}
 
 	if (skb->len != sizeof(*ver)) {
-		BT_ERR("%s: Intel version event size mismatch", hdev->name);
+		bt_dev_err(hdev, "Intel version event size mismatch");
 		kfree_skb(skb);
 		return -EILSEQ;
 	}
 
 	ver = (struct intel_version *)skb->data;
 	if (ver->status) {
-		BT_ERR("%s: Intel version command failure (%02x)",
-		       hdev->name, ver->status);
+		bt_dev_err(hdev, "Intel version command failure (%02x)",
+			   ver->status);
 		err = -bt_to_errno(ver->status);
 		kfree_skb(skb);
 		return err;
@@ -359,8 +615,8 @@ static int intel_setup(struct hci_uart *hu)
 	 * for now only accept this single value.
 	 */
 	if (ver->hw_platform != 0x37) {
-		BT_ERR("%s: Unsupported Intel hardware platform (%u)",
-		       hdev->name, ver->hw_platform);
+		bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)",
+			   ver->hw_platform);
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -371,8 +627,8 @@ static int intel_setup(struct hci_uart *hu)
 	 * when newer hardware variants come along.
 	 */
 	if (ver->hw_variant != 0x0b) {
-		BT_ERR("%s: Unsupported Intel hardware variant (%u)",
-		       hdev->name, ver->hw_variant);
+		bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
+			   ver->hw_variant);
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -403,8 +659,8 @@ static int intel_setup(struct hci_uart *hu)
 	 * choice is to return an error and abort the device initialization.
 	 */
 	if (ver->fw_variant != 0x06) {
-		BT_ERR("%s: Unsupported Intel firmware variant (%u)",
-		       hdev->name, ver->fw_variant);
+		bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)",
+			   ver->fw_variant);
 		kfree_skb(skb);
 		return -ENODEV;
 	}
@@ -416,33 +672,33 @@ static int intel_setup(struct hci_uart *hu)
 	 */
 	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
 	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
+		bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)",
+			   PTR_ERR(skb));
 		return PTR_ERR(skb);
 	}
 
 	if (skb->len != sizeof(*params)) {
-		BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+		bt_dev_err(hdev, "Intel boot parameters size mismatch");
 		kfree_skb(skb);
 		return -EILSEQ;
 	}
 
 	params = (struct intel_boot_params *)skb->data;
 	if (params->status) {
-		BT_ERR("%s: Intel boot parameters command failure (%02x)",
-		       hdev->name, params->status);
+		bt_dev_err(hdev, "Intel boot parameters command failure (%02x)",
+			   params->status);
 		err = -bt_to_errno(params->status);
 		kfree_skb(skb);
 		return err;
 	}
 
-	BT_INFO("%s: Device revision is %u", hdev->name,
-		le16_to_cpu(params->dev_revid));
+	bt_dev_info(hdev, "Device revision is %u",
+		    le16_to_cpu(params->dev_revid));
 
-	BT_INFO("%s: Secure boot is %s", hdev->name,
-		params->secure_boot ? "enabled" : "disabled");
+	bt_dev_info(hdev, "Secure boot is %s",
+		    params->secure_boot ? "enabled" : "disabled");
 
-	BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+	bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
 		params->min_fw_build_nn, params->min_fw_build_cw,
 		2000 + params->min_fw_build_yy);
 
@@ -451,8 +707,8 @@ static int intel_setup(struct hci_uart *hu)
 	 * that this bootloader does not send them, then abort the setup.
 	 */
 	if (params->limited_cce != 0x00) {
-		BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
-		       hdev->name, params->limited_cce);
+		bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)",
+			   params->limited_cce);
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -461,7 +717,7 @@ static int intel_setup(struct hci_uart *hu)
 	 * also be no valid address for the operational firmware.
 	 */
 	if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
-		BT_INFO("%s: No device address configured", hdev->name);
+		bt_dev_info(hdev, "No device address configured");
 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
 	}
 
@@ -476,19 +732,23 @@ static int intel_setup(struct hci_uart *hu)
 
 	err = request_firmware(&fw, fwname, &hdev->dev);
 	if (err < 0) {
-		BT_ERR("%s: Failed to load Intel firmware file (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "Failed to load Intel firmware file (%d)",
+			   err);
 		kfree_skb(skb);
 		return err;
 	}
 
-	BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+	bt_dev_info(hdev, "Found device firmware: %s", fwname);
+
+	/* Save the DDC file name for later */
+	snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.ddc",
+		 le16_to_cpu(params->dev_revid));
 
 	kfree_skb(skb);
 
 	if (fw->size < 644) {
-		BT_ERR("%s: Invalid size of firmware file (%zu)",
-		       hdev->name, fw->size);
+		bt_dev_err(hdev, "Invalid size of firmware file (%zu)",
+			   fw->size);
 		err = -EBADF;
 		goto done;
 	}
@@ -500,8 +760,7 @@ static int intel_setup(struct hci_uart *hu)
 	 */
 	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
 	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware header (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
 		goto done;
 	}
 
@@ -510,8 +769,8 @@ static int intel_setup(struct hci_uart *hu)
 	 */
 	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
 	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware public key (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "Failed to send firmware public key (%d)",
+			   err);
 		goto done;
 	}
 
@@ -520,8 +779,8 @@ static int intel_setup(struct hci_uart *hu)
 	 */
 	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
 	if (err < 0) {
-		BT_ERR("%s: Failed to send firmware signature (%d)",
-		       hdev->name, err);
+		bt_dev_err(hdev, "Failed to send firmware signature (%d)",
+			   err);
 		goto done;
 	}
 
@@ -533,8 +792,8 @@ static int intel_setup(struct hci_uart *hu)
 
 		frag_len += sizeof(*cmd) + cmd->plen;
 
-		BT_DBG("%s: patching %td/%zu", hdev->name,
-		       (fw_ptr - fw->data), fw->size);
+		bt_dev_dbg(hdev, "Patching %td/%zu", (fw_ptr - fw->data),
+			   fw->size);
 
 		/* The parameter length of the secure send command requires
 		 * a 4 byte alignment. It happens so that the firmware file
@@ -552,8 +811,8 @@ static int intel_setup(struct hci_uart *hu)
 		 */
 		err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
 		if (err < 0) {
-			BT_ERR("%s: Failed to send firmware data (%d)",
-			       hdev->name, err);
+			bt_dev_err(hdev, "Failed to send firmware data (%d)",
+				   err);
 			goto done;
 		}
 
@@ -563,7 +822,7 @@ static int intel_setup(struct hci_uart *hu)
 
 	set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
 
-	BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+	bt_dev_info(hdev, "Waiting for firmware download to complete");
 
 	/* Before switching the device into operational mode and with that
 	 * booting the loaded firmware, wait for the bootloader notification
@@ -580,19 +839,19 @@ static int intel_setup(struct hci_uart *hu)
 				  TASK_INTERRUPTIBLE,
 				  msecs_to_jiffies(5000));
 	if (err == 1) {
-		BT_ERR("%s: Firmware loading interrupted", hdev->name);
+		bt_dev_err(hdev, "Firmware loading interrupted");
 		err = -EINTR;
 		goto done;
 	}
 
 	if (err) {
-		BT_ERR("%s: Firmware loading timeout", hdev->name);
+		bt_dev_err(hdev, "Firmware loading timeout");
 		err = -ETIMEDOUT;
 		goto done;
 	}
 
 	if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
-		BT_ERR("%s: Firmware loading failed", hdev->name);
+		bt_dev_err(hdev, "Firmware loading failed");
 		err = -ENOEXEC;
 		goto done;
 	}
@@ -601,7 +860,7 @@ static int intel_setup(struct hci_uart *hu)
 	delta = ktime_sub(rettime, calltime);
 	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 
-	BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+	bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration);
 
 done:
 	release_firmware(fw);
@@ -634,7 +893,7 @@ done:
 	 * 1 second. However if that happens, then just fail the setup
 	 * since something went wrong.
 	 */
-	BT_INFO("%s: Waiting for device to boot", hdev->name);
+	bt_dev_info(hdev, "Waiting for device to boot");
 
 	err = intel_wait_booting(hu);
 	if (err)
@@ -646,7 +905,39 @@ done:
 	delta = ktime_sub(rettime, calltime);
 	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
 
-	BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+	bt_dev_info(hdev, "Device booted in %llu usecs", duration);
+
+	/* Enable LPM if matching pdev with wakeup enabled */
+	mutex_lock(&intel_device_list_lock);
+	list_for_each(p, &intel_device_list) {
+		struct intel_device *dev = list_entry(p, struct intel_device,
+						      list);
+		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+			if (device_may_wakeup(&dev->pdev->dev))
+				idev = dev;
+			break;
+		}
+	}
+	mutex_unlock(&intel_device_list_lock);
+
+	if (!idev)
+		goto no_lpm;
+
+	bt_dev_info(hdev, "Enabling LPM");
+
+	skb = __hci_cmd_sync(hdev, 0xfc8b, sizeof(lpm_param), lpm_param,
+			     HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb)) {
+		bt_dev_err(hdev, "Failed to enable LPM");
+		goto no_lpm;
+	}
+	kfree_skb(skb);
+
+	set_bit(STATE_LPM_ENABLED, &intel->flags);
+
+no_lpm:
+	/* Ignore errors, device can work without DDC parameters */
+	btintel_load_ddc_config(hdev, fwname);
 
 	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 	if (IS_ERR(skb))
@@ -659,7 +950,7 @@ done:
 			return err;
 	}
 
-	BT_INFO("%s: Setup complete", hdev->name);
+	bt_dev_info(hdev, "Setup complete");
 
 	clear_bit(STATE_BOOTLOADER, &intel->flags);
 
@@ -708,10 +999,71 @@ recv:
 	return hci_recv_frame(hdev, skb);
 }
 
+static void intel_recv_lpm_notify(struct hci_dev *hdev, int value)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct intel_data *intel = hu->priv;
+
+	bt_dev_dbg(hdev, "TX idle notification (%d)", value);
+
+	if (value) {
+		set_bit(STATE_TX_ACTIVE, &intel->flags);
+		schedule_work(&intel->busy_work);
+	} else {
+		clear_bit(STATE_TX_ACTIVE, &intel->flags);
+	}
+}
+
+static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_lpm_pkt *lpm = (void *)skb->data;
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct intel_data *intel = hu->priv;
+
+	switch (lpm->opcode) {
+	case LPM_OP_TX_NOTIFY:
+		if (lpm->dlen < 1) {
+			bt_dev_err(hu->hdev, "Invalid LPM notification packet");
+			break;
+		}
+		intel_recv_lpm_notify(hdev, lpm->data[0]);
+		break;
+	case LPM_OP_SUSPEND_ACK:
+		set_bit(STATE_SUSPENDED, &intel->flags);
+		if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
+			smp_mb__after_atomic();
+			wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
+		}
+		break;
+	case LPM_OP_RESUME_ACK:
+		clear_bit(STATE_SUSPENDED, &intel->flags);
+		if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
+			smp_mb__after_atomic();
+			wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
+		}
+		break;
+	default:
+		bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode);
+		break;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+
+#define INTEL_RECV_LPM \
+	.type = HCI_LPM_PKT, \
+	.hlen = HCI_LPM_HDR_SIZE, \
+	.loff = 1, \
+	.lsize = 1, \
+	.maxlen = HCI_LPM_MAX_SIZE
+
 static const struct h4_recv_pkt intel_recv_pkts[] = {
-	{ H4_RECV_ACL,   .recv = hci_recv_frame },
-	{ H4_RECV_SCO,   .recv = hci_recv_frame },
-	{ H4_RECV_EVENT, .recv = intel_recv_event },
+	{ H4_RECV_ACL,    .recv = hci_recv_frame   },
+	{ H4_RECV_SCO,    .recv = hci_recv_frame   },
+	{ H4_RECV_EVENT,  .recv = intel_recv_event },
+	{ INTEL_RECV_LPM, .recv = intel_recv_lpm   },
 };
 
 static int intel_recv(struct hci_uart *hu, const void *data, int count)
@@ -726,7 +1078,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
 				    ARRAY_SIZE(intel_recv_pkts));
 	if (IS_ERR(intel->rx_skb)) {
 		int err = PTR_ERR(intel->rx_skb);
-		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
 		intel->rx_skb = NULL;
 		return err;
 	}
@@ -737,9 +1089,27 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
 static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 {
 	struct intel_data *intel = hu->priv;
+	struct list_head *p;
 
 	BT_DBG("hu %p skb %p", hu, skb);
 
+	/* Be sure our controller is resumed and potential LPM transaction
+	 * completed before enqueuing any packet.
+	 */
+	mutex_lock(&intel_device_list_lock);
+	list_for_each(p, &intel_device_list) {
+		struct intel_device *idev = list_entry(p, struct intel_device,
+						       list);
+
+		if (hu->tty->dev->parent == idev->pdev->dev.parent) {
+			pm_runtime_get_sync(&idev->pdev->dev);
+			pm_runtime_mark_last_busy(&idev->pdev->dev);
+			pm_runtime_put_autosuspend(&idev->pdev->dev);
+			break;
+		}
+	}
+	mutex_unlock(&intel_device_list_lock);
+
 	skb_queue_tail(&intel->txq, skb);
 
 	return 0;
@@ -813,6 +1183,59 @@ static int intel_acpi_probe(struct intel_device *idev)
 }
 #endif
 
+#ifdef CONFIG_PM
+static int intel_suspend_device(struct device *dev)
+{
+	struct intel_device *idev = dev_get_drvdata(dev);
+
+	mutex_lock(&idev->hu_lock);
+	if (idev->hu)
+		intel_lpm_suspend(idev->hu);
+	mutex_unlock(&idev->hu_lock);
+
+	return 0;
+}
+
+static int intel_resume_device(struct device *dev)
+{
+	struct intel_device *idev = dev_get_drvdata(dev);
+
+	mutex_lock(&idev->hu_lock);
+	if (idev->hu)
+		intel_lpm_resume(idev->hu);
+	mutex_unlock(&idev->hu_lock);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int intel_suspend(struct device *dev)
+{
+	struct intel_device *idev = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		enable_irq_wake(idev->irq);
+
+	return intel_suspend_device(dev);
+}
+
+static int intel_resume(struct device *dev)
+{
+	struct intel_device *idev = dev_get_drvdata(dev);
+
+	if (device_may_wakeup(dev))
+		disable_irq_wake(idev->irq);
+
+	return intel_resume_device(dev);
+}
+#endif
+
+static const struct dev_pm_ops intel_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume)
+	SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL)
+};
+
 static int intel_probe(struct platform_device *pdev)
 {
 	struct intel_device *idev;
@@ -821,6 +1244,8 @@ static int intel_probe(struct platform_device *pdev)
 	if (!idev)
 		return -ENOMEM;
 
+	mutex_init(&idev->hu_lock);
+
 	idev->pdev = pdev;
 
 	if (ACPI_HANDLE(&pdev->dev)) {
@@ -838,14 +1263,40 @@ static int intel_probe(struct platform_device *pdev)
 		return PTR_ERR(idev->reset);
 	}
 
+	idev->irq = platform_get_irq(pdev, 0);
+	if (idev->irq < 0) {
+		struct gpio_desc *host_wake;
+
+		dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
+
+		host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
+						    GPIOD_IN);
+		if (IS_ERR(host_wake)) {
+			dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
+			goto no_irq;
+		}
+
+		idev->irq = gpiod_to_irq(host_wake);
+		if (idev->irq < 0) {
+			dev_err(&pdev->dev, "No corresponding irq for gpio\n");
+			goto no_irq;
+		}
+	}
+
+	/* Only enable wake-up/irq when controller is powered */
+	device_set_wakeup_capable(&pdev->dev, true);
+	device_wakeup_disable(&pdev->dev);
+
+no_irq:
 	platform_set_drvdata(pdev, idev);
 
 	/* Place this instance on the device list */
-	spin_lock(&intel_device_list_lock);
+	mutex_lock(&intel_device_list_lock);
 	list_add_tail(&idev->list, &intel_device_list);
-	spin_unlock(&intel_device_list_lock);
+	mutex_unlock(&intel_device_list_lock);
 
-	dev_info(&pdev->dev, "registered.\n");
+	dev_info(&pdev->dev, "registered, gpio(%d)/irq(%d).\n",
+		 desc_to_gpio(idev->reset), idev->irq);
 
 	return 0;
 }
@@ -854,9 +1305,11 @@ static int intel_remove(struct platform_device *pdev)
 {
 	struct intel_device *idev = platform_get_drvdata(pdev);
 
-	spin_lock(&intel_device_list_lock);
+	device_wakeup_disable(&pdev->dev);
+
+	mutex_lock(&intel_device_list_lock);
 	list_del(&idev->list);
-	spin_unlock(&intel_device_list_lock);
+	mutex_unlock(&intel_device_list_lock);
 
 	dev_info(&pdev->dev, "unregistered.\n");
 
@@ -869,6 +1322,7 @@ static struct platform_driver intel_driver = {
 	.driver = {
 		.name = "hci_intel",
 		.acpi_match_table = ACPI_PTR(intel_acpi_match),
+		.pm = &intel_pm_ops,
 	},
 };
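
The hci_intel.c changes above introduce a small vendor LPM framing (struct hci_lpm_pkt): an opcode byte, a payload-length byte and dlen bytes of payload, carried with the new packet types 0xf1 (LPM) and 0xf0 (zero-length wake). A hypothetical stand-alone decoder mirroring intel_recv_lpm(), for illustration only:

#include <linux/types.h>
#include <linux/printk.h>

#define LPM_OP_TX_NOTIFY	0x00
#define LPM_OP_SUSPEND_ACK	0x02
#define LPM_OP_RESUME_ACK	0x03

/* Hypothetical decoder for the LPM framing shown above:
 * byte 0 = opcode, byte 1 = payload length, followed by the payload.
 */
static void lpm_decode(const u8 *buf, size_t len)
{
	if (len < 2 || len < 2 + (size_t)buf[1]) {
		pr_warn("truncated LPM packet\n");
		return;
	}

	switch (buf[0]) {
	case LPM_OP_TX_NOTIFY:
		if (buf[1] < 1) {
			pr_warn("invalid LPM notification packet\n");
			break;
		}
		/* payload byte 0: non-zero = controller TX active, 0 = idle */
		pr_debug("TX idle notification (%u)\n", buf[2]);
		break;
	case LPM_OP_SUSPEND_ACK:
		pr_debug("controller acknowledged suspend\n");
		break;
	case LPM_OP_RESUME_ACK:
		pr_debug("controller acknowledged resume\n");
		break;
	default:
		pr_warn("unknown LPM opcode 0x%2.2x\n", buf[0]);
		break;
	}
}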
 

+ 6 - 6
drivers/bluetooth/hci_qca.c

@@ -41,13 +41,13 @@
 #define HCI_IBS_SLEEP_IND	0xFE
 #define HCI_IBS_WAKE_IND	0xFD
 #define HCI_IBS_WAKE_ACK	0xFC
-#define HCI_MAX_IBS_SIZE 	10
+#define HCI_MAX_IBS_SIZE	10
 
 /* Controller states */
 #define STATE_IN_BAND_SLEEP_ENABLED	1
 
-#define IBS_WAKE_RETRANS_TIMEOUT_MS 	100
-#define IBS_TX_IDLE_TIMEOUT_MS 		2000
+#define IBS_WAKE_RETRANS_TIMEOUT_MS	100
+#define IBS_TX_IDLE_TIMEOUT_MS		2000
 #define BAUDRATE_SETTLE_TIMEOUT_MS	300
 
 /* HCI_IBS transmit side sleep protocol states */
@@ -181,8 +181,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
 		else
 			__serial_clock_off(hu->tty);
 
-		BT_DBG("Vote serial clock %s(%s)", new_vote? "true" : "false",
-		       vote? "true" : "false");
+		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
+		       vote ? "true" : "false");
 
 		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
 
@@ -821,7 +821,7 @@ static struct sk_buff *qca_dequeue(struct hci_uart *hu)
 
 static uint8_t qca_get_baudrate_value(int speed)
 {
-	switch(speed) {
+	switch (speed) {
 	case 9600:
 		return QCA_BAUDRATE_9600;
 	case 19200:

+ 7 - 0
drivers/net/ieee802154/Kconfig

@@ -32,6 +32,13 @@ config IEEE802154_AT86RF230
 	  This driver can also be built as a module. To do so, say M here.
 	  the module will be called 'at86rf230'.
 
+config IEEE802154_AT86RF230_DEBUGFS
+	depends on IEEE802154_AT86RF230
+	bool "AT86RF230 debugfs interface"
+	depends on DEBUG_FS
+	---help---
+	  This option compiles debugfs code for the at86rf230 driver.
+
 config IEEE802154_MRF24J40
 	tristate "Microchip MRF24J40 transceiver driver"
 	depends on IEEE802154_DRIVERS && MAC802154

+ 148 - 47
drivers/net/ieee802154/at86rf230.c

@@ -31,6 +31,7 @@
 #include <linux/skbuff.h>
 #include <linux/of_gpio.h>
 #include <linux/ieee802154.h>
+#include <linux/debugfs.h>
 
 #include <net/mac802154.h>
 #include <net/cfg802154.h>
@@ -83,6 +84,15 @@ struct at86rf230_state_change {
 	bool irq_enable;
 };
 
+struct at86rf230_trac {
+	u64 success;
+	u64 success_data_pending;
+	u64 success_wait_for_ack;
+	u64 channel_access_failure;
+	u64 no_ack;
+	u64 invalid;
+};
+
 struct at86rf230_local {
 	struct spi_device *spi;
 
@@ -103,6 +113,8 @@ struct at86rf230_local {
 	u8 tx_retry;
 	struct sk_buff *tx_skb;
 	struct at86rf230_state_change tx;
+
+	struct at86rf230_trac trac;
 };
 
 #define AT86RF2XX_NUMREGS 0x3F
@@ -377,14 +389,6 @@ at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
 	}
 }
 
-static inline u8 at86rf230_state_to_force(u8 state)
-{
-	if (state == STATE_TX_ON)
-		return STATE_FORCE_TX_ON;
-	else
-		return STATE_FORCE_TRX_OFF;
-}
-
 static void
 at86rf230_async_state_assert(void *context)
 {
@@ -426,7 +430,7 @@ at86rf230_async_state_assert(void *context)
 				u8 state = ctx->to_state;
 
 				if (lp->tx_retry >= AT86RF2XX_MAX_TX_RETRIES)
-					state = at86rf230_state_to_force(state);
+					state = STATE_FORCE_TRX_OFF;
 				lp->tx_retry++;
 
 				at86rf230_async_state_change(lp, ctx, state,
@@ -667,28 +671,34 @@ at86rf230_tx_trac_check(void *context)
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
-	const u8 *buf = ctx->buf;
-	const u8 trac = (buf[1] & 0xe0) >> 5;
 
-	/* If trac status is different than zero we need to do a state change
-	 * to STATE_FORCE_TRX_OFF then STATE_RX_AACK_ON to recover the
-	 * transceiver.
-	 */
-	if (trac)
-		at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
-					     at86rf230_tx_on, true);
-	else
-		at86rf230_tx_on(context);
-}
+	if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
+		u8 trac = TRAC_MASK(ctx->buf[1]);
 
-static void
-at86rf230_tx_trac_status(void *context)
-{
-	struct at86rf230_state_change *ctx = context;
-	struct at86rf230_local *lp = ctx->lp;
+		switch (trac) {
+		case TRAC_SUCCESS:
+			lp->trac.success++;
+			break;
+		case TRAC_SUCCESS_DATA_PENDING:
+			lp->trac.success_data_pending++;
+			break;
+		case TRAC_CHANNEL_ACCESS_FAILURE:
+			lp->trac.channel_access_failure++;
+			break;
+		case TRAC_NO_ACK:
+			lp->trac.no_ack++;
+			break;
+		case TRAC_INVALID:
+			lp->trac.invalid++;
+			break;
+		default:
+			WARN_ONCE(1, "received tx trac status %d\n", trac);
+			break;
+		}
+	}
 
-	at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
-				 at86rf230_tx_trac_check, true);
+	at86rf230_async_state_change(lp, &lp->irq, STATE_TX_ON,
+				     at86rf230_tx_on, true);
 }
 
 static void
@@ -723,13 +733,32 @@ at86rf230_rx_read_frame_complete(void *context)
 }
 
 static void
-at86rf230_rx_read_frame(void *context)
+at86rf230_rx_trac_check(void *context)
 {
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 	u8 *buf = ctx->buf;
 	int rc;
 
+	if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS)) {
+		u8 trac = TRAC_MASK(buf[1]);
+
+		switch (trac) {
+		case TRAC_SUCCESS:
+			lp->trac.success++;
+			break;
+		case TRAC_SUCCESS_WAIT_FOR_ACK:
+			lp->trac.success_wait_for_ack++;
+			break;
+		case TRAC_INVALID:
+			lp->trac.invalid++;
+			break;
+		default:
+			WARN_ONCE(1, "received rx trac status %d\n", trac);
+			break;
+		}
+	}
+
 	buf[0] = CMD_FB;
 	ctx->trx.len = AT86RF2XX_MAX_BUF;
 	ctx->msg.complete = at86rf230_rx_read_frame_complete;
@@ -741,27 +770,13 @@ at86rf230_rx_read_frame(void *context)
 	}
 }
 
-static void
-at86rf230_rx_trac_check(void *context)
-{
-	/* Possible check on trac status here. This could be useful to make
-	 * some stats why receive is failed. Not used at the moment, but it's
-	 * maybe timing relevant. Datasheet doesn't say anything about this.
-	 * The programming guide say do it so.
-	 */
-
-	at86rf230_rx_read_frame(context);
-}
-
 static void
 at86rf230_irq_trx_end(struct at86rf230_local *lp)
 {
 	if (lp->is_tx) {
 		lp->is_tx = 0;
-		at86rf230_async_state_change(lp, &lp->irq,
-					     STATE_FORCE_TX_ON,
-					     at86rf230_tx_trac_status,
-					     true);
+		at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
+					 at86rf230_tx_trac_check, true);
 	} else {
 		at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
 					 at86rf230_rx_trac_check, true);
@@ -920,6 +935,10 @@ at86rf230_start(struct ieee802154_hw *hw)
 {
 	struct at86rf230_local *lp = hw->priv;
 
+	/* reset trac stats on start */
+	if (IS_ENABLED(CONFIG_IEEE802154_AT86RF230_DEBUGFS))
+		memset(&lp->trac, 0, sizeof(struct at86rf230_trac));
+
 	at86rf230_awake(lp);
 	enable_irq(lp->spi->irq);
 
@@ -1357,7 +1376,7 @@ static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
 	if (irq_type == IRQ_TYPE_EDGE_RISING ||
 	    irq_type == IRQ_TYPE_EDGE_FALLING)
 		dev_warn(&lp->spi->dev,
-			 "Using edge triggered irq's are not recommended!\n");
+			 "Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n");
 	if (irq_type == IRQ_TYPE_EDGE_FALLING ||
 	    irq_type == IRQ_TYPE_LEVEL_LOW)
 		irq_pol = IRQ_ACTIVE_LOW;
@@ -1620,6 +1639,81 @@ at86rf230_setup_spi_messages(struct at86rf230_local *lp)
 	lp->tx.timer.function = at86rf230_async_state_timer;
 }
 
+#ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS
+static struct dentry *at86rf230_debugfs_root;
+
+static int at86rf230_stats_show(struct seq_file *file, void *offset)
+{
+	struct at86rf230_local *lp = file->private;
+	int ret;
+
+	ret = seq_printf(file, "SUCCESS:\t\t%8llu\n", lp->trac.success);
+	if (ret < 0)
+		return ret;
+
+	ret = seq_printf(file, "SUCCESS_DATA_PENDING:\t%8llu\n",
+			 lp->trac.success_data_pending);
+	if (ret < 0)
+		return ret;
+
+	ret = seq_printf(file, "SUCCESS_WAIT_FOR_ACK:\t%8llu\n",
+			 lp->trac.success_wait_for_ack);
+	if (ret < 0)
+		return ret;
+
+	ret = seq_printf(file, "CHANNEL_ACCESS_FAILURE:\t%8llu\n",
+			 lp->trac.channel_access_failure);
+	if (ret < 0)
+		return ret;
+
+	ret = seq_printf(file, "NO_ACK:\t\t\t%8llu\n", lp->trac.no_ack);
+	if (ret < 0)
+		return ret;
+
+	return seq_printf(file, "INVALID:\t\t%8llu\n", lp->trac.invalid);
+}
+
+static int at86rf230_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, at86rf230_stats_show, inode->i_private);
+}
+
+static const struct file_operations at86rf230_stats_fops = {
+	.open		= at86rf230_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int at86rf230_debugfs_init(struct at86rf230_local *lp)
+{
+	char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-";
+	struct dentry *stats;
+
+	strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
+
+	at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+	if (!at86rf230_debugfs_root)
+		return -ENOMEM;
+
+	stats = debugfs_create_file("trac_stats", S_IRUGO,
+				    at86rf230_debugfs_root, lp,
+				    &at86rf230_stats_fops);
+	if (!stats)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void at86rf230_debugfs_remove(void)
+{
+	debugfs_remove_recursive(at86rf230_debugfs_root);
+}
+#else
+static int at86rf230_debugfs_init(struct at86rf230_local *lp) { return 0; }
+static void at86rf230_debugfs_remove(void) { }
+#endif
+
 static int at86rf230_probe(struct spi_device *spi)
 {
 	struct ieee802154_hw *hw;
@@ -1715,12 +1809,18 @@ static int at86rf230_probe(struct spi_device *spi)
 	/* going into sleep by default */
 	at86rf230_sleep(lp);
 
-	rc = ieee802154_register_hw(lp->hw);
+	rc = at86rf230_debugfs_init(lp);
 	if (rc)
 		goto free_dev;
 
+	rc = ieee802154_register_hw(lp->hw);
+	if (rc)
+		goto free_debugfs;
+
 	return rc;
 
+free_debugfs:
+	at86rf230_debugfs_remove();
 free_dev:
 	ieee802154_free_hw(lp->hw);
 
@@ -1735,6 +1835,7 @@ static int at86rf230_remove(struct spi_device *spi)
 	at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
 	ieee802154_unregister_hw(lp->hw);
 	ieee802154_free_hw(lp->hw);
+	at86rf230_debugfs_remove();
 	dev_dbg(&spi->dev, "unregistered at86rf230\n");
 
 	return 0;

+ 8 - 0
drivers/net/ieee802154/at86rf230.h

@@ -216,5 +216,13 @@
 #define STATE_TRANSITION_IN_PROGRESS 0x1F
 
 #define TRX_STATE_MASK		(0x1F)
+#define TRAC_MASK(x)		((x & 0xe0) >> 5)
+
+#define TRAC_SUCCESS			0
+#define TRAC_SUCCESS_DATA_PENDING	1
+#define TRAC_SUCCESS_WAIT_FOR_ACK	2
+#define TRAC_CHANNEL_ACCESS_FAILURE	3
+#define TRAC_NO_ACK			5
+#define TRAC_INVALID			7
 
 #endif /* !_AT86RF230_H */
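
To show how the new TRAC definitions fit together, here is a small standalone C sketch (not kernel code) that extracts the TRAC field from a raw TRX_STATE register value and maps it to the counter names exposed via trac_stats; the sample register value is invented:

#include <stdio.h>
#include <stdint.h>

#define TRX_STATE_MASK			(0x1F)
#define TRAC_MASK(x)			(((x) & 0xe0) >> 5)

#define TRAC_SUCCESS			0
#define TRAC_SUCCESS_DATA_PENDING	1
#define TRAC_SUCCESS_WAIT_FOR_ACK	2
#define TRAC_CHANNEL_ACCESS_FAILURE	3
#define TRAC_NO_ACK			5
#define TRAC_INVALID			7

int main(void)
{
	uint8_t trx_state = 0xa9;	/* invented register read: TRAC = 5, state = 0x09 */

	printf("state bits: 0x%02x\n", trx_state & TRX_STATE_MASK);

	switch (TRAC_MASK(trx_state)) {
	case TRAC_SUCCESS:
		puts("TRAC: SUCCESS");
		break;
	case TRAC_SUCCESS_DATA_PENDING:
		puts("TRAC: SUCCESS_DATA_PENDING");
		break;
	case TRAC_SUCCESS_WAIT_FOR_ACK:
		puts("TRAC: SUCCESS_WAIT_FOR_ACK");
		break;
	case TRAC_CHANNEL_ACCESS_FAILURE:
		puts("TRAC: CHANNEL_ACCESS_FAILURE");
		break;
	case TRAC_NO_ACK:
		puts("TRAC: NO_ACK");
		break;
	default:
		puts("TRAC: INVALID/reserved");
	}
	return 0;
}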

+ 11 - 2
drivers/net/ieee802154/atusb.c

@@ -559,6 +559,7 @@ static int atusb_get_and_show_chip(struct atusb *atusb)
 {
 	struct usb_device *usb_dev = atusb->usb_dev;
 	uint8_t man_id_0, man_id_1, part_num, version_num;
+	const char *chip;
 
 	man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
 	man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
@@ -574,14 +575,22 @@ static int atusb_get_and_show_chip(struct atusb *atusb)
 			man_id_1, man_id_0);
 		goto fail;
 	}
-	if (part_num != 3 && part_num != 2) {
+
+	switch (part_num) {
+	case 2:
+		chip = "AT86RF230";
+		break;
+	case 3:
+		chip = "AT86RF231";
+		break;
+	default:
 		dev_err(&usb_dev->dev,
 			"unexpected transceiver, part 0x%02x version 0x%02x\n",
 			part_num, version_num);
 		goto fail;
 	}
 
-	dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num);
+	dev_info(&usb_dev->dev, "ATUSB: %s version %d\n", chip, version_num);
 
 	return 0;
 

+ 25 - 0
include/linux/ieee802154.h

@@ -205,6 +205,31 @@ enum {
 	IEEE802154_SCAN_IN_PROGRESS = 0xfc,
 };
 
+/* frame control handling */
+#define IEEE802154_FCTL_FTYPE		0x0003
+#define IEEE802154_FCTL_INTRA_PAN	0x0040
+
+#define IEEE802154_FTYPE_DATA		0x0001
+
+/**
+ * ieee802154_is_data - check if type is IEEE802154_FTYPE_DATA
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee802154_is_data(__le16 fc)
+{
+	return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) ==
+		cpu_to_le16(IEEE802154_FTYPE_DATA);
+}
+
+/**
+ * ieee802154_is_intra_pan - check if intra pan id communication
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee802154_is_intra_pan(__le16 fc)
+{
+	return fc & cpu_to_le16(IEEE802154_FCTL_INTRA_PAN);
+}
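
A standalone C sketch of the two helpers above, working on a plain host-order value instead of __le16 (the cpu_to_le16() handling is omitted and the sample frame control value is invented):

#include <stdio.h>
#include <stdint.h>

#define IEEE802154_FCTL_FTYPE		0x0003
#define IEEE802154_FCTL_INTRA_PAN	0x0040

#define IEEE802154_FTYPE_DATA		0x0001

static int is_data(uint16_t fc)
{
	return (fc & IEEE802154_FCTL_FTYPE) == IEEE802154_FTYPE_DATA;
}

static int is_intra_pan(uint16_t fc)
{
	return !!(fc & IEEE802154_FCTL_INTRA_PAN);
}

int main(void)
{
	uint16_t fc = 0x8841;	/* invented FC: data frame with intra-PAN bit set */

	printf("data frame: %d\n", is_data(fc));
	printf("intra PAN:  %d\n", is_intra_pan(fc));
	return 0;
}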
+
 /**
  * ieee802154_is_valid_psdu_len - check if psdu len is valid
  * available lengths:

+ 25 - 119
include/net/6lowpan.h

@@ -126,13 +126,19 @@
 	 (((a)[6]) == 0xFF) &&	\
 	 (((a)[7]) == 0xFF))
 
-#define LOWPAN_DISPATCH_IPV6	0x41 /* 01000001 = 65 */
-#define LOWPAN_DISPATCH_HC1	0x42 /* 01000010 = 66 */
-#define LOWPAN_DISPATCH_IPHC	0x60 /* 011xxxxx = ... */
-#define LOWPAN_DISPATCH_FRAG1	0xc0 /* 11000xxx */
-#define LOWPAN_DISPATCH_FRAGN	0xe0 /* 11100xxx */
+#define LOWPAN_DISPATCH_IPV6		0x41 /* 01000001 = 65 */
+#define LOWPAN_DISPATCH_IPHC		0x60 /* 011xxxxx = ... */
+#define LOWPAN_DISPATCH_IPHC_MASK	0xe0
 
-#define LOWPAN_DISPATCH_MASK	0xf8 /* 11111000 */
+static inline bool lowpan_is_ipv6(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_IPV6;
+}
+
+static inline bool lowpan_is_iphc(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC;
+}
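
A standalone sketch of how a 6LoWPAN dispatch byte is classified with these helpers; the FRAG1/FRAGN masks mirror the values this series keeps privately in net/ieee802154/6lowpan/, and the sample bytes are arbitrary:

#include <stdio.h>
#include <stdint.h>

#define LOWPAN_DISPATCH_IPV6		0x41	/* 01000001 */
#define LOWPAN_DISPATCH_IPHC		0x60	/* 011xxxxx */
#define LOWPAN_DISPATCH_IPHC_MASK	0xe0
#define LOWPAN_DISPATCH_FRAG1		0xc0	/* 11000xxx */
#define LOWPAN_DISPATCH_FRAGN		0xe0	/* 11100xxx */
#define LOWPAN_DISPATCH_FRAG_MASK	0xf8

static const char *classify(uint8_t dispatch)
{
	if (dispatch == LOWPAN_DISPATCH_IPV6)
		return "uncompressed IPv6";
	if ((dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC)
		return "IPHC compressed";
	if ((dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAG1)
		return "FRAG1";
	if ((dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAGN)
		return "FRAGN";
	return "other/unsupported";
}

int main(void)
{
	const uint8_t samples[] = { 0x41, 0x78, 0xc3, 0xe5, 0x50 };

	for (size_t i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %s\n", samples[i], classify(samples[i]));
	return 0;
}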
 
 #define LOWPAN_FRAG_TIMEOUT	(HZ * 60)	/* time-out 60 sec */
 
@@ -218,6 +224,19 @@ struct lowpan_priv *lowpan_priv(const struct net_device *dev)
 	return netdev_priv(dev);
 }
 
+struct lowpan_802154_cb {
+	u16 d_tag;
+	unsigned int d_size;
+	u8 d_offset;
+};
+
+static inline
+struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct lowpan_802154_cb) > sizeof(skb->cb));
+	return (struct lowpan_802154_cb *)skb->cb;
+}
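
The helper above overlays 802.15.4 reassembly state on the generic skb control buffer, with a compile-time size check. A rough standalone sketch of the same overlay pattern, using a stand-in structure in place of struct sk_buff:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* stand-in for struct sk_buff: only the 48-byte cb scratch area matters here */
struct fake_skb {
	char cb[48] __attribute__((aligned(sizeof(unsigned long))));
};

struct lowpan_802154_cb {
	unsigned short d_tag;
	unsigned int d_size;
	unsigned char d_offset;
};

static struct lowpan_802154_cb *lowpan_802154_cb(struct fake_skb *skb)
{
	/* the kernel uses BUILD_BUG_ON for this; a runtime assert here */
	assert(sizeof(struct lowpan_802154_cb) <= sizeof(skb->cb));
	return (struct lowpan_802154_cb *)skb->cb;
}

int main(void)
{
	struct fake_skb skb;

	memset(&skb, 0, sizeof(skb));
	lowpan_802154_cb(&skb)->d_tag = 42;
	lowpan_802154_cb(&skb)->d_size = 1280;

	printf("tag=%u size=%u\n", lowpan_802154_cb(&skb)->d_tag,
	       lowpan_802154_cb(&skb)->d_size);
	return 0;
}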
+
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
@@ -280,119 +299,6 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
 	*hc_ptr += len;
 }
 
-static inline u8 lowpan_addr_mode_size(const u8 addr_mode)
-{
-	static const u8 addr_sizes[] = {
-		[LOWPAN_IPHC_ADDR_00] = 16,
-		[LOWPAN_IPHC_ADDR_01] = 8,
-		[LOWPAN_IPHC_ADDR_02] = 2,
-		[LOWPAN_IPHC_ADDR_03] = 0,
-	};
-	return addr_sizes[addr_mode];
-}
-
-static inline u8 lowpan_next_hdr_size(const u8 h_enc, u16 *uncomp_header)
-{
-	u8 ret = 1;
-
-	if ((h_enc & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
-		*uncomp_header += sizeof(struct udphdr);
-
-		switch (h_enc & LOWPAN_NHC_UDP_CS_P_11) {
-		case LOWPAN_NHC_UDP_CS_P_00:
-			ret += 4;
-			break;
-		case LOWPAN_NHC_UDP_CS_P_01:
-		case LOWPAN_NHC_UDP_CS_P_10:
-			ret += 3;
-			break;
-		case LOWPAN_NHC_UDP_CS_P_11:
-			ret++;
-			break;
-		default:
-			break;
-		}
-
-		if (!(h_enc & LOWPAN_NHC_UDP_CS_C))
-			ret += 2;
-	}
-
-	return ret;
-}
-
-/**
- *	lowpan_uncompress_size - returns skb->len size with uncompressed header
- *	@skb: sk_buff with 6lowpan header inside
- *	@datagram_offset: optional to get the datagram_offset value
- *
- *	Returns the skb->len with uncompressed header
- */
-static inline u16
-lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
-{
-	u16 ret = 2, uncomp_header = sizeof(struct ipv6hdr);
-	u8 iphc0, iphc1, h_enc;
-
-	iphc0 = skb_network_header(skb)[0];
-	iphc1 = skb_network_header(skb)[1];
-
-	switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
-	case 0:
-		ret += 4;
-		break;
-	case 1:
-		ret += 3;
-		break;
-	case 2:
-		ret++;
-		break;
-	default:
-		break;
-	}
-
-	if (!(iphc0 & LOWPAN_IPHC_NH_C))
-		ret++;
-
-	if (!(iphc0 & 0x03))
-		ret++;
-
-	ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_SAM) >>
-				     LOWPAN_IPHC_SAM_BIT);
-
-	if (iphc1 & LOWPAN_IPHC_M) {
-		switch ((iphc1 & LOWPAN_IPHC_DAM_11) >>
-			LOWPAN_IPHC_DAM_BIT) {
-		case LOWPAN_IPHC_DAM_00:
-			ret += 16;
-			break;
-		case LOWPAN_IPHC_DAM_01:
-			ret += 6;
-			break;
-		case LOWPAN_IPHC_DAM_10:
-			ret += 4;
-			break;
-		case LOWPAN_IPHC_DAM_11:
-			ret++;
-			break;
-		default:
-			break;
-		}
-	} else {
-		ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_DAM_11) >>
-					     LOWPAN_IPHC_DAM_BIT);
-	}
-
-	if (iphc0 & LOWPAN_IPHC_NH_C) {
-		h_enc = skb_network_header(skb)[ret];
-		ret += lowpan_next_hdr_size(h_enc, &uncomp_header);
-	}
-
-	if (dgram_offset)
-		*dgram_offset = uncomp_header;
-
-	return skb->len + uncomp_header - ret;
-}
-
 void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
 
 int

+ 1 - 1
include/net/af_ieee802154.h

@@ -1,5 +1,5 @@
 /*
- * IEEE 802.15.4 inteface for userspace
+ * IEEE 802.15.4 interface for userspace
  *
  * Copyright 2007, 2008 Siemens AG
  *

+ 11 - 0
include/net/bluetooth/bluetooth.h

@@ -123,11 +123,22 @@ __printf(1, 2)
 void bt_info(const char *fmt, ...);
 __printf(1, 2)
 void bt_err(const char *fmt, ...);
+__printf(1, 2)
+void bt_err_ratelimited(const char *fmt, ...);
 
 #define BT_INFO(fmt, ...)	bt_info(fmt "\n", ##__VA_ARGS__)
 #define BT_ERR(fmt, ...)	bt_err(fmt "\n", ##__VA_ARGS__)
 #define BT_DBG(fmt, ...)	pr_debug(fmt "\n", ##__VA_ARGS__)
 
+#define BT_ERR_RATELIMITED(fmt, ...) bt_err_ratelimited(fmt "\n", ##__VA_ARGS__)
+
+#define bt_dev_info(hdev, fmt, ...)				\
+	BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_err(hdev, fmt, ...)				\
+	BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_dbg(hdev, fmt, ...)				\
+	BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+
 /* Connection and socket states */
 enum {
 	BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */

+ 1 - 0
include/net/bluetooth/hci_core.h

@@ -987,6 +987,7 @@ int hci_resume_dev(struct hci_dev *hdev);
 int hci_reset_dev(struct hci_dev *hdev);
 int hci_dev_open(__u16 dev);
 int hci_dev_close(__u16 dev);
+int hci_dev_do_close(struct hci_dev *hdev);
 int hci_dev_reset(__u16 dev);
 int hci_dev_reset_stat(__u16 dev);
 int hci_dev_cmd(unsigned int cmd, void __user *arg);

+ 15 - 0
include/net/mac802154.h

@@ -249,6 +249,21 @@ struct ieee802154_ops {
 						const bool on);
 };
 
+/**
+ * ieee802154_get_fc_from_skb - get the frame control field from an skb
+ * @skb: skb from which the frame control field will be taken
+ */
+static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb)
+{
+	/* return some invalid fc on failure */
+	if (unlikely(skb->mac_len < 2)) {
+		WARN_ON(1);
+		return cpu_to_le16(0);
+	}
+
+	return (__force __le16)__get_unaligned_memmove16(skb_mac_header(skb));
+}
+
 /**
  * ieee802154_be64_to_le64 - copies and convert be64 to le64
  * @le64_dst: le64 destination pointer

+ 12 - 1
net/6lowpan/iphc.c

@@ -366,7 +366,18 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 			return err;
 	}
 
-	hdr.payload_len = htons(skb->len);
+	switch (lowpan_priv(dev)->lltype) {
+	case LOWPAN_LLTYPE_IEEE802154:
+		if (lowpan_802154_cb(skb)->d_size)
+			hdr.payload_len = htons(lowpan_802154_cb(skb)->d_size -
+						sizeof(struct ipv6hdr));
+		else
+			hdr.payload_len = htons(skb->len);
+		break;
+	default:
+		hdr.payload_len = htons(skb->len);
+		break;
+	}
 
 	pr_debug("skb headroom size = %d, data length = %d\n",
 		 skb_headroom(skb), skb->len);

+ 12 - 1
net/6lowpan/nhc_udp.c

@@ -71,7 +71,18 @@ static int udp_uncompress(struct sk_buff *skb, size_t needed)
 	 * here, we obtain the hint from the remaining size of the
 	 * frame
 	 */
-	uh.len = htons(skb->len + sizeof(struct udphdr));
+	switch (lowpan_priv(skb->dev)->lltype) {
+	case LOWPAN_LLTYPE_IEEE802154:
+		if (lowpan_802154_cb(skb)->d_size)
+			uh.len = htons(lowpan_802154_cb(skb)->d_size -
+				       sizeof(struct ipv6hdr));
+		else
+			uh.len = htons(skb->len + sizeof(struct udphdr));
+		break;
+	default:
+		uh.len = htons(skb->len + sizeof(struct udphdr));
+		break;
+	}
 	pr_debug("uncompressed UDP length: src = %d", ntohs(uh.len));
 
 	/* replace the compressed UDP head by the uncompressed UDP

+ 3 - 2
net/bluetooth/hci_core.c

@@ -693,7 +693,8 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
 
 	hci_setup_event_mask(req);
 
-	if (hdev->commands[6] & 0x20) {
+	if (hdev->commands[6] & 0x20 &&
+	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
 		struct hci_cp_read_stored_link_key cp;
 
 		bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -1548,7 +1549,7 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
 	BT_DBG("All LE pending actions cleared");
 }
 
-static int hci_dev_do_close(struct hci_dev *hdev)
+int hci_dev_do_close(struct hci_dev *hdev)
 {
 	BT_DBG("%s %p", hdev->name, hdev);
 

+ 21 - 0
net/bluetooth/hci_event.c

@@ -4719,6 +4719,27 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
 	struct hci_conn *conn;
 	bool match;
 	u32 flags;
+	u8 *ptr, real_len;
+
+	/* Find the end of the data in case the report contains padded zero
+	 * bytes at the end causing an invalid length value.
+	 *
+	 * When data is NULL, len is 0 so there is no need for extra ptr
+	 * check as 'ptr < data + 0' is already false in such case.
+	 */
+	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
+		if (ptr + 1 + *ptr > data + len)
+			break;
+	}
+
+	real_len = ptr - data;
+
+	/* Adjust for actual length */
+	if (len != real_len) {
+		BT_ERR_RATELIMITED("%s advertising data length corrected",
+				   hdev->name);
+		len = real_len;
+	}
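
The loop above treats the advertising data as length-prefixed AD structures and stops at the first zero length byte or truncated entry, so trailing zero padding no longer inflates the reported length. The same walk as a standalone sketch over an invented report (flags AD, 16-bit UUID list AD, three bytes of padding):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* invented advertising report: flags AD, 16-bit UUID list AD,
	 * then three bytes of zero padding that inflate the length
	 */
	uint8_t data[] = { 0x02, 0x01, 0x06,
			   0x03, 0x03, 0xd8, 0xfe,
			   0x00, 0x00, 0x00 };
	size_t len = sizeof(data);
	size_t real_len;
	uint8_t *ptr;

	/* each AD structure is: length byte, then "length" bytes of payload */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;
	}

	real_len = ptr - data;
	printf("reported len=%zu, real len=%zu\n", len, real_len);
	return 0;
}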
 
 	/* If the direct address is present, then this report is from
 	 * a LE Direct Advertising Report event. In that case it is

+ 10 - 1
net/bluetooth/hci_sock.c

@@ -503,7 +503,16 @@ static int hci_sock_release(struct socket *sock)
 
 	if (hdev) {
 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
-			hci_dev_close(hdev->id);
+			/* When releasing a user channel's exclusive access,
+			 * call hci_dev_do_close directly instead of calling
+			 * hci_dev_close to ensure the exclusive access will
+			 * be released and the controller brought back down.
+			 *
+			 * The checking of HCI_AUTO_OFF is not needed in this
+			 * case since it will have been cleared already when
+			 * opening the user channel.
+			 */
+			hci_dev_do_close(hdev);
 			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
 			mgmt_index_added(hdev);
 		}

+ 16 - 0
net/bluetooth/lib.c

@@ -166,3 +166,19 @@ void bt_err(const char *format, ...)
 	va_end(args);
 }
 EXPORT_SYMBOL(bt_err);
+
+void bt_err_ratelimited(const char *format, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, format);
+
+	vaf.fmt = format;
+	vaf.va = &args;
+
+	pr_err_ratelimited("%pV", &vaf);
+
+	va_end(args);
+}
+EXPORT_SYMBOL(bt_err_ratelimited);
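
bt_err_ratelimited() just hands the formatted message to pr_err_ratelimited(), which suppresses messages that arrive in bursts. A rough userspace analogue of the idea, using a hand-rolled time-window limiter rather than the kernel's ratelimit machinery:

#include <stdarg.h>
#include <stdio.h>
#include <time.h>

/* allow at most BURST messages per WINDOW seconds; the rest are dropped */
#define WINDOW	5
#define BURST	3

static void err_ratelimited(const char *fmt, ...)
{
	static time_t window_start;
	static int printed;
	time_t now = time(NULL);
	va_list args;

	if (now - window_start >= WINDOW) {
		window_start = now;
		printed = 0;
	}
	if (printed++ >= BURST)
		return;

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		err_ratelimited("event %d\n", i);	/* only the first few print */
	return 0;
}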

+ 1 - 1
net/bluetooth/smp.c

@@ -495,7 +495,7 @@ static int smp_ah(struct crypto_blkcipher *tfm, const u8 irk[16],
 	}
 
 	/* The output of the random address function ah is:
-	 *	ah(h, r) = e(k, r') mod 2^24
+	 *	ah(k, r) = e(k, r') mod 2^24
 	 * The output of the security function e is then truncated to 24 bits
 	 * by taking the least significant 24 bits of the output of e as the
 	 * result of ah.

+ 13 - 1
net/ieee802154/6lowpan/6lowpan_i.h

@@ -7,6 +7,15 @@
 #include <net/inet_frag.h>
 #include <net/6lowpan.h>
 
+typedef unsigned __bitwise__ lowpan_rx_result;
+#define RX_CONTINUE		((__force lowpan_rx_result) 0u)
+#define RX_DROP_UNUSABLE	((__force lowpan_rx_result) 1u)
+#define RX_DROP			((__force lowpan_rx_result) 2u)
+#define RX_QUEUED		((__force lowpan_rx_result) 3u)
+
+#define LOWPAN_DISPATCH_FRAG1           0xc0
+#define LOWPAN_DISPATCH_FRAGN           0xe0
+
 struct lowpan_create_arg {
 	u16 tag;
 	u16 d_size;
@@ -40,7 +49,7 @@ static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
 
 /* private device info */
 struct lowpan_dev_info {
-	struct net_device	*real_dev; /* real WPAN device ptr */
+	struct net_device	*wdev; /* wpan device ptr */
 	u16			fragment_tag;
 };
 
@@ -62,4 +71,7 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
 			 const void *_saddr, unsigned int len);
 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev);
 
+int lowpan_iphc_decompress(struct sk_buff *skb);
+lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb);
+
 #endif /* __IEEE802154_6LOWPAN_I_H__ */

+ 61 - 55
net/ieee802154/6lowpan/core.c

@@ -61,7 +61,7 @@ static struct header_ops lowpan_header_ops = {
 static struct lock_class_key lowpan_tx_busylock;
 static struct lock_class_key lowpan_netdev_xmit_lock_key;
 
-static void lowpan_set_lockdep_class_one(struct net_device *dev,
+static void lowpan_set_lockdep_class_one(struct net_device *ldev,
 					 struct netdev_queue *txq,
 					 void *_unused)
 {
@@ -69,35 +69,52 @@ static void lowpan_set_lockdep_class_one(struct net_device *dev,
 			  &lowpan_netdev_xmit_lock_key);
 }
 
-static int lowpan_dev_init(struct net_device *dev)
+static int lowpan_dev_init(struct net_device *ldev)
 {
-	netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
-	dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+	netdev_for_each_tx_queue(ldev, lowpan_set_lockdep_class_one, NULL);
+	ldev->qdisc_tx_busylock = &lowpan_tx_busylock;
+	return 0;
+}
+
+static int lowpan_open(struct net_device *dev)
+{
+	if (!open_count)
+		lowpan_rx_init();
+	open_count++;
+	return 0;
+}
+
+static int lowpan_stop(struct net_device *dev)
+{
+	open_count--;
+	if (!open_count)
+		lowpan_rx_exit();
 	return 0;
 }
 
 static const struct net_device_ops lowpan_netdev_ops = {
 	.ndo_init		= lowpan_dev_init,
 	.ndo_start_xmit		= lowpan_xmit,
+	.ndo_open		= lowpan_open,
+	.ndo_stop		= lowpan_stop,
 };
 
-static void lowpan_setup(struct net_device *dev)
+static void lowpan_setup(struct net_device *ldev)
 {
-	dev->addr_len		= IEEE802154_ADDR_LEN;
-	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
-	dev->type		= ARPHRD_6LOWPAN;
+	ldev->addr_len		= IEEE802154_ADDR_LEN;
+	memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+	ldev->type		= ARPHRD_6LOWPAN;
 	/* Frame Control + Sequence Number + Address fields + Security Header */
-	dev->hard_header_len	= 2 + 1 + 20 + 14;
-	dev->needed_tailroom	= 2; /* FCS */
-	dev->mtu		= IPV6_MIN_MTU;
-	dev->priv_flags		|= IFF_NO_QUEUE;
-	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
-	dev->watchdog_timeo	= 0;
-
-	dev->netdev_ops		= &lowpan_netdev_ops;
-	dev->header_ops		= &lowpan_header_ops;
-	dev->destructor		= free_netdev;
-	dev->features		|= NETIF_F_NETNS_LOCAL;
+	ldev->hard_header_len	= 2 + 1 + 20 + 14;
+	ldev->needed_tailroom	= 2; /* FCS */
+	ldev->mtu		= IPV6_MIN_MTU;
+	ldev->priv_flags	|= IFF_NO_QUEUE;
+	ldev->flags		= IFF_BROADCAST | IFF_MULTICAST;
+
+	ldev->netdev_ops	= &lowpan_netdev_ops;
+	ldev->header_ops	= &lowpan_header_ops;
+	ldev->destructor	= free_netdev;
+	ldev->features		|= NETIF_F_NETNS_LOCAL;
 }
 
 static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -109,10 +126,10 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
 	return 0;
 }
 
-static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+static int lowpan_newlink(struct net *src_net, struct net_device *ldev,
 			  struct nlattr *tb[], struct nlattr *data[])
 {
-	struct net_device *real_dev;
+	struct net_device *wdev;
 	int ret;
 
 	ASSERT_RTNL();
@@ -120,58 +137,47 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 	pr_debug("adding new link\n");
 
 	if (!tb[IFLA_LINK] ||
-	    !net_eq(dev_net(dev), &init_net))
+	    !net_eq(dev_net(ldev), &init_net))
 		return -EINVAL;
-	/* find and hold real wpan device */
-	real_dev = dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
-	if (!real_dev)
+	/* find and hold wpan device */
+	wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK]));
+	if (!wdev)
 		return -ENODEV;
-	if (real_dev->type != ARPHRD_IEEE802154) {
-		dev_put(real_dev);
+	if (wdev->type != ARPHRD_IEEE802154) {
+		dev_put(wdev);
 		return -EINVAL;
 	}
 
-	if (real_dev->ieee802154_ptr->lowpan_dev) {
-		dev_put(real_dev);
+	if (wdev->ieee802154_ptr->lowpan_dev) {
+		dev_put(wdev);
 		return -EBUSY;
 	}
 
-	lowpan_dev_info(dev)->real_dev = real_dev;
+	lowpan_dev_info(ldev)->wdev = wdev;
 	/* Set the lowpan hardware address to the wpan hardware address. */
-	memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+	memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN);
 
-	lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
+	lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);
 
-	ret = register_netdevice(dev);
+	ret = register_netdevice(ldev);
 	if (ret < 0) {
-		dev_put(real_dev);
+		dev_put(wdev);
 		return ret;
 	}
 
-	real_dev->ieee802154_ptr->lowpan_dev = dev;
-	if (!open_count)
-		lowpan_rx_init();
-
-	open_count++;
-
+	wdev->ieee802154_ptr->lowpan_dev = ldev;
 	return 0;
 }
 
-static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+static void lowpan_dellink(struct net_device *ldev, struct list_head *head)
 {
-	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
-	struct net_device *real_dev = lowpan_dev->real_dev;
+	struct net_device *wdev = lowpan_dev_info(ldev)->wdev;
 
 	ASSERT_RTNL();
 
-	open_count--;
-
-	if (!open_count)
-		lowpan_rx_exit();
-
-	real_dev->ieee802154_ptr->lowpan_dev = NULL;
-	unregister_netdevice(dev);
-	dev_put(real_dev);
+	wdev->ieee802154_ptr->lowpan_dev = NULL;
+	unregister_netdevice(ldev);
+	dev_put(wdev);
 }
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
@@ -196,9 +202,9 @@ static inline void lowpan_netlink_fini(void)
 static int lowpan_device_event(struct notifier_block *unused,
 			       unsigned long event, void *ptr)
 {
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct net_device *wdev = netdev_notifier_info_to_dev(ptr);
 
-	if (dev->type != ARPHRD_IEEE802154)
+	if (wdev->type != ARPHRD_IEEE802154)
 		goto out;
 
 	switch (event) {
@@ -207,8 +213,8 @@ static int lowpan_device_event(struct notifier_block *unused,
 		 * also delete possible lowpan interfaces which belongs
 		 * to the wpan interface.
 		 */
-		if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
-			lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
+		if (wdev->ieee802154_ptr->lowpan_dev)
+			lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL);
 		break;
 	default:
 		break;

+ 107 - 50
net/ieee802154/6lowpan/reassembly.c

@@ -32,21 +32,10 @@
 
 static const char lowpan_frags_cache_name[] = "lowpan-frags";
 
-struct lowpan_frag_info {
-	u16 d_tag;
-	u16 d_size;
-	u8 d_offset;
-};
-
-static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
-{
-	return (struct lowpan_frag_info *)skb->cb;
-}
-
 static struct inet_frags lowpan_frags;
 
 static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
-			     struct sk_buff *prev, struct net_device *dev);
+			     struct sk_buff *prev, struct net_device *ldev);
 
 static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
 				     const struct ieee802154_addr *saddr,
@@ -111,7 +100,7 @@ out:
 }
 
 static inline struct lowpan_frag_queue *
-fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
+fq_find(struct net *net, const struct lowpan_802154_cb *cb,
 	const struct ieee802154_addr *src,
 	const struct ieee802154_addr *dst)
 {
@@ -121,12 +110,12 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
 	struct netns_ieee802154_lowpan *ieee802154_lowpan =
 		net_ieee802154_lowpan(net);
 
-	arg.tag = frag_info->d_tag;
-	arg.d_size = frag_info->d_size;
+	arg.tag = cb->d_tag;
+	arg.d_size = cb->d_size;
 	arg.src = src;
 	arg.dst = dst;
 
-	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
+	hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
 
 	q = inet_frag_find(&ieee802154_lowpan->frags,
 			   &lowpan_frags, &arg, hash);
@@ -138,17 +127,17 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
 }
 
 static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
-			     struct sk_buff *skb, const u8 frag_type)
+			     struct sk_buff *skb, u8 frag_type)
 {
 	struct sk_buff *prev, *next;
-	struct net_device *dev;
+	struct net_device *ldev;
 	int end, offset;
 
 	if (fq->q.flags & INET_FRAG_COMPLETE)
 		goto err;
 
-	offset = lowpan_cb(skb)->d_offset << 3;
-	end = lowpan_cb(skb)->d_size;
+	offset = lowpan_802154_cb(skb)->d_offset << 3;
+	end = lowpan_802154_cb(skb)->d_size;
 
 	/* Is this the final fragment? */
 	if (offset + skb->len == end) {
@@ -174,13 +163,16 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
 	 * this fragment, right?
 	 */
 	prev = fq->q.fragments_tail;
-	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
+	if (!prev ||
+	    lowpan_802154_cb(prev)->d_offset <
+	    lowpan_802154_cb(skb)->d_offset) {
 		next = NULL;
 		goto found;
 	}
 	prev = NULL;
 	for (next = fq->q.fragments; next != NULL; next = next->next) {
-		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
+		if (lowpan_802154_cb(next)->d_offset >=
+		    lowpan_802154_cb(skb)->d_offset)
 			break;	/* bingo! */
 		prev = next;
 	}
@@ -195,18 +187,15 @@ found:
 	else
 		fq->q.fragments = skb;
 
-	dev = skb->dev;
-	if (dev)
+	ldev = skb->dev;
+	if (ldev)
 		skb->dev = NULL;
 
 	fq->q.stamp = skb->tstamp;
-	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
-		/* Calculate uncomp. 6lowpan header to estimate full size */
-		fq->q.meat += lowpan_uncompress_size(skb, NULL);
+	if (frag_type == LOWPAN_DISPATCH_FRAG1)
 		fq->q.flags |= INET_FRAG_FIRST_IN;
-	} else {
-		fq->q.meat += skb->len;
-	}
+
+	fq->q.meat += skb->len;
 	add_frag_mem_limit(fq->q.net, skb->truesize);
 
 	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
@@ -215,7 +204,7 @@ found:
 		unsigned long orefdst = skb->_skb_refdst;
 
 		skb->_skb_refdst = 0UL;
-		res = lowpan_frag_reasm(fq, prev, dev);
+		res = lowpan_frag_reasm(fq, prev, ldev);
 		skb->_skb_refdst = orefdst;
 		return res;
 	}
@@ -235,7 +224,7 @@ err:
  *	the last and the first frames arrived and all the bits are here.
  */
 static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
-			     struct net_device *dev)
+			     struct net_device *ldev)
 {
 	struct sk_buff *fp, *head = fq->q.fragments;
 	int sum_truesize;
@@ -313,7 +302,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
 	sub_frag_mem_limit(fq->q.net, sum_truesize);
 
 	head->next = NULL;
-	head->dev = dev;
+	head->dev = ldev;
 	head->tstamp = fq->q.stamp;
 
 	fq->q.fragments = NULL;
@@ -325,24 +314,87 @@ out_oom:
 	return -1;
 }
 
-static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
-				struct lowpan_frag_info *frag_info)
+static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
+					  lowpan_rx_result res)
+{
+	switch (res) {
+	case RX_QUEUED:
+		return NET_RX_SUCCESS;
+	case RX_CONTINUE:
+		/* nobody cared about this packet */
+		net_warn_ratelimited("%s: received unknown dispatch\n",
+				     __func__);
+
+		/* fall-through */
+	default:
+		/* all other failures */
+		return NET_RX_DROP;
+	}
+}
+
+static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
+{
+	int ret;
+
+	if (!lowpan_is_iphc(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	ret = lowpan_iphc_decompress(skb);
+	if (ret < 0)
+		return RX_DROP;
+
+	return RX_QUEUED;
+}
+
+static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
+{
+	lowpan_rx_result res;
+
+#define CALL_RXH(rxh)			\
+	do {				\
+		res = rxh(skb);	\
+		if (res != RX_CONTINUE)	\
+			goto rxh_next;	\
+	} while (0)
+
+	/* likely at first */
+	CALL_RXH(lowpan_frag_rx_h_iphc);
+	CALL_RXH(lowpan_rx_h_ipv6);
+
+rxh_next:
+	return lowpan_frag_rx_handlers_result(skb, res);
+#undef CALL_RXH
+}
+
+#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
+#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8
+
+static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
+			 struct lowpan_802154_cb *cb)
 {
 	bool fail;
-	u8 pattern = 0, low = 0;
+	u8 high = 0, low = 0;
 	__be16 d_tag = 0;
 
-	fail = lowpan_fetch_skb(skb, &pattern, 1);
+	fail = lowpan_fetch_skb(skb, &high, 1);
 	fail |= lowpan_fetch_skb(skb, &low, 1);
-	frag_info->d_size = (pattern & 7) << 8 | low;
+	/* remove the dispatch value and use the first three bits as the high
+	 * bits of the datagram size
+	 */
+	cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
+		LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
 	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
-	frag_info->d_tag = ntohs(d_tag);
+	cb->d_tag = ntohs(d_tag);
 
 	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
-		fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
+		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
 	} else {
 		skb_reset_network_header(skb);
-		frag_info->d_offset = 0;
+		cb->d_offset = 0;
+		/* check if datagram_size has ipv6hdr on FRAG1 */
+		fail |= cb->d_size < sizeof(struct ipv6hdr);
+		/* check if we can dereference the dispatch value */
+		fail |= !skb->len;
 	}
 
 	if (unlikely(fail))
@@ -351,27 +403,33 @@ static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
 	return 0;
 }
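
lowpan_get_cb() above pulls an 11-bit datagram size, a big-endian 16-bit tag and, for FRAGN, an 8-bit offset (in units of 8 octets) out of the fragment header. The same parsing as a standalone sketch over an invented FRAGN header:

#include <stdio.h>
#include <stdint.h>

#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8

int main(void)
{
	/* invented FRAGN header: dispatch/size-high, size-low, tag (be16), offset */
	uint8_t hdr[] = { 0xe4, 0xd2, 0x12, 0x34, 0x0a };
	unsigned int d_size, d_tag, d_offset;

	d_size = (hdr[0] & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK)
			<< LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | hdr[1];
	d_tag = (unsigned int)hdr[2] << 8 | hdr[3];	/* big endian on the wire */
	d_offset = hdr[4];				/* in units of 8 octets */

	printf("datagram size  %u\n", d_size);
	printf("datagram tag   0x%04x\n", d_tag);
	printf("offset         %u bytes\n", d_offset * 8);
	return 0;
}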
 
-int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
+int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
 {
 	struct lowpan_frag_queue *fq;
 	struct net *net = dev_net(skb->dev);
-	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
-	struct ieee802154_addr source, dest;
+	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
+	struct ieee802154_hdr hdr;
 	int err;
 
-	source = mac_cb(skb)->source;
-	dest = mac_cb(skb)->dest;
+	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
+		goto err;
 
-	err = lowpan_get_frag_info(skb, frag_type, frag_info);
+	err = lowpan_get_cb(skb, frag_type, cb);
 	if (err < 0)
 		goto err;
 
-	if (frag_info->d_size > IPV6_MIN_MTU) {
+	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
+		err = lowpan_invoke_frag_rx_handlers(skb);
+		if (err == NET_RX_DROP)
+			goto err;
+	}
+
+	if (cb->d_size > IPV6_MIN_MTU) {
 		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
 		goto err;
 	}
 
-	fq = fq_find(net, frag_info, &source, &dest);
+	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
 	if (fq != NULL) {
 		int ret;
 
@@ -387,7 +445,6 @@ err:
 	kfree_skb(skb);
 	return -1;
 }
-EXPORT_SYMBOL(lowpan_frag_rcv);
 
 #ifdef CONFIG_SYSCTL
 static int zero;

+ 275 - 77
net/ieee802154/6lowpan/rx.c

@@ -11,40 +11,99 @@
 #include <linux/if_arp.h>
 
 #include <net/6lowpan.h>
+#include <net/mac802154.h>
 #include <net/ieee802154_netdev.h>
 
 #include "6lowpan_i.h"
 
-static int lowpan_give_skb_to_device(struct sk_buff *skb,
-				     struct net_device *dev)
+#define LOWPAN_DISPATCH_FIRST		0xc0
+#define LOWPAN_DISPATCH_FRAG_MASK	0xf8
+
+#define LOWPAN_DISPATCH_NALP		0x00
+#define LOWPAN_DISPATCH_ESC		0x40
+#define LOWPAN_DISPATCH_HC1		0x42
+#define LOWPAN_DISPATCH_DFF		0x43
+#define LOWPAN_DISPATCH_BC0		0x50
+#define LOWPAN_DISPATCH_MESH		0x80
+
+static int lowpan_give_skb_to_device(struct sk_buff *skb)
 {
-	skb->dev = dev->ieee802154_ptr->lowpan_dev;
 	skb->protocol = htons(ETH_P_IPV6);
-	skb->pkt_type = PACKET_HOST;
 
 	return netif_rx(skb);
 }
 
-static int
-iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
+static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
+{
+	switch (res) {
+	case RX_CONTINUE:
+		/* nobody cared about this packet */
+		net_warn_ratelimited("%s: received unknown dispatch\n",
+				     __func__);
+
+		/* fall-through */
+	case RX_DROP_UNUSABLE:
+		kfree_skb(skb);
+
+		/* fall-through */
+	case RX_DROP:
+		return NET_RX_DROP;
+	case RX_QUEUED:
+		return lowpan_give_skb_to_device(skb);
+	default:
+		break;
+	}
+
+	return NET_RX_DROP;
+}
+
+static inline bool lowpan_is_frag1(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAG1;
+}
+
+static inline bool lowpan_is_fragn(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAGN;
+}
+
+static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
+{
+	int ret;
+
+	if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
+	      lowpan_is_fragn(*skb_network_header(skb))))
+		return RX_CONTINUE;
+
+	ret = lowpan_frag_rcv(skb, *skb_network_header(skb) &
+			      LOWPAN_DISPATCH_FRAG_MASK);
+	if (ret == 1)
+		return RX_QUEUED;
+
+	/* Packet is freed by lowpan_frag_rcv on error or put into the frag
+	 * bucket.
+	 */
+	return RX_DROP;
+}
+
+int lowpan_iphc_decompress(struct sk_buff *skb)
 {
-	u8 iphc0, iphc1;
 	struct ieee802154_addr_sa sa, da;
+	struct ieee802154_hdr hdr;
+	u8 iphc0, iphc1;
 	void *sap, *dap;
 
-	raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
-	/* at least two bytes will be used for the encoding */
-	if (skb->len < 2)
+	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
 		return -EINVAL;
 
-	if (lowpan_fetch_skb_u8(skb, &iphc0))
-		return -EINVAL;
+	raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);
 
-	if (lowpan_fetch_skb_u8(skb, &iphc1))
+	if (lowpan_fetch_skb_u8(skb, &iphc0) ||
+	    lowpan_fetch_skb_u8(skb, &iphc1))
 		return -EINVAL;
 
-	ieee802154_addr_to_sa(&sa, &hdr->source);
-	ieee802154_addr_to_sa(&da, &hdr->dest);
+	ieee802154_addr_to_sa(&sa, &hdr.source);
+	ieee802154_addr_to_sa(&da, &hdr.dest);
 
 	if (sa.addr_type == IEEE802154_ADDR_SHORT)
 		sap = &sa.short_addr;
@@ -61,77 +120,216 @@ iphc_decompress(struct sk_buff *skb, const struct ieee802154_hdr *hdr)
 					IEEE802154_ADDR_LEN, iphc0, iphc1);
 }
 
-static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
-		      struct packet_type *pt, struct net_device *orig_dev)
+static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
 {
-	struct ieee802154_hdr hdr;
 	int ret;
 
-	if (dev->type != ARPHRD_IEEE802154 ||
-	    !dev->ieee802154_ptr->lowpan_dev)
-		goto drop;
+	if (!lowpan_is_iphc(*skb_network_header(skb)))
+		return RX_CONTINUE;
 
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (!skb)
-		goto drop;
+	/* Setting the datagram size to zero indicates non-fragment handling
+	 * while doing lowpan_header_decompress.
+	 */
+	lowpan_802154_cb(skb)->d_size = 0;
 
-	if (!netif_running(dev))
-		goto drop_skb;
+	ret = lowpan_iphc_decompress(skb);
+	if (ret < 0)
+		return RX_DROP_UNUSABLE;
 
-	if (skb->pkt_type == PACKET_OTHERHOST)
-		goto drop_skb;
+	return RX_QUEUED;
+}
 
-	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
-		goto drop_skb;
-
-	/* check that it's our buffer */
-	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
-		/* Pull off the 1-byte of 6lowpan header. */
-		skb_pull(skb, 1);
-		return lowpan_give_skb_to_device(skb, dev);
-	} else {
-		switch (skb->data[0] & 0xe0) {
-		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
-			ret = iphc_decompress(skb, &hdr);
-			if (ret < 0)
-				goto drop_skb;
-
-			return lowpan_give_skb_to_device(skb, dev);
-		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
-			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
-			if (ret == 1) {
-				ret = iphc_decompress(skb, &hdr);
-				if (ret < 0)
-					goto drop_skb;
-
-				return lowpan_give_skb_to_device(skb, dev);
-			} else if (ret == -1) {
-				return NET_RX_DROP;
-			} else {
-				return NET_RX_SUCCESS;
-			}
-		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
-			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAGN);
-			if (ret == 1) {
-				ret = iphc_decompress(skb, &hdr);
-				if (ret < 0)
-					goto drop_skb;
-
-				return lowpan_give_skb_to_device(skb, dev);
-			} else if (ret == -1) {
-				return NET_RX_DROP;
-			} else {
-				return NET_RX_SUCCESS;
-			}
-		default:
-			break;
-		}
+lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb)
+{
+	if (!lowpan_is_ipv6(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	/* Pull off the 1-byte of 6lowpan header. */
+	skb_pull(skb, 1);
+	return RX_QUEUED;
+}
+
+static inline bool lowpan_is_esc(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_ESC;
+}
+
+static lowpan_rx_result lowpan_rx_h_esc(struct sk_buff *skb)
+{
+	if (!lowpan_is_esc(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN ESC not supported");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_hc1(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_HC1;
+}
+
+static lowpan_rx_result lowpan_rx_h_hc1(struct sk_buff *skb)
+{
+	if (!lowpan_is_hc1(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN HC1 not supported");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_dff(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_DFF;
+}
+
+static lowpan_rx_result lowpan_rx_h_dff(struct sk_buff *skb)
+{
+	if (!lowpan_is_dff(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN DFF not supported");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_bc0(u8 dispatch)
+{
+	return dispatch == LOWPAN_DISPATCH_BC0;
+}
+
+static lowpan_rx_result lowpan_rx_h_bc0(struct sk_buff *skb)
+{
+	if (!lowpan_is_bc0(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN BC0 not supported");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static inline bool lowpan_is_mesh(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_MESH;
+}
+
+static lowpan_rx_result lowpan_rx_h_mesh(struct sk_buff *skb)
+{
+	if (!lowpan_is_mesh(*skb_network_header(skb)))
+		return RX_CONTINUE;
+
+	net_warn_ratelimited("%s: %s\n", skb->dev->name,
+			     "6LoWPAN MESH not supported");
+
+	return RX_DROP_UNUSABLE;
+}
+
+static int lowpan_invoke_rx_handlers(struct sk_buff *skb)
+{
+	lowpan_rx_result res;
+
+#define CALL_RXH(rxh)			\
+	do {				\
+		res = rxh(skb);	\
+		if (res != RX_CONTINUE)	\
+			goto rxh_next;	\
+	} while (0)
+
+	/* likely at first */
+	CALL_RXH(lowpan_rx_h_iphc);
+	CALL_RXH(lowpan_rx_h_frag);
+	CALL_RXH(lowpan_rx_h_ipv6);
+	CALL_RXH(lowpan_rx_h_esc);
+	CALL_RXH(lowpan_rx_h_hc1);
+	CALL_RXH(lowpan_rx_h_dff);
+	CALL_RXH(lowpan_rx_h_bc0);
+	CALL_RXH(lowpan_rx_h_mesh);
+
+rxh_next:
+	return lowpan_rx_handlers_result(skb, res);
+#undef CALL_RXH
+}
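
The CALL_RXH() macro above strings the handlers into an ordered chain where each one either claims the frame or returns RX_CONTINUE to pass it on. A compact standalone sketch of that dispatch pattern; the handler bodies are trivial stand-ins keyed on the dispatch byte, not the real 6LoWPAN receive logic:

#include <stdio.h>
#include <stdint.h>

enum rx_result { RX_CONTINUE, RX_DROP_UNUSABLE, RX_DROP, RX_QUEUED };

/* stand-in handlers: each inspects the dispatch byte and either claims the
 * frame (RX_QUEUED/RX_DROP*) or passes it on (RX_CONTINUE)
 */
static enum rx_result rx_h_ipv6(uint8_t d) { return d == 0x41 ? RX_QUEUED : RX_CONTINUE; }
static enum rx_result rx_h_iphc(uint8_t d) { return (d & 0xe0) == 0x60 ? RX_QUEUED : RX_CONTINUE; }
static enum rx_result rx_h_mesh(uint8_t d) { return (d & 0xc0) == 0x80 ? RX_DROP_UNUSABLE : RX_CONTINUE; }

static int invoke_rx_handlers(uint8_t dispatch)
{
	enum rx_result (*handlers[])(uint8_t) = { rx_h_iphc, rx_h_ipv6, rx_h_mesh };
	enum rx_result res = RX_CONTINUE;

	for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		res = handlers[i](dispatch);
		if (res != RX_CONTINUE)
			break;
	}

	switch (res) {
	case RX_QUEUED:
		return 0;		/* delivered */
	case RX_CONTINUE:
		fprintf(stderr, "unknown dispatch 0x%02x\n", dispatch);
		/* fall through */
	default:
		return -1;		/* dropped */
	}
}

int main(void)
{
	const uint8_t samples[] = { 0x78, 0x41, 0x80, 0x50 };

	for (size_t i = 0; i < sizeof(samples); i++)
		printf("0x%02x -> %s\n", samples[i],
		       invoke_rx_handlers(samples[i]) ? "drop" : "queued");
	return 0;
}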
+
+static inline bool lowpan_is_nalp(u8 dispatch)
+{
+	return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_NALP;
+}
+
+/* Look up the reserved dispatch values at:
+ * https://www.iana.org/assignments/_6lowpan-parameters/_6lowpan-parameters.xhtml#_6lowpan-parameters-1
+ *
+ * Last Updated: 2015-01-22
+ */
+static inline bool lowpan_is_reserved(u8 dispatch)
+{
+	return ((dispatch >= 0x44 && dispatch <= 0x4F) ||
+		(dispatch >= 0x51 && dispatch <= 0x5F) ||
+		(dispatch >= 0xc8 && dispatch <= 0xdf) ||
+		(dispatch >= 0xe8 && dispatch <= 0xff));
+}
+
+/* lowpan_rx_h_check checks generic 6LoWPAN requirements in the MAC and
+ * 6LoWPAN headers.
+ *
+ * Don't manipulate the skb here, it could be a shared buffer.
+ */
+static inline bool lowpan_rx_h_check(struct sk_buff *skb)
+{
+	__le16 fc = ieee802154_get_fc_from_skb(skb);
+
+	/* check for an IEEE 802.15.4 conformant 6LoWPAN header */
+	if (!ieee802154_is_data(fc) ||
+	    !ieee802154_is_intra_pan(fc))
+		return false;
+
+	/* check if we can dereference the dispatch */
+	if (unlikely(!skb->len))
+		return false;
+
+	if (lowpan_is_nalp(*skb_network_header(skb)) ||
+	    lowpan_is_reserved(*skb_network_header(skb)))
+		return false;
+
+	return true;
+}
+
+static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
+		      struct packet_type *pt, struct net_device *orig_wdev)
+{
+	struct net_device *ldev;
+
+	if (wdev->type != ARPHRD_IEEE802154 ||
+	    skb->pkt_type == PACKET_OTHERHOST ||
+	    !lowpan_rx_h_check(skb))
+		return NET_RX_DROP;
+
+	ldev = wdev->ieee802154_ptr->lowpan_dev;
+	if (!ldev || !netif_running(ldev))
+		return NET_RX_DROP;
+
+	/* Replacing skb->dev and the rx handlers below will manipulate the skb. */
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return NET_RX_DROP;
+	skb->dev = ldev;
+
+	/* When receiving FRAG1 it is likely that we will manipulate the buffer.
+	 * When receiving IPHC we manipulate the data buffer, so we need
+	 * to unshare the buffer.
+	 */
+	if (lowpan_is_frag1(*skb_network_header(skb)) ||
+	    lowpan_is_iphc(*skb_network_header(skb))) {
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb)
+			return NET_RX_DROP;
 	}
 
-drop_skb:
-	kfree_skb(skb);
-drop:
-	return NET_RX_DROP;
+	return lowpan_invoke_rx_handlers(skb);
 }
 
 static struct packet_type lowpan_packet_type = {

+ 27 - 24
net/ieee802154/6lowpan/tx.c

@@ -36,7 +36,7 @@ lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
 			sizeof(struct lowpan_addr_info));
 }
 
-int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
 			 unsigned short type, const void *_daddr,
 			 const void *_saddr, unsigned int len)
 {
@@ -51,7 +51,7 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
 		return 0;
 
 	if (!saddr)
-		saddr = dev->dev_addr;
+		saddr = ldev->dev_addr;
 
 	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
 	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
@@ -73,22 +73,21 @@ static struct sk_buff*
 lowpan_alloc_frag(struct sk_buff *skb, int size,
 		  const struct ieee802154_hdr *master_hdr)
 {
-	struct net_device *real_dev = lowpan_dev_info(skb->dev)->real_dev;
+	struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
 	struct sk_buff *frag;
 	int rc;
 
-	frag = alloc_skb(real_dev->hard_header_len +
-			 real_dev->needed_tailroom + size,
+	frag = alloc_skb(wdev->hard_header_len + wdev->needed_tailroom + size,
 			 GFP_ATOMIC);
 
 	if (likely(frag)) {
-		frag->dev = real_dev;
+		frag->dev = wdev;
 		frag->priority = skb->priority;
-		skb_reserve(frag, real_dev->hard_header_len);
+		skb_reserve(frag, wdev->hard_header_len);
 		skb_reset_network_header(frag);
 		*mac_cb(frag) = *mac_cb(skb);
 
-		rc = dev_hard_header(frag, real_dev, 0, &master_hdr->dest,
+		rc = dev_hard_header(frag, wdev, 0, &master_hdr->dest,
 				     &master_hdr->source, size);
 		if (rc < 0) {
 			kfree_skb(frag);
@@ -123,19 +122,17 @@ lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
 }
 
 static int
-lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *dev,
-		       const struct ieee802154_hdr *wpan_hdr)
+lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
+		       const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
+		       u16 dgram_offset)
 {
-	u16 dgram_size, dgram_offset;
 	__be16 frag_tag;
 	u8 frag_hdr[5];
 	int frag_cap, frag_len, payload_cap, rc;
 	int skb_unprocessed, skb_offset;
 
-	dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
-		     skb->mac_len;
-	frag_tag = htons(lowpan_dev_info(dev)->fragment_tag);
-	lowpan_dev_info(dev)->fragment_tag++;
+	frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag);
+	lowpan_dev_info(ldev)->fragment_tag++;
 
 	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
 	frag_hdr[1] = dgram_size & 0xff;
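
The two context lines above encode the 11-bit datagram size into the first two bytes of the FRAG1 header; FRAGN uses the same layout plus an offset byte. A standalone sketch building both headers for an invented datagram, as the transmit-side counterpart of the parsing sketch shown with the reassembly changes:

#include <stdio.h>
#include <stdint.h>

#define LOWPAN_DISPATCH_FRAG1	0xc0
#define LOWPAN_DISPATCH_FRAGN	0xe0

int main(void)
{
	uint16_t dgram_size = 1234;	/* invented uncompressed datagram size */
	uint16_t frag_tag = 0x1234;	/* invented tag, big endian on the wire */
	uint8_t frag_hdr[5];

	/* FRAG1: dispatch + size high bits, size low byte, tag */
	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	frag_hdr[1] = dgram_size & 0xff;
	frag_hdr[2] = frag_tag >> 8;
	frag_hdr[3] = frag_tag & 0xff;

	printf("FRAG1: %02x %02x %02x %02x\n",
	       frag_hdr[0], frag_hdr[1], frag_hdr[2], frag_hdr[3]);

	/* FRAGN additionally carries the datagram offset in 8-octet units */
	frag_hdr[0] = LOWPAN_DISPATCH_FRAGN | ((dgram_size >> 8) & 0x07);
	frag_hdr[4] = 80 / 8;		/* e.g. this fragment starts at byte 80 */

	printf("FRAGN: %02x %02x %02x %02x %02x\n",
	       frag_hdr[0], frag_hdr[1], frag_hdr[2], frag_hdr[3], frag_hdr[4]);
	return 0;
}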
@@ -188,9 +185,10 @@ err:
 	return rc;
 }
 
-static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
+static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
+			 u16 *dgram_size, u16 *dgram_offset)
 {
-	struct wpan_dev *wpan_dev = lowpan_dev_info(dev)->real_dev->ieee802154_ptr;
+	struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr;
 	struct ieee802154_addr sa, da;
 	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
 	struct lowpan_addr_info info;
@@ -202,7 +200,10 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
 	daddr = &info.daddr.u.extended_addr;
 	saddr = &info.saddr.u.extended_addr;
 
-	lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
+	*dgram_size = skb->len;
+	lowpan_header_compress(skb, ldev, ETH_P_IPV6, daddr, saddr, skb->len);
+	/* dgram_offset = (saved bytes after compression) + lowpan header len */
+	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
 
 	cb->type = IEEE802154_FC_TYPE_DATA;
 
@@ -227,14 +228,15 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
 		cb->ackreq = wpan_dev->ackreq;
 	}
 
-	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
-			ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
+	return dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, ETH_P_IPV6,
+			       (void *)&da, (void *)&sa, 0);
 }
 
-netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
 {
 	struct ieee802154_hdr wpan_hdr;
 	int max_single, ret;
+	u16 dgram_size, dgram_offset;
 
 	pr_debug("package xmit\n");
 
@@ -245,7 +247,7 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!skb)
 		return NET_XMIT_DROP;
 
-	ret = lowpan_header(skb, dev);
+	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
 	if (ret < 0) {
 		kfree_skb(skb);
 		return NET_XMIT_DROP;
@@ -259,13 +261,14 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 	max_single = ieee802154_max_payload(&wpan_hdr);
 
 	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
-		skb->dev = lowpan_dev_info(dev)->real_dev;
+		skb->dev = lowpan_dev_info(ldev)->wdev;
 		return dev_queue_xmit(skb);
 	} else {
 		netdev_tx_t rc;
 
 		pr_debug("frame is too big, fragmentation is needed\n");
-		rc = lowpan_xmit_fragmented(skb, dev, &wpan_hdr);
+		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
+					    dgram_offset);
 
 		return rc < 0 ? NET_XMIT_DROP : rc;
 	}