
Merge tag 'iwlwifi-next-for-kalle-2015-08-18' of https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* polish the Miracast operation
* fix a few power consumption issues
* scan cleanup
* fixes for D0i3 system state
* add paging for devices that support it
* add again the new RBD allocation model
* add more options to the firmware debug system
* add support for frag SKBs in Tx
Kalle Valo 10 years ago
parent
commit
a6bf49db8c
35 changed files with 1,599 additions and 463 deletions
  1. drivers/net/wireless/iwlwifi/dvm/agn.h (+1 -1)
  2. drivers/net/wireless/iwlwifi/dvm/debugfs.c (+2 -6)
  3. drivers/net/wireless/iwlwifi/dvm/main.c (+0 -13)
  4. drivers/net/wireless/iwlwifi/dvm/rx.c (+2 -1)
  5. drivers/net/wireless/iwlwifi/iwl-7000.c (+1 -1)
  6. drivers/net/wireless/iwlwifi/iwl-8000.c (+6 -3)
  7. drivers/net/wireless/iwlwifi/iwl-csr.h (+2 -0)
  8. drivers/net/wireless/iwlwifi/iwl-devtrace-data.h (+4 -3)
  9. drivers/net/wireless/iwlwifi/iwl-drv.c (+71 -1)
 10. drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c (+2 -2)
 11. drivers/net/wireless/iwlwifi/iwl-fh.h (+0 -6)
 12. drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h (+17 -0)
 13. drivers/net/wireless/iwlwifi/iwl-fw-file.h (+33 -1)
 14. drivers/net/wireless/iwlwifi/iwl-fw.h (+68 -0)
 15. drivers/net/wireless/iwlwifi/iwl-op-mode.h (+4 -22)
 16. drivers/net/wireless/iwlwifi/iwl-prph.h (+8 -0)
 17. drivers/net/wireless/iwlwifi/iwl-trans.h (+32 -4)
 18. drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c (+3 -3)
 19. drivers/net/wireless/iwlwifi/mvm/debugfs.c (+2 -7)
 20. drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h (+12 -0)
 21. drivers/net/wireless/iwlwifi/mvm/fw-api.h (+50 -0)
 22. drivers/net/wireless/iwlwifi/mvm/fw.c (+352 -6)
 23. drivers/net/wireless/iwlwifi/mvm/mac80211.c (+28 -9)
 24. drivers/net/wireless/iwlwifi/mvm/mvm.h (+11 -4)
 25. drivers/net/wireless/iwlwifi/mvm/ops.c (+35 -28)
 26. drivers/net/wireless/iwlwifi/mvm/power.c (+4 -28)
 27. drivers/net/wireless/iwlwifi/mvm/rs.c (+2 -1)
 28. drivers/net/wireless/iwlwifi/mvm/rx.c (+5 -3)
 29. drivers/net/wireless/iwlwifi/mvm/scan.c (+116 -106)
 30. drivers/net/wireless/iwlwifi/mvm/tof.c (+1 -1)
 31. drivers/net/wireless/iwlwifi/mvm/tx.c (+6 -4)
 32. drivers/net/wireless/iwlwifi/pcie/internal.h (+49 -8)
 33. drivers/net/wireless/iwlwifi/pcie/rx.c (+393 -82)
 34. drivers/net/wireless/iwlwifi/pcie/trans.c (+206 -95)
 35. drivers/net/wireless/iwlwifi/pcie/tx.c (+71 -14)

+ 1 - 1
drivers/net/wireless/iwlwifi/dvm/agn.h

@@ -122,7 +122,7 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
 void iwl_down(struct iwl_priv *priv);
 void iwl_cancel_deferred_work(struct iwl_priv *priv);
 void iwlagn_prepare_restart(struct iwl_priv *priv);
-void iwl_rx_dispatch(struct iwl_op_mode *op_mode,
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
 		     struct iwl_rx_cmd_buffer *rxb);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);

+ 2 - 6
drivers/net/wireless/iwlwifi/dvm/debugfs.c

@@ -310,12 +310,8 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
 	pos += scnprintf(buf + pos, buf_size - pos,
 			 "NVM version: 0x%x\n", nvm_ver);
 	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
-		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
-				   buf_size - pos, 0);
-		pos += strlen(buf + pos);
-		if (buf_size - pos > 0)
-			buf[pos++] = '\n';
+		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+				 ofs, ptr + ofs);
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
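Note: "%16ph" is the kernel's built-in printf extension for hex dumps; it prints 16 bytes as space-separated hex, replacing the scnprintf()/hex_dump_to_buffer()/strlen() sequence removed above. A minimal sketch of the resulting pattern (the helper name is illustrative, and it assumes len is a multiple of 16, as eeprom_len is here):

	/* Illustrative only: dump a buffer 16 bytes per line via %*ph. */
	static int hexdump_to_buf(char *buf, size_t bufsz, const u8 *ptr, size_t len)
	{
		int pos = 0;
		size_t ofs;

		for (ofs = 0; ofs < len; ofs += 16)
			pos += scnprintf(buf + pos, bufsz - pos,
					 "0x%.4x %16ph\n", (unsigned int)ofs, ptr + ofs);
		return pos;
	}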

+ 0 - 13
drivers/net/wireless/iwlwifi/dvm/main.c

@@ -2029,18 +2029,6 @@ static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
 	return false;
 }
 
-static void iwl_napi_add(struct iwl_op_mode *op_mode,
-			 struct napi_struct *napi,
-			 struct net_device *napi_dev,
-			 int (*poll)(struct napi_struct *, int),
-			 int weight)
-{
-	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
-	netif_napi_add(napi_dev, napi, poll, weight);
-	priv->napi = napi;
-}
-
 static const struct iwl_op_mode_ops iwl_dvm_ops = {
 	.start = iwl_op_mode_dvm_start,
 	.stop = iwl_op_mode_dvm_stop,
@@ -2053,7 +2041,6 @@ static const struct iwl_op_mode_ops iwl_dvm_ops = {
 	.cmd_queue_full = iwl_cmd_queue_full,
 	.nic_config = iwl_nic_config,
 	.wimax_active = iwl_wimax_active,
-	.napi_add = iwl_napi_add,
 };
 
 /*****************************************************************************

+ 2 - 1
drivers/net/wireless/iwlwifi/dvm/rx.c

@@ -1073,7 +1073,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
 		iwlagn_bt_rx_handler_setup(priv);
 }
 
-void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);

+ 1 - 1
drivers/net/wireless/iwlwifi/iwl-7000.c

@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 #include "iwl-agn-hw.h"
 
 
 /* Highest firmware API version supported */
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX	15
+#define IWL7260_UCODE_API_MAX	16
 
 
 /* Oldest version we won't warn about */
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	12
 #define IWL7260_UCODE_API_OK	12

+ 6 - 3
drivers/net/wireless/iwlwifi/iwl-8000.c

@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 #include "iwl-agn-hw.h"
 
 
 /* Highest firmware API version supported */
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	15
+#define IWL8000_UCODE_API_MAX	16
 
 
 /* Oldest version we won't warn about */
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK	12
 #define IWL8000_UCODE_API_OK	12
@@ -97,8 +97,9 @@
 #define DEFAULT_NVM_FILE_FAMILY_8000B		"nvmData-8000B"
 #define DEFAULT_NVM_FILE_FAMILY_8000B		"nvmData-8000B"
 #define DEFAULT_NVM_FILE_FAMILY_8000C		"nvmData-8000C"
 #define DEFAULT_NVM_FILE_FAMILY_8000C		"nvmData-8000C"
 
 
-/* Max SDIO RX aggregation size of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO	28
+/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO	21
+#define MAX_TX_AGG_SIZE_8260_SDIO	40
 
 
 /* Max A-MPDU exponent for HT and VHT */
 /* Max A-MPDU exponent for HT and VHT */
 #define MAX_HT_AMPDU_EXPONENT_8260_SDIO	IEEE80211_HT_MAX_AMPDU_32K
 #define MAX_HT_AMPDU_EXPONENT_8260_SDIO	IEEE80211_HT_MAX_AMPDU_32K
@@ -204,6 +205,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+	.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
 	.disable_dummy_notification = true,
 	.disable_dummy_notification = true,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
 	.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
 	.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
@@ -217,6 +219,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+	.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
 	.bt_shared_single_ant = true,
 	.bt_shared_single_ant = true,
 	.disable_dummy_notification = true,
 	.disable_dummy_notification = true,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,

+ 2 - 0
drivers/net/wireless/iwlwifi/iwl-csr.h

@@ -200,6 +200,7 @@
 #define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
 #define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
 #define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_PAGING       (1 << 24) /* SDIO PAGING */
 #define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
 #define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
 #define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses */
@@ -210,6 +211,7 @@
 				 CSR_INT_BIT_HW_ERR  | \
 				 CSR_INT_BIT_FH_TX   | \
 				 CSR_INT_BIT_SW_ERR  | \
+				 CSR_INT_BIT_PAGING  | \
 				 CSR_INT_BIT_RF_KILL | \
 				 CSR_INT_BIT_SW_RX   | \
 				 CSR_INT_BIT_WAKEUP  | \
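The new CSR_INT_BIT_PAGING cause bit is consumed like the existing bits in this mask; a hedged illustration of the usual test-the-cause-bit pattern (the handler helper is hypothetical, not code from this series):

	/* Sketch: check the interrupt cause word for the SDIO paging bit. */
	u32 inta = iwl_read32(trans, CSR_INT);

	if (inta & CSR_INT_BIT_PAGING)
		handle_fw_paging_request(trans);	/* hypothetical helper */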

+ 4 - 3
drivers/net/wireless/iwlwifi/iwl-devtrace-data.h

@@ -35,8 +35,8 @@
 TRACE_EVENT(iwlwifi_dev_tx_data,
 	TP_PROTO(const struct device *dev,
 		 struct sk_buff *skb,
-		 void *data, size_t data_len),
-	TP_ARGS(dev, skb, data, data_len),
+		 u8 hdr_len, size_t data_len),
+	TP_ARGS(dev, skb, hdr_len, data_len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
 
@@ -45,7 +45,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data,
 	TP_fast_assign(
 		DEV_ASSIGN;
 		if (iwl_trace_data(skb))
-			memcpy(__get_dynamic_array(data), data, data_len);
+			skb_copy_bits(skb, hdr_len,
+				      __get_dynamic_array(data), data_len);
 	),
 	TP_printk("[%s] TX frame data", __get_str(dev))
 );
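Switching from memcpy() to skb_copy_bits() matters once frag SKBs are transmitted (see max_skb_frags in iwl-trans.h below): skb_copy_bits() copies from the linear area and the paged fragments, while memcpy() from skb->data only sees the linear part. A small sketch of the distinction (the local buffer is illustrative):

	u8 copy[64];
	int n = min_t(int, skb->len - hdr_len, sizeof(copy));

	/* correct even for nonlinear (NETIF_F_SG) skbs */
	skb_copy_bits(skb, hdr_len, copy, n);
	/* memcpy(copy, skb->data + hdr_len, n) could read past the linear data */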

+ 71 - 1
drivers/net/wireless/iwlwifi/iwl-drv.c

@@ -372,6 +372,30 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
 	return 0;
 }
 
+static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+				const u32 len)
+{
+	struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
+	struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
+
+	if (len < sizeof(*fw_capa))
+		return -EINVAL;
+
+	capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
+	capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
+	capa->max_ap_cache_per_scan =
+		le32_to_cpu(fw_capa->max_ap_cache_per_scan);
+	capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
+	capa->max_scan_reporting_threshold =
+		le32_to_cpu(fw_capa->max_scan_reporting_threshold);
+	capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
+	capa->max_significant_change_aps =
+		le32_to_cpu(fw_capa->max_significant_change_aps);
+	capa->max_bssid_history_entries =
+		le32_to_cpu(fw_capa->max_bssid_history_entries);
+	return 0;
+}
+
 /*
  * Gets uCode section from tlv.
  */
@@ -573,13 +597,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 	size_t len = ucode_raw->size;
 	const u8 *data;
 	u32 tlv_len;
+	u32 usniffer_img;
 	enum iwl_ucode_tlv_type tlv_type;
 	const u8 *tlv_data;
 	char buildstr[25];
-	u32 build;
+	u32 build, paging_mem_size;
 	int num_of_cpus;
 	bool usniffer_images = false;
 	bool usniffer_req = false;
+	bool gscan_capa = false;
 
 	if (len < sizeof(*ucode)) {
 		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -955,12 +981,46 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 					    IWL_UCODE_REGULAR_USNIFFER,
 					    tlv_len);
 			break;
+		case IWL_UCODE_TLV_PAGING:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+			IWL_DEBUG_FW(drv,
+				     "Paging: paging enabled (size = %u bytes)\n",
+				     paging_mem_size);
+
+			if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+				IWL_ERR(drv,
+					"Paging: driver supports up to %lu bytes for paging image\n",
+					MAX_PAGING_IMAGE_SIZE);
+				return -EINVAL;
+			}
+
+			if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+				IWL_ERR(drv,
+					"Paging: image isn't multiple %lu\n",
+					FW_PAGING_SIZE);
+				return -EINVAL;
+			}
+
+			drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+				paging_mem_size;
+			usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+			drv->fw.img[usniffer_img].paging_mem_size =
+				paging_mem_size;
+			break;
 		case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
 			if (tlv_len != sizeof(u32))
 				goto invalid_tlv_len;
 			drv->fw.sdio_adma_addr =
 				le32_to_cpup((__le32 *)tlv_data);
 			break;
+		case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+			if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
+				goto invalid_tlv_len;
+			gscan_capa = true;
+			break;
 		default:
 			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
 			break;
@@ -979,6 +1039,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 		return -EINVAL;
 	}
 
+	/*
+	 * If ucode advertises that it supports GSCAN but GSCAN
+	 * capabilities TLV is not present, warn and continue without GSCAN.
+	 */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+	    WARN(!gscan_capa,
+		 "GSCAN is supported but capabilities TLV is unavailable\n"))
+		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
+			    capa->_capa);
+
 	return 0;
 
  invalid_tlv_len:
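The added IWL_UCODE_TLV_PAGING checks rely on FW_PAGING_SIZE being a power of two: "paging_mem_size & (FW_PAGING_SIZE - 1)" is non-zero exactly when the image size is not 4 KB aligned. A worked example with illustrative values:

	/* FW_PAGING_SIZE == 4096, so FW_PAGING_SIZE - 1 == 0x0fff */
	/* 0x28000  & 0x0fff == 0     -> 160 KB, page aligned, accepted */
	/* 0x28100  & 0x0fff == 0x100 -> not 4 KB aligned, -EINVAL      */
	/* 0x140000 > MAX_PAGING_IMAGE_SIZE (1 MB)          -> -EINVAL  */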

+ 2 - 2
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c

@@ -713,12 +713,12 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
 	struct ieee80211_channel *chan = &data->channels[0];
 	int n = 0, idx = 0;
 
-	while (chan->band != band && idx < n_channels)
+	while (idx < n_channels && chan->band != band)
 		chan = &data->channels[++idx];
 
 	sband->channels = &data->channels[idx];
 
-	while (chan->band == band && idx < n_channels) {
+	while (idx < n_channels && chan->band == band) {
 		chan = &data->channels[++idx];
 		n++;
 	}
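The operand swap above is an out-of-bounds fix rather than a cleanup: with "&&" short-circuiting, "idx < n_channels" is now evaluated first, so "chan->band" is never read once idx has walked one past the last entry of data->channels.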

+ 0 - 6
drivers/net/wireless/iwlwifi/iwl-fh.h

@@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /**
  * struct iwl_rb_status - reserve buffer status
  * 	host memory mapped FH registers

+ 17 - 0
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h

@@ -84,6 +84,8 @@
  * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
  * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
  *	Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ *	&struct iwl_fw_error_dump_rb
  */
 enum iwl_fw_error_dump_type {
 	/* 0 is deprecated */
@@ -97,6 +99,7 @@ enum iwl_fw_error_dump_type {
 	IWL_FW_ERROR_DUMP_FH_REGS = 8,
 	IWL_FW_ERROR_DUMP_MEM = 9,
 	IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+	IWL_FW_ERROR_DUMP_RB = 11,
 
 	IWL_FW_ERROR_DUMP_MAX,
 };
@@ -222,6 +225,20 @@ struct iwl_fw_error_dump_mem {
 	u8 data[];
 };
 
+/**
+ * struct iwl_fw_error_dump_rb - content of an Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved:
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+	__le32 index;
+	__le32 rxq;
+	__le32 reserved;
+	u8 data[];
+};
+
 /**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block

+ 33 - 1
drivers/net/wireless/iwlwifi/iwl-fw-file.h

@@ -132,12 +132,14 @@ enum iwl_ucode_tlv_type {
 	IWL_UCODE_TLV_API_CHANGES_SET	= 29,
 	IWL_UCODE_TLV_ENABLED_CAPABILITIES	= 30,
 	IWL_UCODE_TLV_N_SCAN_CHANNELS		= 31,
+	IWL_UCODE_TLV_PAGING		= 32,
 	IWL_UCODE_TLV_SEC_RT_USNIFFER	= 34,
 	IWL_UCODE_TLV_SDIO_ADMA_ADDR	= 35,
 	IWL_UCODE_TLV_FW_VERSION	= 36,
 	IWL_UCODE_TLV_FW_DBG_DEST	= 38,
 	IWL_UCODE_TLV_FW_DBG_CONF	= 39,
 	IWL_UCODE_TLV_FW_DBG_TRIGGER	= 40,
+	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
 };
 
 struct iwl_ucode_tlv {
@@ -305,6 +307,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  *	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
  *	is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  */
 enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= (__force iwl_ucode_tlv_capa_t)0,
@@ -326,6 +329,7 @@ enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_BT_COEX_PLCR			= (__force iwl_ucode_tlv_capa_t)28,
 	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= (__force iwl_ucode_tlv_capa_t)29,
 	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
+	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT		= (__force iwl_ucode_tlv_capa_t)31,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -343,8 +347,9 @@ enum iwl_ucode_tlv_capa {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 12
+#define IWL_UCODE_SECTION_MAX 16
 #define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION	0xAAAABBBB
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
@@ -493,10 +498,13 @@ struct iwl_fw_dbg_conf_hcmd {
  *
  * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
  * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
+ *	collect only monitor data
  */
 enum iwl_fw_dbg_trigger_mode {
 	IWL_FW_DBG_TRIGGER_START = BIT(0),
 	IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+	IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
 };
 
 /**
@@ -726,4 +734,28 @@ struct iwl_fw_dbg_conf_tlv {
 	struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
+/**
+ * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold. in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *	change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *	hold.
+ */
+struct iwl_fw_gscan_capabilities {
+	__le32 max_scan_cache_size;
+	__le32 max_scan_buckets;
+	__le32 max_ap_cache_per_scan;
+	__le32 max_rssi_sample_size;
+	__le32 max_scan_reporting_threshold;
+	__le32 max_hotlist_aps;
+	__le32 max_significant_change_aps;
+	__le32 max_bssid_history_entries;
+} __packed;
+
 #endif  /* __iwl_fw_file_h__ */

+ 68 - 0
drivers/net/wireless/iwlwifi/iwl-fw.h

@@ -133,6 +133,7 @@ struct fw_desc {
 struct fw_img {
 	struct fw_desc sec[IWL_UCODE_SECTION_MAX];
 	bool is_dual_cpus;
+	u32 paging_mem_size;
 };
 
 struct iwl_sf_region {
@@ -140,6 +141,48 @@ struct iwl_sf_region {
 	u32 size;
 };
 
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS	0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging
+ * @fw_paging_phys: page phy pointer
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: page size
+ */
+struct iwl_fw_paging {
+	dma_addr_t fw_paging_phys;
+	struct page *fw_paging_block;
+	u32 fw_paging_size;
+};
+
 /**
  * struct iwl_fw_cscheme_list - a cipher scheme list
  * @size: a number of entries
@@ -150,6 +193,30 @@ struct iwl_fw_cscheme_list {
 	struct iwl_fw_cipher_scheme cs[];
 } __packed;
 
+/**
+ * struct iwl_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold. in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *	change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *	hold.
+ */
+struct iwl_gscan_capabilities {
+	u32 max_scan_cache_size;
+	u32 max_scan_buckets;
+	u32 max_ap_cache_per_scan;
+	u32 max_rssi_sample_size;
+	u32 max_scan_reporting_threshold;
+	u32 max_hotlist_aps;
+	u32 max_significant_change_aps;
+	u32 max_bssid_history_entries;
+};
+
 /**
  * struct iwl_fw - variables associated with the firmware
  *
@@ -208,6 +275,7 @@ struct iwl_fw {
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
+	struct iwl_gscan_capabilities gscan_capa;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
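For reference, the paging geometry implied by the constants added in this file works out as follows (derived arithmetic only, not new driver code):

	/* FW_PAGING_SIZE         = BIT(12)    = 4096 bytes  (one 4 KB page)    */
	/* NUM_OF_PAGE_PER_GROUP  = BIT(3)     = 8 pages per block              */
	/* PAGING_BLOCK_SIZE      = 8 * 4096   = 32768 bytes (one 32 KB block)  */
	/* NUM_OF_BLOCK_PER_IMAGE = BIT(5)     = 32 blocks                      */
	/* MAX_PAGING_IMAGE_SIZE  = 32 * 32768 = 1048576 bytes (1 MB max image) */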

+ 4 - 22
drivers/net/wireless/iwlwifi/iwl-op-mode.h

@@ -116,10 +116,6 @@ struct iwl_cfg;
  *	May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
  *	HCMD this Rx responds to. Can't sleep.
- * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
- *	but the higher layers need to know about it (in particular mac80211 to
- *	to able to call the right NAPI RX functions); this function is needed
- *	to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *	Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -148,12 +144,8 @@ struct iwl_op_mode_ops {
 				     const struct iwl_fw *fw,
 				     struct dentry *dbgfs_dir);
 	void (*stop)(struct iwl_op_mode *op_mode);
-	void (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb);
-	void (*napi_add)(struct iwl_op_mode *op_mode,
-			 struct napi_struct *napi,
-			 struct net_device *napi_dev,
-			 int (*poll)(struct napi_struct *, int),
-			 int weight);
+	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		   struct iwl_rx_cmd_buffer *rxb);
 	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -188,9 +180,10 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
 }
 
 static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+				  struct napi_struct *napi,
 				  struct iwl_rx_cmd_buffer *rxb)
 {
-	return op_mode->ops->rx(op_mode, rxb);
+	return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
@@ -258,15 +251,4 @@ static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
 	return op_mode->ops->exit_d0i3(op_mode);
 }
 
-static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
-					struct napi_struct *napi,
-					struct net_device *napi_dev,
-					int (*poll)(struct napi_struct *, int),
-					int weight)
-{
-	if (!op_mode->ops->napi_add)
-		return;
-	op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
-}
-
 #endif /* __iwl_op_mode_h__ */

+ 8 - 0
drivers/net/wireless/iwlwifi/iwl-prph.h

@@ -383,6 +383,8 @@ enum aux_misc_master1_en {
 #define AUX_MISC_MASTER1_SMPHR_STATUS	0xA20800
 #define RSA_ENABLE			0xA24B08
 #define PREG_AUX_BUS_WPROT_0		0xA04CC0
+#define SB_CPU_1_STATUS			0xA01E30
+#define SB_CPU_2_STATUS			0xA01E34
 
 /* FW chicken bits */
 #define LMPM_CHICK			0xA01FF8
@@ -390,4 +392,10 @@ enum {
 	LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
 };
 
+/* FW chicken bits */
+#define LMPM_PAGE_PASS_NOTIF			0xA03824
+enum {
+	LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
 #endif				/* __iwl_prph_h__ */

+ 32 - 4
drivers/net/wireless/iwlwifi/iwl-trans.h

@@ -248,6 +248,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
  *	(i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
+ *	check that we leave enough room for the TBs bitmap which needs 20 bits.
  */
 enum CMD_MODE {
 	CMD_ASYNC		= BIT(0),
@@ -257,6 +259,8 @@ enum CMD_MODE {
 	CMD_SEND_IN_IDLE	= BIT(4),
 	CMD_MAKE_TRANS_IDLE	= BIT(5),
 	CMD_WAKE_UP_TRANS	= BIT(6),
+
+	CMD_TB_BITMAP_POS	= 11,
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -604,7 +608,9 @@ struct iwl_trans_ops {
 	int  (*suspend)(struct iwl_trans *trans);
 	void (*resume)(struct iwl_trans *trans);
 
-	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
+	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+						 struct iwl_fw_dbg_trigger_tlv
+						 *trigger);
 };
 
 /**
@@ -641,6 +647,8 @@ enum iwl_d0i3_mode {
  * @cfg - pointer to the configuration
  * @status: a bit-mask of transport status flags
  * @dev - pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
  * @hw_id: a u32 with the ID of the device / sub-device.
  *	Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -660,6 +668,12 @@ enum iwl_d0i3_mode {
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @paging_req_addr: The location were the FW will upload / download the pages
+ *	from. The address is set by the opmode
+ * @paging_db: Pointer to the opmode paging data base, the pointer is set by
+ *	the opmode.
+ * @paging_download_buf: Buffer used for copying all of the pages before
+ *	downloading them to the FW. The buffer is allocated in the opmode
  */
 struct iwl_trans {
 	const struct iwl_trans_ops *ops;
@@ -669,6 +683,7 @@ struct iwl_trans {
 	unsigned long status;
 
 	struct device *dev;
+	u32 max_skb_frags;
 	u32 hw_rev;
 	u32 hw_id;
 	char hw_id_str[52];
@@ -696,6 +711,14 @@ struct iwl_trans {
 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
 	u8 dbg_dest_reg_num;
 
+	/*
+	 * Paging parameters - All of the parameters should be set by the
+	 * opmode when paging is enabled
+	 */
+	u32 paging_req_addr;
+	struct iwl_fw_paging *paging_db;
+	void *paging_download_buf;
+
 	enum iwl_d0i3_mode d0i3_mode;
 
 	bool wowlan_d0i3;
@@ -787,7 +810,8 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
 {
 	might_sleep();
-	trans->ops->d3_suspend(trans, test);
+	if (trans->ops->d3_suspend)
+		trans->ops->d3_suspend(trans, test);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@@ -795,6 +819,9 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
 				      bool test)
 {
 	might_sleep();
+	if (!trans->ops->d3_resume)
+		return 0;
+
 	return trans->ops->d3_resume(trans, status, test);
 }
 
@@ -825,11 +852,12 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
 }
 
 static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans)
+iwl_trans_dump_data(struct iwl_trans *trans,
+		    struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 	if (!trans->ops->dump_data)
 		return NULL;
-	return trans->ops->dump_data(trans);
+	return trans->ops->dump_data(trans, trigger);
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,

+ 3 - 3
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c

@@ -911,9 +911,9 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
 		int size = sizeof(struct iwl_tof_range_req_ap_entry);
 		u16 burst_period;
 		u8 *mac = ap.bssid;
-		int i;
+		unsigned int i;
 
-		if (sscanf(data, "%d %hhd %hhx %hhx"
+		if (sscanf(data, "%u %hhd %hhx %hhx"
 			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
 			   "%hhx %hhx %hx"
 			   "%hhx %hhx %x"
@@ -929,7 +929,7 @@ static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
 			ret = -EINVAL;
 			goto out;
 		}
-		if (i > IWL_MVM_TOF_MAX_APS) {
+		if (i >= IWL_MVM_TOF_MAX_APS) {
 			IWL_ERR(mvm, "Invalid AP index %d\n", i);
 			ret = -EINVAL;
 			goto out;

+ 2 - 7
drivers/net/wireless/iwlwifi/mvm/debugfs.c

@@ -974,7 +974,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
 	if (ret)
 		return ret;
 
-	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
+	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
 
 	iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
@@ -1200,12 +1200,7 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
 	if (ptr) {
 		for (ofs = 0; ofs < len; ofs += 16) {
 			pos += scnprintf(buf + pos, bufsz - pos,
-					 "0x%.4x ", ofs);
-			hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
-					   bufsz - pos, false);
-			pos += strlen(buf + pos);
-			if (bufsz - pos > 0)
-				buf[pos++] = '\n';
+					 "0x%.4x %16ph\n", ofs, ptr + ofs);
 		}
 	} else {
 		pos += scnprintf(buf + pos, bufsz - pos,

+ 12 - 0
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h

@@ -124,6 +124,18 @@ enum iwl_tx_flags {
 	TX_CMD_FLG_HCCA_CHUNK		= BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
+ * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
+ */
+enum iwl_tx_pm_timeouts {
+	PM_FRAME_NONE		= 0,
+	PM_FRAME_MGMT		= 2,
+	PM_FRAME_ASSOC		= 3,
+};
+
 /*
  * TX command security control
  */

+ 50 - 0
drivers/net/wireless/iwlwifi/mvm/fw-api.h

@@ -120,6 +120,9 @@ enum {
 	ADD_STA = 0x18,
 	REMOVE_STA = 0x19,
 
+	/* paging get item */
+	FW_GET_ITEM_CMD = 0x1a,
+
 	/* TX */
 	TX_CMD = 0x1c,
 	TXPATH_FLUSH = 0x1e,
@@ -149,6 +152,9 @@
 
 	LQ_CMD = 0x4e,
 
+	/* paging block to FW cpu2 */
+	FW_PAGING_BLOCK_CMD = 0x4f,
+
 	/* Scan offload */
 	SCAN_OFFLOAD_REQUEST_CMD = 0x51,
 	SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -370,6 +376,50 @@ struct iwl_nvm_access_cmd {
 	u8 data[];
 } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
 
+#define NUM_OF_FW_PAGING_BLOCKS	33 /* 32 for data and 1 block for CSS */
+
+/*
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size in powers of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+*/
+struct iwl_fw_paging_cmd {
+	__le32 flags;
+	__le32 block_size;
+	__le32 block_num;
+	__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/*
+ * Fw items ID's
+ *
+ * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload
+ *	download
+ */
+enum iwl_fw_item_id {
+	IWL_FW_ITEM_ID_PAGING = 3,
+};
+
+/*
+ * struct iwl_fw_get_item_cmd - get an item from the fw
+ */
+struct iwl_fw_get_item_cmd {
+	__le32 item_id;
+} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
+
+struct iwl_fw_get_item_resp {
+	__le32 item_id;
+	__le32 item_byte_cnt;
+	__le32 item_val;
+} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
+
 /**
  * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
  * @offset: offset in bytes into the section

+ 352 - 6
drivers/net/wireless/iwlwifi/mvm/fw.c

@@ -106,6 +106,306 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
 				    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
+static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+{
+	int i;
+
+	if (!mvm->fw_paging_db[0].fw_paging_block)
+		return;
+
+	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+		if (!mvm->fw_paging_db[i].fw_paging_block) {
+			IWL_DEBUG_FW(mvm,
+				     "Paging: block %d already freed, continue to next page\n",
+				     i);
+
+			continue;
+		}
+
+		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
+			     get_order(mvm->fw_paging_db[i].fw_paging_size));
+	}
+	kfree(mvm->trans->paging_download_buf);
+	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
+}
+
+static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
+{
+	int sec_idx, idx;
+	u32 offset = 0;
+
+	/*
+	 * find where is the paging image start point:
+	 * if CPU2 exist and it's in paging format, then the image looks like:
+	 * CPU1 sections (2 or more)
+	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
+	 * CPU2 sections (not paged)
+	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
+	 * non paged to CPU2 paging sec
+	 * CPU2 paging CSS
+	 * CPU2 paging image (including instruction and data)
+	 */
+	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+			sec_idx++;
+			break;
+		}
+	}
+
+	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
+		IWL_ERR(mvm, "driver didn't find paging image\n");
+		iwl_free_fw_paging(mvm);
+		return -EINVAL;
+	}
+
+	/* copy the CSS block to the dram */
+	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
+		     sec_idx);
+
+	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
+	       image->sec[sec_idx].data,
+	       mvm->fw_paging_db[0].fw_paging_size);
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: copied %d CSS bytes to first block\n",
+		     mvm->fw_paging_db[0].fw_paging_size);
+
+	sec_idx++;
+
+	/*
+	 * copy the paging blocks to the dram
+	 * loop index start from 1 since that CSS block already copied to dram
+	 * and CSS index is 0.
+	 * loop stop at num_of_paging_blk since that last block is not full.
+	 */
+	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
+		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+		       image->sec[sec_idx].data + offset,
+		       mvm->fw_paging_db[idx].fw_paging_size);
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: copied %d paging bytes to block %d\n",
+			     mvm->fw_paging_db[idx].fw_paging_size,
+			     idx);
+
+		offset += mvm->fw_paging_db[idx].fw_paging_size;
+	}
+
+	/* copy the last paging block */
+	if (mvm->num_of_pages_in_last_blk > 0) {
+		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+		       image->sec[sec_idx].data + offset,
+		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: copied %d pages in the last block %d\n",
+			     mvm->num_of_pages_in_last_blk, idx);
+	}
+
+	return 0;
+}
+
+static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
+				   const struct fw_img *image)
+{
+	struct page *block;
+	dma_addr_t phys = 0;
+	int blk_idx = 0;
+	int order, num_of_pages;
+	int dma_enabled;
+
+	if (mvm->fw_paging_db[0].fw_paging_block)
+		return 0;
+
+	dma_enabled = is_device_dma_capable(mvm->trans->dev);
+
+	/* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
+	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+	mvm->num_of_paging_blk = ((num_of_pages - 1) /
+				    NUM_OF_PAGE_PER_GROUP) + 1;
+
+	mvm->num_of_pages_in_last_blk =
+		num_of_pages -
+		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+		     mvm->num_of_paging_blk,
+		     mvm->num_of_pages_in_last_blk);
+
+	/* allocate block of 4Kbytes for paging CSS */
+	order = get_order(FW_PAGING_SIZE);
+	block = alloc_pages(GFP_KERNEL, order);
+	if (!block) {
+		/* free all the previous pages since we failed */
+		iwl_free_fw_paging(mvm);
+		return -ENOMEM;
+	}
+
+	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
+
+	if (dma_enabled) {
+		phys = dma_map_page(mvm->trans->dev, block, 0,
+				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(mvm->trans->dev, phys)) {
+			/*
+			 * free the previous pages and the current one since
+			 * we failed to map_page.
+			 */
+			iwl_free_fw_paging(mvm);
+			return -ENOMEM;
+		}
+		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+	} else {
+		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
+			blk_idx << BLOCK_2_EXP_SIZE;
+	}
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+		     order);
+
+	/*
+	 * allocate blocks in dram.
+	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
+	 */
+	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+		/* allocate block of PAGING_BLOCK_SIZE (32K) */
+		order = get_order(PAGING_BLOCK_SIZE);
+		block = alloc_pages(GFP_KERNEL, order);
+		if (!block) {
+			/* free all the previous pages since we failed */
+			iwl_free_fw_paging(mvm);
+			return -ENOMEM;
+		}
+
+		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+
+		if (dma_enabled) {
+			phys = dma_map_page(mvm->trans->dev, block, 0,
+					    PAGE_SIZE << order,
+					    DMA_BIDIRECTIONAL);
+			if (dma_mapping_error(mvm->trans->dev, phys)) {
+				/*
+				 * free the previous pages and the current one
+				 * since we failed to map_page.
+				 */
+				iwl_free_fw_paging(mvm);
+				return -ENOMEM;
+			}
+			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+		} else {
+			mvm->fw_paging_db[blk_idx].fw_paging_phys =
+				PAGING_ADDR_SIG |
+				blk_idx << BLOCK_2_EXP_SIZE;
+		}
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+			     order);
+	}
+
+	return 0;
+}
+
+static int iwl_save_fw_paging(struct iwl_mvm *mvm,
+			      const struct fw_img *fw)
+{
+	int ret;
+
+	ret = iwl_alloc_fw_paging_mem(mvm, fw);
+	if (ret)
+		return ret;
+
+	return iwl_fill_paging_mem(mvm, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
+{
+	int blk_idx;
+	__le32 dev_phy_addr;
+	struct iwl_fw_paging_cmd fw_paging_cmd = {
+		.flags =
+			cpu_to_le32(PAGING_CMD_IS_SECURED |
+				    PAGING_CMD_IS_ENABLED |
+				    (mvm->num_of_pages_in_last_blk <<
+				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
+	};
+
+	/* loop for for all paging blocks + CSS block */
+	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+		dev_phy_addr =
+			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
+				    PAGE_2_EXP_SIZE);
+		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
+/*
+ * Send paging item cmd to FW in case CPU2 has paging image
+ */
+static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
+{
+	int ret;
+	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
+		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
+	};
+
+	struct iwl_fw_get_item_resp *item_resp;
+	struct iwl_host_cmd cmd = {
+		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+		.data = { &fw_get_item_cmd, },
+	};
+
+	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret) {
+		IWL_ERR(mvm,
+			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
+			ret);
+		return ret;
+	}
+
+	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
+	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
+		IWL_ERR(mvm,
+			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
+			le32_to_cpu(item_resp->item_id));
+		ret = -EIO;
+		goto exit;
+	}
+
+	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+						  GFP_KERNEL);
+	if (!mvm->trans->paging_download_buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+	mvm->trans->paging_db = mvm->fw_paging_db;
+	IWL_DEBUG_FW(mvm,
+		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+		     mvm->trans->paging_req_addr);
+
+exit:
+	iwl_free_resp(&cmd);
+
+	return ret;
+}
+
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 			 struct iwl_rx_packet *pkt, void *data)
 {
@@ -244,6 +544,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
 				    MVM_UCODE_ALIVE_TIMEOUT);
 	if (ret) {
+		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+			IWL_ERR(mvm,
+				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
 		mvm->cur_ucode = old_type;
 		return ret;
 	}
@@ -268,6 +573,40 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
 	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
+	/*
+	 * configure and operate fw paging mechanism.
+	 * driver configures the paging flow only once, CPU2 paging image
+	 * included in the IWL_UCODE_INIT image.
+	 */
+	if (fw->paging_mem_size) {
+		/*
+		 * When dma is not enabled, the driver needs to copy / write
+		 * the downloaded / uploaded page to / from the smem.
+		 * This gets the location of the place were the pages are
+		 * stored.
+		 */
+		if (!is_device_dma_capable(mvm->trans->dev)) {
+			ret = iwl_trans_get_paging_item(mvm);
+			if (ret) {
+				IWL_ERR(mvm, "failed to get FW paging item\n");
+				return ret;
+			}
+		}
+
+		ret = iwl_save_fw_paging(mvm, fw);
+		if (ret) {
+			IWL_ERR(mvm, "failed to save the FW paging image\n");
+			return ret;
+		}
+
+		ret = iwl_send_paging_cmd(mvm, fw);
+		if (ret) {
+			IWL_ERR(mvm, "failed to send the paging cmd\n");
+			iwl_free_fw_paging(mvm);
+			return ret;
+		}
+	}
+
 	/*
 	 * Note: all the queues are enabled as part of the interface
 	 * initialization, but in firmware restart scenarios they
@@ -472,8 +811,13 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 				struct iwl_mvm_dump_desc *desc,
-				unsigned int delay)
+				struct iwl_fw_dbg_trigger_tlv *trigger)
 {
+	unsigned int delay = 0;
+
+	if (trigger)
+		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
 	if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
 		return -EBUSY;
 
@@ -484,6 +828,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 		 le32_to_cpu(desc->trig_desc.type));
 
 	mvm->fw_dump_desc = desc;
+	mvm->fw_dump_trig = trigger;
 
 	queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
 
@@ -491,7 +836,8 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 }
 
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-			   const char *str, size_t len, unsigned int delay)
+			   const char *str, size_t len,
+			   struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 	struct iwl_mvm_dump_desc *desc;
 
@@ -503,14 +849,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
 	desc->trig_desc.type = cpu_to_le32(trig);
 	memcpy(desc->trig_desc.data, str, len);
 
-	return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+	return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
 }
 
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 				struct iwl_fw_dbg_trigger_tlv *trigger,
 				const char *fmt, ...)
 {
-	unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
 	u16 occurrences = le16_to_cpu(trigger->occurrences);
 	int ret, len = 0;
 	char buf[64];
@@ -534,8 +879,9 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 		len = strlen(buf) + 1;
 	}
 
-	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
-				     len, delay);
+	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
+				     trigger);
+
 	if (ret)
 		return ret;
 

+ 28 - 9
drivers/net/wireless/iwlwifi/mvm/mac80211.c

@@ -641,6 +641,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+		ieee80211_hw_set(hw, TDLS_WIDER_BW);
 	}
 
 	if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -1124,9 +1125,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	u32 file_len, fifo_data_len = 0;
 	u32 smem_len = mvm->cfg->smem_len;
 	u32 sram2_len = mvm->cfg->dccm2_len;
+	bool monitor_dump_only = false;
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (mvm->fw_dump_trig &&
+	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+		monitor_dump_only = true;
+
 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
 	if (!fw_error_dump)
 		return;
@@ -1178,6 +1184,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		   fifo_data_len +
 		   sizeof(*dump_info);
 
+	/* Make room for the SMEM, if it exists */
+	if (smem_len)
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+	/* Make room for the secondary SRAM, if it exists */
+	if (sram2_len)
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+	/* If we only want a monitor dump, reset the file length */
+	if (monitor_dump_only) {
+		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+			   sizeof(*dump_info);
+	}
+
 	/*
 	/*
 	 * In 8000 HW family B-step include the ICCM (which resides separately)
 	 * In 8000 HW family B-step include the ICCM (which resides separately)
 	 */
 	 */
@@ -1190,14 +1210,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    mvm->fw_dump_desc->len;
 			    mvm->fw_dump_desc->len;
 
 
-	/* Make room for the SMEM, if it exists */
-	if (smem_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-	/* Make room for the secondary SRAM, if it exists */
-	if (sram2_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
 	dump_file = vzalloc(file_len);
 	dump_file = vzalloc(file_len);
 	if (!dump_file) {
 	if (!dump_file) {
 		kfree(fw_error_dump);
 		kfree(fw_error_dump);
@@ -1243,6 +1255,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 		dump_data = iwl_fw_error_next_data(dump_data);
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 	}
 
 
+	/* In case we only want a monitor dump, skip to dump transport data */
+	if (monitor_dump_only)
+		goto dump_trans_data;
+
 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
 	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
 	dump_mem = (void *)dump_data->data;
 	dump_mem = (void *)dump_data->data;
@@ -1286,7 +1302,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 					 dump_mem->data, IWL8260_ICCM_LEN);
 					 dump_mem->data, IWL8260_ICCM_LEN);
 	}
 	}
 
 
-	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+						       mvm->fw_dump_trig);
 	fw_error_dump->op_mode_len = file_len;
 	fw_error_dump->op_mode_len = file_len;
 	if (fw_error_dump->trans_ptr)
 	if (fw_error_dump->trans_ptr)
 		file_len += fw_error_dump->trans_ptr->len;
 		file_len += fw_error_dump->trans_ptr->len;
@@ -1295,6 +1313,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
 	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
 		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
 		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
 
 
+	mvm->fw_dump_trig = NULL;
 	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 }
 
 

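A hedged sketch of how the resulting dump can be walked by a consumer, assuming only the layout visible above (a dump_file header carrying its total length in file_len, followed by type/len/data records) and the iwl_fw_error_next_data() helper already used in this function:

	struct iwl_fw_error_dump_data *data = (void *)dump_file->data;
	u32 left = le32_to_cpu(dump_file->file_len) - sizeof(*dump_file);

	while (left > sizeof(*data)) {
		u32 len = le32_to_cpu(data->len);

		switch (le32_to_cpu(data->type)) {
		case IWL_FW_ERROR_DUMP_MEM:
			/* SRAM/SMEM/ICCM sections land here */
			break;
		default:
			break;
		}

		left -= sizeof(*data) + len;
		data = iwl_fw_error_next_data(data);
	}
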
+ 11 - 4
drivers/net/wireless/iwlwifi/mvm/mvm.h

@@ -563,7 +563,6 @@ struct iwl_mvm {
 	const struct iwl_cfg *cfg;
 	const struct iwl_cfg *cfg;
 	struct iwl_phy_db *phy_db;
 	struct iwl_phy_db *phy_db;
 	struct ieee80211_hw *hw;
 	struct ieee80211_hw *hw;
-	struct napi_struct *napi;
 
 
 	/* for protecting access to iwl_mvm */
 	/* for protecting access to iwl_mvm */
 	struct mutex mutex;
 	struct mutex mutex;
@@ -611,6 +610,11 @@ struct iwl_mvm {
 	/* NVM sections */
 	/* NVM sections */
 	struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
 	struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
 
 
+	/* Paging section */
+	struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+	u16 num_of_paging_blk;
+	u16 num_of_pages_in_last_blk;
+
 	/* EEPROM MAC addresses */
 	/* EEPROM MAC addresses */
 	struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
 	struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
 
 
@@ -704,6 +708,7 @@ struct iwl_mvm {
 	u8 fw_dbg_conf;
 	u8 fw_dbg_conf;
 	struct delayed_work fw_dump_wk;
 	struct delayed_work fw_dump_wk;
 	struct iwl_mvm_dump_desc *fw_dump_desc;
 	struct iwl_mvm_dump_desc *fw_dump_desc;
+	struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
 
 
 #ifdef CONFIG_IWLWIFI_LEDS
 #ifdef CONFIG_IWLWIFI_LEDS
 	struct led_classdev led;
 	struct led_classdev led;
@@ -1079,7 +1084,8 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  */
  */
 void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
 void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
@@ -1439,10 +1445,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
 
 
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-			   const char *str, size_t len, unsigned int delay);
+			   const char *str, size_t len,
+			   struct iwl_fw_dbg_trigger_tlv *trigger);
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 				struct iwl_mvm_dump_desc *desc,
 				struct iwl_mvm_dump_desc *desc,
-				unsigned int delay);
+				struct iwl_fw_dbg_trigger_tlv *trigger);
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 				struct iwl_fw_dbg_trigger_tlv *trigger,
 				struct iwl_fw_dbg_trigger_tlv *trigger,

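The new paging fields only make sense together with the per-block descriptor they index; a hedged sketch of what each fw_paging_db[] entry is assumed to hold (field names taken from the paging support added elsewhere in this series, not shown here):

struct iwl_fw_paging {
	dma_addr_t fw_paging_phys;	/* DMA address handed to the firmware */
	struct page *fw_paging_block;	/* host pages backing this block */
	u32 fw_paging_size;		/* block size in bytes */
};
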
+ 35 - 28
drivers/net/wireless/iwlwifi/mvm/ops.c

@@ -288,8 +288,10 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
 	CMD(PHY_CONFIGURATION_CMD),
 	CMD(PHY_CONFIGURATION_CMD),
 	CMD(CALIB_RES_NOTIF_PHY_DB),
 	CMD(CALIB_RES_NOTIF_PHY_DB),
 	CMD(SET_CALIB_DEFAULT_CMD),
 	CMD(SET_CALIB_DEFAULT_CMD),
+	CMD(FW_PAGING_BLOCK_CMD),
 	CMD(ADD_STA_KEY),
 	CMD(ADD_STA_KEY),
 	CMD(ADD_STA),
 	CMD(ADD_STA),
+	CMD(FW_GET_ITEM_CMD),
 	CMD(REMOVE_STA),
 	CMD(REMOVE_STA),
 	CMD(LQ_CMD),
 	CMD(LQ_CMD),
 	CMD(SCAN_OFFLOAD_CONFIG_CMD),
 	CMD(SCAN_OFFLOAD_CONFIG_CMD),
@@ -715,6 +717,7 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
 }
 }
 
 
 static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
 static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+				struct napi_struct *napi,
 				struct iwl_rx_cmd_buffer *rxb)
 				struct iwl_rx_cmd_buffer *rxb)
 {
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -722,7 +725,7 @@ static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
 	u8 i;
 	u8 i;
 
 
 	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
 	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
-		iwl_mvm_rx_rx_mpdu(mvm, rxb);
+		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
 		return;
 		return;
 	}
 	}
 
 
@@ -913,7 +916,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 	 * can't recover this since we're already half suspended.
 	 * can't recover this since we're already half suspended.
 	 */
 	 */
 	if (!mvm->restart_fw && fw_error) {
 	if (!mvm->restart_fw && fw_error) {
-		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
+		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
+					    NULL);
 	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
 	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
 				    &mvm->status)) {
 				    &mvm->status)) {
 		struct iwl_mvm_reprobe *reprobe;
 		struct iwl_mvm_reprobe *reprobe;
@@ -1110,9 +1114,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
 
 
 	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
 	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
 
 
-	/* make sure we have no running tx while configuring the qos */
 	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
 	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-	synchronize_net();
 
 
 	/*
 	/*
 	 * iwl_mvm_ref_sync takes a reference before checking the flag.
 	 * iwl_mvm_ref_sync takes a reference before checking the flag.
@@ -1140,6 +1142,9 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
 		mvm->d0i3_offloading = false;
 		mvm->d0i3_offloading = false;
 	}
 	}
 
 
+	/* make sure we have no running tx while configuring the seqno */
+	synchronize_net();
+
 	iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
 	iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
 	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
 	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
 				   sizeof(wowlan_config_cmd),
 				   sizeof(wowlan_config_cmd),
@@ -1166,15 +1171,25 @@ static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
 	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
 	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
 }
 }
 
 
-static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
-					 struct ieee80211_vif *vif)
+struct iwl_mvm_wakeup_reason_iter_data {
+	struct iwl_mvm *mvm;
+	u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
+					    struct ieee80211_vif *vif)
 {
 {
-	struct iwl_mvm *mvm = data;
+	struct iwl_mvm_wakeup_reason_iter_data *data = _data;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
 
 	if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
 	if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
-	    mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-		iwl_mvm_connection_loss(mvm, vif, "D0i3");
+	    data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
+		if (data->wakeup_reasons &
+		    IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+			iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+		else
+			ieee80211_beacon_loss(vif);
+	}
 }
 }
 
 
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
@@ -1242,7 +1257,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
 	};
 	};
 	struct iwl_wowlan_status *status;
 	struct iwl_wowlan_status *status;
 	int ret;
 	int ret;
-	u32 disconnection_reasons, wakeup_reasons;
+	u32 handled_reasons, wakeup_reasons;
 	__le16 *qos_seq = NULL;
 	__le16 *qos_seq = NULL;
 
 
 	mutex_lock(&mvm->mutex);
 	mutex_lock(&mvm->mutex);
@@ -1259,13 +1274,18 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
 
 
 	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
 	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
 
 
-	disconnection_reasons =
-		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
-		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
-	if (wakeup_reasons & disconnection_reasons)
+	handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+				IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+	if (wakeup_reasons & handled_reasons) {
+		struct iwl_mvm_wakeup_reason_iter_data data = {
+			.mvm = mvm,
+			.wakeup_reasons = wakeup_reasons,
+		};
+
 		ieee80211_iterate_active_interfaces(
 		ieee80211_iterate_active_interfaces(
 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-			iwl_mvm_d0i3_disconnect_iter, mvm);
+			iwl_mvm_d0i3_wakeup_reason_iter, &data);
+	}
 out:
 out:
 	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
 
@@ -1318,18 +1338,6 @@ int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
 	return _iwl_mvm_exit_d0i3(mvm);
 	return _iwl_mvm_exit_d0i3(mvm);
 }
 }
 
 
-static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
-			     struct napi_struct *napi,
-			     struct net_device *napi_dev,
-			     int (*poll)(struct napi_struct *, int),
-			     int weight)
-{
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
-	netif_napi_add(napi_dev, napi, poll, weight);
-	mvm->napi = napi;
-}
-
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
 	.start = iwl_op_mode_mvm_start,
 	.start = iwl_op_mode_mvm_start,
 	.stop = iwl_op_mode_mvm_stop,
 	.stop = iwl_op_mode_mvm_stop,
@@ -1343,5 +1351,4 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
 	.nic_config = iwl_mvm_nic_config,
 	.nic_config = iwl_mvm_nic_config,
 	.enter_d0i3 = iwl_mvm_enter_d0i3,
 	.enter_d0i3 = iwl_mvm_enter_d0i3,
 	.exit_d0i3 = iwl_mvm_exit_d0i3,
 	.exit_d0i3 = iwl_mvm_exit_d0i3,
-	.napi_add = iwl_mvm_napi_add,
 };
 };

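Condensed view of the D0i3 wakeup handling introduced above (a sketch of the per-interface decision, with the iterator plumbing omitted): a deauth/disassoc wakeup is escalated to a full connection loss, while a missed-beacon wakeup is only reported as beacon loss so mac80211 can probe the AP first.

	if (wakeup_reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
		iwl_mvm_connection_loss(mvm, vif, "D0i3");
	else if (wakeup_reasons &
		 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
		ieee80211_beacon_loss(vif);
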
+ 4 - 28
drivers/net/wireless/iwlwifi/mvm/power.c

@@ -288,27 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
 	return true;
 	return true;
 }
 }
 
 
-static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
-{
-	int numerator;
-	int dtim_interval = dtimper * bi;
-
-	if (WARN_ON(!dtim_interval))
-		return 0;
-
-	if (dtimper == 1) {
-		if (bi > 100)
-			numerator = 408;
-		else
-			numerator = 510;
-	} else if (dtimper < 10) {
-		numerator = 612;
-	} else {
-		return 0;
-	}
-	return max(1, (numerator / dtim_interval));
-}
-
 static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
 static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
 {
 {
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	struct ieee80211_chanctx_conf *chanctx_conf;
@@ -358,8 +337,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
 
-	if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-	    !mvmvif->pm_enabled)
+	if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
+	    (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
 		return;
 		return;
 
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -378,11 +357,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 	if (!radar_detect && (dtimper < 10) &&
 	if (!radar_detect && (dtimper < 10) &&
 	    (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
 	    (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
 	     mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
 	     mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-		cmd->skip_dtim_periods =
-			iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
-		if (cmd->skip_dtim_periods)
-			cmd->flags |=
-				cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		cmd->skip_dtim_periods = 3;
 	}
 	}
 
 
 	if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
 	if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {

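Worked example of the heuristic removed above: with dtimper = 1 and bi = 100 the old code picked numerator 510, i.e. 510 / (1 * 100) = 5 skipped DTIM periods, and with bi = 300 it picked 408 / 300 = 1. After this change the driver simply programs skip_dtim_periods = 3 whenever skipping is allowed at all (no radar detection, DTIM period under 10, LP power scheme or WoWLAN ucode).
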
+ 2 - 1
drivers/net/wireless/iwlwifi/mvm/rs.c

@@ -177,7 +177,8 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
 	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-	if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+	if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
+	    iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
 		return false;
 		return false;
 
 
 	if (mvm->nvm_data->sku_cap_mimo_disabled)
 	if (mvm->nvm_data->sku_cap_mimo_disabled)

+ 5 - 3
drivers/net/wireless/iwlwifi/mvm/rx.c

@@ -94,6 +94,7 @@ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
  * Adds the rxb to a new skb and give it to mac80211
  * Adds the rxb to a new skb and give it to mac80211
  */
  */
 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+					    struct napi_struct *napi,
 					    struct sk_buff *skb,
 					    struct sk_buff *skb,
 					    struct ieee80211_hdr *hdr, u16 len,
 					    struct ieee80211_hdr *hdr, u16 len,
 					    u32 ampdu_status, u8 crypt_len,
 					    u32 ampdu_status, u8 crypt_len,
@@ -127,7 +128,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 				fraglen, rxb->truesize);
 				fraglen, rxb->truesize);
 	}
 	}
 
 
-	ieee80211_rx_napi(mvm->hw, skb, mvm->napi);
+	ieee80211_rx_napi(mvm->hw, skb, napi);
 }
 }
 
 
 /*
 /*
@@ -253,7 +254,8 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
  *
  *
  * Handles the actual data of the Rx packet from the fw
  * Handles the actual data of the Rx packet from the fw
  */
  */
-void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb)
 {
 {
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_rx_status *rx_status;
 	struct ieee80211_rx_status *rx_status;
@@ -442,7 +444,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	iwl_mvm_update_frame_stats(mvm, rate_n_flags,
 	iwl_mvm_update_frame_stats(mvm, rate_n_flags,
 				   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
 				   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
 #endif
 #endif
-	iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+	iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
 					crypt_len, rxb);
 					crypt_len, rxb);
 }
 }
 
 

+ 116 - 106
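With the per-call napi argument above, the op mode no longer caches a NAPI pointer of its own; every RX simply forwards whatever NAPI context the transport is polling from. The resulting call chain (as visible in this series, not new code) is iwl_mvm_rx_dispatch(..., napi, rxb) -> iwl_mvm_rx_rx_mpdu(mvm, napi, rxb) -> ieee80211_rx_napi(mvm->hw, skb, napi).
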
drivers/net/wireless/iwlwifi/mvm/scan.c

@@ -72,10 +72,60 @@
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
 
-struct iwl_mvm_scan_params {
-	u32 max_out_time;
+enum iwl_mvm_scan_type {
+	IWL_SCAN_TYPE_UNASSOC,
+	IWL_SCAN_TYPE_WILD,
+	IWL_SCAN_TYPE_MILD,
+	IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_traffic_load {
+	IWL_MVM_TRAFFIC_LOW,
+	IWL_MVM_TRAFFIC_MEDIUM,
+	IWL_MVM_TRAFFIC_HIGH,
+};
+
+struct iwl_mvm_scan_timing_params {
+	u32 dwell_active;
+	u32 dwell_passive;
+	u32 dwell_fragmented;
 	u32 suspend_time;
 	u32 suspend_time;
-	bool passive_fragmented;
+	u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+	[IWL_SCAN_TYPE_UNASSOC] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 0,
+		.max_out_time = 0,
+	},
+	[IWL_SCAN_TYPE_WILD] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 30,
+		.max_out_time = 120,
+	},
+	[IWL_SCAN_TYPE_MILD] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 120,
+		.max_out_time = 120,
+	},
+	[IWL_SCAN_TYPE_FRAGMENTED] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 95,
+		.max_out_time = 44,
+	},
+};
+
+struct iwl_mvm_scan_params {
+	enum iwl_mvm_scan_type type;
 	u32 n_channels;
 	u32 n_channels;
 	u16 delay;
 	u16 delay;
 	int n_ssids;
 	int n_ssids;
@@ -90,13 +140,7 @@ struct iwl_mvm_scan_params {
 	int n_match_sets;
 	int n_match_sets;
 	struct iwl_scan_probe_req preq;
 	struct iwl_scan_probe_req preq;
 	struct cfg80211_match_set *match_sets;
 	struct cfg80211_match_set *match_sets;
-	u16 passive_dwell;
-	u16 active_dwell;
-	u16 fragmented_dwell;
-	struct {
-		u8 iterations;
-		u8 full_scan_mul; /* not used for UMAC */
-	} schedule[2];
+	u8 iterations[2];
 };
 };
 
 
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -156,76 +200,39 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
 		*global_cnt += 1;
 		*global_cnt += 1;
 }
 }
 
 
-static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
-				    struct ieee80211_vif *vif,
-				    struct iwl_mvm_scan_params *params)
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+	return IWL_MVM_TRAFFIC_LOW;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					struct iwl_mvm_scan_params *params)
 {
 {
 	int global_cnt = 0;
 	int global_cnt = 0;
-	u8 frag_passive_dwell = 0;
+	enum iwl_mvm_traffic_load load;
+	bool low_latency;
 
 
 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_cnt);
 					    &global_cnt);
 	if (!global_cnt)
 	if (!global_cnt)
-		goto not_bound;
-
-	params->suspend_time = 30;
-	params->max_out_time = 120;
-
-	if (iwl_mvm_low_latency(mvm)) {
-		if (fw_has_api(&mvm->fw->ucode_capa,
-			       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-
-			params->suspend_time = 105;
-			/*
-			 * If there is more than one active interface make
-			 * passive scan more fragmented.
-			 */
-			frag_passive_dwell = 40;
-			params->max_out_time = frag_passive_dwell;
-		} else {
-			params->suspend_time = 120;
-			params->max_out_time = 120;
-		}
-	}
-
-	if (frag_passive_dwell &&
-	    fw_has_api(&mvm->fw->ucode_capa,
-		       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-		/*
-		 * P2P device scan should not be fragmented to avoid negative
-		 * impact on P2P device discovery. Configure max_out_time to be
-		 * equal to dwell time on passive channel.
-		 */
-		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-			params->max_out_time = 120;
-		} else {
-			params->passive_fragmented = true;
-		}
-	}
-
-	if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
-	    (params->max_out_time > 200))
-		params->max_out_time = 200;
+		return IWL_SCAN_TYPE_UNASSOC;
 
 
-not_bound:
+	load = iwl_mvm_get_traffic_load(mvm);
+	low_latency = iwl_mvm_low_latency(mvm);
 
 
-	if (params->passive_fragmented)
-		params->fragmented_dwell = frag_passive_dwell;
+	if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+		return IWL_SCAN_TYPE_FRAGMENTED;
 
 
-	/*
-	 * use only basic dwell time in scan command, regardless of the band or
-	 * the number of the probes. FW will calculate the actual dwell time.
-	 */
-	params->passive_dwell = 110;
-	params->active_dwell = 10;
+	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+		return IWL_SCAN_TYPE_MILD;
 
 
-
-	IWL_DEBUG_SCAN(mvm,
-		       "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
-		       params->max_out_time, params->suspend_time,
-		       params->passive_fragmented);
+	return IWL_SCAN_TYPE_WILD;
 }
 }
 
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@ -342,9 +349,13 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
 	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
 	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
 		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
 		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
 
 
-		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+		IWL_DEBUG_SCAN(mvm,
+			       "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d\n",
 			       aborted ? "aborted" : "completed",
 			       aborted ? "aborted" : "completed",
-			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status),
+			       scan_notif->last_schedule_line,
+			       scan_notif->last_schedule_iteration,
+			       __le32_to_cpu(scan_notif->time_after_last_iter));
 
 
 		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
 		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
 	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
 	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
@@ -356,9 +367,13 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
 	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
 	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
 		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 
 
-		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+		IWL_DEBUG_SCAN(mvm,
+			       "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
 			       aborted ? "aborted" : "completed",
 			       aborted ? "aborted" : "completed",
-			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status),
+			       scan_notif->last_schedule_line,
+			       scan_notif->last_schedule_iteration,
+			       __le32_to_cpu(scan_notif->time_after_last_iter));
 
 
 		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
 		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
 		ieee80211_sched_scan_stopped(mvm->hw);
 		ieee80211_sched_scan_stopped(mvm->hw);
@@ -699,12 +714,11 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
 				    struct iwl_scan_req_lmac *cmd,
 				    struct iwl_scan_req_lmac *cmd,
 				    struct iwl_mvm_scan_params *params)
 				    struct iwl_mvm_scan_params *params)
 {
 {
-	cmd->active_dwell = params->active_dwell;
-	cmd->passive_dwell = params->passive_dwell;
-	if (params->passive_fragmented)
-		cmd->fragmented_dwell = params->fragmented_dwell;
-	cmd->max_out_time = cpu_to_le32(params->max_out_time);
-	cmd->suspend_time = cpu_to_le32(params->suspend_time);
+	cmd->active_dwell = scan_timing[params->type].dwell_active;
+	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 	cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 	cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 }
 
 
@@ -741,7 +755,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
 
 
 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
 {
 {
-	return params->schedule[0].iterations + params->schedule[1].iterations;
+	return params->iterations[0] + params->iterations[1];
 }
 }
 
 
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
@@ -755,7 +769,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
 
-	if (params->passive_fragmented)
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
 
 	if (iwl_mvm_rrm_scan_needed(mvm))
 	if (iwl_mvm_rrm_scan_needed(mvm))
@@ -808,11 +822,11 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	ssid_bitmap <<= 1;
 	ssid_bitmap <<= 1;
 
 
 	cmd->schedule[0].delay = cpu_to_le16(params->interval);
 	cmd->schedule[0].delay = cpu_to_le16(params->interval);
-	cmd->schedule[0].iterations = params->schedule[0].iterations;
-	cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+	cmd->schedule[0].iterations = params->iterations[0];
+	cmd->schedule[0].full_scan_mul = 1;
 	cmd->schedule[1].delay = cpu_to_le16(params->interval);
 	cmd->schedule[1].delay = cpu_to_le16(params->interval);
-	cmd->schedule[1].iterations = params->schedule[1].iterations;
-	cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
+	cmd->schedule[1].iterations = params->iterations[1];
+	cmd->schedule[1].full_scan_mul = 1;
 
 
 	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
 	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
 		cmd->channel_opt[0].flags =
 		cmd->channel_opt[0].flags =
@@ -958,16 +972,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
 				    struct iwl_scan_req_umac *cmd,
 				    struct iwl_scan_req_umac *cmd,
 				    struct iwl_mvm_scan_params *params)
 				    struct iwl_mvm_scan_params *params)
 {
 {
-	cmd->active_dwell = params->active_dwell;
-	cmd->passive_dwell = params->passive_dwell;
-	if (params->passive_fragmented)
-		cmd->fragmented_dwell = params->fragmented_dwell;
-	cmd->max_out_time = cpu_to_le32(params->max_out_time);
-	cmd->suspend_time = cpu_to_le32(params->suspend_time);
+	cmd->active_dwell = scan_timing[params->type].dwell_active;
+	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 	cmd->scan_priority =
 	cmd->scan_priority =
 		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
 
-	if (iwl_mvm_scan_total_iterations(params) == 0)
+	if (iwl_mvm_scan_total_iterations(params) == 1)
 		cmd->ooc_priority =
 		cmd->ooc_priority =
 			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 			iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 	else
 	else
@@ -1003,7 +1016,7 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
 
-	if (params->passive_fragmented)
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
 
 	if (iwl_mvm_rrm_scan_needed(mvm))
 	if (iwl_mvm_rrm_scan_needed(mvm))
@@ -1172,12 +1185,10 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	params.n_match_sets = 0;
 	params.n_match_sets = 0;
 	params.match_sets = NULL;
 	params.match_sets = NULL;
 
 
-	params.schedule[0].iterations = 1;
-	params.schedule[0].full_scan_mul = 0;
-	params.schedule[1].iterations = 0;
-	params.schedule[1].full_scan_mul = 0;
+	params.iterations[0] = 1;
+	params.iterations[1] = 0;
 
 
-	iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
 
 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
 
@@ -1255,10 +1266,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 	params.n_match_sets = req->n_match_sets;
 	params.n_match_sets = req->n_match_sets;
 	params.match_sets = req->match_sets;
 	params.match_sets = req->match_sets;
 
 
-	params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
-	params.schedule[0].full_scan_mul = 1;
-	params.schedule[1].iterations = 0xff;
-	params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+	params.iterations[0] = 0;
+	params.iterations[1] = 0xff;
+
+	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
 
 	if (req->interval > U16_MAX) {
 	if (req->interval > U16_MAX) {
 		IWL_DEBUG_SCAN(mvm,
 		IWL_DEBUG_SCAN(mvm,
@@ -1281,8 +1292,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 		params.delay = req->delay;
 		params.delay = req->delay;
 	}
 	}
 
 
-	iwl_mvm_scan_calc_dwell(mvm, vif, &params);
-
 	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
 	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
@@ -1336,13 +1345,14 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
 	}
 	}
 
 
 	mvm->scan_status &= ~mvm->scan_uid_status[uid];
 	mvm->scan_status &= ~mvm->scan_uid_status[uid];
-
 	IWL_DEBUG_SCAN(mvm,
 	IWL_DEBUG_SCAN(mvm,
-		       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+		       "Scan completed, uid %u type %u, status %s, EBS status %s, Last line %d, Last iteration %d, Time from last iteration %d\n",
 		       uid, mvm->scan_uid_status[uid],
 		       uid, mvm->scan_uid_status[uid],
 		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
 		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
 				"completed" : "aborted",
 				"completed" : "aborted",
-		       iwl_mvm_ebs_status_str(notif->ebs_status));
+		       iwl_mvm_ebs_status_str(notif->ebs_status),
+		       notif->last_schedule, notif->last_iter,
+		       __le32_to_cpu(notif->time_from_last_iter));
 
 
 	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
 	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
 	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
 	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)

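Condensed view of the selection logic above together with the timing table it indexes (values copied from scan_timing[]; note the traffic-load hook is still a stub that always reports IWL_MVM_TRAFFIC_LOW):

	no interface bound                            -> UNASSOC    (max_out_time 0,   suspend 0)
	high load or low latency, not a P2P device,
	and FW supports fragmented scan               -> FRAGMENTED (max_out_time 44,  suspend 95)
	medium load or low latency                    -> MILD       (max_out_time 120, suspend 120)
	otherwise                                     -> WILD       (max_out_time 120, suspend 30)
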
+ 1 - 1
drivers/net/wireless/iwlwifi/mvm/tof.c

@@ -194,7 +194,7 @@ int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
 				  struct ieee80211_vif *vif)
 				  struct ieee80211_vif *vif)
 {
 {
 	struct iwl_host_cmd cmd = {
 	struct iwl_host_cmd cmd = {
-		.id = TOF_CMD,
+		.id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
 		.len = { sizeof(mvm->tof_data.range_req), },
 		.len = { sizeof(mvm->tof_data.range_req), },
 		/* no copy because of the command size */
 		/* no copy because of the command size */
 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
 		.dataflags = { IWL_HCMD_DFL_NOCOPY, },

+ 6 - 4
drivers/net/wireless/iwlwifi/mvm/tx.c

@@ -153,18 +153,20 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 
 
 	if (ieee80211_is_mgmt(fc)) {
 	if (ieee80211_is_mgmt(fc)) {
 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+		else if (ieee80211_is_action(fc))
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 		else
 		else
-			tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 
 
 		/* The spec allows Action frames in A-MPDU, we don't support
 		/* The spec allows Action frames in A-MPDU, we don't support
 		 * it
 		 * it
 		 */
 		 */
 		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
 		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
 	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
 	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
-		tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 	} else {
 	} else {
-		tx_cmd->pm_frame_timeout = 0;
+		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 	}
 	}
 
 
 	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
 	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&

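The named timeouts above replace the literals 3/2/0; the enum itself lands in fw-api-tx.h in this series. A sketch of the assumed definitions, matching the literals they replace (not quoted from that file):

enum iwl_tx_pm_timeouts {
	PM_FRAME_NONE  = 0,
	PM_FRAME_MGMT  = 2,
	PM_FRAME_ASSOC = 3,
};
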
+ 49 - 8
drivers/net/wireless/iwlwifi/pcie/internal.h

@@ -44,6 +44,21 @@
 #include "iwl-io.h"
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 #include "iwl-op-mode.h"
 
 
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
 struct iwl_host_cmd;
 struct iwl_host_cmd;
 
 
 /*This file includes the declaration that are internal to the
 /*This file includes the declaration that are internal to the
@@ -77,29 +92,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
  * @read: Shared index to newest available Rx buffer
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
  * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator to use for allocation
  * @write_actual:
  * @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
  * @need_update: flag to indicate we need to update read/write index
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
  * @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  *
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
  */
 struct iwl_rxq {
 struct iwl_rxq {
 	__le32 *bd;
 	__le32 *bd;
 	dma_addr_t bd_dma;
 	dma_addr_t bd_dma;
-	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 	u32 read;
 	u32 read;
 	u32 write;
 	u32 write;
 	u32 free_count;
 	u32 free_count;
+	u32 used_count;
 	u32 write_actual;
 	u32 write_actual;
 	struct list_head rx_free;
 	struct list_head rx_free;
 	struct list_head rx_used;
 	struct list_head rx_used;
@@ -107,6 +122,32 @@ struct iwl_rxq {
 	struct iwl_rb_status *rb_stts;
 	struct iwl_rb_status *rb_stts;
 	dma_addr_t rb_stts_dma;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
 	spinlock_t lock;
+	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not yet processed
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ *	the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *	of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+	atomic_t req_pending;
+	atomic_t req_ready;
+	struct list_head rbd_allocated;
+	struct list_head rbd_empty;
+	spinlock_t lock;
+	struct workqueue_struct *alloc_wq;
+	struct work_struct rx_alloc;
 };
 };
 
 
 struct iwl_dma_ptr {
 struct iwl_dma_ptr {
@@ -250,7 +291,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
  * @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -275,7 +316,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
  */
 struct iwl_trans_pcie {
 struct iwl_trans_pcie {
 	struct iwl_rxq rxq;
 	struct iwl_rxq rxq;
-	struct work_struct rx_replenish;
+	struct iwl_rb_allocator rba;
 	struct iwl_trans *trans;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
 	struct iwl_drv *drv;
 
 

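Worked example of the sizing above: a queue posts an allocation request after RX_POST_REQ_ALLOC = 2 RBDs have lost their pages and claims it once RX_CLAIM_REQ_ALLOC = 8 have accumulated, so at most (8 - 2) * RX_NUM_QUEUES = 6 page-less RBDs per queue can be waiting on the allocator, which is exactly RX_POOL_SIZE. A minimal sketch of the post/complete bookkeeping the new fields support (fragments only; the real handlers live in pcie/rx.c below):

	/* queue side: every RX_POST_REQ_ALLOC used RBDs */
	atomic_inc(&rba->req_pending);
	queue_work(rba->alloc_wq, &rba->rx_alloc);

	/* allocator side: once RX_CLAIM_REQ_ALLOC pages are mapped */
	atomic_inc(&rba->req_ready);
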
+ 393 - 82
drivers/net/wireless/iwlwifi/pcie/rx.c

@@ -1,7 +1,7 @@
 /******************************************************************************
 /******************************************************************************
  *
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  * as portions of the ieee80211 subsystem header files.
@@ -74,16 +74,29 @@
  * resets the Rx queue buffers with new memory.
  * resets the Rx queue buffers with new memory.
  *
  *
  * The management in the driver is as follows:
  * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, the request is processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue used list.
+ *   When there are two used RBDs - they are transferred to the allocator empty
+ *   list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When there are another 6 used RBDs - they are transferred to the allocator
+ *   empty list and the driver tries to claim the pre-allocated buffers and
+ *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
+ *   until ready.
+ *   When there are 8+ buffers in the free list - either from allocation or from
+ *   8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ *   the allocator has an initial pool the size of num_queues*(8-2) - the
+ *   maximum number of RBDs that can be missing per allocation request (a
+ *   request is posted with 2 empty RBDs, and there is no guarantee when the
+ *   other 6 RBDs will be supplied).
+ *   The queues supply the recycling of the remaining RBDs.
  * + A received packet is processed and handed to the kernel network stack,
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
@@ -92,18 +105,32 @@
  *
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_pcie_rx_replenish
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()     Background work for allocating pages.
  *
  *
  * -- enable interrupts --
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
  *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
  *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  * ...
  *
  *
  */
  */
@@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 		rxq->free_count--;
 		rxq->free_count--;
 	}
 	}
 	spin_unlock(&rxq->lock);
 	spin_unlock(&rxq->lock);
-	/* If the pre-allocated buffer pool is dropping low, schedule to
-	 * refill it */
-	if (rxq->free_count <= RX_LOW_WATERMARK)
-		schedule_work(&trans_pcie->rx_replenish);
 
 
 	/* If we've added more space for the firmware to place data, tell it.
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
 	 * Increment device's write pointer in multiples of 8. */
@@ -254,6 +277,45 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 	}
 	}
 }
 }
 
 
+/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ *
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+					   gfp_t priority)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct page *page;
+	gfp_t gfp_mask = priority;
+
+	if (rxq->free_count > RX_LOW_WATERMARK)
+		gfp_mask |= __GFP_NOWARN;
+
+	if (trans_pcie->rx_page_order > 0)
+		gfp_mask |= __GFP_COMP;
+
+	/* Alloc a new receive buffer */
+	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+	if (!page) {
+		if (net_ratelimit())
+			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+				       trans_pcie->rx_page_order);
+		/* Issue an error if the hardware has consumed more than half
+		 * of its free buffer list and we don't have enough
+		 * pre-allocated buffers.
+		 */
+		if (rxq->free_count <= RX_LOW_WATERMARK &&
+		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+		    net_ratelimit())
+			IWL_CRIT(trans,
+				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+				 rxq->free_count);
+		return NULL;
+	}
+	return page;
+}
+
 /*
 /*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
  *
@@ -269,7 +331,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
 	struct page *page;
-	gfp_t gfp_mask = priority;
 
 
 	while (1) {
 	while (1) {
 		spin_lock(&rxq->lock);
 		spin_lock(&rxq->lock);
@@ -279,32 +340,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 		}
 		}
 		spin_unlock(&rxq->lock);
 		spin_unlock(&rxq->lock);
 
 
-		if (rxq->free_count > RX_LOW_WATERMARK)
-			gfp_mask |= __GFP_NOWARN;
-
-		if (trans_pcie->rx_page_order > 0)
-			gfp_mask |= __GFP_COMP;
-
 		/* Alloc a new receive buffer */
 		/* Alloc a new receive buffer */
-		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-		if (!page) {
-			if (net_ratelimit())
-				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
-					   "order: %d\n",
-					   trans_pcie->rx_page_order);
-
-			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-			    net_ratelimit())
-				IWL_CRIT(trans, "Failed to alloc_pages with %s."
-					 "Only %u free buffers remaining.\n",
-					 priority == GFP_ATOMIC ?
-					 "GFP_ATOMIC" : "GFP_KERNEL",
-					 rxq->free_count);
-			/* We don't reschedule replenish work here -- we will
-			 * call the restock method and if it still needs
-			 * more buffers it will schedule replenish */
+		page = iwl_pcie_rx_alloc_page(trans, priority);
+		if (!page)
 			return;
 			return;
-		}
 
 
 		spin_lock(&rxq->lock);
 		spin_lock(&rxq->lock);
 
 
@@ -355,7 +394,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
 
 	lockdep_assert_held(&rxq->lock);
 	lockdep_assert_held(&rxq->lock);
 
 
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+	for (i = 0; i < RX_QUEUE_SIZE; i++) {
 		if (!rxq->pool[i].page)
 		if (!rxq->pool[i].page)
 			continue;
 			continue;
 		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
 		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +411,164 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * When moving to rx_free an page is allocated for the slot.
  * When moving to rx_free an page is allocated for the slot.
  *
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
  */
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
 {
-	iwl_pcie_rxq_alloc_rbs(trans, gfp);
+	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
 
 	iwl_pcie_rxq_restock(trans);
 	iwl_pcie_rxq_restock(trans);
 }
 }
 
 
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates 8 pages for each received request.
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	struct list_head local_empty;
+	int pending = atomic_xchg(&rba->req_pending, 0);
+
+	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+	/* If we were scheduled - there is at least one request */
+	spin_lock(&rba->lock);
+	/* swap out the rba->rbd_empty to a local list */
+	list_replace_init(&rba->rbd_empty, &local_empty);
+	spin_unlock(&rba->lock);
+
+	while (pending) {
+		int i;
+		struct list_head local_allocated;
+
+		INIT_LIST_HEAD(&local_allocated);
+
+		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+			struct iwl_rx_mem_buffer *rxb;
+			struct page *page;
+
+			/* List should never be empty - each reused RBD is
+			 * returned to the list, and initial pool covers any
+			 * possible gap between the time the page is allocated
+			 * to the time the RBD is added.
+			 */
+			BUG_ON(list_empty(&local_empty));
+			/* Get the first rxb from the rbd list */
+			rxb = list_first_entry(&local_empty,
+					       struct iwl_rx_mem_buffer, list);
+			BUG_ON(rxb->page);
+
+			/* Alloc a new receive buffer */
+			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+			if (!page)
+				continue;
+			rxb->page = page;
+
+			/* Get physical address of the RB */
+			rxb->page_dma = dma_map_page(trans->dev, page, 0,
+					PAGE_SIZE << trans_pcie->rx_page_order,
+					DMA_FROM_DEVICE);
+			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+				rxb->page = NULL;
+				__free_pages(page, trans_pcie->rx_page_order);
+				continue;
+			}
+			/* dma address must be no more than 36 bits */
+			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+			/* and also 256 byte aligned! */
+			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+			/* move the allocated entry to the out list */
+			list_move(&rxb->list, &local_allocated);
+			i++;
+		}
+
+		pending--;
+		if (!pending) {
+			pending = atomic_xchg(&rba->req_pending, 0);
+			IWL_DEBUG_RX(trans,
+				     "Pending allocation requests = %d\n",
+				     pending);
+		}
+
+		spin_lock(&rba->lock);
+		/* add the allocated rbds to the allocator allocated list */
+		list_splice_tail(&local_allocated, &rba->rbd_allocated);
+		/* get more empty RBDs for current pending requests */
+		list_splice_tail_init(&rba->rbd_empty, &local_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_inc(&rba->req_ready);
+	}
+
+	spin_lock(&rba->lock);
+	/* return unused rbds to the allocator empty list */
+	list_splice_tail(&local_empty, &rba->rbd_empty);
+	spin_unlock(&rba->lock);
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+				     struct iwl_rx_mem_buffer
+				     *out[RX_CLAIM_REQ_ALLOC])
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	/*
+	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+	 * function will return -ENOMEM, as there are no ready requests.
+	 * atomic_dec_if_positive will perform the *actual* decrement only if
+	 * req_ready > 0, i.e. - there are ready requests and the function
+	 * hands one request to the caller.
+	 */
+	if (atomic_dec_if_positive(&rba->req_ready) < 0)
+		return -ENOMEM;
+
+	spin_lock(&rba->lock);
+	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+		/* Get next free Rx buffer, remove it from free list */
+		out[i] = list_first_entry(&rba->rbd_allocated,
+			       struct iwl_rx_mem_buffer, list);
+		list_del(&out[i]->list);
+	}
+	spin_unlock(&rba->lock);
+
+	return 0;
+}
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
 {
 {
+	struct iwl_rb_allocator *rba_p =
+		container_of(data, struct iwl_rb_allocator, rx_alloc);
 	struct iwl_trans_pcie *trans_pcie =
 	struct iwl_trans_pcie *trans_pcie =
-	    container_of(data, struct iwl_trans_pcie, rx_replenish);
+		container_of(rba_p, struct iwl_trans_pcie, rba);
 
 
-	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+	iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 }
 
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	struct device *dev = trans->dev;
 	struct device *dev = trans->dev;
 
 
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
 
 	spin_lock_init(&rxq->lock);
 	spin_lock_init(&rxq->lock);
+	spin_lock_init(&rba->lock);
 
 
 	if (WARN_ON(rxq->bd || rxq->rb_stts))
 	if (WARN_ON(rxq->bd || rxq->rb_stts))
 		return -EINVAL;
 		return -EINVAL;
@@ -487,15 +658,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 	INIT_LIST_HEAD(&rxq->rx_free);
 	INIT_LIST_HEAD(&rxq->rx_free);
 	INIT_LIST_HEAD(&rxq->rx_used);
 	INIT_LIST_HEAD(&rxq->rx_used);
 	rxq->free_count = 0;
 	rxq->free_count = 0;
+	rxq->used_count = 0;
 
 
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
 		list_add(&rxq->pool[i].list, &rxq->rx_used);
 		list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 }
 
 
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+	int i;
+
+	lockdep_assert_held(&rba->lock);
+
+	INIT_LIST_HEAD(&rba->rbd_allocated);
+	INIT_LIST_HEAD(&rba->rbd_empty);
+
+	for (i = 0; i < RX_POOL_SIZE; i++)
+		list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	lockdep_assert_held(&rba->lock);
+
+	for (i = 0; i < RX_POOL_SIZE; i++) {
+		if (!rba->pool[i].page)
+			continue;
+		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+			       PAGE_SIZE << trans_pcie->rx_page_order,
+			       DMA_FROM_DEVICE);
+		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+		rba->pool[i].page = NULL;
+	}
+}
+
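For readers unfamiliar with the shape of the allocator, it keeps its RX_POOL_SIZE descriptors on two lists: rbd_empty (descriptors waiting for a page) and rbd_allocated (descriptors whose page is ready to hand to the queue). A rough stand-alone sketch of that two-list pool, using a plain singly linked list instead of the kernel's list_head, is shown below; everything in it is invented for illustration.

/* Illustrative sketch (not driver code): a descriptor pool kept on two
 * lists, "empty" descriptors waiting for a page and "allocated" ones
 * ready to be handed to the RX queue. */
#include <stdio.h>
#include <stddef.h>

#define POOL_SIZE 16

struct node {
	struct node *next;
	int id;
	void *page;		/* NULL while the descriptor sits on "empty" */
};

struct pool {
	struct node slots[POOL_SIZE];
	struct node *empty;		/* descriptors with no page attached */
	struct node *allocated;		/* descriptors with a page attached */
};

static void pool_init(struct pool *p)
{
	p->empty = NULL;
	p->allocated = NULL;
	for (int i = 0; i < POOL_SIZE; i++) {
		p->slots[i].id = i;
		p->slots[i].page = NULL;
		p->slots[i].next = p->empty;	/* push onto the empty list */
		p->empty = &p->slots[i];
	}
}

/* Move one descriptor from "empty" to "allocated", attaching a page. */
static int pool_fill_one(struct pool *p, void *page)
{
	struct node *n = p->empty;

	if (!n)
		return -1;
	p->empty = n->next;
	n->page = page;
	n->next = p->allocated;
	p->allocated = n;
	return 0;
}

int main(void)
{
	static struct pool p;
	static char fake_page[4096];

	pool_init(&p);
	pool_fill_one(&p, fake_page);
	printf("first allocated descriptor: id=%d\n", p.allocated->id);
	return 0;
}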
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i, err;
 	int i, err;
 
 
 	if (!rxq->bd) {
 	if (!rxq->bd) {
@@ -503,11 +708,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 		if (err)
 		if (err)
 			return err;
 			return err;
 	}
 	}
+	if (!rba->alloc_wq)
+		rba->alloc_wq = alloc_workqueue("rb_allocator",
+						WQ_HIGHPRI | WQ_UNBOUND, 1);
+	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+	spin_lock(&rba->lock);
+	atomic_set(&rba->req_pending, 0);
+	atomic_set(&rba->req_ready, 0);
+	/* free all first - we might be reconfigured for a different size */
+	iwl_pcie_rx_free_rba(trans);
+	iwl_pcie_rx_init_rba(rba);
+	spin_unlock(&rba->lock);
 
 
 	spin_lock(&rxq->lock);
 	spin_lock(&rxq->lock);
 
 
-	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
 	/* free all first - we might be reconfigured for a different size */
 	/* free all first - we might be reconfigured for a different size */
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rx_init_rxb_lists(rxq);
 	iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +737,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock(&rxq->lock);
 	spin_unlock(&rxq->lock);
 
 
-	iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+	iwl_pcie_rx_replenish(trans);
 
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 	iwl_pcie_rx_hw_init(trans, rxq);
 
 
@@ -537,6 +752,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
 	 * exit now */
@@ -545,7 +761,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 		return;
 		return;
 	}
 	}
 
 
-	cancel_work_sync(&trans_pcie->rx_replenish);
+	cancel_work_sync(&rba->rx_alloc);
+	if (rba->alloc_wq) {
+		destroy_workqueue(rba->alloc_wq);
+		rba->alloc_wq = NULL;
+	}
+
+	spin_lock(&rba->lock);
+	iwl_pcie_rx_free_rba(trans);
+	spin_unlock(&rba->lock);
 
 
 	spin_lock(&rxq->lock);
 	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rxq_free_rbs(trans);
@@ -566,8 +790,49 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	rxq->rb_stts = NULL;
 	rxq->rb_stts = NULL;
 }
 }
 
 
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when an RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted.
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+				  struct iwl_rx_mem_buffer *rxb,
+				  struct iwl_rxq *rxq, bool emergency)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+	/* Move the RBD to the used list; it will be moved to the allocator
+	 * in batches before claiming or posting a request */
+	list_add_tail(&rxb->list, &rxq->rx_used);
+
+	if (unlikely(emergency))
+		return;
+
+	/* Count the allocator owned RBDs */
+	rxq->used_count++;
+
+	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+	 * issue a request for the allocator. The modulo of RX_CLAIM_REQ_ALLOC
+	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers
+	 * but still need to post another request.
+	 */
+	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+		/* Move the 2 RBDs to the allocator's ownership.
+		 * The allocator has another 6 RBDs from the pool for the
+		 * request completion */
+		spin_lock(&rba->lock);
+		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_inc(&rba->req_pending);
+		queue_work(rba->alloc_wq, &rba->rx_alloc);
+	}
+}
+
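The posting rule above is easier to see with numbers. Assuming the 8-buffer claim batch and 2-buffer post threshold that the surrounding comments describe, a request goes out every time used_count hits 2 modulo 8, and a full batch can be claimed once 8 have accumulated. The toy program below (not driver code) just walks that arithmetic.

/* Toy walk-through of the posting rule
 * (used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC. */
#include <stdio.h>

#define RX_CLAIM_REQ_ALLOC 8
#define RX_POST_REQ_ALLOC  2

int main(void)
{
	unsigned int used_count = 0;

	for (int handled = 1; handled <= 20; handled++) {
		used_count++;	/* one more RBD handed back without a page */
		if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
			printf("used_count=%2u -> post allocation request\n",
			       used_count);
		if (used_count >= RX_CLAIM_REQ_ALLOC) {
			printf("used_count=%2u -> try to claim 8 buffers\n",
			       used_count);
			used_count -= RX_CLAIM_REQ_ALLOC; /* claim succeeded */
		}
	}
	return 0;
}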
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
-				struct iwl_rx_mem_buffer *rxb)
+				struct iwl_rx_mem_buffer *rxb,
+				bool emergency)
 {
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -633,7 +898,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		index = SEQ_TO_INDEX(sequence);
 		index = SEQ_TO_INDEX(sequence);
 		cmd_index = get_cmd_index(&txq->q, index);
 		cmd_index = get_cmd_index(&txq->q, index);
 
 
-		iwl_op_mode_rx(trans->op_mode, &rxcb);
+		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
 
 
 		if (reclaim) {
 		if (reclaim) {
 			kzfree(txq->entries[cmd_index].free_buf);
 			kzfree(txq->entries[cmd_index].free_buf);
@@ -682,13 +947,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 			 */
 			 */
 			__free_pages(rxb->page, trans_pcie->rx_page_order);
 			__free_pages(rxb->page, trans_pcie->rx_page_order);
 			rxb->page = NULL;
 			rxb->page = NULL;
-			list_add_tail(&rxb->list, &rxq->rx_used);
+			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 		} else {
 		} else {
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			rxq->free_count++;
 			rxq->free_count++;
 		}
 		}
 	} else
 	} else
-		list_add_tail(&rxb->list, &rxq->rx_used);
+		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 }
 }
 
 
 /*
 /*
@@ -698,10 +963,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	u32 r, i;
-	u8 fill_rx = 0;
-	u32 count = 8;
-	int total_empty;
+	u32 r, i, j, count = 0;
+	bool emergency = false;
 
 
 restart:
 restart:
 	spin_lock(&rxq->lock);
 	spin_lock(&rxq->lock);
@@ -714,47 +977,95 @@ restart:
 	if (i == r)
 	if (i == r)
 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
 
-	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - rxq->write_actual;
-	if (total_empty < 0)
-		total_empty += RX_QUEUE_SIZE;
-
-	if (total_empty > (RX_QUEUE_SIZE / 2))
-		fill_rx = 1;
-
 	while (i != r) {
 	while (i != r) {
 		struct iwl_rx_mem_buffer *rxb;
 		struct iwl_rx_mem_buffer *rxb;
 
 
+		if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+			emergency = true;
+
 		rxb = rxq->queue[i];
 		rxb = rxq->queue[i];
 		rxq->queue[i] = NULL;
 		rxq->queue[i] = NULL;
 
 
 		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
 		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
 			     r, i, rxb);
 			     r, i, rxb);
-		iwl_pcie_rx_handle_rb(trans, rxb);
+		iwl_pcie_rx_handle_rb(trans, rxb, emergency);
 
 
 		i = (i + 1) & RX_QUEUE_MASK;
 		i = (i + 1) & RX_QUEUE_MASK;
-		/* If there are a lot of unused frames,
-		 * restock the Rx queue so ucode wont assert. */
-		if (fill_rx) {
+
+		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+		 * try to claim the pre-allocated buffers from the allocator */
+		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+			struct iwl_rb_allocator *rba = &trans_pcie->rba;
+			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
+			    !emergency) {
+				/* Add the remaining 6 empty RBDs
+				 * for allocator use
+				 */
+				spin_lock(&rba->lock);
+				list_splice_tail_init(&rxq->rx_used,
+						      &rba->rbd_empty);
+				spin_unlock(&rba->lock);
+			}
+
+			/* If not ready - continue, will try to reclaim later.
+			 * No need to reschedule work - allocator exits only on
+			 * success */
+			if (!iwl_pcie_rx_allocator_get(trans, out)) {
+				/* If success - then RX_CLAIM_REQ_ALLOC
+				 * buffers were retrieved and should be added
+				 * to free list */
+				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+					list_add_tail(&out[j]->list,
+						      &rxq->rx_free);
+					rxq->free_count++;
+				}
+			}
+		}
+		if (emergency) {
 			count++;
 			count++;
-			if (count >= 8) {
-				rxq->read = i;
-				spin_unlock(&rxq->lock);
-				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+			if (count == 8) {
 				count = 0;
 				count = 0;
-				goto restart;
+				if (rxq->used_count < RX_QUEUE_SIZE / 3)
+					emergency = false;
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+				spin_lock(&rxq->lock);
 			}
 			}
 		}
 		}
+		/* handle restock for three cases, can be all of them at once:
+		 * - we just pulled buffers from the allocator
+		 * - we have 8+ unstolen pages accumulated
+		 * - we are in emergency and allocated buffers
+		 */
+		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+			rxq->read = i;
+			spin_unlock(&rxq->lock);
+			iwl_pcie_rxq_restock(trans);
+			goto restart;
+		}
 	}
 	}
 
 
 	/* Backtrack one entry */
 	/* Backtrack one entry */
 	rxq->read = i;
 	rxq->read = i;
 	spin_unlock(&rxq->lock);
 	spin_unlock(&rxq->lock);
 
 
-	if (fill_rx)
-		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-	else
-		iwl_pcie_rxq_restock(trans);
+	/*
+	 * Handle the case where in emergency there are some unallocated RBDs.
+	 * Those RBDs are in the used list, but are not tracked by the queue's
+	 * used_count, which only counts allocator-owned RBDs.
+	 * Unallocated emergency RBDs must be allocated on exit, otherwise,
+	 * when this function is called again, it may not be in emergency mode
+	 * and they will be handed to the allocator with no tracking in the
+	 * RBD allocator counters, which will lead to them never being claimed
+	 * back by the queue.
+	 * By allocating them here, they are now in the queue free list, and
+	 * will be restocked by the next call of iwl_pcie_rxq_restock.
+	 */
+	if (unlikely(emergency && count))
+		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
 
 
 	if (trans_pcie->napi.poll)
 	if (trans_pcie->napi.poll)
 		napi_gro_flush(&trans_pcie->napi, false);
 		napi_gro_flush(&trans_pcie->napi, false);
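The emergency path in iwl_pcie_rx_handle() is essentially a hysteresis: it kicks in when half of the queue's RBDs are waiting for pages, allocates directly with GFP_ATOMIC every 8 handled buffers, and drops back to the allocator once fewer than a third are outstanding. The stand-alone model below mirrors those thresholds (assuming the usual 256-entry RX queue); it is a simplification, not the driver's code.

/* Toy model of the emergency hysteresis in the RX handler. */
#include <stdbool.h>
#include <stdio.h>

#define RX_QUEUE_SIZE 256

int main(void)
{
	unsigned int used_count = 120;	/* RBDs currently without a page */
	bool emergency = false;
	unsigned int count = 0;

	for (int handled = 0; handled < 64; handled++) {
		if (!emergency)
			used_count++;	/* pages keep being stolen */

		if (used_count == RX_QUEUE_SIZE / 2)
			emergency = true;

		if (emergency && ++count == 8) {
			count = 0;
			if (used_count < RX_QUEUE_SIZE / 3)
				emergency = false;
			/* in the driver: iwl_pcie_rxq_alloc_rbs(GFP_ATOMIC) */
			used_count -= 8;
		}
		printf("handled=%2d used=%3u emergency=%d\n",
		       handled, used_count, emergency);
	}
	return 0;
}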

+ 206 - 95
drivers/net/wireless/iwlwifi/pcie/trans.c

@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
 		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
 		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
 			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
 			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
 					  APMG_PCIDEV_STT_VAL_WAKE_ME);
 					  APMG_PCIDEV_STT_VAL_WAKE_ME);
-		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
 			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 				    CSR_HW_IF_CONFIG_REG_PREPARE |
 				    CSR_HW_IF_CONFIG_REG_PREPARE |
 				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
 				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+			mdelay(1);
+			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+		}
 		mdelay(5);
 		mdelay(5);
 	}
 	}
 
 
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 	if (ret >= 0)
 	if (ret >= 0)
 		return 0;
 		return 0;
 
 
+	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	msleep(1);
+
 	for (iter = 0; iter < 10; iter++) {
 	for (iter = 0; iter < 10; iter++) {
 		/* If HW is not ready, prepare the conditions to check again */
 		/* If HW is not ready, prepare the conditions to check again */
 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
 
 		do {
 		do {
 			ret = iwl_pcie_set_hw_ready(trans);
 			ret = iwl_pcie_set_hw_ready(trans);
-			if (ret >= 0)
-				return 0;
+			if (ret >= 0) {
+				ret = 0;
+				goto out;
+			}
 
 
 			usleep_range(200, 1000);
 			usleep_range(200, 1000);
 			t += 200;
 			t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
 
 	IWL_ERR(trans, "Couldn't prepare the card\n");
 	IWL_ERR(trans, "Couldn't prepare the card\n");
 
 
+out:
+	iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
 	return ret;
 	return ret;
 }
 }
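iwl_pcie_prepare_card_hw() now wraps its readiness check in two nested loops: up to ten outer attempts, each re-asserting PREPARE and then polling with short sleeps until a time budget runs out, with the new out: label clearing the link power-management bit on every exit path. A small sketch of that retry structure, with a faked readiness check and without the real register writes or sleeps, is shown below.

/* Sketch of a bounded two-level retry loop; the "ready" check is faked. */
#include <stdbool.h>
#include <stdio.h>

static bool hw_ready(int attempt, int polled_us)
{
	/* pretend the hardware answers on the 3rd attempt after ~400us */
	return attempt >= 3 && polled_us >= 400;
}

int main(void)
{
	const int max_attempts = 10;
	const int poll_budget_us = 1000;

	for (int attempt = 1; attempt <= max_attempts; attempt++) {
		for (int waited = 0; waited < poll_budget_us; waited += 200) {
			if (hw_ready(attempt, waited)) {
				printf("ready on attempt %d after %dus\n",
				       attempt, waited);
				return 0;
			}
			/* the driver sleeps here: usleep_range(200, 1000) */
		}
		/* attempt failed; the driver re-asserts PREPARE and retries */
	}

	printf("couldn't prepare the card\n");
	return 1;
}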
 
 
@@ -764,8 +780,15 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 		last_read_idx = i;
 		last_read_idx = i;
 
 
+		/*
+		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
+		 * CPU1 sections from the CPU2 sections.
+		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
+		 * non-paged sections from the CPU2 paging sections.
+		 */
 		if (!image->sec[i].data ||
 		if (!image->sec[i].data ||
-		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
 			IWL_DEBUG_FW(trans,
 			IWL_DEBUG_FW(trans,
 				     "Break since Data not valid or Empty section, sec = %d\n",
 				     "Break since Data not valid or Empty section, sec = %d\n",
 				     i);
 				     i);
@@ -813,8 +836,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 		last_read_idx = i;
 		last_read_idx = i;
 
 
+		/*
+		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
+		 * CPU1 sections from the CPU2 sections.
+		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
+		 * non-paged sections from the CPU2 paging sections.
+		 */
 		if (!image->sec[i].data ||
 		if (!image->sec[i].data ||
-		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
 			IWL_DEBUG_FW(trans,
 			IWL_DEBUG_FW(trans,
 				     "Break since Data not valid or Empty section, sec = %d\n",
 				     "Break since Data not valid or Empty section, sec = %d\n",
 				     i);
 				     i);
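Both hunks above rely on the same convention: the firmware image is a flat array of sections, and special offset values act as separators between the CPU1, CPU2 and CPU2-paging groups. The sketch below walks such a table and ends each group at a separator or an empty slot; the separator constants and section contents are invented, only the control flow mirrors the loops above.

/* Sketch of walking a section table delimited by "separator" offsets. */
#include <stdint.h>
#include <stdio.h>

#define SEPARATOR_A 0xFFFFCCCCu	/* stand-in for CPU1_CPU2_SEPARATOR_SECTION */
#define SEPARATOR_B 0xAAAABBBBu	/* stand-in for PAGING_SEPARATOR_SECTION */

struct section {
	const void *data;	/* NULL marks an empty slot */
	uint32_t offset;
};

/* Load sections starting at *first until a separator or empty slot,
 * then leave *first pointing one past the separator for the next group. */
static void load_group(const struct section *sec, int max, int *first)
{
	int i;

	for (i = *first; i < max; i++) {
		if (!sec[i].data ||
		    sec[i].offset == SEPARATOR_A ||
		    sec[i].offset == SEPARATOR_B)
			break;
		printf("  loading section %d at offset 0x%x\n",
		       i, (unsigned)sec[i].offset);
	}
	*first = i + 1;	/* skip the separator itself */
}

int main(void)
{
	static const char blob[] = "firmware bytes";
	const struct section sec[] = {
		{ blob, 0x0000 }, { blob, 0x8000 },
		{ blob, SEPARATOR_A },			/* CPU1 | CPU2 */
		{ blob, 0x10000 },
		{ blob, SEPARATOR_B },			/* CPU2 | paging */
		{ blob, 0x20000 }, { NULL, 0 },
	};
	int first = 0;

	printf("CPU1 sections:\n");
	load_group(sec, 7, &first);
	printf("CPU2 sections:\n");
	load_group(sec, 7, &first);
	printf("paging sections:\n");
	load_group(sec, 7, &first);
	return 0;
}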
@@ -1430,11 +1460,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 	 * As this function may be called again in some corner cases don't
 	 * As this function may be called again in some corner cases don't
 	 * do anything if NAPI was already initialized.
 	 * do anything if NAPI was already initialized.
 	 */
 	 */
-	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+	if (!trans_pcie->napi.poll) {
 		init_dummy_netdev(&trans_pcie->napi_dev);
 		init_dummy_netdev(&trans_pcie->napi_dev);
-		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
-				     &trans_pcie->napi_dev,
-				     iwl_pcie_dummy_napi_poll, 64);
+		netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
+			       iwl_pcie_dummy_napi_poll, 64);
 	}
 	}
 }
 }
 
 
@@ -2261,6 +2290,47 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
 	return prph_len;
 	return prph_len;
 }
 }
 
 
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+				   struct iwl_fw_error_dump_data **data,
+				   int allocated_rb_nums)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	u32 i, r, j, rb_len = 0;
+
+	spin_lock(&rxq->lock);
+
+	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+
+	for (i = rxq->read, j = 0;
+	     i != r && j < allocated_rb_nums;
+	     i = (i + 1) & RX_QUEUE_MASK, j++) {
+		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+		struct iwl_fw_error_dump_rb *rb;
+
+		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+			       DMA_FROM_DEVICE);
+
+		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+		rb = (void *)(*data)->data;
+		rb->index = cpu_to_le32(i);
+		memcpy(rb->data, page_address(rxb->page), max_len);
+		/* remap the page so the normal free path can unmap it later */
+		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+						     max_len,
+						     DMA_FROM_DEVICE);
+
+		*data = iwl_fw_error_next_data(*data);
+	}
+
+	spin_unlock(&rxq->lock);
+
+	return rb_len;
+}
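Each resource that iwl_trans_pcie_dump_rbs() emits becomes one record in the dump: a type, a length, the payload, and then the cursor moves on via iwl_fw_error_next_data(). The user-space sketch below shows that record-append pattern with plain byte copies; the real driver uses its iwl_fw_error_dump_data structures and little-endian conversions, so treat this only as an illustration of the layout.

/* Sketch of a TLV-style record appender; field layout is illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one record (type, len, data) at *cursor; return the next cursor. */
static uint8_t *emit_record(uint8_t *cursor, uint32_t type,
			    const void *payload, uint32_t len)
{
	memcpy(cursor, &type, sizeof(type));
	memcpy(cursor + 4, &len, sizeof(len));
	memcpy(cursor + 8, payload, len);
	return cursor + 8 + len;
}

int main(void)
{
	uint8_t buf[256];
	uint8_t *cursor = buf;
	const char rb0[] = "receive buffer 0";
	const char rb1[] = "receive buffer 1";

	cursor = emit_record(cursor, 0x0b, rb0, sizeof(rb0));
	cursor = emit_record(cursor, 0x0b, rb1, sizeof(rb1));
	printf("dump uses %td bytes for 2 records\n", cursor - buf);
	return 0;
}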
 #define IWL_CSR_TO_DUMP (0x250)
 #define IWL_CSR_TO_DUMP (0x250)
 
 
 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
@@ -2330,17 +2400,97 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
 	return monitor_len;
 	return monitor_len;
 }
 }
 
 
-static
-struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+			    struct iwl_fw_error_dump_data **data,
+			    u32 monitor_len)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 len = 0;
+
+	if ((trans_pcie->fw_mon_page &&
+	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+	    trans->dbg_dest_tlv) {
+		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+		u32 base, write_ptr, wrap_cnt;
+
+		/* If there was a dest TLV - use the values from there */
+		if (trans->dbg_dest_tlv) {
+			write_ptr =
+				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+		} else {
+			base = MON_BUFF_BASE_ADDR;
+			write_ptr = MON_BUFF_WRPTR;
+			wrap_cnt = MON_BUFF_CYCLE_CNT;
+		}
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+		fw_mon_data = (void *)(*data)->data;
+		fw_mon_data->fw_mon_wr_ptr =
+			cpu_to_le32(iwl_read_prph(trans, write_ptr));
+		fw_mon_data->fw_mon_cycle_cnt =
+			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+		fw_mon_data->fw_mon_base_ptr =
+			cpu_to_le32(iwl_read_prph(trans, base));
+
+		len += sizeof(**data) + sizeof(*fw_mon_data);
+		if (trans_pcie->fw_mon_page) {
+			/*
+			 * The firmware is now asserted, it won't write anything
+			 * to the buffer. CPU can take ownership to fetch the
+			 * data. The buffer will be handed back to the device
+			 * before the firmware will be restarted.
+			 */
+			dma_sync_single_for_cpu(trans->dev,
+						trans_pcie->fw_mon_phys,
+						trans_pcie->fw_mon_size,
+						DMA_FROM_DEVICE);
+			memcpy(fw_mon_data->data,
+			       page_address(trans_pcie->fw_mon_page),
+			       trans_pcie->fw_mon_size);
+
+			monitor_len = trans_pcie->fw_mon_size;
+		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+			/*
+			 * Update pointers to reflect actual values after
+			 * shifting
+			 */
+			base = iwl_read_prph(trans, base) <<
+			       trans->dbg_dest_tlv->base_shift;
+			iwl_trans_read_mem(trans, base, fw_mon_data->data,
+					   monitor_len / sizeof(u32));
+		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+			monitor_len =
+				iwl_trans_pci_dump_marbh_monitor(trans,
+								 fw_mon_data,
+								 monitor_len);
+		} else {
+			/* Didn't match anything - output no monitor data */
+			monitor_len = 0;
+		}
+
+		len += monitor_len;
+		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+	}
+
+	return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+			  struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_fw_error_dump_data *data;
 	struct iwl_fw_error_dump_data *data;
 	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_fw_error_dump_txcmd *txcmd;
 	struct iwl_fw_error_dump_txcmd *txcmd;
 	struct iwl_trans_dump_data *dump_data;
 	struct iwl_trans_dump_data *dump_data;
-	u32 len;
+	u32 len, num_rbs;
 	u32 monitor_len;
 	u32 monitor_len;
 	int i, ptr;
 	int i, ptr;
+	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
 
 
 	/* transport dump header */
 	/* transport dump header */
 	len = sizeof(*dump_data);
 	len = sizeof(*dump_data);
@@ -2349,22 +2499,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
 	len += sizeof(*data) +
 	len += sizeof(*data) +
 		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
 		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
 
 
-	/* CSR registers */
-	len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
-	/* PRPH registers */
-	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
-		/* The range includes both boundaries */
-		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
-			iwl_prph_dump_addr[i].start + 4;
-
-		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
-			num_bytes_in_chunk;
-	}
-
-	/* FH registers */
-	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
-
 	/* FW monitor */
 	/* FW monitor */
 	if (trans_pcie->fw_mon_page) {
 	if (trans_pcie->fw_mon_page) {
 		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
 		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2392,6 +2526,45 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
 		monitor_len = 0;
 		monitor_len = 0;
 	}
 	}
 
 
+	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+		dump_data = vzalloc(len);
+		if (!dump_data)
+			return NULL;
+
+		data = (void *)dump_data->data;
+		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+		dump_data->len = len;
+
+		return dump_data;
+	}
+
+	/* CSR registers */
+	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+	/* PRPH registers */
+	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+		/* The range includes both boundaries */
+		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+			iwl_prph_dump_addr[i].start + 4;
+
+		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+		       num_bytes_in_chunk;
+	}
+
+	/* FH registers */
+	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+	if (dump_rbs) {
+		/* RBs */
+		num_rbs = le16_to_cpu(ACCESS_ONCE(
+				      trans_pcie->rxq.rb_stts->closed_rb_num))
+				      & 0x0FFF;
+		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+		len += num_rbs * (sizeof(*data) +
+				  sizeof(struct iwl_fw_error_dump_rb) +
+				  (PAGE_SIZE << trans_pcie->rx_page_order));
+	}
+
 	dump_data = vzalloc(len);
 	dump_data = vzalloc(len);
 	if (!dump_data)
 	if (!dump_data)
 		return NULL;
 		return NULL;
@@ -2428,74 +2601,10 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
 	len += iwl_trans_pcie_dump_prph(trans, &data);
 	len += iwl_trans_pcie_dump_prph(trans, &data);
 	len += iwl_trans_pcie_dump_csr(trans, &data);
 	len += iwl_trans_pcie_dump_csr(trans, &data);
 	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
 	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
-	/* data is already pointing to the next section */
-
-	if ((trans_pcie->fw_mon_page &&
-	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
-	    trans->dbg_dest_tlv) {
-		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
-		u32 base, write_ptr, wrap_cnt;
-
-		/* If there was a dest TLV - use the values from there */
-		if (trans->dbg_dest_tlv) {
-			write_ptr =
-				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
-			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
-			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
-		} else {
-			base = MON_BUFF_BASE_ADDR;
-			write_ptr = MON_BUFF_WRPTR;
-			wrap_cnt = MON_BUFF_CYCLE_CNT;
-		}
-
-		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
-		fw_mon_data = (void *)data->data;
-		fw_mon_data->fw_mon_wr_ptr =
-			cpu_to_le32(iwl_read_prph(trans, write_ptr));
-		fw_mon_data->fw_mon_cycle_cnt =
-			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
-		fw_mon_data->fw_mon_base_ptr =
-			cpu_to_le32(iwl_read_prph(trans, base));
-
-		len += sizeof(*data) + sizeof(*fw_mon_data);
-		if (trans_pcie->fw_mon_page) {
-			/*
-			 * The firmware is now asserted, it won't write anything
-			 * to the buffer. CPU can take ownership to fetch the
-			 * data. The buffer will be handed back to the device
-			 * before the firmware will be restarted.
-			 */
-			dma_sync_single_for_cpu(trans->dev,
-						trans_pcie->fw_mon_phys,
-						trans_pcie->fw_mon_size,
-						DMA_FROM_DEVICE);
-			memcpy(fw_mon_data->data,
-			       page_address(trans_pcie->fw_mon_page),
-			       trans_pcie->fw_mon_size);
-
-			monitor_len = trans_pcie->fw_mon_size;
-		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
-			/*
-			 * Update pointers to reflect actual values after
-			 * shifting
-			 */
-			base = iwl_read_prph(trans, base) <<
-			       trans->dbg_dest_tlv->base_shift;
-			iwl_trans_read_mem(trans, base, fw_mon_data->data,
-					   monitor_len / sizeof(u32));
-		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
-			monitor_len =
-				iwl_trans_pci_dump_marbh_monitor(trans,
-								 fw_mon_data,
-								 monitor_len);
-		} else {
-			/* Didn't match anything - output no monitor data */
-			monitor_len = 0;
-		}
+	if (dump_rbs)
+		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
 
-		len += monitor_len;
-		data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
-	}
+	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
 
 	dump_data->len = len;
 	dump_data->len = len;
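iwl_trans_pcie_dump_data() follows a two-pass pattern: it first walks every source (TX commands, CSR, PRPH, FH, RBs, monitor) adding up sizes, then vzallocs one buffer of exactly that length and walks the sources again emitting records. A compact stand-alone sketch of that measure-then-fill approach is given below; the source names and sizes are made up.

/* Sketch of the two-pass "size everything, allocate once, emit" pattern. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct source {
	const char *name;
	uint32_t len;
};

#define HDR_LEN 8u	/* per-record type + length header */

int main(void)
{
	const struct source srcs[] = {
		{ "txcmd", 64 }, { "csr", 0x250 }, { "fw_monitor", 2048 },
	};
	size_t total = 0;

	/* pass 1: size everything before a single allocation */
	for (size_t i = 0; i < sizeof(srcs) / sizeof(srcs[0]); i++)
		total += HDR_LEN + srcs[i].len;

	uint8_t *dump = calloc(1, total);
	if (!dump)
		return 1;

	/* pass 2: emit the records into the pre-sized buffer */
	uint8_t *cursor = dump;
	for (size_t i = 0; i < sizeof(srcs) / sizeof(srcs[0]); i++) {
		memcpy(cursor, &srcs[i].len, sizeof(srcs[i].len));
		/* the record type and payload would be written here */
		cursor += HDR_LEN + srcs[i].len;
	}

	printf("dump size: %zu bytes\n", total);
	free(dump);
	return 0;
}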
 
 
@@ -2558,6 +2667,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	if (!trans)
 	if (!trans)
 		return ERR_PTR(-ENOMEM);
 		return ERR_PTR(-ENOMEM);
 
 
+	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 
 	trans_pcie->trans = trans;
 	trans_pcie->trans = trans;

+ 71 - 14
drivers/net/wireless/iwlwifi/pcie/tx.c

@@ -388,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 
 
 	/* first TB is never freed - it's the scratchbuf data */
 	/* first TB is never freed - it's the scratchbuf data */
 
 
-	for (i = 1; i < num_tbs; i++)
-		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-				 iwl_pcie_tfd_tb_get_len(tfd, i),
-				 DMA_TO_DEVICE);
-
+	for (i = 1; i < num_tbs; i++) {
+		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+			dma_unmap_page(trans->dev,
+				       iwl_pcie_tfd_tb_get_addr(tfd, i),
+				       iwl_pcie_tfd_tb_get_len(tfd, i),
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(trans->dev,
+					 iwl_pcie_tfd_tb_get_addr(tfd, i),
+					 iwl_pcie_tfd_tb_get_len(tfd, i),
+					 DMA_TO_DEVICE);
+	}
 	tfd->num_tbs = 0;
 	tfd->num_tbs = 0;
 }
 }
 
 
@@ -468,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 
 
 	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
 
-	return 0;
+	return num_tbs;
 }
 }
 
 
 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
@@ -1546,6 +1553,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
 	}
 	}
 
 
+	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+		     sizeof(out_meta->flags) * BITS_PER_BYTE);
 	out_meta->flags = cmd->flags;
 	out_meta->flags = cmd->flags;
 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 		kzfree(txq->entries[idx].free_buf);
 		kzfree(txq->entries[idx].free_buf);
@@ -1789,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		      struct iwl_device_cmd *dev_cmd, int txq_id)
 		      struct iwl_device_cmd *dev_cmd, int txq_id)
 {
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_hdr *hdr;
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
 	struct iwl_txq *txq;
@@ -1798,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	void *tb1_addr;
 	void *tb1_addr;
 	u16 len, tb1_len, tb2_len;
 	u16 len, tb1_len, tb2_len;
 	bool wait_write_ptr;
 	bool wait_write_ptr;
-	__le16 fc = hdr->frame_control;
-	u8 hdr_len = ieee80211_hdrlen(fc);
+	__le16 fc;
+	u8 hdr_len;
 	u16 wifi_seq;
 	u16 wifi_seq;
+	int i;
 
 
 	txq = &trans_pcie->txq[txq_id];
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
 	q = &txq->q;
@@ -1809,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		      "TX on unused queue %d\n", txq_id))
 		      "TX on unused queue %d\n", txq_id))
 		return -EINVAL;
 		return -EINVAL;
 
 
+	if (skb_is_nonlinear(skb) &&
+	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
+
+	/* mac80211 always puts the full header into the SKB's head,
+	 * so there's no need to check if it's readable there
+	 */
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = hdr->frame_control;
+	hdr_len = ieee80211_hdrlen(fc);
+
 	spin_lock(&txq->lock);
 	spin_lock(&txq->lock);
 
 
 	/* In AGG mode, the index in the ring must correspond to the WiFi
 	/* In AGG mode, the index in the ring must correspond to the WiFi
@@ -1839,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_meta = &txq->entries[q->write_ptr].meta;
 	out_meta = &txq->entries[q->write_ptr].meta;
+	out_meta->flags = 0;
 
 
 	/*
 	/*
 	 * The second TB (tb1) points to the remainder of the TX command
 	 * The second TB (tb1) points to the remainder of the TX command
@@ -1872,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 
 	/*
 	/*
 	 * Set up TFD's third entry to point directly to remainder
 	 * Set up TFD's third entry to point directly to remainder
-	 * of skb, if any (802.11 null frames have no payload).
+	 * of skb's head, if any
 	 */
 	 */
-	tb2_len = skb->len - hdr_len;
+	tb2_len = skb_headlen(skb) - hdr_len;
 	if (tb2_len > 0) {
 	if (tb2_len > 0) {
 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
 						     skb->data + hdr_len,
 						     skb->data + hdr_len,
@@ -1887,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
 	}
 	}
 
 
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		dma_addr_t tb_phys;
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			goto out_err;
+		}
+		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+						skb_frag_size(frag), false);
+
+		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+	}
+
 	/* Set up entry for this TFD in Tx byte-count array */
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
 
@@ -1896,14 +1942,25 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, tb2_len);
+				  hdr_len, skb->len - hdr_len);
 
 
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 
 
 	/* start timer if queue currently empty */
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr) {
 	if (q->read_ptr == q->write_ptr) {
-		if (txq->wd_timeout)
-			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+		if (txq->wd_timeout) {
+			/*
+			 * If the TXQ is active, then set the timer, if not,
+			 * If the TXQ is active, then set the timer; if not,
+			 * store the timeout in the remainder so that the
+			 * timer will be armed with the right value when the
+			 * station wakes up.
+			if (!txq->frozen)
+				mod_timer(&txq->stuck_timer,
+					  jiffies + txq->wd_timeout);
+			else
+				txq->frozen_expiry_remainder = txq->wd_timeout;
+		}
 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
 		iwl_trans_pcie_ref(trans);
 		iwl_trans_pcie_ref(trans);
 	}
 	}
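The frozen-queue handling above boils down to: if the watchdog cannot be armed right now, remember the timeout and arm the timer with that remainder once the queue thaws. A small model of that idea, with plain integer "ticks" instead of jiffies and real timers, is sketched below; it is illustrative only.

/* Sketch of deferring a watchdog with a saved remainder while frozen. */
#include <stdbool.h>
#include <stdio.h>

struct txq_model {
	bool frozen;
	long wd_timeout;		/* watchdog period, in "ticks" */
	long frozen_expiry_remainder;	/* what to re-arm with later */
	long timer_expiry;		/* 0 means the timer is not armed */
};

static void first_tx(struct txq_model *q, long now)
{
	if (!q->wd_timeout)
		return;
	if (!q->frozen)
		q->timer_expiry = now + q->wd_timeout;
	else
		q->frozen_expiry_remainder = q->wd_timeout;
}

static void unfreeze(struct txq_model *q, long now)
{
	q->frozen = false;
	if (q->frozen_expiry_remainder) {
		q->timer_expiry = now + q->frozen_expiry_remainder;
		q->frozen_expiry_remainder = 0;
	}
}

int main(void)
{
	struct txq_model q = { .frozen = true, .wd_timeout = 2000 };

	first_tx(&q, 100);	/* queue frozen: timer not armed yet */
	printf("armed at t=100? %s\n", q.timer_expiry ? "yes" : "no");
	unfreeze(&q, 600);	/* station woke up: arm with the saved value */
	printf("timer now expires at t=%ld\n", q.timer_expiry);
	return 0;
}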