@@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
	 *
	 * HW has limitation that all vrings addresses must share the same
	 * upper 16 msb bits part of 48 bits address. To workaround that,
-	 * if we are using 48 bit addresses switch to 32 bit allocation
-	 * before allocating vring memory.
+	 * if we are using more than 32 bit addresses switch to 32 bit
+	 * allocation before allocating vring memory.
	 *
	 * There's no check for the return value of dma_set_mask_and_coherent,
	 * since we assume if we were able to set the mask during
	 * initialization in this system it will not fail if we set it again
	 */
-	if (wil->use_extended_dma_addr)
+	if (wil->dma_addr_size > 32)
		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
		return -ENOMEM;
	}

-	if (wil->use_extended_dma_addr)
-		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (wil->dma_addr_size > 32)
+		dma_set_mask_and_coherent(dev,
+					  DMA_BIT_MASK(wil->dma_addr_size));

	/* initially, all descriptors are SW owned
	 * For Tx and Rx, ownership bit is at the same location, thus