@@ -66,9 +66,8 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
 static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	struct i40e_virt_mem mem;
 
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
 					 i40e_mem_atq_ring,
 					 (hw->aq.num_asq_entries *
 					 sizeof(struct i40e_aq_desc)),
@@ -76,21 +75,14 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 	if (ret_code)
 		return ret_code;
 
-	hw->aq.asq.desc = hw->aq.asq_mem.va;
-	hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
-
-	ret_code = i40e_allocate_virt_mem(hw, &mem,
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
 					  (hw->aq.num_asq_entries *
 					  sizeof(struct i40e_asq_cmd_details)));
 	if (ret_code) {
-		i40e_free_dma_mem(hw, &hw->aq.asq_mem);
-		hw->aq.asq_mem.va = NULL;
-		hw->aq.asq_mem.pa = 0;
+		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 		return ret_code;
 	}
 
-	hw->aq.asq.details = mem.va;
-
 	return ret_code;
 }
 
@@ -102,16 +94,11 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
 
-	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
 					 i40e_mem_arq_ring,
 					 (hw->aq.num_arq_entries *
 					 sizeof(struct i40e_aq_desc)),
 					 I40E_ADMINQ_DESC_ALIGNMENT);
-	if (ret_code)
-		return ret_code;
-
-	hw->aq.arq.desc = hw->aq.arq_mem.va;
-	hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
 
 	return ret_code;
 }
@@ -125,14 +112,7 @@ static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
 **/
 static void i40e_free_adminq_asq(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
-
-	i40e_free_dma_mem(hw, &hw->aq.asq_mem);
-	hw->aq.asq_mem.va = NULL;
-	hw->aq.asq_mem.pa = 0;
-	mem.va = hw->aq.asq.details;
-	i40e_free_virt_mem(hw, &mem);
-	hw->aq.asq.details = NULL;
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 }
 
 /**
@@ -144,9 +124,7 @@ static void i40e_free_adminq_asq(struct i40e_hw *hw)
 **/
 static void i40e_free_adminq_arq(struct i40e_hw *hw)
 {
-	i40e_free_dma_mem(hw, &hw->aq.arq_mem);
-	hw->aq.arq_mem.va = NULL;
-	hw->aq.arq_mem.pa = 0;
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
 }
 
 /**
@@ -157,7 +135,6 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
 	struct i40e_aq_desc *desc;
-	struct i40e_virt_mem mem;
 	struct i40e_dma_mem *bi;
 	int i;
 
@@ -166,11 +143,11 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
 	 */
 
 	/* buffer_info structures do not need alignment */
-	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
-					  sizeof(struct i40e_dma_mem)));
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_arq_bufs;
-	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
 
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_arq_entries; i++) {
@@ -212,8 +189,7 @@ unwind_alloc_arq_bufs:
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
-	mem.va = hw->aq.arq.r.arq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 
 	return ret_code;
 }
@@ -225,16 +201,15 @@ unwind_alloc_arq_bufs:
 static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
 {
 	i40e_status ret_code;
-	struct i40e_virt_mem mem;
 	struct i40e_dma_mem *bi;
 	int i;
 
 	/* No mapped memory needed yet, just the buffer info structures */
-	ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
-					  sizeof(struct i40e_dma_mem)));
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
 	if (ret_code)
 		goto alloc_asq_bufs;
-	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
 
 	/* allocate the mapped buffers */
 	for (i = 0; i < hw->aq.num_asq_entries; i++) {
@@ -254,8 +229,7 @@ unwind_alloc_asq_bufs:
 	i--;
 	for (; i >= 0; i--)
 		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
-	mem.va = hw->aq.asq.r.asq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 
 	return ret_code;
 }
@@ -266,14 +240,17 @@ unwind_alloc_asq_bufs:
 **/
 static void i40e_free_arq_bufs(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
 	int i;
 
+	/* free descriptors */
 	for (i = 0; i < hw->aq.num_arq_entries; i++)
 		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
 
-	mem.va = hw->aq.arq.r.arq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
 }
 
 /**
@@ -282,7 +259,6 @@ static void i40e_free_arq_bufs(struct i40e_hw *hw)
 **/
 static void i40e_free_asq_bufs(struct i40e_hw *hw)
 {
-	struct i40e_virt_mem mem;
 	int i;
 
 	/* only unmap if the address is non-NULL */
@@ -290,9 +266,14 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
 		if (hw->aq.asq.r.asq_bi[i].pa)
 			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
 
-	/* now free the buffer info list */
-	mem.va = hw->aq.asq.r.asq_bi;
-	i40e_free_virt_mem(hw, &mem);
+	/* free the buffer info list */
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
 }
 
 /**
@@ -305,14 +286,18 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
 {
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the transmit queue */
-		wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
-		wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_VF_ATQBAH1,
+		     upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_VF_ATQBAL1,
+		     lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
 					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
 	} else {
 		/* configure the transmit queue */
-		wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
-		wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+		wr32(hw, I40E_PF_ATQBAH,
+		     upper_32_bits(hw->aq.asq.desc_buf.pa));
+		wr32(hw, I40E_PF_ATQBAL,
+		     lower_32_bits(hw->aq.asq.desc_buf.pa));
 		wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
 					  I40E_PF_ATQLEN_ATQENABLE_MASK));
 	}
@@ -328,14 +313,18 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
 {
 	if (hw->mac.type == I40E_MAC_VF) {
 		/* configure the receive queue */
-		wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
-		wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_VF_ARQBAH1,
+		     upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_VF_ARQBAL1,
+		     lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
 					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
 	} else {
 		/* configure the receive queue */
-		wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
-		wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+		wr32(hw, I40E_PF_ARQBAH,
+		     upper_32_bits(hw->aq.arq.desc_buf.pa));
+		wr32(hw, I40E_PF_ARQBAL,
+		     lower_32_bits(hw->aq.arq.desc_buf.pa));
 		wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
 					  I40E_PF_ARQLEN_ARQENABLE_MASK));
 	}
@@ -483,8 +472,6 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
 
 	/* free ring buffers */
 	i40e_free_asq_bufs(hw);
-	/* free the ring descriptors */
-	i40e_free_adminq_asq(hw);
 
 	mutex_unlock(&hw->aq.asq_mutex);
 
@@ -516,8 +503,6 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
 
 	/* free ring buffers */
 	i40e_free_arq_bufs(hw);
-	/* free the ring descriptors */
-	i40e_free_adminq_arq(hw);
 
 	mutex_unlock(&hw->aq.arq_mutex);
 