@@ -41,8 +41,277 @@
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
+#include "hns_roce_hw_v2.h"

-static const struct hns_roce_hw hns_roce_hw_v2;
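+/*
+ * Number of free descriptors left in the ring. One slot is always
+ * kept unused so that next_to_use == next_to_clean unambiguously
+ * means "empty" rather than "full".
+ */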
+static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
+{
+        int ntu = ring->next_to_use;
+        int ntc = ring->next_to_clean;
+        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+        return ring->desc_num - used - 1;
+}
+
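+/*
+ * Allocate a ring's descriptor array and map it for DMA. The mapping
+ * is bidirectional because the device both fetches commands from the
+ * descriptors and writes results back into them.
+ */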
+static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
+                                   struct hns_roce_v2_cmq_ring *ring)
+{
+        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
+
+        ring->desc = kzalloc(size, GFP_KERNEL);
+        if (!ring->desc)
+                return -ENOMEM;
+
+        ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
+                                             DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
+                ring->desc_dma_addr = 0;
+                kfree(ring->desc);
+                ring->desc = NULL;
+                return -ENOMEM;
+        }
+
+        return 0;
+}
+
+static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
+                                   struct hns_roce_v2_cmq_ring *ring)
+{
+        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
+                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
+                         DMA_BIDIRECTIONAL);
+        kfree(ring->desc);
+}
+
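+/*
+ * ring_type selects between the command send queue (TYPE_CSQ) and the
+ * command receive queue (TYPE_CRQ) of the command queue pair.
+ */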
+static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
+{
+        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
+                                            &priv->cmq.csq : &priv->cmq.crq;
+
+        ring->flag = ring_type;
+        ring->next_to_clean = 0;
+        ring->next_to_use = 0;
+
+        return hns_roce_alloc_cmq_desc(hr_dev, ring);
+}
+
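+/*
+ * Point the hardware at a ring: program the 64-bit base address as two
+ * 32-bit halves, write the encoded depth together with the enable bit,
+ * and zero the head and tail pointers.
+ */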
+static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
+{
+        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
+                                            &priv->cmq.csq : &priv->cmq.crq;
+        dma_addr_t dma = ring->desc_dma_addr;
+
+        if (ring_type == TYPE_CSQ) {
+                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
+                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
+                           upper_32_bits(dma));
+                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
+                           (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
+                           HNS_ROCE_CMQ_ENABLE);
+                roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
+                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
+        } else {
+                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
+                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
+                           upper_32_bits(dma));
+                roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
+                           (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
+                           HNS_ROCE_CMQ_ENABLE);
+                roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
+                roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
+        }
+}
+
+static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
+{
+        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+        int ret;
+
+        /* Setup the queue entries for command queue */
+        priv->cmq.csq.desc_num = 1024;
+        priv->cmq.crq.desc_num = 1024;
+
+        /* Setup the lock for command queue */
+        spin_lock_init(&priv->cmq.csq.lock);
+        spin_lock_init(&priv->cmq.crq.lock);
+
+        /* Setup Tx write back timeout */
+        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
+
+        /* Init CSQ */
+        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
+        if (ret) {
+                dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
+                return ret;
+        }
+
+        /* Init CRQ */
+        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
+        if (ret) {
+                dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
+                goto err_crq;
+        }
+
+        /* Init CSQ REG */
+        hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
+
+        /* Init CRQ REG */
+        hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
+
+        return 0;
+
+err_crq:
+        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+
+        return ret;
+}
+
+static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
+{
+        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+
+        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
+        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
+}
+
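+/*
+ * Initialize a command descriptor. All commands carry the NO_INTR and
+ * IN flags; the WR flag is additionally set for read (query) commands.
+ */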
+void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
+                                   enum hns_roce_opcode_type opcode,
+                                   bool is_read)
+{
+        memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
+        desc->opcode = cpu_to_le16(opcode);
+        desc->flag =
+                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
+        if (is_read)
+                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
+        else
+                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
+}
+
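+/*
+ * The hardware advances TX_CMQ_HEAD as it consumes descriptors, so the
+ * CSQ is fully processed once head catches up with next_to_use.
+ */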
+static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
+{
+        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+        u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+
+        return head == priv->cmq.csq.next_to_use;
+}
+
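+/*
+ * Reclaim descriptors the hardware has consumed by advancing
+ * next_to_clean up to the hardware head pointer. Returns the number of
+ * descriptors cleaned.
+ */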
+static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
+{
+        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+        struct hns_roce_cmq_desc *desc;
+        u16 ntc = csq->next_to_clean;
+        u32 head;
+        int clean = 0;
+
+        desc = &csq->desc[ntc];
+        head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
+        while (head != ntc) {
+                memset(desc, 0, sizeof(*desc));
+                ntc++;
+                if (ntc == csq->desc_num)
+                        ntc = 0;
+                desc = &csq->desc[ntc];
+                clean++;
+        }
+        csq->next_to_clean = ntc;
+
+        return clean;
+}
+
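+/*
+ * Post @num descriptors from @desc to the CSQ and ring the doorbell.
+ * For NO_INTR (polled) commands, wait for the firmware write-back and
+ * copy the results, including each descriptor's return value, back
+ * into @desc.
+ */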
+int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+                      struct hns_roce_cmq_desc *desc, int num)
+{
+        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+        struct hns_roce_cmq_desc *desc_to_use;
+        bool complete = false;
+        u32 timeout = 0;
+        int handle = 0;
+        u16 desc_ret;
+        int ret = 0;
+        int ntc;
+
+        spin_lock_bh(&csq->lock);
+
+        if (num > hns_roce_cmq_space(csq)) {
+                spin_unlock_bh(&csq->lock);
+                return -EBUSY;
+        }
+
+        /*
+         * Record the location of desc in the cmq for this batch;
+         * the hardware will write its results back to these slots.
+         */
+        ntc = csq->next_to_use;
+
+        while (handle < num) {
+                desc_to_use = &csq->desc[csq->next_to_use];
+                *desc_to_use = desc[handle];
+                dev_dbg(hr_dev->dev, "set cmq desc:\n");
+                csq->next_to_use++;
+                if (csq->next_to_use == csq->desc_num)
+                        csq->next_to_use = 0;
+                handle++;
+        }
+
+        /* Write to hardware */
+        roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
+
+        /*
+         * If the command is sync, wait for the firmware to write back;
+         * if multiple descriptors were posted, use the first one to
+         * check for completion.
+         */
+        if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
+                do {
+                        if (hns_roce_cmq_csq_done(hr_dev))
+                                break;
+                        usleep_range(1000, 2000);
+                        timeout++;
+                } while (timeout < priv->cmq.tx_timeout);
+        }
+
+        if (hns_roce_cmq_csq_done(hr_dev)) {
+                complete = true;
+                handle = 0;
+                while (handle < num) {
+                        /* get the result of hardware write back */
+                        desc_to_use = &csq->desc[ntc];
+                        desc[handle] = *desc_to_use;
+                        dev_dbg(hr_dev->dev, "Get cmq desc:\n");
+                        desc_ret = le16_to_cpu(desc[handle].retval);
+                        if (desc_ret == CMD_EXEC_SUCCESS)
+                                ret = 0;
+                        else
+                                ret = -EIO;
+                        priv->cmq.last_status = desc_ret;
+                        ntc++;
+                        handle++;
+                        if (ntc == csq->desc_num)
+                                ntc = 0;
+                }
+        }
+
+        if (!complete)
+                ret = -EAGAIN;
+
+        /* clean the command send queue */
+        handle = hns_roce_cmq_csq_clean(hr_dev);
+        if (handle != num)
+                dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
+                         handle, num);
+
+        spin_unlock_bh(&csq->lock);
+
+        return ret;
+}
+
+static const struct hns_roce_hw hns_roce_hw_v2 = {
+        .cmq_init = hns_roce_v2_cmq_init,
+        .cmq_exit = hns_roce_v2_cmq_exit,
+};

 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
@@ -87,6 +356,12 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
         if (!hr_dev)
                 return -ENOMEM;

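+        /* v2-specific state (e.g. the command queues) lives in hr_dev->priv */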
+        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
+        if (!hr_dev->priv) {
+                ret = -ENOMEM;
+                goto error_failed_kzalloc;
+        }
+
         hr_dev->pci_dev = handle->pdev;
         hr_dev->dev = &handle->pdev->dev;
         handle->priv = hr_dev;
@@ -106,6 +381,9 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
         return 0;

 error_failed_get_cfg:
+        kfree(hr_dev->priv);
+
+error_failed_kzalloc:
         ib_dealloc_device(&hr_dev->ib_dev);

         return ret;
@@ -117,6 +395,7 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
         struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;

         hns_roce_exit(hr_dev);
+        kfree(hr_dev->priv);
         ib_dealloc_device(&hr_dev->ib_dev);
 }