@@ -25,11 +25,45 @@
 static int mt76x0e_start(struct ieee80211_hw *hw)
 {
+	struct mt76x0_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	mt76x02_mac_start(&dev->mt76);
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+				     MT_CALIBRATE_INTERVAL);
+	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
+				     MT_CALIBRATE_INTERVAL);
+	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+	mutex_unlock(&dev->mt76.mutex);
+
 	return 0;
 }
 
 static void mt76x0e_stop(struct ieee80211_hw *hw)
 {
+	struct mt76x0_dev *dev = hw->priv;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+	cancel_delayed_work_sync(&dev->cal_work);
+	cancel_delayed_work_sync(&dev->mac_work);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
+		       0, 1000))
+		dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
+	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_EN);
+
+	mt76x0_mac_stop(dev);
+
+	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
+		       0, 1000))
+		dev_warn(dev->mt76.dev, "RX DMA did not stop\n");
+	mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+	mutex_unlock(&dev->mt76.mutex);
 }
 
 static const struct ieee80211_ops mt76x0e_ops = {