@@ -85,7 +85,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
 	memset(skbdesc, 0, sizeof(*skbdesc));
 	skbdesc->entry = entry;
 
-	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
 		dma_addr_t skb_dma;
 
 		skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
@@ -198,7 +198,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 
 	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 
-	if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
+	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
 		/*
 		 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
 		 * seqno on retransmited data (non-QOS) frames. To workaround
@@ -484,7 +484,7 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
 	rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc);
 	rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc);
 
-	if (test_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC))
 		rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc,
 						    sta, hwrate);
 	else
@@ -526,7 +526,7 @@ static int rt2x00queue_write_tx_data(struct queue_entry *entry,
 	/*
 	 * Map the skb to DMA.
 	 */
-	if (test_bit(REQUIRE_DMA, &rt2x00dev->cap_flags) &&
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) &&
 	    rt2x00queue_map_txskb(entry))
 		return -ENOMEM;
 
@@ -646,7 +646,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 */
 	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
 	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
-		if (test_bit(REQUIRE_COPY_IV, &queue->rt2x00dev->cap_flags))
+		if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV))
 			rt2x00crypto_tx_copy_iv(skb, &txdesc);
 		else
 			rt2x00crypto_tx_remove_iv(skb, &txdesc);
@@ -660,9 +660,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
 	 * PCI and USB devices, while header alignment only is valid
 	 * for PCI devices.
 	 */
-	if (test_bit(REQUIRE_L2PAD, &queue->rt2x00dev->cap_flags))
+	if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD))
 		rt2x00queue_insert_l2pad(skb, txdesc.header_length);
-	else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
+	else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA))
 		rt2x00queue_align_frame(skb);
 
 	/*
@@ -1178,7 +1178,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
 	if (status)
 		goto exit;
 
-	if (test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags)) {
+	if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) {
 		status = rt2x00queue_alloc_entries(rt2x00dev->atim);
 		if (status)
 			goto exit;
@@ -1234,7 +1234,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
 	struct data_queue *queue;
 	enum data_queue_qid qid;
 	unsigned int req_atim =
-	    !!test_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags);
+	    rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE);
 
 	/*
 	 * We need the following queues:
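
Note: every hunk above replaces an open-coded test_bit() on cap_flags with a capability-test helper. The helper itself is not part of this file; a minimal sketch of how such a wrapper could look, with the struct and enum names inferred from the call sites rather than taken from this diff:

/*
 * Sketch of the capability-flag helper assumed by the hunks above: a thin
 * static inline wrapper that keeps the test_bit() call on the per-device
 * cap_flags bitmap in one place and returns a bool. Type names are inferred.
 */
static inline bool rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
				       enum rt2x00_capability_flags cap_flag)
{
	return test_bit(cap_flag, &rt2x00dev->cap_flags);
}

Because such a wrapper returns bool, the !! double negation dropped in the last hunk of rt2x00queue_allocate() is no longer needed: callers get a normalized 0/1 value directly.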