@@ -102,6 +102,11 @@ static DEFINE_SPINLOCK(free_entries_lock);
 /* Global disable flag - will be set in case of an error */
 static u32 global_disable __read_mostly;
 
+static inline bool dma_debug_disabled(void)
+{
+	return global_disable;
+}
+
 /* Global error count */
 static u32 error_count;
 
@@ -945,7 +950,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
 	struct dma_debug_entry *uninitialized_var(entry);
 	int count;
 
-	if (global_disable)
+	if (dma_debug_disabled())
 		return 0;
 
 	switch (action) {
@@ -973,7 +978,7 @@ void dma_debug_add_bus(struct bus_type *bus)
 {
 	struct notifier_block *nb;
 
-	if (global_disable)
+	if (dma_debug_disabled())
 		return;
 
 	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
@@ -994,7 +999,7 @@ void dma_debug_init(u32 num_entries)
 {
 	int i;
 
-	if (global_disable)
+	if (dma_debug_disabled())
 		return;
 
 	for (i = 0; i < HASH_SIZE; ++i) {
@@ -1243,7 +1248,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
 {
 	struct dma_debug_entry *entry;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (dma_mapping_error(dev, dma_addr))
@@ -1283,7 +1288,7 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	struct hash_bucket *bucket;
 	unsigned long flags;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.dev = dev;
@@ -1325,7 +1330,7 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 		.direction = direction,
 	};
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (map_single)
@@ -1342,7 +1347,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, mapped_ents, i) {
@@ -1395,7 +1400,7 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sglist, s, nelems, i) {
@@ -1427,7 +1432,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
 {
 	struct dma_debug_entry *entry;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	if (unlikely(virt == NULL))
@@ -1462,7 +1467,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 		.direction = DMA_BIDIRECTIONAL,
 	};
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	check_unmap(&ref);
@@ -1474,7 +1479,7 @@ void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1494,7 +1499,7 @@ void debug_dma_sync_single_for_device(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1515,7 +1520,7 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1536,7 +1541,7 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
 {
 	struct dma_debug_entry ref;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	ref.type = dma_debug_single;
@@ -1556,7 +1561,7 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	struct scatterlist *s;
 	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
@@ -1589,7 +1594,7 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
	struct scatterlist *s;
	int mapped_ents = 0, i;
 
-	if (unlikely(global_disable))
+	if (unlikely(dma_debug_disabled()))
 		return;
 
 	for_each_sg(sg, s, nelems, i) {
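
The change is mechanical: every direct read of global_disable is replaced by the new inline predicate dma_debug_disabled(), so a later change to how the "disabled" state is computed only has to touch one function. A minimal, self-contained user-space sketch of the same accessor pattern follows; the names (debug_disabled_flag, debug_disabled, do_traced_work) are illustrative only and are not part of the kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's global_disable flag. */
static bool debug_disabled_flag;

/* Single point of truth: callers never read the flag directly. */
static inline bool debug_disabled(void)
{
	return debug_disabled_flag;
}

static void do_traced_work(void)
{
	/* Every entry point checks the predicate, mirroring the patch. */
	if (debug_disabled())
		return;

	printf("tracking work\n");
}

int main(void)
{
	do_traced_work();            /* tracked */
	debug_disabled_flag = true;  /* e.g. an error was detected */
	do_traced_work();            /* skipped: debugging disabled */
	return 0;
}

Because the helper is a static inline returning the flag, the compiler can still fold the check into each call site, so the indirection costs nothing at run time while keeping the policy in one place.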