| author | Joerg Roedel <joerg.roedel@amd.com> | 2009-01-09 12:42:46 +0100 |
|---|---|---|
| committer | Joerg Roedel <joerg.roedel@amd.com> | 2009-03-05 20:35:15 +0100 |
| commit | 3b1e79ed734f58ac41ca0a287ff03ca355f120ad (patch) | |
| tree | 58ff425e5181df9f2fc317a612f2313054cce7aa /lib/dma-debug.c | |
| parent | 30dfa90cc8c4c9621d8d5aa9499f3a5df3376307 (diff) | |
dma-debug: add allocator code
Impact: add allocator code for struct dma_debug_entry
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
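The entries are handed out from a pre-allocated pool rather than allocated on demand, since the DMA-API hooks that will consume them can run in atomic context. To show how the alloc/free pair is meant to be used, here is a minimal sketch of a caller; the hook name debug_map_single and the exact field assignments are illustrative assumptions, not part of this patch:

```c
/*
 * Illustrative caller only - the real DMA-API hooks arrive in later
 * patches. Field names follow struct dma_debug_entry as introduced
 * by the parent commit.
 */
static void debug_map_single(struct device *dev, dma_addr_t dev_addr,
			     size_t size)
{
	struct dma_debug_entry *entry;

	if (global_disable)
		return;

	entry = dma_entry_alloc();
	if (!entry)	/* pool exhausted; debugging has been disabled */
		return;

	entry->dev      = dev;
	entry->dev_addr = dev_addr;
	entry->size     = size;

	add_dma_entry(entry);	/* insert into the dma address hash */
}
```

Note that pool exhaustion is handled by disabling the whole facility via global_disable rather than failing the mapping: the debug layer must never change the behavior of the DMA path it observes.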
Diffstat (limited to 'lib/dma-debug.c')
| -rw-r--r-- | lib/dma-debug.c | 57 |

1 file changed, 57 insertions, 0 deletions
```diff
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5ff7d2e..b609146 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -52,6 +52,16 @@ struct hash_bucket {
 /* Hash list to save the allocated dma addresses */
 static struct hash_bucket dma_entry_hash[HASH_SIZE];
 
+/* List of pre-allocated dma_debug_entry's */
+static LIST_HEAD(free_entries);
+/* Lock for the list above */
+static DEFINE_SPINLOCK(free_entries_lock);
+
+/* Global disable flag - will be set in case of an error */
+static bool global_disable __read_mostly;
+
+static u32 num_free_entries;
+static u32 min_free_entries;
 
 /*
  * Hash related functions
@@ -141,3 +151,50 @@ static void add_dma_entry(struct dma_debug_entry *entry)
 
 	put_hash_bucket(bucket, &flags);
 }
+
+/* struct dma_entry allocator
+ *
+ * The next two functions implement the allocator for
+ * struct dma_debug_entries.
+ */
+static struct dma_debug_entry *dma_entry_alloc(void)
+{
+	struct dma_debug_entry *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&free_entries_lock, flags);
+
+	if (list_empty(&free_entries)) {
+		printk(KERN_ERR "DMA-API: debugging out of memory "
+				"- disabling\n");
+		global_disable = true;
+		goto out;
+	}
+
+	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+	list_del(&entry->list);
+	memset(entry, 0, sizeof(*entry));
+
+	num_free_entries -= 1;
+	if (num_free_entries < min_free_entries)
+		min_free_entries = num_free_entries;
+
+out:
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+
+	return entry;
+}
+
+static void dma_entry_free(struct dma_debug_entry *entry)
+{
+	unsigned long flags;
+
+	/*
+	 * add to beginning of the list - this way the entries are
+	 * more likely cache hot when they are reallocated.
+	 */
+	spin_lock_irqsave(&free_entries_lock, flags);
+	list_add(&entry->list, &free_entries);
+	num_free_entries += 1;
+	spin_unlock_irqrestore(&free_entries_lock, flags);
+}
```
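Nothing in this patch populates free_entries, so as it stands dma_entry_alloc() would immediately hit the out-of-memory path and set global_disable. The pool has to be filled during initialization; a hedged sketch of what such a prefill could look like follows (the function name, error handling, and GFP_KERNEL choice are assumptions, not taken from this patch):

```c
/*
 * Hypothetical init-time prefill for the free list - not part of this
 * patch. It runs before any DMA-API hook fires, so the lock is not
 * strictly required here, but taking it keeps the invariants obvious.
 */
static int prealloc_entries(u32 num_entries)
{
	struct dma_debug_entry *entry;
	unsigned long flags;
	u32 i;

	for (i = 0; i < num_entries; i++) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;

		spin_lock_irqsave(&free_entries_lock, flags);
		list_add_tail(&entry->list, &free_entries);
		num_free_entries += 1;
		spin_unlock_irqrestore(&free_entries_lock, flags);
	}

	min_free_entries = num_free_entries;

	return 0;
}
```

Seeding min_free_entries to the full pool size makes the low-water mark meaningful: dma_entry_alloc() only ever lowers it, so it ends up recording the worst-case pool depletion observed at runtime.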