From 7180e769f72650878eea8b0322e0298f4abcac52 Mon Sep 17 00:00:00 2001
From: Ricardo M. Correia
Date: Fri, 21 May 2010 10:27:57 +0000
Subject: [PATCH] debug_vmalloc() v5 patch.

---
 arch/x86_64/Kconfig.debug     |   10 +
 arch/x86_64/mm/fault.c        |    8 +-
 include/asm-x86_64/pgalloc.h  |    2 +-
 include/linux/debug_vmalloc.h |   47 +++
 include/linux/mm.h            |    3 +
 mm/Makefile                   |    1 +
 mm/debug_vmalloc.c            |  652 +++++++++++++++++++++++++++++++++++++++++
 7 files changed, 721 insertions(+), 2 deletions(-)
 create mode 100644 include/linux/debug_vmalloc.h
 create mode 100644 mm/debug_vmalloc.c

diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index 775d211..5378778 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -55,6 +55,16 @@ config DEBUG_STACK_USAGE
 
 	  This option will slow down process creation somewhat.
 
+config DEBUG_VMALLOC
+	bool "debug_vmalloc()"
+	depends on DEBUG_KERNEL
+	depends on MMU
+	default n
+	help
+	  debug_vmalloc() can be used to debug memory corruption problems.
+	  Don't enable this unless you're debugging such a problem yourself.
+	  If in doubt, say "N".
+
 #config X86_REMOTE_DEBUG
 #	bool "kgdb debugging stub"
 
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 5a0c483..8e63871 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -296,7 +296,13 @@ static int vmalloc_fault(unsigned long address)
 	   happen within a race in page table update. In the later case
 	   just flush. */
 
-	pgd = pgd_offset(current->mm ?: &init_mm, address);
+	/*
+	 * Inside debug_vmalloc() it's possible to run into an infinite loop of
+	 * page faults due to a bug in vmalloc_fault(). This was filed as
+	 * kernel bug 12547 (in bugzilla.kernel.org) and fixed upstream as
+	 * commit f313e12308f7c. We also fix it here.
+	 */
+	pgd = pgd_offset(current->active_mm, address);
 	pgd_ref = pgd_offset_k(address);
 	if (pgd_none(*pgd_ref))
 		return -1;
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 21b7f3f..34c076c 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -107,7 +107,7 @@ static inline void pgd_free(pgd_t *pgd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	return (pte_t *)get_zeroed_page(GFP_ATOMIC|__GFP_REPEAT);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
diff --git a/include/linux/debug_vmalloc.h b/include/linux/debug_vmalloc.h
new file mode 100644
index 0000000..0b51c8f
--- /dev/null
+++ b/include/linux/debug_vmalloc.h
@@ -0,0 +1,47 @@
+#ifndef _LINUX_DEBUG_VMALLOC_H
+#define _LINUX_DEBUG_VMALLOC_H
+
+#include <linux/mm.h>	/* for gfp_t and struct page */
+
+/* Replacements for kmalloc()/kfree() and vmalloc()/vfree() */
+extern void *debug_vmalloc(unsigned long size, gfp_t gfp);
+extern void debug_vfree(void *addr);
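+
+/*
+ * Typical use is as a drop-in replacement while chasing a corruption
+ * (illustrative sketch only; any kmalloc()/vmalloc() call site can be
+ * converted the same way, as long as the matching free is converted too):
+ *
+ *	buf = debug_vmalloc(len, GFP_KERNEL);
+ *	if (buf == NULL)
+ *		return -ENOMEM;
+ *	...
+ *	debug_vfree(buf);
+ */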
+
+/* Replacements for page allocation functions.
+ * At the moment these only do leak tracking. */
+extern struct page *debug_alloc_pages(gfp_t gfp, unsigned int order);
+extern void __debug_free_pages(struct page *page, unsigned int order);
+extern unsigned long __safedebug_get_free_pages(gfp_t gfp, unsigned int order);
+extern void safedebug_free_pages(unsigned long addr, unsigned int order);
+
+static inline unsigned long __debug_get_free_pages(gfp_t gfp, unsigned int order)
+{
+#ifdef VMDBG_USE_PAGES
+	return __safedebug_get_free_pages(gfp, order);
+#else
+	/* Note: size comes first in debug_vmalloc()'s signature */
+	return (unsigned long) debug_vmalloc(PAGE_SIZE << order, gfp);
+#endif
+}
+
+static inline void debug_free_pages(unsigned long addr, unsigned int order)
+{
+#ifdef VMDBG_USE_PAGES
+	safedebug_free_pages(addr, order);
+#else
+	debug_vfree((void *) addr);
+#endif
+}
+
+#define debug_alloc_page(gfp)		debug_alloc_pages(gfp, 0)
+#define __debug_get_free_page(gfp)	__debug_get_free_pages((gfp), 0)
+#define __debug_free_page(page)	__debug_free_pages((page), 0)
+#define debug_free_page(addr)		debug_free_pages((addr), 0)
+
+/* Warning: you must free all memory before checking for leaks!
+   Especially if you changed vmdbg_leaktrack at runtime. */
+extern int debug_vmalloc_leakcheck(void);
+
+extern int vmdbg_savestack;
+extern int vmdbg_leaktrack;
+
+#endif /* _LINUX_DEBUG_VMALLOC_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 452bbae..09e8d1b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -274,6 +274,9 @@ struct page {
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
 #endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_DEBUG_VMALLOC
+	void *vmdbg;
+#endif
 };
 
 #define page_private(page)	((page)->private)
diff --git a/mm/Makefile b/mm/Makefile
index 8a8b863..caf54a6 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -24,4 +24,5 @@
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
+obj-$(CONFIG_DEBUG_VMALLOC) += debug_vmalloc.o
diff --git a/mm/debug_vmalloc.c b/mm/debug_vmalloc.c
new file mode 100644
index 0000000..9689517
--- /dev/null
+++ b/mm/debug_vmalloc.c
@@ -0,0 +1,652 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+#include <linux/workqueue.h>
+#include <linux/stacktrace.h>
+
+#include <linux/debug_vmalloc.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * debug_vmalloc(): a debugging allocator.
+ *
+ * This allocator can be used in place of kmalloc(), kmalloc_node(), vmalloc(),
+ * __vmalloc(), __get_free_pages() and similar. debug_vfree() must be used to
+ * free the memory and its use is analogous to kfree()/vfree().
+ *
+ * It was designed to catch most use-after-frees, buffer underruns, buffer
+ * overruns and memory leaks.
+ *
+ * Unlike vmalloc(), this allocator can be used in atomic context (inside
+ * interrupts, etc.), as long as you pass the same flags you'd pass to
+ * kmalloc(). It is also significantly more scalable than vmalloc(), being
+ * O(1) with regard to the number of allocations (ignoring the overhead of
+ * the internal allocations, which should be insignificant).
+ *
+ * It will map pages starting at VMALLOC_END and continuing backwards,
+ * never reusing virtual addresses (in order to catch use-after-frees).
+ *
+ * This also means it's only usable on 64-bit architectures, because it would
+ * quickly consume all available vmalloc address space on 32-bit ones. That,
+ * and the fact that allocated memory is not aligned (on purpose, to catch
+ * off-by-one errors), means that it's probably only usable on x86-64.
+ *
+ * Buffer overruns will always be caught on the spot (assuming the overrun
+ * doesn't skip 4K or more) due to holes in the address space and due to the
+ * fact that the allocation is aligned to the end of the page.
+ *
+ * Most buffer underruns will be detected after the fact, during debug_vfree()
+ * (or later, in debug_vfree_do_free()), except if we are very unlucky and the
+ * underrun corrupts only some fields of the 'vmdbg_info' struct, or if it
+ * skips more than 4K. This could be improved at the expense of more wasted
+ * memory.
+ *
+ * Use-after-frees will panic the kernel immediately for reads and writes in
+ * most cases, due to the memory being unmapped. But if the memory has to be
+ * freed asynchronously, writes-after-free will be detected only after the
+ * fact, in debug_vfree_do_free() (assuming the writes don't match the poison
+ * pattern).
+ */
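+
+/*
+ * Rough layout of a single allocation (sketch, not to scale):
+ *
+ *	area.addr
+ *	| 0x4a padding | struct vmdbg_info | user buffer | guard page |
+ *	                                    ^returned ptr   (unmapped)
+ *
+ * The front padding is sized so that the user buffer ends exactly on a
+ * page boundary; a one-byte overrun therefore hits the unmapped guard
+ * page and faults immediately.
+ */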
+
+#define VI_REDZONE	0xBADF00DBADF00DBAull
+
+#define VMDBG_STACKDEPTH	20
+
+struct vmdbg_cpu_alloc {
+	int			vca_init;	/* just for debugging */
+	spinlock_t		vca_lock;
+	__u64			vca_alloc_bytes;
+	__u64			vca_alloc_total;
+	struct list_head	vca_list;
+};
+
+struct vmdbg_info {
+	struct vm_struct	vi_area;
+	unsigned long		vi_size;
+	struct stack_trace	vi_strace;
+	struct list_head	vi_cpu_list;	/* link into per-cpu alloc list */
+	int			vi_cpu_nr;	/* -1 when not leak-tracked */
+	int			vi_page;
+	__u64			vi_redzone;
+};
+
+struct vmdbg_free {
+	struct work_struct	vf_work;
+	struct vmdbg_info	*vf_info;
+};
+
+static DEFINE_SPINLOCK(vmdbg_ptr_lock);
+static unsigned long vmdbg_ptr = VMALLOC_END + 1;
+static int vmdbg_init = 0;
+static DEFINE_PER_CPU(struct vmdbg_cpu_alloc, vmdbg_alloc);
+
+/* Tunables - safe to change at any time */
+int vmdbg_savestack = 0;
+int vmdbg_leaktrack = 1;
+EXPORT_SYMBOL(vmdbg_savestack);
+EXPORT_SYMBOL(vmdbg_leaktrack);
+
+static void vmdbg_do_init(void)
+{
+	struct vmdbg_cpu_alloc *vca;
+	unsigned int j;
+
+	for_each_possible_cpu(j) {
+		vca = &per_cpu(vmdbg_alloc, j);
+		spin_lock_init(&vca->vca_lock);
+		INIT_LIST_HEAD(&vca->vca_list);
+		vca->vca_alloc_bytes = 0;
+		vca->vca_alloc_total = 0;
+		vca->vca_init = 1;
+	}
+	vmdbg_init = 1;
+	printk(KERN_WARNING "debug_vmalloc() being used\n");
+}
+
+static void vmdbg_leak_add(struct vmdbg_info *info, unsigned long alloc_size,
+		unsigned long alloc_total)
+{
+	struct vmdbg_cpu_alloc *vca;
+	unsigned long flags;
+
+	vca = &get_cpu_var(vmdbg_alloc);
+	spin_lock_irqsave(&vca->vca_lock, flags);
+
+	BUG_ON(vca->vca_init != 1);
+	BUG_ON((vca->vca_alloc_bytes == 0) != (vca->vca_alloc_total == 0));
+
+	vca->vca_alloc_bytes += alloc_size;
+	vca->vca_alloc_total += alloc_total;
+	list_add_tail(&info->vi_cpu_list, &vca->vca_list);
+	info->vi_cpu_nr = smp_processor_id();
+	BUG_ON(info->vi_cpu_nr < 0);
+
+	spin_unlock_irqrestore(&vca->vca_lock, flags);
+	put_cpu_var(vmdbg_alloc);
+}
+
+void *debug_vmalloc(unsigned long size, gfp_t gfp)
+{
+	struct vm_struct area;
+	struct vmdbg_info *info;
+	struct page **pages;
+	struct stack_trace *strace;
+	unsigned long align;
+	unsigned long flags;
+	unsigned long needed_size;
+	unsigned int array_size;
+	void *ptr;
+	int i;
+
+	if (unlikely(!size))
+		return NULL;
+
+	/* We always allocate a guard page at the end */
+	needed_size = sizeof(struct vmdbg_info) + size;
+	area.size = PAGE_ALIGN(needed_size) + PAGE_SIZE;
+
+	if (unlikely((area.size >> PAGE_SHIFT) > num_physpages))
+		return NULL;
+
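+	/*
+	 * Reserve address space by moving the global bump pointer down.
+	 * Addresses are handed out top-down from VMALLOC_END and never
+	 * reused, so stale pointers into freed allocations keep faulting.
+	 */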
+	spin_lock_irqsave(&vmdbg_ptr_lock, flags);
+
+	if (unlikely(vmdbg_init == 0))
+		vmdbg_do_init();
+
+	vmdbg_ptr -= area.size;
+	area.addr = (void *) vmdbg_ptr;
+
+	spin_unlock_irqrestore(&vmdbg_ptr_lock, flags);
+
+	area.flags = 0;
+	area.phys_addr = 0;
+	area.nr_pages = (area.size - PAGE_SIZE) >> PAGE_SHIFT;
+	array_size = area.nr_pages * sizeof(struct page *);
+
+	if (array_size > PAGE_SIZE) {
+		/*
+		 * The recursion is strictly bounded.
+		 * We use debug_vmalloc() instead of plain vmalloc() here
+		 * because we may not be able to sleep.
+		 */
+		area.pages = debug_vmalloc(array_size, gfp);
+		area.flags |= VM_VPAGES;
+	} else {
+		area.pages = kmalloc(array_size, gfp & ~__GFP_HIGHMEM);
+	}
+
+	if (unlikely(!area.pages))
+		return NULL;
+
+	for (i = 0; i < area.nr_pages; i++) {
+		area.pages[i] = alloc_page(gfp | __GFP_HIGHMEM);
+		if (unlikely(!area.pages[i])) {
+			area.nr_pages = i;
+			goto fail;
+		}
+	}
+
+	/* Sigh.. map_vm_area() increments the caller's argument :/ */
+	pages = area.pages;
+	if (unlikely(map_vm_area(&area, PAGE_KERNEL, &pages)))
+		goto fail;
+
+	align = -needed_size & (PAGE_SIZE - 1);
+	memset(area.addr, 0x4a, align);
+
+	info = area.addr + align;
+	info->vi_area = area;
+	info->vi_size = size;
+
+	strace = &info->vi_strace;
+	strace->nr_entries = 0;
+	strace->entries = NULL;
+
+	if (vmdbg_savestack & 1) {
+		strace->max_entries = VMDBG_STACKDEPTH;
+		strace->entries = kmalloc(VMDBG_STACKDEPTH *
+				sizeof(unsigned long), gfp & ~__GFP_HIGHMEM);
+		strace->skip = 1;
+		strace->all_contexts = 1;
+
+		if (strace->entries != NULL) {
+			preempt_disable();
+			save_stack_trace(strace, NULL);
+			preempt_enable();
+		}
+	}
+
+	INIT_LIST_HEAD(&info->vi_cpu_list);
+	info->vi_cpu_nr = -1;
+	info->vi_page = 0;
+
+	if (vmdbg_leaktrack)
+		vmdbg_leak_add(info, size, area.nr_pages << PAGE_SHIFT);
+
+	info->vi_redzone = VI_REDZONE;
+
+	ptr = area.addr + align + sizeof(struct vmdbg_info);
+	memset(ptr, 0x4b, size);
+
+	return ptr;
+
+fail:
+	for (i = 0; i < area.nr_pages; i++) {
+		BUG_ON(!area.pages[i]);
+		__free_page(area.pages[i]);
+	}
+
+	if (area.flags & VM_VPAGES)
+		debug_vfree(area.pages);
+	else
+		kfree(area.pages);
+
+	return NULL;
+}
+EXPORT_SYMBOL(debug_vmalloc);
+
+static int debug_vfree_redzone_check(struct vmdbg_info *info)
+{
+	unsigned char *ptr;
+	int align = ((unsigned long) info) & (PAGE_SIZE - 1);
+	int i;
+
+	if (unlikely(info->vi_redzone != VI_REDZONE))
+		return -EINVAL;
+
+	if (unlikely(info->vi_page != 0))
+		return -EINVAL;
+
+	ptr = (unsigned char *) info->vi_area.addr;
+	for (i = 0; i < align; i++) {
+		if (unlikely(ptr[i] != 0x4a))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void debug_print_strace(struct vmdbg_info *info)
+{
+	if (info->vi_strace.entries == NULL) {
+		printk(KERN_ERR "Stack was not saved. Suggestion: set "
+				"vmdbg_savestack to 3\n");
+	} else {
+		printk(KERN_ERR "Buffer possibly allocated here:\n");
+		if (info->vi_strace.nr_entries > VMDBG_STACKDEPTH)
+			info->vi_strace.nr_entries = VMDBG_STACKDEPTH;
+		print_stack_trace(&info->vi_strace, 1);
+	}
+}
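+
+/*
+ * Undo the accounting done in vmdbg_leak_add(). Note that a buffer may be
+ * freed on a different CPU than the one it was allocated on, which is why
+ * the owning CPU is recorded in vi_cpu_nr and it is that CPU's lock and
+ * counters that are used here, not the local CPU's.
+ */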
+static void vmdbg_leak_del(struct vmdbg_info *info, void *ptr,
+		unsigned long alloc_size, unsigned long alloc_total)
+{
+	struct vmdbg_cpu_alloc *vca;
+	unsigned long flags;
+
+	if (info->vi_cpu_nr == -1) {
+		/*
+		 * This allocation never made it to the leak tracking,
+		 * because vmdbg_leaktrack was disabled at the time the
+		 * allocation happened.
+		 */
+		return;
+	}
+
+	vca = &per_cpu(vmdbg_alloc, info->vi_cpu_nr);
+	BUG_ON(vca->vca_init != 1);
+
+	spin_lock_irqsave(&vca->vca_lock, flags);
+
+	if (unlikely(vca->vca_alloc_bytes < alloc_size)) {
+		printk(KERN_ERR "Inconsistent alloc bytes: %lu/%llu (cpu %d, "
+				"ptr %p)\n", alloc_size,
+				(unsigned long long) vca->vca_alloc_bytes,
+				info->vi_cpu_nr, ptr);
+		debug_print_strace(info);
+		BUG();
+	}
+
+	if (unlikely(vca->vca_alloc_total < alloc_total)) {
+		printk(KERN_ERR "Inconsistent alloc total: %lu/%llu (cpu %d, "
+				"ptr %p)\n", alloc_total,
+				(unsigned long long) vca->vca_alloc_total,
+				info->vi_cpu_nr, ptr);
+		debug_print_strace(info);
+		BUG();
+	}
+
+	vca->vca_alloc_bytes -= alloc_size;
+	vca->vca_alloc_total -= alloc_total;
+	list_del(&info->vi_cpu_list);
+
+	if (unlikely((vca->vca_alloc_bytes == 0) !=
+	    (vca->vca_alloc_total == 0))) {
+		printk(KERN_ERR "Inconsistent alloc values: %llu/%llu (cpu %d, "
+				"ptr %p)\n",
+				(unsigned long long) vca->vca_alloc_bytes,
+				(unsigned long long) vca->vca_alloc_total,
+				info->vi_cpu_nr, ptr);
+		debug_print_strace(info);
+		BUG();
+	}
+
+	spin_unlock_irqrestore(&vca->vca_lock, flags);
+}
+
+static void debug_vfree_do_free(struct vmdbg_info *info)
+{
+	struct vmdbg_info info_cpy;
+	unsigned char *ptr;
+	unsigned long k;
+	int i;
+
+	ptr = (unsigned char *) info;
+	ptr += sizeof(struct vmdbg_info);
+
+	if (unlikely(debug_vfree_redzone_check(info))) {
+		printk(KERN_ERR "Corruption detected on ptr %p\n", ptr);
+		debug_print_strace(info);
+		BUG();
+	}
+
+	vmdbg_leak_del(info, ptr, info->vi_size,
+			info->vi_area.nr_pages << PAGE_SHIFT);
+
+	/* Just one more check for sanity */
+	if (unlikely(debug_vfree_redzone_check(info))) {
+		printk(KERN_ERR "Corruption detected during free of %p\n", ptr);
+		debug_print_strace(info);
+		BUG();
+	}
+
+	/*
+	 * Check for possible use-after-free between the debug_vfree() call
+	 * and the actual unmap (which may happen much later).
+	 *
+	 * If this is detected, it may be possible to catch the culprit in
+	 * the act by trying to unmap sooner, e.g. by disabling some debug
+	 * code.
+	 */
+	for (k = 0; k < info->vi_size; k++) {
+		if (unlikely(ptr[k] != 0x4c)) {
+			printk(KERN_ERR "Use-after-free detected on ptr %p\n",
+					ptr);
+			debug_print_strace(info);
+			BUG();
+		}
+	}
+
+	/*
+	 * We copy 'info' onto the stack because we're about to unmap and
+	 * free the pages where the struct is stored.
+	 */
+	info_cpy = *info;
+
+	unmap_vm_area(&info_cpy.vi_area);
+
+	if (info_cpy.vi_strace.entries != NULL)
+		kfree(info_cpy.vi_strace.entries);
+
+	for (i = 0; i < info_cpy.vi_area.nr_pages; i++) {
+		BUG_ON(!info_cpy.vi_area.pages[i]);
+		__free_page(info_cpy.vi_area.pages[i]);
+	}
+
+	if (info_cpy.vi_area.flags & VM_VPAGES)
+		debug_vfree(info_cpy.vi_area.pages);
+	else
+		kfree(info_cpy.vi_area.pages);
+}
+
+static void debug_vfree_async(void *data)
+{
+	struct vmdbg_free *vf = data;
+	struct vmdbg_info *info = vf->vf_info;
+
+	kfree(vf);
+
+	debug_vfree_do_free(info);
+}
+
+void debug_vfree(void *addr)
+{
+	struct vmdbg_info *info = addr - sizeof(struct vmdbg_info);
+	struct vmdbg_free *vf;
+
+	/* Like kfree()/vfree(), silently accept a NULL pointer */
+	if (unlikely(addr == NULL))
+		return;
+
+	if (unlikely(debug_vfree_redzone_check(info))) {
+		printk(KERN_ERR "Invalid debug_vfree() or buffer underrun on "
+				"ptr %p\n", addr);
+		debug_print_strace(info);
+		BUG();
+	}
+
+	memset(addr, 0x4c, info->vi_size);
+
+	/*
+	 * unmap_vm_area() calls flush_tlb_all(), which calls
+	 * smp_call_function(). The latter doesn't allow being called from
+	 * softirq or hardirq context, or with interrupts disabled.
+	 */
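+	/*
+	 * So when we cannot sleep, the actual unmap is deferred to keventd
+	 * via schedule_work(). The poison pattern written above still lets
+	 * the deferred path detect writes-after-free, only after the fact.
+	 */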
+	if (!irqs_disabled() && !in_interrupt()) {
+		debug_vfree_do_free(info);
+	} else {
+		/*
+		 * debug_vfree() cannot fail, so retry until the small
+		 * tracking struct can be allocated.
+		 */
+		do {
+			vf = kmalloc(sizeof(struct vmdbg_free), GFP_ATOMIC);
+		} while (vf == NULL);
+
+		vf->vf_info = info;
+		INIT_WORK(&vf->vf_work, &debug_vfree_async, vf);
+		schedule_work(&vf->vf_work);
+	}
+}
+EXPORT_SYMBOL(debug_vfree);
+
+struct page *debug_alloc_pages(gfp_t gfp, unsigned int order)
+{
+	struct vmdbg_info *info;
+	struct page *pages;
+	struct stack_trace *strace;
+	unsigned long *entries = NULL;
+	unsigned int nr_entries = 0;
+	unsigned long flags;
+	int i;
+
+	if (unlikely(order >= MAX_ORDER))
+		return NULL;
+
+	pages = alloc_pages(gfp, order);
+	if (unlikely(pages == NULL))
+		return NULL;
+
+	spin_lock_irqsave(&vmdbg_ptr_lock, flags);
+
+	if (unlikely(vmdbg_init == 0))
+		vmdbg_do_init();
+
+	spin_unlock_irqrestore(&vmdbg_ptr_lock, flags);
+
+	for (i = 0; i < (1 << order); i++) {
+		info = kmalloc(sizeof(struct vmdbg_info), gfp & ~__GFP_HIGHMEM);
+		if (info == NULL)
+			goto fail;
+
+		pages[i].vmdbg = info;
+
+		info->vi_size = PAGE_SIZE;
+		INIT_LIST_HEAD(&info->vi_cpu_list);
+		info->vi_cpu_nr = -1;
+		info->vi_page = 1;
+
+		if (vmdbg_leaktrack)
+			vmdbg_leak_add(info, PAGE_SIZE,
+					PAGE_SIZE + sizeof(struct vmdbg_info));
+
+		strace = &info->vi_strace;
+		strace->nr_entries = 0;
+		strace->entries = NULL;
+
+		info->vi_redzone = VI_REDZONE;
+
+		if (!(vmdbg_savestack & 2))
+			continue;
+
+		strace->max_entries = VMDBG_STACKDEPTH;
+		strace->entries = kmalloc(VMDBG_STACKDEPTH *
+				sizeof(unsigned long), gfp & ~__GFP_HIGHMEM);
+		strace->skip = 1;
+		strace->all_contexts = 1;
+
+		if (strace->entries == NULL)
+			continue;
+
+		if (entries == NULL) {
+			/* Save the stack once, for the first page... */
+			entries = strace->entries;
+			preempt_disable();
+			save_stack_trace(strace, NULL);
+			preempt_enable();
+			nr_entries = strace->nr_entries;
+		} else {
+			/* ...and share it with the remaining pages. */
+			memcpy(strace->entries, entries,
+					VMDBG_STACKDEPTH * sizeof(unsigned long));
+			strace->nr_entries = nr_entries;
+		}
+	}
+
+	return pages;
+
+fail:
+	while (i > 0) {
+		i--;
+		info = pages[i].vmdbg;
+		BUG_ON(info == NULL);
+
+		/* Undo the leak tracking done above */
+		vmdbg_leak_del(info, &pages[i], PAGE_SIZE,
+				PAGE_SIZE + sizeof(struct vmdbg_info));
+
+		if (info->vi_strace.entries != NULL)
+			kfree(info->vi_strace.entries);
+
+		kfree(info);
+	}
+
+	__free_pages(pages, order);
+
+	return NULL;
+}
+EXPORT_SYMBOL(debug_alloc_pages);
+
+void __debug_free_pages(struct page *page, unsigned int order)
+{
+	struct vmdbg_info *info;
+	int i;
+
+	for (i = 0; i < (1 << order); i++) {
+		info = page[i].vmdbg;
+		BUG_ON(info == NULL);
+		BUG_ON(info->vi_page != 1);
+		BUG_ON(info->vi_size != PAGE_SIZE);
+
+		vmdbg_leak_del(info, &page[i], PAGE_SIZE,
+				PAGE_SIZE + sizeof(struct vmdbg_info));
+
+		if (info->vi_strace.entries != NULL)
+			kfree(info->vi_strace.entries);
+
+		kfree(info);
+	}
+
+	__free_pages(page, order);
+}
+EXPORT_SYMBOL(__debug_free_pages);
+
+unsigned long __safedebug_get_free_pages(gfp_t gfp, unsigned int order)
+{
+	struct page *page;
+
+	page = debug_alloc_pages(gfp, order);
+	if (!page)
+		return 0;
+
+	return (unsigned long) page_address(page);
+}
+EXPORT_SYMBOL(__safedebug_get_free_pages);
+
+void safedebug_free_pages(unsigned long addr, unsigned int order)
+{
+	if (addr != 0) {
+		BUG_ON(!virt_addr_valid((void *) addr));
+		__debug_free_pages(virt_to_page((void *) addr), order);
+	}
+}
+EXPORT_SYMBOL(safedebug_free_pages);
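+
+/*
+ * Sketch of the intended use (hypothetical module teardown; the names
+ * are illustrative only):
+ *
+ *	static void __exit my_driver_exit(void)
+ *	{
+ *		free_all_my_buffers();          (every allocation freed)
+ *		debug_vmalloc_leakcheck();
+ *	}
+ */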
+
+/* Warning: you must free all memory before calling this function!
+   Especially if you changed vmdbg_leaktrack at runtime. */
+int debug_vmalloc_leakcheck(void)
+{
+	struct vmdbg_info *info;
+	struct vmdbg_cpu_alloc *vca;
+	unsigned char *ptr;
+	unsigned long flags;
+	unsigned int j;
+	int rc = 0;
+
+	spin_lock_irqsave(&vmdbg_ptr_lock, flags);
+
+	if (unlikely(vmdbg_init == 0)) {
+		printk(KERN_WARNING "debug_vmalloc_leakcheck(): "
+				"debug_vmalloc() was never used!\n");
+		rc = -ENOSYS;
+		goto out;
+	}
+
+	for_each_possible_cpu(j) {
+		vca = &per_cpu(vmdbg_alloc, j);
+		BUG_ON(vca->vca_init != 1);
+
+		/*
+		 * Interrupts are already disabled by the irqsave above, so
+		 * take the per-cpu lock without saving flags again (reusing
+		 * 'flags' here would clobber the outer saved state).
+		 */
+		spin_lock(&vca->vca_lock);
+
+		if (vca->vca_alloc_bytes != 0 || vca->vca_alloc_total != 0) {
+			printk(KERN_ERR "Memory leak(s) detected: %llu/%llu "
+					"(cpu %u)\n",
+					(unsigned long long) vca->vca_alloc_bytes,
+					(unsigned long long) vca->vca_alloc_total,
+					j);
+			rc = -EINVAL;
+		}
+
+		list_for_each_entry(info, &vca->vca_list, vi_cpu_list) {
+			ptr = (unsigned char *) info;
+			ptr += sizeof(struct vmdbg_info);
+			printk(KERN_ERR "Detected individual %lu byte leak on"
+					" cpu %u, ptr %p, page? %d\n",
+					info->vi_size, j, ptr, info->vi_page);
+			debug_print_strace(info);
+			rc = -EINVAL;
+		}
+
+		spin_unlock(&vca->vca_lock);
+	}
+
+out:
+	if (rc == 0) {
+		if (unlikely(vmdbg_leaktrack == 0)) {
+			printk(KERN_WARNING "debug_vmalloc_leakcheck(): "
+					"leak checking is disabled!\n");
+			rc = -EBADR;
+			goto out2;
+		}
+
+		printk(KERN_NOTICE "No memory leaks detected!\n");
+		/* No memory allocated, so it's safe to reset vmdbg_ptr */
+		vmdbg_ptr = VMALLOC_END + 1;
+	}
+
+out2:
+	spin_unlock_irqrestore(&vmdbg_ptr_lock, flags);
+
+	return rc;
+}
+EXPORT_SYMBOL(debug_vmalloc_leakcheck);
-- 
1.6.2.2