drm/vmwgfx: Protect from excessive execbuf kernel memory allocations v3
With the new validation code, a malicious user-space app could potentially
submit command streams with enough buffer-object and resource references in
them to have the resulting allocated validation nodes and relocations make
the kernel run out of GFP_KERNEL memory.

Protect from this by having the validation code reserve TTM graphics memory
when allocating.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
---
v2: Removed leftover debug printouts
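The hunks below dereference only three members of ctx->vm: reserve_mem(), unreserve_mem() and gran. As a hedged sketch of the interface the allocator programs against (the type name vmw_validation_mem and the exact field layout are assumptions inferred from these hunks, not copied from the driver headers), it could look roughly like this:

/*
 * Sketch only: inferred from the members dereferenced in the hunks
 * below (reserve_mem, unreserve_mem, gran). The real definition in
 * the vmwgfx headers may differ; the name vmw_validation_mem and the
 * size_t parameter types are assumptions.
 */
struct vmw_validation_mem {
	/* Reserve @size bytes of graphics memory; returns 0 on success. */
	int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
	/* Give back a previously reserved amount. */
	void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
	/* Granularity to reserve in, a multiple of PAGE_SIZE. */
	size_t gran;
};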
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
 		return NULL;
 
 	if (ctx->mem_size_left < size) {
-		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		struct page *page;
+
+		if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+			int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+			if (ret)
+				return NULL;
+
+			ctx->vm_size_left += ctx->vm->gran;
+			ctx->total_mem += ctx->vm->gran;
+		}
 
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 		if (!page)
 			return NULL;
 
+		if (ctx->vm)
+			ctx->vm_size_left -= PAGE_SIZE;
+
 		list_add_tail(&page->lru, &ctx->page_list);
 		ctx->page_address = page_address(page);
 		ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
 	}
 
 	ctx->mem_size_left = 0;
+	if (ctx->vm && ctx->total_mem) {
+		ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+		ctx->total_mem = 0;
+		ctx->vm_size_left = 0;
+	}
 }
 
 /**