drm/vmwgfx: Use the linux DMA api to get valid device addresses of pages
The code handles three different cases:

1) Physical page addresses: the ttm page array is used.
2) DMA subsystem addresses: a scatter-gather list is used.
3) Coherent pages: the ttm dma pool is used, together with the dma_ttm
   array of dma_addr_t.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
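All three cases are hidden behind the vmw_piter page iterator that arrives
with this change. A minimal sketch of the intended calling pattern, assuming
a populated struct vmw_sg_table *vsgt (the exact prototypes live in
vmwgfx_drv.h):

	struct vmw_piter iter;

	/* Position the iterator before the first page of the table. */
	vmw_piter_start(&iter, vsgt, 0);

	/* vmw_piter_next() advances and returns false when exhausted. */
	while (vmw_piter_next(&iter)) {
		/*
		 * Whichever of the three backing cases applies, this
		 * yields an address that is valid for the device, not
		 * merely a CPU physical address.
		 */
		dma_addr_t addr = vmw_piter_dma_addr(&iter);

		/* ... hand addr to the virtual hardware ... */
	}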
@@ -32,9 +32,11 @@
 #define VMW_PPN_SIZE (sizeof(unsigned long))
 /* A future safe maximum remap size. */
 #define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
+#define DMA_ADDR_INVALID ((dma_addr_t) 0)
+#define DMA_PAGE_INVALID 0UL
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
-			 struct page *pages[],
+			 struct vmw_piter *iter,
 			 unsigned long num_pages,
 			 int gmr_id)
 {
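The two new defines are driver-local sentinels: DMA_ADDR_INVALID marks a
descriptor page with no DMA mapping to tear down, and DMA_PAGE_INVALID
terminates a descriptor chain. Treating 0 as an invalid dma_addr_t is a
convention of this driver rather than a guarantee of the DMA API.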
@@ -81,11 +83,13 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 
 	for (i = 0; i < nr; ++i) {
 		if (VMW_PPN_SIZE <= 4)
-			*cmd = page_to_pfn(*pages++);
+			*cmd = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 		else
-			*((uint64_t *)cmd) = page_to_pfn(*pages++);
+			*((uint64_t *)cmd) = vmw_piter_dma_addr(iter) >>
+				PAGE_SHIFT;
 
 		cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		vmw_piter_next(iter);
 	}
 
 	num_pages -= nr;
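For scale: VMW_PPN_SIZE is sizeof(unsigned long), so the loop above emits
32-bit PPNs on 32-bit kernels and 64-bit PPNs on 64-bit kernels, and
VMW_PPN_PER_REMAP works out to (31 * 1024) / 4 = 7936 pages per remap
command on 32-bit versus (31 * 1024) / 8 = 3968 on 64-bit. The change only
swaps the value written, a CPU page frame number for a DMA address shifted
down by PAGE_SHIFT; the batching logic is untouched.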
@@ -120,22 +124,54 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
 	vmw_fifo_commit(dev_priv, define_size);
 }
 
+
+static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
+				     struct list_head *desc_pages)
+{
+	struct page *page, *next;
+	struct svga_guest_mem_descriptor *page_virtual;
+	unsigned int desc_per_page = PAGE_SIZE /
+		sizeof(struct svga_guest_mem_descriptor) - 1;
+
+	if (list_empty(desc_pages))
+		return;
+
+	list_for_each_entry_safe(page, next, desc_pages, lru) {
+		list_del_init(&page->lru);
+
+		if (likely(desc_dma != DMA_ADDR_INVALID)) {
+			dma_unmap_page(dev, desc_dma, PAGE_SIZE,
+				       DMA_TO_DEVICE);
+		}
+
+		page_virtual = kmap_atomic(page);
+		desc_dma = page_virtual[desc_per_page].ppn << PAGE_SHIFT;
+		kunmap_atomic(page_virtual);
+
+		__free_page(page);
+	}
+}
+
+
 /**
  * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
  * the number of used descriptors.
  *
  */
-static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
-				     struct page *pages[],
-				     unsigned long num_pages)
+static int vmw_gmr_build_descriptors(struct device *dev,
+				     struct list_head *desc_pages,
+				     struct vmw_piter *iter,
+				     unsigned long num_pages,
+				     dma_addr_t *first_dma)
 {
-	struct page *page, *next;
+	struct page *page;
 	struct svga_guest_mem_descriptor *page_virtual = NULL;
 	struct svga_guest_mem_descriptor *desc_virtual = NULL;
 	unsigned int desc_per_page;
 	unsigned long prev_pfn;
 	unsigned long pfn;
 	int ret;
+	dma_addr_t desc_dma;
 
 	desc_per_page = PAGE_SIZE /
 	    sizeof(struct svga_guest_mem_descriptor) - 1;
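For orientation: struct svga_guest_mem_descriptor is a pair of 32-bit fields
(ppn, num_pages), i.e. 8 bytes, so with the common 4 KiB PAGE_SIZE
desc_per_page evaluates to 4096 / 8 - 1 = 511. Each descriptor page holds
511 usable run descriptors plus one reserved final slot that links to the
next descriptor page. The new vmw_gmr_free_descriptors() walks exactly that
chain: it unmaps the current page's DMA handle, reads the successor's handle
out of the link slot (ppn << PAGE_SHIFT), and only then frees the page.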
@@ -148,23 +184,12 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
 		}
 
 		list_add_tail(&page->lru, desc_pages);
-
-		/*
-		 * Point previous page terminating descriptor to this
-		 * page before unmapping it.
-		 */
-
-		if (likely(page_virtual != NULL)) {
-			desc_virtual->ppn = page_to_pfn(page);
-			kunmap_atomic(page_virtual);
-		}
-
 		page_virtual = kmap_atomic(page);
 		desc_virtual = page_virtual - 1;
 		prev_pfn = ~(0UL);
 
 		while (likely(num_pages != 0)) {
-			pfn = page_to_pfn(*pages);
+			pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
 
 			if (pfn != prev_pfn + 1) {
 
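A subtle consequence of the replaced line above: the run-merging test
(pfn != prev_pfn + 1) now compares successive device addresses, so
descriptors coalesce pages that are contiguous in DMA space rather than
pages that are contiguous in host physical memory. With an IOMMU the two
can differ, and the device-visible layout is the one that matters here.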
@@ -181,104 +206,81 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
 			}
 			prev_pfn = pfn;
 			--num_pages;
-			++pages;
+			vmw_piter_next(iter);
 		}
 
-		(++desc_virtual)->ppn = cpu_to_le32(0);
+		(++desc_virtual)->ppn = DMA_PAGE_INVALID;
 		desc_virtual->num_pages = cpu_to_le32(0);
+		kunmap_atomic(page_virtual);
 	}
 
-	if (likely(page_virtual != NULL))
+	desc_dma = 0;
+	list_for_each_entry_reverse(page, desc_pages, lru) {
+		page_virtual = kmap_atomic(page);
+		page_virtual[desc_per_page].ppn = desc_dma >> PAGE_SHIFT;
 		kunmap_atomic(page_virtual);
+		desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
+					DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(dev, desc_dma)))
+			goto out_err;
+	}
+	*first_dma = desc_dma;
 
 	return 0;
 out_err:
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-		__free_page(page);
-	}
+	vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
 	return ret;
 }
 
-static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
-{
-	struct page *page, *next;
-
-	list_for_each_entry_safe(page, next, desc_pages, lru) {
-		list_del_init(&page->lru);
-		__free_page(page);
-	}
-}
-
 static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
-				     int gmr_id, struct list_head *desc_pages)
+				     int gmr_id, dma_addr_t desc_dma)
 {
-	struct page *page;
-
-	if (unlikely(list_empty(desc_pages)))
-		return;
-
-	page = list_entry(desc_pages->next, struct page, lru);
-
 	mutex_lock(&dev_priv->hw_mutex);
 
 	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
 	wmb();
-	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
+	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
 	mb();
 
 	mutex_unlock(&dev_priv->hw_mutex);
 
 }
 
-/**
- * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
- * the number of used descriptors.
- */
-
-static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
-					       unsigned long num_pages)
-{
-	unsigned long prev_pfn = ~(0UL);
-	unsigned long pfn;
-	unsigned long descriptors = 0;
-
-	while (num_pages--) {
-		pfn = page_to_pfn(*pages++);
-		if (prev_pfn + 1 != pfn)
-			++descriptors;
-		prev_pfn = pfn;
-	}
-
-	return descriptors;
-}
-
 int vmw_gmr_bind(struct vmw_private *dev_priv,
-		 struct page *pages[],
+		 const struct vmw_sg_table *vsgt,
 		 unsigned long num_pages,
 		 int gmr_id)
 {
 	struct list_head desc_pages;
+	dma_addr_t desc_dma = 0;
+	struct device *dev = dev_priv->dev->dev;
+	struct vmw_piter data_iter;
 	int ret;
 
+	vmw_piter_start(&data_iter, vsgt, 0);
+
+	if (unlikely(!vmw_piter_next(&data_iter)))
+		return 0;
+
 	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
-		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);
+		return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
 
 	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
 		return -EINVAL;
 
-	if (vmw_gmr_count_descriptors(pages, num_pages) >
-	    dev_priv->max_gmr_descriptors)
+	if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
 		return -EINVAL;
 
 	INIT_LIST_HEAD(&desc_pages);
 
-	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
+	ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
+					num_pages, &desc_dma);
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
-	vmw_gmr_free_descriptors(&desc_pages);
+	vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
+	vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
 
 	return 0;
 }
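The reverse-order mapping in vmw_gmr_build_descriptors() is the central
design choice here: each descriptor page must embed the DMA address of its
successor, so walking the list tail-to-head guarantees every link is known
before the page containing it is mapped. The walk ends at the head page,
whose handle comes back through *first_dma and is what
vmw_gmr_fire_descriptors() now programs into SVGA_REG_GMR_DESCRIPTOR.
Roughly:

	first_dma --> [desc page | link] --> [desc page | link] --> ... --> 0

with each link holding the next page's DMA address as a shifted-down pfn,
and 0 (DMA_PAGE_INVALID) closing the chain.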