- /*
- * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
- * Copyright (C) 2013 Red Hat
- * Author: Rob Clark <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
- #include <linux/spinlock.h>
- #include <linux/shmem_fs.h>
- #include <linux/dma-buf.h>
- #include <linux/pfn_t.h>
- #include <linux/ion.h>
- #include "msm_drv.h"
- #include "msm_gem.h"
- #include "msm_mmu.h"
- #include "sde_dbg.h"
- static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
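- /* Physical address of a VRAM-carveout backed object: the carveout base
- * plus the object's drm_mm node offset, converted from pages to bytes.
- */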
- static dma_addr_t physaddr(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
- priv->vram.paddr;
- }
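- /* shmem-backed objects have no vram_node; carveout-backed objects do */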
- static bool use_pages(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- return !msm_obj->vram_node;
- }
- /* allocate pages from VRAM carveout, used when no IOMMU: */
- static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- dma_addr_t paddr;
- struct page **p;
- int ret, i;
- p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
- spin_lock(&priv->vram.lock);
- ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
- spin_unlock(&priv->vram.lock);
- if (ret) {
- kvfree(p);
- return ERR_PTR(ret);
- }
- paddr = physaddr(obj);
- for (i = 0; i < npages; i++) {
- p[i] = phys_to_page(paddr);
- paddr += PAGE_SIZE;
- }
- return p;
- }
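- /* Lazily allocate the backing pages (shmem or VRAM carveout) and build
- * the scatter/gather table on first use; later calls return the cached
- * page array. Imported objects just return the array recorded at import.
- */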
- static struct page **get_pages(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct device *aspace_dev;
- if (obj->import_attach)
- return msm_obj->pages;
- if (!msm_obj->pages) {
- struct drm_device *dev = obj->dev;
- struct page **p;
- int npages = obj->size >> PAGE_SHIFT;
- if (use_pages(obj))
- p = drm_gem_get_pages(obj);
- else
- p = get_pages_vram(obj, npages);
- if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n",
- PTR_ERR(p));
- return p;
- }
- msm_obj->pages = p;
- msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
- if (IS_ERR(msm_obj->sgt)) {
- void *ptr = ERR_CAST(msm_obj->sgt);
- dev_err(dev->dev, "failed to allocate sgt\n");
- msm_obj->sgt = NULL;
- return ptr;
- }
- /* For non-cached buffers, ensure the new pages are clean
- * because the display controller, GPU, etc. are not coherent:
- */
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) {
- aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
- if (aspace_dev)
- dma_map_sg(aspace_dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents,
- DMA_BIDIRECTIONAL);
- else
- dev_err(dev->dev,
- "failed to get aspace_device\n");
- }
- }
- return msm_obj->pages;
- }
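- /* Release a VRAM-carveout allocation: return the drm_mm node to the
- * allocator and free the page array.
- */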
- static void put_pages_vram(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
- spin_lock(&priv->vram.lock);
- drm_mm_remove_node(msm_obj->vram_node);
- spin_unlock(&priv->vram.lock);
- kvfree(msm_obj->pages);
- }
- static void put_pages(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- if (msm_obj->pages) {
- if (msm_obj->sgt) {
- sg_free_table(msm_obj->sgt);
- kfree(msm_obj->sgt);
- }
- if (use_pages(obj))
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
- put_pages_vram(obj);
- msm_obj->pages = NULL;
- }
- }
- struct page **msm_gem_get_pages(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **p;
- mutex_lock(&msm_obj->lock);
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- mutex_unlock(&msm_obj->lock);
- return ERR_PTR(-EBUSY);
- }
- p = get_pages(obj);
- mutex_unlock(&msm_obj->lock);
- return p;
- }
- void msm_gem_put_pages(struct drm_gem_object *obj)
- {
- /* when we start tracking the pin count, then do something here */
- }
- void msm_gem_sync(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj;
- struct device *aspace_dev;
- if (!obj)
- return;
- msm_obj = to_msm_bo(obj);
- /*
- * dma_sync_sg_for_device synchronises a single contiguous or
- * scatter/gather mapping for the CPU and device.
- */
- aspace_dev = msm_gem_get_aspace_device(msm_obj->aspace);
- if (aspace_dev)
- dma_sync_sg_for_device(aspace_dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- else
- dev_err(obj->dev->dev,
- "failed to get aspace_device\n");
- }
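- /* Set up a userspace mapping: choose the page protection from the cache
- * flags, and shunt cached objects over to the shmem file so that
- * unmap_mapping_range() works as expected.
- */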
- int msm_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- if (msm_obj->flags & MSM_BO_WC) {
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- } else if (msm_obj->flags & MSM_BO_UNCACHED) {
- vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
- } else {
- /*
- * Shunt off cached objs to shmem file so they have their own
- * address_space (so unmap_mapping_range does what we want,
- * in particular in the case of mmap'd dmabufs)
- */
- fput(vma->vm_file);
- get_file(obj->filp);
- vma->vm_pgoff = 0;
- vma->vm_file = obj->filp;
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- }
- return 0;
- }
- int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
- {
- int ret;
- ret = drm_gem_mmap(filp, vma);
- if (ret) {
- DBG("mmap failed: %d", ret);
- return ret;
- }
- return msm_gem_mmap_obj(vma->vm_private_data, vma);
- }
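- /* Page-fault handler for userspace mappings: makes sure the backing
- * pages exist, then inserts the pfn of the single faulting page.
- */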
- vm_fault_t msm_gem_fault(struct vm_fault *vmf)
- {
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct page **pages;
- unsigned long pfn;
- pgoff_t pgoff;
- int err;
- vm_fault_t ret;
- /*
- * vm_ops.open/drm_gem_mmap_obj and close get and put
- * a reference on obj. So, we don't need to hold one here.
- */
- err = mutex_lock_interruptible(&msm_obj->lock);
- if (err) {
- ret = VM_FAULT_NOPAGE;
- goto out;
- }
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- mutex_unlock(&msm_obj->lock);
- return VM_FAULT_SIGBUS;
- }
- /* make sure we have pages attached now */
- pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = vmf_error(PTR_ERR(pages));
- goto out_unlock;
- }
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = page_to_pfn(pages[pgoff]);
- VERB("Inserting %pK pfn %lx, pa %lx", (void *)vmf->address,
- pfn, pfn << PAGE_SHIFT);
- ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
- out_unlock:
- mutex_unlock(&msm_obj->lock);
- out:
- return ret;
- }
- /** get mmap offset */
- static uint64_t mmap_offset(struct drm_gem_object *obj)
- {
- struct drm_device *dev = obj->dev;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
- /* Make it mmapable */
- ret = drm_gem_create_mmap_offset(obj);
- if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
- return 0;
- }
- return drm_vma_node_offset_addr(&obj->vma_node);
- }
- uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
- {
- uint64_t offset;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
- offset = mmap_offset(obj);
- mutex_unlock(&msm_obj->lock);
- return offset;
- }
- dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct sg_table *sgt;
- if (!msm_obj->sgt) {
- sgt = dma_buf_map_attachment(obj->import_attach,
- DMA_BIDIRECTIONAL);
- if (IS_ERR_OR_NULL(sgt)) {
- DRM_ERROR("dma_buf_map_attachment failure, err=%ld\n",
- PTR_ERR(sgt));
- return 0;
- }
- msm_obj->sgt = sgt;
- }
- return sg_phys(msm_obj->sgt->sgl);
- }
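- /* An object keeps one msm_gem_vma per address space it is mapped into;
- * add_vma()/lookup_vma()/del_vma() manage that list under msm_obj->lock.
- */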
- static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
- vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- if (!vma)
- return ERR_PTR(-ENOMEM);
- vma->aspace = aspace;
- msm_obj->aspace = aspace;
- list_add_tail(&vma->list, &msm_obj->vmas);
- return vma;
- }
- static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
- list_for_each_entry(vma, &msm_obj->vmas, list) {
- if (vma->aspace == aspace)
- return vma;
- }
- return NULL;
- }
- static void del_vma(struct msm_gem_vma *vma)
- {
- if (!vma)
- return;
- list_del(&vma->list);
- kfree(vma);
- }
- /* Called with msm_obj->lock locked */
- static void
- put_iova(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma, *tmp;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
- list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
- msm_obj->flags);
- /*
- * put_iova removes the domain connected to the obj which makes
- * the aspace inaccessible. Store the aspace, as it is used to
- * update the active_list during gem_free_obj and gem_purge.
- */
- msm_obj->aspace = vma->aspace;
- del_vma(vma);
- }
- }
- /* get iova, taking a reference. Should have a matching put */
- static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
- int ret = 0;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
- vma = lookup_vma(obj, aspace);
- if (!vma) {
- struct page **pages;
- struct device *dev;
- struct dma_buf *dmabuf;
- bool reattach = false;
- dev = msm_gem_get_aspace_device(aspace);
- if ((dev && obj->import_attach) &&
- ((dev != obj->import_attach->dev) ||
- msm_obj->obj_dirty)) {
- dmabuf = obj->import_attach->dmabuf;
- DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
- obj->import_attach->dev, dev);
- SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt);
- if (msm_obj->sgt)
- dma_buf_unmap_attachment(obj->import_attach,
- msm_obj->sgt, DMA_BIDIRECTIONAL);
- dma_buf_detach(dmabuf, obj->import_attach);
- obj->import_attach = dma_buf_attach(dmabuf, dev);
- if (IS_ERR(obj->import_attach)) {
- DRM_ERROR("dma_buf_attach failure, err=%ld\n",
- PTR_ERR(obj->import_attach));
- ret = PTR_ERR(obj->import_attach);
- goto unlock;
- }
- msm_obj->obj_dirty = false;
- reattach = true;
- }
- /* perform delayed import for buffers without existing sgt */
- if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
- || reattach) {
- ret = msm_gem_delayed_import(obj);
- if (ret) {
- DRM_ERROR("delayed dma-buf import failed %d\n",
- ret);
- goto unlock;
- }
- }
- vma = add_vma(obj, aspace);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto unlock;
- }
- pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
- ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
- obj->size >> PAGE_SHIFT,
- msm_obj->flags);
- if (ret)
- goto fail;
- }
- *iova = vma->iova;
- if (aspace && !msm_obj->in_active_list) {
- mutex_lock(&aspace->list_lock);
- msm_gem_add_obj_to_aspace_active_list(aspace, obj);
- mutex_unlock(&aspace->list_lock);
- }
- mutex_unlock(&msm_obj->lock);
- return 0;
- fail:
- del_vma(vma);
- unlock:
- mutex_unlock(&msm_obj->lock);
- return ret;
- }
- static int msm_gem_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
- struct page **pages;
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
- return -EBUSY;
- vma = lookup_vma(obj, aspace);
- if (WARN_ON(!vma))
- return -EINVAL;
- pages = get_pages(obj);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
- return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
- obj->size >> PAGE_SHIFT, msm_obj->flags);
- }
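- /*
- * Usage sketch for the get/pin interface below (hypothetical caller;
- * "obj" and "aspace" are assumed to be valid):
- *
- * uint64_t iova;
- * int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
- * if (!ret) {
- * ... program iova into the hardware ...
- * msm_gem_unpin_iova(obj, aspace);
- * }
- */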
- /* get iova and pin it. Should have a matching put */
- int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- u64 local;
- int ret;
- mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, &local);
- if (!ret)
- ret = msm_gem_pin_iova(obj, aspace);
- if (!ret)
- *iova = local;
- mutex_unlock(&msm_obj->lock);
- return ret;
- }
- int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret;
- mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, iova);
- mutex_unlock(&msm_obj->lock);
- return ret;
- }
- /* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
- */
- uint64_t msm_gem_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
- mutex_lock(&msm_obj->lock);
- vma = lookup_vma(obj, aspace);
- mutex_unlock(&msm_obj->lock);
- WARN_ON(!vma);
- return vma ? vma->iova : 0;
- }
- /*
- * Unpin an iova by updating the reference counts. The memory isn't actually
- * purged until something else (shrinker, mm_notifier, destroy, etc) decides
- * to get rid of it
- */
- void msm_gem_unpin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_gem_vma *vma;
- mutex_lock(&msm_obj->lock);
- vma = lookup_vma(obj, aspace);
- if (!WARN_ON(!vma))
- msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
- msm_obj->flags);
- mutex_unlock(&msm_obj->lock);
- }
- void msm_gem_put_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace)
- {
- // XXX TODO ..
- // NOTE: probably don't need a _locked() version.. we wouldn't
- // normally unmap here, but instead just mark that it could be
- // unmapped (if the iova refcnt drops to zero), but then later
- // if another _get_iova_locked() fails we can start unmapping
- // things that are no longer needed..
- }
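- /* Called on SMMU domain attach/detach for an address space: notifies the
- * registered clients, and unmaps imported buffers on detach or remaps the
- * active buffers on attach.
- */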
- void msm_gem_aspace_domain_attach_detach_update(
- struct msm_gem_address_space *aspace,
- bool is_detach)
- {
- struct msm_gem_object *msm_obj;
- struct drm_gem_object *obj;
- struct aspace_client *aclient;
- int ret;
- uint64_t iova;
- if (!aspace)
- return;
- mutex_lock(&aspace->list_lock);
- if (is_detach) {
- /* Indicate to clients that the domain is getting detached */
- list_for_each_entry(aclient, &aspace->clients, list) {
- if (aclient->cb)
- aclient->cb(aclient->cb_data,
- is_detach);
- }
- /*
- * Unmap active buffers. Clients should typically do this
- * themselves when the callback is invoked, but it must also be
- * done here for buffers that are not attached to any plane.
- */
- list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
- obj = &msm_obj->base;
- if (obj->import_attach) {
- mutex_lock(&msm_obj->lock);
- put_iova(obj);
- msm_obj->obj_dirty = true;
- mutex_unlock(&msm_obj->lock);
- }
- }
- } else {
- /* map active buffers */
- list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
- obj = &msm_obj->base;
- ret = msm_gem_get_iova(obj, aspace, &iova);
- if (ret) {
- mutex_unlock(&aspace->list_lock);
- return;
- }
- }
- /* Indicate to clients that the domain is attached */
- list_for_each_entry(aclient, &aspace->clients, list) {
- if (aclient->cb)
- aclient->cb(aclient->cb_data,
- is_detach);
- }
- }
- mutex_unlock(&aspace->list_lock);
- }
- int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
- {
- args->pitch = align_pitch(args->width, args->bpp);
- args->size = PAGE_ALIGN(args->pitch * args->height);
- return msm_gem_new_handle(dev, file, args->size,
- MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
- }
- int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
- {
- struct drm_gem_object *obj;
- int ret = 0;
- /* GEM does all our handle to object mapping */
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL) {
- ret = -ENOENT;
- goto fail;
- }
- *offset = msm_gem_mmap_offset(obj);
- drm_gem_object_put_unlocked(obj);
- fail:
- return ret;
- }
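- /* Map the object into the kernel's address space (dma_buf_vmap() for
- * imports, vmap() otherwise), refcounted via vmap_count.
- */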
- static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret = 0;
- mutex_lock(&msm_obj->lock);
- if (WARN_ON(msm_obj->madv > madv)) {
- dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
- msm_obj->madv, madv);
- mutex_unlock(&msm_obj->lock);
- return ERR_PTR(-EBUSY);
- }
- /* increment vmap_count *before* vmap() call, so shrinker can
- * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
- * This guarantees that we won't try to msm_gem_vunmap() this
- * same object from within the vmap() call (while we already
- * hold msm_obj->lock)
- */
- msm_obj->vmap_count++;
- if (!msm_obj->vaddr) {
- struct page **pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
- if (obj->import_attach) {
- ret = dma_buf_begin_cpu_access(
- obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
- if (ret)
- goto fail;
- msm_obj->vaddr =
- dma_buf_vmap(obj->import_attach->dmabuf);
- } else {
- msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
- }
- if (msm_obj->vaddr == NULL) {
- ret = -ENOMEM;
- goto fail;
- }
- }
- mutex_unlock(&msm_obj->lock);
- return msm_obj->vaddr;
- fail:
- msm_obj->vmap_count--;
- mutex_unlock(&msm_obj->lock);
- return ERR_PTR(ret);
- }
- void *msm_gem_get_vaddr(struct drm_gem_object *obj)
- {
- return get_vaddr(obj, MSM_MADV_WILLNEED);
- }
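- /*
- * Typical pairing (a sketch with a hypothetical caller; assumes a
- * pinned, WILLNEED object and illustrative "data"/"size" names):
- *
- * void *ptr = msm_gem_get_vaddr(obj);
- * if (!IS_ERR(ptr)) {
- * memcpy(ptr, data, size);
- * msm_gem_put_vaddr(obj);
- * }
- */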
- /*
- * Don't use this! It is for the very special case of dumping
- * submits from GPU hangs or faults, where the bo may already
- * be MSM_MADV_DONTNEED, but we know the buffer is still on the
- * active list.
- */
- void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
- {
- return get_vaddr(obj, __MSM_MADV_PURGED);
- }
- void msm_gem_put_vaddr(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
- WARN_ON(msm_obj->vmap_count < 1);
- msm_obj->vmap_count--;
- mutex_unlock(&msm_obj->lock);
- }
- /* Update the madvise status; returns true if not purged, else
- * false or -errno.
- */
- int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
- if (msm_obj->madv != __MSM_MADV_PURGED)
- msm_obj->madv = madv;
- madv = msm_obj->madv;
- mutex_unlock(&msm_obj->lock);
- return (madv != __MSM_MADV_PURGED);
- }
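- /* Reclaim a purgeable object's backing store: drop its iovas, kernel
- * mapping and pages, then truncate the shmem backing so the memory is
- * returned to the system immediately.
- */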
- void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
- {
- struct drm_device *dev = obj->dev;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- WARN_ON(!is_purgeable(msm_obj));
- WARN_ON(obj->import_attach);
- mutex_lock_nested(&msm_obj->lock, subclass);
- put_iova(obj);
- if (msm_obj->aspace) {
- mutex_lock(&msm_obj->aspace->list_lock);
- msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
- obj);
- mutex_unlock(&msm_obj->aspace->list_lock);
- }
- msm_gem_vunmap_locked(obj);
- put_pages(obj);
- msm_obj->madv = __MSM_MADV_PURGED;
- drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
- drm_gem_free_mmap_offset(obj);
- /* Our goal here is to return as much of the memory as
- * possible back to the system, as we are called from OOM.
- * To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*.
- */
- shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
- invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
- 0, (loff_t)-1);
- mutex_unlock(&msm_obj->lock);
- }
- static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!mutex_is_locked(&msm_obj->lock));
- if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
- return;
- if (obj->import_attach) {
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
- dma_buf_end_cpu_access(obj->import_attach->dmabuf,
- DMA_BIDIRECTIONAL);
- } else {
- vunmap(msm_obj->vaddr);
- }
- msm_obj->vaddr = NULL;
- }
- void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock_nested(&msm_obj->lock, subclass);
- msm_gem_vunmap_locked(obj);
- mutex_unlock(&msm_obj->lock);
- }
- int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- bool write = !!(op & MSM_PREP_WRITE);
- unsigned long remain =
- op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
- long ret;
- ret = dma_resv_wait_timeout_rcu(msm_obj->resv, write,
- true, remain);
- if (ret == 0)
- return remain == 0 ? -EBUSY : -ETIMEDOUT;
- else if (ret < 0)
- return ret;
- /* TODO cache maintenance */
- return 0;
- }
- int msm_gem_cpu_fini(struct drm_gem_object *obj)
- {
- /* TODO cache maintenance */
- return 0;
- }
- #ifdef CONFIG_DEBUG_FS
- static void describe_fence(struct dma_fence *fence, const char *type,
- struct seq_file *m)
- {
- if (!dma_fence_is_signaled(fence))
- seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- fence->seqno);
- }
- void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct dma_resv *robj = msm_obj->resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence;
- struct msm_gem_vma *vma;
- uint64_t off = drm_vma_node_start(&obj->vma_node);
- const char *madv;
- mutex_lock(&msm_obj->lock);
- switch (msm_obj->madv) {
- case __MSM_MADV_PURGED:
- madv = " purged";
- break;
- case MSM_MADV_DONTNEED:
- madv = " purgeable";
- break;
- case MSM_MADV_WILLNEED:
- default:
- madv = "";
- break;
- }
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %pK\t",
- msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
- obj->name, kref_read(&obj->refcount),
- off, msm_obj->vaddr);
- seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
- if (!list_empty(&msm_obj->vmas)) {
- seq_puts(m, " vmas:");
- list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
- vma->iova, vma->mapped ? "mapped" : "unmapped",
- vma->inuse);
- seq_puts(m, "\n");
- }
- rcu_read_lock();
- fobj = rcu_dereference(robj->fence);
- if (fobj) {
- unsigned int i, shared_count = fobj->shared_count;
- for (i = 0; i < shared_count; i++) {
- fence = rcu_dereference(fobj->shared[i]);
- describe_fence(fence, "Shared", m);
- }
- }
- fence = rcu_dereference(robj->fence_excl);
- if (fence)
- describe_fence(fence, "Exclusive", m);
- rcu_read_unlock();
- mutex_unlock(&msm_obj->lock);
- }
- void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
- {
- struct msm_gem_object *msm_obj;
- int count = 0;
- size_t size = 0;
- seq_puts(m, " flags id ref offset kaddr size madv name\n");
- list_for_each_entry(msm_obj, list, mm_list) {
- struct drm_gem_object *obj = &msm_obj->base;
- seq_puts(m, " ");
- msm_gem_describe(obj, m);
- count++;
- size += obj->size;
- }
- seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
- }
- #endif
- /* don't call directly! Use drm_gem_object_put() and friends */
- void msm_gem_free_object(struct drm_gem_object *obj)
- {
- struct drm_device *dev = obj->dev;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- /* object should not be on active list: */
- WARN_ON(is_active(msm_obj));
- list_del(&msm_obj->mm_list);
- mutex_lock(&msm_obj->lock);
- put_iova(obj);
- if (msm_obj->aspace) {
- mutex_lock(&msm_obj->aspace->list_lock);
- msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
- obj);
- mutex_unlock(&msm_obj->aspace->list_lock);
- }
- if (obj->import_attach) {
- if (msm_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
- /* Don't drop the pages for imported dmabuf, as they are not
- * ours, just free the array we allocated:
- */
- if (msm_obj->pages)
- kvfree(msm_obj->pages);
- drm_prime_gem_destroy(obj, msm_obj->sgt);
- } else {
- msm_gem_vunmap_locked(obj);
- put_pages(obj);
- }
- if (msm_obj->resv == &msm_obj->_resv)
- dma_resv_fini(msm_obj->resv);
- drm_gem_object_release(obj);
- mutex_unlock(&msm_obj->lock);
- kfree(msm_obj);
- }
- /* convenience method to construct a GEM buffer object, and userspace handle */
- int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle,
- char *name)
- {
- struct drm_gem_object *obj;
- int ret;
- obj = msm_gem_new(dev, size, flags);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
- if (name)
- msm_gem_object_set_name(obj, "%s", name);
- ret = drm_gem_handle_create(file, obj, handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(obj);
- return ret;
- }
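- /* Common allocation path: validate the cache flags, initialize the
- * msm_gem_object bookkeeping, and put the object on the inactive list.
- */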
- static int msm_gem_new_impl(struct drm_device *dev,
- uint32_t size, uint32_t flags,
- struct dma_resv *resv,
- struct drm_gem_object **obj,
- bool struct_mutex_locked)
- {
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_gem_object *msm_obj;
- switch (flags & MSM_BO_CACHE_MASK) {
- case MSM_BO_UNCACHED:
- case MSM_BO_CACHED:
- case MSM_BO_WC:
- break;
- default:
- dev_err(dev->dev, "invalid cache flag: %x\n",
- (flags & MSM_BO_CACHE_MASK));
- return -EINVAL;
- }
- msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
- if (!msm_obj)
- return -ENOMEM;
- mutex_init(&msm_obj->lock);
- msm_obj->flags = flags;
- msm_obj->madv = MSM_MADV_WILLNEED;
- if (resv) {
- msm_obj->resv = resv;
- } else {
- msm_obj->resv = &msm_obj->_resv;
- dma_resv_init(msm_obj->resv);
- }
- INIT_LIST_HEAD(&msm_obj->submit_entry);
- INIT_LIST_HEAD(&msm_obj->vmas);
- INIT_LIST_HEAD(&msm_obj->iova_list);
- msm_obj->aspace = NULL;
- msm_obj->in_active_list = false;
- msm_obj->obj_dirty = false;
- if (struct_mutex_locked) {
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
- } else {
- mutex_lock(&dev->struct_mutex);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
- mutex_unlock(&dev->struct_mutex);
- }
- *obj = &msm_obj->base;
- return 0;
- }
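- /* Allocate a GEM object, backed by the VRAM carveout when there is no
- * IOMMU (or for stolen/scanout buffers when a carveout exists), and by
- * shmem otherwise.
- */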
- static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags, bool struct_mutex_locked)
- {
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_gem_object *obj = NULL;
- bool use_vram = false;
- int ret;
- size = PAGE_ALIGN(size);
- if (!iommu_present(&platform_bus_type))
- use_vram = true;
- else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
- use_vram = true;
- if (WARN_ON(use_vram && !priv->vram.size))
- return ERR_PTR(-EINVAL);
- /* Disallow zero sized objects as they make the underlying
- * infrastructure grumpy
- */
- if (size == 0)
- return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
- if (ret)
- goto fail;
- if (use_vram) {
- struct msm_gem_vma *vma;
- struct page **pages;
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
- vma = add_vma(obj, NULL);
- mutex_unlock(&msm_obj->lock);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto fail;
- }
- to_msm_bo(obj)->vram_node = &vma->node;
- drm_gem_private_object_init(dev, obj, size);
- pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
- }
- vma->iova = physaddr(obj);
- } else {
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto fail;
- }
- return obj;
- fail:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
- }
- struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
- uint32_t size, uint32_t flags)
- {
- return _msm_gem_new(dev, size, flags, true);
- }
- struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags)
- {
- return _msm_gem_new(dev, size, flags, false);
- }
- int msm_gem_delayed_import(struct drm_gem_object *obj)
- {
- struct dma_buf_attachment *attach;
- struct sg_table *sgt;
- struct msm_gem_object *msm_obj;
- int ret = 0;
- if (!obj) {
- DRM_ERROR("NULL drm gem object\n");
- return -EINVAL;
- }
- msm_obj = to_msm_bo(obj);
- if (!obj->import_attach) {
- DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
- return -EINVAL;
- }
- attach = obj->import_attach;
- attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
- if (msm_obj->flags & MSM_BO_SKIPSYNC)
- attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- /*
- * All SMMU mappings are generated with a cache hint.
- * The SSPP cache hint will control LLCC access.
- */
- if (msm_obj->flags & MSM_BO_KEEPATTRS)
- attach->dma_map_attrs |=
- (DMA_ATTR_IOMMU_USE_UPSTREAM_HINT |
- DMA_ATTR_IOMMU_USE_LLC_NWA);
- /*
- * dma_buf_map_attachment() calls dma_map_sg() for the ion buffer
- * mapping, so the iova is mapped by the time the function returns.
- */
- sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
- ret);
- goto fail_import;
- }
- msm_obj->sgt = sgt;
- msm_obj->pages = NULL;
- fail_import:
- return ret;
- }
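- /* Import a dma-buf: wrap it in a GEM object and record the sg table; if
- * sgt is NULL the actual attach/map is deferred to
- * msm_gem_delayed_import().
- */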
- struct drm_gem_object *msm_gem_import(struct drm_device *dev,
- struct dma_buf *dmabuf, struct sg_table *sgt)
- {
- struct msm_gem_object *msm_obj;
- struct drm_gem_object *obj = NULL;
- uint32_t size;
- int ret;
- unsigned long flags = 0;
- size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj,
- false);
- if (ret)
- goto fail;
- drm_gem_private_object_init(dev, obj, size);
- msm_obj = to_msm_bo(obj);
- mutex_lock(&msm_obj->lock);
- msm_obj->sgt = sgt;
- msm_obj->pages = NULL;
- /*
- * 1) If the sg table is NULL, the user should call
- * msm_gem_delayed_import() to add the sg table back to the drm
- * gem object.
- *
- * 2) Add the buffer flag unconditionally for all import cases:
- * # A cached buffer will be attached immediately, hence the sgt
- * will be available upon gem obj creation.
- * # An un-cached buffer follows delayed attach, hence the sgt
- * will be NULL upon gem obj creation.
- */
- msm_obj->flags |= MSM_BO_EXTBUF;
- /*
- * For all uncached buffers, there is no need to perform cache
- * maintenance at dma map/unmap time.
- */
- ret = dma_buf_get_flags(dmabuf, &flags);
- if (ret) {
- DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
- } else if ((flags & ION_FLAG_CACHED) == 0) {
- DRM_DEBUG("Buffer is uncached type\n");
- msm_obj->flags |= MSM_BO_SKIPSYNC;
- }
- mutex_unlock(&msm_obj->lock);
- return obj;
- fail:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
- }
- static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova, bool locked)
- {
- void *vaddr;
- struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
- int ret;
- if (IS_ERR(obj))
- return ERR_CAST(obj);
- if (iova) {
- ret = msm_gem_get_iova(obj, aspace, iova);
- if (ret)
- goto err;
- }
- vaddr = msm_gem_get_vaddr(obj);
- if (IS_ERR(vaddr)) {
- msm_gem_put_iova(obj, aspace);
- ret = PTR_ERR(vaddr);
- goto err;
- }
- if (bo)
- *bo = obj;
- return vaddr;
- err:
- if (locked)
- drm_gem_object_put(obj);
- else
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
- }
- void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
- {
- return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
- }
- void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
- uint32_t flags, struct msm_gem_address_space *aspace,
- struct drm_gem_object **bo, uint64_t *iova)
- {
- return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
- }
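- /*
- * Example lifecycle for a kernel-owned buffer (a sketch; assumes a
- * valid "dev" and "aspace", names are illustrative):
- *
- * struct drm_gem_object *bo = NULL;
- * uint64_t iova;
- * void *vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace,
- * &bo, &iova);
- * if (IS_ERR(vaddr))
- * return PTR_ERR(vaddr);
- * ... use vaddr for CPU access and iova for device access ...
- * msm_gem_kernel_put(bo, aspace, false);
- */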
- void msm_gem_kernel_put(struct drm_gem_object *bo,
- struct msm_gem_address_space *aspace, bool locked)
- {
- if (IS_ERR_OR_NULL(bo))
- return;
- msm_gem_put_vaddr(bo);
- msm_gem_unpin_iova(bo, aspace);
- if (locked)
- drm_gem_object_put(bo);
- else
- drm_gem_object_put_unlocked(bo);
- }
- void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
- {
- struct msm_gem_object *msm_obj = to_msm_bo(bo);
- va_list ap;
- if (!fmt)
- return;
- va_start(ap, fmt);
- vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
- va_end(ap);
- }