// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <[email protected]>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *	- Forwarded on unmodified to the SPMD at EL3
 *	- Rejected as "unsupported"
 *	- Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0
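
/*
 * Memory-region handles are 64 bits wide but are passed to and from the
 * host split across two 32-bit registers (bits [31:0] then bits [63:32]).
 * PACK_HANDLE() is used on the reclaim path below; in case it is not
 * already provided by one of the headers above, a minimal (assumed)
 * definition is given here.
 */
#ifndef PACK_HANDLE
#define PACK_HANDLE(lo, hi)	((u64)(lo) | ((u64)(hi) << 32))
#endif
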
/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void	*buf;
	size_t	len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

struct kvm_ffa_buffers {
	hyp_spinlock_t lock;
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;

static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	*res = (struct arm_smccc_res) {
		.a0 = FFA_ERROR,
		.a2 = ffa_errno,
	};
}

static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret == FFA_RET_SUCCESS) {
		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
						.a2 = prop };
	} else {
		ffa_to_smccc_error(res, ret);
	}
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

static bool is_ffa_call(u64 func_id)
{
	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}
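
/*
 * Thin wrappers around the SMCs we issue to the SPMD at EL3. As an example
 * of the register ABI, FFA_FN64_RXTX_MAP takes the physical address of the
 * TX buffer, the physical address of the RX buffer and the buffer size in
 * FFA_PAGE_SIZE units; see Arm DEN0077 for the full register assignments.
 */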
static int spmd_map_ffa_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int spmd_unmap_ffa_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static void spmd_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}

static void spmd_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}

static void spmd_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			  u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}

static void spmd_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}
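
/*
 * Note that the total length and the fragment length are both set to @len
 * here: the retrieve request built on the reclaim path always fits in a
 * single fragment of the hypervisor's TX buffer.
 */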
static void spmd_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}
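
/*
 * Handle FFA_FN64_RXTX_MAP from the host: validate the host's buffer pair,
 * map our own buffers into the SPMD and then share and pin the host's pages
 * so that descriptors can be parsed out of them safely. Failures unwind in
 * reverse order via the err_* labels.
 */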
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	ret = spmd_map_ffa_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	spmd_unmap_ffa_buffers();
	goto out_unlock;
}
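
/*
 * Handle FFA_RXTX_UNMAP: tear down in the reverse order of do_ffa_rxtx_map(),
 * unpinning and unsharing the host buffers before unmapping our own buffers
 * from the SPMD.
 */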
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	spmd_unmap_ffa_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}
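
/*
 * Walk an array of constituent memory ranges, transitioning each range to
 * the "shared with FF-A" state in the host stage-2 page-table. Note that
 * range->pg_cnt is in FFA_PAGE_SIZE units, which need not match the
 * kernel's PAGE_SIZE, hence the alignment check before converting. The
 * number of ranges successfully shared is returned so that the caller can
 * roll back after a partial failure.
 */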
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
	int ret = 0;

	if (nshared != nranges) {
		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
	int ret = 0;

	if (nunshared != nranges) {
		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}
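
/*
 * Handle FFA_MEM_FRAG_TX: the host is transmitting a continuation fragment
 * of a transaction begun with FFA_MEM_SHARE or FFA_MEM_LEND. Each fragment
 * is simply an array of address ranges, so update the host stage-2 for the
 * new ranges before forwarding the fragment to the SPMD.
 */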
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		spmd_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	spmd_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't rollback
	 * the host stage-2 changes. The pages previously marked as shared will
	 * remain stuck in that state forever, hence preventing the host from
	 * sharing/donating them again and may possibly lead to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}
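
/*
 * Handle FFA_MEM_SHARE and FFA_MEM_LEND. The host's TX buffer carries a
 * v1.0 memory transaction descriptor, laid out roughly as follows (a
 * sketch; see Arm DEN0077 for the authoritative format):
 *
 *	struct ffa_mem_region			header (sender_id, ep_count, ...)
 *	struct ffa_mem_region_attributes	ep_mem_access[]
 *		.composite_off ---------.
 *					v
 *	struct ffa_composite_mem_region		(total_pg_cnt, addr_range_cnt)
 *	struct ffa_mem_region_addr_range	constituents[]
 *
 * The offsets are validated before anything is dereferenced, the host
 * stage-2 is updated for every constituent range and only then is the call
 * forwarded to the SPMD.
 */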
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
					    struct arm_smccc_res *res,
					    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
		     func_id != FFA_FN64_MEM_LEND);

	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	offset = buf->ep_mem_access[0].composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	spmd_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}
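
/*
 * Handle FFA_MEM_RECLAIM: the host cannot be trusted to tell us which pages
 * a handle covers, so retrieve the full descriptor back from the SPMD (one
 * fragment at a time if it doesn't fit in our RX buffer), issue the reclaim
 * and, if the SPMD accepts it, return the constituent ranges to the host in
 * its stage-2.
 */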
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id	= HOST_FFA_ID,
		.handle		= handle,
	};

	spmd_retrieve_req(res, sizeof(*buf));
	buf = hyp_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;
	fraglen = res->a2;

	offset = buf->ep_mem_access[0].composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);

	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		spmd_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
	}

	spmd_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}
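
/*
 * Calls that we refuse to forward: the retrieve, relinquish and
 * indirect-messaging interfaces would let memory change hands behind our
 * back, and the 32-bit variants are rejected in favour of their 64-bit
 * counterparts so that only one descriptor ABI needs to be mediated.
 */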
static bool ffa_call_unsupported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_REQ:
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return true;
	}

	return false;
}
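
/*
 * Handle FFA_FEATURES for the calls we mediate ourselves; anything else is
 * passed through for the SPMD to answer. For the memory-sharing calls we
 * report success with no optional properties.
 */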
static bool do_ffa_features(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	u64 prop = 0;
	int ret = 0;

	if (ffa_call_unsupported(id)) {
		ret = FFA_RET_NOT_SUPPORTED;
		goto out_handled;
	}

	switch (id) {
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		ret = FFA_RET_SUCCESS;
		prop = 0; /* No support for dynamic buffers */
		goto out_handled;
	default:
		return false;
	}

out_handled:
	ffa_to_smccc_res_prop(res, ret, prop);
	return true;
}
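
/*
 * Entry point from the host's SMC trap handler. Note that the 32-bit
 * versions of FFA_MEM_SHARE and FFA_MEM_LEND are funnelled into the 64-bit
 * handlers (e.g. a host FFA_MEM_SHARE is reissued as FFA_FN64_MEM_SHARE):
 * the descriptor format is the same, so one implementation suffices.
 */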
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	struct arm_smccc_res res;

	if (!is_ffa_call(func_id))
		return false;

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, host_ctxt))
			return false;
		goto out_handled;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, host_ctxt);
		goto out_handled;
	}

	if (!ffa_call_unsupported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}
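
/*
 * Probe the SPMD and carve up the pages donated to the proxy at boot. The
 * assumed layout of @pages, sized by hyp_ffa_proxy_pages(), is:
 *
 *	+--------------------+--------------------+----------------------+
 *	| hyp TX buffer      | hyp RX buffer      | descriptor buffer    |
 *	| (KVM_FFA_MBOX_     | (KVM_FFA_MBOX_     | (whatever is left)   |
 *	|  NR_PAGES)         |  NR_PAGES)         |                      |
 *	+--------------------+--------------------+----------------------+
 */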
int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	size_t min_rxtx_sz;
	void *tx, *rx;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_1)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	if (res.a0 != FFA_VERSION_1_0)
		return -EOPNOTSUPP;

	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf	= pages,
		.len	= PAGE_SIZE *
			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
		.tx	= tx,
		.rx	= rx,
	};

	host_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
	};

	return 0;
}