Merge branch kvm-arm64/ffa-proxy into kvmarm/next

* kvm-arm64/ffa-proxy:
  : pKVM FF-A Proxy, courtesy Will Deacon and Andrew Walbran
  :
  : From the cover letter:
  :
  : pKVM's primary goal is to protect guest pages from a compromised host by
  : enforcing access control restrictions using stage-2 page-tables. Sadly,
  : this cannot prevent TrustZone from accessing non-secure memory, and a
  : compromised host could, for example, perform a 'confused deputy' attack
  : by asking TrustZone to use pages that have been donated to protected
  : guests. This would effectively allow the host to have TrustZone
  : exfiltrate guest secrets on its behalf, hence breaking the isolation
  : that pKVM intends to provide.
  :
  : This series addresses this problem by providing pKVM with the ability to
  : monitor SMCs following the Arm FF-A protocol. FF-A provides (among other
  : things) a set of memory management APIs allowing the Normal World to
  : share, donate or lend pages to the Secure world. By monitoring these
  : SMCs, pKVM can ensure that the pages shared, lent or donated to the
  : Secure world by the host kernel are only pages that it actually owns.
  KVM: arm64: pkvm: Add support for fragmented FF-A descriptors
  KVM: arm64: Handle FFA_FEATURES call from the host
  KVM: arm64: Handle FFA_MEM_LEND calls from the host
  KVM: arm64: Handle FFA_MEM_RECLAIM calls from the host
  KVM: arm64: Handle FFA_MEM_SHARE calls from the host
  KVM: arm64: Add FF-A helpers to share/unshare memory with secure world
  KVM: arm64: Handle FFA_RXTX_MAP and FFA_RXTX_UNMAP calls from the host
  KVM: arm64: Allocate pages for hypervisor FF-A mailboxes
  KVM: arm64: Probe FF-A version and host/hyp partition ID during init
  KVM: arm64: Block unsafe FF-A calls from the host

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
commit 1a08f4927a
Oliver Upton <oliver.upton@linux.dev>, 2023-06-15 13:02:37 +00:00

12 changed files with 897 additions and 1 deletion
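
For orientation before the diffs: the host-side call that the proxy intercepts looks roughly like the sketch below. This is illustrative, not code from the series — host_share_with_secure() and desc_len are hypothetical names, and the register layout (w1 = total descriptor length, w2 = fragment length, x3/w4 zero when the TX mailbox is used) is the FF-A v1.0 ABI that do_ffa_mem_xfer() in ffa.c below validates. Assumes <linux/arm-smccc.h> and <linux/arm_ffa.h>.

/* Sketch only: the descriptor has already been written to the host's TX mailbox. */
static int host_share_with_secure(size_t desc_len)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_MEM_SHARE,
			  desc_len,	/* w1: total descriptor length */
			  desc_len,	/* w2: length of this fragment */
			  0, 0,		/* x3/w4: MBZ when using the TX mailbox */
			  0, 0, 0, &res);

	return res.a0 == FFA_SUCCESS ? 0 : -EINVAL;
}

On a pKVM system this SMC traps to EL2, where kvm_host_ffa_handler() checks that every constituent page is actually owned by the host before reissuing the call to EL3.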

arch/arm64/include/asm/kvm_host.h

@@ -420,6 +420,7 @@ struct kvm_host_data {
struct kvm_host_psci_config {
/* PSCI version used by host. */
u32 version;
u32 smccc_version;
/* Function IDs used by host if version is v0.1. */
struct psci_0_1_function_ids function_ids_0_1;

arch/arm64/include/asm/kvm_pkvm.h

@@ -6,7 +6,9 @@
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__
#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
#include <asm/kvm_pgtable.h>
/* Maximum number of VMs that can co-exist under pKVM. */
@@ -106,4 +108,23 @@ static inline unsigned long host_s2_pgtable_pages(void)
return res;
}
#define KVM_FFA_MBOX_NR_PAGES 1
static inline unsigned long hyp_ffa_proxy_pages(void)
{
size_t desc_max;
/*
* The hypervisor FFA proxy needs enough memory to buffer a fragmented
* descriptor returned from EL3 in response to a RETRIEVE_REQ call.
*/
desc_max = sizeof(struct ffa_mem_region) +
sizeof(struct ffa_mem_region_attributes) +
sizeof(struct ffa_composite_mem_region) +
SG_MAX_SEGMENTS * sizeof(struct ffa_mem_region_addr_range);
/* Plus a page each for the hypervisor's RX and TX mailboxes. */
return (2 * KVM_FFA_MBOX_NR_PAGES) + DIV_ROUND_UP(desc_max, PAGE_SIZE);
}
#endif /* __ARM64_KVM_PKVM_H__ */
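
A rough worked example of the sizing above, as a standalone sketch. Assumptions (not stated in the patch itself): SG_MAX_SEGMENTS == 2048, as on configs with scatterlist chaining; 4KiB pages; and the FF-A v1.0 layouts in include/linux/arm_ffa.h, where the three header structs are 32, 16 and 16 bytes and each address range is 16 bytes.

#include <stdio.h>

int main(void)
{
	size_t desc_max = 32		/* struct ffa_mem_region header */
			+ 16		/* struct ffa_mem_region_attributes */
			+ 16		/* struct ffa_composite_mem_region header */
			+ 2048 * 16;	/* SG_MAX_SEGMENTS address ranges */

	/* Two mailbox pages plus the descriptor buffer, rounded up. */
	printf("%zu pages\n", 2 + (desc_max + 4095) / 4096);	/* "11 pages" */
	return 0;
}

Under those assumptions the descriptor buffer needs 9 pages, for a total of 11.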

arch/arm64/kvm/arm.c

@@ -1938,6 +1938,7 @@ static bool __init init_psci_relay(void)
}
kvm_host_psci_config.version = psci_ops.get_version();
kvm_host_psci_config.smccc_version = arm_smccc_get_version();
if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();

arch/arm64/kvm/hyp/include/nvhe/ffa.h

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2022 - Google LLC
* Author: Andrew Walbran <qwandor@google.com>
*/
#ifndef __KVM_HYP_FFA_H
#define __KVM_HYP_FFA_H
#include <asm/kvm_host.h>
#define FFA_MIN_FUNC_NUM 0x60
#define FFA_MAX_FUNC_NUM 0x7F
int hyp_ffa_init(void *pages);
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
#endif /* __KVM_HYP_FFA_H */
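
Plugging the 0x60-0x7F window into the kernel's SMCCC helpers gives the concrete function IDs the proxy watches. A sketch (the FFA_SMC32_MIN/FFA_SMC64_MAX names are illustrative, not from the series):

#include <linux/arm-smccc.h>

/* 0x84000060: first fast SMC32 call owned by the standard service. */
#define FFA_SMC32_MIN	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
					   ARM_SMCCC_SMC_32,		\
					   ARM_SMCCC_OWNER_STANDARD, 0x60)

/* 0xC400007F: last fast SMC64 call in the FF-A window. */
#define FFA_SMC64_MAX	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,		\
					   ARM_SMCCC_SMC_64,		\
					   ARM_SMCCC_OWNER_STANDARD, 0x7F)

The proxy therefore traps 0x84000060-0x8400007F (SMC32) and 0xC4000060-0xC400007F (SMC64), exactly the range check implemented by is_ffa_call() in ffa.c below.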

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

@@ -57,6 +57,7 @@ extern struct host_mmu host_mmu;
enum pkvm_component_id {
PKVM_ID_HOST,
PKVM_ID_HYP,
PKVM_ID_FFA,
};
extern unsigned long hyp_nr_cpus;
@@ -66,6 +67,8 @@ int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);

arch/arm64/kvm/hyp/nvhe/Makefile

@@ -22,7 +22,7 @@ lib-objs := $(addprefix ../../../lib/, $(lib-objs))
hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \
-	 cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o
+	 cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_DEBUG_LIST) += list_debug.o

arch/arm64/kvm/hyp/nvhe/ffa.c

@@ -0,0 +1,762 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
* the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
* Framework for Arm A-profile", which is specified by Arm in document
* number DEN0077.
*
* Copyright (C) 2022 - Google LLC
* Author: Andrew Walbran <qwandor@google.com>
*
* This driver hooks into the SMC trapping logic for the host and intercepts
* all calls falling within the FF-A range. Each call is either:
*
* - Forwarded on unmodified to the SPMD at EL3
* - Rejected as "unsupported"
* - Accompanied by a host stage-2 page-table check/update and reissued
*
* Consequently, any attempts by the host to make guest memory pages
* accessible to the secure world using FF-A will be detected either here
* (in the case that the memory is already owned by the guest) or during
* donation to the guest (in the case that the memory was previously shared
* with the secure world).
*
* To allow the rolling-back of page-table updates and FF-A calls in the
* event of failure, operations involving the RXTX buffers are locked for
* the duration and are therefore serialised.
*/
#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>
#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>
/*
* "ID value 0 must be returned at the Non-secure physical FF-A instance"
* We share this ID with the host.
*/
#define HOST_FFA_ID 0
/*
* A buffer to hold the maximum descriptor size we can see from the host,
* which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
* when resolving the handle on the reclaim path.
*/
struct kvm_ffa_descriptor_buffer {
void *buf;
size_t len;
};
static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
struct kvm_ffa_buffers {
hyp_spinlock_t lock;
void *tx;
void *rx;
};
/*
* Note that we don't currently lock these buffers explicitly, instead
* relying on the locking of the host FFA buffers as we only have one
* client.
*/
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
*res = (struct arm_smccc_res) {
.a0 = FFA_ERROR,
.a2 = ffa_errno,
};
}
static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
if (ret == FFA_RET_SUCCESS) {
*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
.a2 = prop };
} else {
ffa_to_smccc_error(res, ret);
}
}
static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
ffa_to_smccc_res_prop(res, ret, 0);
}
static void ffa_set_retval(struct kvm_cpu_context *ctxt,
struct arm_smccc_res *res)
{
cpu_reg(ctxt, 0) = res->a0;
cpu_reg(ctxt, 1) = res->a1;
cpu_reg(ctxt, 2) = res->a2;
cpu_reg(ctxt, 3) = res->a3;
}
static bool is_ffa_call(u64 func_id)
{
return ARM_SMCCC_IS_FAST_CALL(func_id) &&
ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
hyp_virt_to_phys(hyp_buffers.tx),
hyp_virt_to_phys(hyp_buffers.rx),
ffa_page_count,
0, 0, 0, 0,
&res);
return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}
static int ffa_unmap_hyp_buffers(void)
{
struct arm_smccc_res res;
arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
HOST_FFA_ID,
0, 0, 0, 0, 0, 0,
&res);
return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
handle_lo, handle_hi, fraglen, endpoint_id,
0, 0, 0,
res);
}
static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
u32 handle_hi, u32 fragoff)
{
arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
handle_lo, handle_hi, fragoff, HOST_FFA_ID,
0, 0, 0,
res);
}
static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
u32 fraglen)
{
arm_smccc_1_1_smc(func_id, len, fraglen,
0, 0, 0, 0, 0,
res);
}
static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
u32 handle_hi, u32 flags)
{
arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
handle_lo, handle_hi, flags,
0, 0, 0, 0,
res);
}
static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
len, len,
0, 0, 0, 0, 0,
res);
}
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(phys_addr_t, tx, ctxt, 1);
DECLARE_REG(phys_addr_t, rx, ctxt, 2);
DECLARE_REG(u32, npages, ctxt, 3);
int ret = 0;
void *rx_virt, *tx_virt;
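/*
 * The host gives the buffer size as a count of 4KiB FF-A pages; accept
 * only exactly one mailbox's worth at the hypervisor's page granule.
 */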
if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out;
}
if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out;
}
hyp_spin_lock(&host_buffers.lock);
if (host_buffers.tx) {
ret = FFA_RET_DENIED;
goto out_unlock;
}
/*
* Map our hypervisor buffers into the SPMD before mapping and
* pinning the host buffers in our own address space.
*/
ret = ffa_map_hyp_buffers(npages);
if (ret)
goto out_unlock;
ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
if (ret) {
ret = FFA_RET_INVALID_PARAMETERS;
goto err_unmap;
}
ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
if (ret) {
ret = FFA_RET_INVALID_PARAMETERS;
goto err_unshare_tx;
}
tx_virt = hyp_phys_to_virt(tx);
ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
if (ret) {
ret = FFA_RET_INVALID_PARAMETERS;
goto err_unshare_rx;
}
rx_virt = hyp_phys_to_virt(rx);
ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
if (ret) {
ret = FFA_RET_INVALID_PARAMETERS;
goto err_unpin_tx;
}
host_buffers.tx = tx_virt;
host_buffers.rx = rx_virt;
out_unlock:
hyp_spin_unlock(&host_buffers.lock);
out:
ffa_to_smccc_res(res, ret);
return;
err_unpin_tx:
hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
ffa_unmap_hyp_buffers();
goto out_unlock;
}
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, id, ctxt, 1);
int ret = 0;
if (id != HOST_FFA_ID) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out;
}
hyp_spin_lock(&host_buffers.lock);
if (!host_buffers.tx) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
host_buffers.tx = NULL;
hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
host_buffers.rx = NULL;
ffa_unmap_hyp_buffers();
out_unlock:
hyp_spin_unlock(&host_buffers.lock);
out:
ffa_to_smccc_res(res, ret);
}
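/*
 * Returns the number of ranges successfully shared, so that a partial
 * failure can be rolled back by unsharing exactly that many.
 */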
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
u32 nranges)
{
u32 i;
for (i = 0; i < nranges; ++i) {
struct ffa_mem_region_addr_range *range = &ranges[i];
u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
u64 pfn = hyp_phys_to_pfn(range->address);
if (!PAGE_ALIGNED(sz))
break;
if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
break;
}
return i;
}
static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
u32 nranges)
{
u32 i;
for (i = 0; i < nranges; ++i) {
struct ffa_mem_region_addr_range *range = &ranges[i];
u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
u64 pfn = hyp_phys_to_pfn(range->address);
if (!PAGE_ALIGNED(sz))
break;
if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
break;
}
return i;
}
static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
u32 nranges)
{
u32 nshared = __ffa_host_share_ranges(ranges, nranges);
int ret = 0;
if (nshared != nranges) {
WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
ret = FFA_RET_DENIED;
}
return ret;
}
static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
u32 nranges)
{
u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
int ret = 0;
if (nunshared != nranges) {
WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
ret = FFA_RET_DENIED;
}
return ret;
}
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, handle_lo, ctxt, 1);
DECLARE_REG(u32, handle_hi, ctxt, 2);
DECLARE_REG(u32, fraglen, ctxt, 3);
DECLARE_REG(u32, endpoint_id, ctxt, 4);
struct ffa_mem_region_addr_range *buf;
int ret = FFA_RET_INVALID_PARAMETERS;
u32 nr_ranges;
if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
goto out;
if (fraglen % sizeof(*buf))
goto out;
hyp_spin_lock(&host_buffers.lock);
if (!host_buffers.tx)
goto out_unlock;
buf = hyp_buffers.tx;
memcpy(buf, host_buffers.tx, fraglen);
nr_ranges = fraglen / sizeof(*buf);
ret = ffa_host_share_ranges(buf, nr_ranges);
if (ret) {
/*
* We're effectively aborting the transaction, so we need
* to restore the global state back to what it was prior to
* transmission of the first fragment.
*/
ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
WARN_ON(res->a0 != FFA_SUCCESS);
goto out_unlock;
}
ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));
out_unlock:
hyp_spin_unlock(&host_buffers.lock);
out:
if (ret)
ffa_to_smccc_res(res, ret);
/*
* If for any reason this did not succeed, we're in trouble as we have
* now lost the content of the previous fragments and we can't rollback
* the host stage-2 changes. The pages previously marked as shared will
* remain stuck in that state forever, hence preventing the host from
* sharing/donating them again and may possibly lead to subsequent
* failures, but this will not compromise confidentiality.
*/
return;
}
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, len, ctxt, 1);
DECLARE_REG(u32, fraglen, ctxt, 2);
DECLARE_REG(u64, addr_mbz, ctxt, 3);
DECLARE_REG(u32, npages_mbz, ctxt, 4);
struct ffa_composite_mem_region *reg;
struct ffa_mem_region *buf;
u32 offset, nr_ranges;
int ret = 0;
BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
func_id != FFA_FN64_MEM_LEND);
if (addr_mbz || npages_mbz || fraglen > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out;
}
if (fraglen < sizeof(struct ffa_mem_region) +
sizeof(struct ffa_mem_region_attributes)) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out;
}
hyp_spin_lock(&host_buffers.lock);
if (!host_buffers.tx) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
buf = hyp_buffers.tx;
memcpy(buf, host_buffers.tx, fraglen);
offset = buf->ep_mem_access[0].composite_off;
if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
reg = (void *)buf + offset;
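/*
 * The constituent ranges trail the composite header; whatever remains
 * of the fragment must be a whole number of addr_range entries.
 */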
nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
if (nr_ranges % sizeof(reg->constituents[0])) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
nr_ranges /= sizeof(reg->constituents[0]);
ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
if (ret)
goto out_unlock;
ffa_mem_xfer(res, func_id, len, fraglen);
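/*
 * For a multi-fragment transmission, EL3 answers the first fragment
 * with FFA_MEM_FRAG_RX and reports in w3 how many bytes it has
 * received so far, which must match what we just sent.
 */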
if (fraglen != len) {
if (res->a0 != FFA_MEM_FRAG_RX)
goto err_unshare;
if (res->a3 != fraglen)
goto err_unshare;
} else if (res->a0 != FFA_SUCCESS) {
goto err_unshare;
}
out_unlock:
hyp_spin_unlock(&host_buffers.lock);
out:
if (ret)
ffa_to_smccc_res(res, ret);
return;
err_unshare:
WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
goto out_unlock;
}
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, handle_lo, ctxt, 1);
DECLARE_REG(u32, handle_hi, ctxt, 2);
DECLARE_REG(u32, flags, ctxt, 3);
struct ffa_composite_mem_region *reg;
u32 offset, len, fraglen, fragoff;
struct ffa_mem_region *buf;
int ret = 0;
u64 handle;
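/* FF-A memory handles are 64 bits wide, split across two 32-bit GPRs. */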
handle = PACK_HANDLE(handle_lo, handle_hi);
hyp_spin_lock(&host_buffers.lock);
buf = hyp_buffers.tx;
*buf = (struct ffa_mem_region) {
.sender_id = HOST_FFA_ID,
.handle = handle,
};
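/*
 * Retrieve the full descriptor behind this handle from EL3, so we know
 * exactly which constituent ranges to hand back to the host.
 */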
ffa_retrieve_req(res, sizeof(*buf));
buf = hyp_buffers.rx;
if (res->a0 != FFA_MEM_RETRIEVE_RESP)
goto out_unlock;
len = res->a1;
fraglen = res->a2;
offset = buf->ep_mem_access[0].composite_off;
/*
* We can trust the SPMD to get this right, but let's at least
* check that we end up with something that doesn't look _completely_
* bogus.
*/
if (WARN_ON(offset > len ||
fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
ret = FFA_RET_ABORTED;
goto out_unlock;
}
if (len > ffa_desc_buf.len) {
ret = FFA_RET_NO_MEMORY;
goto out_unlock;
}
buf = ffa_desc_buf.buf;
memcpy(buf, hyp_buffers.rx, fraglen);
for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
if (res->a0 != FFA_MEM_FRAG_TX) {
ret = FFA_RET_INVALID_PARAMETERS;
goto out_unlock;
}
fraglen = res->a3;
memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
}
ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
if (res->a0 != FFA_SUCCESS)
goto out_unlock;
reg = (void *)buf + offset;
/* If the SPMD was happy, then we should be too. */
WARN_ON(ffa_host_unshare_ranges(reg->constituents,
reg->addr_range_cnt));
out_unlock:
hyp_spin_unlock(&host_buffers.lock);
if (ret)
ffa_to_smccc_res(res, ret);
}
/*
* Is a given FFA function supported, either by forwarding on directly
* or by handling at EL2?
*/
static bool ffa_call_supported(u64 func_id)
{
switch (func_id) {
/* Unsupported memory management calls */
case FFA_FN64_MEM_RETRIEVE_REQ:
case FFA_MEM_RETRIEVE_RESP:
case FFA_MEM_RELINQUISH:
case FFA_MEM_OP_PAUSE:
case FFA_MEM_OP_RESUME:
case FFA_MEM_FRAG_RX:
case FFA_FN64_MEM_DONATE:
/* Indirect message passing via RX/TX buffers */
case FFA_MSG_SEND:
case FFA_MSG_POLL:
case FFA_MSG_WAIT:
/* 32-bit variants of 64-bit calls */
case FFA_MSG_SEND_DIRECT_REQ:
case FFA_MSG_SEND_DIRECT_RESP:
case FFA_RXTX_MAP:
case FFA_MEM_DONATE:
case FFA_MEM_RETRIEVE_REQ:
return false;
}
return true;
}
static bool do_ffa_features(struct arm_smccc_res *res,
struct kvm_cpu_context *ctxt)
{
DECLARE_REG(u32, id, ctxt, 1);
u64 prop = 0;
int ret = 0;
if (!ffa_call_supported(id)) {
ret = FFA_RET_NOT_SUPPORTED;
goto out_handled;
}
switch (id) {
case FFA_MEM_SHARE:
case FFA_FN64_MEM_SHARE:
case FFA_MEM_LEND:
case FFA_FN64_MEM_LEND:
ret = FFA_RET_SUCCESS;
prop = 0; /* No support for dynamic buffers */
goto out_handled;
default:
return false;
}
out_handled:
ffa_to_smccc_res_prop(res, ret, prop);
return true;
}
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, func_id, host_ctxt, 0);
struct arm_smccc_res res;
/*
* There's no way we can tell what a non-standard SMC call might
* be up to. Ideally, we would terminate these here and return
* an error to the host, but sadly devices make use of custom
* firmware calls for things like power management, debugging,
* RNG access and crash reporting.
*
* Given that the architecture requires us to trust EL3 anyway,
* we forward unrecognised calls on under the assumption that
* the firmware doesn't expose a mechanism to access arbitrary
* non-secure memory. Short of a per-device table of SMCs, this
* is the best we can do.
*/
if (!is_ffa_call(func_id))
return false;
switch (func_id) {
case FFA_FEATURES:
if (!do_ffa_features(&res, host_ctxt))
return false;
goto out_handled;
/* Memory management */
case FFA_FN64_RXTX_MAP:
do_ffa_rxtx_map(&res, host_ctxt);
goto out_handled;
case FFA_RXTX_UNMAP:
do_ffa_rxtx_unmap(&res, host_ctxt);
goto out_handled;
case FFA_MEM_SHARE:
case FFA_FN64_MEM_SHARE:
do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
goto out_handled;
case FFA_MEM_RECLAIM:
do_ffa_mem_reclaim(&res, host_ctxt);
goto out_handled;
case FFA_MEM_LEND:
case FFA_FN64_MEM_LEND:
do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
goto out_handled;
case FFA_MEM_FRAG_TX:
do_ffa_mem_frag_tx(&res, host_ctxt);
goto out_handled;
}
if (ffa_call_supported(func_id))
return false; /* Pass through */
ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
ffa_set_retval(host_ctxt, &res);
return true;
}
int hyp_ffa_init(void *pages)
{
struct arm_smccc_res res;
size_t min_rxtx_sz;
void *tx, *rx;
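/* FF-A is layered on SMCCC v1.2+; quietly bail out on older firmware. */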
if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
return 0;
arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 == FFA_RET_NOT_SUPPORTED)
return 0;
if (res.a0 != FFA_VERSION_1_0)
return -EOPNOTSUPP;
arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 != FFA_SUCCESS)
return -EOPNOTSUPP;
if (res.a2 != HOST_FFA_ID)
return -EINVAL;
arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
0, 0, 0, 0, 0, 0, &res);
if (res.a0 != FFA_SUCCESS)
return -EOPNOTSUPP;
switch (res.a2) {
case FFA_FEAT_RXTX_MIN_SZ_4K:
min_rxtx_sz = SZ_4K;
break;
case FFA_FEAT_RXTX_MIN_SZ_16K:
min_rxtx_sz = SZ_16K;
break;
case FFA_FEAT_RXTX_MIN_SZ_64K:
min_rxtx_sz = SZ_64K;
break;
default:
return -EINVAL;
}
if (min_rxtx_sz > PAGE_SIZE)
return -EOPNOTSUPP;
tx = pages;
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
rx = pages;
pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
.buf = pages,
.len = PAGE_SIZE *
(hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
};
hyp_buffers = (struct kvm_ffa_buffers) {
.lock = __HYP_SPIN_LOCK_UNLOCKED,
.tx = tx,
.rx = rx,
};
host_buffers = (struct kvm_ffa_buffers) {
.lock = __HYP_SPIN_LOCK_UNLOCKED,
};
return 0;
}

arch/arm64/kvm/hyp/nvhe/hyp-main.c

@@ -13,6 +13,7 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
@@ -383,6 +384,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
bool handled;
handled = kvm_host_psci_handler(host_ctxt);
if (!handled)
handled = kvm_host_ffa_handler(host_ctxt);
if (!handled)
default_host_smc_handler(host_ctxt);

arch/arm64/kvm/hyp/nvhe/mem_protect.c

@@ -842,6 +842,13 @@ static int check_share(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
break;
case PKVM_ID_FFA:
/*
* We only check the host; the secure side will check the other
* end when we forward the FFA call.
*/
ret = 0;
break;
default:
ret = -EINVAL;
}
@@ -870,6 +877,13 @@ static int __do_share(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
break;
case PKVM_ID_FFA:
/*
* We're not responsible for any secure page-tables, so there's
* nothing to do here.
*/
ret = 0;
break;
default:
ret = -EINVAL;
}
@@ -918,6 +932,10 @@ static int check_unshare(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_ack_unshare(completer_addr, tx);
break;
case PKVM_ID_FFA:
/* See check_share() */
ret = 0;
break;
default:
ret = -EINVAL;
}
@@ -946,6 +964,10 @@ static int __do_unshare(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_complete_unshare(completer_addr, tx);
break;
case PKVM_ID_FFA:
/* See __do_share() */
ret = 0;
break;
default:
ret = -EINVAL;
}
@@ -1235,3 +1257,49 @@ void hyp_unpin_shared_mem(void *from, void *to)
hyp_unlock_component();
host_unlock_component();
}
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
{
int ret;
struct pkvm_mem_share share = {
.tx = {
.nr_pages = nr_pages,
.initiator = {
.id = PKVM_ID_HOST,
.addr = hyp_pfn_to_phys(pfn),
},
.completer = {
.id = PKVM_ID_FFA,
},
},
};
host_lock_component();
ret = do_share(&share);
host_unlock_component();
return ret;
}
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
{
int ret;
struct pkvm_mem_share share = {
.tx = {
.nr_pages = nr_pages,
.initiator = {
.id = PKVM_ID_HOST,
.addr = hyp_pfn_to_phys(pfn),
},
.completer = {
.id = PKVM_ID_FFA,
},
},
};
host_lock_component();
ret = do_unshare(&share);
host_unlock_component();
return ret;
}

arch/arm64/kvm/hyp/nvhe/setup.c

@@ -11,6 +11,7 @@
#include <asm/kvm_pkvm.h>
#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
@@ -28,6 +29,7 @@ static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
@@ -57,6 +59,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
if (!host_s2_pgt_base)
return -ENOMEM;
nr_pages = hyp_ffa_proxy_pages();
ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
if (!ffa_proxy_pages)
return -ENOMEM;
return 0;
}
@@ -314,6 +321,10 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
ret = hyp_ffa_init(ffa_proxy_pages);
if (ret)
goto out;
pkvm_hyp_vm_table_init(vm_table_base);
out:
/*

arch/arm64/kvm/pkvm.c

@@ -78,6 +78,7 @@ void __init kvm_hyp_reserve(void)
hyp_mem_pages += host_s2_pgtable_pages();
hyp_mem_pages += hyp_vm_table_pages();
hyp_mem_pages += hyp_vmemmap_pages(STRUCT_HYP_PAGE_SIZE);
hyp_mem_pages += hyp_ffa_proxy_pages();
/*
* Try to allocate a PMD-aligned region to reduce TLB pressure once

include/linux/arm_ffa.h

@@ -94,6 +94,14 @@
*/
#define FFA_PAGE_SIZE SZ_4K
/*
* Minimum buffer size/alignment encodings returned by an FFA_FEATURES
* query for FFA_RXTX_MAP.
*/
#define FFA_FEAT_RXTX_MIN_SZ_4K 0
#define FFA_FEAT_RXTX_MIN_SZ_64K 1
#define FFA_FEAT_RXTX_MIN_SZ_16K 2
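/* The non-monotonic ordering (4K, 64K, 16K) matches the FF-A spec encoding. */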
/* FFA Bus/Device/Driver related */
struct ffa_device {
int vm_id;