Commit ad0ebb91 authored by David Gibson, committed by Anthony Liguori

pseries: Convert sPAPR TCEs to use generic IOMMU infrastructure

The pseries platform already contains an IOMMU implementation, since it is
essential for the platform's paravirtualized VIO devices.  This IOMMU
support is currently built into the implementation of the VIO "bus" and
the various VIO devices.

This patch converts this code to make use of the new common IOMMU
infrastructure.

We don't yet handle synchronization of map/unmap callbacks vs. invalidations;
this will require some complex interaction with the kernel and is not a
major concern at this stage.

Cc: Alex Graf <agraf@suse.de>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
parent e5332e63
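For orientation, here is a minimal sketch of how a converted device is expected to use the interfaces this patch introduces. Only spapr_tce_new_dma_context() and spapr_dma_dt() are taken from the patch itself; the device function, the liobn and the 0x10000000 window size are illustrative, and dma_memory_read() is assumed to be the generic DMAContext accessor from dma.h at this point in the tree.

/* Illustrative sketch only -- not part of the patch. */
#include "dma.h"
#include "hw/spapr.h"

static void example_vio_dma_setup(uint32_t liobn, void *fdt, int node_off)
{
    /* Create a TCE-backed DMA context with a 256 MiB window (the size
     * is an example value; each device picks its own window size). */
    DMAContext *dma = spapr_tce_new_dma_context(liobn, 0x10000000);

    /* Describe the window to the guest via the device tree. */
    spapr_dma_dt(fdt, node_off, "ibm,my-dma-window", dma);

    /* Device DMA then goes through the generic helpers, which call
     * back into spapr_tce_translate() (assumed dma.h API). */
    uint8_t buf[64];
    dma_memory_read(dma, 0x1000, buf, sizeof(buf));
}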
@@ -10,7 +10,7 @@ obj-y += ppc_newworld.o
# IBM pSeries (sPAPR)
obj-$(CONFIG_PSERIES) += spapr.o spapr_hcall.o spapr_rtas.o spapr_vio.o
obj-$(CONFIG_PSERIES) += xics.o spapr_vty.o spapr_llan.o spapr_vscsi.o
obj-$(CONFIG_PSERIES) += spapr_pci.o pci-hotplug.o
obj-$(CONFIG_PSERIES) += spapr_pci.o pci-hotplug.o spapr_iommu.o
# PowerPC 4xx boards
obj-y += ppc4xx_devs.o ppc4xx_pci.o ppc405_uc.o ppc405_boards.o
obj-y += ppc440_bamboo.o
......
@@ -674,6 +674,9 @@ static void ppc_spapr_init(ram_addr_t ram_size,
spapr->icp = xics_system_init(XICS_IRQS);
spapr->next_irq = 16;
/* Set up IOMMU */
spapr_iommu_init();
/* Set up VIO bus */
spapr->vio_bus = spapr_vio_bus_init();
......
#if !defined(__HW_SPAPR_H__)
#define __HW_SPAPR_H__
#include "dma.h"
#include "hw/xics.h"
struct VIOsPAPRBus;
@@ -320,4 +321,20 @@ target_ulong spapr_rtas_call(sPAPREnvironment *spapr,
int spapr_rtas_device_tree_setup(void *fdt, target_phys_addr_t rtas_addr,
target_phys_addr_t rtas_size);
#define SPAPR_TCE_PAGE_SHIFT 12
#define SPAPR_TCE_PAGE_SIZE (1ULL << SPAPR_TCE_PAGE_SHIFT)
#define SPAPR_TCE_PAGE_MASK (SPAPR_TCE_PAGE_SIZE - 1)
typedef struct sPAPRTCE {
uint64_t tce;
} sPAPRTCE;
#define SPAPR_VIO_BASE_LIOBN 0x00000000
void spapr_iommu_init(void);
DMAContext *spapr_tce_new_dma_context(uint32_t liobn, size_t window_size);
void spapr_tce_free(DMAContext *dma);
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
DMAContext *dma);
#endif /* !defined (__HW_SPAPR_H__) */
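Sizing note (example figures, not from the patch): each TCE covers one 4 KiB I/O page (SPAPR_TCE_PAGE_SHIFT = 12) and occupies sizeof(sPAPRTCE) = 8 bytes, so a 0x10000000-byte (256 MiB) window needs 0x10000000 >> 12 = 65536 entries, i.e. 512 KiB of table. This is exactly the table_size computation in spapr_tce_new_dma_context() below.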
/*
* QEMU sPAPR IOMMU (TCE) code
*
* Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "hw.h"
#include "kvm.h"
#include "qdev.h"
#include "kvm_ppc.h"
#include "dma.h"
#include "hw/spapr.h"
#include <libfdt.h>
/* #define DEBUG_TCE */
enum sPAPRTCEAccess {
SPAPR_TCE_FAULT = 0,
SPAPR_TCE_RO = 1,
SPAPR_TCE_WO = 2,
SPAPR_TCE_RW = 3,
};
typedef struct sPAPRTCETable sPAPRTCETable;
struct sPAPRTCETable {
DMAContext dma;
uint32_t liobn;
uint32_t window_size;
sPAPRTCE *table;
int fd;
QLIST_ENTRY(sPAPRTCETable) list;
};
QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;
static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
{
sPAPRTCETable *tcet;
QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
if (tcet->liobn == liobn) {
return tcet;
}
}
return NULL;
}
static int spapr_tce_translate(DMAContext *dma,
dma_addr_t addr,
target_phys_addr_t *paddr,
target_phys_addr_t *len,
DMADirection dir)
{
sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma);
enum sPAPRTCEAccess access = (dir == DMA_DIRECTION_FROM_DEVICE)
? SPAPR_TCE_WO : SPAPR_TCE_RO;
uint64_t tce;
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_translate liobn=0x%" PRIx32 " addr=0x"
DMA_ADDR_FMT "\n", tcet->liobn, addr);
#endif
/* Check if we are in bounds */
if (addr >= tcet->window_size) {
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_translate out of bounds\n");
#endif
return -EFAULT;
}
tce = tcet->table[addr >> SPAPR_TCE_PAGE_SHIFT].tce;
/* Check TCE */
if (!(tce & access)) {
return -EPERM;
}
/* How much until the end of the page? */
*len = ((~addr) & SPAPR_TCE_PAGE_MASK) + 1;
/* Translate */
*paddr = (tce & ~SPAPR_TCE_PAGE_MASK) |
(addr & SPAPR_TCE_PAGE_MASK);
#ifdef DEBUG_TCE
fprintf(stderr, " -> *paddr=0x" TARGET_FMT_plx ", *len=0x"
TARGET_FMT_plx "\n", *paddr, *len);
#endif
return 0;
}
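/*
 * Worked example (illustrative values, not from the patch): with
 * SPAPR_TCE_PAGE_SIZE = 4096, suppose tcet->table[1].tce == 0x12345003.
 * The low bits encode SPAPR_TCE_RW, so both directions pass the access
 * check.  An I/O address of 0x1234 indexes entry 1 (0x1234 >> 12), and
 * the code above yields *paddr = 0x12345000 | 0x234 = 0x12345234 and
 * *len = ((~0x1234) & 0xfff) + 1 = 0xdcc, i.e. the remainder of the
 * 4 KiB page starting at that offset.
 */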
DMAContext *spapr_tce_new_dma_context(uint32_t liobn, size_t window_size)
{
sPAPRTCETable *tcet;
if (!window_size) {
return NULL;
}
tcet = g_malloc0(sizeof(*tcet));
dma_context_init(&tcet->dma, spapr_tce_translate, NULL, NULL);
tcet->liobn = liobn;
tcet->window_size = window_size;
if (kvm_enabled()) {
tcet->table = kvmppc_create_spapr_tce(liobn,
window_size,
&tcet->fd);
}
if (!tcet->table) {
size_t table_size = (window_size >> SPAPR_TCE_PAGE_SHIFT)
* sizeof(sPAPRTCE);
tcet->table = g_malloc0(table_size);
}
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_iommu: New TCE table, liobn=0x%x, context @ %p, "
"table @ %p, fd=%d\n", liobn, &tcet->dma, tcet->table, tcet->fd);
#endif
QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);
return &tcet->dma;
}
void spapr_tce_free(DMAContext *dma)
{
if (dma) {
sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma);
QLIST_REMOVE(tcet, list);
if (!kvm_enabled() ||
(kvmppc_remove_spapr_tce(tcet->table, tcet->fd,
tcet->window_size) != 0)) {
g_free(tcet->table);
}
g_free(tcet);
}
}
static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong liobn = args[0];
target_ulong ioba = args[1];
target_ulong tce = args[2];
sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
sPAPRTCE *tcep;
if (liobn & 0xFFFFFFFF00000000ULL) {
hcall_dprintf("spapr_vio_put_tce on out-of-boundsw LIOBN "
TARGET_FMT_lx "\n", liobn);
return H_PARAMETER;
}
if (!tcet) {
hcall_dprintf("spapr_vio_put_tce on non-existent LIOBN "
TARGET_FMT_lx "\n", liobn);
return H_PARAMETER;
}
ioba &= ~(SPAPR_TCE_PAGE_SIZE - 1);
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_vio_put_tce on liobn=" TARGET_FMT_lx /*%s*/
" ioba 0x" TARGET_FMT_lx " TCE 0x" TARGET_FMT_lx "\n",
liobn, /*dev->qdev.id, */ioba, tce);
#endif
if (ioba >= tcet->window_size) {
hcall_dprintf("spapr_vio_put_tce on out-of-boards IOBA 0x"
TARGET_FMT_lx "\n", ioba);
return H_PARAMETER;
}
tcep = tcet->table + (ioba >> SPAPR_TCE_PAGE_SHIFT);
tcep->tce = tce;
return H_SUCCESS;
}
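/*
 * Calling convention example (values illustrative): a guest issuing
 * H_PUT_TCE with args[0] = liobn, args[1] = ioba = 0x3000 and
 * args[2] = tce = 0x56789003 asks for I/O page 3 of that window to be
 * mapped read-write (low bits SPAPR_TCE_RW) to real page 0x56789000;
 * a tce of 0 (SPAPR_TCE_FAULT) clears the mapping again.  The handler
 * above simply stores the value; permission checking happens later in
 * spapr_tce_translate().
 */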
void spapr_iommu_init(void)
{
QLIST_INIT(&spapr_tce_tables);
/* hcall-tce */
spapr_register_hypercall(H_PUT_TCE, h_put_tce);
}
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
DMAContext *dma)
{
if (dma) {
sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma);
uint32_t dma_prop[] = {cpu_to_be32(tcet->liobn),
0, 0,
0, cpu_to_be32(tcet->window_size)};
int ret;
ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
if (ret < 0) {
return ret;
}
ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
if (ret < 0) {
return ret;
}
ret = fdt_setprop(fdt, node_off, propname, dma_prop,
sizeof(dma_prop));
if (ret < 0) {
return ret;
}
}
return 0;
}
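As a concrete illustration of the property spapr_dma_dt() writes (values hypothetical): for a table created with liobn 0x71000003 and a 0x10000000-byte window, "ibm,my-dma-window" ends up holding the five 32-bit cells { 0x71000003, 0x0, 0x0, 0x0, 0x10000000 }, stored big-endian via cpu_to_be32(): the LIOBN, a two-cell (64-bit) bus address of 0, and a two-cell window size, which matches the "ibm,#dma-address-cells" and "ibm,#dma-size-cells" values of 2 set just above.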
@@ -71,7 +71,7 @@ typedef uint64_t vlan_bd_t;
#define VLAN_RXQ_BD_OFF 0
#define VLAN_FILTER_BD_OFF 8
#define VLAN_RX_BDS_OFF 16
#define VLAN_MAX_BUFS ((SPAPR_VIO_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF) / 8)
#define VLAN_MAX_BUFS ((SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF) / 8)
typedef struct VIOsPAPRVLANDevice {
VIOsPAPRDevice sdev;
@@ -95,7 +95,7 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
{
VIOsPAPRDevice *sdev = DO_UPCAST(NICState, nc, nc)->opaque;
VIOsPAPRVLANDevice *dev = (VIOsPAPRVLANDevice *)sdev;
vlan_bd_t rxq_bd = ldq_tce(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
vlan_bd_t bd;
int buf_ptr = dev->use_buf_ptr;
uint64_t handle;
@@ -114,11 +114,11 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
do {
buf_ptr += 8;
if (buf_ptr >= SPAPR_VIO_TCE_PAGE_SIZE) {
if (buf_ptr >= SPAPR_TCE_PAGE_SIZE) {
buf_ptr = VLAN_RX_BDS_OFF;
}
bd = ldq_tce(sdev, dev->buf_list + buf_ptr);
bd = vio_ldq(sdev, dev->buf_list + buf_ptr);
dprintf("use_buf_ptr=%d bd=0x%016llx\n",
buf_ptr, (unsigned long long)bd);
} while ((!(bd & VLAN_BD_VALID) || (VLAN_BD_LEN(bd) < (size + 8)))
@@ -132,12 +132,12 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
/* Remove the buffer from the pool */
dev->rx_bufs--;
dev->use_buf_ptr = buf_ptr;
stq_tce(sdev, dev->buf_list + dev->use_buf_ptr, 0);
vio_stq(sdev, dev->buf_list + dev->use_buf_ptr, 0);
dprintf("Found buffer: ptr=%d num=%d\n", dev->use_buf_ptr, dev->rx_bufs);
/* Transfer the packet data */
if (spapr_tce_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
return -1;
}
@@ -149,23 +149,23 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
control ^= VLAN_RXQC_TOGGLE;
}
handle = ldq_tce(sdev, VLAN_BD_ADDR(bd));
stq_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
stw_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
sth_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
stb_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);
handle = vio_ldq(sdev, VLAN_BD_ADDR(bd));
vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);
dprintf("wrote rxq entry (ptr=0x%llx): 0x%016llx 0x%016llx\n",
(unsigned long long)dev->rxq_ptr,
(unsigned long long)ldq_tce(sdev, VLAN_BD_ADDR(rxq_bd) +
(unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr),
(unsigned long long)ldq_tce(sdev, VLAN_BD_ADDR(rxq_bd) +
(unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr + 8));
dev->rxq_ptr += 16;
if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
dev->rxq_ptr = 0;
stq_tce(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE);
vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE);
}
if (sdev->signal_state & 1) {
@@ -254,8 +254,10 @@ static int check_bd(VIOsPAPRVLANDevice *dev, vlan_bd_t bd,
return -1;
}
if (spapr_vio_check_tces(&dev->sdev, VLAN_BD_ADDR(bd),
VLAN_BD_LEN(bd), SPAPR_TCE_RW) != 0) {
if (!spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
VLAN_BD_LEN(bd), DMA_DIRECTION_FROM_DEVICE)
|| !spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
VLAN_BD_LEN(bd), DMA_DIRECTION_TO_DEVICE)) {
return -1;
}
@@ -285,14 +287,14 @@ static target_ulong h_register_logical_lan(CPUPPCState *env,
return H_RESOURCE;
}
if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_VIO_TCE_PAGE_SIZE),
SPAPR_VIO_TCE_PAGE_SIZE) < 0) {
if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE),
SPAPR_TCE_PAGE_SIZE) < 0) {
hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list);
return H_PARAMETER;
}
filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_VIO_TCE_PAGE_SIZE);
if (check_bd(dev, filter_list_bd, SPAPR_VIO_TCE_PAGE_SIZE) < 0) {
filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE);
if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) {
hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list);
return H_PARAMETER;
}
@@ -309,17 +311,17 @@ static target_ulong h_register_logical_lan(CPUPPCState *env,
rec_queue &= ~VLAN_BD_TOGGLE;
/* Initialize the buffer list */
stq_tce(sdev, buf_list, rec_queue);
stq_tce(sdev, buf_list + 8, filter_list_bd);
spapr_tce_dma_zero(sdev, buf_list + VLAN_RX_BDS_OFF,
SPAPR_VIO_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
vio_stq(sdev, buf_list, rec_queue);
vio_stq(sdev, buf_list + 8, filter_list_bd);
spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0,
SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8;
dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8;
dev->rx_bufs = 0;
dev->rxq_ptr = 0;
/* Initialize the receive queue */
spapr_tce_dma_zero(sdev, VLAN_BD_ADDR(rec_queue), VLAN_BD_LEN(rec_queue));
spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0, VLAN_BD_LEN(rec_queue));
dev->isopen = 1;
return H_SUCCESS;
@@ -378,14 +380,14 @@ static target_ulong h_add_logical_lan_buffer(CPUPPCState *env,
do {
dev->add_buf_ptr += 8;
if (dev->add_buf_ptr >= SPAPR_VIO_TCE_PAGE_SIZE) {
if (dev->add_buf_ptr >= SPAPR_TCE_PAGE_SIZE) {
dev->add_buf_ptr = VLAN_RX_BDS_OFF;
}
bd = ldq_tce(sdev, dev->buf_list + dev->add_buf_ptr);
bd = vio_ldq(sdev, dev->buf_list + dev->add_buf_ptr);
} while (bd & VLAN_BD_VALID);
stq_tce(sdev, dev->buf_list + dev->add_buf_ptr, buf);
vio_stq(sdev, dev->buf_list + dev->add_buf_ptr, buf);
dev->rx_bufs++;
@@ -451,7 +453,7 @@ static target_ulong h_send_logical_lan(CPUPPCState *env, sPAPREnvironment *spapr
lbuf = alloca(total_len);
p = lbuf;
for (i = 0; i < nbufs; i++) {
ret = spapr_tce_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),
ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),
p, VLAN_BD_LEN(bufs[i]));
if (ret < 0) {
return ret;
@@ -479,7 +481,7 @@ static target_ulong h_multicast_ctrl(CPUPPCState *env, sPAPREnvironment *spapr,
}
static Property spapr_vlan_properties[] = {
DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev, 0x10000000),
DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev),
DEFINE_NIC_PROPERTIES(VIOsPAPRVLANDevice, nicconf),
DEFINE_PROP_END_OF_LIST(),
};
@@ -497,6 +499,7 @@ static void spapr_vlan_class_init(ObjectClass *klass, void *data)
k->dt_compatible = "IBM,l-lan";
k->signal_mask = 0x1;
dc->props = spapr_vlan_properties;
k->rtce_window_size = 0x10000000;
}
static TypeInfo spapr_vlan_info = {
......
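The vio_ldq()/vio_stq()/vio_stl()/vio_sth()/vio_stb() accessors and the spapr_vio_dma_read()/spapr_vio_dma_write()/spapr_vio_dma_set()/spapr_vio_dma_valid() helpers that replace the old *_tce routines above are not shown in this excerpt (presumably they live in hw/spapr_vio.h). They are presumably thin wrappers around the device's new DMAContext; a rough sketch under that assumption follows (the dma_memory_read() and ldq_dma()/stq_dma() names are assumed generic helpers from dma.h, not confirmed by this excerpt):

/* Hypothetical sketch only -- the real wrappers are defined elsewhere
 * in this patch. */
static inline int spapr_vio_dma_read(VIOsPAPRDevice *dev, uint64_t taddr,
                                     void *buf, uint32_t size)
{
    /* Route the access through the device's DMAContext so the TCE
     * table set up by spapr_tce_new_dma_context() is honoured. */
    return dma_memory_read(dev->dma, taddr, buf, size) ? H_DEST_PARM : H_SUCCESS;
}

#define vio_ldq(_dev, _addr)        ldq_dma((_dev)->dma, (_addr))
#define vio_stq(_dev, _addr, _val)  stq_dma((_dev)->dma, (_addr), (_val))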
@@ -39,7 +39,6 @@
#endif /* CONFIG_FDT */
/* #define DEBUG_SPAPR */
/* #define DEBUG_TCE */
#ifdef DEBUG_SPAPR
#define dprintf(fmt, ...) \
@@ -143,26 +142,9 @@ static int vio_make_devnode(VIOsPAPRDevice *dev,
}
}
if (dev->rtce_window_size) {
uint32_t dma_prop[] = {cpu_to_be32(dev->reg),
0, 0,
0, cpu_to_be32(dev->rtce_window_size)};
ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
if (ret < 0) {
return ret;
}
ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
if (ret < 0) {
return ret;
}
ret = fdt_setprop(fdt, node_off, "ibm,my-dma-window", dma_prop,
sizeof(dma_prop));
if (ret < 0) {
return ret;
}
ret = spapr_dma_dt(fdt, node_off, "ibm,my-dma-window", dev->dma);
if (ret < 0) {
return ret;
}
if (pc->devnode) {
@@ -176,232 +158,6 @@
}
#endif /* CONFIG_FDT */
/*
* RTCE handling
*/
static void rtce_init(VIOsPAPRDevice *dev)
{
size_t size = (dev->rtce_window_size >> SPAPR_VIO_TCE_PAGE_SHIFT)
* sizeof(VIOsPAPR_RTCE);
if (size) {
dev->rtce_table = kvmppc_create_spapr_tce(dev->reg,
dev->rtce_window_size,
&dev->kvmtce_fd);
if (!dev->rtce_table) {
dev->rtce_table = g_malloc0(size);
}
}
}
static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr,
target_ulong opcode, target_ulong *args)
{
target_ulong liobn = args[0];
target_ulong ioba = args[1];
target_ulong tce = args[2];
VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, liobn);
VIOsPAPR_RTCE *rtce;
if (!dev) {
hcall_dprintf("LIOBN 0x" TARGET_FMT_lx " does not exist\n", liobn);
return H_PARAMETER;
}
ioba &= ~(SPAPR_VIO_TCE_PAGE_SIZE - 1);
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_vio_put_tce on %s ioba 0x" TARGET_FMT_lx
" TCE 0x" TARGET_FMT_lx "\n", dev->qdev.id, ioba, tce);
#endif
if (ioba >= dev->rtce_window_size) {
hcall_dprintf("Out-of-bounds IOBA 0x" TARGET_FMT_lx "\n", ioba);
return H_PARAMETER;
}
rtce = dev->rtce_table + (ioba >> SPAPR_VIO_TCE_PAGE_SHIFT);
rtce->tce = tce;
return H_SUCCESS;
}
int spapr_vio_check_tces(VIOsPAPRDevice *dev, target_ulong ioba,
target_ulong len, enum VIOsPAPR_TCEAccess access)
{
int start, end, i;
start = ioba >> SPAPR_VIO_TCE_PAGE_SHIFT;
end = (ioba + len - 1) >> SPAPR_VIO_TCE_PAGE_SHIFT;
for (i = start; i <= end; i++) {
if ((dev->rtce_table[i].tce & access) != access) {
#ifdef DEBUG_TCE
fprintf(stderr, "FAIL on %d\n", i);
#endif
return -1;
}
}
return 0;
}
int spapr_tce_dma_write(VIOsPAPRDevice *dev, uint64_t taddr, const void *buf,
uint32_t size)
{
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_dma_write taddr=0x%llx size=0x%x\n",
(unsigned long long)taddr, size);
#endif
/* Check for bypass */
if (dev->flags & VIO_PAPR_FLAG_DMA_BYPASS) {
cpu_physical_memory_write(taddr, buf, size);
return 0;
}
while (size) {
uint64_t tce;
uint32_t lsize;
uint64_t txaddr;
/* Check if we are in bound */
if (taddr >= dev->rtce_window_size) {
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_dma_write out of bounds\n");
#endif
return H_DEST_PARM;
}
tce = dev->rtce_table[taddr >> SPAPR_VIO_TCE_PAGE_SHIFT].tce;
/* How much til end of page ? */
lsize = MIN(size, ((~taddr) & SPAPR_VIO_TCE_PAGE_MASK) + 1);
/* Check TCE */
if (!(tce & 2)) {
return H_DEST_PARM;
}
/* Translate */
txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) |
(taddr & SPAPR_VIO_TCE_PAGE_MASK);
#ifdef DEBUG_TCE
fprintf(stderr, " -> write to txaddr=0x%llx, size=0x%x\n",
(unsigned long long)txaddr, lsize);
#endif
/* Do it */
cpu_physical_memory_write(txaddr, buf, lsize);