Commit ed8ccaef authored by Tadeusz Struk's avatar Tadeusz Struk Committed by Herbert Xu

crypto: qat - Add support for SRIOV

Add code that enables SRIOV on dh895xcc devices.
Signed-off-by: default avatarTadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
parent a5733139
......@@ -19,3 +19,4 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
......@@ -46,13 +46,17 @@
*/
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
#include "adf_cfg_common.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
......@@ -79,6 +83,7 @@ struct adf_bar {
struct adf_accel_msix {
struct msix_entry *entries;
char **names;
u32 num_entries;
} __packed;
struct adf_accel_pci {
......@@ -99,6 +104,7 @@ enum dev_sku_info {
DEV_SKU_2,
DEV_SKU_3,
DEV_SKU_4,
DEV_SKU_VF,
DEV_SKU_UNKNOWN,
};
......@@ -113,6 +119,8 @@ static inline const char *get_sku_info(enum dev_sku_info info)
return "SKU3";
case DEV_SKU_4:
return "SKU4";
case DEV_SKU_VF:
return "SKUVF";
case DEV_SKU_UNKNOWN:
default:
break;
......@@ -140,6 +148,8 @@ struct adf_hw_device_data {
uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
uint32_t (*get_pf2vf_offset)(uint32_t i);
uint32_t (*get_vintmsk_offset)(uint32_t i);
enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev);
......@@ -151,7 +161,9 @@ struct adf_hw_device_data {
void (*exit_arb)(struct adf_accel_dev *accel_dev);
void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
const uint32_t **cfg);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
const char *fw_name;
const char *fw_mmp_name;
uint32_t fuses;
......@@ -165,6 +177,7 @@ struct adf_hw_device_data {
uint8_t num_accel;
uint8_t num_logical_accel;
uint8_t num_engines;
uint8_t min_iov_compat_ver;
} __packed;
/* CSR write macro */
......@@ -189,6 +202,15 @@ struct adf_fw_loader_data {
const struct firmware *mmp_fw;
};
/* Per-VF state kept by the PF for each virtual function it manages. */
struct adf_accel_vf_info {
struct adf_accel_dev *accel_dev; /* PF accel device this VF belongs to */
struct tasklet_struct vf2pf_bh_tasklet; /* presumably bottom half for VF->PF message IRQs — TODO confirm against IRQ handler */
struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
struct ratelimit_state vf2pf_ratelimit; /* NOTE(review): looks like it rate-limits VF->PF message handling — confirm at use site */
u32 vf_nr; /* index of this VF within the PF's vf_info array */
bool init; /* set once this VF slot has been initialized */
};
struct adf_accel_dev {
struct adf_etr_data *transport;
struct adf_hw_device_data *hw_device;
......@@ -202,6 +224,21 @@ struct adf_accel_dev {
struct list_head list;
struct module *owner;
struct adf_accel_pci accel_pci_dev;
union {
struct {
/* vf_info is non-NULL once SR-IOV has been initialized */
struct adf_accel_vf_info *vf_info;
} pf;
struct {
char *irq_name;
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
struct completion iov_msg_completion;
uint8_t compatible;
uint8_t pf_version;
} vf;
};
bool is_vf;
uint8_t accel_id;
} __packed;
#endif
......@@ -91,6 +91,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
accel_dev->accel_id);
if (!parent)
parent = pdev;
if (!pci_wait_for_pending_transaction(pdev))
dev_info(&GET_DEV(accel_dev),
"Transaction still in progress. Proceeding\n");
......
......@@ -178,6 +178,9 @@ void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
{
struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
if (!dev_cfg_data)
return;
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
......
......@@ -60,7 +60,7 @@
#define ADF_CFG_NO_DEVICE 0xFF
#define ADF_CFG_AFFINITY_WHATEVER 0xFF
#define MAX_DEVICE_NAME_SIZE 32
#define ADF_MAX_DEVICES 32
#define ADF_MAX_DEVICES (32 * 32)
enum adf_cfg_val_type {
ADF_DEC,
......@@ -71,6 +71,7 @@ enum adf_cfg_val_type {
enum adf_device_type {
DEV_UNKNOWN = 0,
DEV_DH895XCC,
DEV_DH895XCCVF,
};
struct adf_dev_status_info {
......
......@@ -54,8 +54,8 @@
#include "icp_qat_hal.h"
#define ADF_MAJOR_VERSION 0
#define ADF_MINOR_VERSION 1
#define ADF_BUILD_VERSION 4
#define ADF_MINOR_VERSION 2
#define ADF_BUILD_VERSION 0
#define ADF_DRV_VERSION __stringify(ADF_MAJOR_VERSION) "." \
__stringify(ADF_MINOR_VERSION) "." \
__stringify(ADF_BUILD_VERSION)
......@@ -95,7 +95,7 @@ struct service_hndl {
static inline int get_current_node(void)
{
return cpu_data(current_thread_info()->cpu).phys_proc_id;
return topology_physical_package_id(smp_processor_id());
}
int adf_service_register(struct service_hndl *service);
......@@ -106,13 +106,23 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
int adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
int adf_ctl_dev_register(void);
void adf_ctl_dev_unregister(void);
int adf_processes_dev_register(void);
void adf_processes_dev_unregister(void);
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
struct list_head *adf_devmgr_get_head(void);
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
struct adf_accel_dev *adf_devmgr_get_first(void);
......@@ -211,4 +221,21 @@ int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
void *addr_ptr, int mem_size);
#if defined(CONFIG_PCI_IOV)
int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
void adf_disable_sriov(struct adf_accel_dev *accel_dev);
void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
#else
static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
return 0;
}
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
#endif
#endif
......@@ -398,10 +398,9 @@ static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
}
accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
if (!accel_dev) {
pr_err("QAT: Device %d not found\n", dev_info.accel_id);
if (!accel_dev)
return -ENODEV;
}
hw_data = accel_dev->hw_device;
dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
dev_info.num_ae = hw_data->get_num_aes(hw_data);
......@@ -495,6 +494,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
adf_exit_aer();
qat_crypto_unregister();
qat_algs_exit();
adf_clean_vf_map(false);
mutex_destroy(&adf_ctl_lock);
}
......
......@@ -50,21 +50,125 @@
#include "adf_common_drv.h"
static LIST_HEAD(accel_table);
static LIST_HEAD(vfs_table);
static DEFINE_MUTEX(table_lock);
static uint32_t num_devices;
/*
 * Mapping between a function's PCI address and its device ids.
 * PF entries are stored with bdf == ~0; VF entries carry the real
 * (bus << 8 | vf-index) value produced by adf_get_vf_num().
 */
struct vf_id_map {
u32 bdf; /* (bus << 8) | vf index, or ~0 for a PF entry */
u32 id; /* real internal accel_id */
u32 fake_id; /* user-visible id; shifts as VFs attach/detach */
bool attached; /* false while the VF is detached — presumably assigned elsewhere, TODO confirm */
struct list_head list;
};
/* Derive the VF index on its bus from the VF's PCI slot/function. */
static int adf_get_vf_id(struct adf_accel_dev *vf)
{
	unsigned int devfn = accel_to_pci_dev(vf)->devfn;

	/* 7*(slot-1) + func + (slot-1) == 8*(slot-1) + func */
	return 8 * (PCI_SLOT(devfn) - 1) + PCI_FUNC(devfn);
}
static int adf_get_vf_num(struct adf_accel_dev *vf)
{
return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
}
/* Look up the vf_id_map entry matching @bdf, or NULL if none exists. */
static struct vf_id_map *adf_find_vf(u32 bdf)
{
	struct vf_id_map *map;

	list_for_each_entry(map, &vfs_table, list) {
		if (map->bdf == bdf)
			return map;
	}

	return NULL;
}
/* Translate a user-visible (fake) id back to the internal id; -1 if unknown. */
static int adf_get_vf_real_id(u32 fake)
{
	struct vf_id_map *map;

	list_for_each_entry(map, &vfs_table, list) {
		if (map->fake_id == fake)
			return map->id;
	}

	return -1;
}
/**
 * adf_clean_vf_map() - Cleans VF id mappings
 * @vf: flag indicating whether mappings are cleaned
 *	for vfs only or for vfs and pfs
 *
 * Function cleans internal ids for virtual functions.
 */
void adf_clean_vf_map(bool vf)
{
	struct vf_id_map *map, *tmp;

	mutex_lock(&table_lock);
	list_for_each_entry_safe(map, tmp, &vfs_table, list) {
		/* Entries with a real bdf are VFs; drop them from the count. */
		if (map->bdf != -1)
			num_devices--;

		/* When cleaning VFs only, keep the PF (bdf == -1) entries. */
		if (vf && map->bdf == -1)
			continue;

		list_del(&map->list);
		kfree(map);
	}
	mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_clean_vf_map);
/**
* adf_devmgr_update_class_index() - Update internal index
* @hw_data: Pointer to internal device data.
*
* Function updates internal dev index for VFs
*/
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
{
struct adf_hw_device_class *class = hw_data->dev_class;
struct list_head *itr;
int i = 0;
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
if (ptr->hw_device->dev_class == class)
ptr->hw_device->instance_id = i++;
if (i == class->instances)
break;
}
}
EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
/**
* adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
* @accel_dev: Pointer to acceleration device.
* @pf: Corresponding PF if the accel_dev is a VF
*
* Function adds acceleration device to the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: 0 on success, error code otherwise.
*/
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf)
{
struct list_head *itr;
int ret = 0;
if (num_devices == ADF_MAX_DEVICES) {
dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
......@@ -73,20 +177,77 @@ int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
}
mutex_lock(&table_lock);
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
atomic_set(&accel_dev->ref_count, 0);
/* PF on host or VF on guest */
if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
struct vf_id_map *map;
list_for_each(itr, &accel_table) {
struct adf_accel_dev *ptr =
list_entry(itr, struct adf_accel_dev, list);
if (ptr == accel_dev) {
mutex_unlock(&table_lock);
return -EEXIST;
if (ptr == accel_dev) {
ret = -EEXIST;
goto unlock;
}
}
list_add_tail(&accel_dev->list, &accel_table);
accel_dev->accel_id = num_devices++;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
goto unlock;
}
map->bdf = ~0;
map->id = accel_dev->accel_id;
map->fake_id = map->id;
map->attached = true;
list_add_tail(&map->list, &vfs_table);
} else if (accel_dev->is_vf && pf) {
/* VF on host */
struct adf_accel_vf_info *vf_info;
struct vf_id_map *map;
vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (map) {
struct vf_id_map *next;
accel_dev->accel_id = map->id;
list_add_tail(&accel_dev->list, &accel_table);
map->fake_id++;
map->attached = true;
next = list_next_entry(map, list);
while (next && &next->list != &vfs_table) {
next->fake_id++;
next = list_next_entry(next, list);
}
ret = 0;
goto unlock;
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map) {
ret = -ENOMEM;
goto unlock;
}
accel_dev->accel_id = num_devices++;
list_add_tail(&accel_dev->list, &accel_table);
map->bdf = adf_get_vf_num(accel_dev);
map->id = accel_dev->accel_id;
map->fake_id = map->id;
map->attached = true;
list_add_tail(&map->list, &vfs_table);
}
atomic_set(&accel_dev->ref_count, 0);
list_add_tail(&accel_dev->list, &accel_table);
accel_dev->accel_id = num_devices++;
unlock:
mutex_unlock(&table_lock);
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
......@@ -98,17 +259,37 @@ struct list_head *adf_devmgr_get_head(void)
/**
* adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
* @accel_dev: Pointer to acceleration device.
* @pf: Corresponding PF if the accel_dev is a VF
*
* Function removes acceleration device from the acceleration framework.
* To be used by QAT device specific drivers.
*
* Return: void
*/
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf)
{
mutex_lock(&table_lock);
if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
num_devices--;
} else if (accel_dev->is_vf && pf) {
struct vf_id_map *map, *next;
map = adf_find_vf(adf_get_vf_num(accel_dev));
if (!map) {
dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
goto unlock;
}
map->fake_id--;
map->attached = false;
next = list_next_entry(map, list);
while (next && &next->list != &vfs_table) {
next->fake_id--;
next = list_next_entry(next, list);
}
}
unlock:
list_del(&accel_dev->list);
num_devices--;
mutex_unlock(&table_lock);
}
EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
......@@ -154,17 +335,24 @@ EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
/* Find the accel device for a user-visible id; NULL when not present. */
struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
{
	struct adf_accel_dev *ptr, *found = NULL;
	int real_id;

	mutex_lock(&table_lock);
	/* Callers pass the user-visible (fake) id; map it back first. */
	real_id = adf_get_vf_real_id(id);
	if (real_id >= 0) {
		list_for_each_entry(ptr, &accel_table, list) {
			if (ptr->accel_id == real_id) {
				found = ptr;
				break;
			}
		}
	}
	mutex_unlock(&table_lock);

	return found;
}
......@@ -180,16 +368,52 @@ int adf_devmgr_verify_id(uint32_t id)
return -ENODEV;
}
static int adf_get_num_dettached_vfs(void)
{
struct list_head *itr;
int vfs = 0;
mutex_lock(&table_lock);
list_for_each(itr, &vfs_table) {
struct vf_id_map *ptr =
list_entry(itr, struct vf_id_map, list);
if (ptr->bdf != ~0 && !ptr->attached)
vfs++;
}
mutex_unlock(&table_lock);
return vfs;
}
void adf_devmgr_get_num_dev(uint32_t *num)
{
*num = num_devices;
*num = num_devices - adf_get_num_dettached_vfs();
}
/**
 * adf_dev_in_use() - Check whether accel_dev is currently in use
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when device is in use, 0 otherwise.
 */
int adf_dev_in_use(struct adf_accel_dev *accel_dev)
{
	/* Any outstanding reference means the device is busy. */
	return atomic_read(&accel_dev->ref_count) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(adf_dev_in_use);
/**
* adf_dev_get() - Increment accel_dev reference count
* @accel_dev: Pointer to acceleration device.
*
* Increment the accel_dev refcount and if this is the first time
* incrementing it during this period the accel_dev is in use,
* increment the module refcount too.
* To be used by QAT device specific drivers.
*
* Return: 0 when successful, EFAULT when fail to bump module refcount
*/
int adf_dev_get(struct adf_accel_dev *accel_dev)
{
if (atomic_add_return(1, &accel_dev->ref_count) == 1)
......@@ -197,19 +421,50 @@ int adf_dev_get(struct adf_accel_dev *accel_dev)
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_get);
/**
 * adf_dev_put() - Decrement accel_dev reference count
 * @accel_dev: Pointer to acceleration device.
 *
 * Decrement the accel_dev refcount and if this is the last time
 * decrementing it during this period the accel_dev is in use,
 * decrement the module refcount too.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_dev_put(struct adf_accel_dev *accel_dev)
{
	/* Release the module reference together with the last device ref. */
	if (!atomic_sub_return(1, &accel_dev->ref_count))
		module_put(accel_dev->owner);
}
EXPORT_SYMBOL_GPL(adf_dev_put);
/**
 * adf_devmgr_in_reset() - Check whether device is in reset
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device is being reset, 0 otherwise.
 */
int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
/**
 * adf_dev_started() - Check whether device has started
 * @accel_dev: Pointer to acceleration device.
 *
 * To be used by QAT device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise
 */
int adf_dev_started(struct adf_accel_dev *accel_dev)
{
	return test_bit(ADF_STATUS_STARTED, &accel_dev->status) ? 1 : 0;
}
EXPORT_SYMBOL_GPL(adf_dev_started);
......@@ -187,6 +187,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
}
hw_data->enable_error_correction(accel_dev);
hw_data->enable_vf2pf_comms(accel_dev);
return 0;
}
......@@ -235,7 +236,8 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
set_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (qat_algs_register() || qat_asym_algs_register()) {
if (!list_empty(&accel_dev->crypto_list) &&
(qat_algs_register() || qat_asym_algs_register())) {
dev_err(&GET_DEV(accel_dev),
"Failed to register crypto algs\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
......@@ -270,11 +272,12 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (qat_algs_unregister())
if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister())
dev_err(&GET_DEV(accel_dev),
"Failed to unregister crypto algs\n");
qat_asym_algs_unregister();
if (!list_empty(&accel_dev->crypto_list))
qat_asym_algs_unregister();
list_for_each(list_itr, &service_table) {
service = list_entry(list_itr, struct service_hndl, list);
......@@ -363,6 +366,7 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev);
hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_dev_shutdown);
......
This diff is collapsed.
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
Contact Information:
qat-linux@intel.com
BSD LICENSE
Copyright(c) 2015 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ADF_PF2VF_MSG_H
#define ADF_PF2VF_MSG_H
/*
* PF<->VF Messaging
* The PF has an array of 32-bit PF2VF registers, one for each VF. The
* PF can access all these registers; each VF can access only the one
* register associated with that particular VF.
*
* The register functionally is split into two parts:
* The bottom half is for PF->VF messages. In particular when the first
* bit of this register (bit 0) gets set an interrupt will be triggered
* in the respective VF.
* The top half is for VF->PF messages. In particular when the first bit
* of this half of register (bit 16) gets set an interrupt will be triggered