Commit bfd788c9 authored by David Johnson

An ML2 driver for using Capnet networks in OpenStack.

This is the source code (a `geni-lib` script, `osp-capnet.py`) for a
CloudLab profile that sets up and runs Capnet in an OpenStack cloud.
The profile lives at https://www.cloudlab.us/p/TCloud/OpenStack-Capnet .
It is essentially the CloudLab OpenStack profile
(https://www.cloudlab.us/p/emulab-ops/OpenStack), but it adds several
Capnet parameters, creates physical networks for Capnet to use, and
ships a set of extension scripts to the CloudLab OpenStack profile
that install Capnet and configure it according to the user's profile
parameters. It relies on this extension support being present in the
core CloudLab OpenStack profile tarball.
To make the extension tarball, in this directory, do:
$ tar -czvf setup-ext-capnet-vX.tar.gz setup-ext-capnet
Then, if you need to update the canonical, official tarball installed on
boss.emulab.net (the one the official profile references), get someone
with the necessary privileges to handle that :).
#!/bin/sh
set -x
DIRNAME=`dirname $0`
# Gotta know the rules!
if [ `id -u` -ne 0 ] ; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Grab our libs
. "$DIRNAME/../../setup-lib.sh"
if [ -f $SETTINGS ]; then
. $SETTINGS
fi
##
## First, we setup OVS stuff for Capnet physical networks.
##
$DIRNAME/setup-ovs-node.sh
##
## Second, however, we do all the Neutron config Capnet needs:
##
ML2TYPES=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 type_drivers "capnet,$ML2TYPES"
ML2TENANTTYPES=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 tenant_network_types "capnet,$ML2TENANTTYPES"
ML2MECHDRVS=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 mechanism_drivers "capnet,$ML2MECHDRVS"
ML2EXT=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers`
if [ ! -z "$ML2EXT" ] ; then
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 extension_drivers "capnet,$ML2EXT"
else
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 extension_drivers "capnet"
fi
crudini --set /etc/neutron/neutron.conf DEFAULT api_extensions_path \
/usr/lib/python2.7/dist-packages/networking_capnet/extensions
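#
# After the edits above, the [ml2] section of ml2_conf.ini should look
# roughly like this -- illustrative only, since the pre-existing driver
# lists come from the stock CloudLab OpenStack profile:
#
#   [ml2]
#   type_drivers = capnet,flat,vlan,gre,vxlan
#   tenant_network_types = capnet,vxlan
#   mechanism_drivers = capnet,openvswitch
#   extension_drivers = capnet
#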
##
## Setup our physical Capnet connections
##
capnet_networks=""
bridge_mappings=""
for lan in $DATACAPNETLANS ; do
if [ -n "${capnet_networks}" ]; then
capnet_networks="${capnet_networks},"
fi
if [ -n "${bridge_mappings}" ]; then
bridge_mappings="${bridge_mappings},"
fi
. $OURDIR/info.${lan}
capnet_networks="${capnet_networks}${lan}"
bridge_mappings="${bridge_mappings}${lan}:${DATABRIDGE}"
done
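# Example (hypothetical LAN names): with DATACAPNETLANS="capnetlan-1
# capnetlan-2", the loop above yields
#   capnet_networks="capnetlan-1,capnetlan-2"
#   bridge_mappings="capnetlan-1:br-capnetlan-1,capnetlan-2:br-capnetlan-2"
# since setup-ovs-node.sh names each bridge br-$lan.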
crudini --set /etc/neutron/plugins/ml2/ml2_conf_capnet.ini \
capnet bridge_mappings "$bridge_mappings"
crudini --set /etc/neutron/plugins/ml2/ml2_conf_capnet.ini \
capnet capnet_networks "$capnet_networks"
crudini --set /etc/nova/nova-compute.conf DEFAULT compute_driver \
compute_capnet.virt.libvirt.driver.CapnetLibvirtDriver
crudini --set /etc/nova/nova-compute.conf libvirt vif_driver \
compute_capnet.virt.libvirt.vif.CapnetLibvirtVIFDriver
crudini --set /etc/nova/nova-compute.conf \
capnet bridge_mappings "$bridge_mappings"
crudini --set /etc/nova/nova-compute.conf \
capnet capnet_networks "$capnet_networks"
##
## Ok, restart Neutron Capnet ML2 plugin
##
service_restart neutron-plugin-capnet-agent
service_enable neutron-plugin-capnet-agent
#!/bin/sh
set -x
DIRNAME=`dirname $0`
# Gotta know the rules!
if [ `id -u` -ne 0 ] ; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Grab our libs
. "$DIRNAME/../../setup-lib.sh"
if [ "$HOSTNAME" != "$CONTROLLER" ]; then
exit 0;
fi
if [ -f $SETTINGS ]; then
. $SETTINGS
fi
##
## First, we *don't* setup OVS stuff for Capnet physical networks.
## It's unnecessary for Neutron.
##
#$DIRNAME/setup-ovs-node.sh
##
## Second, however, we do all the Neutron config Capnet needs:
##
crudini --set /etc/neutron/neutron.conf DEFAULT core_plugin capnet
#crudini --set /etc/neutron/neutron.conf \
# DEFAULT core_plugin networking_capnet.plugins.ml2.plugin.CapnetMl2Plugin
ML2TYPES=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 type_drivers "capnet,$ML2TYPES"
ML2TENANTTYPES=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 tenant_network_types "capnet,$ML2TENANTTYPES"
ML2MECHDRVS=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 mechanism_drivers "capnet,$ML2MECHDRVS"
ML2EXT=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers`
if [ ! -z "$ML2EXT" ] ; then
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 extension_drivers "capnet,$ML2EXT"
else
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 extension_drivers "capnet"
fi
crudini --set /etc/neutron/neutron.conf DEFAULT api_extensions_path \
/usr/lib/python2.7/dist-packages/networking_capnet/extensions
##
## Hack the initscript a bit, to add ml2_conf_capnet.ini as a config file.
##
echo 'CONFIG_FILE="/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf_capnet.ini"' >> /etc/default/neutron-server
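# (The initscript passes --config-file=$CONFIG_FILE to neutron-server, so
# embedding a second --config-file flag in the value smuggles our extra
# plugin config onto the daemon's command line.)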
##
## Ok, restart Neutron
##
service_restart neutron-server
#!/bin/sh
##
## This script builds and installs the necessary deps required for the
## Capnet controller, as well as the Capnet Neutron Plugin, and
## configures it all based on the Cloudlab Openstack Profile
## (https://www.cloudlab.us/p/capnet/OpenStack-Capnet).
##
set -x
DIRNAME=`dirname $0`
# Gotta know the rules!
if [ `id -u` -ne 0 ] ; then
echo "This script must be run as root" 1>&2
exit 1
fi
LIBCAP_REPO="https://gitlab.flux.utah.edu/xcap/libcap.git"
LIBCAP_BRANCH="master"
OPENMUL_REPO="https://gitlab.flux.utah.edu/tcloud/openmul.git"
OPENMUL_BRANCH="capnet"
CAPNET_REPO="https://gitlab.flux.utah.edu/tcloud/capnet.git"
CAPNET_BRANCH="master"
#CAPNET_PLUGIN_REPO="https://gitlab.flux.utah.edu/tcloud/networking-capnet.git"
CAPNET_PLUGIN_REPO="http://www.emulab.net/downloads/networking-capnet.tar.gz"
CAPNET_PLUGIN_BRANCH="master"
# Grab our libs
. "$DIRNAME/../../setup-lib.sh"
if [ "$HOSTNAME" != "$CONTROLLER" ]; then
exit 0;
fi
if [ -f $SETTINGS ]; then
. $SETTINGS
fi
#
# openstack CLI commands seem flakey sometimes on Kilo and Liberty.
# Don't know if it's WSGI, mysql dropping connections, an NTP
# thing... but until it gets solved more permanently, have to retry :(.
#
__openstack() {
__err=1
__debug=
__times=0
while [ $__times -lt 16 -a $__err -ne 0 ]; do
openstack $__debug "$@"
__err=$?
if [ $__err -eq 0 ]; then
break
fi
__debug=" --debug "
__times=`expr $__times + 1`
if [ $__times -gt 1 ]; then
echo "ERROR: openstack command failed: sleeping and trying again!"
sleep 8
fi
done
}
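# Usage is identical to the plain client, e.g. (illustrative):
#   __openstack project list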
##
## First, we install Capnet at all of the nodes.
##
cd $OURDIR
mkdir capnet
cd capnet
maybe_install_packages git
maybe_install_packages pkg-config glib2.0-dev
maybe_install_packages automake1.11 swig2.0 python2.7-dev gawk libevent-dev libcurl3-dev
maybe_install_packages python-protobuf
#maybe_install_packages protobuf-c-compiler libprotobuf-c-dev
maybe_install_packages libcap-dev
# Ubuntu protobuf-c is too old; install all that from src
# First grab protobuf itself:
wget https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.gz
tar -xzvf protobuf-2.6.1.tar.gz
cd protobuf-2.6.1
./configure --prefix=/usr/local
make && make install
ldconfig
cd ..
# Now protobuf-c
git clone https://github.com/protobuf-c/protobuf-c.git protobuf-c
cd protobuf-c && ./autogen.sh && cd ..
mkdir protobuf-c.obj && cd protobuf-c.obj
../protobuf-c/configure --prefix=/usr/local
make && make install
ldconfig
cd ..
#
# First, libcap.
#
git clone "$LIBCAP_REPO" libcap
cd libcap
git checkout "$LIBCAP_BRANCH"
./autogen.sh
cd ..
mkdir libcap.obj
cd libcap.obj
../libcap/configure --prefix=/opt/tcloud/libcap
make
make install
cd ..
#
# Second, our version of openmul.
#
git clone "$OPENMUL_REPO" openmul
cd openmul
git checkout "$OPENMUL_BRANCH"
./autogen.sh
cd ..
mkdir openmul.obj
cd openmul.obj
../openmul/configure --prefix=/opt/tcloud/mul
make
make install
cd ..
#
# Third, capnet controller.
#
git clone "$CAPNET_REPO" capnet
cd capnet
git checkout "$CAPNET_BRANCH"
./autogen.sh
cd ..
mkdir capnet.obj
cd capnet.obj
../capnet/configure --prefix=/opt/tcloud/capnet \
--with-libcap=/opt/tcloud/libcap --with-mul=/opt/tcloud/mul \
--with-protoc=/usr/local
make && make install
cd ..
#
# Finally, capnet Neutron plugin stuff.
#
echo "$CAPNET_PLUGIN_REPO" | grep -q tar\.gz
if [ $? = 0 ]; then
wget -O networking-capnet.tar.gz "$CAPNET_PLUGIN_REPO"
tar -xzf networking-capnet.tar.gz
else
git clone "$CAPNET_PLUGIN_REPO" networking-capnet
fi
cd networking-capnet
git checkout "$CAPNET_PLUGIN_BRANCH"
rm -rf build networking_capnet.egg-info
# Install the Ubuntu way, and straight into dist-packages (i.e. /).
# Otherwise it goes into site-packages and Neutron can't find us.
python setup.py install --install-layout=deb --install-data / -v -f
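# Quick sanity check (illustrative): the plugin should now be importable
# from dist-packages, where Neutron will look for it:
#   python -c 'import networking_capnet; print networking_capnet.__file__'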
cd ..
##
##
##
#!/bin/sh
set -x
DIRNAME=`dirname $0`
# Gotta know the rules!
if [ `id -u` -ne 0 ] ; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Grab our libs
. "$DIRNAME/../../setup-lib.sh"
if [ "$HOSTNAME" != "$NETWORKMANAGER" ]; then
exit 0;
fi
if [ -f $SETTINGS ]; then
. $SETTINGS
fi
##
## First, we setup OVS stuff for Capnet physical networks.
##
$DIRNAME/setup-ovs-node.sh
##
## Second, apply our configuration
##
ML2TYPES=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 type_drivers "capnet,$ML2TYPES"
ML2TENANTTYPES=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 tenant_network_types "capnet,$ML2TENANTTYPES"
ML2MECHDRVS=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers`
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 mechanism_drivers "capnet,$ML2MECHDRVS"
ML2EXT=`crudini --get /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers`
if [ ! -z "$ML2EXT" ] ; then
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 extension_drivers "capnet,$ML2EXT"
else
crudini --set /etc/neutron/plugins/ml2/ml2_conf.ini \
ml2 extension_drivers "capnet"
fi
crudini --set /etc/neutron/neutron.conf DEFAULT api_extensions_path \
/usr/lib/python2.7/dist-packages/networking_capnet/extensions
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver \
networking_capnet.agent.linux.interface.CapnetOVSInterfaceDriver
crudini --set /etc/neutron/l3_agent.ini DEFAULT interface_driver \
networking_capnet.agent.linux.interface.CapnetOVSInterfaceDriver
crudini --set /etc/neutron/metering_agent.ini DEFAULT interface_driver \
networking_capnet.agent.linux.interface.CapnetOVSInterfaceDriver
##
## Setup our physical Capnet connections
##
capnet_networks=""
bridge_mappings=""
for lan in $DATACAPNETLANS ; do
if [ -n "${capnet_networks}" ]; then
capnet_networks="${capnet_networks},"
fi
if [ -n "${bridge_mappings}" ]; then
bridge_mappings="${bridge_mappings},"
fi
. $OURDIR/info.${lan}
capnet_networks="${capnet_networks}${lan}"
bridge_mappings="${bridge_mappings}${lan}:${DATABRIDGE}"
done
crudini --set /etc/neutron/plugins/ml2/ml2_conf_capnet.ini \
capnet bridge_mappings "$bridge_mappings"
crudini --set /etc/neutron/plugins/ml2/ml2_conf_capnet.ini \
capnet capnet_networks "$capnet_networks"
crudini --set /etc/neutron/dhcp_agent.ini \
capnet bridge_mappings "$bridge_mappings"
crudini --set /etc/neutron/l3_agent.ini \
capnet bridge_mappings "$bridge_mappings"
crudini --set /etc/neutron/metering_agent.ini \
capnet bridge_mappings "$bridge_mappings"
##
## Ok, restart Neutron Capnet ML2 plugin
##
service_restart neutron-plugin-capnet-agent
service_enable neutron-plugin-capnet-agent
#!/bin/sh
##
## This script builds and installs the necessary deps required for the
## Capnet controller, as well as the Capnet Neutron Plugin, and
## configures it all based on the Cloudlab Openstack Profile
## (https://www.cloudlab.us/p/capnet/OpenStack-Capnet).
##
set -x
DIRNAME=`dirname $0`
# Gotta know the rules!
if [ `id -u` -ne 0 ] ; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Grab our libs
. "$DIRNAME/../../setup-lib.sh"
if [ "$HOSTNAME" != "$CONTROLLER" ]; then
exit 0;
fi
if [ -f $SETTINGS ]; then
. $SETTINGS
fi
#
# openstack CLI commands seem flakey sometimes on Kilo and Liberty.
# Don't know if it's WSGI, mysql dropping connections, an NTP
# thing... but until it gets solved more permanently, have to retry :(.
#
__openstack() {
__err=1
__debug=
__times=0
while [ $__times -lt 16 -a $__err -ne 0 ]; do
openstack $__debug "$@"
__err=$?
if [ $__err -eq 0 ]; then
break
fi
__debug=" --debug "
__times=`expr $__times + 1`
if [ $__times -gt 1 ]; then
echo "ERROR: openstack command failed: sleeping and trying again!"
sleep 8
fi
done
}
##
## First, we install Capnet at all of the nodes.
##
$DIRNAME/setup-capnet-install.sh
for node in $COMPUTENODES $NETWORKMANAGER ; do
fqdn=`getfqdn $node`
$SSH $fqdn $DIRNAME/setup-capnet-install.sh
done
##
## Setup the controller node:
##
$DIRNAME/setup-capnet-controller.sh
##
## Setup the networkmanager node:
##
fqdn=`getfqdn $NETWORKMANAGER`
$SSH $fqdn $DIRNAME/setup-capnet-networkmanager.sh
for node in $COMPUTENODES ; do
fqdn=`getfqdn $node`
$SSH $fqdn $DIRNAME/setup-capnet-compute.sh
done
##
## Finally, we create initial networks!
##
. $OURDIR/admin-openrc.sh
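# A (hypothetical) example of the sort of initial network creation that
# belongs here, using the era-appropriate neutron CLI and made-up names:
#neutron net-create capnet-test-net --provider:network_type capnet \
#    --provider:physical_network capnetlan-1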
#!/bin/sh
#
# This sets up openvswitch networks for Capnet networks.
# The networkmanager and compute nodes' physical interfaces
# have to get moved into br-capnet-lan-*. The controller is special; it doesn't
# get an openvswitch setup.
#
set -x
# Gotta know the rules!
if [ `id -u` -ne 0 ] ; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Grab our libs
. "`dirname $0`/../../setup-lib.sh"
#
# (Maybe) Setup the flat data networks
#
for lan in $DATACAPNETLANS ; do
# suck in the vars we'll use to configure this one
. $OURDIR/info.$lan
DATABRIDGE="br-$lan"
ovs-vsctl add-br ${DATABRIDGE}
ovs-vsctl add-port ${DATABRIDGE} ${DATADEV}
ifconfig ${DATADEV} 0 up
cat <<EOF >> /etc/network/interfaces

auto ${DATABRIDGE}
iface ${DATABRIDGE} inet static
    address $DATAIP
    netmask $DATANETMASK

auto ${DATADEV}
iface ${DATADEV} inet static
    address 0.0.0.0
EOF
if [ -n "$DATAVLANDEV" ]; then
    cat <<EOF >> /etc/network/interfaces
    vlan-raw-device ${DATAVLANDEV}
EOF
fi
fi
ifconfig ${DATABRIDGE} $DATAIP netmask $DATANETMASK up
# XXX!
#route add -net 10.0.0.0/8 dev ${DATA_NETWORK_BRIDGE}
done
#service_restart openvswitch-switch
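# To sanity-check a node after this runs (hypothetical LAN name
# "capnetlan-1"): the physical data device should show up as a port on
# the bridge, and the bridge should hold the data-plane IP:
#   ovs-vsctl list-ports br-capnetlan-1
#   ip addr show br-capnetlan-1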
exit 0
import sys

from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils

from nova.i18n import _LE, _LI, _LW
from nova.virt.libvirt.driver import LibvirtDriver

LOG = logging.getLogger(__name__)

# We don't force a default here: if the operator leaves vif_driver unset,
# we warn and fall back to regular libvirt VIF driver behavior -- but
# anyone using this compute driver almost certainly wants our vif driver
# too ;).
capnet_libvirt_opts = [
    cfg.StrOpt('vif_driver',
               default=None,
               help='Specify a libvirt VIF driver. Comment out'
                    ' to revert to regular libvirt behavior.'),
]
cfg.CONF.register_opts(capnet_libvirt_opts, 'libvirt')

class CapnetLibvirtDriver(LibvirtDriver):
    """
    The *sole* purpose of this class is to add a config option,
    vif_driver, for libvirt, so that it can choose our Capnet VIF driver.
    """
    def __init__(self, virtapi, **kwargs):
        super(CapnetLibvirtDriver, self).__init__(virtapi, **kwargs)
        _vif_driver = cfg.CONF.libvirt.vif_driver
        if not _vif_driver:
            LOG.warn(_LW("Vif driver option required, but not specified;"
                         " falling back to default libvirt vif driver"))
        else:
            LOG.info(_LI("Loading vif driver '%s'"), _vif_driver)
            try:
                self.vif_driver = importutils.import_object_ns(
                    'nova.virt', _vif_driver)
            except ImportError:
                LOG.exception(_LE("Unable to load the libvirt vif_driver %s"),
                              _vif_driver)
                sys.exit(1)
            pass
        pass
    pass
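
# Example (illustrative) nova-compute.conf stanza that activates this
# driver and the Capnet VIF driver below; it mirrors what the profile's
# compute-node setup script writes via crudini:
#
#   [DEFAULT]
#   compute_driver = compute_capnet.virt.libvirt.driver.CapnetLibvirtDriver
#
#   [libvirt]
#   vif_driver = compute_capnet.virt.libvirt.vif.CapnetLibvirtVIFDriver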
from oslo_config import cfg
from oslo_log import log as logging

from nova import exception
import nova.virt.libvirt.vif

#from neutron import context as n_context
#import networking_capnet.common.topics as cn_topics
#import networking_capnet.common.constants as cn_const
#import networking_capnet.common.interface
#from networking_capnet.common import capnetneutronclient as cneutronclient
#from networking_capnet.common.interface import CapnetInterfaceMixin
#from networking_capnet.api.rpc.capnet_rpc_plugin_api \
#    import CapnetExtPluginApi

LOG = logging.getLogger(__name__)

# Pull in Neutron API client stuff, instead of using our client mixin stuff.
# Basically, the Nova Neutron API implementation pulls in the [neutron] cfg
# section, so we can't also pull it in here.
#from nova.network.neutronv2.api import get_client as get_neutron_client

# Pull in Neutron auth options from Nova config files
#cneutronclient.register_opts_for_nova_conf(cfg.CONF)

class CapnetLibvirtVIFDriver(nova.virt.libvirt.vif.LibvirtGenericVIFDriver): #,
                                                    # CapnetInterfaceMixin):
    def __init__(self):
        super(CapnetLibvirtVIFDriver, self).__init__()
        #super(CapnetInterfaceMixin,self).__init__()
        #self.set_client(cneutronclient.get_client_for_nova_conf())
        #self.neutron_context = n_context.get_admin_context_without_session()
        #self.capnet_ext_plugin_rpc = CapnetExtPluginApi(
        #    cn_topics.PLUGIN_CAPNET,self.neutron_context,cfg.CONF.host)
        pass

    def get_config_capnet(self, instance, vif, image_meta, inst_type,
                          virt_type, host):
        network_id = vif['network']['id']
        #bridge = self._check_network_for_capnet(network_id)
        LOG.info("vif: %s" % (str(vif)))
        bridge = vif['network']['bridge']
        if not bridge:
            LOG.error("no bridge %s for network %s" % (bridge, network_id))
            raise exception.NetworkMissingPhysicalNetwork(