Commit 8167c83f authored by David Johnson

First step in Mitaka support: support ctl-only openstacks (2 nodes).

The docs have been recommending a unified networkmanager-ctl physical
node since Liberty, instead of the old 3-node approach.  Obviously
this has appeal for us, so get this done before doing any
Mitaka-specific stuff.

It really wasn't as hard as I thought... it basically just worked with
these few changes.  We no longer assign an 'nm' name at all.

(Also, this commit has my dev tarball in osp.py instead of the real
thing, because there is no real thing yet.)
parent fb397d2e
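
The crux of the change is a single test, used by both the geni-lib profile and the setup scripts: the deployment is "unified" when the controller and network manager parameters name the same host. Here is a minimal, runnable sketch of that test (variable names taken from the diffs below; the "ctl" default values are illustrative):

    #!/bin/sh
    # Minimal sketch of the unified-node test (names from the diffs below).
    CONTROLLER="ctl"
    NETWORKMANAGER="ctl"    # set to "nm" to model the old 3-node layout

    # Exit status 0 (true) when the network manager is the controller.
    unified() {
        [ "$NETWORKMANAGER" = "$CONTROLLER" ]
    }

    if unified ; then
        echo "unified: networkmanager setup runs locally on $CONTROLLER"
    else
        echo "separate: networkmanager setup runs over ssh on $NETWORKMANAGER"
    fi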
@@ -8,7 +8,7 @@ import crypt
import random
# Don't want this as a param yet
TBURL = "http://www.emulab.net/downloads/openstack-setup-v26.tar.gz"
TBURL = "http://www.emulab.net/downloads/openstack-setup-v33-johnsond.tar.gz"
TBCMD = "sudo mkdir -p /root/setup && sudo -H /tmp/setup/setup-driver.sh 2>&1 | sudo tee /root/setup/setup-driver.log"
#
@@ -145,8 +145,8 @@ pc.defineParameter("controllerHost", "Name of controller node",
portal.ParameterType.STRING, "ctl", advanced=True,
longDescription="The short name of the controller node. You shold leave this alone unless you really want the hostname to change.")
pc.defineParameter("networkManagerHost", "Name of network manager node",
portal.ParameterType.STRING, "nm",advanced=True,
longDescription="The short name of the network manager (neutron) node. You shold leave this alone unless you really want the hostname to change.")
portal.ParameterType.STRING, "ctl",advanced=True,
longDescription="The short name of the network manager (neutron) node. If you specify the same name here as you did for the controller, then your controller and network manager will be unified into a single node. You shold leave this alone unless you really want the hostname to change.")
pc.defineParameter("computeHostBaseName", "Base name of compute node(s)",
portal.ParameterType.STRING, "cp", advanced=True,
longDescription="The base string of the short name of the compute nodes (node names will look like cp-1, cp-2, ... or cp-s2-1, cp-s2-2, ... (for nodes at Site 2, if you request those)). You shold leave this alone unless you really want the hostname to change.")
@@ -223,6 +223,11 @@ params = pc.bindParameters()
### #
### pass
if params.controllerHost == params.networkManagerHost \
and params.release in [ 'juno','kilo' ]:
perr = portal.ParameterWarning("We do not support use of the same physical node as both controller and networkmanager for older Juno and Kilo releases of this profile. You can try it, but it may not work. To revert to the old behavior, open the Advanced Parameters and change the networkManagerHost parameter to nm .",['release','controllerHost','networkManagerHost'])
pc.reportWarning(perr)
pass
if params.computeNodeCount > 8:
perr = portal.ParameterWarning("Are you creating a real cloud? Otherwise, do you really need more than 8 compute nodes? Think of your fellow users scrambling to get nodes :).",['computeNodeCount'])
pc.reportWarning(perr)
@@ -522,36 +527,40 @@ if mgmtlan:
controller.addService(RSpec.Install(url=TBURL, path="/tmp"))
controller.addService(RSpec.Execute(shell="sh",command=TBCMD))
#
# Add the network manager (neutron) node.
#
networkManager = RSpec.RawPC(params.networkManagerHost)
nodes[params.networkManagerHost] = networkManager
if params.osNodeType:
networkManager.hardware_type = params.osNodeType
pass
networkManager.Site("1")
networkManager.disk_image = "urn:publicid:IDN+utah.cloudlab.us+image+emulab-ops//%s-%s" % (image_os,image_tag_nm)
i = 0
for datalan in alllans:
iface = networkManager.addInterface("if%d" % (i,))
datalan.addInterface(iface)
if generateIPs:
iface.addAddress(RSpec.IPv4Address(get_next_ipaddr(datalan.client_id),
get_netmask(datalan.client_id)))
if params.controllerHost != params.networkManagerHost:
#
# Add the network manager (neutron) node.
#
networkManager = RSpec.RawPC(params.networkManagerHost)
nodes[params.networkManagerHost] = networkManager
if params.osNodeType:
networkManager.hardware_type = params.osNodeType
pass
i += 1
pass
if mgmtlan:
iface = networkManager.addInterface("ifM")
mgmtlan.addInterface(iface)
if generateIPs:
iface.addAddress(RSpec.IPv4Address(get_next_ipaddr(mgmtlan.client_id),
get_netmask(mgmtlan.client_id)))
networkManager.Site("1")
networkManager.disk_image = "urn:publicid:IDN+utah.cloudlab.us+image+emulab-ops//%s-%s" % (image_os,image_tag_nm)
i = 0
for datalan in alllans:
iface = networkManager.addInterface("if%d" % (i,))
datalan.addInterface(iface)
if generateIPs:
iface.addAddress(
RSpec.IPv4Address(get_next_ipaddr(datalan.client_id),
get_netmask(datalan.client_id)))
pass
i += 1
pass
if mgmtlan:
iface = networkManager.addInterface("ifM")
mgmtlan.addInterface(iface)
if generateIPs:
iface.addAddress(
RSpec.IPv4Address(get_next_ipaddr(mgmtlan.client_id),
get_netmask(mgmtlan.client_id)))
pass
pass
networkManager.addService(RSpec.Install(url=TBURL, path="/tmp"))
networkManager.addService(RSpec.Execute(shell="sh",command=TBCMD))
pass
networkManager.addService(RSpec.Install(url=TBURL, path="/tmp"))
networkManager.addService(RSpec.Execute(shell="sh",command=TBCMD))
#
# Add the compute nodes. First we generate names for each node at each site;
@@ -1227,16 +1227,23 @@ fi
#
# Install the Network service on the networkmanager
#
if [ -z "${NEUTRON_NETWORKMANAGER_DONE}" -a ! "$CONTROLLER" = "$NETWORKMANAGER" ]; then
if [ -z "${NEUTRON_NETWORKMANAGER_DONE}" ]; then
NEUTRON_NETWORKMANAGER_DONE=1
fqdn=`getfqdn $NETWORKMANAGER`
if ! unified ; then
echo "*** Setting up separate networkmanager"
# Copy the latest settings (passwords, endpoints, whatever) over
scp -o StrictHostKeyChecking=no $SETTINGS $fqdn:$SETTINGS
fqdn=`getfqdn $NETWORKMANAGER`
ssh -o StrictHostKeyChecking=no $fqdn \
# Copy the latest settings (passwords, endpoints, whatever) over
scp -o StrictHostKeyChecking=no $SETTINGS $fqdn:$SETTINGS
ssh -o StrictHostKeyChecking=no $fqdn \
$DIRNAME/setup-networkmanager.sh
else
echo "*** Setting up unified networkmanager on controller"
$DIRNAME/setup-networkmanager.sh
fi
echo "NEUTRON_NETWORKMANAGER_DONE=\"${NEUTRON_NETWORKMANAGER_DONE}\"" >> $SETTINGS
fi
@@ -2193,8 +2200,12 @@ EOF
service_enable ceilometer-alarm-notifier
# NB: restart the neutron ceilometer agent too
fqdn=`getfqdn $NETWORKMANAGER`
ssh -o StrictHostKeyChecking=no $fqdn service neutron-metering-agent restart
if ! unified ; then
fqdn=`getfqdn $NETWORKMANAGER`
ssh -o StrictHostKeyChecking=no $fqdn service neutron-metering-agent restart
else
service neutron-metering-agent restart
fi
echo "CEILOMETER_DBPASS=\"${CEILOMETER_DBPASS}\"" >> $SETTINGS
echo "CEILOMETER_PASS=\"${CEILOMETER_PASS}\"" >> $SETTINGS
@@ -18,6 +18,76 @@ echo "*** Setting up root ssh pubkey access across all nodes..."
# All nodes need to publish public keys, and acquire others'
$DIRNAME/setup-root-ssh.sh 1> $OURDIR/setup-root-ssh.log 2>&1
if [ -f $SETTINGS ]; then
. $SETTINGS
fi
if [ "$HOSTNAME" = "$NETWORKMANAGER" ]; then
echo "*** Waiting for ssh access to all nodes..."
for node in $NODES ; do
[ "$node" = "$NETWORKMANAGER" ] && continue
SUCCESS=1
fqdn=`getfqdn $node`
while [ $SUCCESS -ne 0 ] ; do
sleep 1
ssh -o ConnectTimeout=1 -o PasswordAuthentication=No -o NumberOfPasswordPrompts=0 -o StrictHostKeyChecking=No $fqdn /bin/ls > /dev/null
SUCCESS=$?
done
echo "*** $node is up!"
done
#
# Get our hosts files setup to point to the new management network.
# (These were created one-time in setup-lib.sh)
#
cat $OURDIR/mgmt-hosts > /etc/hosts
echo "127.0.0.1 localhost" >> /etc/hosts
for node in $NODES
do
[ "$node" = "$NETWORKMANAGER" ] && continue
fqdn=`getfqdn $node`
$SSH $fqdn mkdir -p $OURDIR
scp -p -o StrictHostKeyChecking=no \
$SETTINGS $OURDIR/mgmt-hosts $OURDIR/mgmt-netmask \
$OURDIR/data-hosts $OURDIR/data-netmask \
$fqdn:$OURDIR
$SSH $fqdn cp $OURDIR/mgmt-hosts /etc/hosts
$SSH $fqdn 'echo 127.0.0.1 localhost | tee -a /etc/hosts'
done
echo "*** Setting up the Management Network"
if [ -z "${MGMTLAN}" ]; then
echo "*** Building a VPN-based Management Network"
$DIRNAME/setup-vpn.sh 1> $OURDIR/setup-vpn.log 2>&1
# Give the VPN a chance to settle down
PINGED=0
while [ $PINGED -eq 0 ]; do
sleep 2
ping -c 1 $CONTROLLER
if [ $? -eq 0 ]; then
PINGED=1
fi
done
else
echo "*** Using $MGMTLAN as the Management Network"
fi
echo "*** Moving Interfaces into OpenVSwitch Bridges"
$DIRNAME/setup-ovs.sh 1> $OURDIR/setup-ovs.log 2>&1
echo "*** Telling controller to set up OpenStack!"
ssh -o StrictHostKeyChecking=no ${CONTROLLER} "/bin/touch $OURDIR/networkmanager-driver-done"
fi
if [ "$HOSTNAME" = "$CONTROLLER" ]; then
#
# Wait for networkmanager setup to touch a special file indicating that
@@ -38,71 +108,4 @@ elif [ "$HOSTNAME" != "$NETWORKMANAGER" ]; then
exit 0;
fi
if [ -f $SETTINGS ]; then
. $SETTINGS
fi
echo "*** Waiting for ssh access to all nodes..."
for node in $NODES ; do
[ "$node" = "$NETWORKMANAGER" ] && continue
SUCCESS=1
fqdn=`getfqdn $node`
while [ $SUCCESS -ne 0 ] ; do
sleep 1
ssh -o ConnectTimeout=1 -o PasswordAuthentication=No -o NumberOfPasswordPrompts=0 -o StrictHostKeyChecking=No $fqdn /bin/ls > /dev/null
SUCCESS=$?
done
echo "*** $node is up!"
done
#
# Get our hosts files setup to point to the new management network.
# (These were created one-time in setup-lib.sh)
#
cat $OURDIR/mgmt-hosts > /etc/hosts
echo "127.0.0.1 localhost" >> /etc/hosts
for node in $NODES
do
[ "$node" = "$NETWORKMANAGER" ] && continue
fqdn=`getfqdn $node`
$SSH $fqdn mkdir -p $OURDIR
scp -p -o StrictHostKeyChecking=no \
$SETTINGS $OURDIR/mgmt-hosts $OURDIR/mgmt-netmask \
$OURDIR/data-hosts $OURDIR/data-netmask \
$fqdn:$OURDIR
$SSH $fqdn cp $OURDIR/mgmt-hosts /etc/hosts
$SSH $fqdn 'echo 127.0.0.1 localhost | tee -a /etc/hosts'
done
echo "*** Setting up the Management Network"
if [ -z "${MGMTLAN}" ]; then
echo "*** Building a VPN-based Management Network"
$DIRNAME/setup-vpn.sh 1> $OURDIR/setup-vpn.log 2>&1
# Give the VPN a chance to settle down
PINGED=0
while [ $PINGED -eq 0 ]; do
sleep 2
ping -c 1 $CONTROLLER
if [ $? -eq 0 ]; then
PINGED=1
fi
done
else
echo "*** Using $MGMTLAN as the Management Network"
fi
echo "*** Moving Interfaces into OpenVSwitch Bridges"
$DIRNAME/setup-ovs.sh 1> $OURDIR/setup-ovs.log 2>&1
echo "*** Telling controller to set up OpenStack!"
ssh -o StrictHostKeyChecking=no ${CONTROLLER} "/bin/touch $OURDIR/networkmanager-driver-done"
exit 0
@@ -467,6 +467,17 @@ elif [ $UPDATING -ne 0 ]; then
sed -i -e "s/^\(COMPUTENODES=\"[^\"]*\"\)\$/COMPUTENODES=\"$COMPUTENODES\"/" $SETTINGS
fi
#
# Returns 0 (true) if the networkmanager node is also the controller; 1 if not.
#
unified() {
if [ "$NETWORKMANAGER" = "$CONTROLLER" ]; then
return 0
else
return 1
fi
}
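# Note: a one-line equivalent would be
#   unified() { [ "$NETWORKMANAGER" = "$CONTROLLER" ]; }
# since [ ] itself exits 0 or 1; the explicit if/else is kept for clarity.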
# Setup apt-get to not prompt us
export DEBIAN_FRONTEND=noninteractive
# -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
@@ -577,7 +588,9 @@ if [ ! -f $OURDIR/mgmt-hosts -o $UPDATING -ne 0 ] ; then
if [ $UPDATING -eq 0 ]; then
echo "255.255.0.0" > $OURDIR/mgmt-netmask
echo "192.168.0.1 $NETWORKMANAGER" > $OURDIR/mgmt-hosts
echo "192.168.0.3 $CONTROLLER" >> $OURDIR/mgmt-hosts
if ! unified ; then
echo "192.168.0.3 $CONTROLLER" >> $OURDIR/mgmt-hosts
fi
o3=0
o4=5
else
@@ -639,7 +652,9 @@ if [ ! -f $OURDIR/mgmt-hosts -o $UPDATING -ne 0 ] ; then
echo "$prefix.0.0/255.255.0.0" > $OURDIR/data-cidr.$lan
echo "$prefix.0.0" > $OURDIR/data-network.$lan
echo "$prefix.0.1 $NETWORKMANAGER" > $OURDIR/data-hosts.$lan
echo "$prefix.0.3 $CONTROLLER" >> $OURDIR/data-hosts.$lan
if ! unified ; then
echo "$prefix.0.3 $CONTROLLER" >> $OURDIR/data-hosts.$lan
fi
#
# Now set static IPs for the compute nodes.
@@ -51,7 +51,7 @@ maybe_install_packages neutron-plugin-ml2 neutron-plugin-openvswitch-agent \
neutron-l3-agent neutron-dhcp-agent conntrack neutron-metering-agent
# If not a shared controller, don't want neutron connecting to nonexistent db
if [ ! "$CONTROLLER" = "$NETWORKMANAGER" ]; then
if ! unified ; then
crudini --del /etc/neutron/neutron.conf database connection
crudini --del /etc/neutron/neutron.conf keystone_authtoken auth_host
crudini --del /etc/neutron/neutron.conf keystone_authtoken auth_port
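# (crudini --del FILE SECTION OPTION removes a single key from an INI-style
# config file; here it strips the database and keystone settings that a
# standalone neutron node must not use.)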