Commit 9d0373fb authored by David Johnson

Continuing conversion to role-based configuration.

parent 8239e064
#!/usr/bin/env python2
#
# Dump the link and node role memberships for one cluster as shell
# variable assignments (e.g. CONTROLLER="ctl-c1"), given a manifest XML
# file and a cluster name.
#
import sys
import lxml.etree

links = {}
nodes = {}

f = open(sys.argv[1],'r')
contents = f.read()
f.close()
root = lxml.etree.fromstring(contents)

mycluster = None
if len(sys.argv) > 2:
    mycluster = sys.argv[2]
else:
    sys.exit(1)

# Find all the link roles:
for elm in root.getchildren():
    if not elm.tag.endswith("}link"):
        continue
    name = elm.get("client_id")
    cluster = None
    role = None
    for elm2 in elm.getchildren():
        if elm2.tag.endswith("}label") and elm2.get("name") == "cluster":
            cluster = elm2.text
        if elm2.tag.endswith("}label") and elm2.get("name") == "role":
            role = elm2.text
    if mycluster == cluster and role:
        if not role in links:
            links[role] = []
        links[role].append(name)

# Find all the node roles:
for elm in root.getchildren():
    if not elm.tag.endswith("}node"):
        continue
    name = elm.get("client_id")
    cluster = None
    role = None
    for elm2 in elm.getchildren():
        if elm2.tag.endswith("}label") and elm2.get("name") == "cluster":
            cluster = elm2.text
        if elm2.tag.endswith("}label") and elm2.get("name") == "role":
            role = elm2.text
    if mycluster == cluster and role:
        if not role in nodes:
            nodes[role] = []
        nodes[role].append(name)

for role in nodes:
    print "%s=\"%s\"" % (role.upper()," ".join(nodes[role]))
for role in links:
    print "%s=\"%s\"" % (role.upper()," ".join(links[role]))

sys.exit(0)
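The parser above emits one shell variable assignment per role, each line of the form ROLE="member1 member2 ...". A minimal sketch of how a setup script might consume that output; the script filename (dump-roles.py), the manifest path, and the cluster name are illustrative assumptions, not names from this commit:

#!/bin/sh
# Hypothetical consumer of the role-dump parser above; dump-roles.py and
# the manifest path are illustrative names only.
MANIFEST=/tmp/setup/manifest.xml
CLUSTER="c1"
# Each output line looks like CONTROLLER="ctl-c1" or COMPUTE="cp-1-c1 cp-2-c1",
# so eval'ing it defines one shell variable per role.
eval `python2 dump-roles.py $MANIFEST $CLUSTER`
echo "controller node(s): $CONTROLLER"
echo "compute node(s): $COMPUTE"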
#!/usr/bin/env python2
#
# Find the node in the manifest whose <host> element matches the local
# hostname, and dump that node's labels as KEY=VALUE lines.
#
import sys
import lxml.etree
import socket

f = open(sys.argv[1],'r')
contents = f.read()
f.close()
root = lxml.etree.fromstring(contents)

hostname = socket.gethostname().lower()

# Find our node and dump any labels:
labels = {}
ours = False
for elm in root.getchildren():
    if not elm.tag.endswith("}node"):
        continue
    labels = {}
    for elm2 in elm.getchildren():
        if elm2.tag.endswith("}host") and elm2.get("name").lower() == hostname:
            ours = True
            break
        elif elm2.tag.endswith("}label"):
            labels[elm2.get("name")] = elm2.text
    if ours:
        break
if ours:
    for (k,v) in labels.iteritems():
        print "%s=%s" % (k.upper(),v)
    sys.exit(0)
else:
    sys.exit(1)
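This per-node parser prints the labels attached to the local node (for this profile, cluster and role) as KEY=VALUE lines, and exits nonzero when the local hostname is not in the manifest. A hedged sketch of how a driver script might use it; dump-my-labels.py and the manifest path are assumed names for illustration:

#!/bin/sh
# Hypothetical sketch: pull this node's labels into the environment.
MANIFEST=/tmp/setup/manifest.xml
if OUT=`python2 dump-my-labels.py $MANIFEST`; then
    # Output lines look like ROLE=controller or CLUSTER=c1.
    eval "$OUT"
    echo "this node is a $ROLE in cluster $CLUSTER"
else
    echo "local hostname not found in manifest" 1>&2
    exit 1
fi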
#!/usr/bin/env python2
#
# Dump top-level labels and data_set/data_item parameters as KEY=VALUE
# lines, converting boolean strings to 1/0.
#
import sys
import lxml.etree

f = open(sys.argv[1],'r')
contents = f.read()
f.close()
root = lxml.etree.fromstring(contents)

def convert(p,v):
    if v in [ "True","true" ]:
        v = 1
    elif v in [ "False","false" ]:
        v = 0
    return v

# Dump any labels and parameter data items:
for elm in root.getchildren():
    if elm.tag.endswith("}label"):
        print "%s=%s" % (elm.get("name").upper(),elm.text)
    if elm.tag.endswith("}data_set"):
        for elm2 in elm.getchildren():
            if elm2.tag.endswith("}data_item"):
                p = elm2.get("name")
                print "%s=%s" % (p.split(".")[-1].upper(),
                                 str(convert(p,elm2.text)))
    if elm.tag.endswith("}data_item"):
        p = elm.get("name")
        print "%s=%s" % (p.split(".")[-1].upper(),str(convert(p,elm.text)))

sys.exit(0)
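This parser flattens labels and data_set/data_item parameters into KEY=VALUE lines, mapping True/False onto 1/0 so shell tests stay simple. A sketch of the intended consumption, assuming a hypothetical dump-params.py name and parameter-file path:

#!/bin/sh
# Hypothetical sketch: turn dumped parameters into shell variables.
eval `python2 dump-params.py /tmp/setup/parameters.xml`
# Booleans arrive as 1/0, so a parameter such as multiplexFlatLans
# (uppercased by the parser) can be tested directly:
if [ "$MULTIPLEXFLATLANS" = "1" ]; then
    echo "flat LANs are multiplexed over VLANs"
fi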
#!/usr/bin/env python2
#
# Dump the public (routable) IPv4 addresses in the manifest, one
# address/netmask pair per line, optionally restricted to one cluster.
#
import sys
import lxml.etree

f = open(sys.argv[1],'r')
contents = f.read()
f.close()
root = lxml.etree.fromstring(contents)

mycluster = None
if len(sys.argv) > 2:
    mycluster = sys.argv[2]

# Find all the public IP addresses:
for elm in root.getchildren():
    if not elm.tag.endswith("}routable_pool"):
        continue
    name = elm.get("client_id")
    if mycluster and not name.endswith("-%s" % (mycluster,)):
        continue
    for elm2 in elm.getchildren():
        if elm2.tag.endswith("}ipv4"):
            print "%s/%s" % (elm2.get("address"),elm2.get("netmask"))

sys.exit(0)
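This parser walks each routable_pool element, optionally filtered by a cluster suffix on its client_id, and prints one address/netmask pair per line. A sketch of iterating over that output; the script name and manifest path are illustrative assumptions:

#!/bin/sh
# Hypothetical sketch: walk the public address pool for cluster c1.
python2 dump-public-addrs.py /tmp/setup/manifest.xml c1 | while read ENTRY; do
    ADDR=${ENTRY%%/*}
    MASK=${ENTRY##*/}
    echo "public address $ADDR netmask $MASK"
done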
#!/usr/bin/env python2
#
# Emit a topomap-style description of the experiment: each node with the
# links its interfaces attach to, and each LAN with its netmask and a
# fixed cost of 1, optionally restricted to one cluster.
#
import sys
import lxml.etree

iface_link_map = {}
link_members = {}
node_ifaces = {}
link_netmasks = {}
allifaces = {}

f = open(sys.argv[1],'r')
contents = f.read()
f.close()
root = lxml.etree.fromstring(contents)

mycluster = None
if len(sys.argv) > 2:
    mycluster = sys.argv[2]

# Find all the links:
for elm in root.getchildren():
    if not elm.tag.endswith("}link"):
        continue
    name = elm.get("client_id")
    ifacerefs = []
    cluster = None
    for elm2 in elm.getchildren():
        if elm2.tag.endswith("}interface_ref"):
            ifacename = elm2.get("client_id")
            ifacerefs.append(ifacename)
        if elm2.tag.endswith("}label") and elm2.get("name") == "cluster":
            cluster = elm2.text
    if not mycluster or not cluster or mycluster == cluster:
        for ifacename in ifacerefs:
            iface_link_map[ifacename] = name
        link_members[name] = ifacerefs

# Find all the node interfaces
for elm in root.getchildren():
    if not elm.tag.endswith("}node"):
        continue
    name = elm.get("client_id")
    ifaces = {}
    cluster = None
    for elm2 in elm.getchildren():
        if elm2.tag.endswith("}interface"):
            ifacename = elm2.get("client_id")
            for elm3 in elm2.getchildren():
                if not elm3.tag.endswith("}ip"):
                    continue
                if not elm3.get("type") == 'ipv4':
                    continue
                addrtuple = (elm3.get("address"),elm3.get("netmask"))
                ifaces[ifacename] = addrtuple
                allifaces[ifacename] = addrtuple
                break
        if elm2.tag.endswith("}label") and elm2.get("name") == "cluster":
            cluster = elm2.text
    if not mycluster or not cluster or mycluster == cluster:
        for (k,v) in ifaces.iteritems():
            allifaces[k] = v
        node_ifaces[name] = ifaces

# Dump the nodes a la topomap
print "# nodes: vname,links"
for n in node_ifaces.keys():
    for (i,(addr,mask)) in node_ifaces[n].iteritems():
        print "%s,%s:%s" % (n,iface_link_map[i],addr)

# Dump the links a la topomap -- but with fixed cost of 1
print "# lans: vname,mask,cost"
for m in link_members.keys():
    ifref = link_members[m][0]
    (ip,mask) = allifaces[ifref]
    print "%s,%s,1" % (m,mask)

sys.exit(0)
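The topomap-style dump lists each node with the LAN and address of each of its interfaces, then each LAN with its netmask and a fixed cost of 1. A rough sketch of splitting that output into its two sections; the script name, manifest path, and the node/LAN names in the comment are invented examples:

#!/bin/sh
# Hypothetical sketch: separate the "# nodes:" and "# lans:" sections.
# The output being parsed might look roughly like:
#   # nodes: vname,links
#   ctl-c1,flat-lan-1-c1:10.11.10.1
#   # lans: vname,mask,cost
#   flat-lan-1-c1,255.255.255.0,1
python2 dump-topomap.py /tmp/setup/manifest.xml c1 | awk '
    /^# nodes/ { sect = "nodes"; next }
    /^# lans/  { sect = "lans"; next }
    sect == "nodes" { print "node entry: " $0 }
    sect == "lans"  { print "lan entry:  " $0 }'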
@@ -84,12 +84,16 @@ pc.defineParameter(
pc.defineParameter(
"clusterCount","Number of OpenStack clusters",
portal.ParameterType.INTEGER,1,advanced=True,
portal.ParameterType.INTEGER,2,advanced=True,
longDescription="You can create multiple OpenStack clusters within a single experiment by setting this parameter > 1. Note that any settings you have chosen will be applied to each cluster, other than those regarding remote blockstore mounting.")
pc.defineParameter(
"clusterChooseSite","Choose Site for each OpenStack cluster",
portal.ParameterType.BOOLEAN,False,advanced=True,
longDescription="If you want to choose which CloudLab site each OpenStack cluster will be instantiated at, select this parameter.")
pc.defineParameter(
"connectControllers","Connect Controllers in LAN",
portal.ParameterType.BOOLEAN,True,advanced=True,
longDescription="Connect controller nodes to a LAN, if more than one cluster.")
pc.defineParameter(
"ubuntuMirrorHost","Ubuntu Package Mirror Hostname",
@@ -127,11 +131,6 @@ pc.defineParameter(
portal.ParameterType.BOOLEAN, False,
longDescription="Multiplex any flat networks (i.e., management and all of the flat data networks) over physical interfaces, using VLANs. These VLANs are invisible to OpenStack, unlike the NUmber of VLAN Data Networks option, where OpenStack assigns the real VLAN tags to create its networks. On CloudLab, many physical machines have only a single experiment network interface, so if you want multiple flat networks, you have to multiplex. Currently, if you select this option, you *must* specify 0 for VLAN Data Networks; we cannot support both simultaneously yet.",
advanced=True)
pc.defineParameter(
"connectControllers","Connect Controllers in LAN",
portal.ParameterType.BOOLEAN,True,
longDescription="Connect controller nodes to a LAN, if more than one cluster.",
advanced=True)
pc.defineParameter(
"swiftLVSize", "Swift Logical Volume Size",
@@ -222,11 +221,11 @@ pc.defineParameter(
longDescription="Enable debug logging for OpenStack components.")
pc.defineParameter(
"controllerHost", "Base name of controller node(s)",
"controllerBaseName", "Base name of controller node(s)",
portal.ParameterType.STRING, "ctl", advanced=True,
longDescription="The short name of the controller node. You shold leave this alone unless you really want the hostname to change.")
pc.defineParameter(
"computeHostBaseName", "Base name of compute node(s)",
"computeBaseName", "Base name of compute node(s)",
portal.ParameterType.STRING, "cp", advanced=True,
longDescription="The base string of the short name of the compute nodes (node names will look like cp-1, cp-2, ... ). You should leave this alone unless you really want the hostname to change.")
pc.defineParameter(
@@ -321,7 +320,7 @@ passwdHelp += " When logging in to the Dashboard, use the `admin` user; when lo
grafanaInstructions = ""
if params.release in [ "pike","queens","rocky" ]:
grafanaInstructions = "You can also login to [your experiment's Grafana WWW interface](http://{host-%s}:3000/dashboard/db/openstack-instance-statistics?orgId=1) and view OpenStack statistics once you've created some VMs." % (params.controllerHost)
grafanaInstructions = "You can also login to [your experiment's Grafana WWW interface](http://{host-%s}:3000/dashboard/db/openstack-instance-statistics?orgId=1) and view OpenStack statistics once you've created some VMs." % (params.controllerBaseName)
tourInstructions = \
"""
@@ -345,7 +344,7 @@ The profile's setup scripts are automatically installed on each node in `/tmp/se
### Detailed Parameter Documentation
%s
""" % (params.controllerHost,grafanaInstructions,passwdHelp,params.controllerHost,detailedParamAutoDocs)
""" % (params.controllerBaseName,grafanaInstructions,passwdHelp,params.controllerBaseName,detailedParamAutoDocs)
#
# Setup the Tour info with the above description and instructions.
@@ -449,6 +448,7 @@ if params.clusterCount > 1 and params.connectControllers:
'base' : '10.222','netmask' : '255.255.0.0','values' : [-1,-1,10,0] }
ipSubnetsUsed += 1
interconnect = RSpec.LAN(lanstr)
interconnect._ext_children.append(Label("role","interconnect"))
if params.multiplexFlatLans:
interconnect.link_multiplexing = True
interconnect.best_effort = True
@@ -471,6 +471,7 @@ for cc in range(1,params.clusterCount + 1):
for i in range(1,params.flatDataLanCount + 1):
datalan = RSpec.LAN(flatlanstrs[cc][i])
datalan._ext_children.append(Label("cluster","c%d" % (cc,)))
datalan._ext_children.append(Label("role","flatlan"))
if params.osLinkSpeed > 0:
datalan.bandwidth = int(params.osLinkSpeed)
if params.multiplexFlatLans:
@@ -482,6 +483,7 @@ for cc in range(1,params.clusterCount + 1):
for i in range(1,params.vlanDataLanCount + 1):
datalan = RSpec.LAN("vlan-lan-%d" % (i,))
datalan._ext_children.append(Label("cluster","c%d" % (cc,)))
datalan._ext_children.append(Label("role","vlan"))
if params.osLinkSpeed > 0:
datalan.bandwidth = int(params.osLinkSpeed)
datalan.link_multiplexing = True
@@ -492,6 +494,7 @@ for cc in range(1,params.clusterCount + 1):
mgmtlan = RSpec.LAN(mlanstrs[cc])
mgmtlan._ext_children.append(Label("cluster","c%d" % (cc,)))
mgmtlan._ext_children.append(Label("role","mgmtlan"))
if params.multiplexFlatLans:
mgmtlan.link_multiplexing = True
mgmtlan.best_effort = True
@@ -580,7 +583,7 @@ for cc in range(1,params.clusterCount + 1):
#
# Add the controller node.
#
name = params.controllerHost + "-c%d" % (cc,)
name = params.controllerBaseName + "-c%d" % (cc,)
controller = RSpec.RawPC(name)
controllers.append(controller)
controller._ext_children.append(Label("role","controller"))
@@ -639,7 +642,7 @@ for cc in range(1,params.clusterCount + 1):
# Add the compute nodes.
#
for i in range(1,params.computeNodeCount + 1):
cpname = "%s-%d-c%d" % (params.computeHostBaseName,i,cc)
cpname = "%s-%d-c%d" % (params.computeBaseName,i,cc)
cpnode = RSpec.RawPC(cpname)
cpnode._ext_children.append(Label("role","compute"))
cpnode._ext_children.append(Label("cluster","c%d" % (cc,)))
......
@@ -11,7 +11,8 @@ if [ $EUID -ne 0 ] ; then
fi
ALLNODESCRIPTS="setup-root-ssh.sh setup-disk-space.sh"
CTLNODESCRIPTS="setup-letsencrypt.sh setup-nginx.sh setup-openstack.sh"
COMPUTESCRIPTS=""
CTLSCRIPTS="setup-letsencrypt.sh setup-nginx.sh setup-openstack.sh"
export SRC=`dirname $0`
cd $SRC
@@ -22,14 +23,21 @@ if [ -f $OURDIR/setup-driver-done ]; then
echo "setup-driver already ran; not running again"
exit 0
fi
for script in $ALLNODESCRIPTS ; do
cd $SRC
$SRC/$script | tee - $OURDIR/${script}.log 2>&1
done
cat $NODEID | grep "^${CONTROLLERBASE}-"
if [ $? -eq 0 ]; then
for script in $CTLNODESCRIPTS ; do
if [ $ROLE = "compute" ]; then
for script in $COMPUTESCRIPTS ; do
cd $SRC
$SRC/$script | tee - $OURDIR/${script}.log 2>&1
done
fi
if [ $ROLE = "controller" ]; then
for script in $CTLSCRIPTS ; do
cd $SRC
$SRC/$script | tee - $OURDIR/${script}.log 2>&1
done
......
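The setup-driver.sh diff above replaces the old hostname-prefix check against ${CONTROLLERBASE}- with tests on $ROLE, matching the role labels the profile now attaches to each node. A hedged sketch of how ROLE could be populated before those tests run, assuming the per-node label parser shown earlier is installed as dump-my-labels.py and that a manifest copy lives under $OURDIR (both names are assumptions, not part of this diff):

# Hypothetical sketch, not part of this diff: derive ROLE from the node's
# manifest labels before the role checks in setup-driver.sh run.
eval `python2 $SRC/dump-my-labels.py $OURDIR/manifest.xml`
if [ -z "$ROLE" ]; then
    echo "WARNING: no role label found for this node; skipping role scripts" 1>&2
fi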
#!/bin/sh
#
# For a neutron setup, we have to move the external interface into
# br-ex, and copy its config to br-ex; move the data lan (ethX) into br-int,
# and copy its config to br-int . For now, we assume the default route of
# the machine is associated with eth0/br-ex .
#
set -x
# Gotta know the rules!
if [ $EUID -ne 0 ] ; then
    echo "This script must be run as root" 1>&2
    exit 1
fi
DIRNAME=`dirname $0`
# Grab our libs
. "$DIRNAME/setup-lib.sh"
if [ "$HOSTNAME" != "$NETWORKMANAGER" ]; then
exit 0;
fi
logtstart "linuxbridge"
maybe_install_packages pssh
PSSH='/usr/bin/parallel-ssh -t 0 -O StrictHostKeyChecking=no '
PHOSTS=""
mkdir -p $OURDIR/pssh.setup-linuxbridge-node.stdout $OURDIR/pssh.setup-linuxbridge-node.stderr
# Do the network manager node first, no ssh
echo "*** Setting up LinuxBridge on $HOSTNAME"
$DIRNAME/setup-linuxbridge-node.sh
for node in $NODES
do
    [ "$node" = "$NETWORKMANAGER" ] && continue
    fqdn=`getfqdn $node`
    PHOSTS="$PHOSTS -H $fqdn"
done
echo "*** Setting up LinuxBridge via pssh: $PHOSTS"
$PSSH -o $OURDIR/pssh.setup-linuxbridge-node.stdout -e $OURDIR/pssh.setup-linuxbridge-node.stderr \
    $PHOSTS $DIRNAME/setup-linuxbridge-node.sh
logtend "linuxbridge"
exit 0
#!/bin/sh
#
# For a neutron setup, we have to move the external interface into
# br-ex, and copy its config to br-ex; move the data lan (ethX) into br-int,
# and copy its config to br-int . For now, we assume the default route of
# the machine is associated with eth0/br-ex .
#
set -x
# Gotta know the rules!
if [ $EUID -ne 0 ] ; then
    echo "This script must be run as root" 1>&2
    exit 1
fi
DIRNAME=`dirname $0`
# Grab our libs
. "$DIRNAME/setup-lib.sh"
if [ "$HOSTNAME" != "$NETWORKMANAGER" ]; then
exit 0;
fi
logtstart "ovs"
maybe_install_packages pssh
PSSH='/usr/bin/parallel-ssh -t 0 -O StrictHostKeyChecking=no '
PHOSTS=""
mkdir -p $OURDIR/pssh.setup-ovs-node.stdout $OURDIR/pssh.setup-ovs-node.stderr
# Do the network manager node first, no ssh
echo "*** Setting up OpenVSwitch on $HOSTNAME"
$DIRNAME/setup-ovs-node.sh
for node in $NODES
do
    [ "$node" = "$NETWORKMANAGER" ] && continue
    fqdn=`getfqdn $node`
    PHOSTS="$PHOSTS -H $fqdn"
done
echo "*** Setting up OpenVSwitch via pssh: $PHOSTS"
$PSSH -o $OURDIR/pssh.setup-ovs-node.stdout -e $OURDIR/pssh.setup-ovs-node.stderr \
    $PHOSTS $DIRNAME/setup-ovs-node.sh
logtend "ovs"
exit 0
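The two drivers above fan the per-node setup out from the network manager node via parallel-ssh; the per-node work itself (moving the external interface into br-ex, as the header comments describe) lives in setup-ovs-node.sh and setup-linuxbridge-node.sh, which are not shown in this commit view. A heavily hedged sketch of the kind of bridge move the comments describe, not the actual contents of those scripts; the interface name is an assumption:

#!/bin/sh
# Illustrative sketch only: move an external interface into br-ex and
# carry its address and default route over to the bridge.
EXTIFACE=eth0
ADDR=`ip -4 addr show dev $EXTIFACE | awk '/inet /{print $2}'`
GW=`ip route show default | awk '{print $3}'`
ovs-vsctl --may-exist add-br br-ex
ovs-vsctl --may-exist add-port br-ex $EXTIFACE
ip addr flush dev $EXTIFACE
ip addr add $ADDR dev br-ex
ip link set br-ex up
ip route add default via $GW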