Commit fdf65a39 authored by David Johnson

Merge remote-tracking branch 'origin/rocky'

parents 00898fe3 f3921ff5
--- /usr/share/openstack-dashboard/openstack_dashboard/dashboards/project/static/dashboard/project/workflow/launch-instance/launch-instance-model.service.js~ 2017-08-30 05:08:44.000000000 -0600
+++ /usr/share/openstack-dashboard/openstack_dashboard/dashboards/project/static/dashboard/project/workflow/launch-instance/launch-instance-model.service.js 2017-11-30 12:29:31.341556532 -0700
@@ -200,7 +200,7 @@
// REQUIRED for JS logic (image | snapshot | volume | volume_snapshot)
source_type: null,
source: [],
- create_volume_default: true,
+ create_volume_default: false,
// REQUIRED for JS logic
vol_create: false,
// May be null
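After changing the launch-instance default above, Horizon usually has to regenerate its static assets before the new value is served. A minimal sketch, assuming a packaged Ubuntu openstack-dashboard install with compression enabled (exact steps vary by deployment):

    cd /usr/share/openstack-dashboard
    # re-collect and re-compress the dashboard's static files so the edited JS is picked up
    python manage.py collectstatic --noinput
    python manage.py compress --force
    # restart the web server that fronts Horizon
    service apache2 restart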
--- /usr/lib/python2.7/dist-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py~ 2018-11-05 23:12:52.000000000 -0700
+++ /usr/lib/python2.7/dist-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py 2019-05-06 13:37:54.939320705 -0600
@@ -127,30 +127,41 @@
if table_id is None:
table_id = ofp.OFPTT_ALL
+ cookie_masks = []
if cookie == ovs_lib.COOKIE_ANY:
cookie = 0
if cookie_mask != 0:
raise Exception("cookie=COOKIE_ANY but cookie_mask set to %s" %
cookie_mask)
+ cookies = set([f.cookie for f in self.dump_flows()]) - \
+ self.preserved_cookies
+ LOG.warning("Preserved cookies for %s: %s", self.br_name,
+ self.preserved_cookies)
+ for c in cookies:
+ cookie_masks.append((c,ovs_lib.UINT64_BITMASK))
elif cookie == COOKIE_DEFAULT:
- cookie = self._default_cookie
- cookie_mask = ovs_lib.UINT64_BITMASK
+ cookie_masks.append((self._default_cookie,ovs_lib.UINT64_BITMASK))
+ elif cookie in self.preserved_cookies:
+ return
- match = self._match(ofp, ofpp, match, **match_kwargs)
- if strict:
- cmd = ofp.OFPFC_DELETE_STRICT
- else:
- cmd = ofp.OFPFC_DELETE
- msg = ofpp.OFPFlowMod(dp,
- command=cmd,
- cookie=cookie,
- cookie_mask=cookie_mask,
- table_id=table_id,
- match=match,
- priority=priority,
- out_group=ofp.OFPG_ANY,
- out_port=ofp.OFPP_ANY)
- self._send_msg(msg, active_bundle=active_bundle)
+ for (_cookie,_mask) in cookie_masks:
+ match = self._match(ofp, ofpp, match, **match_kwargs)
+ if strict:
+ cmd = ofp.OFPFC_DELETE_STRICT
+ else:
+ cmd = ofp.OFPFC_DELETE
+ LOG.warning("Deleting flow with cookie 0x%(cookie)x",
+ {'cookie': _cookie})
+ msg = ofpp.OFPFlowMod(dp,
+ command=cmd,
+ cookie=_cookie,
+ cookie_mask=_mask,
+ table_id=table_id,
+ match=match,
+ priority=priority,
+ out_group=ofp.OFPG_ANY,
+ out_port=ofp.OFPP_ANY)
+ self._send_msg(msg)
def dump_flows(self, table_id=None):
(dp, ofp, ofpp) = self._get_dp()
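The delete_flows() change above enumerates the cookies actually installed on the bridge and skips the preserved (default-flow) cookies instead of deleting with COOKIE_ANY. A rough way to see which cookies a bridge currently carries (br-int is only an example bridge, assuming it speaks OpenFlow 1.3):

    ovs-ofctl -O OpenFlow13 dump-flows br-int | grep -o 'cookie=0x[0-9a-f]*' | sort -u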
--- /usr/lib/python2.7/dist-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py~ 2018-11-05 23:12:44.000000000 -0700
+++ /usr/lib/python2.7/dist-packages/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py 2019-05-06 13:39:15.599229212 -0600
@@ -17,6 +17,8 @@
from neutron.agent.common import ovs_lib
+import os
+import os.path
class OVSBridgeCookieMixin(object):
'''Mixin to provide cookie retention functionality
@@ -26,6 +28,26 @@
def __init__(self, *args, **kwargs):
super(OVSBridgeCookieMixin, self).__init__(*args, **kwargs)
self._reserved_cookies = set()
+ self._preserved_cookies = set()
+
+ if not os.path.exists("/var/lib/neutron/ovs-default-flows.reserved_cookie"):
+ sc = self.request_cookie()
+ self._preserved_cookies.add(sc)
+ self._reserved_cookies.add(sc)
+ f = file("/var/lib/neutron/ovs-default-flows.reserved_cookie",'w')
+ f.write(str(sc))
+ f.close()
+ else:
+ f = file("/var/lib/neutron/ovs-default-flows.reserved_cookie",'r')
+ sc = int(f.read())
+ f.close()
+ self._preserved_cookies.add(sc)
+ self._reserved_cookies.add(sc)
+ pass
+
+ @property
+ def preserved_cookies(self):
+ return set(self._preserved_cookies)
@property
def reserved_cookies(self):
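The mixin above reserves one cookie for the bridge's default flows and persists it to disk, so the same cookie is reused (and its flows left alone) across agent restarts. The persisted value can be inspected directly:

    # path taken from the patch above
    cat /var/lib/neutron/ovs-default-flows.reserved_cookie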
--- /usr/lib/python2.7/dist-packages/nova/virt/libvirt/driver.py.orig 2018-05-17 14:32:56.000000000 -0600
+++ /usr/lib/python2.7/dist-packages/nova/virt/libvirt/driver.py 2018-07-03 17:59:48.709498189 -0600
@@ -4602,7 +4602,8 @@
if (virt_type not in ("lxc", "uml", "parallels", "xen") or
(virt_type == "xen" and guest.os_type == fields.VMMode.HVM)):
- guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
+ if caps.host.cpu.arch != 'aarch64' or guest.os_loader_type == "pflash":
+ guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
if (virt_type in ("qemu", "kvm") and
@@ -4820,7 +4821,7 @@
hw_firmware_type = image_meta.properties.get('hw_firmware_type')
if caps.host.cpu.arch == fields.Architecture.AARCH64:
if not hw_firmware_type:
- hw_firmware_type = fields.FirmwareType.UEFI
+ hw_firmware_type = fields.FirmwareType.BIOS
if hw_firmware_type == fields.FirmwareType.UEFI:
if self._has_uefi_support():
global uefi_logged
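With the hunk above, aarch64 guests now default to BIOS firmware instead of UEFI. An image that still needs UEFI can request it explicitly through the hw_firmware_type image property, roughly like this (the image name is a placeholder):

    openstack image set --property hw_firmware_type=uefi <image-name-or-id>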
@@ -35,8 +35,8 @@ pc = portal.Context()
# Define *many* parameters; see the help docs in geni-lib to learn how to modify.
#
pc.defineParameter("release","OpenStack Release",
- portal.ParameterType.STRING,"queens",[("queens","Queens"),("pike","Pike"),("ocata","Ocata"),("newton","Newton"),("mitaka","Mitaka"),("liberty","Liberty (deprecated)"),("kilo","Kilo (deprecated)"),("juno","Juno (deprecated)")],
- longDescription="We provide OpenStack Queens (Ubuntu 18.04), Pike, Ocata, Newton, Mitaka (Ubuntu 16.04); Liberty (Ubuntu 15.10); Kilo (Ubuntu 15.04); or Juno (Ubuntu 14.10). OpenStack is installed from packages available on these distributions.")
+ portal.ParameterType.STRING,"rocky",[("rocky","Rocky"),("queens","Queens"),("pike","Pike"),("ocata","Ocata"),("newton","Newton"),("mitaka","Mitaka"),("liberty","Liberty (deprecated)"),("kilo","Kilo (deprecated)"),("juno","Juno (deprecated)")],
+ longDescription="We provide OpenStack Rocky, Queens (Ubuntu 18.04), Pike, Ocata, Newton, Mitaka (Ubuntu 16.04); Liberty (Ubuntu 15.10); Kilo (Ubuntu 15.04); or Juno (Ubuntu 14.10). OpenStack is installed from packages available on these distributions.")
pc.defineParameter("computeNodeCount", "Number of compute nodes (at Site 1)",
portal.ParameterType.INTEGER, 1)
pc.defineParameter("osNodeType", "Hardware Type",
@@ -448,7 +448,7 @@ for param in pc._parameterOrder:
pass
tourDescription = \
- "This profile provides a highly-configurable OpenStack instance with a controller and one or more compute nodes (potentially at multiple Cloudlab sites) (and optionally a network manager node, in a split configuration). This profile runs x86, arm64, and POWER8 (Queens and up) nodes. It sets up OpenStack Queens (Ubuntu 18.04), Pike, Ocata, Newton, or Mitaka (Ubuntu 16.04) (Liberty on 15.10, Kilo on 15.04, and Juno on 14.10 are *deprecated*) according to your choice, and configures all OpenStack services, pulls in some VM disk images, and creates basic networks accessible via floating IPs. You'll be able to create instances and access them over the Internet in just a few minutes. When you click the Instantiate button, you'll be presented with a list of parameters that you can change to control what your OpenStack instance will look like; **carefully** read the parameter documentation on that page (or in the Instructions) to understand the various features available to you."
+ "This profile provides a highly-configurable OpenStack instance with a controller and one or more compute nodes (potentially at multiple Cloudlab sites) (and optionally a network manager node, in a split configuration). This profile runs x86, arm64, and POWER8 (Queens and up) nodes. It sets up OpenStack Rocky, Queens (Ubuntu 18.04), Pike, Ocata, Newton, or Mitaka (Ubuntu 16.04) (Liberty on 15.10, Kilo on 15.04, and Juno on 14.10 are *deprecated*) according to your choice, and configures all OpenStack services, pulls in some VM disk images, and creates basic networks accessible via floating IPs. You'll be able to create instances and access them over the Internet in just a few minutes. When you click the Instantiate button, you'll be presented with a list of parameters that you can change to control what your OpenStack instance will look like; **carefully** read the parameter documentation on that page (or in the Instructions) to understand the various features available to you."
###if not params.adminPass or len(params.adminPass) == 0:
passwdHelp = "Your OpenStack admin and instance VM password is randomly-generated by Cloudlab, and it is: `{password-adminPass}` ."
@@ -457,15 +457,20 @@ passwdHelp = "Your OpenStack admin and instance VM password is randomly-generate
### pass
passwdHelp += " When logging in to the Dashboard, use the `admin` user; when logging into instance VMs, use the `ubuntu` user. If you have selected Mitaka or newer, use 'default' as the Domain at the login prompt."
+ grafanaInstructions = ""
+ if params.release in [ "pike","queens","rocky" ]:
+ grafanaInstructions = "You can also login to [your experiment's Grafana WWW interface](http://{host-%s}:3000/dashboard/db/openstack-instance-statistics?orgId=1) and view OpenStack statistics once you've created some VMs." % (params.controllerHost)
tourInstructions = \
"""
### Basic Instructions
- Once your experiment nodes have booted, and this profile's configuration scripts have finished configuring OpenStack inside your experiment, you'll be able to visit [the OpenStack Dashboard WWW interface](http://{host-%s}/horizon/auth/login/?next=/horizon/project/instances/) (approx. 5-15 minutes). If you've selected the Pike release (or newer), you can also login to [your experiment's Grafana WWW interface](http://{host-%s}:3000/dashboard/db/openstack-instance-statistics?orgId=1) and view OpenStack instance VM statistics once you've created some VMs. %s
+ Once your experiment nodes have booted, and this profile's configuration scripts have finished configuring OpenStack inside your experiment, you'll be able to visit [the OpenStack Dashboard WWW interface](http://{host-%s}/horizon/auth/login/?next=/horizon/project/instances/) (approx. 5-15 minutes). %s %s
Please wait to login to the OpenStack dashboard until the setup scripts have completed (we've seen Dashboard issues with content not appearing if you login before configuration is complete). There are multiple ways to determine if the scripts have finished:
- First, you can watch the experiment status page: the overall State will say \"booted (startup services are still running)\" to indicate that the nodes have booted up, but the setup scripts are still running.
- Second, the Topology View will show you, for each node, the status of the startup command on each node (the startup command kicks off the setup scripts on each node). Once the startup command has finished on each node, the overall State field will change to \"ready\". If any of the startup scripts fail, you can mouse over the failed node in the topology viewer for the status code.
- - Finally, the profile configuration scripts also send you two emails: once to notify you that controller setup has started, and a second to notify you that setup has completed. Once you receive the second email, you can login to the Openstack Dashboard and begin your work.
+ - Third, the profile configuration scripts also send you two emails: once to notify you that controller setup has started, and a second to notify you that setup has completed. Once you receive the second email, you can login to the Openstack Dashboard and begin your work.
+ - Finally, you can view [the profile setup script logfiles](http://{host-%s}:7999/) as the setup scripts run. Use the `admin` username and the random password above.
**NOTE:** If the web interface rejects your password or gives another error, the scripts might simply need more time to set up the backend. Wait a few minutes and try again. If you don't receive any email notifications, you can SSH to the 'ctl' node, become root, and check the primary setup script's logfile (/root/setup/setup-controller.log). If near the bottom there's a line that includes 'Your OpenStack instance has completed setup', the scripts have finished, and it's safe to login to the Dashboard.
@@ -478,7 +483,7 @@ The profile's setup scripts are automatically installed on each node in `/tmp/se
### Detailed Parameter Documentation
%s
- """ % (params.controllerHost,params.controllerHost,passwdHelp,detailedParamAutoDocs)
+ """ % (params.controllerHost,grafanaInstructions,passwdHelp,params.controllerHost,detailedParamAutoDocs)
#
# Setup the Tour info with the above description and instructions.
@@ -611,6 +616,8 @@ else:
#
# Construct the disk image URNs we're going to set the various nodes to load.
+ # NB: we stopped generating OSNM images at Rocky for x86/aarch64; and at
+ # Queens for ppc64le.
#
image_project = 'emulab-ops'
image_urn = 'emulab.net'
@@ -639,6 +646,9 @@ elif params.release == 'pike':
elif params.release == 'queens':
image_os = 'UBUNTU18-64'
image_tag_rel = '-Q'
+ elif params.release == 'rocky':
+ image_os = 'UBUNTU18-64'
+ image_tag_rel = '-R'
else:
image_os = 'UBUNTU16-64'
params.fromScratch = True
@@ -654,24 +664,32 @@ if params.fromScratch:
else:
image_tag_cn = '-OSCN'
image_tag_nm = '-OSNM'
+ if params.release in [ 'rocky' ]:
+ # See above comment; we stopped generating OSNM images at Rocky
+ # for x86/aarch64; and at Queens for ppc64le.
+ image_tag_nm = '-STD'
image_tag_cp = '-OSCP'
pass
#
# XXX: special handling for ppc64le at Clemson because of special disk
- # image names, and because only >= Queens is available for them.
+ # image names for UBUNTU18-64-STD and UBUNTU18-*OSC*-Q, and because only
+ # >= Queens is available for them.
#
if params.osNodeType == 'ibm8335':
image_urn = 'clemson.cloudlab.us'
if params.fromScratch:
image_os = 'UBUNTU18-PPC64LE'
image_tag_cn = image_tag_nm = image_tag_cp = ''
- else:
+ elif params.release == 'queens':
image_os = 'UBUNTU18-PPC'
+ # See above comment; we stopped generating OSNM images at Rocky
+ # for x86/aarch64; and at Queens for ppc64le.
image_tag_nm = ''
- if params.release not in [ 'queens' ]:
+ if params.release not in [ 'queens','rocky' ]:
perr = portal.ParameterError(
- "You can only run the Queens release (or greater) on `ibm8335` (POWER8) hardware!",
+ "You can only run the Queens release, or greater, on `ibm8335` (POWER8) hardware!",
['release','osNodeType'])
pc.reportError(perr)
pc.verifyParameters()
......
@@ -263,8 +263,18 @@ if [ $OSVERSION -ge $OSLIBERTY ]; then
fi
cname=`getfqdn $CONTROLLER`
- crudini --set /etc/nova/nova.conf $VNCSECTION vncserver_listen ${MGMTIP}
- crudini --set /etc/nova/nova.conf $VNCSECTION vncserver_proxyclient_address ${MGMTIP}
+ if [ $OSVERSION -lt $OSQUEENS ]; then
+ crudini --set /etc/nova/nova.conf $VNCSECTION \
+ vncserver_listen ${MGMTIP}
+ crudini --set /etc/nova/nova.conf $VNCSECTION \
+ vncserver_proxyclient_address ${MGMTIP}
+ else
+ crudini --set /etc/nova/nova.conf $VNCSECTION \
+ server_listen ${MGMTIP}
+ crudini --set /etc/nova/nova.conf $VNCSECTION \
+ server_proxyclient_address ${MGMTIP}
+ fi
#
# https://bugs.launchpad.net/nova/+bug/1635131
#
......
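The conditional above switches to the renamed [vnc] options (server_listen / server_proxyclient_address) on Queens and newer, keeping the old vncserver_* names for earlier releases. A quick sanity check on a Queens-or-newer node, assuming $VNCSECTION resolves to the vnc section:

    crudini --get /etc/nova/nova.conf vnc server_listen
    crudini --get /etc/nova/nova.conf vnc server_proxyclient_address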
@@ -17,6 +17,48 @@ if [ -f $OURDIR/setup-driver-done ]; then
echo "setup-driver already ran; not running again"
exit 0
fi
+ #
+ # Setup nginx to show our setup/config directory.
+ #
+ if [ "$HOSTNAME" = "$CONTROLLER" ]; then
+ maybe_install_packages nginx
+ # Handle case where nginx won't start because the default site
+ # (which is enabled!) needs port 80, and apache might be listening
+ # there.
+ if [ ! $? -eq 0 ]; then
+ rm -f /etc/nginx/sites-available/default \
+ /etc/nginx/sites-enabled/default
+ maybe_install_packages nginx
+ fi
+ echo "$ADMIN_PASS" | htpasswd -n -i admin > /etc/nginx/htpasswd
+ chown www-data:root /etc/nginx/htpasswd
+ chmod 660 /etc/nginx/htpasswd
+ mkdir /var/www/profile-setup
+ chown www-data /var/www/profile-setup
+ mount -o bind,ro $OURDIR /var/www/profile-setup/
+ echo $OURDIR /var/www/profile-setup none defaults,bind 0 0 >> /etc/fstab
+ cat <<EOF >/etc/nginx/sites-available/profile-setup-logs
+ server {
+ include /etc/nginx/mime.types;
+ types { text/plain log; }
+ listen 7999 default_server;
+ listen [::]:7999 default_server;
+ root /var/www/profile-setup;
+ index index.html;
+ server_name _;
+ location / {
+ autoindex on;
+ auth_basic "profile-setup";
+ auth_basic_user_file /etc/nginx/htpasswd;
+ }
+ }
+ EOF
+ ln -s /etc/nginx/sites-available/profile-setup-logs \
+ /etc/nginx/sites-enabled/profile-setup-logs
+ service_enable nginx
+ service_restart nginx
+ fi
logtstart "driver"
# Copy our source code into $OURDIR for future use:
......
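The nginx block above publishes the setup/config directory on port 7999 behind HTTP basic auth for the admin user. A rough check once the controller is up (hostname and password are placeholders):

    curl -u admin:<ADMIN_PASS> http://<ctl-node>:7999/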
@@ -29,14 +29,14 @@ fi
cd $IMAGEDIR
- imgfile=`get_url "http://boss.utah.cloudlab.us/downloads/openstack/trusty-server-cloudimg-arm64-disk1.img https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-arm64-disk1.img"`
- imgname=trusty-server
+ imgfile=`get_url "http://boss.utah.cloudlab.us/downloads/openstack/xenial-server-cloudimg-arm64-disk1.img https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-disk1.img"`
+ imgname=xenial-server
if [ ! $? -eq 0 ]; then
- echo "ERROR: failed to download trusty-server-cloudimg-arm64-disk1.img from Cloudlab or Ubuntu!"
+ echo "ERROR: failed to download xenial-server-cloudimg-arm64-disk1.img from Cloudlab or Ubuntu!"
else
imgfile=`extract_image "$imgfile"`
if [ ! $? -eq 0 ]; then
- echo "ERROR: failed to extract trusty-server-cloudimg-arm64-disk1.img"
+ echo "ERROR: failed to extract xenial-server-cloudimg-arm64-disk1.img"
else
(fixup_image "$imgfile" \
&& sched_image "$IMAGEDIR/$imgfile" "$imgname" ) \
......
@@ -29,13 +29,13 @@ fi
cd $IMAGEDIR
- echo "*** Configuring a trusty-server x86_64 image ..."
- imgfile=trusty-server-cloudimg-amd64-disk1.img
- imgname=trusty-server
+ echo "*** Configuring a xenial-server x86_64 image ..."
+ imgfile=xenial-server-cloudimg-amd64-disk1.img
+ imgname=xenial-server
#
# First try the local boss, then Apt, then just grab from Ubuntu.
#
- imgfile=`get_url "http://boss.${OURDOMAIN}/downloads/openstack/$imgfile http://boss.apt.emulab.net/downloads/openstack/$imgfile https://cloud-images.ubuntu.com/trusty/current/$imgfile"`
+ imgfile=`get_url "http://boss.${OURDOMAIN}/downloads/openstack/$imgfile http://boss.apt.emulab.net/downloads/openstack/$imgfile https://cloud-images.ubuntu.com/xenial/current/$imgfile"`
if [ ! $? -eq 0 ]; then
echo "ERROR: failed to download $imgfile from Cloudlab or Ubuntu!"
else
......
@@ -341,6 +341,7 @@ OSNEWTON=14
OSOCATA=15
OSPIKE=16
OSQUEENS=17
+ OSROCKY=18
. /etc/lsb-release
#
@@ -356,6 +357,7 @@ if [ ! "x$OSRELEASE" = "x" ]; then
if [ $OSCODENAME = "ocata" ]; then OSVERSION=$OSOCATA ; fi
if [ $OSCODENAME = "pike" ]; then OSVERSION=$OSPIKE ; fi
if [ $OSCODENAME = "queens" ]; then OSVERSION=$OSQUEENS ; fi
+ if [ $OSCODENAME = "rocky" ]; then OSVERSION=$OSROCKY ; fi
#
# We only use cloudarchive for LTS images!
......
@@ -40,8 +40,12 @@ EOF
sysctl -p
- maybe_install_packages neutron-plugin-ml2 neutron-plugin-linuxbridge-agent \
- conntrack
+ maybe_install_packages neutron-plugin-ml2 conntrack
+ if [ $OSVERSION -ge $OSROCKY ]; then
+ maybe_install_packages neutron-linuxbridge-agent
+ else
+ maybe_install_packages neutron-plugin-linuxbridge-agent
+ fi
# Only the controller node runs neutron-server and needs the DB.
if [ "$HOSTNAME" != "$CONTROLLER" ]; then
......
@@ -40,8 +40,12 @@ EOF
sysctl -p
- maybe_install_packages neutron-plugin-ml2 neutron-plugin-openvswitch-agent \
- conntrack
+ maybe_install_packages neutron-plugin-ml2 conntrack
+ if [ $OSVERSION -ge $OSROCKY ]; then
+ maybe_install_packages neutron-openvswitch-agent
+ else
+ maybe_install_packages neutron-plugin-openvswitch-agent
+ fi
# Only the controller node runs neutron-server and needs the DB.
if [ "$HOSTNAME" != "$CONTROLLER" ]; then
......
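The version checks above reflect that the old neutron-plugin-*-agent package names are no longer shipped for Rocky-era packages. To confirm which agent package name a given node's repositories actually provide (only one of the two should have an installable candidate):

    apt-cache policy neutron-openvswitch-agent neutron-plugin-openvswitch-agent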
@@ -57,6 +57,13 @@ if [ $USE_NEUTRON_LBAAS -eq 1 -a $OSVERSION -ge $OSNEWTON ]; then
maybe_install_packages neutron-lbaasv2-agent
fi
+ if [ $OSVERSION -eq $OSROCKY ]; then
+ crudini --set /etc/neutron/neutron.conf oslo_concurrency \
+ lock_path /var/lib/neutron/lock
+ mkdir -p /var/lib/neutron/lock/
+ chown neutron:neutron /var/lib/neutron/lock
+ fi
# Configure the L3 agent.
crudini --set /etc/neutron/l3_agent.ini DEFAULT \
interface_driver $interface_driver
@@ -78,8 +85,11 @@ crudini --set /etc/neutron/dhcp_agent.ini DEFAULT \
if [ "${ML2PLUGIN}" = "openvswitch" ]; then
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT use_namespaces True
#crudini --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_delete_namespaces True
- else
- crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True
fi
+ # Enable this by default for >= Ocata; that's all I have tested.
+ if [ $OSVERSION -ge $OSOCATA ]; then
+ crudini --set /etc/neutron/dhcp_agent.ini DEFAULT \
+ enable_isolated_metadata True
+ fi
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT verbose ${VERBOSE_LOGGING}
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT debug ${DEBUG_LOGGING}
......