Commit 10e12b53 authored by Leigh Stoller

Support for push webhooks for repo-based profiles:

We are running another apache server on boss, on port 51369, which
invokes a backend perl script that maps the URL path argument to a
profile and then calls out to manage_profile to pull from the
repository and update the profile to reflect the new HEAD. We use
mod_rewrite in the apache config to restrict requests to exactly the
one URL form that is accepted, modulo the value of the secret token.
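
For reference, a sketch of what that vhost could look like; the apache
config itself is not part of this diff, so the URL prefix, token
pattern, and install path below are illustrative guesses:

    Listen 51369
    <VirtualHost *:51369>
        RewriteEngine on
        # Reject anything that is not exactly /push/<token>; the "/push"
        # prefix and the 64-character cap are assumptions.
        RewriteCond %{REQUEST_URI} !^/push/[-\w]{1,64}$
        RewriteRule .* - [F]

        # Hand the token to the hook script as PATH_INFO.
        ScriptAliasMatch ^/push/([-\w]+)$ /usr/testbed/apt/apt_gitrepo.hook/$1
    </VirtualHost>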

I had to refactor a bunch of code in manage_profile to make it easier to
add a new entry point for modification from a git repo. This has needed
doing for a long time; I had never cleaned up the original profile
creation code.

On the edit profile web page, there is a new row in the Repository panel
that shows the Push URL, along with an explanatory help modal.

There is a new slow-polling timer that watches for a change to the repo
hash and updates the web page in place from the repo, for example when a
push hook fires and changes the repo.
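
In rough terms, the in-place update amounts to a client-side poll like
the sketch below; the endpoint, helper names, and interval are
placeholders (and jQuery is assumed), not the portal's actual code:

    // Hypothetical sketch: poll for a repohash change and redraw in place.
    var lastHash = initialRepoHash;   // hash rendered into the page

    setInterval(function () {
        $.getJSON("/apt/repohash.php?uuid=" + profileUUID, function (json) {
            if (json.repohash && json.repohash !== lastHash) {
                lastHash = json.repohash;
                UpdateFromRepo();     // refresh source/params from the repo
            }
        });
    }, 60 * 1000);                    // "slow" poll; interval is a guess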
parent f9ce0452
@@ -291,6 +291,26 @@ sub Lookup($$;$$)
}
return undef;
}
#
# Lookup by repo key.
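# The key is the secret token embedded in the profile's push (webhook) URL.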
#
sub LookupByRepoKey($$)
{
    my ($class, $token) = @_;

    return undef
        if ($token !~ /^\w+$/);

    my $query_result =
        DBQueryWarn("select uuid from apt_profile_versions ".
                    "where repokey='$token'");
    return undef
        if (!defined($query_result) || !$query_result->numrows);

    my ($uuid) = $query_result->fetchrow_array();
    return Lookup($class, $uuid);
}
AUTOLOAD {
my $self = $_[0];
@@ -423,6 +443,7 @@ sub Create($$$$$$)
#
$cquery .= "name=$name,profileid='$profileid'";
$cquery .= ",pid='$pid',pid_idx='$pid_idx'";
$cquery .= ",gid='$gid',gid_idx='$gid_idx'";
# And the versions table.
$vquery = $cquery;
@@ -445,6 +466,7 @@ sub Create($$$$$$)
$vquery .= ",repourl=" . DBQuoteSpecial($argref->{'repourl'});
$vquery .= ",reponame=" . DBQuoteSpecial($argref->{'reponame'});
$vquery .= ",repohash=" . DBQuoteSpecial($argref->{'repohash'});
$vquery .= ",repokey=" . DBQuoteSpecial($argref->{'repokey'});
}
# Back to the main table.
@@ -512,11 +534,13 @@ sub NewVersion($$)
goto bad
if (! DBQueryWarn("insert into apt_profile_versions ".
" (name,profileid,version,pid,pid_idx, ".
" gid,gid_idx, ".
" creator,creator_idx,updater,updater_idx, ".
" created,uuid, ".
" parent_profileid,parent_version,rspec, ".
" script,paramdefs,reponame,repourl) ".
"select name,profileid,'$newvers',pid,pid_idx, ".
" gid,gid_idx, ".
" creator,creator_idx,'$uid','$uid_idx',".
" now(),uuid(),'$profileid', ".
" '$version',rspec,script,paramdefs, ".
@@ -42,6 +42,7 @@ WEB_BIN_SCRIPTS = webmanage_profile webmanage_instance webmanage_dataset \
webcreate_instance webrungenilib webns2rspec webns2genilib \
webrspec2genilib webmanage_reservations webmanage_gitrepo \
webmanage_images
APACHEHOOKS = apt_gitrepo.hook
WEB_SBIN_SCRIPTS= webportal_xmlrpc
LIBEXEC_SCRIPTS = $(WEB_BIN_SCRIPTS) $(WEB_SBIN_SCRIPTS)
USERLIBEXEC = rungenilib.proxy genilib-jail genilib-iocage gitrepo.proxy
@@ -49,14 +50,14 @@ USERLIBEXEC = rungenilib.proxy genilib-jail genilib-iocage gitrepo.proxy
# These scripts installed setuid, with sudo.
SETUID_BIN_SCRIPTS = rungenilib manage_gitrepo
SETUID_SBIN_SCRIPTS =
SETUID_SUEXEC_SCRIPTS=
SETUID_SUEXEC_SCRIPTS= apt_gitrepo.hook
#
# Force dependencies on the scripts so that they will be rerun through
# configure if the .in file is changed.
#
all: $(BIN_SCRIPTS) $(SBIN_SCRIPTS) $(LIBEXEC_SCRIPTS) $(SUBDIRS) \
$(LIB_SCRIPTS) $(USERLIBEXEC) all-subdirs
$(LIB_SCRIPTS) $(USERLIBEXEC) $(APACHEHOOKS) all-subdirs
subboss:
@@ -66,7 +67,9 @@ install: $(addprefix $(INSTALL_BINDIR)/, $(BIN_SCRIPTS)) \
$(addprefix $(INSTALL_SBINDIR)/, $(SBIN_SCRIPTS)) \
$(addprefix $(INSTALL_LIBDIR)/, $(LIB_SCRIPTS)) \
$(addprefix $(INSTALL_LIBEXECDIR)/, $(LIBEXEC_SCRIPTS)) \
$(addprefix $(INSTALL_LIBEXECDIR)/, $(APACHEHOOKS)) \
$(addprefix $(INSTALL_DIR)/opsdir/libexec/, $(USERLIBEXEC)) \
$(addprefix $(INSTALL_DIR)/apt/, $(APACHEHOOKS)) \
$(INSTALL_ETCDIR)/cloudlab-fedonly.json \
$(INSTALL_ETCDIR)/cloudlab-nofed.json
@@ -106,4 +109,10 @@ $(INSTALL_DIR)/opsdir/libexec/%: %
	-mkdir -p $(INSTALL_DIR)/opsdir/libexec
	$(INSTALL) $< $@
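# Hard-link each apache hook into $(INSTALL_DIR)/apt alongside the libexec
# copy, presumably for the webhook apache instance to invoke.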
$(INSTALL_DIR)/apt/%: %
	@echo "Installing $@"
	-mkdir -p $(INSTALL_DIR)/apt
	-rm -f $(INSTALL_DIR)/apt/$<
	ln $(INSTALL_LIBEXECDIR)/$< $(INSTALL_DIR)/apt/$<
.PHONY: $(SUBDIRS) install
#!/usr/bin/perl -w
#
# Copyright (c) 2008-2017 University of Utah and the Flux Group.
#
# {{{GENIPUBLIC-LICENSE
#
# GENI Public License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#
# }}}
#
use strict;
use English;
use Getopt::Std;
use Data::Dumper;
use JSON;
use CGI;
# Configure variables
my $TB = "@prefix@";
my $MAINSITE = @TBMAINSITE@;
my $TBOPS = "@TBOPSEMAIL@";
my $MANAGEPROFILE = "$TB/bin/manage_profile";
# Locals
my $debug = 0;
my $token;
#
# Turn off line buffering on output
#
$| = 1;
#
# Untaint the path
#
$ENV{'PATH'} = '/bin:/usr/bin:/usr/local/bin';
delete @ENV{'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};
# Testbed libraries.
use lib '@prefix@/lib';
use emutil;
use libaudit;
use libtestbed;
use APT_Profile;
use User;
use Group;
#
# We want to make sure we send back a header.
#
sub SendStatus($)
{
    my ($status) = @_;

    if (1) {
        # Error, we are done.
        LogEnd($status);
    }
    else {
        # We want to keep logging, not send an email here.
        LogStop();
    }
    print "Content-Type: text/plain \n\n";
    print "We love all profiles equally.\n";
    print "Exited with $status\n";
    exit(0);
}
#
# Send logs to tblogs
#
LogStart(0, undef, LIBAUDIT_LOGTBLOGS());
# The query holds the token we need to find the profile.
my $query = new CGI();
if ($debug > 1) {
    my %headers = map { $_ => $query->http($_) } $query->http();
    print STDERR Dumper(\%headers);
    print STDERR Dumper($query);
}
#
# The profile is provided in the path.
#
if (!exists($ENV{'PATH_INFO'}) || $ENV{'PATH_INFO'} eq "") {
    print STDERR "No path info\n";
    SendStatus(1);
}
my $pathinfo = $ENV{'PATH_INFO'};
if ($pathinfo =~ /^\/([-\w]+)$/) {
    $token = $1;
}
else {
    print STDERR "Bad path info\n";
    SendStatus(1);
}
if (!defined($token)) {
    print STDERR "No token provided\n";
    SendStatus(1);
}
if ($token !~ /^[-\w]+$/ || length($token) > 64) {
    print STDERR "Bad token format\n";
    SendStatus(1);
}
#
# Before calling out, find the profile.
#
my $profile = APT_Profile->LookupByRepoKey($token);
if (!defined($profile)) {
    print STDERR "No profile for token $token\n";
    SendStatus(1);
}
if (!defined($profile->repourl())) {
    print STDERR "Not a repo based profile for token $token\n";
    SendStatus(1);
}
#
# Let the parent return; no need to keep the client waiting since it
# does not care anyway.
#
my $mypid = fork();
if ($mypid) {
    #
    # Must end with this for the client. Does not return.
    #
    SendStatus(0);
}
sleep(1);
libaudit::AuditFork();

if ($debug) {
    print "$profile\n";
}
#
# We are going to do the update as the profile creator.
#
my $creator = User->Lookup($profile->creator_idx());
if (!defined($creator)) {
    print STDERR "Cannot lookup creator for $profile\n";
    exit(1);
}
my $group = Group->Lookup($profile->gid_idx());
if (!defined($group)) {
    print STDERR "Cannot lookup group for $profile\n";
    exit(1);
}
if ($creator->FlipTo($group)) {
    print STDERR "Could not flip to $creator\n";
    exit(1);
}
my $output =
    emutil::ExecQuiet("$MANAGEPROFILE updatefromrepo " . $profile->uuid());
if ($?) {
    print STDERR $output;
    exit(1);
}
if ($debug > 1) {
    print $output;
}
LogEnd(0);
exit(0);
@@ -39,30 +39,25 @@ sub usage()
{
print("Usage: manage_profile create [-s uuid | -c uuid] <xmlfile>\n");
print("Usage: manage_profile update <profile> <xmlfile>\n");
print("Usage: manage_profile updatefromrepo <profile>\n");
print("Usage: manage_profile publish <profile>\n");
print("Usage: manage_profile delete -a <profile>\n");
print("Usage: manage_profile undelete pid,name:version\n");
print("Usage: manage_profile listimages <profile>\n");
exit(-1);
}
my $optlist = "ds:t:c:m";
my $optlist = "dvt:m";
my $debug = 0;
my $update = 0;
my $snap = 0;
my $copy = 0;
my $fromrepo = 0;
my $updatemaster= 0;
my $copyuuid;
my $uuid;
my $rspec;
my $script;
my $profile;
my $parent_profile;
my $instance;
my $aggregate;
my $node_id;
my $verbose = 1;
my $webtask;
my $webtask_id;
# VerifyXML sets these, need to declare early. Need to clean this up.
my %new_args = ();
my %update_args = ();
my %modifiers = ();
my $rspec;
my $script;
my $project;
#
# Configure variables
@@ -107,20 +102,20 @@ use EmulabFeatures;
# Protos
sub fatal($);
sub UserError(;$);
sub UserError($);
sub CreateProfile();
sub ModifyProfile();
sub UpdateProfileFromRepo();
sub DeleteProfile();
sub UnDeleteProfile($);
sub CanDelete($$);
sub PublishProfile($);
sub InsertImageRecords($);
sub ListImages();
# Parse args below.
if (@ARGV < 2) {
usage();
}
my $action = shift(@ARGV);
sub HandleScript($);
sub VerifyXML($$);
sub ModifyProfileInternal($$$);
# The web interface (and in the future the xmlrpc interface) sets this.
my $this_user = User->ImpliedUser();
if (! defined($this_user)) {
@@ -141,13 +136,8 @@ if (! getopts($optlist, \%options)) {
if (defined($options{"d"})) {
$debug = 1;
}
if (defined($options{"s"})) {
$snap = 1;
$copyuuid = $options{"s"};
}
if (defined($options{"c"})) {
$copy = 1;
$copyuuid = $options{"c"};
if (defined($options{"v"})) {
$verbose = 1;
}
if (defined($options{"t"})) {
$webtask_id = $options{"t"};
@@ -160,31 +150,9 @@ if (defined($options{"t"})) {
}
$webtask->AutoStore(1);
}
if ($action eq "update") {
usage()
if (!@ARGV);
$update = 1;
$uuid = shift(@ARGV);
}
elsif ($action eq "delete") {
exit(DeleteProfile());
}
elsif ($action eq "undelete") {
exit(UnDeleteProfile(shift(@ARGV)));
}
elsif ($action eq "publish") {
exit(PublishProfile(shift(@ARGV)));
}
elsif ($action eq "insertimages") {
exit(InsertImageRecords(shift(@ARGV)));
}
elsif ($action eq "listimages") {
exit(ListImages());
}
elsif ($action ne "create") {
usage();
}
my $xmlfile = shift(@ARGV);
usage()
if (!@ARGV);
my $action = shift(@ARGV);
#
# These are the fields that we allow to come in from the XMLfile.
@@ -221,243 +189,438 @@ my %xmlfields =
"repourl" => ["repourl", $SLOT_OPTIONAL],
);
#
# Must wrap the parser in eval since it exits on error.
#
my $xmlparse = eval { XMLin($xmlfile,
VarAttr => 'name',
ContentKey => '-content',
SuppressEmpty => undef); };
fatal($@)
if ($@);
#
# Process and dump the errors (formatted for the web interface).
# We should probably XML format the errors instead but not sure I want
# to go there yet.
#
my %errors = ();
#
# Make sure all the required arguments were provided.
#
my $key;
foreach $key (keys(%xmlfields)) {
my (undef, $required, undef) = @{$xmlfields{$key}};
$errors{$key} = "Required value not provided"
if ($required & $SLOT_REQUIRED &&
! exists($xmlparse->{'attribute'}->{"$key"}));
if ($action eq "update") {
exit(ModifyProfile());
}
elsif ($action eq "updatefromrepo") {
exit(UpdateProfileFromRepo());
}
elsif ($action eq "delete") {
exit(DeleteProfile());
}
elsif ($action eq "undelete") {
exit(UnDeleteProfile(shift(@ARGV)));
}
elsif ($action eq "publish") {
exit(PublishProfile(shift(@ARGV)));
}
elsif ($action eq "insertimages") {
exit(InsertImageRecords(shift(@ARGV)));
}
elsif ($action eq "listimages") {
exit(ListImages());
}
elsif ($action eq "create") {
exit(CreateProfile());
}
UserError()
if (keys(%errors));
#
# We build up an array of arguments to create.
# Create/Update a profile.
#
my %new_args = ();
my %update_args = ();
my %modifiers = ();
sub CreateProfile()
{
my $optlist = "s:c:";
my $snap = 0;
my $copy = 0;
my $copyuuid;
my $fromrepo = 0;
my $instance;
my $aggregate;
my $parent_profile;
my $node_id;
my $usererror;
my %errors = ();
my %options = ();
if (! getopts($optlist, \%options)) {
usage();
}
if (defined($options{"s"})) {
$snap = 1;
$copyuuid = $options{"s"};
}
if (defined($options{"c"})) {
$copy = 1;
$copyuuid = $options{"c"};
}
usage()
if (@ARGV != 1);
my $xmlfile = shift(@ARGV);
# This will exit if there are any errors.
VerifyXML($xmlfile, 0);
foreach $key (keys(%{ $xmlparse->{'attribute'} })) {
my $value = $xmlparse->{'attribute'}->{"$key"}->{'value'};
if (!defined($value)) { # Empty string comes from XML as an undef value.
$xmlparse->{'attribute'}->{"$key"}->{'value'} = $value = "";
#
# We need to make sure the project exists and is a valid project for
# the creator (current user).
#
$project = Project->Lookup($new_args{"pid"});
if (!defined($project)) {
UserError({"profile_pid" => "No such project exists"})
}
elsif (!$project->AccessCheck($this_user, TB_PROJECT_MAKEIMAGEID())) {
UserError({"profile_pid" => "Not enough permission in this project"});
}
# Check datasets.
if (defined($rspec)) {
my $errmsg = "Bad dataset";
if (APT_Profile::CheckDatasets($rspec, \$errmsg)) {
UserError($errmsg);
}
}
print STDERR "User attribute: '$key' -> '$value'\n"
if ($debug);
#
# Need to do initial clone.
#
if (exists($new_args{'repourl'})) {
my $repourl = $new_args{'repourl'};
my $reponame = NewUUID();
my $repohash;
my $field = $key;
if (!exists($xmlfields{$field})) {
next; # Skip it.
my $output =
emutil::ExecQuiet("$MANAGEGITREPO clone -n $reponame '$repourl'");
if ($?) {
UserError($output);
}
$new_args{'reponame'} = $reponame;
#
# Get the commit hash for the HEAD commit.
#
$output = emutil::ExecQuiet("$MANAGEGITREPO hash -n $reponame");
if ($?) {
UserError($output);
}
$repohash = $output;
chomp($repohash);
$new_args{'repohash'} = $repohash;
$fromrepo = 1;
#
# And an access key for the push webhook.
#
my $repokey = TBGenSecretKey();
if (!defined($repokey)) {
fatal("Could not generate a repo access key");
}
$new_args{'repokey'} = $repokey;
}
my ($dbslot, $required, $default) = @{$xmlfields{$field}};
# Script parameters
if (defined($script) && $script ne "") {
my $paramdefs = HandleScript($script);
if ($required & $SLOT_REQUIRED) {
# A slot that must be provided, so do not allow a null value.
if (!defined($value)) {
$errors{$key} = "Must provide a non-null value";
next;
$new_args{"paramdefs"} = $paramdefs if ($paramdefs ne "");
}
#
# Are we going to snapshot a node in an experiment? If so we
# sanity check to make sure there is just one node.
#
if ($snap) {
$instance = APT_Instance->Lookup($copyuuid);
if (!defined($instance)) {
fatal("Could not look up instance $copyuuid");
}
if ($instance->status() ne "ready") {
UserError("Instance must be in the ready state for cloning");
}
if ($instance->AggregateList() != 1) {
UserError("Must be only one aggregate to snapshot");
}
($aggregate) = $instance->AggregateList();
my $manifest = GeniXML::Parse($aggregate->manifest());
if (! defined($manifest)) {
fatal("Could not parse manifest");
}
my @nodes = GeniXML::FindNodes("n:node", $manifest)->get_nodelist();
if (@nodes != 1) {
UserError("Too many nodes (> 1) to snapshot");
}
my $sliver_urn = GeniXML::GetSliverId($nodes[0]);
my $manager_urn= GeniXML::GetManagerId($nodes[0]);
$node_id = GeniXML::GetVirtualId($nodes[0]);
if (! (defined($sliver_urn) &&
$manager_urn eq $aggregate->aggregate_urn())) {
UserError("$node_id is not at " . $aggregate->aggregate_urn());
}
$parent_profile = $instance->Profile();
}
if ($required & $SLOT_OPTIONAL) {
# Optional slot. If value is null skip it. Might not be the correct
# thing to do all the time?
if (!defined($value)) {
next
if (!defined($default));
$value = $default;
elsif ($copy) {
$parent_profile = APT_Profile->Lookup($copyuuid);
if (!defined($parent_profile)) {
fatal("Could not look up copy profile $copyuuid");
}
}
if ($required & $SLOT_ADMINONLY) {
# Admin implies optional, but that's probably not the correct approach.
$errors{$key} = "Administrators only"
if (! $this_user->IsAdmin());
if (defined(APT_Profile->Lookup($new_args{"pid"}, $new_args{"name"}))) {
$errors{"profile_name"} = "Already in use";
UserError(\%errors);
}
if ($required & $SLOT_MODIFIER) {
$modifiers{$dbslot} = $value;
next;
my $profile = APT_Profile->Create($parent_profile, $project,
$this_user, \%new_args, \$usererror);
if (!defined($profile)) {
if (defined($usererror)) {
$errors{"profile_name"} = $usererror;
UserError(\%errors);
}
fatal("Could not create new profile");
}
# This is deprecated.
$profile->Publish();
#
# Now do the snapshot operation.
#
if (defined($instance)) {
my $apt_uuid = $instance->uuid();
my $imagename = $profile->name();
my $new_uuid = $profile->uuid();
# We want to use the webtask associated with the new profile.
my $pwebtask = $profile->webtask();
# But the image details are stored in the instance webtask.
my $iwebtask = $instance->webtask();
if ($profile->Lock()) {
$profile->Delete(1);
fatal("Could not lock new profile");
}
my $command = "$MANAGEINSTANCE -t " . $pwebtask->task_id() . " ".
"snapshot $apt_uuid -c $new_uuid -n $node_id -i $imagename";
if ($verbose) {
print "$command\n";
}
#
# This returns pretty fast, and then the imaging takes place in
# the background at the aggregate. The script keeps a process
# running in the background waiting for the sliver to unlock and
# the sliverstatus to indicate the node is running again.
#
my $output = emutil::ExecQuiet($command);
if ($?) {
my $stat = $? >> 8;
$profile->Delete(1);
print STDERR $output . "\n";
if ($stat < 0) {
fatal("Failed to create disk image!");
}
UserError($output);
}
print $output;
#
# The script helpfully put the new image urn in the webtask.
#
$pwebtask->AutoStore(1);
$pwebtask->Refresh();
$iwebtask->Refresh();
my $newimage;
if (GetSiteVar("protogeni/use_imagetracker") &&
EmulabFeatures->FeatureEnabled("APT_UseImageTracker",
$this_user, $project)) {
$newimage = $iwebtask->image_urn();
}
else {
$newimage = $iwebtask->image_url();
}
if (!defined($newimage)) {
$profile->Delete(1);
fatal("Did not get an image for $node_id");
}
if ($profile->UpdateDiskImage($node_id, $newimage, 0)) {
$profile->Delete(1);
fatal("Could not update image in rspec for $node_id; $newimage;");
}
# Tell web interface cloning has started.
$pwebtask->cloning(1);
# And what is being cloned.
$pwebtask->cloning_instance($instance->uuid());
$pwebtask->image_name($iwebtask->image_name());
#
# Exit and leave child to poll.
#
if (! $debug) {
my $child = fork();
if ($child) {
exit(0);
}
# Close our descriptors so web server thinks we are disconnected.
if ($webtask_id) {
for (my $i = 0; $i < 1024; $i++) {
POSIX::close($i);
}
}
# Let parent exit;
sleep(2);
POSIX::setsid();
}
#
# We are waiting for the backend process to exit. The web interface is
# reading the webtask structure, but if it fails we want to know that
# so we can delete the profile.
#
while (1) {
sleep(10);
# Now check that the value is legal.
if (! TBcheck_dbslot($value, "apt_profiles",