Commit 6f9f7aa7 authored by Keith Downie

Merge remote-tracking branch 'upstream/master'

parents d96a35b7 740744df
#
# Copyright (c) 2000-2016 University of Utah and the Flux Group.
# Copyright (c) 2000-2017 University of Utah and the Flux Group.
#
# {{{EMULAB-LICENSE
#
......@@ -292,39 +292,27 @@ mfsoscheck:
fi
mfs: mfsoscheck
@$(MAKE) -C os mfs
@$(MAKE) -C clientside mfs
mfs-nostatic: mfsoscheck
@NOSTATIC=1 $(MAKE) -C os mfs
@NOSTATIC=1 $(MAKE) -C clientside mfs
mfs-install: destdircheck mfs client-mkdirs
@$(MAKE) -C os mfs-install
@$(MAKE) -C clientside mfs-install
mfs-nostatic-install: destdircheck mfs-nostatic client-mkdirs
@$(MAKE) -C os mfs-install
@$(MAKE) -C clientside mfs-install
frisbee-mfs: mfsoscheck
@$(MAKE) -C cdrom/groklilo client
@$(MAKE) -C os frisbee-mfs
@$(MAKE) -C clientside frisbee-mfs
frisbee-mfs-nostatic: mfsoscheck
@NOSTATIC=1 $(MAKE) -C cdrom/groklilo client
@NOSTATIC=1 $(MAKE) -C os frisbee-mfs
@NOSTATIC=1 $(MAKE) -C clientside frisbee-mfs
frisbee-mfs-install: destdircheck frisbee-mfs
@CLIENT_BINDIR=/etc/testbed $(MAKE) -e -C cdrom/groklilo client-install
@$(MAKE) -C os frisbee-mfs-install
@$(MAKE) -C clientside frisbee-mfs-install
frisbee-mfs-nostatic-install: destdircheck frisbee-mfs-nostatic
@CLIENT_BINDIR=/etc/testbed $(MAKE) -e -C cdrom/groklilo client-install
@$(MAKE) -C os frisbee-mfs-install
@$(MAKE) -C clientside frisbee-mfs-install
newnode-mfs: mfsoscheck
......
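For context, these MFS targets are normally driven from the top of a configured object tree; a minimal sketch, assuming the usual GNU make invocation and the DESTDIR staging convention implied by destdircheck (the staging path is a placeholder):

    gmake mfs
    sudo gmake DESTDIR=/some/staging/dir mfs-install
    # frisbee MFS without statically linked binaries:
    gmake frisbee-mfs-nostatic
    sudo gmake DESTDIR=/some/staging/dir frisbee-mfs-nostatic-install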
#!/usr/bin/perl -wT
#
# Copyright (c) 2000-2016 University of Utah and the Flux Group.
# Copyright (c) 2000-2017 University of Utah and the Flux Group.
#
# {{{EMULAB-LICENSE
#
......@@ -43,7 +43,7 @@ use Getopt::Std;
sub usage()
{
print("Usage: tbacct [-f] [-b] [-u] [-v] ".
"<add|del|mod|passwd|wpasswd|email|freeze|thaw|verify|revoke|dots> ".
"<add|del|mod|passwd|wpasswd|email|freeze|thaw|verify|revoke|dots|deactivate|reactivate> ".
"<user> [args]\n");
exit(-1);
}
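A minimal usage sketch for the new subcommands, assuming they follow the same calling convention as the existing ones (the user name is a placeholder):

    tbacct deactivate jdoe
    tbacct reactivate jdoe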
......
#!/usr/bin/perl -wT
#
# Copyright (c) 2007-2016 University of Utah and the Flux Group.
# Copyright (c) 2007-2017 University of Utah and the Flux Group.
#
# {{{EMULAB-LICENSE
#
......@@ -87,6 +87,27 @@ sub Lookup($$;$)
bless($self, $class);
#
# Grab the webtask. Backwards compat mode, see if there is one associated
# with the object, use that. Otherwise create a new one.
#
my $webtask;
if (defined($self->webtask_id())) {
$webtask = WebTask->Lookup($self->webtask_id());
}
if (!defined($webtask)) {
$webtask = WebTask->LookupByObject($self->uuid());
if (!defined($webtask)) {
$webtask = WebTask->Create();
return undef
if (!defined($webtask));
}
$self->Update({"webtask_id" => $webtask->task_id()}) == 0
or return undef;
}
$self->{'WEBTASK'} = $webtask;
return $self;
}
......@@ -134,6 +155,7 @@ AUTOLOAD {
carp("No such slot '$name' field in class $type");
return undef;
}
sub webtask($) { return $_[0]->{'WEBTASK'}; }
# Break circular reference someplace to avoid exit errors.
sub DESTROY {
......@@ -141,6 +163,7 @@ sub DESTROY {
$self->{'DATASET'} = undef;
$self->{'HASH'} = undef;
$self->{'WEBTASK'} = undef;
}
# Valid Blockstore backend.
......@@ -290,9 +313,9 @@ sub Delete($)
my $certificate = $self->GetCertificate();
$certificate->Delete()
if (defined($certificate));
DBQueryWarn("delete from web_tasks where object_uuid='$uuid'") or
return -1;
$self->webtask()->Delete()
if ($self->webtask());
DBQueryWarn("delete from apt_datasets where uuid='$uuid'") or
return -1;
......
#!/usr/bin/perl -wT
#
# Copyright (c) 2007-2016 University of Utah and the Flux Group.
# Copyright (c) 2007-2017 University of Utah and the Flux Group.
#
# {{{EMULAB-LICENSE
#
......@@ -149,6 +149,27 @@ sub Lookup($$)
APT_Instance::Aggregate->GenTemp($self)};
}
$self->{'AGGREGATES'} = $aggregates;
#
# Grab the webtask. Backwards compat mode, see if there is one associated
# with the object, use that. Otherwise create a new one.
#
my $webtask;
if (defined($self->webtask_id())) {
$webtask = WebTask->Lookup($self->webtask_id());
}
if (!defined($webtask)) {
$webtask = WebTask->LookupByObject($self->uuid());
if (!defined($webtask)) {
$webtask = WebTask->Create();
return undef
if (!defined($webtask));
}
$self->Update({"webtask_id" => $webtask->task_id()}) == 0
or return undef;
}
$self->{'WEBTASK'} = $webtask;
# Add to cache.
$instances{$self->uuid()} = $self;
......@@ -182,6 +203,7 @@ sub Brand($) { return $_[0]->{'BRAND'}; }
sub isAPT($) { return $_[0]->Brand()->isAPT() ? 1 : 0; }
sub isCloud($) { return $_[0]->Brand()->isCloud() ? 1 : 0; }
sub isPNet($) { return $_[0]->Brand()->isPNet() ? 1 : 0; }
sub webtask($) { return $_[0]->{'WEBTASK'}; }
sub AggregateList($) { return values(%{ $_[0]->{'AGGREGATES'} }); }
sub AggregateHash($) { return $_[0]->{'AGGREGATES'}; }
......@@ -193,6 +215,7 @@ sub DESTROY {
$self->{'BRAND'} = undef;
$self->{'AGGREGATES'} = undef;
$self->{'HASH'} = undef;
$self->{'WEBTASK'} = undef;
}
#
......@@ -369,10 +392,6 @@ sub Update($$)
return Refresh($self);
}
#
# NOTE: We should delete the webtask, but the web UI needs it to
# report status back to the user when an experiment is terminated.
#
sub Delete($)
{
my ($self) = @_;
......@@ -382,6 +401,7 @@ sub Delete($)
$agg->Delete() == 0
or return -1;
}
$self->webtask()->Delete();
DBQueryWarn("delete from apt_instance_extension_info where uuid='$uuid'") or
return -1;
DBQueryWarn("delete from apt_instances where uuid='$uuid'") or
......@@ -615,7 +635,8 @@ sub RecordHistory($$)
" created,now(),$expired, ".
" extension_count,extension_days, ".
" physnode_count,virtnode_count, ".
" servername,rspec,params,manifest ".
" servername,repourl,reponame,reporef,repohash, ".
" rspec,script,params,manifest ".
" from apt_instances where uuid='$uuid'")
or return -1;
......@@ -966,7 +987,7 @@ sub UpdateImageStatus($$)
if ($self->status() ne "imaging") {
goto done;
}
my $webtask = WebTask->LookupByObject($self->uuid());
my $webtask = $self->webtask();
if (!defined($webtask)) {
goto done;
}
......@@ -1266,7 +1287,7 @@ sub isAL2S($) { return $_[0]->{'ISAL2S'}; }
sub GenTemp($$)
{
my ($class, $instance) = @_;
my $webtask = WebTask->LookupByObject($instance->uuid());
my $webtask = $instance->webtask();
if (!defined($webtask)) {
$webtask = WebTask->Create($instance->uuid());
}
......@@ -1350,6 +1371,8 @@ sub Create($$$)
my $instance_uuid = $instance->uuid();
my $instance_name = $instance->name();
# XXX Anonymous is the wrong thing to do here, but we do not have
# a unique uuid to use.
my $webtask = WebTask->Create(undef);
return undef
if (!defined($webtask));
......@@ -1373,7 +1396,9 @@ sub Delete($)
my $uuid = $self->uuid();
my $urn = $self->aggregate_urn();
$self->webtask()->Delete();
$self->webtask()->Delete()
if ($self->webtask());
return 0
if ($self->{'FAKE'});
......
#!/usr/bin/perl -wT
#
# Copyright (c) 2007-2016 University of Utah and the Flux Group.
# Copyright (c) 2007-2017 University of Utah and the Flux Group.
#
# {{{EMULAB-LICENSE
#
......@@ -45,6 +45,7 @@ use vars qw(@ISA @EXPORT $AUTOLOAD);
# Must come after package declaration!
use EmulabConstants;
use emutil;
use WebTask;
use emdb;
use APT_Dataset;
use GeniXML;
......@@ -53,7 +54,9 @@ use libtestbed;
use Lease;
use English;
use Data::Dumper;
use File::Basename;
use File::Temp qw(tempfile :mktemp tmpnam :POSIX);
use URI::URL;
use overload ('""' => 'Stringify');
# Configure variables
......@@ -84,8 +87,32 @@ sub BlessRow($$)
my $self = {};
$self->{'DBROW'} = $row;
bless($self, $class);
#
# Grab the webtask. Backwards compat mode, see if there is one associated
# with the object, use that. Otherwise create a new one.
#
my $webtask;
if (defined($self->webtask_id())) {
$webtask = WebTask->Lookup($self->webtask_id());
}
if (!defined($webtask)) {
$webtask = WebTask->LookupByObject($self->uuid());
if (!defined($webtask)) {
$webtask = WebTask->Create();
return undef
if (!defined($webtask));
}
my $profileid = $self->profileid();
my $webtask_id = $webtask->task_id();
DBQueryWarn("update apt_profiles set webtask_id='$webtask_id' ".
"where profileid='$profileid'")
or return undef;
}
$self->{'WEBTASK'} = $webtask;
return $self;
}
......@@ -252,9 +279,17 @@ AUTOLOAD {
sub DESTROY {
my $self = shift;
$self->{'DBROW'} = undef;
$self->{'WEBTASK'} = undef;
$self->{'DBROW'} = undef;
}
sub IsRepoBased($) {
my ($self) = @_;
return (defined($self->repourl()) ? 1 : 0);
}
sub webtask($) { return $_[0]->{'WEBTASK'}; }
#
# Refresh a class instance by reloading from the DB.
#
......@@ -294,27 +329,33 @@ sub Create($$$$$$)
my $gid_idx = $project->pid_idx();
my $uid = $creator->uid();
my $uid_idx = $creator->uid_idx();
my $puuid = NewUUID();
my $vuuid = NewUUID();
my $webtask = WebTask->Create();
return undef
if (!defined($webtask));
#
# The pid/imageid has to be unique, so lock the table for the check/insert.
#
DBQueryWarn("lock tables apt_profiles write, apt_profile_versions write, ".
" emulab_indicies write")
or return undef;
if (!DBQueryWarn("lock tables apt_profiles write, ".
" apt_profile_versions write, ".
" emulab_indicies write")) {
$webtask->Delete();
return undef;
}
my $query_result =
DBQueryWarn("select name from apt_profiles ".
"where pid_idx='$pid_idx' and name=$name");
if ($query_result->numrows) {
DBQueryWarn("unlock tables");
$webtask->Delete();
$$usrerr_ref = "Profile already exists in project!";
return undef;
}
my $profileid = TBGetUniqueIndex("next_profile", undef, 1);
my $puuid = NewUUID();
my $vuuid = NewUUID();
my $rspec = DBQuoteSpecial($argref->{'rspec'});
my $cquery = "";
my $vquery = "";
......@@ -342,9 +383,15 @@ sub Create($$$$$$)
$vquery .= ",paramdefs=" . DBQuoteSpecial($argref->{'paramdefs'});
}
}
if (exists($argref->{'repourl'}) && $argref->{'repourl'} ne "") {
$vquery .= ",repourl=" . DBQuoteSpecial($argref->{'repourl'});
$vquery .= ",reponame=" . DBQuoteSpecial($argref->{'reponame'});
$vquery .= ",repohash=" . DBQuoteSpecial($argref->{'repohash'});
}
# Back to the main table.
$cquery .= ",uuid='$puuid'";
$cquery .= ",webtask_id=" . DBQuoteSpecial($webtask->task_id());
$cquery .= ",public=1"
if (exists($argref->{'public'}) && $argref->{'public'});
$cquery .= ",listed=1"
......@@ -358,6 +405,7 @@ sub Create($$$$$$)
if (! DBQueryWarn("insert into apt_profiles set $cquery")) {
DBQueryWarn("unlock tables");
tberror("Error inserting new apt_profiles record!");
$webtask->Delete();
return undef;
}
# And the versions entry.
......@@ -365,6 +413,7 @@ sub Create($$$$$$)
DBQueryWarn("delete from apt_profiles where profileid='$profileid'");
DBQueryWarn("unlock tables");
tberror("Error inserting new apt_profile_versions record!");
$webtask->Delete();
return undef;
}
DBQueryWarn("unlock tables");
......@@ -408,11 +457,12 @@ sub NewVersion($$)
" creator,creator_idx,updater,updater_idx, ".
" created,uuid, ".
" parent_profileid,parent_version,rspec, ".
" script,paramdefs) ".
" script,paramdefs,reponame,repourl) ".
"select name,profileid,'$newvers',pid,pid_idx, ".
" creator,creator_idx,'$uid','$uid_idx',".
" now(),uuid(),'$profileid', ".
" '$version',rspec,script,paramdefs ".
" '$version',rspec,script,paramdefs, ".
" reponame,repourl ".
"from apt_profile_versions as v ".
"where v.profileid='$profileid' and ".
" v.version='$version'"));
......@@ -458,7 +508,10 @@ sub UpdateVersion($$)
my $version = $self->version();
my $query = "update apt_profile_versions set ".
join(",", map("$_=" . DBQuoteSpecial($argref->{$_}), keys(%{$argref})));
join(",", map("$_=" .
(defined($argref->{$_}) ?
DBQuoteSpecial($argref->{$_}) : "NULL"),
keys(%{$argref})));
$query .= " where profileid='$profileid' and version='$version'";
......@@ -546,11 +599,14 @@ sub Delete($$)
DBQueryWarn("update apt_profile_versions set deleted=now() ".
"where profileid='$profileid'")
or goto bad;
# Delete any leftover webtasks.
# Delete any leftover webtasks. These are old ones.
DBQueryWarn("delete web_tasks from apt_profile_versions ".
"left join web_tasks on ".
" web_tasks.object_uuid=apt_profile_versions.uuid ".
"where apt_profile_versions.profileid='$profileid'");
# Primary webtask.
$self->webtask()->Delete()
if ($self->webtask());
}
DBQueryWarn("delete from apt_profile_favorites ".
"where profileid='$profileid'")
......@@ -1121,6 +1177,76 @@ sub SetSites($$$$$$)
return 0;
}
#
# Set the repository for the rspec. This is a top level element. At
# some point we can think about per-node repos.
#
sub SetRepo($$$$$$)
{
my ($prspecstr, $repourl, $reporef, $repohash, $geniuser, $perrmsg) = @_;
my $rspec = GeniXML::Parse($$prspecstr);
if (! defined($rspec)) {
$$perrmsg = "Could not parse rspec\n";
return -1;
}
my $repo = GeniXML::FindNodesNS("n:repository", $rspec,
$GeniXML::EMULAB_NS)->pop();
if (! defined($repo)) {
$repo = GeniXML::AddElement("repository", $rspec, $GeniXML::EMULAB_NS);
GeniXML::SetText("type", $repo, "git");
GeniXML::SetText("url", $repo, $repourl);
GeniXML::SetText("refspec", $repo, $reporef);
GeniXML::SetText("commit", $repo, $repohash);
}
#
# Temporary: let's just throw an execute service onto each node.
# We put it before any existing execute services.
#
my $owner = $geniuser->uid();
my $command = "cd /local && sudo chmod 777 . && umask 002 && ".
"git clone -n '$repourl' repository && " .
"cd repository && " .
"git config --add core.sharedRepository group && ".
"git checkout $repohash ";
if ($reporef =~ m"^(refs/|)heads/(.+)") {
my $branchname = $2;
$command .= "&& git branch -ft $branchname origin/$branchname" .
"&& git checkout -B $branchname HEAD ";
}
$command .= "&& sudo chown -R $owner . ";
#
# Generate an SSH url and make that the push url so users can easily
# push up (assuming they have the proper credentials loaded).
#
my $url = new URI::URL $repourl;
my $host = $url->host();
my ($org,$project) = ($url->path() =~ /^\/([^\/]+)(\/.*)$/);
if (defined($org) && defined($project)) {
my $sshurl = "git\@${host}:${org}${project}";
$command .= "&& git remote set-url --push origin '$sshurl'";
}
foreach my $node (GeniXML::FindNodes("n:node", $rspec)->get_nodelist()) {
my $service;
my $services = FindNodes("n:services", $node)->shift();
if (!defined($services)) {
$services = GeniXML::AddElement("services", $node);
$service = GeniXML::AddElement("execute", $services);
}
else {
$service = XML::LibXML::Element->new("execute");
$services->insertBefore($service,
FindNodes("n:execute", $services)->shift());
}
GeniXML::SetText("shell", $service, "/bin/sh");
GeniXML::SetText("command", $service, $command);
}
$$prspecstr = GeniXML::Serialize($rspec);
return 0;
}
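For illustration, with placeholder repository URL, refspec/branch, commit hash, and user name, the per-node execute service composed above boils down to a shell command along these lines:

    cd /local && sudo chmod 777 . && umask 002 && \
    git clone -n 'https://git.example.org/myorg/myrepo.git' repository && \
    cd repository && \
    git config --add core.sharedRepository group && \
    git checkout 0123abcd && \
    git branch -ft mybranch origin/mybranch && git checkout -B mybranch HEAD && \
    sudo chown -R jdoe . && \
    git remote set-url --push origin 'git@git.example.org:myorg/myrepo.git'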
#
# Encrypt blocks.
#
......
......@@ -32,20 +32,20 @@ SUBDIRS =
BIN_SCRIPTS = manage_profile manage_instance manage_dataset \
create_instance rungenilib ns2rspec nsgenilib.py \
rspec2genilib ns2genilib manage_reservations
rspec2genilib ns2genilib manage_reservations manage_gitrepo
SBIN_SCRIPTS = apt_daemon aptevent_daemon portal_xmlrpc apt_checkup \
portal_monitor
LIB_SCRIPTS = APT_Profile.pm APT_Instance.pm APT_Dataset.pm APT_Geni.pm \
APT_Aggregate.pm APT_Utility.pm
WEB_BIN_SCRIPTS = webmanage_profile webmanage_instance webmanage_dataset \
webcreate_instance webrungenilib webns2rspec webns2genilib \
webrspec2genilib webmanage_reservations
WEB_SBIN_SCRIPTS= webportal_xmlrpc
webrspec2genilib webmanage_reservations webmanage_gitrepo
WEB_SBIN_SCRIPTS= webportal_xmlrpc
LIBEXEC_SCRIPTS = $(WEB_BIN_SCRIPTS) $(WEB_SBIN_SCRIPTS)
USERLIBEXEC = rungenilib.proxy genilib-jail genilib-iocage
USERLIBEXEC = rungenilib.proxy genilib-jail genilib-iocage gitrepo.proxy
# These scripts installed setuid, with sudo.
SETUID_BIN_SCRIPTS = rungenilib
SETUID_BIN_SCRIPTS = rungenilib manage_gitrepo
SETUID_SBIN_SCRIPTS =
SETUID_SUEXEC_SCRIPTS=
......
A. How to setup update the genilib-jail environment.
A. How to setup and update the genilib-jail and git-profile-jail environments.
We use a combination of iocage and our own hand-rolled script to efficiently
spawn up environments in which we can interpret geni-lib scripts. Right now
we only have to do this at Utah, but down the road, if other sites start
running their own portal, this will be needed.
We also use a persistent jail for caching "git-based profile" repos. It
doesn't need the genilib stuff, just git, but we roll it all into the same
setup (and FreeBSD package) because, you know, who doesn't want git!
0. Convert your ops node to using ZFS.
Ugh.
......@@ -18,10 +22,6 @@ running their own portal, this will be needed.
iocage fetch release=10.2-RELEASE
iocage create -c release=10.2-RELEASE tag=py-cage-new
# if you want to be able to start it up with IP addr
iocage set ip4_addr='ix0|155.98.33.87' py-cage-new
iocage set host_hostname='py-cage-new.emulab.net' py-cage-new
iocage set compression=off py-cage-new
iocage set quota=10G py-cage-new
......@@ -58,6 +58,120 @@ B. Updating your iocage:
1. Fetch the appropriate release:
sudo iocage fetch release=10.2-RELEASE
iocage fetch release=10.2-RELEASE
2. to be finished...
C. Setting up the git repo cache.
Skip to step 8 if you are only exporting a ZFS to boss (no jail).
Skip step 1 if you already have iocage installed.
Skip step 2 if you already have iocages (e.g., if you have a genilib-jail).
1. Make sure the "iocage" package is installed.
We are using version 1.7.4.
pkg install iocage
echo 'iocage_enable="YES"' >> /etc/rc.conf
2. Load the appropriate release
iocage fetch release=10.2-RELEASE
3. Setup the jail.
iocage create -c release=10.2-RELEASE tag=repo
# Make it network ready
iocage set ip4_addr='xn0|128.110.100.35' repo
iocage set host_hostname='repo.apt.emulab.net' repo
# Override the UUID default for hostname
iocage set hostname='repo.apt.emulab.net' repo
# If not on a flat control network
iocage set defaultrouter=128.110.100.33 repo
# XXX for the git repo jail, you will want a resolver
iocage set resolver='nameserver 128.110.100.4;search apt.emulab.net' repo
# so we can install updates from inside
iocage set securelevel=0 repo
# make it boot at host boot time
iocage set boot=on repo
# tweak our zfs fs
iocage set compression=off repo
iocage set quota=10G repo
4. Make sure sshd starts up and you can login from boss (for our benefit)
cd /iocage/jails/<UUID>/root
mkdir -m 0700 root/.ssh