Commit c0c6547c authored by Robert Ricci

New scripts: tarfiles_setup, fetchtar.proxy, and webtarfiles_setup.

The idea is to give us hooks for grabbing experimenters' tarballs (and
RPMs) from locations other than files on ops. Mainly, to remove
another dependence on users having shells on ops.

tarfiles_setup supports fetching files from http and ftp URLs right
now, through wget. It places them into the experiment directory, so
that they'll go away when the experiment is terminated, and the rest
of the chain (i.e. downloading to clients and os_setup's checks)
remains unchanged. It is now tarfiles_setup's job to copy tarballs and
RPMs from the virt_nodes table to the nodes table for allocated nodes.
This way, it can translate URLs into the local filenames it
constructs. It gets invoked from tbswap.
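
As a rough sketch of that translation (the hash-based naming matches
the code in this commit, which shells out to /sbin/md5; the URL and
experiment directory below are made-up examples):

    # Sketch only: an MD5 hash of the URL becomes the local filename,
    # placed in the experiment directory, keeping the file extension.
    use Digest::MD5 qw(md5_hex);
    my $URL       = "http://example.com/software.tar.gz";  # hypothetical URL
    my $expdir    = "/proj/myproj/exp/myexp";               # hypothetical expt dir
    my $localfile = $expdir . "/" . md5_hex($URL) . ".tar.gz";
    # nodes.tarballs then records $localfile instead of the URL.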

The actual fetching is done over on ops, running as the user, via
fetchtar.proxy.
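
Per file to fetch, tarfiles_setup does roughly the following (this
mirrors the code further down in this commit; values illustrative):

    # Flip to real root for the ssh over to ops ($CONTROL), then back.
    my $cmdargs = "$TB/bin/fetchtar.proxy -u $dbuid $URL $localfile";
    $EUID = $UID = 0;
    system("sshtb -host $CONTROL $cmdargs");
    $EUID = $UID = $SAVEUID;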

Should be idempotent, so we should be able to give the user a button
to run webtarfiles_setup (no such button exists yet) to 'freshen'
their tarballs. (We'd also have to somehow let the experiment's nodes
know they need to re-fetch their tarballs.)

One funny side effect of this is that the separator in
virt_nodes.tarfiles is now ';' instead of ':' (which nodes.tarballs
still uses), since we can now put URLs, which contain ':', in the
former. Making these consistent is a project for another day.
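
For example (paths and URL are illustrative), a virt_nodes.tarfiles
value and the nodes.tarballs value derived from it might look like:

    virt_nodes.tarfiles:  /usr/site /proj/p/a.tar.gz;/opt http://example.com/b.tar.gz
    nodes.tarballs:       /usr/site /proj/p/a.tar.gz:/opt /proj/p/exp/e/<md5>.tar.gz

(Each entry is an 'install-dir tarfile' pair; entries are ';'-separated
in virt_nodes.tarfiles and ':'-separated in nodes.tarballs.)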
parent 6dcae6d3
......@@ -1411,6 +1411,8 @@ outfiles="$outfiles Makeconf GNUmakefile \
tbsetup/sfskey_update tbsetup/sfskey_update.proxy \
tbsetup/idleswap tbsetup/webidleswap tbsetup/switchmac \
tbsetup/newnode_reboot \
tbsetup/tarfiles_setup tbsetup/webtarfiles_setup \
tbsetup/fetchtar.proxy \
tbsetup/plab/GNUmakefile tbsetup/plab/libplab.py \
tbsetup/plab/plabslice tbsetup/plab/plabnode tbsetup/plab/plabdaemon \
tbsetup/plab/plabmetrics tbsetup/plab/plabstats \
......
......@@ -454,6 +454,8 @@ outfiles="$outfiles Makeconf GNUmakefile \
tbsetup/sfskey_update tbsetup/sfskey_update.proxy \
tbsetup/idleswap tbsetup/webidleswap tbsetup/switchmac \
tbsetup/newnode_reboot \
tbsetup/tarfiles_setup tbsetup/webtarfiles_setup \
tbsetup/fetchtar.proxy \
tbsetup/plab/GNUmakefile tbsetup/plab/libplab.py \
tbsetup/plab/plabslice tbsetup/plab/plabnode tbsetup/plab/plabdaemon \
tbsetup/plab/plabmetrics tbsetup/plab/plabstats \
......
......@@ -19,7 +19,7 @@ BIN_STUFF = power snmpit tbend tbprerun tbreport \
os_load startexp endexp batchexp swapexp \
node_reboot nscheck node_update savelogs node_control \
portstats checkports eventsys_control os_select tbrestart \
tbswap nseswap
tbswap nseswap tarfiles_setup
# Stuff that mere users get on plastic.
USERBINS = os_load node_reboot nscheck node_update savelogs \
......@@ -42,7 +42,7 @@ LIBEXEC_STUFF = rmproj wanlinksolve wanlinkinfo \
webmkgroup websetgroups webmkproj \
spewlogfile staticroutes routecalc wanassign \
webnodereboot webrmuser webidleswap switchmac \
spewrpmtar
spewrpmtar webtarfiles_setup
LIB_STUFF = libtbsetup.pm exitonwarn.pm libtestbed.pm snmpit_intel.pm \
snmpit_cisco.pm snmpit_lib.pm snmpit_apc.pm power_rpc27.pm \
......@@ -144,6 +144,8 @@ post-install:
chmod u+s $(INSTALL_SBINDIR)/vnode_setup
chown root $(INSTALL_BINDIR)/eventsys_control
chmod u+s $(INSTALL_BINDIR)/eventsys_control
chown root $(INSTALL_BINDIR)/tarfiles_setup
chmod u+s $(INSTALL_BINDIR)/tarfiles_setup
#
# Control node installation (okay, plastic)
......@@ -158,6 +160,7 @@ endif
control-install: $(addprefix $(INSTALL_SBINDIR)/, console_setup.proxy) \
$(addprefix $(INSTALL_SBINDIR)/, exports_setup.proxy) \
$(addprefix $(INSTALL_SBINDIR)/, sfskey_update.proxy) \
$(addprefix $(INSTALL_BINDIR)/, fetchtar.proxy) \
$(addprefix $(INSTALL_LIBDIR)/, libtestbed.pm)
@$(MAKE) -C ns2ir control-install
$(LINKS)
......
......@@ -2269,14 +2269,16 @@ sub InitPnode($pnode, $vnode)
$vname = $vnode;
$role = TBDB_RSRVROLE_NODE;
#
# NOTE: We no longer include tarballs and RPMs in this update, because
# they are now handled by tarfiles_setup
#
DBQueryFatal("UPDATE nodes set ".
" def_boot_cmd_line='$cmdline'," .
" startstatus='none'," .
" bootstatus='unknown'," .
" ready=0," .
" rpms='$rpms'," .
" deltas='$deltas'," .
" tarballs='$tarfiles'," .
" startupcmd='$startupcmd'," .
" failureaction='$failureaction'," .
" routertype='$routertype'" .
......
#!/usr/bin/perl -wT
#
# EMULAB-COPYRIGHT
# Copyright (c) 2003 University of Utah and the Flux Group.
# All rights reserved.
#
use English;
use Getopt::Std;
use BSD::Resource;
use POSIX qw(:signal_h);
#
# Fetch tarballs and RPMs on behalf of a user
#
sub usage()
{
print "Usage: $0 -u user URL localfile\n";
exit(-1);
}
#
# Configure variables
#
my $TB = "@prefix@";
my $TBOPS = "@TBOPSEMAIL@";
my $FSDIR_PROJ = "@FSDIR_PROJ@";
my $FSDIR_GROUPS = "@FSDIR_GROUPS@";
# Locals
my $WGET = "/usr/local/bin/wget";
my $REALPATH = "/bin/realpath";
#
# Turn off line buffering on output
#
$| = 1;
#
# Untaint the path
#
$ENV{'PATH'} = "/bin:/usr/bin:/sbin:/usr/sbin";
delete @ENV{'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};
#
# Testbed Support libraries
#
use lib "@prefix@/lib";
use libtestbed;
#
# First option has to be the -u option, the user to run this script as.
#
if ($UID != 0) {
die("*** $0:\n".
" Must be root to run this script!");
}
if (@ARGV != 4) {
usage();
}
if ((shift @ARGV) ne '-u') {
die("*** $0:\n".
" Improper first argument. Must be the user name!\n");
}
my $user = shift @ARGV;
my $URL = shift @ARGV;
my $localfile = shift @ARGV;
#
# Check arguments
#
if (!($user =~ /^([\w-]+)$/)) {
die("*** $0:\n".
" Bad username given\n");
} else {
$user = $1;
}
(undef,undef,$unix_uid) = getpwnam($user) or
die("*** $0:\n".
" No such user $user\n");
if (!($URL =~ /^((http|ftp):\/\/[\w.\-\/\@:~]+(\.tar\.gz|\.tgz|\.rpm))$/)) {
die("*** $0:\n".
" Illegal URL given: $URL\n");
} else {
$URL = $1;
}
if (!($localfile =~ /^([\w\.\_\-+\/]+)$/)) {
die("*** $0:\n".
" Illegal local filename given: $localfile\n");
} else {
$localfile = $1;
my $realpath = `$REALPATH $localfile`;
chomp $realpath;
if ($realpath !~ /^(($FSDIR_PROJ|$FSDIR_GROUPS)\/.*)$/) {
die("*** $0:\n".
" Local file must be in /proj or /groups: $localfile, $realpath \n");
} else {
$localfile = $1;
}
}
#
# Need the entire group list for the user, because of subgroups, and
# because that's the correct thing to do. Too bad perl does not have a
# getgrouplist() function like the C library.
#
my $glist = `id -G $user`;
if ($glist =~ /^([\d ]*)$/) {
$glist = $1;
}
else {
die("*** $0:\n".
" Unexpected results from 'id -G $user': $glist\n");
}
# Need to split off the first group and create a proper list for $GID.
my @gglist = split(" ", $glist);
my $unix_gid = $gglist[0];
$glist = "$unix_gid $glist";
# Flip to user and never go back!
$GID = $unix_gid;
$EGID = $glist;
$EUID = $UID = $unix_uid;
$ENV{'USER'} = $user;
$ENV{'LOGNAME'} = $user;
#
# Fork a child process to run the wget
#
my $pid = fork();
if (!defined($pid)) {
die("*** $0:\n".
" Could not fork a new process!");
}
#
# Child does the fetch, niced down, and exits
#
if (! $pid) {
# Set the CPU limit for us.
setrlimit(RLIMIT_CPU, 180, 180);
# Give parent a chance to react.
sleep(1);
exec("nice -15 $WGET -nv -O $localfile $URL");
die("Could not exec wget!\n");
}
#
# Parent waits.
#
waitpid($pid, 0);
my $exit_status = $?;
#
# If the child was KILLed, then it overran its time limit.
# Send email. Otherwise, exit with result of child.
#
if (($exit_status & 0xff) == SIGKILL) {
my $msg = "wget CPU Limit";
SENDMAIL($TBOPS, "wget Exceeded CPU Limit", $msg);
print STDERR "$msg\n";
exit(15);
}
exit($exit_status >> 8);
......@@ -231,7 +231,7 @@ proc tb-set-node-tarfiles {node args} {
lappend tarfiles [join [lrange $args 0 1]]
set args [lrange $args 2 end]
}
$node set tarfiles [join $tarfiles :]
$node set tarfiles [join $tarfiles ";"]
}
proc tb-set-node-deltas {node args} {
if {$args == {}} {
......
#!/usr/bin/perl -w
#
# EMULAB-COPYRIGHT
# Copyright (c) 2003 University of Utah and the Flux Group.
# All rights reserved.
#
use English;
use Getopt::Std;
use Socket;
#
# Fetch all tarballs (and RPMs) for an experiment. Since we don't want to
# give users the chance to exploit bugs or features of the program that does
# the fetching, we ssh over to ops to do the actual fetching.
#
# As a side-effect, copies the contents of the tarfiles and rpms fields from
# virt_nodes to the nodes table. Any fetched tarballs (or RPMs) are entered
# into the nodes table as the location on local disk they were fetched to.
#
# Should be run _after_ the experiment has begun swapin - i.e. when the
# virt_nodes have already been assigned to physical nodes.
#
sub usage()
{
print "Usage: $0 eid pid\n";
exit(-1);
}
#
# Configure variables
#
my $TB = "@prefix@";
my $TBOPS = "@TBOPSEMAIL@";
my $CONTROL = "@USERNODE@";
my $TESTMODE = @TESTMODE@;
my $SAVEUID = $UID;
my $MD5 = "/sbin/md5";
#
# Turn off line buffering on output
#
$| = 1;
#
# Untaint the path
#
$ENV{'PATH'} = "$TB/bin:$TB/sbin:/bin:/usr/bin:/sbin:/usr/sbin";
delete @ENV{'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};
if ($TESTMODE) {
# In testmode, drop privs (my devel tree at home is TESTMODE=1)
$EUID = $UID;
}
elsif ($EUID != 0) {
# We don't want to run this script unless it's the real version.
die("*** $0: Must be root! Maybe it's a development version?\n");
}
# This script is setuid, so please do not run it as root. Hard to track
# what has happened.
if ($UID == 0) {
die("*** $0: Please do not run this as root! Its already setuid!\n");
}
#
# Testbed Support libraries
#
use lib "@prefix@/lib";
use libtestbed;
use libdb;
if (@ARGV != 2) {
usage();
}
my ($pid, $eid) = @ARGV;
my $dbuid;
#
# Verify user and get his DB uid.
#
if (! UNIX2DBUID($UID, \$dbuid)) {
die("*** Go Away! You do not exist in the Emulab Database.\n");
}
#
# First, make sure the experiment exists
#
if (!ExpState($pid,$eid)) {
die("*** There is no experiment $eid in project $pid\n");
}
#
# User must have at least MODIFY permissions to use this script
#
if (!TBExptAccessCheck($dbuid,$pid,$eid,TB_EXPT_MODIFY())) {
die("*** You are not allowed to modify experiment $eid in project $pid\n");
}
#
# Get the experiment's directory - that's where we'll stash any files we
# fetch
#
my $expdir = TBExptUserDir($pid,$eid);
if (!$expdir) {
die("*** Unable to get experiment directory\n");
}
#
# Get a list of all RPMs and tarballs to fetch
#
my $result = DBQueryFatal("SELECT vname, rpms, tarfiles FROM virt_nodes WHERE " .
"pid='$pid' and eid='$eid'");
while (my ($vname, $rpms, $tarfiles) = $result->fetchrow()) {
#
# Find out the pnode where this vnode is mapped, if any
#
my $physnode;
VnameToNodeid($pid,$eid,$vname,\$physnode);
#
# Go through the list of RPMs looking for files to fetch
#
foreach my $rpm (split(";", $rpms)) {
if ($rpm =~ /^(http|ftp)/) {
#
# Verify that they gave us a legal URL
#
my $URL = verifyURL($rpm);
if (!$URL) {
die("*** Invalid RPM URL given: $rpm\n");
}
#
# Build up a local filename using an MD5 hash of the URL, so that
# we can uniquely identify it, but don't have to worry about
# putting funny characters in filenames.
#
my $md5 = `$MD5 -q -s '$URL'`;
chomp $md5;
# Have to untaint the hash
$md5 =~ /^(\w+)$/;
$md5 = $1;
my $localfile = $expdir . "/" . $md5 . ".rpm";
#
# Remember this RPM and put the local filename in the string that
# will be uploaded to the nodes table
#
$tofetch{$URL} = $localfile;
$rpms =~ s/$URL/$localfile/g;
}
}
#
# Same as above, for tarballs
#
foreach my $tar (split(";", $tarfiles)) {
my ($dir,$tarfile) = split(" ",$tar);
if ($tarfile =~ /^(http|ftp)/) {
my $URL = verifyURL($tarfile);
if (!$URL) {
die("*** Invalid tarball URL given: $tarfile\n");
}
my $md5 = `$MD5 -q -s '$URL'`;
chomp $md5;
$md5 =~ /^(\w+)$/;
$md5 = $1;
my $localfile = $expdir . "/" . $md5 . ".tar.gz";
$tofetch{$URL} = $localfile;
$tarfiles =~ s/$URL/$localfile/g;
}
}
#
# Hack, hack, hack! We use ';' as a separator in the virt_nodes table, but
# ":" in the nodes table. We should fix the latter
#
$tarfiles =~ s/;/:/g;
$rpms =~ s/;/:/g;
#
# If this virtual node is allocated, update the nodes table
#
if ($physnode) {
DBQueryFatal("UPDATE nodes SET tarballs='$tarfiles', rpms='$rpms' " .
"WHERE node_id='$physnode'");
}
}
#
# In testmode, don't actually fetch anything
#
if ($TESTMODE) {
exit(0);
}
#
# Actually fetch the tarballs
#
while (my ($URL, $localfile) = each %tofetch) {
print "Fetching $URL to $localfile\n";
#
# Build up a new command line to do the fetch on ops
#
my $cmdargs = "$TB/bin/fetchtar.proxy ";
$cmdargs .= " -u $dbuid $URL $localfile ";
#
# Must flip to real root for the ssh, and then flip back afterwards.
#
$EUID = $UID = 0;
system("sshtb -host $CONTROL $cmdargs ");
$EUID = $UID = $SAVEUID;
if ($?) {
die("*** Fetch of Tarball/RPM failed!\n");
}
}
#
# Check to make sure a URL for a tarball or RPM is valid, and return an
# untainted version of it. Returns undef if the URL is not valid.
#
sub verifyURL($) {
my ($URL) = @_;
if ($URL =~ /^((http|ftp):\/\/[\w.\-\/\@:~]+(\.tar\.gz|\.tgz|\.rpm))$/) {
return $1;
} else {
return undef;
}
}
exit 0;
......@@ -636,6 +636,22 @@ sub doSwapin($) {
return 0;
}
#
# Handle tarballs - we might need to fetch some from URLs if the user
# asked for that.
#
print "Fetching tarballs and RPMs (if any) ...\n";
TBDebugTimeStamp("tarfiles_setup started");
if (system("tarfiles_setup $pid $eid")) {
#
# No recovery for now - what would we do?
#
print STDERR "*** Failed to set up tarballs.\n";
return 1;
}
TBDebugTimeStamp("tarfiles_setup finished");
#
# If there are any Plab dslice nodes in the experiment, create the
# dslice now
......
#!/usr/bin/perl -w
#
# EMULAB-COPYRIGHT
# Copyright (c) 2003 University of Utah and the Flux Group.
# All rights reserved.
#
use English;
#
# This gets invoked from the Web interface. Simply a wrapper.
#
# usage: webtarfiles_setup arguments ...
#
#
# Configure variables
#
my $TB = "@prefix@";
#
# Run the real thing, and never return.
#
exec "$TB/bin/tarfiles_setup", @ARGV;
die("webtarfiles_setup: Could not exec tarfiles_setup: $!");