Commit c34575d9 authored by Robert Ricci's avatar Robert Ricci

Getting closer to automating experiments! There are still a few kinks

to be worked out, and I need to finish the documentation, but most of
what's needed is here.
parent fabfd1eb
What is supported:
* Automatic creation of an experiment of any number of nodes
* Running experiments on the real PlanetLab or a 'fake' PlanetLab inside
of Emulab - or both in the same experiment
* Automatic tcpdumps of emulab-side traffic
* Automatic collection of log files at the end of an experiment
* Easy starting/restarting of stubs and monitors
----------------------------------------------------------------------
What is not supported:
* Automatic tcpdump on the real PlanetLab (planned)
* Running test clients (ie. 'iperf -c') for you (planned)
* Asking for specific planetlab nodes (planned)
* Specifying point to point traffic shaping for the Elab initial conditions
and for fake planetlabs. This should be taken care of by whatever
mechanisms are used to fetch wanetmon data
----------------------------------------------------------------------
Running an experiment with the auto-pelab scripts:
1) Create a tarball of the version of the pelab software (monitor, stub, etc.)
that you'd like to test:
Go to your testbed source tree
Run 'cvs up'!!!
Run 'gmake' in pelab/stub and pelab/libnetmon (on a DevBox machine)
From the root of your source tree, run:
tar czvf /proj/tbres/my-pelab.tar.gz pelab
Of course, don't name it 'my-pelab.tar.gz'! You can put this file in
a subdirectory if you want, but it must be somewhere in
/proj/tbres/
Put the path to your tarball in the pelab_tar variable in auto-pelab.ns
2) Read the user-settable options at the top of auto-pelab.ns and change them
to your preferences.
3) Use auto-pelab.ns to create an experiment
4) Once it has swapped in, and you're ready to start an experiment, run the
'start-experiment' script on ops. It will
Restart monitors and stubs
Reset ALL link shaping to the defaults from the NS file
Clean logfiles
Restart iperf servers on all 'elab-*' nodes
5) Log into the 'elab-*' node or nodes, and run your experiment (ie. one run
of iperf). Make sure to run them with 'instrument.sh'.
6) When you're done with your experiment, run 'stop-experiment' on ops. It
will:
Stop the stubs, monitors, and iperf servers
Collect logfiles from all of the nodes in the experiment, including
stub/monitor logs, tcpdumps, delay agent logs, and so on
Run tcptrace on all tcpdump logs to produce xplot graphs
Put all of these things in a zip file for you in the experiment's log
directory (/proj/PID/exp/EID/logs)
7) If you want to look at the logs, unzip the .zip file somewhere, and see
the description of the files it contains below
----------------------------------------------------------------------
Important places:
* Logfiles from the monitors, stubs, etc. can all be found in /local/logs
on the node while the experiment runs. You can tail the .out or .err
files to see the stdout and stderr of these processes
* Inside the archive zip file you will find:
elab-*/ - Logfiles from the emulator hosts (more detail below)
event-sched.log - Log of all events (including dummynet control events) sent
during the experiment
plab-*/ - Logfiles from the FAKE planetlab nodes
planet-*/ - Logfiles from the REAL planetlab nodes
tbsdelay*/ - Logs from the delay nodes, including tcpdump traces
Node
......@@ -3,6 +3,9 @@ source tb_compat.tcl
set ns [new Simulator]
$ns rtproto Static
##########
# Beginning of user-settable options
#
# This controls how many _pairs_ of PCs there are. ie. 2 gets you 2 elab PCs
# and 2 plab PCs
......@@ -10,11 +13,16 @@ $ns rtproto Static
set num_pcs 2
#
# Set this to 0 to use the real PlanetLab, not elab nodes pretending to be
# plab nodes.
# If set to 1, we create a fake PlanetLab inside Emulab
#
set fake_plab 1
#
# If set to 1, we grab real PlanetLab nodes. Both this and fake_plab can be
# set at the same time
#
set real_plab 1
#
# Where to grab your tarball of pelab software from. To make this tarball:
# Go to your testbed source tree
......@@ -27,7 +35,7 @@ set fake_plab 1
# /proj/tbres/
# Put the path to your tarball in this variable
#
set pelab_tar "/proj/tbres/CHANGEME.tar.gz"
set pelab_tar "/proj/tbres/ricci/pelab.tar.gz"
#
# When using a fake plab, these are the parameters for the 'control'
......@@ -43,42 +51,76 @@ set control_bw "100Mbps"
set cloud_delay "30ms"
set cloud_bw "1.5Mbps"
#
# These are the initial conditions for the 'elabc' cloud, the Emulab side of
# a pelab experiment
#
set ecloud_delay "30ms"
set ecloud_bw "1.5Mbps"
#
# Hardware type to use for PCs inside of emulab
#
set hwtype "pc"
#
# Server and client to use for automated testing. If set, will automatically
# be started by the 'start-experiment' script
#
set serverprog "/usr/bin/iperf -s "
# NOTE: No client support for now, you'll have to run the client yourself
set clientprog "/usr/bin/iperf -t 60 -c "
# End of user-settable options
##########
tb-set-delay-os FBSD54-FUTURE
#
# Tarballs and RPMs we install on all nodes
#
set tarfiles "/local $pelab_tar"
set rpms "/proj/tbres/auto-pelab/libpcap-0.8.3-3.i386.rpm /proj/tbres/auto-pelab/iperf-2.0.2-1.1.fc2.rf.i386.rpm"
set elan_string ""
set plan_string ""
set stublist {}
set planetstublist {}
set plabstublist {}
set monitorlist {}
set alllist {}
set planetservers {}
set serverlist {}
set clientlist {}
#
# Create all of the nodes
#
for {set i 1} {$i <= $num_pcs} {incr i} {
set planet($i) [$ns node]
if {$fake_plab} {
tb-set-node-os $planet($i) PLAB-DEVBOX
}
if {$fake_plab} {
tb-set-hardware $planet($i) $hwtype
} else {
if {$real_plab} {
set planet($i) [$ns node]
tb-set-hardware $planet($i) pcplab
}
append plan_string "$planet(${i}) "
set planetstub($i) [$planet($i) program-agent -command "/bin/sh /local/pelab/stub/auto-stub.sh"]
lappend stublist $planetstub($i)
lappend planetstublist $planetstub($i)
set stub($i) [$planet($i) program-agent -command "/bin/sh /local/pelab/stub/auto-stub.sh"]
lappend stublist $stub($i)
lappend alllist $stub($i)
tb-set-node-tarfiles $planet($i) $tarfiles
tb-set-node-rpms $planet($i) $rpms
}
tb-set-node-tarfiles $planet($i) /local $pelab_tar
tb-set-node-rpms $planet($i) /proj/tbres/auto-pelab/libpcap-0.8.3-3.i386.rpm /proj/tbres/auto-pelab/iperf-2.0.2-1.1.fc2.rf.i386.rpm
if {$fake_plab} {
set plab($i) [$ns node]
tb-set-node-os $plab($i) PLAB-DEVBOX
tb-set-hardware $plab($i) $hwtype
append plan_string "$plab(${i}) "
set plabstub($i) [$plab($i) program-agent -command "/bin/sh /local/pelab/stub/auto-stub.sh"]
lappend stublist $plabstub($i)
lappend plabstublist $plabstub($i)
tb-set-node-tarfiles $plab($i) $tarfiles
tb-set-node-rpms $plab($i) $rpms
}
set elab($i) [$ns node]
tb-set-node-os $elab($i) PLAB-DEVBOX
......@@ -86,32 +128,40 @@ for {set i 1} {$i <= $num_pcs} {incr i} {
append elan_string "$elab(${i}) "
set monitor($i) [$elab($i) program-agent -command "/bin/sh /local/pelab/monitor/auto-monitor.sh"]
lappend monitorlist $monitor($i)
lappend alllist $monitor($i)
tb-set-node-tarfiles $elab($i) /local $pelab_tar
tb-set-node-rpms $planet($i) /proj/tbres/auto-pelab/libpcap-0.8.3-3.i386.rpm /proj/tbres/auto-pelab/iperf-2.0.2-1.1.fc2.rf.i386.rpm
set server($i) [$elab($i) program-agent -command $serverprog]
set client($i) [$elab($i) program-agent -command $clientprog]
lappend serverlist $server($i)
lappend clientlist $client($i)
tb-set-node-tarfiles $elab($i) $tarfiles
tb-set-node-rpms $elab($i) $rpms
}
#
# Set up groups to make it easy for us to start/stop program agents
#
set stubgroup [$ns event-group $stublist]
set planetstubs [$ns event-group $planetstublist]
set plabstubs [$ns event-group $plabstublist]
set monitorgroup [$ns event-group $monitorlist]
set allgroup [$ns event-group $alllist]
set allservers [$ns event-group $serverlist]
set allclients [$ns event-group $clientlist]
#
# Fake 'Internet' cloud for fake plab nodes
#
if {$fake_plab} {
set planetc [$ns make-lan "$plan_string" $cloud_bw $cloud_delay]
tb-set-ip-lan $planet(1) $planetc 10.1.0.1
$planetc trace
set plabc [$ns make-lan "$plan_string" $cloud_bw $cloud_delay]
tb-set-ip-lan $plab(1) $plabc 10.1.0.1
$plabc trace
}
#
# Lan which will be controlled by the monitor
#
set elabc [$ns make-lan "$elan_string" 10Mb 10ms]
set elabc [$ns make-lan "$elan_string" $ecloud_bw $ecloud_delay]
tb-set-ip-lan $elab(1) $elabc 10.0.0.1
$elabc trace
......@@ -129,13 +179,13 @@ if {$fake_plab} {
set prouter [$ns node]
set elabcontrol [$ns make-lan "$elan_string $erouter" 100Mbps 0ms]
set planetcontrol [$ns make-lan "$plan_string $prouter" 100Mbps 0ms]
set plabcontrol [$ns make-lan "$plan_string $prouter" 100Mbps 0ms]
set internet [$ns duplex-link $erouter $prouter $control_bw $control_delay DropTail]
$internet trace
tb-set-ip-lan $elab(1) $elabcontrol 192.168.0.1
tb-set-ip-lan $planet(1) $planetcontrol 192.168.1.1
tb-set-ip-lan $plab(1) $plabcontrol 192.168.1.1
tb-set-ip-link $erouter $internet 192.168.254.1
tb-set-ip-link $prouter $internet 192.168.254.2
......
......@@ -122,7 +122,7 @@ fi
#
# How big is this experiment? Counts the number of planetlab nodes
#
export PEER_PAIRS=`$GREP -E -c 'elab-.*' /etc/hosts`
export PEER_PAIRS=`$GREP -E -c 'elab-.*-elabc' /etc/hosts`
export PEERS=`expr $PEER_PAIRS \* 2`
#
......
......@@ -4,6 +4,7 @@ use strict;
my $TEVC = "/usr/testbed/bin/tevc";
my $LOGHOLE = "/usr/testbed/bin/loghole";
my $PYTHON = "/usr/local/bin/python";
#
# Require the pid and eid
......@@ -33,6 +34,9 @@ if (system "$TEVC -e $pid/$eid now planetc-tracemon stop") {
die "Error running tevc\n";
}
#
# Restart the server program
#
# Clean out the logs so that we can start fresh
#
......@@ -41,6 +45,25 @@ if (system "$LOGHOLE -e $pid/$eid clean -f -r") {
die "Error running loghole\n";
}
#
# Reset the links so that we remove any delay changes we might have previously
# made
#
print "##### Resetting all links in the experiment\n";
if (system "$PYTHON resetlinks.py $pid $eid") {
die "Error resetting links\n";
}
print "##### Restarting servers\n";
if (system "$TEVC -e $pid/$eid now allservers stop") {
die "Error running tevc\n";
}
if (system "$TEVC -e $pid/$eid now allservers start") {
die "Error running tevc\n";
}
#
# Start link tracing
#
......@@ -52,8 +75,12 @@ if (system "$TEVC -e $pid/$eid now planetc-tracemon start") {
#
# Start up the stubs and monitors
#
print "##### Starting stubs and monitors\n";
if (system "$TEVC -e $pid/$eid now allgroup start") {
print "##### Starting stubs\n";
if (system "$TEVC -e $pid/$eid now plabstubs start") {
die "Error running tevc\n";
}
print "##### Starting monitors\n";
if (system "$TEVC -e $pid/$eid now monitorgroup start") {
die "Error running tevc\n";
}
......
......@@ -25,6 +25,7 @@ if (system "$TEVC -e $pid/$eid now allgroup stop") {
#
# Stop link tracing
#
print "##### Stopping link tracing\n";
if (system "$TEVC -e $pid/$eid now planetc-tracemon snapshot") {
die "Error running tevc\n";
}
......@@ -32,6 +33,14 @@ if (system "$TEVC -e $pid/$eid now planetc-tracemon stop") {
die "Error running tevc\n";
}
#
# Stop running servers
#
print "##### Stopping servers\n";
if (system "$TEVC -e $pid/$eid now allservers stop") {
die "Error running tevc\n";
}
#
# Grab logfiles
#
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment