...
 
# Build libfixbuf2
## Download
wget https://tools.netsa.cert.org/releases/libfixbuf-2.3.0.tar.gz
sudo apt install libglib2.0-dev
## Extract
tar -xvf libfixbuf-2.3.0.tar.gz
mkdir libfixbuf-build
cd libfixbuf-build
## Build
../libfixbuf-2.3.0/configure
make -j8
sudo make install
sudo ldconfig
# Build SiLK
## Official setup directions
https://tools.netsa.cert.org/confluence/pages/viewpage.action?pageId=23298051
https://tools.netsa.cert.org/silk/silk-install-handbook.html#x1-410004
## Download
wget https://tools.netsa.cert.org/releases/silk-3.18.1.tar.gz
## Extract
tar -xvf silk-3.18.1.tar.gz
mkdir silk-build
cd silk-build
## Build
sudo apt install libpython3-dev libfixbuf3-dev
../silk-3.18.1/configure --enable-ipv6 --with-python=/usr/bin/python3 --with-libfixbuf
make -j8
sudo make install
sudo ldconfig
## Setup
https://blog.because-security.com/t/opensource-netflow-collection-with-silk-flowbat-and-how-to-perform-data-analysis/81
https://tools.netsa.cert.org/confluence/pages/viewpage.action?pageId=23298051
@@ -21,6 +21,7 @@ import add_routable_ipv6_addrs
 import frr_configurator
 import json
 import ospf_sniffer_configurator
+import silk_configurator
 import ssh_helper
 import sysctl_configurator
 import topomap_parser
@@ -45,11 +46,11 @@ if __name__ == "__main__":
     parser.add_argument("--no-sniffer", action='store_true',
                         help="Do not start the OSPF sniffer daemons")
     parser.add_argument("--controller-name", action='store', type=str, default=ospf_sniffer_configurator.DEFAULT_CONTROLLER,
-                        help="Hostname or IP of the node which is listening to the OSPF reports (Default: {default})".format(default=ospf_sniffer_configurator.DEFAULT_CONTROLLER))
+                        help="Hostname or IP of the node which is acting as the controller for all collectors (Default: {default})".format(default=ospf_sniffer_configurator.DEFAULT_CONTROLLER))
     parser.add_argument("--controller-port", action='store', type=int, default=ospf_sniffer_configurator.DEFAULT_CONTROLLER_PORT,
                         help="Port number on the server listening for OSPF reports (Default: {default})".format(default=ospf_sniffer_configurator.DEFAULT_CONTROLLER_PORT))
-    parser.add_argument("--ovs-regex", action='store', type=str, default='^ovs.*',
-                        help="Regex to distinguish OVS nodes by label (Default \"^ovs.*\")")
+    parser.add_argument("--border-regex", action='store', type=str, default='^ovs.*',
+                        help="Regex to distinguish border switch nodes by label (Default \"^ovs.*\")")
     parser.add_argument("--host-regex", action='store', type=str, default='^host.*',
                         help="Regex to distinguish host nodes by label (Default \"^host.*\")")
@@ -70,12 +71,14 @@ if __name__ == "__main__":
     add_routable_ipv6_addrs.add_ULAs_to_hosts(netgraph.graph, ULA_map)
     add_routable_ipv6_addrs.add_interfaces_to_netgraph(netgraph.graph, ULA_map)
-    # Prepare a list of nodes which run OVS and should thus be ignored for router-related activities
-    ovs_nodes = []
+    # Prepare a list of nodes which act as an edge switch and should thus be ignored for router-related activities
+    border_nodes = []
     for node in netgraph.graph.nodes:
         node_name = netgraph.graph._node[node]['label']
-        if re.match(args.ovs_regex, node_name):
-            ovs_nodes.append(node)
+        if re.match(args.border_regex, node_name):
+            border_nodes.append(node)
+    controller_node = [node for node in netgraph.graph._node if netgraph.graph._node[node]['label'] == args.controller_name].pop()
     # Prepare a list of nodes which are "customer host nodes" and should thus be ignored for core network-related activities
     host_nodes = []
@@ -84,20 +87,24 @@ if __name__ == "__main__":
         if re.match(args.host_regex, node_name):
             host_nodes.append(node)
-    frr_configurator.configure_nodes(netgraph.graph, ignore_nodes=ovs_nodes)
-    sysctl_configurator.configure_nodes(netgraph.graph,ignore_nodes=ovs_nodes)
+    frr_configurator.configure_nodes(netgraph.graph, ignore_nodes=border_nodes)
+    sysctl_configurator.configure_nodes(netgraph.graph, ignore_nodes=border_nodes)
-    ospf_sniffer_configurator.clone_repo_on_network(netgraph.graph, ignore_nodes=ovs_nodes + host_nodes)
-    ospf_sniffer_configurator.stop_sniffer_on_network(netgraph.graph, ignore_nodes=ovs_nodes + host_nodes)  # Stopping with the app not running is not great, but better than starting twice
+    ospf_sniffer_configurator.clone_repo_on_network(netgraph.graph, ignore_nodes=border_nodes + host_nodes)
+    ospf_sniffer_configurator.stop_sniffer_on_network(netgraph.graph, ignore_nodes=border_nodes + host_nodes)  # Stopping with the app not running is not great, but better than starting twice
     if not args.no_frr:
-        frr_configurator.start_frr_on_network(netgraph.graph, ignore_nodes=ovs_nodes)
+        frr_configurator.start_frr_on_network(netgraph.graph, ignore_nodes=border_nodes)
     if not args.no_sniffer:
         ospf_sniffer_configurator.start_sniffer_on_network(netgraph.graph,
                                                            controller=args.controller_name,
                                                            port=args.controller_port,
-                                                           ignore_nodes=ovs_nodes + host_nodes)
+                                                           ignore_nodes=border_nodes + host_nodes)
+    silk_configurator.configure(netgraph.graph,
+                                controller_node=controller_node,
+                                border_routers=border_nodes,
+                                )
     ssh_helper.network_graph_logout(netgraph.graph)
     if args.netgraph_write:
......
# silk.conf for the "twoway" site
# RCSIDENT("$SiLK: silk.conf 52d8f4f62ffd 2012-05-25 21:16:30Z mthomas $")
# For a description of the syntax of this file, see silk.conf(5).
# The syntactic format of this file
# version 2 supports sensor descriptions, but otherwise identical to 1
version 2
# NOTE: Once data has been collected for a sensor or a flowtype, the
# sensor or flowtype should never be removed or renumbered. SiLK Flow
# files store the sensor ID and flowtype ID as integers; removing or
# renumbering a sensor or flowtype breaks this mapping.
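# The sensor list below is generated by silk_configurator.py, one line per border
# router, e.g.: sensor 0 S0 "label of the border router node"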
{sensor_lines}
class all
sensors {sensor_names}
end class
# Editing above this line is sufficient for sensor definition.
# Be sure you understand the workings of the packing system before
# editing the class and type definitions below. In particular, if you
# change or add-to the following, the C code in packlogic-twoway.c
# will need to change as well.
class all
type 0 in in
type 1 out out
type 2 inweb iw
type 3 outweb ow
type 4 innull innull
type 5 outnull outnull
type 6 int2int int2int
type 7 ext2ext ext2ext
type 8 inicmp inicmp
type 9 outicmp outicmp
type 10 other other
default-types in inweb inicmp
end class
default-class all
# The layout of the tree below SILK_DATA_ROOTDIR.
# Use the default, which assumes a single class.
# path-format "%T/%Y/%m/%d/%x"
# The plug-in to load to get the packing logic to use in rwflowpack.
# The --packing-logic switch to rwflowpack will override this value.
# If SiLK was configured with hard-coded packing logic, this value is
# ignored.
packing-logic "packlogic-twoway.so"
#!/usr/bin/env python3
# Copyright (C) 2019 Simon Redman <sredman@cs.utah.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssh_helper
import argparse
from collections import namedtuple
import networkx
from typing import List
SensorLine = namedtuple('SensorLine', ['uuid', 'name', 'description',])
"""
Build a valid silk.conf file by reading from the template and filling in the gaps
The template defines the following placeholders:
sensor_lines: Should be replaced with a series of lines of the format described by SILK_CONF_SENSOR_LINE_TEMPLATE
sensor_names: Should be replaced with a space-separated list of the names defined for each sensor
"""
SILK_CONF_TEMPLATE_FILENAME="./silk.conf.template"
"""
Define the existence of a sensor
uuid <int>: Some ID which is unique among all defined sensors
name <str>: Some name which is unique among all defined sensors
description <str>: Some optional description of the sensor
"""
SILK_CONF_SENSOR_LINE_TEMPLATE="sensor {uuid} {name} \"{description}\""
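# Rendered example (illustrative): uuid=0, name="S0", description="router label"
# produces: sensor 0 S0 "router label"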
FILE_PUSH_COMMAND_TEMPLATE= "sudo mkdir -p /data/ && sudo chown $USER /data && cat <<EOF >/data/{filename}\n{data}\nEOF"
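# Note: the heredoc delimiter is unquoted, so the remote shell performs $-expansion on the
# pushed data; only the "$SiLK:" RCSIDENT comment in silk.conf is affected, which is harmless,
# but switch to a quoted delimiter (<<'EOF') if literal content ever matters.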
SENSOR_CONF_PROBE_BLOCK_TEMPLATE="""probe {name} netflow-v9
listen-on-port {portnum}
protocol udp
end probe"""
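# Rendered example (illustrative) for a sensor named S0 on port 18000:
#   probe S0 netflow-v9
#   listen-on-port 18000
#   protocol udp
#   end probe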
"""
This group block describes the experimental network IP block
"""
SENSOR_CONF_GROUP_EXPT_BLOCK="""group expt-network
ipblocks fd00::/16
end group"""
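# fd00::/16 lies within the IPv6 unique-local (ULA) space fc00::/7, matching the ULAs
# assigned to the experiment hosts; flows touching it are classified as internal below.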
"""
This group block describes all IPv4 addresses
"""
SENSOR_CONF_GROUP_IPV4_BLOCK="""group ipv4
ipblocks 0.0.0.0/0
end group"""
SENSOR_CONF_SENSOR_BLOCK_TEMPLATE="""sensor {name}
netflow-v9-probes {name}
internal-ipblocks @expt-network
discard-when @ipv4 # Discard ALL IPv4 traffic
external-ipblocks remainder
end sensor"""
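# Rendered example (illustrative) for sensor S0: the probe named S0 feeds sensor S0, the
# ULA block is treated as internal, all IPv4 flows are discarded, everything else is external.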
def _update_netgraph(netgraph, border_routers: List[str], listening_ports: List[int]) -> None:
    """
    Update the netgraph entry for each sensor with information about how it should communicate with the collector
    Adds a "sensor_port" key to each border router node recording the collector port that sensor should export to
    :param netgraph: networkx graph object representing the network
    :param border_routers: List of border router nodes acting as sensors
    :param listening_ports: List of collector ports, parallel to border_routers
    :return:
    """
    for idx in range(len(border_routers)):
        router = border_routers[idx]
        port = listening_ports[idx]
        netgraph._node[router]['sensor_port'] = port
def _write_sensors_conf(session, sensor_lines: List[SensorLine], port_nums: List[int]) -> None:
    probes: List[str] = []
    groups: List[str] = []
    sensors: List[str] = []
    for idx in range(len(sensor_lines)):
        line = sensor_lines[idx]
        portnum = port_nums[idx]
        probe = SENSOR_CONF_PROBE_BLOCK_TEMPLATE.format(
            name=line.name,
            portnum=portnum,
        )
        sensor = SENSOR_CONF_SENSOR_BLOCK_TEMPLATE.format(
            name=line.name,
        )
        probes.append(probe)
        sensors.append(sensor)
    # The group blocks are shared by all sensors, so they are added once
    groups.extend([SENSOR_CONF_GROUP_EXPT_BLOCK, SENSOR_CONF_GROUP_IPV4_BLOCK])
    sensors_conf = "\n\n".join(probes + groups + sensors)
    command = FILE_PUSH_COMMAND_TEMPLATE.format(filename="sensors.conf", data=sensors_conf)
    ssh_helper.run_command_on_host(session, command)
def _write_silk_conf(session, sensor_lines: List[SensorLine]) -> None:
    with open(SILK_CONF_TEMPLATE_FILENAME, 'r') as input:
        silk_conf_template: str = input.read()
    sensor_names: str = " ".join([line.name for line in sensor_lines])
    sensor_line_block: str = "\n".join(
        [
            SILK_CONF_SENSOR_LINE_TEMPLATE.format(
                uuid=line.uuid,
                name=line.name,
                description=line.description,
            )
            for line in sensor_lines])
    silk_conf = silk_conf_template.format(
        sensor_lines=sensor_line_block,
        sensor_names=sensor_names,
    )
    command = FILE_PUSH_COMMAND_TEMPLATE.format(filename="silk.conf", data=silk_conf)
    ssh_helper.run_command_on_host(session, command)
def _build_port_nums(sensor_lines: List[SensorLine]) -> List[int]:
    """
    :param sensor_lines: List of sensor information
    :return: List of ports each sensor should connect to
    """
    # Construct ports starting from 18000 from uuid and hope it's not in use
    port_nums = [18000 + line.uuid for line in sensor_lines]
    return port_nums
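# e.g. sensors with uuids 0, 1, 2 are assigned listening ports 18000, 18001, 18002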
def _build_sensor_lines(border_routers: List[str]) -> List[SensorLine]:
    """
    Assign uuids, names, and descriptions for all border routers
    Does NOT modify netgraph
    :param border_routers: List of routers to treat as sensors
    :return:
    """
    next_uuid = 0
    sensor_lines: List[SensorLine] = []
    for router in border_routers:
        uuid = next_uuid
        next_uuid += 1
        # There are some rules on sensor names, so we can't safely just use the node name, unfortunately
        name = "S{uuid}".format(uuid=uuid)
        description = router
        sensor_lines.append(SensorLine(uuid=uuid, name=name, description=description))
    return sensor_lines
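# e.g. three border routers become sensors S0, S1, S2, keeping each node's label as its description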
def configure(netgraph: networkx.Graph, controller_node: str, border_routers: List[str]) -> None:
    """
    Configure the controller node to be a SiLK NetFlow v9 collector
    MODIFIES netgraph to have the listening-port information needed by ipt_NETFLOW_configurator
    :param netgraph: networkx graph object representing the network
    :param controller_node: Hostname of the node which is running the SiLK collector
    :param border_routers: List of nodes whose NetFlow exports the collector should listen for
    :return: None
    """
    collector_session = netgraph._node[controller_node]['session']
    sensor_lines: List[SensorLine] = _build_sensor_lines(border_routers)
    port_nums: List[int] = _build_port_nums(sensor_lines)
    _write_silk_conf(collector_session, sensor_lines)
    _write_sensors_conf(collector_session, sensor_lines, port_nums)
    _update_netgraph(netgraph, border_routers, port_nums)
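# Illustrative call, mirroring the caller shown in the diff above:
#   configure(netgraph.graph, controller_node=controller_node, border_routers=border_nodes)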
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Configure SiLK to listen for netflow data from all edge switches")
    args = parser.parse_args()
    print("This library is not currently executable")
@@ -29,6 +29,21 @@ SYSCTL_LINE_TEMPLATEs as desired
 """
 SYSCTL_COMMAND_TEMPLATE = "sudo sysctl {lines}"
 SYSCTL_SEG6_LINE_TEMPLATE = "net.ipv6.conf.{iface}.seg6_enabled=1"
+SYSCTL_FORWARDING_LINE_TEMPLETE = "net.ipv6.conf.{iface}.forwarding=1"
+def build_forwarding_sysctl_command(interfaces: List[str]):
+    """
+    Construct the sysctl command to enable ipv6 forwarding on all of the listed interfaces
+    :param interfaces: list of interface names
+    :return: sysctl configuration command
+    """
+    lines = []
+    for interface in interfaces:
+        lines.append(SYSCTL_FORWARDING_LINE_TEMPLETE.format(iface=interface))
+    return SYSCTL_COMMAND_TEMPLATE.format(lines=str.join(" ", lines))
 def build_seg6_sysctl_command(interfaces: List[str]):
@@ -42,6 +57,11 @@ def build_seg6_sysctl_command(interfaces: List[str]):
     for interface in interfaces:
         lines.append(SYSCTL_SEG6_LINE_TEMPLATE.format(iface=interface))
+    # Hack - Do this better when in less of a hurry
+    # Also enable ipv6 forwarding
+    for interface in interfaces:
+        lines.append(SYSCTL_FORWARDING_LINE_TEMPLETE.format(iface=interface))
     return SYSCTL_COMMAND_TEMPLATE.format(lines=str.join(" ", lines))
......