Diffstat (limited to 'rp/rcynic')
23 files changed, 2868 insertions, 1737 deletions
diff --git a/rp/rcynic/Makefile.in b/rp/rcynic/Makefile.in index a2d844bd..52c67fde 100644 --- a/rp/rcynic/Makefile.in +++ b/rp/rcynic/Makefile.in @@ -1,17 +1,7 @@ # $Id$ -NAME = rcynic - -BIN = ${NAME} -SRC = ${NAME}.c -OBJ = ${NAME}.o - -GEN = defstack.h - -OBJS = ${OBJ} bio_f_linebreak.o - CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror-implicit-function-declaration -LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@ +LDFLAGS = @LDFLAGS@ LIBS = @LIBS@ AWK = @AWK@ @@ -40,53 +30,48 @@ abs_top_srcdir = @abs_top_srcdir@ abs_top_builddir = @abs_top_builddir@ srcdir = @srcdir@ -RCYNIC_BIN_RCYNIC = @RCYNIC_BIN_RCYNIC@ +RCYNIC_BIN_RCYNIC = ${DESTDIR}${bindir}/rcynic RCYNIC_CONF_DATA = @RCYNIC_CONF_DATA@ -RCYNIC_CONF_FILE = @RCYNIC_CONF_FILE@ -RCYNIC_CONF_RSYNC = @RCYNIC_CONF_RSYNC@ -RCYNIC_CONF_TA_DIR = @RCYNIC_CONF_TA_DIR@ -RCYNIC_CRON_USER = @RCYNIC_CRON_USER@ -RCYNIC_DATA_DIR = ${RCYNIC_DIR}/data +RCYNIC_CONF_FILE = ${DESTDIR}${sysconfdir}/rcynic.conf +RCYNIC_CONF_RSYNC = @RSYNC@ +RCYNIC_CONF_TA_DIR = ${sysconfdir}/rpki/trust-anchors +RCYNIC_CRON_USER = ${RPKI_USER} +RCYNIC_DATA_DIR = ${DESTDIR}${RCYNIC_DIR}/data RCYNIC_DIR = @RCYNIC_DIR@ -RCYNIC_DIRS = ${RCYNIC_TA_DIR} ${RCYNIC_JAIL_DIRS} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets -RCYNIC_GECOS = RPKI Validation System -RCYNIC_GROUP = @RCYNIC_GROUP@ +RCYNIC_DIRS = ${RCYNIC_TA_DIR} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets +RPKI_GECOS = RPKI System Software +RPKI_GROUP = @RPKI_GROUP@ RCYNIC_HTML_DIR = @RCYNIC_HTML_DIR@ RCYNIC_INSTALL_TARGETS = @RCYNIC_INSTALL_TARGETS@ -RCYNIC_JAIL_DIRS = @RCYNIC_JAIL_DIRS@ -RCYNIC_STATIC_RSYNC = @RCYNIC_STATIC_RSYNC@ -RCYNIC_TA_DIR = @RCYNIC_TA_DIR@ -RCYNIC_USER = @RCYNIC_USER@ -RPKIRTR_DIR = ${RCYNIC_DIR}/rpki-rtr -RPKIRTR_GECOS = RPKI router server -RPKIRTR_GROUP = rpkirtr -RPKIRTR_MODE = 775 -RPKIRTR_USER = rpkirtr - -all: ${BIN} ${RCYNIC_STATIC_RSYNC} +RCYNIC_TA_DIR = ${DESTDIR}${sysconfdir}/rpki/trust-anchors +RPKI_USER = @RPKI_USER@ +RPKIRTR_DIR = ${DESTDIR}${RCYNIC_DIR}/rpki-rtr + +OBJS = rcynic.o bio_f_linebreak.o + +all: rcynicng clean: - if test -r static-rsync/Makefile; then cd static-rsync; ${MAKE} $@; fi - rm -f ${BIN} ${OBJS} + rm -f rcynic ${OBJS} -${OBJ}: ${SRC} ${GEN} +rcynic.o: rcynic.c defstack.h -${BIN}: ${OBJS} +rcynic: ${OBJS} ${CC} ${CFLAGS} -o $@ ${OBJS} ${LDFLAGS} ${LIBS} -${GEN}: ${SRC} - ${PYTHON} ${abs_top_srcdir}/buildtools/defstack.py ${SRC} >$@.tmp +defstack.h: rcynic.c + ${PYTHON} ${abs_top_srcdir}/buildtools/defstack.py rcynic.c >$@.tmp mv $@.tmp $@ tags: TAGS -TAGS: ${SRC} ${GEN} - etags ${SRC} ${GEN} +TAGS: rcynic.c defstack.h + etags rcynic.c defstack.h -test: ${BIN} +test: rcynic if test -r rcynic.conf; \ then \ - ./${BIN} -j 0 && \ + ./rcynic -j 0 && \ test -r rcynic.xml && \ echo && \ ./rcynic-text rcynic.xml; \ @@ -108,33 +93,31 @@ static-rsync/rsync: install: all ${RCYNIC_INSTALL_TARGETS} install-always: \ - install-directories install-rcynic install-rcynic-conf + install-directories install-rcynic install-tals install-postconf: \ install-user-and-group install-directory-ownership install-crontab -install-jailed: \ - install-static-rsync install-shared-libraries install-rc-scripts - install-directories: ${RCYNIC_DIRS} ${RCYNIC_DIRS} ${DESTDIR}${bindir} ${DESTDIR}${sysconfdir}: ${INSTALL} -v -d $@ install-directory-ownership: ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets - chown ${RCYNIC_USER}:${RCYNIC_GROUP} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} - chown ${RPKIRTR_USER}:${RCYNIC_GROUP} ${RPKIRTR_DIR}/sockets - 
chmod ${RPKIRTR_MODE} ${RPKIRTR_DIR}/sockets + chown ${RPKI_USER}:${RPKI_GROUP} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets -install-rcynic-conf: ${RCYNIC_CONF_FILE} +install-tals: + ${INSTALL} -v -d ${RCYNIC_TA_DIR} + ${INSTALL} -v -p -m 444 sample-trust-anchors/*.tal ${RCYNIC_TA_DIR} + +# We don't install rcynic.conf anymore. Keep this for now as internal documentation, +# clean up later. ${RCYNIC_CONF_FILE}: @echo - @echo Found no ${RCYNIC_CONF_FILE}, creating basic config and installing default trust anchor locators. + @echo Found no ${RCYNIC_CONF_FILE}, creating basic configuration. @echo You might want to edit this. @echo - ${INSTALL} -v -d ${RCYNIC_TA_DIR} - ${INSTALL} -v -p -m 444 sample-trust-anchors/*.tal ${RCYNIC_TA_DIR} @echo > $@.tmp '# Basic rcynic configuration file with default trust anchors.' @echo >>$@.tmp '# See documentation for details.' @echo >>$@.tmp '' @@ -153,13 +136,8 @@ ${RCYNIC_CONF_FILE}: install-rcynic: ${RCYNIC_BIN_RCYNIC} -${RCYNIC_BIN_RCYNIC}: ${BIN} - ${INSTALL} -p -m 555 ${BIN} $@ - -install-static-rsync: ${RCYNIC_DIR}/bin/rsync - -${RCYNIC_DIR}/bin/rsync: static-rsync/rsync - ${INSTALL} -p -m 555 static-rsync/rsync $@ +${RCYNIC_BIN_RCYNIC}: rcynicng + ${INSTALL} -p -m 555 rcynicng $@ .FORCE: diff --git a/rp/rcynic/rc-scripts/darwin/RCynic b/rp/rcynic/rc-scripts/darwin/RCynic deleted file mode 100755 index d486a3c3..00000000 --- a/rp/rcynic/rc-scripts/darwin/RCynic +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh - -# -# $Id$ -# -. /etc/rc.common - -name="rcynic" -start_cmd="rcynic_start" -stop_cmd="rcynic_stop" - -: ${rcynic_dir="/var/rcynic"} - -StartService() -{ - /sbin/umount "${rcynic_dir}/dev" 2>/dev/null - - if ! /sbin/mount_devfs devfs "${rcynic_dir}/dev"; then - echo "Mounting devfs on ${rcynic_dir}/dev failed..." - exit 1 - fi - - for i in /etc/localtime /etc/resolv.conf; do - j="${rcynic_dir}${i}" - if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then - /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j" - fi - done - - /bin/ln -f /var/run/mDNSResponder "${rcynic_dir}/var/run/mDNSResponder" -} - -StopService() -{ - /sbin/umount "${rcynic_dir}/dev" 2>/dev/null -} - -RestartService() -{ - StartService -} - -RunService "$1" diff --git a/rp/rcynic/rc-scripts/darwin/StartupParameters.plist b/rp/rcynic/rc-scripts/darwin/StartupParameters.plist deleted file mode 100644 index ca46b676..00000000 --- a/rp/rcynic/rc-scripts/darwin/StartupParameters.plist +++ /dev/null @@ -1,19 +0,0 @@ -<?xml version="1.0" encoding="UTF-8"?> -<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> -<plist version="1.0"> -<dict> - <key>Description</key> - <string>RCynic Setup</string> - <key>OrderPreference</key> - <string>None</string> - <key>Provides</key> - <array> - <string>RCynic</string> - </array> - <key>Uses</key> - <array> - <string>Network</string> - <string>Resolver</string> - </array> - </dict> -</plist> diff --git a/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic b/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic deleted file mode 100755 index 9b7aa545..00000000 --- a/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/sh - -# -# $Id$ -# -# PROVIDE: rcynic -# REQUIRE: DAEMON -# KEYWORD: nojail - -. /etc/rc.subr - -name="rcynic" -start_cmd="rcynic_start" -stop_cmd="rcynic_stop" - -: ${rcynic_dir="/var/rcynic"} - -rcynic_start() -{ - /sbin/umount "${rcynic_dir}/dev" 2>/dev/null - - if ! 
/sbin/mount -t devfs dev "${rcynic_dir}/dev"; then - echo "Mounting devfs on ${rcynic_dir}/dev failed..." - exit 1 - fi - - /sbin/devfs -m "${rcynic_dir}/dev" rule apply hide - /sbin/devfs -m "${rcynic_dir}/dev" rule apply path null unhide - /sbin/devfs -m "${rcynic_dir}/dev" rule apply path random unhide - - for i in /etc/localtime /etc/resolv.conf; do - j="${rcynic_dir}${i}" - if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then - /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j" - fi - done -} - -rcynic_stop() -{ - /sbin/umount "${rcynic_dir}/dev" 2>/dev/null -} - -load_rc_config $name -run_rc_command "$1" diff --git a/rp/rcynic/rcynic-cron b/rp/rcynic/rcynic-cron index 53bfea9f..e7e564b3 100755 --- a/rp/rcynic/rcynic-cron +++ b/rp/rcynic/rcynic-cron @@ -28,83 +28,51 @@ our purposes. In theory this is portable to any Unix-like system. import os import sys -import pwd import fcntl import errno -import argparse import rpki.autoconf def run(*cmd, **kwargs): - chroot_this = kwargs.pop("chroot_this", False) - cwd = kwargs.pop("cwd", None) - pid = os.fork() - if pid == 0: - if chroot_this: - os.chdir(rpki.autoconf.RCYNIC_DIR) - elif cwd is not None: - os.chdir(cwd) - if we_are_root: - os.initgroups(pw.pw_name, pw.pw_gid) - if chroot_this: - os.chroot(rpki.autoconf.RCYNIC_DIR) - if we_are_root: - os.setgid(pw.pw_gid) - os.setuid(pw.pw_uid) - os.closerange(3, os.sysconf("SC_OPEN_MAX")) - os.execvp(cmd[0], cmd) - os._exit(1) - else: - status = os.waitpid(pid, 0)[1] - if status == 0: - return - elif os.WIFSIGNALED(status): - sys.exit("Process %s exited with signal %s" % (" ".join(cmd), os.WTERMSIG(status))) - elif os.WIFEXITED(status): - sys.exit("Program %s exited with status %s" % (" ".join(cmd), os.WEXITSTATUS(status))) + cwd = kwargs.pop("cwd", None) + pid = os.fork() + if pid == 0: + if cwd is not None: + os.chdir(cwd) + os.closerange(3, os.sysconf("SC_OPEN_MAX")) + os.execvp(cmd[0], cmd) + os._exit(1) # pylint: disable=W0212 else: - sys.exit("Program %s exited for unknown reason %s" % (" ".join(cmd), status)) - -parser = argparse.ArgumentParser(description = __doc__) -parser.add_argument("--chroot", action = "store_true", help = "run chrooted") -args = parser.parse_args() - -we_are_root = os.getuid() == 0 - -if args.chroot and not we_are_root: - sys.exit("Only root can --chroot") + status = os.waitpid(pid, 0)[1] + if status == 0: + return + elif os.WIFSIGNALED(status): + sys.exit("Process %s exited with signal %s" % (" ".join(cmd), os.WTERMSIG(status))) + elif os.WIFEXITED(status): + sys.exit("Program %s exited with status %s" % (" ".join(cmd), os.WEXITSTATUS(status))) + else: + sys.exit("Program %s exited for unknown reason %s" % (" ".join(cmd), status)) try: - pw = pwd.getpwnam(rpki.autoconf.RCYNIC_USER) -except KeyError: - sys.exit("Could not find passwd entry for user %s" % rpki.autoconf.RCYNIC_USER) - -try: - lock = os.open(os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock"), - os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666) - fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB) - if we_are_root: - os.fchown(lock, pw.pw_uid, pw.pw_gid) + lock = os.open(os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "lock"), + os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666) + fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB) except (IOError, OSError), e: - if e.errno == errno.EAGAIN: - sys.exit(0) # Another instance of this script is already running, exit silently - else: - sys.exit("Error %r opening lock %r" % (e.strerror, os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock"))) + if e.errno == 
errno.EAGAIN: + sys.exit(0) # Another instance of this script is already running, exit silently + else: + sys.exit("Error %r opening lock %r" % (e.strerror, os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock"))) -if args.chroot: - run("/bin/rcynic", "-c", "/etc/rcynic.conf", chroot_this = True) -else: - run(os.path.join(rpki.autoconf.bindir, "rcynic"), "-c", os.path.join(rpki.autoconf.sysconfdir, "rcynic.conf")) +run(os.path.join(rpki.autoconf.bindir, "rcynic")) run(os.path.join(rpki.autoconf.bindir, "rpki-rtr"), "cronjob", - os.path.join(rpki.autoconf.RCYNIC_DIR, "data/authenticated"), cwd = os.path.join(rpki.autoconf.RCYNIC_DIR, "rpki-rtr")) prog = os.path.join(rpki.autoconf.libexecdir, "rpkigui-rcynic") if os.path.exists(prog): - run(prog) + run(prog) if rpki.autoconf.RCYNIC_HTML_DIR and os.path.exists(os.path.dirname(rpki.autoconf.RCYNIC_HTML_DIR)): - run(os.path.join(rpki.autoconf.bindir, "rcynic-html"), - os.path.join(rpki.autoconf.RCYNIC_DIR, "data/rcynic.xml"), - rpki.autoconf.RCYNIC_HTML_DIR) + run(os.path.join(rpki.autoconf.bindir, "rcynic-html"), + os.path.join(rpki.autoconf.RCYNIC_DIR, "data/rcynic.xml"), + rpki.autoconf.RCYNIC_HTML_DIR) diff --git a/rp/rcynic/rcynic-dump b/rp/rcynic/rcynic-dump new file mode 100755 index 00000000..0c7f898f --- /dev/null +++ b/rp/rcynic/rcynic-dump @@ -0,0 +1,95 @@ +#!/usr/bin/env python + +# $Id$ + +""" +Dump rcynicng database to old-style disk files. + +This is a slow operation due to blocking operations in the underlying +filesystem, so in the long run we will almost certainly want to +rewrite the RP toolkit to use the database directly, but it's (much) +easier to compare results between the old and new validation engines +when they use the same data representation. +""" + +import os +import sys +import time +import shutil +import logging +import argparse + +import rpki.config +import rpki.autoconf + +logger = logging.getLogger("rcynic-dump") + +os.environ.update(TZ = "UTC", + DJANGO_SETTINGS_MODULE = "rpki.django_settings.rcynic") +time.tzset() + +logging.basicConfig(level = logging.DEBUG, format = "%(asctime)s %(message)s", datefmt = "%Y-%m-%d %H:%M:%S") + +parser = argparse.ArgumentParser(description = __doc__) +parser.add_argument("-c", "--config") +parser.add_argument("output_tree", nargs = "?", default = "rcynic-data") +args = parser.parse_args() + +rpki.config.parser(set_filename = args.config, section = "rcynic") + +import django +django.setup() + +import rpki.rcynicdb + +def uri_to_filename(obj, base): + return os.path.join(args.output_tree, base, obj.uri[obj.uri.index("://") + 3:]) + +def sha256_to_filename(obj): + return os.path.join(args.output_tree, "sha256", obj.sha256[:2], obj.sha256 + obj.uri[-4:]) + +def authenticated_to_dirname(authenticated): + return "authenticated-{}".format(authenticated.started.strftime("%Y-%m-%dT%H:%M:%SZ")) + +seen = set() + +def check_der(fn, der): + with open(fn, "rb") as f: + return der == f.read() + +def mkdir_maybe(fn): + dn = os.path.dirname(fn) + if not os.path.exists(dn): + os.makedirs(dn) + +for obj in rpki.rcynicdb.models.RPKIObject.objects.all(): + + hfn = sha256_to_filename(obj) + ufn = uri_to_filename(obj, "unauthenticated") + + if not os.path.exists(hfn) or not check_der(hfn, obj.der): + mkdir_maybe(hfn) + with open(hfn, "wb") as f: + f.write(obj.der) + + seen.add(hfn) + seen.add(ufn) + + for auth in obj.authenticated.all(): + afn = uri_to_filename(obj, authenticated_to_dirname(auth)) + mkdir_maybe(afn) + if not os.path.exists(afn): + os.link(hfn, afn) + elif not check_der(afn, obj.der): 
+ os.unlink(afn) + os.link(hfn, afn) + seen.add(afn) + +auth = rpki.rcynicdb.models.Authenticated.objects.order_by("-started").first() + +if auth is not None: + src = authenticated_to_dirname(auth) + dst = os.path.join(args.output_tree, "authenticated") + if os.path.exists(dst): + os.unlink(dst) + os.symlink(src, dst) diff --git a/rp/rcynic/rcynic-html b/rp/rcynic/rcynic-html index ef566440..154193b2 100755 --- a/rp/rcynic/rcynic-html +++ b/rp/rcynic/rcynic-html @@ -32,361 +32,363 @@ import copy import rpki.autoconf try: - from lxml.etree import (ElementTree, Element, SubElement, Comment) + from lxml.etree import (ElementTree, Element, SubElement, Comment) except ImportError: - from xml.etree.ElementTree import (ElementTree, Element, SubElement, Comment) + from xml.etree.ElementTree import (ElementTree, Element, SubElement, Comment) session = None args = None def parse_options(): - global args - - parser = argparse.ArgumentParser(description = __doc__) - parser.add_argument("--refresh", type = int, default = 1800, - help = "refresh interval for generated HTML") - parser.add_argument("--hide-problems", action = "store_true", - help = "don't generate \"problems\" page") - parser.add_argument("--hide-graphs", action = "store_true", - help = "don't generate graphs") - parser.add_argument("--hide-object-counts", action = "store_true", - help = "don't display object counts") - parser.add_argument("--dont-update-rrds", action = "store_true", - help = "don't add new data to RRD databases") - parser.add_argument("--png-height", type = int, default = 190, - help = "height of PNG images") - parser.add_argument("--png-width", type = int, default = 1350, - help = "width of PNG images") - parser.add_argument("--svg-height", type = int, default = 600, - help = "height of SVG images") - parser.add_argument("--svg-width", type = int, default = 1200, - help = "width of SVG images") - parser.add_argument("--eps-height", type = int, default = 0, - help = "height of EPS images") - parser.add_argument("--eps-width", type = int, default = 0, - help = "width of EPS images") - parser.add_argument("--rrdtool-binary", default = rpki.autoconf.RRDTOOL, - help = "location of rrdtool binary") - parser.add_argument("input_file", type = argparse.FileType("r"), - help = "XML input file") - parser.add_argument("output_directory", - help = "output directory") - args = parser.parse_args() + global args # pylint: disable=W0603 + + parser = argparse.ArgumentParser(description = __doc__) + parser.add_argument("--refresh", type = int, default = 1800, + help = "refresh interval for generated HTML") + parser.add_argument("--hide-problems", action = "store_true", + help = "don't generate \"problems\" page") + parser.add_argument("--hide-graphs", action = "store_true", + help = "don't generate graphs") + parser.add_argument("--hide-object-counts", action = "store_true", + help = "don't display object counts") + parser.add_argument("--dont-update-rrds", action = "store_true", + help = "don't add new data to RRD databases") + parser.add_argument("--png-height", type = int, default = 190, + help = "height of PNG images") + parser.add_argument("--png-width", type = int, default = 1350, + help = "width of PNG images") + parser.add_argument("--svg-height", type = int, default = 600, + help = "height of SVG images") + parser.add_argument("--svg-width", type = int, default = 1200, + help = "width of SVG images") + parser.add_argument("--eps-height", type = int, default = 0, + help = "height of EPS images") + 
parser.add_argument("--eps-width", type = int, default = 0, + help = "width of EPS images") + parser.add_argument("--rrdtool-binary", default = rpki.autoconf.RRDTOOL, + help = "location of rrdtool binary") + parser.add_argument("input_file", type = argparse.FileType("r"), + help = "XML input file") + parser.add_argument("output_directory", + help = "output directory") + args = parser.parse_args() def parse_utc(s): - return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ"))) + return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ"))) class Label(object): - moods = ["bad", "warn", "good"] + moods = ["bad", "warn", "good"] - def __init__(self, elt): - self.code = elt.tag - self.mood = elt.get("kind") - self.text = elt.text.strip() - self.count = 0 + def __init__(self, elt): + self.code = elt.tag + self.mood = elt.get("kind") + self.text = elt.text.strip() + self.count = 0 - def get_count(self): - return self.count + def get_count(self): + return self.count - @property - def sort_key(self): - try: - return self.moods.index(self.mood) - except ValueError: - return len(self.moods) + @property + def sort_key(self): + try: + return self.moods.index(self.mood) + except ValueError: + return len(self.moods) class Validation_Status(object): - def __init__(self, elt, label_map): - self.uri = elt.text.strip() - self.timestamp = elt.get("timestamp") - self.generation = elt.get("generation") - self.hostname = urlparse.urlparse(self.uri).hostname or "[None]" - self.fn2 = os.path.splitext(self.uri)[1] or None if self.generation else None - self.label = label_map[elt.get("status")] + def __init__(self, elt, label_map): + self.uri = elt.text.strip() + self.timestamp = elt.get("timestamp") + self.generation = elt.get("generation") + self.hostname = urlparse.urlparse(self.uri).hostname or "[None]" + self.fn2 = os.path.splitext(self.uri)[1] or None if self.generation else None + self.label = label_map[elt.get("status")] - def sort_key(self): - return (self.label.sort_key, self.timestamp, self.hostname, self.fn2, self.generation) + def sort_key(self): + return (self.label.sort_key, self.timestamp, self.hostname, self.fn2, self.generation) - @property - def code(self): - return self.label.code + @property + def code(self): + return self.label.code - @property - def mood(self): - return self.label.mood + @property + def mood(self): + return self.label.mood - @property - def accepted(self): - return self.label.code == "object_accepted" + @property + def accepted(self): + return self.label.code == "object_accepted" - @property - def rejected(self): - return self.label.code == "object_rejected" + @property + def rejected(self): + return self.label.code == "object_rejected" - @property - def is_current(self): - return self.generation == "current" + @property + def is_current(self): + return self.generation == "current" - @property - def is_backup(self): - return self.generation == "backup" + @property + def is_backup(self): + return self.generation == "backup" - @property - def is_problem(self): - return self.label.mood != "good" + @property + def is_problem(self): + return self.label.mood != "good" - @property - def is_connection_problem(self): - return self.label.mood != "good" and self.label.code.startswith("rsync_transfer_") + @property + def is_connection_problem(self): + return self.label.mood != "good" and self.label.code.startswith("rsync_transfer_") - @property - def is_object_problem(self): - return self.label.mood != "good" and not self.label.code.startswith("rsync_transfer_") + @property + def 
is_object_problem(self): + return self.label.mood != "good" and not self.label.code.startswith("rsync_transfer_") - @property - def is_connection_detail(self): - return self.label.code.startswith("rsync_transfer_") + @property + def is_connection_detail(self): + return self.label.code.startswith("rsync_transfer_") - @property - def is_object_detail(self): - return not self.label.code.startswith("rsync_transfer_") + @property + def is_object_detail(self): + return not self.label.code.startswith("rsync_transfer_") class Problem_Mixin(object): - @property - def connection_problems(self): - result = [v for v in self.validation_status if v.is_connection_problem] - result.sort(key = Validation_Status.sort_key) - return result + # pylint: disable=E1101 - @property - def object_problems(self): - result = [v for v in self.validation_status if v.is_object_problem] - result.sort(key = Validation_Status.sort_key) - return result + @property + def connection_problems(self): + result = [v for v in self.validation_status if v.is_connection_problem] + result.sort(key = Validation_Status.sort_key) + return result + + @property + def object_problems(self): + result = [v for v in self.validation_status if v.is_object_problem] + result.sort(key = Validation_Status.sort_key) + return result class Host(Problem_Mixin): - def __init__(self, hostname, timestamp): - self.hostname = hostname - self.timestamp = timestamp - self.elapsed = 0 - self.connections = 0 - self.failures = 0 - self.uris = set() - self.graph = None - self.counters = {} - self.totals = {} - self.validation_status = [] - - def add_connection(self, elt): - self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started")) - self.connections += 1 - if elt.get("error") is not None: - self.failures += 1 - - def add_validation_status(self, v): - self.validation_status.append(v) - if v.generation == "current": - self.uris.add(v.uri) - self.counters[(v.fn2, v.generation, v.label)] = self.get_counter(v.fn2, v.generation, v.label) + 1 - self.totals[v.label] = self.get_total(v.label) + 1 - v.label.count += 1 - - def get_counter(self, fn2, generation, label): - return self.counters.get((fn2, generation, label), 0) - - def get_total(self, label): - return self.totals.get(label, 0) - - @property - def failed(self): - return 1 if self.failures > 0 else 0 - - @property - def objects(self): - return len(self.uris) - - field_table = (("connections", "GAUGE"), - ("objects", "GAUGE"), - ("elapsed", "GAUGE"), - ("failed", "ABSOLUTE")) - - rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps - for steps in (1, 4, 24)) - - @classmethod - def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"): - return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum) - for field in cls.field_table] - - @property - def field_values(self): - return tuple(str(getattr(self, field[0])) for field in self.field_table) - - @classmethod - def field_defs(cls, filebase): - return ["DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], filebase, field[0]) - for field in cls.field_table] - - graph_opts = ( - "--vertical-label", "Sync time (seconds)", - "--right-axis-label", "Objects (count)", - "--lower-limit", "0", - "--right-axis", "1:0", - "--full-size-mode" ) - - graph_cmds = ( - - # Split elapsed into separate data sets, so we can color - # differently to indicate how succesful transfer was. Intent is - # that exactly one of these be defined for every value in elapsed. 
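The three CDEF lines that follow use rrdtool's reverse-Polish notation, where "a,b,c,IF" reads "if a then b else c". A rough Python rendering of what they compute per sample, matching the comment above (a sketch only, not part of the patch):

    def split_elapsed(elapsed, connections, failed):
        # Exactly one of the three results is defined (non-None) per sample.
        success = None if failed else elapsed               # failed,UNKN,elapsed,IF
        failure = elapsed if connections == 1 and failed else None
        partial = elapsed if connections != 1 and failed else None
        return success, failure, partial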
- - r"CDEF:success=failed,UNKN,elapsed,IF", - r"CDEF:failure=connections,1,EQ,failed,*,elapsed,UNKN,IF", - r"CDEF:partial=connections,1,NE,failed,*,elapsed,UNKN,IF", - - # Show connection timing first, as color-coded semi-transparent - # areas with opaque borders. Intent is to make the colors stand - # out, since they're a major health indicator. Transparency is - # handled via an alpha channel (fourth octet of color code). We - # draw this stuff first so that later lines can overwrite it. - - r"AREA:success#00FF0080:Sync time (success)", - r"AREA:partial#FFA50080:Sync time (partial failure)", - r"AREA:failure#FF000080:Sync time (total failure)", - - r"LINE1:success#00FF00", # Green - r"LINE1:partial#FFA500", # Orange - r"LINE1:failure#FF0000", # Red - - # Now show object counts, as a simple black line. - - r"LINE1:objects#000000:Objects", # Black - - # Add averages over period to chart legend. - - r"VDEF:avg_elapsed=elapsed,AVERAGE", - r"VDEF:avg_connections=connections,AVERAGE", - r"VDEF:avg_objects=objects,AVERAGE", - r"COMMENT:\j", - r"GPRINT:avg_elapsed:Average sync time (seconds)\: %5.2lf", - r"GPRINT:avg_connections:Average connection count\: %5.2lf", - r"GPRINT:avg_objects:Average object count\: %5.2lf" ) - - graph_periods = (("week", "-1w"), - ("month", "-31d"), - ("year", "-1y")) - - def rrd_run(self, cmd): - try: - cmd = [str(i) for i in cmd] - cmd.insert(0, args.rrdtool_binary) - subprocess.check_call(cmd, stdout = open("/dev/null", "w")) - except OSError, e: - sys.exit("Problem running %s, perhaps you need to set --rrdtool-binary? (%s)" % (args.rrdtool_binary, e)) - except subprocess.CalledProcessError, e: - sys.exit("Failure running %s: %s" % (args.rrdtool_binary, e)) - - def rrd_update(self): - filename = os.path.join(args.output_directory, self.hostname) + ".rrd" - if not os.path.exists(filename): - cmd = ["create", filename, "--start", self.timestamp - 1, "--step", "3600"] - cmd.extend(self.field_ds_specifiers()) - cmd.extend(self.rras) - self.rrd_run(cmd) - self.rrd_run(["update", filename, - "%s:%s" % (self.timestamp, ":".join(str(v) for v in self.field_values))]) - - def rrd_graph(self, html): - # pylint: disable=W0622 - filebase = os.path.join(args.output_directory, self.hostname) - formats = [format for format in ("png", "svg", "eps") - if getattr(args, format + "_width") and getattr(args, format + "_height")] - for period, start in self.graph_periods: - for format in formats: - cmds = [ "graph", "%s_%s.%s" % (filebase, period, format), - "--title", "%s last %s" % (self.hostname, period), - "--start", start, - "--width", getattr(args, format + "_width"), - "--height", getattr(args, format + "_height"), - "--imgformat", format.upper() ] - cmds.extend(self.graph_opts) - cmds.extend(self.field_defs(filebase)) - cmds.extend(self.graph_cmds) - self.rrd_run(cmds) - img = Element("img", src = "%s_%s.png" % (self.hostname, period), - width = str(args.png_width), - height = str(args.png_height)) - if self.graph is None: - self.graph = copy.copy(img) - html.BodyElement("h2").text = "%s over last %s" % (self.hostname, period) - html.BodyElement("a", href = "%s_%s_svg.html" % (self.hostname, period)).append(img) - html.BodyElement("br") - svg_html = HTML("%s over last %s" % (self.hostname, period), - "%s_%s_svg" % (self.hostname, period)) - svg_html.BodyElement("img", src = "%s_%s.svg" % (self.hostname, period)) - svg_html.close() + def __init__(self, hostname, timestamp): + self.hostname = hostname + self.timestamp = timestamp + self.elapsed = 0 + self.connections = 0 + 
self.failures = 0 + self.uris = set() + self.graph = None + self.counters = {} + self.totals = {} + self.validation_status = [] + + def add_connection(self, elt): + self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started")) + self.connections += 1 + if elt.get("error") is not None: + self.failures += 1 + + def add_validation_status(self, v): + self.validation_status.append(v) + if v.generation == "current": + self.uris.add(v.uri) + self.counters[(v.fn2, v.generation, v.label)] = self.get_counter(v.fn2, v.generation, v.label) + 1 + self.totals[v.label] = self.get_total(v.label) + 1 + v.label.count += 1 + + def get_counter(self, fn2, generation, label): + return self.counters.get((fn2, generation, label), 0) + + def get_total(self, label): + return self.totals.get(label, 0) + + @property + def failed(self): + return 1 if self.failures > 0 else 0 + + @property + def objects(self): + return len(self.uris) + + field_table = (("connections", "GAUGE"), + ("objects", "GAUGE"), + ("elapsed", "GAUGE"), + ("failed", "ABSOLUTE")) + + rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps + for steps in (1, 4, 24)) + + @classmethod + def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"): + return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum) + for field in cls.field_table] + + @property + def field_values(self): + return tuple(str(getattr(self, field[0])) for field in self.field_table) + + @classmethod + def field_defs(cls, filebase): + return ["DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], filebase, field[0]) + for field in cls.field_table] + + graph_opts = ( + "--vertical-label", "Sync time (seconds)", + "--right-axis-label", "Objects (count)", + "--lower-limit", "0", + "--right-axis", "1:0", + "--full-size-mode" ) + + graph_cmds = ( + + # Split elapsed into separate data sets, so we can color + # differently to indicate how succesful transfer was. Intent is + # that exactly one of these be defined for every value in elapsed. + + r"CDEF:success=failed,UNKN,elapsed,IF", + r"CDEF:failure=connections,1,EQ,failed,*,elapsed,UNKN,IF", + r"CDEF:partial=connections,1,NE,failed,*,elapsed,UNKN,IF", + + # Show connection timing first, as color-coded semi-transparent + # areas with opaque borders. Intent is to make the colors stand + # out, since they're a major health indicator. Transparency is + # handled via an alpha channel (fourth octet of color code). We + # draw this stuff first so that later lines can overwrite it. + + r"AREA:success#00FF0080:Sync time (success)", + r"AREA:partial#FFA50080:Sync time (partial failure)", + r"AREA:failure#FF000080:Sync time (total failure)", + + r"LINE1:success#00FF00", # Green + r"LINE1:partial#FFA500", # Orange + r"LINE1:failure#FF0000", # Red + + # Now show object counts, as a simple black line. + + r"LINE1:objects#000000:Objects", # Black + + # Add averages over period to chart legend. 
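For reference, the RRD helpers defined above expand as follows before rrd_update() hands them to "rrdtool create ... --step 3600"; the legend lines of the graph command list continue below (a sketch assuming the default 24-hour heartbeat):

    >>> Host.field_ds_specifiers()
    ['DS:connections:GAUGE:86400:0:U', 'DS:objects:GAUGE:86400:0:U',
     'DS:elapsed:GAUGE:86400:0:U', 'DS:failed:ABSOLUTE:86400:0:U']
    >>> Host.rras
    ('RRA:AVERAGE:0.5:1:9600', 'RRA:AVERAGE:0.5:4:9600', 'RRA:AVERAGE:0.5:24:9600')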
+ + r"VDEF:avg_elapsed=elapsed,AVERAGE", + r"VDEF:avg_connections=connections,AVERAGE", + r"VDEF:avg_objects=objects,AVERAGE", + r"COMMENT:\j", + r"GPRINT:avg_elapsed:Average sync time (seconds)\: %5.2lf", + r"GPRINT:avg_connections:Average connection count\: %5.2lf", + r"GPRINT:avg_objects:Average object count\: %5.2lf" ) + + graph_periods = (("week", "-1w"), + ("month", "-31d"), + ("year", "-1y")) + + def rrd_run(self, cmd): + try: + cmd = [str(i) for i in cmd] + cmd.insert(0, args.rrdtool_binary) + subprocess.check_call(cmd, stdout = open("/dev/null", "w")) + except OSError, e: + sys.exit("Problem running %s, perhaps you need to set --rrdtool-binary? (%s)" % (args.rrdtool_binary, e)) + except subprocess.CalledProcessError, e: + sys.exit("Failure running %s: %s" % (args.rrdtool_binary, e)) + + def rrd_update(self): + filename = os.path.join(args.output_directory, self.hostname) + ".rrd" + if not os.path.exists(filename): + cmd = ["create", filename, "--start", self.timestamp - 1, "--step", "3600"] + cmd.extend(self.field_ds_specifiers()) + cmd.extend(self.rras) + self.rrd_run(cmd) + self.rrd_run(["update", filename, + "%s:%s" % (self.timestamp, ":".join(str(v) for v in self.field_values))]) + + def rrd_graph(self, html): + # pylint: disable=W0622 + filebase = os.path.join(args.output_directory, self.hostname) + formats = [format for format in ("png", "svg", "eps") + if getattr(args, format + "_width") and getattr(args, format + "_height")] + for period, start in self.graph_periods: + for format in formats: + cmds = [ "graph", "%s_%s.%s" % (filebase, period, format), + "--title", "%s last %s" % (self.hostname, period), + "--start", start, + "--width", getattr(args, format + "_width"), + "--height", getattr(args, format + "_height"), + "--imgformat", format.upper() ] + cmds.extend(self.graph_opts) + cmds.extend(self.field_defs(filebase)) + cmds.extend(self.graph_cmds) + self.rrd_run(cmds) + img = Element("img", src = "%s_%s.png" % (self.hostname, period), + width = str(args.png_width), + height = str(args.png_height)) + if self.graph is None: + self.graph = copy.copy(img) + html.BodyElement("h2").text = "%s over last %s" % (self.hostname, period) + html.BodyElement("a", href = "%s_%s_svg.html" % (self.hostname, period)).append(img) + html.BodyElement("br") + svg_html = HTML("%s over last %s" % (self.hostname, period), + "%s_%s_svg" % (self.hostname, period)) + svg_html.BodyElement("img", src = "%s_%s.svg" % (self.hostname, period)) + svg_html.close() class Session(Problem_Mixin): - def __init__(self): - self.hosts = {} + def __init__(self): + self.hosts = {} - self.root = ElementTree(file = args.input_file).getroot() + self.root = ElementTree(file = args.input_file).getroot() - self.rcynic_version = self.root.get("rcynic-version") - self.rcynic_date = self.root.get("date") - self.timestamp = parse_utc(self.rcynic_date) + self.rcynic_version = self.root.get("rcynic-version") + self.rcynic_date = self.root.get("date") + self.timestamp = parse_utc(self.rcynic_date) - self.labels = [Label(elt) for elt in self.root.find("labels")] - self.load_validation_status() + self.labels = [Label(elt) for elt in self.root.find("labels")] + self.load_validation_status() - for elt in self.root.findall("rsync_history"): - self.get_host(urlparse.urlparse(elt.text.strip()).hostname).add_connection(elt) + for elt in self.root.findall("rsync_history"): + self.get_host(urlparse.urlparse(elt.text.strip()).hostname).add_connection(elt) - generations = set() - fn2s = set() + generations = set() + fn2s = set() - for v 
in self.validation_status: - self.get_host(v.hostname).add_validation_status(v) - generations.add(v.generation) - fn2s.add(v.fn2) + for v in self.validation_status: + self.get_host(v.hostname).add_validation_status(v) + generations.add(v.generation) + fn2s.add(v.fn2) - self.labels = [l for l in self.labels if l.count > 0] + self.labels = [l for l in self.labels if l.count > 0] - self.hostnames = sorted(self.hosts) - self.generations = sorted(generations) - self.fn2s = sorted(fn2s) + self.hostnames = sorted(self.hosts) + self.generations = sorted(generations) + self.fn2s = sorted(fn2s) - def load_validation_status(self): - label_map = dict((label.code, label) for label in self.labels) - full_validation_status = [Validation_Status(elt, label_map) - for elt in self.root.findall("validation_status")] - accepted_current = set(v.uri for v in full_validation_status - if v.is_current and v.accepted) - self.validation_status = [v for v in full_validation_status - if not v.is_backup - or v.uri not in accepted_current] + def load_validation_status(self): + label_map = dict((label.code, label) for label in self.labels) + full_validation_status = [Validation_Status(elt, label_map) + for elt in self.root.findall("validation_status")] + accepted_current = set(v.uri for v in full_validation_status + if v.is_current and v.accepted) + self.validation_status = [v for v in full_validation_status + if not v.is_backup + or v.uri not in accepted_current] - def get_host(self, hostname): - if hostname not in self.hosts: - self.hosts[hostname] = Host(hostname, self.timestamp) - return self.hosts[hostname] + def get_host(self, hostname): + if hostname not in self.hosts: + self.hosts[hostname] = Host(hostname, self.timestamp) + return self.hosts[hostname] - def get_sum(self, fn2, generation, label): - return sum(h.get_counter(fn2, generation, label) - for h in self.hosts.itervalues()) + def get_sum(self, fn2, generation, label): + return sum(h.get_counter(fn2, generation, label) + for h in self.hosts.itervalues()) - def rrd_update(self): - if not args.dont_update_rrds: - for h in self.hosts.itervalues(): - h.rrd_update() + def rrd_update(self): + if not args.dont_update_rrds: + for h in self.hosts.itervalues(): + h.rrd_update() css = ''' th, td { @@ -475,183 +477,183 @@ css = ''' class HTML(object): - def __init__(self, title, filebase): + def __init__(self, title, filebase): + + self.filename = os.path.join(args.output_directory, filebase + ".html") + + self.html = Element("html") + self.html.append(Comment(" Generators:\n" + + " " + session.rcynic_version + "\n" + + " $Id$\n")) + self.head = SubElement(self.html, "head") + self.body = SubElement(self.html, "body") + + title += " " + session.rcynic_date + SubElement(self.head, "title").text = title + SubElement(self.body, "h1").text = title + SubElement(self.head, "style", type = "text/css").text = css + + if args.refresh: + SubElement(self.head, "meta", { "http-equiv" : "Refresh", "content" : str(args.refresh) }) + + hostwidth = max(len(hostname) for hostname in session.hostnames) + + toc = SubElement(self.body, "ul", id = "nav") + SubElement(SubElement(toc, "li"), "a", href = "index.html").text = "Overview" + li = SubElement(toc, "li") + SubElement(li, "span").text = "Repositories" + ul = SubElement(li, "ul", style = "width: %sem" % hostwidth) + for hostname in session.hostnames: + SubElement(SubElement(ul, "li"), "a", href = "%s.html" % hostname).text = hostname + SubElement(SubElement(toc, "li"), "a", href = "problems.html").text = "Problems" + li = 
SubElement(toc, "li") + SubElement(li, "span").text = "All Details" + ul = SubElement(li, "ul", style = "width: 15em") + SubElement(SubElement(ul, "li"), "a", href = "connections.html").text = "All Connections" + SubElement(SubElement(ul, "li"), "a", href = "objects.html").text = "All Objects" + SubElement(self.body, "br") + + def close(self): + ElementTree(element = self.html).write(self.filename) + + def BodyElement(self, tag, **attrib): + return SubElement(self.body, tag, **attrib) + + def counter_table(self, data_func, total_func): + table = self.BodyElement("table", rules = "all", border = "1") + thead = SubElement(table, "thead") + tfoot = SubElement(table, "tfoot") + tbody = SubElement(table, "tbody") + tr = SubElement(thead, "tr") + SubElement(tr, "th") + for label in session.labels: + SubElement(tr, "th").text = label.text + for fn2 in session.fn2s: + for generation in session.generations: + counters = [data_func(fn2, generation, label) for label in session.labels] + if sum(counters) > 0: + tr = SubElement(tbody, "tr") + SubElement(tr, "td").text = ((generation or "") + " " + (fn2 or "")).strip() + for label, count in zip(session.labels, counters): + td = SubElement(tr, "td") + if count > 0: + td.set("class", label.mood) + td.text = str(count) + tr = SubElement(tfoot, "tr") + SubElement(tr, "td").text = "Total" + counters = [total_func(label) for label in session.labels] + for label, count in zip(session.labels, counters): + td = SubElement(tr, "td") + if count > 0: + td.set("class", label.mood) + td.text = str(count) + return table + + def object_count_table(self, session): # pylint: disable=W0621 + table = self.BodyElement("table", rules = "all", border = "1") + thead = SubElement(table, "thead") + tbody = SubElement(table, "tbody") + tfoot = SubElement(table, "tfoot") + fn2s = [fn2 for fn2 in session.fn2s if fn2 is not None] + total = dict((fn2, 0) for fn2 in fn2s) + for hostname in session.hostnames: + tr = SubElement(tbody, "tr") + SubElement(tr, "td").text = hostname + for fn2 in fn2s: + td = SubElement(tr, "td") + count = sum(uri.endswith(fn2) for uri in session.hosts[hostname].uris) + total[fn2] += count + if count > 0: + td.text = str(count) + trhead = SubElement(thead, "tr") + trfoot = SubElement(tfoot, "tr") + SubElement(trhead, "th").text = "Repository" + SubElement(trfoot, "td").text = "Total" + for fn2 in fn2s: + SubElement(trhead, "th").text = fn2 + SubElement(trfoot, "td").text = str(total[fn2]) + return table + + def detail_table(self, records): + if records: + table = self.BodyElement("table", rules = "all", border = "1") + thead = SubElement(table, "thead") + tbody = SubElement(table, "tbody") + tr = SubElement(thead, "tr") + SubElement(tr, "th").text = "Timestamp" + SubElement(tr, "th").text = "Generation" + SubElement(tr, "th").text = "Status" + SubElement(tr, "th").text = "URI" + for v in records: + tr = SubElement(tbody, "tr", { "class" : v.mood }) + SubElement(tr, "td").text = v.timestamp + SubElement(tr, "td").text = v.generation + SubElement(tr, "td").text = v.label.text + SubElement(tr, "td", { "class" : "uri"}).text = v.uri + return table + else: + self.BodyElement("p").text = "None found" + return None - self.filename = os.path.join(args.output_directory, filebase + ".html") +def main(): - self.html = Element("html") - self.html.append(Comment(" Generators:\n" + - " " + session.rcynic_version + "\n" + - " $Id$\n")) - self.head = SubElement(self.html, "head") - self.body = SubElement(self.html, "body") + global session # pylint: disable=W0603 - title += 
" " + session.rcynic_date - SubElement(self.head, "title").text = title - SubElement(self.body, "h1").text = title - SubElement(self.head, "style", type = "text/css").text = css + os.putenv("TZ", "UTC") + time.tzset() - if args.refresh: - SubElement(self.head, "meta", { "http-equiv" : "Refresh", "content" : str(args.refresh) }) + parse_options() - hostwidth = max(len(hostname) for hostname in session.hostnames) + session = Session() + session.rrd_update() - toc = SubElement(self.body, "ul", id = "nav") - SubElement(SubElement(toc, "li"), "a", href = "index.html").text = "Overview" - li = SubElement(toc, "li") - SubElement(li, "span").text = "Repositories" - ul = SubElement(li, "ul", style = "width: %sem" % hostwidth) for hostname in session.hostnames: - SubElement(SubElement(ul, "li"), "a", href = "%s.html" % hostname).text = hostname - SubElement(SubElement(toc, "li"), "a", href = "problems.html").text = "Problems" - li = SubElement(toc, "li") - SubElement(li, "span").text = "All Details" - ul = SubElement(li, "ul", style = "width: 15em") - SubElement(SubElement(ul, "li"), "a", href = "connections.html").text = "All Connections" - SubElement(SubElement(ul, "li"), "a", href = "objects.html").text = "All Objects" - SubElement(self.body, "br") - - def close(self): - ElementTree(element = self.html).write(self.filename) - - def BodyElement(self, tag, **attrib): - return SubElement(self.body, tag, **attrib) - - def counter_table(self, data_func, total_func): - table = self.BodyElement("table", rules = "all", border = "1") - thead = SubElement(table, "thead") - tfoot = SubElement(table, "tfoot") - tbody = SubElement(table, "tbody") - tr = SubElement(thead, "tr") - SubElement(tr, "th") - for label in session.labels: - SubElement(tr, "th").text = label.text - for fn2 in session.fn2s: - for generation in session.generations: - counters = [data_func(fn2, generation, label) for label in session.labels] - if sum(counters) > 0: - tr = SubElement(tbody, "tr") - SubElement(tr, "td").text = ((generation or "") + " " + (fn2 or "")).strip() - for label, count in zip(session.labels, counters): - td = SubElement(tr, "td") - if count > 0: - td.set("class", label.mood) - td.text = str(count) - tr = SubElement(tfoot, "tr") - SubElement(tr, "td").text = "Total" - counters = [total_func(label) for label in session.labels] - for label, count in zip(session.labels, counters): - td = SubElement(tr, "td") - if count > 0: - td.set("class", label.mood) - td.text = str(count) - return table - - def object_count_table(self, session): # pylint: disable=W0621 - table = self.BodyElement("table", rules = "all", border = "1") - thead = SubElement(table, "thead") - tbody = SubElement(table, "tbody") - tfoot = SubElement(table, "tfoot") - fn2s = [fn2 for fn2 in session.fn2s if fn2 is not None] - total = dict((fn2, 0) for fn2 in fn2s) + html = HTML("Repository details for %s" % hostname, hostname) + html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total) + if not args.hide_graphs: + session.hosts[hostname].rrd_graph(html) + if not args.hide_problems: + html.BodyElement("h2").text = "Connection Problems" + html.detail_table(session.hosts[hostname].connection_problems) + html.BodyElement("h2").text = "Object Problems" + html.detail_table(session.hosts[hostname].object_problems) + html.close() + + html = HTML("rcynic summary", "index") + html.BodyElement("h2").text = "Grand totals for all repositories" + html.counter_table(session.get_sum, Label.get_count) + if not args.hide_object_counts: + 
html.BodyElement("br") + html.BodyElement("hr") + html.BodyElement("br") + html.BodyElement("h2").text = "Current total object counts (distinct URIs)" + html.object_count_table(session) for hostname in session.hostnames: - tr = SubElement(tbody, "tr") - SubElement(tr, "td").text = hostname - for fn2 in fn2s: - td = SubElement(tr, "td") - count = sum(uri.endswith(fn2) for uri in session.hosts[hostname].uris) - total[fn2] += count - if count > 0: - td.text = str(count) - trhead = SubElement(thead, "tr") - trfoot = SubElement(tfoot, "tr") - SubElement(trhead, "th").text = "Repository" - SubElement(trfoot, "td").text = "Total" - for fn2 in fn2s: - SubElement(trhead, "th").text = fn2 - SubElement(trfoot, "td").text = str(total[fn2]) - return table - - def detail_table(self, records): - if records: - table = self.BodyElement("table", rules = "all", border = "1") - thead = SubElement(table, "thead") - tbody = SubElement(table, "tbody") - tr = SubElement(thead, "tr") - SubElement(tr, "th").text = "Timestamp" - SubElement(tr, "th").text = "Generation" - SubElement(tr, "th").text = "Status" - SubElement(tr, "th").text = "URI" - for v in records: - tr = SubElement(tbody, "tr", { "class" : v.mood }) - SubElement(tr, "td").text = v.timestamp - SubElement(tr, "td").text = v.generation - SubElement(tr, "td").text = v.label.text - SubElement(tr, "td", { "class" : "uri"}).text = v.uri - return table - else: - self.BodyElement("p").text = "None found" - return None - -def main(): - - global session - - os.putenv("TZ", "UTC") - time.tzset() - - parse_options() + html.BodyElement("br") + html.BodyElement("hr") + html.BodyElement("br") + html.BodyElement("h2").text = "Overview for repository %s" % hostname + html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total) + if not args.hide_graphs: + html.BodyElement("br") + html.BodyElement("a", href = "%s.html" % hostname).append(session.hosts[hostname].graph) + html.close() - session = Session() - session.rrd_update() + html = HTML("Problems", "problems") + html.BodyElement("h2").text = "Connection Problems" + html.detail_table(session.connection_problems) + html.BodyElement("h2").text = "Object Problems" + html.detail_table(session.object_problems) + html.close() - for hostname in session.hostnames: - html = HTML("Repository details for %s" % hostname, hostname) - html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total) - if not args.hide_graphs: - session.hosts[hostname].rrd_graph(html) - if not args.hide_problems: - html.BodyElement("h2").text = "Connection Problems" - html.detail_table(session.hosts[hostname].connection_problems) - html.BodyElement("h2").text = "Object Problems" - html.detail_table(session.hosts[hostname].object_problems) + html = HTML("All connections", "connections") + html.detail_table([v for v in session.validation_status if v.is_connection_detail]) html.close() - html = HTML("rcynic summary", "index") - html.BodyElement("h2").text = "Grand totals for all repositories" - html.counter_table(session.get_sum, Label.get_count) - if not args.hide_object_counts: - html.BodyElement("br") - html.BodyElement("hr") - html.BodyElement("br") - html.BodyElement("h2").text = "Current total object counts (distinct URIs)" - html.object_count_table(session) - for hostname in session.hostnames: - html.BodyElement("br") - html.BodyElement("hr") - html.BodyElement("br") - html.BodyElement("h2").text = "Overview for repository %s" % hostname - 
html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total) - if not args.hide_graphs: - html.BodyElement("br") - html.BodyElement("a", href = "%s.html" % hostname).append(session.hosts[hostname].graph) - html.close() - - html = HTML("Problems", "problems") - html.BodyElement("h2").text = "Connection Problems" - html.detail_table(session.connection_problems) - html.BodyElement("h2").text = "Object Problems" - html.detail_table(session.object_problems) - html.close() - - html = HTML("All connections", "connections") - html.detail_table([v for v in session.validation_status if v.is_connection_detail]) - html.close() - - html = HTML("All objects", "objects") - html.detail_table([v for v in session.validation_status if v.is_object_detail]) - html.close() + html = HTML("All objects", "objects") + html.detail_table([v for v in session.validation_status if v.is_object_detail]) + html.close() if __name__ == "__main__": - main() + main() diff --git a/rp/rcynic/rcynic-svn b/rp/rcynic/rcynic-svn index 28b24672..3c59116a 100755 --- a/rp/rcynic/rcynic-svn +++ b/rp/rcynic/rcynic-svn @@ -27,50 +27,50 @@ import fcntl import os try: - from lxml.etree import ElementTree + from lxml.etree import ElementTree except ImportError: - from xml.etree.ElementTree import ElementTree + from xml.etree.ElementTree import ElementTree mime_types = ( - ("html", "application/xhtml+xml"), - ("cer", "application/pkix-cert"), - ("crl", "application/pkix-crl"), - ("mft", "application/rpki-manifest"), - ("mnf", "application/rpki-manifest"), - ("roa", "application/rpki-roa"), - ("gbr", "application/rpki-ghostbusters")) + ("html", "application/xhtml+xml"), + ("cer", "application/pkix-cert"), + ("crl", "application/pkix-crl"), + ("mft", "application/rpki-manifest"), + ("mnf", "application/rpki-manifest"), + ("roa", "application/rpki-roa"), + ("gbr", "application/rpki-ghostbusters")) def run(*argv, **kwargs): - """ - Run a program, displaying timing data when appropriate. - """ + """ + Run a program, displaying timing data when appropriate. + """ - _t0 = datetime.datetime.utcnow() - subprocess.check_call(argv, **kwargs) - if args.show_timing: - _t1 = datetime.datetime.utcnow() - print _t1, (_t1 - _t0), " ".join(argv) + _t0 = datetime.datetime.utcnow() + subprocess.check_call(argv, **kwargs) + if args.show_timing: + _t1 = datetime.datetime.utcnow() + print _t1, (_t1 - _t0), " ".join(argv) def runxml(*argv): - """ - - Run a program which produces XML output, displaying timing data when - appropriate and returning an ElementTree constructed from the - program's output. - """ - _t0 = datetime.datetime.utcnow() - p = subprocess.Popen(argv, stdout = subprocess.PIPE) - x = ElementTree(file = p.stdout) - s = p.wait() - if s: - raise subprocess.CalledProcessError(s, argv[0]) - if args.show_timing: - _t1 = datetime.datetime.utcnow() - print _t1, (_t1 - _t0), " ".join(argv) - return x + """ + + Run a program which produces XML output, displaying timing data when + appropriate and returning an ElementTree constructed from the + program's output. + """ + _t0 = datetime.datetime.utcnow() + p = subprocess.Popen(argv, stdout = subprocess.PIPE) + x = ElementTree(file = p.stdout) + s = p.wait() + if s: + raise subprocess.CalledProcessError(s, argv[0]) + if args.show_timing: + _t1 = datetime.datetime.utcnow() + print _t1, (_t1 - _t0), " ".join(argv) + return x # Main program. 
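Like rcynic-cron above, rcynic-svn locks out concurrent instances of itself (the next hunk refers to this) with a non-blocking flock() on a lock file. The pattern, as a minimal standalone sketch in the same Python 2 idiom, with a hypothetical lock path:

    import errno, fcntl, os, sys

    lock = os.open("lock", os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
    try:
        fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except (IOError, OSError), e:
        if e.errno == errno.EAGAIN:
            sys.exit(0)    # another instance already holds the lock
        raise
    # ... work happens here; the lock is released when the process exits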
@@ -120,8 +120,8 @@ parser.add_argument("working_directory", help = \ args = parser.parse_args() if args.show_timing: - t0 = datetime.datetime.utcnow() - print t0, "Starting" + t0 = datetime.datetime.utcnow() + print t0, "Starting" # Lock out other instances of this program. We may want some more # sophsiticated approach when combining this with other programs, but @@ -141,18 +141,18 @@ run("svn", "update", "--quiet", args.working_directory) if args.files_to_archive: - if args.verbatim: - cmd = ["rsync", "--archive", "--quiet", "--delete"] - cmd.extend(args.files_to_archive) - cmd.append(args.working_directory) - run(*cmd) + if args.verbatim: + cmd = ["rsync", "--archive", "--quiet", "--delete"] + cmd.extend(args.files_to_archive) + cmd.append(args.working_directory) + run(*cmd) - else: - for src in args.files_to_archive: - cmd = ["rsync", "--archive", "--quiet", "--delete", "--copy-links"] - cmd.append(src.rstrip("/")) - cmd.append(args.working_directory.rstrip("/") + "/") - run(*cmd) + else: + for src in args.files_to_archive: + cmd = ["rsync", "--archive", "--quiet", "--delete", "--copy-links"] + cmd.append(src.rstrip("/")) + cmd.append(args.working_directory.rstrip("/") + "/") + run(*cmd) # Ask Subversion to add any new files, trying hard to get the MIME # types right. @@ -160,8 +160,8 @@ if args.files_to_archive: cmd = ["svn", "add", "--quiet", "--force", "--auto-props"] for fn2, mime_type in mime_types: - cmd.append("--config-option") - cmd.append("config:auto-props:*.%s=svn:mime-type=%s" % (fn2, mime_type)) + cmd.append("--config-option") + cmd.append("config:auto-props:*.%s=svn:mime-type=%s" % (fn2, mime_type)) cmd.append(".") @@ -171,15 +171,16 @@ run(*cmd, cwd = args.working_directory) # files have been deleted, and tell Subversion that we deleted them # intentionally. +# pylint: disable=E1101 missing = sorted(entry.get("path") for entry in runxml("svn", "status", "--xml", args.working_directory).find("target").findall("entry") if entry.find("wc-status").get("item") == "missing") deleted = [] for path in missing: - if not any(path.startswith(r) for r in deleted): - run("svn", "delete", "--quiet", path) - deleted.append(path + "/") + if not any(path.startswith(r) for r in deleted): + run("svn", "delete", "--quiet", path) + deleted.append(path + "/") # Commit our changes and update the working tree. 
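The startswith() guard in the deletion loop above exists because "svn delete" on a directory also schedules everything beneath it; a toy walk-through with hypothetical paths:

    missing = ["a/b", "a/b/c.cer", "d.roa"]    # already sorted, parents first
    deleted = []
    for path in missing:
        if not any(path.startswith(r) for r in deleted):
            # the real script runs: svn delete --quiet <path>
            deleted.append(path + "/")
    # "a/b/c.cer" is skipped, since deleting "a/b" already covered it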
@@ -187,5 +188,5 @@ run("svn", "commit", "--quiet", "--message", "Auto update.", args.working_direct run("svn", "update", "--quiet", args.working_directory) if args.show_timing: - t1 = datetime.datetime.utcnow() - print t1, t1 - t0, "total runtime" + t1 = datetime.datetime.utcnow() + print t1, t1 - t0, "total runtime" diff --git a/rp/rcynic/rcynic-text b/rp/rcynic/rcynic-text index db4126ce..d4a5b23e 100755 --- a/rp/rcynic/rcynic-text +++ b/rp/rcynic/rcynic-text @@ -25,96 +25,96 @@ import urlparse import textwrap try: - from lxml.etree import ElementTree + from lxml.etree import ElementTree except ImportError: - from xml.etree.ElementTree import ElementTree + from xml.etree.ElementTree import ElementTree class Label(object): - def __init__(self, elt): - self.tag = elt.tag - self.width = max(len(s) for s in elt.text.split()) - self.lines = textwrap.wrap(elt.text.strip(), width = self.width) - self.counter = 0 + def __init__(self, elt): + self.tag = elt.tag + self.width = max(len(s) for s in elt.text.split()) + self.lines = textwrap.wrap(elt.text.strip(), width = self.width) + self.counter = 0 - def line(self, n): - try: - return " " + self.lines[n].center(self.width) + " " - except IndexError: - return " " * (self.width + 2) + def line(self, n): + try: + return " " + self.lines[n].center(self.width) + " " + except IndexError: + return " " * (self.width + 2) - def add(self): - self.counter += 1 + def add(self): + self.counter += 1 - @property - def total(self): - return " " + str(self.counter).rjust(self.width) + " " + @property + def total(self): + return " " + str(self.counter).rjust(self.width) + " " - @property - def visible(self): - return self.counter > 0 + @property + def visible(self): + return self.counter > 0 class Host(object): - def __init__(self): - self.counters = {} + def __init__(self): + self.counters = {} - def add(self, label): - self.counters[label] = self.counters.get(label, 0) + 1 - label.add() + def add(self, label): + self.counters[label] = self.counters.get(label, 0) + 1 + label.add() - def total(self, label): - if label in self.counters: - return " " + str(self.counters[label]).rjust(label.width) + " " - else: - return " " * (label.width + 2) + def total(self, label): + if label in self.counters: + return " " + str(self.counters[label]).rjust(label.width) + " " + else: + return " " * (label.width + 2) class Session(object): - def __init__(self, labels): - self.hosts = {} - self.labels = labels - self.map = dict((label.tag, label) for label in labels) - - def add(self, elt): - label = self.map[elt.get("status")] - hostname = urlparse.urlparse(elt.text.strip()).hostname - if hostname not in self.hosts: - self.hosts[hostname] = Host() - self.hosts[hostname].add(label) - - def show(self): - visible = [label for label in self.labels if label.visible] - hostnames = sorted(hostname for hostname in self.hosts if hostname is not None) - hostwidth = max(len(hostname) for hostname in hostnames + ["Hostname"]) - separator = "+-%s-+-%s-+" % ( - "-" * hostwidth, - "-+-".join("-" * label.width for label in visible)) - print separator - for i in xrange(max(len(label.lines) for label in visible)): - print "| %s |%s|" % ( - ("Hostname" if i == 0 else "").ljust(hostwidth), - "|".join(label.line(i) for label in visible)) - print separator - for hostname in hostnames: - print "| %s |%s|" % ( - hostname.ljust(hostwidth), - "|".join(self.hosts[hostname].total(label) for label in visible)) - if hostnames: - print separator - print "| %s |%s|" % ( - "Total".ljust(hostwidth), - 
"|".join(label.total for label in visible)) - print separator + def __init__(self, labels): + self.hosts = {} + self.labels = labels + self.map = dict((label.tag, label) for label in labels) + + def add(self, elt): + label = self.map[elt.get("status")] + hostname = urlparse.urlparse(elt.text.strip()).hostname + if hostname not in self.hosts: + self.hosts[hostname] = Host() + self.hosts[hostname].add(label) + + def show(self): + visible = [label for label in self.labels if label.visible] + hostnames = sorted(hostname for hostname in self.hosts if hostname is not None) + hostwidth = max(len(hostname) for hostname in hostnames + ["Hostname"]) + separator = "+-%s-+-%s-+" % ( + "-" * hostwidth, + "-+-".join("-" * label.width for label in visible)) + print separator + for i in xrange(max(len(label.lines) for label in visible)): + print "| %s |%s|" % ( + ("Hostname" if i == 0 else "").ljust(hostwidth), + "|".join(label.line(i) for label in visible)) + print separator + for hostname in hostnames: + print "| %s |%s|" % ( + hostname.ljust(hostwidth), + "|".join(self.hosts[hostname].total(label) for label in visible)) + if hostnames: + print separator + print "| %s |%s|" % ( + "Total".ljust(hostwidth), + "|".join(label.total for label in visible)) + print separator def main(): - for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]): - etree = ElementTree(file = filename) - session = Session([Label(elt) for elt in etree.find("labels")]) - for elt in etree.findall("validation_status"): - session.add(elt) - session.show() + for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]): + etree = ElementTree(file = filename) + session = Session([Label(elt) for elt in etree.find("labels")]) + for elt in etree.findall("validation_status"): + session.add(elt) + session.show() if __name__ == "__main__": - main() + main() diff --git a/rp/rcynic/rcynic.c b/rp/rcynic/rcynic.c index d0da40f5..36c1950f 100644 --- a/rp/rcynic/rcynic.c +++ b/rp/rcynic/rcynic.c @@ -3190,7 +3190,7 @@ static int extract_access_uri(rcynic_ctx_t *rc, if (OBJ_obj2nid(a->method) != nid) continue; ++*count; - if (!relevant((char *) a->location->d.uniformResourceIdentifier->data)) + if (relevant && !relevant((char *) a->location->d.uniformResourceIdentifier->data)) continue; if (sizeof(result->s) <= a->location->d.uniformResourceIdentifier->length) log_validation_status(rc, uri, uri_too_long, generation); @@ -3707,7 +3707,7 @@ static int check_x509(rcynic_ctx_t *rc, int n_caIssuers = 0; ex_count--; if (!extract_access_uri(rc, uri, generation, aia, NID_ad_ca_issuers, - &certinfo->aia, &n_caIssuers, is_rsync) || + &certinfo->aia, &n_caIssuers, NULL) || !certinfo->aia.s[0] || sk_ACCESS_DESCRIPTION_num(aia) != n_caIssuers) { log_validation_status(rc, uri, malformed_aia_extension, generation); diff --git a/rp/rcynic/rcynicng b/rp/rcynic/rcynicng new file mode 100755 index 00000000..eccd247f --- /dev/null +++ b/rp/rcynic/rcynicng @@ -0,0 +1,1478 @@ +#!/usr/bin/env python + +# $Id$ + +""" +Reimplementation of rcynic in Python. Work in progress. 
+""" + +import os +import sys +import ssl +import time +import copy +import errno +import shutil +import socket +import logging +import argparse +import tempfile +import urlparse +import subprocess + +import tornado.gen +import tornado.locks +import tornado.ioloop +import tornado.queues +import tornado.process +import tornado.httpclient + +import rpki.POW +import rpki.log +import rpki.config +import rpki.sundial +import rpki.relaxng +import rpki.autoconf + +from rpki.oids import id_kp_bgpsec_router + +from lxml.etree import (ElementTree, Element, SubElement, Comment, + XML, DocumentInvalid, XMLSyntaxError, iterparse) + +logger = logging.getLogger("rcynicng") + +xmlns = rpki.relaxng.rrdp.xmlns + +tag_delta = xmlns + "delta" +tag_notification = xmlns + "notification" +tag_publish = xmlns + "publish" +tag_snapshot = xmlns + "snapshot" +tag_withdraw = xmlns + "withdraw" + +codes = rpki.POW.validation_status + + +class Status(object): + """ + Validation status database, like validation_status_t in rcynic:tos. + + rcynic:tos version of this data structure is stored as an AVL + tree, because the OpenSSL STACK_OF() sort-and-bsearch turned out + to be a very poor choice for the input data. Remains to be seen + whether we need to do something like that here too. + """ + + db = dict() + + def __init__(self, uri): + self.uri = uri + self._timestamp = None + self.status = set() + + def __str__(self): + return "{my.timestamp} {my.uri} {status}".format( + my = self, status = ",".join(str(s) for s in sorted(self.status))) + + @property + def timestamp(self): + return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self._timestamp)) + + @classmethod + def get(cls, uri): + try: + return cls.db[uri].status + except KeyError: + return None + + @classmethod + def update(cls, uri): + try: + self = cls.db[uri] + except KeyError: + self = cls.db[uri] = cls(uri) + self._timestamp = time.time() + return self.status + + @classmethod + def add(cls, uri, *codes): + status = cls.update(uri) + for code in codes: + status.add(code) + + @classmethod + def remove(cls, uri, *codes): + if uri in cls.db: + for code in codes: + cls.db[uri].status.discard(code) + + @classmethod + def test(cls, uri, code): + return uri in cls.db and code in cls.db[uri].status + + +def install_object(obj): + obj.obj.authenticated.add(authenticated) + obj.obj.save() + + +class X509StoreCTX(rpki.POW.X509StoreCTX): + + @classmethod + def subclass(cls, **kwargs): + return type(cls.__name__, (cls,), kwargs) + + status = None + + def verify_callback(self, ok): + err = self.getError() + if err in (codes.X509_V_OK.code, codes.X509_V_ERR_SUBJECT_ISSUER_MISMATCH.code): + return ok + elif err == codes.X509_V_ERR_CRL_HAS_EXPIRED.code: + return True + elif err == codes.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT.code: + self.status.add(codes.TRUST_ANCHOR_NOT_SELF_SIGNED) + return ok + else: + self.status.add(codes.find(err)) + return ok + + +class POW_Mixin(object): + + @classmethod + def store_if_new(cls, der, uri, retrieval): + self = cls.derRead(der) + ski, aki = self.get_hex_SKI_AKI() + return RPKIObject.objects.get_or_create( + der = der, + defaults = dict(uri = uri, + aki = aki, + ski = ski, + sha256 = sha256hex(der), + retrieved = retrieval)) + + def get_hex_SKI_AKI(self): + cer = self.certs()[0] + ski = cer.getSKI() + aki = cer.getAKI() + return ski.encode("hex") if ski else "", aki.encode("hex") if aki else "" + + @property + def uri(self): + return self.obj.uri + + @property + def aki(self): + return self.obj.aki + + @property + def ski(self): + return 
self.obj.ski + + +class X509(rpki.POW.X509, POW_Mixin): + + def __repr__(self): + try: + return "<X509 \"{}\" at 0x{:x}>".format(self.uri, id(self)) + except: + return "<X509 at 0x{:x}>".format(id(self)) + + def get_hex_SKI_AKI(self): + ski = self.getSKI() + aki = self.getAKI() + return ski.encode("hex") if ski else "", aki.encode("hex") if aki else "" + + @classmethod + def load(cls, obj, cms = None): + if cms is not None: + # XXX Kludge to work around lack of subclass support in rpki.POW.CMS.certs(). + der = cms.certs()[0].derWrite() + else: + der = obj.der + self = cls.derRead(der) + self.obj = obj + self.bc = self.getBasicConstraints() + self.eku = self.getEKU() + self.aia = self.getAIA() + self.sia = self.getSIA() + self.crldp = self.getCRLDP() + self.is_ca = self.bc is not None and self.bc[0] + self.caDirectory, self.rpkiManifest, self.signedObjectRepository, self.rpkiNotify \ + = self.sia or (None, None, None, None) + return self + + @staticmethod + def count_uris(uris, scheme = "rsync://"): + count = 0 + if uris is not None: + for uri in uris: + if uri.startswith(scheme): + count += 1 + return count + + def check(self, trusted, crl): + #logger.debug("Starting checks for %r", self) + status = Status.update(self.uri) + is_ta = trusted is None + is_routercert = (self.eku is not None and id_kp_bgpsec_router in self.eku and + not self.is_ca and self.uri.endswith(".cer")) + if self.eku is not None and (self.is_ca or not self.uri.endswith(".cer")): + status.add(codes.INAPPROPRIATE_EKU_EXTENSION) + if is_ta and not self.is_ca: + status.add(codes.MALFORMED_TRUST_ANCHOR) + if is_ta and self.aia is not None: + status.add(codes.AIA_EXTENSION_FORBIDDEN) + if not is_ta and self.aia is None: + status.add(codes.AIA_EXTENSION_MISSING) + if is_routercert and self.sia is not None: + status.add(codes.SIA_EXTENSION_FORBIDDEN) + if not is_routercert and self.sia is None: + status.add(codes.SIA_EXTENSION_MISSING) + if is_ta and self.crldp is not None: + status.add(codes.CRLDP_EXTENSION_FORBIDDEN) + if not is_ta and self.crldp is None: + status.add(codes.CRLDP_EXTENSION_MISSING) + if not is_ta and not self.aki: + status.add(codes.AKI_EXTENSION_MISSING) + elif not is_ta and self.aki != trusted[0].ski: + status.add(codes.AKI_EXTENSION_ISSUER_MISMATCH) + serial = self.getSerial() + if serial <= 0 or serial > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF: + status.add(codes.BAD_CERTIFICATE_SERIAL_NUMBER) + if self.getVersion() != 2: + status.add(codes.WRONG_OBJECT_VERSION) + n_rsync_caIssuers = self.count_uris(self.aia) + n_rsync_caDirectory = self.count_uris(self.caDirectory) + n_rsync_rpkiManifest = self.count_uris(self.rpkiManifest) + n_rsync_signedObjectRepository = self.count_uris(self.signedObjectRepository) + if n_rsync_caIssuers > 1 or n_rsync_caDirectory > 1 or n_rsync_rpkiManifest > 1 or n_rsync_signedObjectRepository > 1: + status.add(codes.MULTIPLE_RSYNC_URIS_IN_EXTENSION) + if self.aia is not None and n_rsync_caIssuers == 0: + status.add(codes.MALFORMED_AIA_EXTENSION) + if self.is_ca: + ok = n_rsync_caDirectory != 0 and n_rsync_rpkiManifest != 0 and n_rsync_signedObjectRepository == 0 + elif not is_routercert: + ok = n_rsync_caDirectory == 0 and n_rsync_rpkiManifest == 0 and n_rsync_signedObjectRepository != 0 + else: + ok = self.sia is None + if not ok: + status.add(codes.MALFORMED_SIA_EXTENSION) + if not is_ta and self.count_uris(self.crldp) == 0: + status.add(codes.MALFORMED_CRLDP_EXTENSION) + self.checkRPKIConformance(status = status, eku = id_kp_bgpsec_router if is_routercert else None) + try: 
+ self.verify(trusted = [self] if trusted is None else trusted, crl = crl, policy = "1.3.6.1.5.5.7.14.2", + context_class = X509StoreCTX.subclass(status = status)) + except rpki.POW.ValidationError as e: + logger.debug("%r rejected: %s", self, e) + status.add(codes.OBJECT_REJECTED) + codes.normalize(status) + #logger.debug("Finished checks for %r", self) + return not any(s.kind == "bad" for s in status) + + +class CRL(rpki.POW.CRL, POW_Mixin): + + def __repr__(self): + try: + return "<CRL \"{}\" at 0x{:x}>".format(self.uri, id(self)) + except: + return "<CRL at 0x{:x}>".format(id(self)) + + def get_hex_SKI_AKI(self): + aki = self.getAKI() + return "", aki.encode("hex") if aki else "" + + @classmethod + def load(cls, obj): + self = cls.derRead(obj.der) + self.obj = obj + self.thisUpdate = self.getThisUpdate() + self.nextUpdate = self.getNextUpdate() + self.number = self.getCRLNumber() + return self + + def check(self, issuer): + status = Status.update(self.uri) + self.checkRPKIConformance(status = status, issuer = issuer) + try: + self.verify(issuer) + except rpki.POW.ValidationError as e: + logger.debug("%r rejected: %s", self, e) + status.add(codes.OBJECT_REJECTED) + codes.normalize(status) + if self.getVersion() != 1: + status.add(codes.WRONG_OBJECT_VERSION) + now = rpki.sundial.now() + if self.thisUpdate > now: + status.add(codes.CRL_NOT_YET_VALID) + if self.nextUpdate < now: + status.add(codes.STALE_CRL_OR_MANIFEST) + if self.number is None: + status.add(codes.CRL_NUMBER_EXTENSION_MISSING) + if self.number < 0: + status.add(codes.CRL_NUMBER_IS_NEGATIVE) + if self.number > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF: + status.add(codes.CRL_NUMBER_OUT_OF_RANGE) + if self.getIssuer() != issuer.getSubject(): + status.add(codes.CRL_ISSUER_NAME_MISMATCH) + if not self.aki: + status.add(codes.AKI_EXTENSION_MISSING) + elif self.aki != issuer.ski: + status.add(codes.AKI_EXTENSION_ISSUER_MISMATCH) + + return not any(s.kind == "bad" for s in status) + + +class Ghostbuster(rpki.POW.CMS, POW_Mixin): + + def __repr__(self): + try: + return "<Ghostbuster \"{}\" at 0x{:x}>".format(self.uri, id(self)) + except: + return "<Ghostbuster at 0x{:x}>".format(id(self)) + + @classmethod + def load(cls, obj): + self = cls.derRead(obj.der) + self.obj = obj + self.ee = X509.load(obj, self) + self.vcard = None + return self + + def check(self, trusted, crl): + status = Status.update(self.uri) + self.ee.check(trusted = trusted, crl = crl) + try: + self.vcard = self.verify() + except rpki.POW.ValidationError as e: + logger.debug("%r rejected: %s", self, e) + status.add(codes.OBJECT_REJECTED) + self.checkRPKIConformance(status) + codes.normalize(status) + return not any(s.kind == "bad" for s in status) + + +class Manifest(rpki.POW.Manifest, POW_Mixin): + + def __repr__(self): + try: + return "<Manifest \"{}\" at 0x{:x}>".format(self.uri, id(self)) + except: + return "<Manifest at 0x{:x}>".format(id(self)) + + @classmethod + def load(cls, obj): + self = cls.derRead(obj.der) + self.obj = obj + self.ee = X509.load(obj, self) + self.fah = None + self.thisUpdate = None + self.nextUpdate = None + self.number = None + return self + + def check(self, trusted, crl): + status = Status.update(self.uri) + self.ee.check(trusted = trusted, crl = crl) + try: + self.verify() + except rpki.POW.ValidationError as e: + logger.debug("%r rejected: %s", self, e) + status.add(codes.OBJECT_REJECTED) + self.checkRPKIConformance(status) + self.thisUpdate = self.getThisUpdate() + self.nextUpdate = self.getNextUpdate() + self.number = 
self.getManifestNumber() + self.fah = self.getFiles() + self.notBefore = self.ee.getNotBefore() + self.notAfter = self.ee.getNotAfter() + if self.thisUpdate < self.notBefore or self.nextUpdate > self.notAfter: + status.add(codes.MANIFEST_INTERVAL_OVERRUNS_CERT) + now = rpki.sundial.now() + if self.thisUpdate > now: + status.add(codes.MANIFEST_NOT_YET_VALID) + if self.nextUpdate < now: + status.add(codes.STALE_CRL_OR_MANIFEST) + codes.normalize(status) + return not any(s.kind == "bad" for s in status) + + def find_crl_candidate_hashes(self): + for fn, digest in self.fah: + if fn.endswith(".crl"): + yield digest.encode("hex") + + +class ROA(rpki.POW.ROA, POW_Mixin): + + def __repr__(self): + try: + return "<ROA \"{}\" at 0x{:x}>".format(self.uri, id(self)) + except: + return "<ROA at 0x{:x}>".format(id(self)) + + @classmethod + def load(cls, obj): + self = cls.derRead(obj.der) + self.obj = obj + self.ee = X509.load(obj, self) + self.asn = None + self.prefixes = None + return self + + def check(self, trusted, crl): + status = Status.update(self.uri) + self.ee.check(trusted = trusted, crl = crl) + try: + vcard = self.verify() + except rpki.POW.ValidationError: + status.add(codes.OBJECT_REJECTED) + self.checkRPKIConformance(status) + self.asn = self.getASID() + self.prefixes = self.getPrefixes() + codes.normalize(status) + return not any(s.kind == "bad" for s in status) + + +class_dispatch = dict(cer = X509, + crl = CRL, + gbr = Ghostbuster, + mft = Manifest, + roa = ROA) + +def uri_to_class(uri): + cls = class_dispatch.get(uri[-3:]) if len(uri) > 4 and uri[-4] == "." else None + if cls is None: + Status.add(uri, None, codes.UNKNOWN_OBJECT_TYPE_SKIPPED) + return cls + + +# If we find ourselves using this same ordering for every retrieval from the RPKIObjects model, we +# can add it as a Meta option for the model and omit it in the query expressions, like this: +# +# class RPKIObjects(models.Model): +# ... +# class Meta: +# ordering = ["-retrieved__started"] +# +# https://docs.djangoproject.com/en/1.8/ref/models/querysets/#order-by +# https://docs.djangoproject.com/en/1.8/ref/models/options/#django.db.models.Options.ordering + +def fetch_objects(**kwargs): + for obj in RPKIObject.objects.filter(**kwargs).order_by("-retrieved__started"): + cls = uri_to_class(obj.uri) + if cls is not None: + yield cls.load(obj) + + +class WalkFrame(object): + """ + Certificate tree walk stack frame. This is basically just a + preamble and a loop, broken out into several separate methods so + that we can fork new tasks in the middle then resume processing of + the current state machine (ie, this frame) when appropriate (eg, + after an rsync or RRDP fetch completes). 
+ """ + + def __init__(self, cer): + self.cer = cer + self.state = self.initial + + def __repr__(self): + try: + return "<WalkFrame \"{}\" at 0x{:x}>".format(self.cer.uri, id(self)) + except: + return "<WalkFrame at 0x{:x}>".format(id(self)) + + @tornado.gen.coroutine + def __call__(self, wsk): + yield self.state(wsk) + + @tornado.gen.coroutine + def initial(self, wsk): + + rsync_uri = first_rsync_uri(self.cer.caDirectory) + rrdp_uri = first_https_uri(self.cer.rpkiNotify) + + if args.prefer_rsync: + uri = rsync_uri or rrdp_uri + else: + uri = rrdp_uri or rsync_uri + + self.fetcher = Fetcher(uri) + + if not self.fetcher.needed(): + self.state = self.ready + elif not args.spawn_on_fetch: + self.state = self.fetch + else: + self.state = self.fetch + yield task_queue.put(wsk.clone()) + wsk.pop() + + @tornado.gen.coroutine + def fetch(self, wsk): + yield self.fetcher.fetch() + self.state = self.ready + + @tornado.gen.coroutine + def ready(self, wsk): + self.trusted = wsk.trusted() + + logger.debug("%r scanning products", self) + + # NB: CRL checks on manifest EE certificates deferred until we've picked a CRL. + + mft_candidates = [] + crl_candidates = [] + crl_candidate_hashes = set() + + for mft in fetch_objects(aki = self.cer.ski, uri__endswith = ".mft"): + if mft.check(trusted = self.trusted, crl = None): + mft_candidates.append(mft) + crl_candidate_hashes.update(mft.find_crl_candidate_hashes()) + + if not mft_candidates: + wsk.pop() + return + + for crl in fetch_objects(aki = self.cer.ski, uri__endswith = ".crl", sha256__in = crl_candidate_hashes): + if crl.check(self.trusted[0]): + crl_candidates.append(crl) + + mft_candidates.sort(reverse = True, key = lambda x: (x.number, x.thisUpdate, x.obj.retrieved.started)) + crl_candidates.sort(reverse = True, key = lambda x: (x.number, x.thisUpdate, x.obj.retrieved.started)) + + if not crl_candidates: + wsk.pop() + return + + self.crl = crl_candidates[0] + + install_object(self.crl) + Status.add(self.crl.uri, codes.OBJECT_ACCEPTED) + + #logger.debug("Picked CRL %s", self.crl.uri) + + for mft in mft_candidates: + if self.crl.isRevoked(mft.ee): + Status.add(mft.obj.uri, codes.MANIFEST_EE_REVOKED) + continue + self.mft = mft + break + else: + wsk.pop() + return + + install_object(self.mft) + Status.add(self.mft.obj.uri, codes.OBJECT_ACCEPTED) + + self.stale_crl = Status.test(self.crl.uri, codes.STALE_CRL_OR_MANIFEST) + self.stale_mft = Status.test(self.mft.uri, codes.STALE_CRL_OR_MANIFEST) + + # Issue warnings on mft and crl URI mismatches? + + # Use an explicit iterator so we can resume it; run loop in separate method, same reason. + + self.mft_iterator = iter(self.mft.getFiles()) + self.state = self.loop + + @tornado.gen.coroutine + def loop(self, wsk): + + #logger.debug("Processing %s", self.mft.uri) + + for fn, digest in self.mft_iterator: + + yield tornado.gen.moment + + uri = self.mft.uri[:self.mft.uri.rindex("/") + 1] + fn + + # Need general URI validator here? 
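+            # A minimal sketch of what such a validator might check (the
+            # helper name here is hypothetical, not part of the code):
+            #
+            #   def plausible_uri(uri):
+            #       return (uri.startswith("rsync://") and
+            #               "/../" not in uri and not uri.endswith("/.."))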
+
+            if uri == self.crl.uri:
+                continue
+
+            cls = uri_to_class(uri)
+
+            if cls is None:
+                continue
+
+            if cls in (Manifest, CRL):
+                Status.add(uri, None, codes.INAPPROPRIATE_OBJECT_TYPE_SKIPPED)
+                continue
+
+            for obj in fetch_objects(sha256 = digest.encode("hex")):
+
+                if self.stale_crl:
+                    Status.add(uri, codes.TAINTED_BY_STALE_CRL)
+                if self.stale_mft:
+                    Status.add(uri, codes.TAINTED_BY_STALE_MANIFEST)
+
+                if not obj.check(trusted = self.trusted, crl = self.crl):
+                    Status.add(uri, codes.OBJECT_REJECTED)
+                    continue
+
+                install_object(obj)
+                Status.add(uri, codes.OBJECT_ACCEPTED)
+
+                if cls is not X509 or not obj.is_ca:
+                    break
+
+                wsk.push(obj)
+                return
+
+        wsk.pop()
+
+
+class WalkTask(object):
+    """
+    Task corresponding to one walk stack, roughly analogous to
+    STACK_OF(walk_ctx_t) in rcynic:tos.
+    """
+
+    def __init__(self, wsk = None, cer = None):
+        self.wsk = [] if wsk is None else wsk
+        if cer is not None:
+            self.push(cer)
+
+    def __repr__(self):
+        try:
+            return "<WalkTask \"{}\" at 0x{:x}>".format(self.wsk[-1].cer.uri, id(self))
+        except:
+            return "<WalkTask at 0x{:x}>".format(id(self))
+
+    @tornado.gen.coroutine
+    def __call__(self):
+        while self.wsk:
+            yield self.wsk[-1](wsk = self)
+
+    def push(self, cer):
+        self.wsk.append(WalkFrame(cer))
+
+    def pop(self):
+        return self.wsk.pop()
+
+    def clone(self):
+        return WalkTask(wsk = list(self.wsk))
+
+    def trusted(self):
+        stack = [w.cer for w in self.wsk]
+        stack.reverse()
+        return stack
+
+
+def read_tals():
+    for head, dirs, files in os.walk(args.trust_anchor_locators):
+        for fn in files:
+            if fn.endswith(".tal"):
+                furi = "file://" + os.path.abspath(os.path.join(head, fn))
+                try:
+                    with open(os.path.join(head, fn), "r") as f:
+                        lines = [line.strip() for line in f]
+                    blank = lines.index("")
+                    uris = lines[:blank]
+                    key = rpki.POW.Asymmetric.derReadPublic("".join(lines[blank:]).decode("base64"))
+                    if not uris or not all(uri.endswith(".cer") for uri in uris):
+                        Status.add(furi, None, codes.MALFORMED_TAL_URI)
+                    yield uris, key
+                except:
+                    Status.add(furi, None, codes.UNREADABLE_TRUST_ANCHOR_LOCATOR)
+
+
+def uri_to_filename(uri, base = None):
+    fn = uri[uri.index("://")+3:]
+    if base is not None:
+        fn = os.path.join(base, fn)
+    return fn
+
+def first_uri(uris, scheme):
+    if uris is not None:
+        for uri in uris:
+            if uri.startswith(scheme):
+                return uri
+    return None
+
+def first_rsync_uri(uris):
+    return first_uri(uris, "rsync://")
+
+def first_https_uri(uris):
+    return first_uri(uris, "https://")
+
+def sha256hex(bytes):
+    d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
+    d.update(bytes)
+    return d.digest().encode("hex")
+
+
+class RRDP_ParseFailure(Exception):
+    "Failure parsing RRDP message."
+
+class DeadHost(Exception):
+    "Host recently tried and known to be unavailable."
+
+
+class Fetcher(object):
+    """
+    Network transfer methods and history database.
+
+    Handles rsync, HTTPS trust anchor retrieval, and RRDP transfers.
+    """
+
+    # Internal protocol:
+    #
+    # - Instances which have just gotten to the query stage are not registered
+    #
+    # - Instances which are in progress are listed in .history and
+    #   have a Condition object in .pending; instances which depend on
+    #   this should wait for the condition, then return.
+    #
+    # - Instances which have completed are listed in .history and have
+    #   .pending set to None.
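+    #
+    # The pattern, in miniature (a sketch paraphrasing the fetch methods
+    # below, where "key" stands for the per-scheme history key):
+    #
+    #   other = self._rsync_history.get(key)
+    #   if other is not None and other.pending is not None:
+    #       yield other.pending.wait()   # fetch already in progress, wait for it
+    #       return
+    #   self.pending = tornado.locks.Condition()
+    #   self._rsync_history[key] = self
+    #   try:
+    #       ... perform the actual transfer ...
+    #   finally:
+    #       pending, self.pending = self.pending, None
+    #       pending.notify_all()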
+ + _rsync_deadhosts = set() + _rsync_history = dict() + + _https_deadhosts = set() + _https_history = dict() + + def __init__(self, uri, ta = False): + self.uri = uri + self.ta = ta + self.pending = None + self.status = None + + def _rsync_split_uri(self): + return tuple(self.uri.rstrip("/").split("/")[2:]) + + def _rsync_find(self, path): + for i in xrange(1, len(path)): + target = path[:i+1] + try: + return self._rsync_history[target] + except KeyError: + continue + return None + + def needed(self): + if not args.fetch: + return False + if self.uri.startswith("rsync://"): + return self._rsync_needed() + if self.uri.startswith("https://"): + return self._https_needed() + raise ValueError + + def _rsync_needed(self): + path = self._rsync_split_uri() + if path[0] in self._rsync_deadhosts: + return False + entry = self._rsync_find(path) + return entry is None or entry.pending is not None + + def _https_needed(self): + netloc = urlparse.urlparse(self.uri).netloc + if netloc in self._https_deadhosts: + return False + entry = self._https_history.get(self.uri) + return entry is None or entry.pending is not None + + def fetch(self): + if self.uri.startswith("rsync://"): + return self._rsync_fetch() + if self.uri.startswith("https://"): + return self._https_fetch_ta() if self.ta else self._rrdp_fetch() + raise ValueError + + @tornado.gen.coroutine + def _rsync_fetch(self): + assert self.uri.startswith("rsync://") and (self.uri.endswith(".cer") if self.ta else self.uri.endswith("/")) + + if not args.fetch: + return + path = self._rsync_split_uri() + dead = path[0] in self._rsync_deadhosts + other = self._rsync_find(path) + if not dead and other is not None and other.pending is not None: + yield other.pending.wait() + if dead or other is not None: + return + + self.pending = tornado.locks.Condition() + self._rsync_history[path] = self + + try: + path = uri_to_filename(self.uri, args.unauthenticated) + cmd = ["rsync", "--update", "--times", "--copy-links", "--itemize-changes"] + if self.uri.endswith("/"): + cmd.append("--recursive") + cmd.append("--delete") + cmd.append(self.uri) + cmd.append(path) + + dn = os.path.dirname(path) + if not os.path.exists(dn): + os.makedirs(dn) + + # We use the stdout close from rsync to detect when the subprocess has finished. + # There's a lovely tornado.process.Subprocess.wait_for_exit() method which does + # exactly what one would think we'd want -- but Unix signal handling still hasn't + # caught up to the software interrupt architecture ITS had forty years ago, so + # signals still cause random "system call interrupted" failures in other libraries. + # Nothing Tornado can do about this, so we avoid signals entirely and collect the + # process exit status directly from the operating system. In theory, the WNOHANG + # isn't necessary here, we use it anyway to be safe in case theory is wrong. + + # If we need to add a timeout here to guard against rsync processes taking too long + # (which has happened in the past with, eg, LACNIC), see tornado.gen.with_timeout() + # (documented in the utility functions section of the tornado.gen page), which wraps + # any future in a timeout. 
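+            # For example, a sketch assuming a hypothetical args.rsync_timeout
+            # option:
+            #
+            #   output = yield tornado.gen.with_timeout(
+            #       datetime.timedelta(seconds = args.rsync_timeout),
+            #       rsync.stdout.read_until_close())
+            #
+            # with tornado.gen.TimeoutError raised if the transfer stalls.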
+ + t0 = time.time() + rsync = tornado.process.Subprocess(cmd, stdout = tornado.process.Subprocess.STREAM, stderr = subprocess.STDOUT) + logger.debug("rsync[%s] started \"%s\"", rsync.pid, " ".join(cmd)) + output = yield rsync.stdout.read_until_close() + pid, self.status = os.waitpid(rsync.pid, os.WNOHANG) + t1 = time.time() + if (pid, self.status) == (0, 0): + logger.warn("rsync[%s] Couldn't get real exit status without blocking, sorry", rsync.pid) + for line in output.splitlines(): + logger.debug("rsync[%s] %s", rsync.pid, line) + logger.debug("rsync[%s] finished after %s seconds with status 0x%x", rsync.pid, t1 - t0, self.status) + + # Should do something with rsync result and validation status database here. + + retrieval = Retrieval.objects.create( + uri = self.uri, + started = rpki.sundial.datetime.fromtimestamp(t0), + finished = rpki.sundial.datetime.fromtimestamp(t1), + successful = self.status == 0) + + for fn in self._rsync_walk(path): + yield tornado.gen.moment + uri = "rsync://" + fn[len(args.unauthenticated):].lstrip("/") + cls = uri_to_class(uri) + if cls is not None: + try: + with open(fn, "rb") as f: + cls.store_if_new(f.read(), uri, retrieval) + except: + Status.add(uri, codes.UNREADABLE_OBJECT) + logger.exception("Couldn't read %s from rsync tree", uri) + + finally: + pending = self.pending + self.pending = None + pending.notify_all() + + def _rsync_walk(self, path): + if self.uri.endswith("/"): + for head, dirs, files in os.walk(path): + for fn in files: + yield os.path.join(head, fn) + elif os.path.exists(path): + yield path + + @tornado.gen.coroutine + def _https_fetch_url(self, url, streaming_callback = None): + + if urlparse.urlparse(url).netloc in self._https_deadhosts: + raise DeadHost + + # Should do something with deadhost processing below. Looks + # like errors such as HTTP timeout show up as + # tornado.httpclient.HTTPError exceptions (which we could + # suppress if we wanted to do so, but we probably don't). + # HTTP timeout shows up in the logs as "HTTP 599". See doc for: + # + # tornado.httpclient.AsyncHTTPClient.fetch() + # tornado.httpclient.HTTPError + + # Might need to do something with If-Modified-Since support + # See if_modified_since argument to + # http://www.tornadoweb.org/en/stable/httpclient.html#request-objects + # (which we can pass to client.fetch(), below). Not sure how + # "you don't need to retrieve this" result comes back, + # probably a benign exception we need to catch. Supporting + # this means adding another null-able timestamp field to the + # RRDPSnapshot model (which probably should be named the + # RRDPZone model instead), and storing a datetime there. + # Would also need to pull timestamp from the Last-Modified + # header in the response object. + + try: + ok = False + t0 = time.time() + client = tornado.httpclient.AsyncHTTPClient(max_body_size = args.max_https_body_size) + response = yield client.fetch(url, + streaming_callback = streaming_callback, + validate_cert = args.validate_https, + connect_timeout = args.https_timeout, + request_timeout = args.https_timeout) + # Might want to check response Content-Type here + ok = True + + except tornado.httpclient.HTTPError as e: + # Might want to check e.response here to figure out whether to add to _https_deadhosts. + logger.info("HTTP error for %s: %s", url, e) + raise + + except (socket.error, IOError, ssl.SSLError) as e: + # Might want to check e.errno here to figure out whether to add to _https_deadhosts. 
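+            # For instance (a sketch of one possible policy): errno values
+            # like errno.ECONNREFUSED or errno.EHOSTUNREACH might justify
+            # adding urlparse.urlparse(url).netloc to _https_deadhosts.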
+ logger.info("Network I/O error for %s: %s", url, e) + raise + + except Exception as e: + logger.exception("Error (%r) for %s", type(e), url) + raise + + finally: + t1 = time.time() + logger.debug("Fetch of %s finished after %s seconds", url, t1 - t0) + retrieval = Retrieval.objects.create( + uri = url, + started = rpki.sundial.datetime.fromtimestamp(t0), + finished = rpki.sundial.datetime.fromtimestamp(t1), + successful = ok) + if ok: + raise tornado.gen.Return((retrieval, response)) + + @tornado.gen.coroutine + def _https_fetch_ta(self): + + if not args.fetch: + return + + other = self._https_history.get(self.uri) + if other is not None and other.pending is not None: + yield other.pending.wait() + return + + self.pending = tornado.locks.Condition() + self._rsync_history[self.uri] = self + + try: + retrieval, response = yield self._https_fetch_url(self.uri) + X509.store_if_new(response.body, self.uri, retrieval) + except: + logger.exception("Couldn't load %s", self.uri) + + finally: + pending = self.pending + self.pending = None + pending.notify_all() + + @tornado.gen.coroutine + def _rrdp_fetch_notification(self, url): + + retrieval, response = yield self._https_fetch_url(url) + + notification = ElementTree(file = response.buffer).getroot() + + rpki.relaxng.rrdp.schema.assertValid(notification) + + if notification.tag != tag_notification: + raise RRDP_ParseFailure("Expected RRDP notification for {}, got {}".format(url, notification.tag)) + + raise tornado.gen.Return((retrieval, notification)) + + @tornado.gen.coroutine + def _rrdp_fetch_data_file(self, url, expected_hash): + + sha256 = rpki.POW.Digest(rpki.POW.SHA256_DIGEST) + xml_file = tempfile.SpooledTemporaryFile() + + retrieval, response = yield self._https_fetch_url(url, lambda data: (sha256.update(data), xml_file.write(data))) + + received_hash = sha256.digest().encode("hex") + xml_file.seek(0) + + if received_hash != expected_hash.lower(): + raise RRDP_ParseFailure("Expected RRDP hash {} for {}, got {}".format(expected_hash.lower(), url, received_hash)) + + raise tornado.gen.Return((retrieval, response, xml_file)) + + @tornado.gen.coroutine + def _rrdp_bulk_create(self, new_objs, existing_objs): + from django.db import IntegrityError + + #logger.debug("Bulk creation of new RPKIObjects") + + try: + RPKIObject.objects.bulk_create(new_objs) + + except IntegrityError: + #logger.debug("Some objects already existed, weeding and retrying") + i = 0 + while i < len(new_objs): + yield tornado.gen.moment + try: + existing_objs.append(RPKIObject.objects.values_list("pk", flat = True).get(der = new_objs[i].der)) + logger.debug("Object existed in SQL but, apparently, not in prior copy of snapshot: uri %s sha256 %s", + new_objs[i].uri, new_objs[i].sha256) + except RPKIObject.DoesNotExist: + i += 1 + else: + del new_objs[i] + RPKIObject.objects.bulk_create(new_objs) + + del new_objs[:] + + @tornado.gen.coroutine + def _rrdp_fetch(self): + from django.db import transaction + + if not args.fetch: + return + + other = self._https_history.get(self.uri) + if other is not None and other.pending is not None: + yield other.pending.wait() + return + + self.pending = tornado.locks.Condition() + self._https_history[self.uri] = self + + try: + retrieval, notification = yield self._rrdp_fetch_notification(url = self.uri) + + session_id = notification.get("session_id") + serial = long(notification.get("serial")) + + snapshot = RRDPSnapshot.objects.filter( + session_id = session_id).order_by("-retrieved__started").first() + + logger.debug("RRDP notification 
for %s session_id %s serial %s current snapshot %r", + self.uri, session_id, serial, snapshot) + + if snapshot is not None and snapshot.serial == serial: + logger.debug("RRDP data for %s is up-to-date, nothing to do", self.uri) + return + + deltas = dict((long(delta.get("serial")), (delta.get("uri"), delta.get("hash"))) + for delta in notification.iterchildren(tag_delta)) + + if snapshot is None or snapshot.serial + 1 not in deltas: + + existing_rpkiobject_map = dict() + + if snapshot is not None: + logger.debug("RRDP %s no deltas available for serial %s", self.uri, snapshot.serial) + existing_rpkiobject_map.update(snapshot.rpkiobject_set.values_list("sha256", "pk")) + + x = notification.find(tag_snapshot) + + url, hash = x.get("uri"), x.get("hash") + + logger.debug("RRDP %s loading from snapshot %s serial %s", self.uri, url, serial) + + retrieval, response, xml_file = yield self._rrdp_fetch_data_file(url, hash) + + snapshot = RRDPSnapshot.objects.create(session_id = session_id, serial = serial) + + # Value of "chunk" here may need to be configurable. Larger numbers batch more objects in + # a single bulk addition, which is faster ... unless one or more of them isn't really new, in + # which case we have to check everything in that batch when we get the IntegrityError, so + # the smaller the batch, the faster that check. No single good answer. + + root = None + existing_rpkiobjects = [] + new_rpkiobjects = [] + chunk = 2000 + + for event, node in iterparse(xml_file): + if node is root: + continue + + if root is None: + root = node.getparent() + if root is None or root.tag != tag_snapshot \ + or root.get("version") != "1" \ + or any(a not in ("version", "session_id", "serial") for a in root.attrib): + raise RRDP_ParseFailure("{} doesn't look like an RRDP snapshot file".format(url)) + if root.get("session_id") != session_id: + raise RRDP_ParseFailure("Expected RRDP session_id {} for {}, got {}".format( + session_id, url, root.get("session_id"))) + if long(root.get("serial")) != long(serial): + raise RRDP_ParseFailure("Expected RRDP serial {} for {}, got {}".format( + serial, url, root.get("serial"))) + + if node.tag != tag_publish or node.getparent() is not root \ + or any(a != "uri" for a in node.attrib): + raise RRDP_ParseFailure("{} doesn't look like an RRDP snapshot file".format(url)) + + uri = node.get("uri") + cls = uri_to_class(uri) + if cls is None: + raise RRDP_ParseFailure("Unexpected URI {}".format(uri)) + + der = node.text.decode("base64") + sha256 = sha256hex(der) + try: + existing_rpkiobjects.append(existing_rpkiobject_map[sha256]) + except KeyError: + ski, aki = cls.derRead(der).get_hex_SKI_AKI() + new_rpkiobjects.append(RPKIObject(der = der, uri = uri, ski = ski, aki = aki, + retrieved = retrieval, sha256 = sha256)) + + node.clear() + while node.getprevious() is not None: + del root[0] + + if len(new_rpkiobjects) > chunk: + yield self._rrdp_bulk_create(new_rpkiobjects, existing_rpkiobjects) + + yield tornado.gen.moment + + if len(new_rpkiobjects) > 0: + yield self._rrdp_bulk_create(new_rpkiobjects, existing_rpkiobjects) + + RPKIObject.snapshot.through.objects.bulk_create([ + RPKIObject.snapshot.through(rrdpsnapshot_id = snapshot.id, rpkiobject_id = i) + for i in retrieval.rpkiobject_set.values_list("pk", flat = True)]) + + RPKIObject.snapshot.through.objects.bulk_create([ + RPKIObject.snapshot.through(rrdpsnapshot_id = snapshot.id, rpkiobject_id = i) + for i in existing_rpkiobjects]) + + snapshot.retrieved = retrieval + snapshot.save() + + xml_file.close() + + else: + 
logger.debug("RRDP %s %s deltas (%s--%s)", self.uri, + (serial - snapshot.serial), snapshot.serial, serial) + + deltas = [(serial, deltas[serial][0], deltas[serial][1]) + for serial in xrange(snapshot.serial + 1, serial + 1)] + futures = [] + + while deltas or futures: + + while deltas and len(futures) < args.fetch_ahead_goal: + serial, url, hash = deltas.pop(0) + logger.debug("RRDP %s serial %s fetching %s", self.uri, serial, url) + futures.append(self._rrdp_fetch_data_file(url, hash)) + + retrieval, response, xml_file = yield futures.pop(0) + + root = None + + with transaction.atomic(): + snapshot.serial += 1 + snapshot.save() + logger.debug("RRDP %s serial %s loading", self.uri, snapshot.serial) + + for event, node in iterparse(xml_file): + if node is root: + continue + + if root is None: + root = node.getparent() + if root is None or root.tag != tag_delta \ + or root.get("version") != "1" \ + or any(a not in ("version", "session_id", "serial") for a in root.attrib): + raise RRDP_ParseFailure("{} doesn't look like an RRDP delta file".format(url)) + if root.get("session_id") != session_id: + raise RRDP_ParseFailure("Expected RRDP session_id {} for {}, got {}".format( + session_id, url, root.get("session_id"))) + if long(root.get("serial")) != snapshot.serial: + raise RRDP_ParseFailure("Expected RRDP serial {} for {}, got {}".format( + snapshot.serial, url, root.get("serial"))) + + hash = node.get("hash") + + if node.getparent() is not root or node.tag not in (tag_publish, tag_withdraw) \ + or (node.tag == tag_withdraw and hash is None) \ + or any(a not in ("uri", "hash") for a in node.attrib): + raise RRDP_ParseFailure("{} doesn't look like an RRDP delta file".format(url)) + + if node.tag == tag_withdraw or node.get("hash") is not None: + snapshot.rpkiobject_set.remove(snapshot.rpkiobject_set.get(sha256 = node.get("hash").lower())) + + if node.tag == tag_publish: + uri = node.get("uri") + cls = uri_to_class(uri) + if cls is None: + raise RRDP_ParseFailure("Unexpected URI %s" % uri) + obj, created = cls.store_if_new(node.text.decode("base64"), uri, retrieval) + obj.snapshot.add(snapshot) + + node.clear() + while node.getprevious() is not None: + del root[0] + + #yield tornado.gen.moment + + xml_file.close() + + logger.debug("RRDP %s done processing deltas", self.uri) + + except (tornado.httpclient.HTTPError, socket.error, IOError, ssl.SSLError): + pass # Already logged + + except RRDP_ParseFailure as e: + logger.info("RRDP parse failure: %s", e) + + except: + logger.exception("Couldn't load %s", self.uri) + + finally: + pending = self.pending + self.pending = None + pending.notify_all() + + +class CheckTALTask(object): + + def __init__(self, uris, key): + rsync_uri = first_rsync_uri(uris) + https_uri = first_https_uri(uris) + + if args.prefer_rsync: + self.uri = rsync_uri or https_uri + else: + self.uri = https_uri or rsync_uri + + self.key = key + + def __repr__(self): + return "<CheckTALTask: \"{}\">".format(self.uri) + + @tornado.gen.coroutine + def __call__(self): + yield Fetcher(self.uri, ta = True).fetch() + for cer in fetch_objects(uri = self.uri): + if self.check(cer): + yield task_queue.put(WalkTask(cer = cer)) + break + else: + Status.add(self.uri, codes.TRUST_ANCHOR_SKIPPED) + + def check(self, cer): + if self.key.derWritePublic() != cer.getPublicKey().derWritePublic(): + Status.add(self.uri, codes.TRUST_ANCHOR_KEY_MISMATCH) + ok = False + else: + ok = cer.check(trusted = None, crl = None) + if ok: + install_object(cer) + Status.add(self.uri, codes.OBJECT_ACCEPTED) + else: + 
Status.add(self.uri, codes.OBJECT_REJECTED) + return ok + + +@tornado.gen.coroutine +def worker(meself): + # + # NB: This particular style of control loop REQUIRES an except + # clause, even if that except clause is just a pass statement. + # + while True: + task = yield task_queue.get() + name = repr(task) + try: + logger.debug("Worker %s starting %s, queue length %s", meself, name, task_queue.qsize()) + yield task() + except: + logger.exception("Worker %s caught unhandled exception from %s", meself, name) + finally: + task_queue.task_done() + logger.debug("Worker %s finished %s, queue length %s", meself, name, task_queue.qsize()) + + +def final_report(): + # Clean up a bit to avoid confusing the user unnecessarily. + for s in Status.db.itervalues(): + if codes.OBJECT_ACCEPTED in s.status: + s.status.discard(codes.OBJECT_REJECTED) + doc = Element("rcynic-summary", date = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())) + doc.set("reporting-hostname", socket.getfqdn()) + doc.set("rcynic-version", "rcynicng") + doc.set("summary-version", "1") + labels = SubElement(doc, "labels") + for code in codes.all(): + SubElement(labels, code.name, kind = code.kind).text = code.text + for uri in Status.db: + for sym in sorted(Status.db[uri].status): + SubElement(doc, "validation_status", + timestamp = str(Status.db[uri].timestamp), + status = str(sym), + generation = "None" # Historical relic, remove eventually + ).text = uri + # + # Should generate <rsync_history/> elements here too, later + # + ElementTree(doc).write(file = argparse.FileType("w")(args.xml_file), + pretty_print = True) + + +def final_cleanup(): + from django.db import transaction, models + + def report(when): + logger.debug("Database %s cleanup: %s Authenticated %s RRDPSnapshot %s RPKIObject %s Retrieval", when, + Authenticated.objects.all().count(), RRDPSnapshot.objects.all().count(), + RPKIObject.objects.all().count(), Retrieval.objects.all().count()) + + report("before") + + with transaction.atomic(): + + #logger.debug("Flushing incomplete RRDP snapshots") + + q = RRDPSnapshot.objects + q = q.filter(retrieved__isnull = True) + q.delete() + + #logger.debug("Flushing old authenticated sets") + + q = Authenticated.objects + q = q.exclude(id = authenticated.id) + q.delete() + + #logger.debug("Flushing RRDP snapshots which don't contain anything in the (remaining) authenticated set") + + q = RPKIObject.objects + q = q.filter(authenticated = authenticated.id) + q = q.exclude(snapshot = None) + q = q.order_by("snapshot__id") + q = q.values_list("snapshot__id", flat = True) + q = q.distinct() + q = RRDPSnapshot.objects.exclude(id__in = q) + q.delete() + + #logger.debug("Flushing RPKI objects which are in neither current authenticated set nor current RRDP snapshot") + + q = RPKIObject.objects + q = q.filter(authenticated = None) # was: q = q.exclude(authenticated = authenticated.id) + q = q.filter(snapshot = None) + q.delete() + + #logger.debug("Flushing retrieval objects which are no longer related to any RPKI objects or RRDP snapshot") + + q = RPKIObject.objects + q = q.order_by("retrieved__id") + q = q.values_list("retrieved__id", flat = True) + q = q.distinct() + q = Retrieval.objects.exclude(id__in = q) + q = q.filter(rrdpsnapshot = None) + q.delete() + + report("after") + + +@tornado.gen.coroutine +def launcher(): + for i in xrange(args.workers): + tornado.ioloop.IOLoop.current().spawn_callback(worker, i) + + yield [task_queue.put(CheckTALTask(uris, key)) for uris, key in read_tals()] + yield task_queue.join() + + +class 
posint(int):
+    def __init__(self, value):
+        if self <= 0:
+            raise ValueError
+
+
+def main():
+    global rpki
+
+    os.environ.update(TZ = "UTC",
+                      DJANGO_SETTINGS_MODULE = "rpki.django_settings.rcynic")
+    time.tzset()
+
+    cfg = rpki.config.argparser(section = "rcynic", doc = __doc__, cfg_optional = True)
+
+    cfg.add_logging_arguments()
+
+    cfg.add_argument("-u", "--unauthenticated",
+                     help = "where to store unauthenticated data retrieved via rsync",
+                     default = os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "unauthenticated"))
+
+    cfg.add_argument("-x", "--xml-file",
+                     help = "where to write XML log of validation results",
+                     default = os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "rcynic.xml"))
+
+    cfg.add_argument("-t", "--trust-anchor-locators", "--tals",
+                     help = "where to find trust anchor locators",
+                     default = os.path.join(rpki.autoconf.sysconfdir, "rpki", "trust-anchors"))
+
+    cfg.add_argument("-w", "--workers", type = posint,
+                     help = "number of worker pseudo-threads to allow",
+                     default = 10)
+
+    cfg.add_argument("--fetch-ahead-goal", type = posint,
+                     help = "how many deltas we want in the fetch-ahead pipe",
+                     default = 2)
+
+    cfg.add_argument("--https-timeout", type = posint,
+                     help = "HTTPS connection timeout, in seconds",
+                     default = 300)
+
+    cfg.add_argument("--max-https-body-size", type = posint,
+                     help = "upper limit on byte length of HTTPS message body",
+                     default = 512 * 1024 * 1024)
+
+    cfg.add_boolean_argument("--fetch", default = True,
+                             help = "whether to fetch data at all")
+
+    cfg.add_boolean_argument("--spawn-on-fetch", default = True,
+                             help = "whether to spawn new pseudo-threads on fetch")
+
+    cfg.add_boolean_argument("--migrate", default = True,
+                             help = "whether to migrate the ORM database on startup")
+
+    cfg.add_boolean_argument("--prefer-rsync", default = False,
+                             help = "whether to prefer rsync over RRDP")
+
+    cfg.add_boolean_argument("--validate-https", default = False,
+                             help = "whether to validate HTTPS server certificates")
+
+    global args
+    args = cfg.argparser.parse_args()
+
+    cfg.configure_logging(args = args, ident = "rcynic")
+
+    import django
+    django.setup()
+
+    if args.migrate:
+        # Not sure we should be doing this on every run, but it sure simplifies things.
+        import django.core.management
+        django.core.management.call_command("migrate", verbosity = 0, interactive = False)
+
+    import rpki.rcynicdb
+    global Retrieval
+    global Authenticated
+    global RRDPSnapshot
+    global RPKIObject
+    Retrieval = rpki.rcynicdb.models.Retrieval
+    Authenticated = rpki.rcynicdb.models.Authenticated
+    RRDPSnapshot = rpki.rcynicdb.models.RRDPSnapshot
+    RPKIObject = rpki.rcynicdb.models.RPKIObject
+
+
+    global authenticated
+    authenticated = Authenticated.objects.create(started = rpki.sundial.datetime.now())
+
+    global task_queue
+    task_queue = tornado.queues.Queue()
+    tornado.ioloop.IOLoop.current().run_sync(launcher)
+
+    authenticated.finished = rpki.sundial.datetime.now()
+    authenticated.save()
+
+    final_report()
+
+    final_cleanup()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/rp/rcynic/rpki-torrent.py b/rp/rcynic/rpki-torrent.py
index 2c6aa64d..f9a3d620 100644
--- a/rp/rcynic/rpki-torrent.py
+++ b/rp/rcynic/rpki-torrent.py
@@ -46,688 +46,688 @@ import transmissionrpc
 tr_env_vars = ("TR_TORRENT_DIR", "TR_TORRENT_ID", "TR_TORRENT_NAME")


 class WrongServer(Exception):
-  "Hostname not in X.509v3 subjectAltName extension."
+    "Hostname not in X.509v3 subjectAltName extension."


 class UnexpectedRedirect(Exception):
-  "Unexpected HTTP redirect."
+ "Unexpected HTTP redirect." class WrongMode(Exception): - "Wrong operation for mode." + "Wrong operation for mode." class BadFormat(Exception): - "Zip file does not match our expectations." + "Zip file does not match our expectations." class InconsistentEnvironment(Exception): - "Environment variables received from Transmission aren't consistent." + "Environment variables received from Transmission aren't consistent." class TorrentNotReady(Exception): - "Torrent is not ready for checking." + "Torrent is not ready for checking." class TorrentDoesNotMatchManifest(Exception): - "Retrieved torrent does not match manifest." + "Retrieved torrent does not match manifest." class TorrentNameDoesNotMatchURL(Exception): - "Torrent name doesn't uniquely match a URL." + "Torrent name doesn't uniquely match a URL." class CouldNotFindTorrents(Exception): - "Could not find torrent(s) with given name(s)." + "Could not find torrent(s) with given name(s)." class UseTheSourceLuke(Exception): - "Use The Source, Luke." + "Use The Source, Luke." cfg = None def main(): - try: - syslog_flags = syslog.LOG_PID - if os.isatty(sys.stderr.fileno()): - syslog_flags |= syslog.LOG_PERROR - syslog.openlog("rpki-torrent", syslog_flags) - - # If I seriously expected this script to get a lot of further use, - # I might rewrite this using subparsers, but it'd be a bit tricky - # as argparse doesn't support making the subparser argument - # optional and transmission gives no sane way to provide arguments - # when running a completion script. So, for the moment, let's - # just fix the bugs accidently introduced while converting the - # universe to argparse without making any radical changes to the - # program structure here, even if the result looks kind of klunky. - - parser = argparse.ArgumentParser(description = __doc__) - parser.add_argument("-c", "--config", - help = "configuration file") - parser.add_argument("action", choices = ("poll", "generate", "mirror"), nargs = "?", - help = "action to take") - args = parser.parse_args() - - global cfg - cfg = MyConfigParser() - cfg.read(args.config or - [os.path.join(dn, fn) - for fn in ("rcynic.conf", "rpki.conf") - for dn in ("/var/rcynic/etc", "/usr/local/etc", "/etc")]) - - if cfg.act_as_generator: - if args.action == "generate": - generator_main() - elif args.action == "mirror": - mirror_main() - else: - raise UseTheSourceLuke - else: - if args.action is None and all(v in os.environ for v in tr_env_vars): - torrent_completion_main() - elif args.action == "poll": - poll_main() - else: - raise UseTheSourceLuke - - except: - for line in traceback.format_exc().splitlines(): - syslog.syslog(line) - sys.exit(1) + try: + syslog_flags = syslog.LOG_PID + if os.isatty(sys.stderr.fileno()): + syslog_flags |= syslog.LOG_PERROR + syslog.openlog("rpki-torrent", syslog_flags) + + # If I seriously expected this script to get a lot of further use, + # I might rewrite this using subparsers, but it'd be a bit tricky + # as argparse doesn't support making the subparser argument + # optional and transmission gives no sane way to provide arguments + # when running a completion script. So, for the moment, let's + # just fix the bugs accidently introduced while converting the + # universe to argparse without making any radical changes to the + # program structure here, even if the result looks kind of klunky. 
+ + parser = argparse.ArgumentParser(description = __doc__) + parser.add_argument("-c", "--config", + help = "configuration file") + parser.add_argument("action", choices = ("poll", "generate", "mirror"), nargs = "?", + help = "action to take") + args = parser.parse_args() + + global cfg + cfg = MyConfigParser() + cfg.read(args.config or + [os.path.join(dn, fn) + for fn in ("rcynic.conf", "rpki.conf") + for dn in ("/var/rcynic/etc", "/usr/local/etc", "/etc")]) + + if cfg.act_as_generator: + if args.action == "generate": + generator_main() + elif args.action == "mirror": + mirror_main() + else: + raise UseTheSourceLuke + else: + if args.action is None and all(v in os.environ for v in tr_env_vars): + torrent_completion_main() + elif args.action == "poll": + poll_main() + else: + raise UseTheSourceLuke + + except: + for line in traceback.format_exc().splitlines(): + syslog.syslog(line) + sys.exit(1) def generator_main(): - import paramiko - - class SFTPClient(paramiko.SFTPClient): - def atomic_rename(self, oldpath, newpath): - oldpath = self._adjust_cwd(oldpath) - newpath = self._adjust_cwd(newpath) - self._log(paramiko.common.DEBUG, 'atomic_rename(%r, %r)' % (oldpath, newpath)) - self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath) - - z = ZipFile(url = cfg.generate_url, dn = cfg.zip_dir) - client = TransmissionClient() - - client.remove_torrents(z.torrent_name) - - download_dir = client.get_session().download_dir - torrent_dir = os.path.join(download_dir, z.torrent_name) - torrent_file = os.path.join(cfg.zip_dir, z.torrent_name + ".torrent") - - - syslog.syslog("Synchronizing local data from %s to %s" % (cfg.unauthenticated, torrent_dir)) - subprocess.check_call((cfg.rsync_prog, "--archive", "--delete", - os.path.normpath(cfg.unauthenticated) + "/", - os.path.normpath(torrent_dir) + "/")) - - syslog.syslog("Creating %s" % torrent_file) - try: - os.unlink(torrent_file) - except OSError, e: - if e.errno != errno.ENOENT: - raise - ignore_output_for_now = subprocess.check_output( # pylint: disable=W0612 - (cfg.mktorrent_prog, - "-a", cfg.tracker_url, - "-c", "RPKI unauthenticated data snapshot generated by rpki-torrent", - "-o", torrent_file, - torrent_dir)) - - syslog.syslog("Generating manifest") - manifest = create_manifest(download_dir, z.torrent_name) - - syslog.syslog("Loading %s with unlimited seeding" % torrent_file) - f = open(torrent_file, "rb") - client.add(base64.b64encode(f.read())) - f.close() - client.unlimited_seeding(z.torrent_name) - - syslog.syslog("Creating upload connection") - ssh = paramiko.Transport((cfg.sftp_host, cfg.sftp_port)) - try: - hostkeys = paramiko.util.load_host_keys(cfg.sftp_hostkey_file)[cfg.sftp_host]["ssh-rsa"] - except ConfigParser.Error: - hostkeys = None - ssh.connect( - username = cfg.sftp_user, - hostkey = hostkeys, - pkey = paramiko.RSAKey.from_private_key_file(cfg.sftp_private_key_file)) - sftp = SFTPClient.from_transport(ssh) - - zip_filename = os.path.join("data", os.path.basename(z.filename)) - zip_tempname = zip_filename + ".new" - - syslog.syslog("Creating %s" % zip_tempname) - f = sftp.open(zip_tempname, "wb") - z.set_output_stream(f) - - syslog.syslog("Writing %s to zip" % torrent_file) - z.write( - torrent_file, - arcname = os.path.basename(torrent_file), - compress_type = zipfile.ZIP_DEFLATED) - - manifest_name = z.torrent_name + ".manifest" - - syslog.syslog("Writing %s to zip" % manifest_name) - zi = zipfile.ZipInfo(manifest_name, time.gmtime()[:6]) - zi.external_attr = (stat.S_IFREG | 0644) << 16 - 
zi.internal_attr = 1 # Text, not binary - z.writestr(zi, - "".join("%s %s\n" % (v, k) for k, v in manifest.iteritems()), - zipfile.ZIP_DEFLATED) - - syslog.syslog("Closing %s and renaming to %s" % (zip_tempname, zip_filename)) - z.close() - f.close() - sftp.atomic_rename(zip_tempname, zip_filename) - - syslog.syslog("Closing upload connection") - ssh.close() - -def mirror_main(): - client = TransmissionClient() - torrent_names = [] - - for zip_url in cfg.zip_urls: - if zip_url != cfg.generate_url: - z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta) - if z.fetch(): - client.remove_torrents(z.torrent_name) - syslog.syslog("Mirroring torrent %s" % z.torrent_name) - client.add(z.get_torrent()) - torrent_names.append(z.torrent_name) - - if torrent_names: - client.unlimited_seeding(*torrent_names) + import paramiko + class SFTPClient(paramiko.SFTPClient): + def atomic_rename(self, oldpath, newpath): + oldpath = self._adjust_cwd(oldpath) + newpath = self._adjust_cwd(newpath) + self._log(paramiko.common.DEBUG, 'atomic_rename(%r, %r)' % (oldpath, newpath)) + self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath) -def poll_main(): - for zip_url in cfg.zip_urls: - - z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta) + z = ZipFile(url = cfg.generate_url, dn = cfg.zip_dir) client = TransmissionClient() - if z.fetch(): - client.remove_torrents(z.torrent_name) - syslog.syslog("Adding torrent %s" % z.torrent_name) - client.add(z.get_torrent()) - - elif cfg.run_rcynic_anyway: - run_rcynic(client, z) - - -def torrent_completion_main(): - torrent_name = os.getenv("TR_TORRENT_NAME") - torrent_id = int(os.getenv("TR_TORRENT_ID")) - - z = ZipFile(url = cfg.find_url(torrent_name), dn = cfg.zip_dir, ta = cfg.zip_ta) - client = TransmissionClient() - torrent = client.info([torrent_id]).popitem()[1] + client.remove_torrents(z.torrent_name) - if torrent.name != torrent_name: - raise InconsistentEnvironment("Torrent name %s does not match ID %d" % (torrent_name, torrent_id)) + download_dir = client.get_session().download_dir + torrent_dir = os.path.join(download_dir, z.torrent_name) + torrent_file = os.path.join(cfg.zip_dir, z.torrent_name + ".torrent") - if z.torrent_name != torrent_name: - raise InconsistentEnvironment("Torrent name %s does not match torrent name in zip file %s" % (torrent_name, z.torrent_name)) - if torrent is None or torrent.progress != 100: - raise TorrentNotReady("Torrent %s not ready for checking, how did I get here?" % torrent_name) + syslog.syslog("Synchronizing local data from %s to %s" % (cfg.unauthenticated, torrent_dir)) + subprocess.check_call((cfg.rsync_prog, "--archive", "--delete", + os.path.normpath(cfg.unauthenticated) + "/", + os.path.normpath(torrent_dir) + "/")) - log_email("Download complete %s" % z.url) - - run_rcynic(client, z) - - -def run_rcynic(client, z): - """ - Run rcynic and any post-processing we might want. 
- """ - - if cfg.lockfile is not None: - syslog.syslog("Acquiring lock %s" % cfg.lockfile) - lock = os.open(cfg.lockfile, os.O_WRONLY | os.O_CREAT, 0600) - fcntl.flock(lock, fcntl.LOCK_EX) - else: - lock = None - - syslog.syslog("Checking manifest against disk") - - download_dir = client.get_session().download_dir - - manifest_from_disk = create_manifest(download_dir, z.torrent_name) - manifest_from_zip = z.get_manifest() + syslog.syslog("Creating %s" % torrent_file) + try: + os.unlink(torrent_file) + except OSError, e: + if e.errno != errno.ENOENT: + raise + ignore_output_for_now = subprocess.check_output( # pylint: disable=W0612 + (cfg.mktorrent_prog, + "-a", cfg.tracker_url, + "-c", "RPKI unauthenticated data snapshot generated by rpki-torrent", + "-o", torrent_file, + torrent_dir)) + + syslog.syslog("Generating manifest") + manifest = create_manifest(download_dir, z.torrent_name) + + syslog.syslog("Loading %s with unlimited seeding" % torrent_file) + f = open(torrent_file, "rb") + client.add(base64.b64encode(f.read())) + f.close() + client.unlimited_seeding(z.torrent_name) - excess_files = set(manifest_from_disk) - set(manifest_from_zip) - for fn in excess_files: - del manifest_from_disk[fn] + syslog.syslog("Creating upload connection") + ssh = paramiko.Transport((cfg.sftp_host, cfg.sftp_port)) + try: + hostkeys = paramiko.util.load_host_keys(cfg.sftp_hostkey_file)[cfg.sftp_host]["ssh-rsa"] + except ConfigParser.Error: + hostkeys = None + ssh.connect( + username = cfg.sftp_user, + hostkey = hostkeys, + pkey = paramiko.RSAKey.from_private_key_file(cfg.sftp_private_key_file)) + sftp = SFTPClient.from_transport(ssh) + + zip_filename = os.path.join("data", os.path.basename(z.filename)) + zip_tempname = zip_filename + ".new" + + syslog.syslog("Creating %s" % zip_tempname) + f = sftp.open(zip_tempname, "wb") + z.set_output_stream(f) + + syslog.syslog("Writing %s to zip" % torrent_file) + z.write( + torrent_file, + arcname = os.path.basename(torrent_file), + compress_type = zipfile.ZIP_DEFLATED) + + manifest_name = z.torrent_name + ".manifest" + + syslog.syslog("Writing %s to zip" % manifest_name) + zi = zipfile.ZipInfo(manifest_name, time.gmtime()[:6]) + zi.external_attr = (stat.S_IFREG | 0644) << 16 + zi.internal_attr = 1 # Text, not binary + z.writestr(zi, + "".join("%s %s\n" % (v, k) for k, v in manifest.iteritems()), + zipfile.ZIP_DEFLATED) + + syslog.syslog("Closing %s and renaming to %s" % (zip_tempname, zip_filename)) + z.close() + f.close() + sftp.atomic_rename(zip_tempname, zip_filename) - if manifest_from_disk != manifest_from_zip: - raise TorrentDoesNotMatchManifest("Manifest for torrent %s does not match what we got" % - z.torrent_name) + syslog.syslog("Closing upload connection") + ssh.close() - if excess_files: - syslog.syslog("Cleaning up excess files") - for fn in excess_files: - os.unlink(os.path.join(download_dir, fn)) +def mirror_main(): + client = TransmissionClient() + torrent_names = [] - syslog.syslog("Running rcynic") - log_email("Starting rcynic %s" % z.url) - subprocess.check_call((cfg.rcynic_prog, - "-c", cfg.rcynic_conf, - "-u", os.path.join(client.get_session().download_dir, z.torrent_name))) - log_email("Completed rcynic %s" % z.url) + for zip_url in cfg.zip_urls: + if zip_url != cfg.generate_url: + z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta) + if z.fetch(): + client.remove_torrents(z.torrent_name) + syslog.syslog("Mirroring torrent %s" % z.torrent_name) + client.add(z.get_torrent()) + torrent_names.append(z.torrent_name) - for cmd in 
cfg.post_rcynic_commands: - syslog.syslog("Running post-rcynic command: %s" % cmd) - subprocess.check_call(cmd, shell = True) + if torrent_names: + client.unlimited_seeding(*torrent_names) - if lock is not None: - syslog.syslog("Releasing lock %s" % cfg.lockfile) - os.close(lock) -# See http://www.minstrel.org.uk/papers/sftp/ for details on how to -# set up safe upload-only SFTP directories on the server. In -# particular http://www.minstrel.org.uk/papers/sftp/builtin/ is likely -# to be the right path. +def poll_main(): + for zip_url in cfg.zip_urls: + z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta) + client = TransmissionClient() -class ZipFile(object): - """ - Augmented version of standard python zipfile.ZipFile class, with - some extra methods and specialized capabilities. - - All methods of the standard zipfile.ZipFile class are supported, but - the constructor arguments are different, and opening the zip file - itself is deferred until a call which requires this, since the file - may first need to be fetched via HTTPS. - """ - - def __init__(self, url, dn, ta = None, verbose = True): - self.url = url - self.dir = dn - self.ta = ta - self.verbose = verbose - self.filename = os.path.join(dn, os.path.basename(url)) - self.changed = False - self.zf = None - self.peercert = None - self.torrent_name, zip_ext = os.path.splitext(os.path.basename(url)) - if zip_ext != ".zip": - raise BadFormat - - - def __getattr__(self, name): - if self.zf is None: - self.zf = zipfile.ZipFile(self.filename) - return getattr(self.zf, name) - - - def build_opener(self): - """ - Voodoo to create a urllib2.OpenerDirector object with TLS - certificate checking enabled and a hook to set self.peercert so - our caller can check the subjectAltName field. + if z.fetch(): + client.remove_torrents(z.torrent_name) + syslog.syslog("Adding torrent %s" % z.torrent_name) + client.add(z.get_torrent()) - You probably don't want to look at this if you can avoid it. - """ + elif cfg.run_rcynic_anyway: + run_rcynic(client, z) - assert self.ta is not None - # Yes, we're constructing one-off classes. Look away, look away. +def torrent_completion_main(): + torrent_name = os.getenv("TR_TORRENT_NAME") + torrent_id = int(os.getenv("TR_TORRENT_ID")) - class HTTPSConnection(httplib.HTTPSConnection): - zip = self - def connect(self): - sock = socket.create_connection((self.host, self.port), self.timeout) - if getattr(self, "_tunnel_host", None): - self.sock = sock - self._tunnel() - self.sock = ssl.wrap_socket(sock, - keyfile = self.key_file, - certfile = self.cert_file, - cert_reqs = ssl.CERT_REQUIRED, - ssl_version = ssl.PROTOCOL_TLSv1, - ca_certs = self.zip.ta) - self.zip.peercert = self.sock.getpeercert() + z = ZipFile(url = cfg.find_url(torrent_name), dn = cfg.zip_dir, ta = cfg.zip_ta) + client = TransmissionClient() + torrent = client.info([torrent_id]).popitem()[1] - class HTTPSHandler(urllib2.HTTPSHandler): - def https_open(self, req): - return self.do_open(HTTPSConnection, req) + if torrent.name != torrent_name: + raise InconsistentEnvironment("Torrent name %s does not match ID %d" % (torrent_name, torrent_id)) - return urllib2.build_opener(HTTPSHandler) + if z.torrent_name != torrent_name: + raise InconsistentEnvironment("Torrent name %s does not match torrent name in zip file %s" % (torrent_name, z.torrent_name)) + if torrent is None or torrent.progress != 100: + raise TorrentNotReady("Torrent %s not ready for checking, how did I get here?" 
% torrent_name) - def check_subjectAltNames(self): - """ - Check self.peercert against URL to make sure we were talking to - the right HTTPS server. - """ + log_email("Download complete %s" % z.url) - hostname = urlparse.urlparse(self.url).hostname - subjectAltNames = set(i[1] - for i in self.peercert.get("subjectAltName", ()) - if i[0] == "DNS") - if hostname not in subjectAltNames: - raise WrongServer + run_rcynic(client, z) - def download_file(self, r, bufsize = 4096): +def run_rcynic(client, z): """ - Downloaded file to disk. + Run rcynic and any post-processing we might want. """ - tempname = self.filename + ".new" - f = open(tempname, "wb") - n = int(r.info()["Content-Length"]) - for i in xrange(0, n - bufsize, bufsize): # pylint: disable=W0612 - f.write(r.read(bufsize)) - f.write(r.read()) - f.close() - mtime = email.utils.mktime_tz(email.utils.parsedate_tz(r.info()["Last-Modified"])) - os.utime(tempname, (mtime, mtime)) - os.rename(tempname, self.filename) + if cfg.lockfile is not None: + syslog.syslog("Acquiring lock %s" % cfg.lockfile) + lock = os.open(cfg.lockfile, os.O_WRONLY | os.O_CREAT, 0600) + fcntl.flock(lock, fcntl.LOCK_EX) + else: + lock = None + syslog.syslog("Checking manifest against disk") - def set_output_stream(self, stream): - """ - Set up this zip file for writing to a network stream. - """ + download_dir = client.get_session().download_dir - assert self.zf is None - self.zf = zipfile.ZipFile(stream, "w") + manifest_from_disk = create_manifest(download_dir, z.torrent_name) + manifest_from_zip = z.get_manifest() + excess_files = set(manifest_from_disk) - set(manifest_from_zip) + for fn in excess_files: + del manifest_from_disk[fn] - def fetch(self): - """ - Fetch zip file from URL given to constructor. - """ + if manifest_from_disk != manifest_from_zip: + raise TorrentDoesNotMatchManifest("Manifest for torrent %s does not match what we got" % + z.torrent_name) - headers = { "User-Agent" : "rpki-torrent" } - try: - headers["If-Modified-Since"] = email.utils.formatdate( - os.path.getmtime(self.filename), False, True) - except OSError: - pass + if excess_files: + syslog.syslog("Cleaning up excess files") + for fn in excess_files: + os.unlink(os.path.join(download_dir, fn)) - syslog.syslog("Checking %s..." % self.url) - try: - r = self.build_opener().open(urllib2.Request(self.url, None, headers)) - syslog.syslog("%s has changed, starting download" % self.url) - self.changed = True - log_email("Downloading %s" % self.url) - except urllib2.HTTPError, e: - if e.code == 304: - syslog.syslog("%s has not changed" % self.url) - elif e.code == 404: - syslog.syslog("%s does not exist" % self.url) - else: - raise - r = None - - self.check_subjectAltNames() + syslog.syslog("Running rcynic") + log_email("Starting rcynic %s" % z.url) + subprocess.check_call((cfg.rcynic_prog, + "-c", cfg.rcynic_conf, + "-u", os.path.join(client.get_session().download_dir, z.torrent_name))) + log_email("Completed rcynic %s" % z.url) - if r is not None and r.geturl() != self.url: - raise UnexpectedRedirect + for cmd in cfg.post_rcynic_commands: + syslog.syslog("Running post-rcynic command: %s" % cmd) + subprocess.check_call(cmd, shell = True) - if r is not None: - self.download_file(r) - r.close() + if lock is not None: + syslog.syslog("Releasing lock %s" % cfg.lockfile) + os.close(lock) - return self.changed +# See http://www.minstrel.org.uk/papers/sftp/ for details on how to +# set up safe upload-only SFTP directories on the server. 
In +# particular http://www.minstrel.org.uk/papers/sftp/builtin/ is likely +# to be the right path. - def check_format(self): - """ - Make sure that format of zip file matches our preconceptions: it - should contain two files, one of which is the .torrent file, the - other is the manifest, with names derived from the torrent name - inferred from the URL. +class ZipFile(object): """ + Augmented version of standard python zipfile.ZipFile class, with + some extra methods and specialized capabilities. - if set(self.namelist()) != set((self.torrent_name + ".torrent", self.torrent_name + ".manifest")): - raise BadFormat - - - def get_torrent(self): - """ - Extract torrent file from zip file, encoded in Base64 because - that's what the transmisionrpc library says it wants. + All methods of the standard zipfile.ZipFile class are supported, but + the constructor arguments are different, and opening the zip file + itself is deferred until a call which requires this, since the file + may first need to be fetched via HTTPS. """ - self.check_format() - return base64.b64encode(self.read(self.torrent_name + ".torrent")) + def __init__(self, url, dn, ta = None, verbose = True): + self.url = url + self.dir = dn + self.ta = ta + self.verbose = verbose + self.filename = os.path.join(dn, os.path.basename(url)) + self.changed = False + self.zf = None + self.peercert = None + self.torrent_name, zip_ext = os.path.splitext(os.path.basename(url)) + if zip_ext != ".zip": + raise BadFormat + + + def __getattr__(self, name): + if self.zf is None: + self.zf = zipfile.ZipFile(self.filename) + return getattr(self.zf, name) + + + def build_opener(self): + """ + Voodoo to create a urllib2.OpenerDirector object with TLS + certificate checking enabled and a hook to set self.peercert so + our caller can check the subjectAltName field. + + You probably don't want to look at this if you can avoid it. + """ + + assert self.ta is not None + + # Yes, we're constructing one-off classes. Look away, look away. + + class HTTPSConnection(httplib.HTTPSConnection): + zip = self + def connect(self): + sock = socket.create_connection((self.host, self.port), self.timeout) + if getattr(self, "_tunnel_host", None): + self.sock = sock + self._tunnel() + self.sock = ssl.wrap_socket(sock, + keyfile = self.key_file, + certfile = self.cert_file, + cert_reqs = ssl.CERT_REQUIRED, + ssl_version = ssl.PROTOCOL_TLSv1, + ca_certs = self.zip.ta) + self.zip.peercert = self.sock.getpeercert() + + class HTTPSHandler(urllib2.HTTPSHandler): + def https_open(self, req): + return self.do_open(HTTPSConnection, req) + + return urllib2.build_opener(HTTPSHandler) + + + def check_subjectAltNames(self): + """ + Check self.peercert against URL to make sure we were talking to + the right HTTPS server. + """ + + hostname = urlparse.urlparse(self.url).hostname + subjectAltNames = set(i[1] + for i in self.peercert.get("subjectAltName", ()) + if i[0] == "DNS") + if hostname not in subjectAltNames: + raise WrongServer + + + def download_file(self, r, bufsize = 4096): + """ + Downloaded file to disk. 
+ """ + + tempname = self.filename + ".new" + f = open(tempname, "wb") + n = int(r.info()["Content-Length"]) + for i in xrange(0, n - bufsize, bufsize): # pylint: disable=W0612 + f.write(r.read(bufsize)) + f.write(r.read()) + f.close() + mtime = email.utils.mktime_tz(email.utils.parsedate_tz(r.info()["Last-Modified"])) + os.utime(tempname, (mtime, mtime)) + os.rename(tempname, self.filename) + + + def set_output_stream(self, stream): + """ + Set up this zip file for writing to a network stream. + """ + + assert self.zf is None + self.zf = zipfile.ZipFile(stream, "w") + + + def fetch(self): + """ + Fetch zip file from URL given to constructor. + """ + + headers = { "User-Agent" : "rpki-torrent" } + try: + headers["If-Modified-Since"] = email.utils.formatdate( + os.path.getmtime(self.filename), False, True) + except OSError: + pass + + syslog.syslog("Checking %s..." % self.url) + try: + r = self.build_opener().open(urllib2.Request(self.url, None, headers)) + syslog.syslog("%s has changed, starting download" % self.url) + self.changed = True + log_email("Downloading %s" % self.url) + except urllib2.HTTPError, e: + if e.code == 304: + syslog.syslog("%s has not changed" % self.url) + elif e.code == 404: + syslog.syslog("%s does not exist" % self.url) + else: + raise + r = None + + self.check_subjectAltNames() + + if r is not None and r.geturl() != self.url: + raise UnexpectedRedirect + + if r is not None: + self.download_file(r) + r.close() + + return self.changed + + + def check_format(self): + """ + Make sure that format of zip file matches our preconceptions: it + should contain two files, one of which is the .torrent file, the + other is the manifest, with names derived from the torrent name + inferred from the URL. + """ + + if set(self.namelist()) != set((self.torrent_name + ".torrent", self.torrent_name + ".manifest")): + raise BadFormat + + + def get_torrent(self): + """ + Extract torrent file from zip file, encoded in Base64 because + that's what the transmisionrpc library says it wants. + """ + + self.check_format() + return base64.b64encode(self.read(self.torrent_name + ".torrent")) + + + def get_manifest(self): + """ + Extract manifest from zip file, as a dictionary. + + For the moment we're fixing up the internal file names from the + format that the existing shell-script prototype uses, but this + should go away once this program both generates and checks the + manifests. + """ + + self.check_format() + result = {} + for line in self.open(self.torrent_name + ".manifest"): + h, fn = line.split() + # + # Fixup for earlier manifest format, this should go away + if not fn.startswith(self.torrent_name): + fn = os.path.normpath(os.path.join(self.torrent_name, fn)) + # + result[fn] = h + return result - def get_manifest(self): +def create_manifest(topdir, torrent_name): """ - Extract manifest from zip file, as a dictionary. - - For the moment we're fixing up the internal file names from the - format that the existing shell-script prototype uses, but this - should go away once this program both generates and checks the - manifests. + Generate a manifest, expressed as a dictionary. 
""" - self.check_format() result = {} - for line in self.open(self.torrent_name + ".manifest"): - h, fn = line.split() - # - # Fixup for earlier manifest format, this should go away - if not fn.startswith(self.torrent_name): - fn = os.path.normpath(os.path.join(self.torrent_name, fn)) - # - result[fn] = h + topdir = os.path.abspath(topdir) + for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, torrent_name)): # pylint: disable=W0612 + for filename in filenames: + filename = os.path.join(dirpath, filename) + f = open(filename, "rb") + result[os.path.relpath(filename, topdir)] = hashlib.sha256(f.read()).hexdigest() + f.close() return result -def create_manifest(topdir, torrent_name): - """ - Generate a manifest, expressed as a dictionary. - """ - - result = {} - topdir = os.path.abspath(topdir) - for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, torrent_name)): # pylint: disable=W0612 - for filename in filenames: - filename = os.path.join(dirpath, filename) - f = open(filename, "rb") - result[os.path.relpath(filename, topdir)] = hashlib.sha256(f.read()).hexdigest() - f.close() - return result - - def log_email(msg, subj = None): - try: - if not msg.endswith("\n"): - msg += "\n" - if subj is None: - subj = msg.partition("\n")[0] - m = email.mime.text.MIMEText(msg) - m["Date"] = time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime()) - m["From"] = cfg.log_email - m["To"] = cfg.log_email - m["Subject"] = subj - s = smtplib.SMTP("localhost") - s.sendmail(cfg.log_email, [cfg.log_email], m.as_string()) - s.quit() - except ConfigParser.Error: - pass + try: + if not msg.endswith("\n"): + msg += "\n" + if subj is None: + subj = msg.partition("\n")[0] + m = email.mime.text.MIMEText(msg) + m["Date"] = time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime()) + m["From"] = cfg.log_email + m["To"] = cfg.log_email + m["Subject"] = subj + s = smtplib.SMTP("localhost") + s.sendmail(cfg.log_email, [cfg.log_email], m.as_string()) + s.quit() + except ConfigParser.Error: + pass class TransmissionClient(transmissionrpc.client.Client): - """ - Extension of transmissionrpc.client.Client. - """ - - def __init__(self, **kwargs): - kwargs.setdefault("address", "127.0.0.1") - kwargs.setdefault("user", cfg.transmission_username) - kwargs.setdefault("password", cfg.transmission_password) - transmissionrpc.client.Client.__init__(self, **kwargs) - - - def find_torrents(self, *names): - """ - Find torrents with given name(s), return id(s). - """ - - result = [i for i, t in self.list().iteritems() if t.name in names] - if not result: - raise CouldNotFindTorrents - return result - - - def remove_torrents(self, *names): """ - Remove any torrents with the given name(s). + Extension of transmissionrpc.client.Client. """ - try: - ids = self.find_torrents(*names) - except CouldNotFindTorrents: - pass - else: - syslog.syslog("Removing torrent%s %s (%s)" % ( - "" if len(ids) == 1 else "s", - ", ".join(names), - ", ".join("#%s" % i for i in ids))) - self.remove(ids) + def __init__(self, **kwargs): + kwargs.setdefault("address", "127.0.0.1") + kwargs.setdefault("user", cfg.transmission_username) + kwargs.setdefault("password", cfg.transmission_password) + transmissionrpc.client.Client.__init__(self, **kwargs) - def unlimited_seeding(self, *names): - """ - Set unlimited seeding for specified torrents. 
- """ - # Apparently seedRatioMode = 2 means "no limit" - try: - self.change(self.find_torrents(*names), seedRatioMode = 2) - except CouldNotFindTorrents: - syslog.syslog("Couldn't tweak seedRatioMode, blundering onwards") + def find_torrents(self, *names): + """ + Find torrents with given name(s), return id(s). + """ + result = [i for i, t in self.list().iteritems() if t.name in names] + if not result: + raise CouldNotFindTorrents + return result -class MyConfigParser(ConfigParser.RawConfigParser): - rpki_torrent_section = "rpki-torrent" + def remove_torrents(self, *names): + """ + Remove any torrents with the given name(s). + """ - @property - def zip_dir(self): - return self.get(self.rpki_torrent_section, "zip_dir") + try: + ids = self.find_torrents(*names) + except CouldNotFindTorrents: + pass + else: + syslog.syslog("Removing torrent%s %s (%s)" % ( + "" if len(ids) == 1 else "s", + ", ".join(names), + ", ".join("#%s" % i for i in ids))) + self.remove(ids) - @property - def zip_ta(self): - return self.get(self.rpki_torrent_section, "zip_ta") + def unlimited_seeding(self, *names): + """ + Set unlimited seeding for specified torrents. + """ - @property - def rcynic_prog(self): - return self.get(self.rpki_torrent_section, "rcynic_prog") + # Apparently seedRatioMode = 2 means "no limit" + try: + self.change(self.find_torrents(*names), seedRatioMode = 2) + except CouldNotFindTorrents: + syslog.syslog("Couldn't tweak seedRatioMode, blundering onwards") - @property - def rcynic_conf(self): - return self.get(self.rpki_torrent_section, "rcynic_conf") - @property - def run_rcynic_anyway(self): - return self.getboolean(self.rpki_torrent_section, "run_rcynic_anyway") - - @property - def generate_url(self): - return self.get(self.rpki_torrent_section, "generate_url") - - @property - def act_as_generator(self): - try: - return self.get(self.rpki_torrent_section, "generate_url") != "" - except ConfigParser.Error: - return False - - @property - def rsync_prog(self): - return self.get(self.rpki_torrent_section, "rsync_prog") - - @property - def mktorrent_prog(self): - return self.get(self.rpki_torrent_section, "mktorrent_prog") - - @property - def tracker_url(self): - return self.get(self.rpki_torrent_section, "tracker_url") - - @property - def sftp_host(self): - return self.get(self.rpki_torrent_section, "sftp_host") - - @property - def sftp_port(self): - try: - return self.getint(self.rpki_torrent_section, "sftp_port") - except ConfigParser.Error: - return 22 - - @property - def sftp_user(self): - return self.get(self.rpki_torrent_section, "sftp_user") - - @property - def sftp_hostkey_file(self): - return self.get(self.rpki_torrent_section, "sftp_hostkey_file") - - @property - def sftp_private_key_file(self): - return self.get(self.rpki_torrent_section, "sftp_private_key_file") - - @property - def lockfile(self): - try: - return self.get(self.rpki_torrent_section, "lockfile") - except ConfigParser.Error: - return None - - @property - def unauthenticated(self): - try: - return self.get(self.rpki_torrent_section, "unauthenticated") - except ConfigParser.Error: - return self.get("rcynic", "unauthenticated") - - @property - def log_email(self): - return self.get(self.rpki_torrent_section, "log_email") - - @property - def transmission_username(self): - try: - return self.get(self.rpki_torrent_section, "transmission_username") - except ConfigParser.Error: - return None +class MyConfigParser(ConfigParser.RawConfigParser): - @property - def transmission_password(self): - try: - return 
self.get(self.rpki_torrent_section, "transmission_password") - except ConfigParser.Error: - return None - - def multioption_iter(self, name, getter = None): - if getter is None: - getter = self.get - if self.has_option(self.rpki_torrent_section, name): - yield getter(self.rpki_torrent_section, name) - name += "." - names = [i for i in self.options(self.rpki_torrent_section) if i.startswith(name) and i[len(name):].isdigit()] - names.sort(key = lambda s: int(s[len(name):])) # pylint: disable=W0631 - for name in names: - yield getter(self.rpki_torrent_section, name) - - @property - def zip_urls(self): - return self.multioption_iter("zip_url") - - @property - def post_rcynic_commands(self): - return self.multioption_iter("post_rcynic_command") - - def find_url(self, torrent_name): - urls = [u for u in self.zip_urls - if os.path.splitext(os.path.basename(u))[0] == torrent_name] - if len(urls) != 1: - raise TorrentNameDoesNotMatchURL("Can't find URL matching torrent name %s" % torrent_name) - return urls[0] + rpki_torrent_section = "rpki-torrent" + + @property + def zip_dir(self): + return self.get(self.rpki_torrent_section, "zip_dir") + + @property + def zip_ta(self): + return self.get(self.rpki_torrent_section, "zip_ta") + + @property + def rcynic_prog(self): + return self.get(self.rpki_torrent_section, "rcynic_prog") + + @property + def rcynic_conf(self): + return self.get(self.rpki_torrent_section, "rcynic_conf") + + @property + def run_rcynic_anyway(self): + return self.getboolean(self.rpki_torrent_section, "run_rcynic_anyway") + + @property + def generate_url(self): + return self.get(self.rpki_torrent_section, "generate_url") + + @property + def act_as_generator(self): + try: + return self.get(self.rpki_torrent_section, "generate_url") != "" + except ConfigParser.Error: + return False + + @property + def rsync_prog(self): + return self.get(self.rpki_torrent_section, "rsync_prog") + + @property + def mktorrent_prog(self): + return self.get(self.rpki_torrent_section, "mktorrent_prog") + + @property + def tracker_url(self): + return self.get(self.rpki_torrent_section, "tracker_url") + + @property + def sftp_host(self): + return self.get(self.rpki_torrent_section, "sftp_host") + + @property + def sftp_port(self): + try: + return self.getint(self.rpki_torrent_section, "sftp_port") + except ConfigParser.Error: + return 22 + + @property + def sftp_user(self): + return self.get(self.rpki_torrent_section, "sftp_user") + + @property + def sftp_hostkey_file(self): + return self.get(self.rpki_torrent_section, "sftp_hostkey_file") + + @property + def sftp_private_key_file(self): + return self.get(self.rpki_torrent_section, "sftp_private_key_file") + + @property + def lockfile(self): + try: + return self.get(self.rpki_torrent_section, "lockfile") + except ConfigParser.Error: + return None + + @property + def unauthenticated(self): + try: + return self.get(self.rpki_torrent_section, "unauthenticated") + except ConfigParser.Error: + return self.get("rcynic", "unauthenticated") + + @property + def log_email(self): + return self.get(self.rpki_torrent_section, "log_email") + + @property + def transmission_username(self): + try: + return self.get(self.rpki_torrent_section, "transmission_username") + except ConfigParser.Error: + return None + + @property + def transmission_password(self): + try: + return self.get(self.rpki_torrent_section, "transmission_password") + except ConfigParser.Error: + return None + + def multioption_iter(self, name, getter = None): + if getter is None: + getter = self.get + if 
self.has_option(self.rpki_torrent_section, name): + yield getter(self.rpki_torrent_section, name) + name += "." + names = [i for i in self.options(self.rpki_torrent_section) if i.startswith(name) and i[len(name):].isdigit()] + names.sort(key = lambda s: int(s[len(name):])) # pylint: disable=W0631 + for name in names: + yield getter(self.rpki_torrent_section, name) + + @property + def zip_urls(self): + return self.multioption_iter("zip_url") + + @property + def post_rcynic_commands(self): + return self.multioption_iter("post_rcynic_command") + + def find_url(self, torrent_name): + urls = [u for u in self.zip_urls + if os.path.splitext(os.path.basename(u))[0] == torrent_name] + if len(urls) != 1: + raise TorrentNameDoesNotMatchURL("Can't find URL matching torrent name %s" % torrent_name) + return urls[0] if __name__ == "__main__": - main() + main() diff --git a/rp/rcynic/rules.darwin.mk b/rp/rcynic/rules.darwin.mk index d37b0e75..f1eed3ce 100644 --- a/rp/rcynic/rules.darwin.mk +++ b/rp/rcynic/rules.darwin.mk @@ -1,108 +1,38 @@ # $Id$ install-user-and-group: .FORCE - @if /usr/bin/dscl . -read "/Groups/${RCYNIC_GROUP}" >/dev/null 2>&1; \ + @if /usr/bin/dscl . -read "/Groups/${RPKI_GROUP}" >/dev/null 2>&1; \ then \ - echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \ + echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \ elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \ - /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" && \ - /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" RealName "${RCYNIC_GECOS}" && \ - /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" PrimaryGroupID "$$gid" && \ - /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ - /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" Password "*"; \ + /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" && \ + /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" RealName "${RPKI_GECOS}" && \ + /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" PrimaryGroupID "$$gid" && \ + /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ + /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" Password "*"; \ then \ - echo "Added group \"${RCYNIC_GROUP}\"."; \ + echo "Added group \"${RPKI_GROUP}\"."; \ else \ - echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \ + echo "Adding group \"${RPKI_GROUP}\" failed..."; \ echo "Please create it, then try again."; \ exit 1; \ fi; \ - if /usr/bin/dscl . -read "/Users/${RCYNIC_USER}" >/dev/null 2>&1; \ + if /usr/bin/dscl . -read "/Users/${RPKI_USER}" >/dev/null 2>&1; \ then \ - echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \ + echo "You already have a user \"${RPKI_USER}\", so I will use it."; \ elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \ - /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" && \ - /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UserShell "/usr/bin/false" && \ - /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" RealName "${RCYNIC_GECOS}" && \ - /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UniqueID "$$uid" && \ - /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" PrimaryGroupID "$$gid" && \ - /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" NFSHomeDirectory "/var/empty" && \ - /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ - /usr/bin/dscl . 
-create "/Users/${RCYNIC_USER}" Password "*"; \ - then \ - echo "Added user \"${RCYNIC_USER}\"."; \ - else \ - echo "Adding user \"${RCYNIC_USER}\" failed..."; \ - echo "Please create it, then try again."; \ - exit 1; \ - fi - @if /usr/bin/dscl . -read "/Groups/${RPKIRTR_GROUP}" >/dev/null 2>&1; \ - then \ - echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \ - elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \ - /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" && \ - /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" RealName "${RPKIRTR_GECOS}" && \ - /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" PrimaryGroupID "$$gid" && \ - /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ - /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" Password "*"; \ - then \ - echo "Added group \"${RPKIRTR_GROUP}\"."; \ - else \ - echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \ - echo "Please create it, then try again."; \ - exit 1; \ - fi; \ - if /usr/bin/dscl . -read "/Users/${RPKIRTR_USER}" >/dev/null 2>&1; \ - then \ - echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \ - elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \ - /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" && \ - /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UserShell "/usr/bin/false" && \ - /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" RealName "${RPKIRTR_GECOS}" && \ - /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UniqueID "$$uid" && \ - /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" PrimaryGroupID "$$gid" && \ - /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" NFSHomeDirectory "/var/empty" && \ - /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ - /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" Password "*"; \ - then \ - echo "Added user \"${RPKIRTR_USER}\"."; \ + /usr/bin/dscl . -create "/Users/${RPKI_USER}" && \ + /usr/bin/dscl . -create "/Users/${RPKI_USER}" UserShell "/usr/bin/false" && \ + /usr/bin/dscl . -create "/Users/${RPKI_USER}" RealName "${RPKI_GECOS}" && \ + /usr/bin/dscl . -create "/Users/${RPKI_USER}" UniqueID "$$uid" && \ + /usr/bin/dscl . -create "/Users/${RPKI_USER}" PrimaryGroupID "$$gid" && \ + /usr/bin/dscl . -create "/Users/${RPKI_USER}" NFSHomeDirectory "/var/empty" && \ + /usr/bin/dscl . -create "/Users/${RPKI_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ + /usr/bin/dscl . 
-create "/Users/${RPKI_USER}" Password "*"; \ + then \ + echo "Added user \"${RPKI_USER}\"."; \ else \ - echo "Adding user \"${RPKIRTR_USER}\" failed..."; \ + echo "Adding user \"${RPKI_USER}\" failed..."; \ echo "Please create it, then try again."; \ exit 1; \ fi - - -install-shared-libraries: .FORCE - @echo "Copying required shared libraries" - @shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync"; \ - while true; \ - do \ - closure="$$(/usr/bin/otool -L $${shared_libraries} | /usr/bin/awk '/:$$/ {next} {print $$1}' | /usr/bin/sort -u)"; \ - if test "x$$shared_libraries" = "x$$closure"; - then \ - break; \ - else \ - shared_libraries="$$closure"; \ - fi; \ - done; \ - for shared in /usr/lib/dyld $$shared_libraries; \ - do \ - if /bin/test -r "${RCYNIC_DIR}/$${shared}"; \ - then \ - echo "You already have a \"${RCYNIC_DIR}/$${shared}\", so I will use it"; \ - elif /usr/bin/install -m 555 -o root -g wheel -p "$${shared}" "${RCYNIC_DIR}/$${shared}"; \ - then \ - echo "Copied $${shared} into ${RCYNIC_DIR}"; \ - else \ - echo "Unable to copy $${shared} into ${RCYNIC_DIR}"; \ - exit 1; \ - fi; \ - done - -install-rc-scripts: - ${INSTALL} -o root -g wheel -d ${DESTDIR}/Library/StartupItems/RCynic - ${INSTALL} -o root -g wheel -m 555 \ - rc-scripts/darwin/RCynic \ - rc-scripts/darwin/StartupParameters.plist \ - ${DESTDIR}/Library/Startup/RCynic diff --git a/rp/rcynic/rules.freebsd.mk b/rp/rcynic/rules.freebsd.mk index 5233386e..0f022a2e 100644 --- a/rp/rcynic/rules.freebsd.mk +++ b/rp/rcynic/rules.freebsd.mk @@ -1,56 +1,25 @@ # $Id$ install-user-and-group: .FORCE - @if /usr/sbin/pw groupshow "${RCYNIC_GROUP}" 2>/dev/null; \ + @if /usr/sbin/pw groupshow "${RPKI_GROUP}" 2>/dev/null; \ then \ - echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \ - elif /usr/sbin/pw groupadd ${RCYNIC_GROUP}; \ + echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \ + elif /usr/sbin/pw groupadd ${RPKI_GROUP}; \ then \ - echo "Added group \"${RCYNIC_GROUP}\"."; \ + echo "Added group \"${RPKI_GROUP}\"."; \ else \ - echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \ + echo "Adding group \"${RPKI_GROUP}\" failed..."; \ echo "Please create it, then try again."; \ exit 1; \ fi - @if /usr/sbin/pw groupshow "${RPKIRTR_GROUP}" 2>/dev/null; \ + @if /usr/sbin/pw usershow "${RPKI_USER}" 2>/dev/null; \ then \ - echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \ - elif /usr/sbin/pw groupadd ${RPKIRTR_GROUP}; \ + echo "You already have a user \"${RPKI_USER}\", so I will use it."; \ + elif /usr/sbin/pw useradd ${RPKI_USER} -g ${RPKI_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RPKI_GECOS}"; \ then \ - echo "Added group \"${RPKIRTR_GROUP}\"."; \ + echo "Added user \"${RPKI_USER}\"."; \ else \ - echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \ + echo "Adding user \"${RPKI_USER}\" failed..."; \ echo "Please create it, then try again."; \ exit 1; \ fi - @if /usr/sbin/pw usershow "${RCYNIC_USER}" 2>/dev/null; \ - then \ - echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \ - elif /usr/sbin/pw useradd ${RCYNIC_USER} -g ${RCYNIC_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RCYNIC_GECOS}" -G "${RPKIRTR_GROUP}"; \ - then \ - echo "Added user \"${RCYNIC_USER}\"."; \ - else \ - echo "Adding user \"${RCYNIC_USER}\" failed..."; \ - echo "Please create it, then try again."; \ - exit 1; \ - fi - @if /usr/sbin/pw usershow "${RPKIRTR_USER}" 2>/dev/null; \ - then \ - echo "You already have a user \"${RPKIRTR_USER}\", so I will 
use it."; \ - elif /usr/sbin/pw useradd ${RPKIRTR_USER} -g ${RPKIRTR_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RPKIRTR_GECOS}"; \ - then \ - echo "Added user \"${RPKIRTR_USER}\"."; \ - else \ - echo "Adding user \"${RPKIRTR_USER}\" failed..."; \ - echo "Please create it, then try again."; \ - exit 1; \ - fi - - -# We use static compilation on FreeBSD, so no need for shared libraries - -install-shared-libraries: - @true - -install-rc-scripts: - ${INSTALL} -m 555 -o root -g wheel -p rc-scripts/freebsd/rc.d.rcynic ${DESTDIR}/usr/local/etc/rc.d/rcynic diff --git a/rp/rcynic/rules.linux.mk b/rp/rcynic/rules.linux.mk index 6a962cef..c116f75c 100644 --- a/rp/rcynic/rules.linux.mk +++ b/rp/rcynic/rules.linux.mk @@ -1,92 +1,27 @@ # $Id$ install-user-and-group: .FORCE - @if getent group ${RCYNIC_GROUP} >/dev/null; \ + @if getent group ${RPKI_GROUP} >/dev/null; \ then \ - echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \ - elif /usr/sbin/groupadd ${RCYNIC_GROUP}; \ + echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \ + elif /usr/sbin/groupadd ${RPKI_GROUP}; \ then \ - echo "Added group \"${RCYNIC_GROUP}\"."; \ + echo "Added group \"${RPKI_GROUP}\"."; \ else \ - echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \ + echo "Adding group \"${RPKI_GROUP}\" failed..."; \ echo "Please create it, then try again."; \ exit 1; \ fi @nogroup='-N'; \ if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \ - if getent passwd ${RCYNIC_USER} >/dev/null; \ + if getent passwd ${RPKI_USER} >/dev/null; \ then \ - echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \ - elif /usr/sbin/useradd -g ${RCYNIC_GROUP} -M $$nogroup -d "${RCYNIC_DIR}" -s /sbin/nologin -c "${RCYNIC_GECOS}" ${RCYNIC_USER}; \ + echo "You already have a user \"${RPKI_USER}\", so I will use it."; \ + elif /usr/sbin/useradd -g ${RPKI_GROUP} -M $$nogroup -d "${DESTDIR}${RCYNIC_DIR}" -s /sbin/nologin -c "${RPKI_GECOS}" ${RPKI_USER}; \ then \ - echo "Added user \"${RCYNIC_USER}\"."; \ + echo "Added user \"${RPKI_USER}\"."; \ else \ - echo "Adding user \"${RCYNIC_USER}\" failed..."; \ + echo "Adding user \"${RPKI_USER}\" failed..."; \ echo "Please create it, then try again."; \ exit 1; \ fi - @if getent group ${RPKIRTR_GROUP} >/dev/null; \ - then \ - echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \ - elif /usr/sbin/groupadd ${RPKIRTR_GROUP}; \ - then \ - echo "Added group \"${RPKIRTR_GROUP}\"."; \ - else \ - echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \ - echo "Please create it, then try again."; \ - exit 1; \ - fi - @nogroup='-N'; \ - if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \ - if getent passwd ${RPKIRTR_USER} >/dev/null; \ - then \ - echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \ - elif /usr/sbin/useradd -g ${RPKIRTR_GROUP} -M $$nogroup -d "${RPKIRTR_DIR}" -s /sbin/nologin -c "${RPKIRTR_GECOS}" ${RPKIRTR_USER}; \ - then \ - echo "Added user \"${RPKIRTR_USER}\"."; \ - else \ - echo "Adding user \"${RPKIRTR_USER}\" failed..."; \ - echo "Please create it, then try again."; \ - exit 1; \ - fi - usermod -a -G ${RPKIRTR_GROUP} ${RCYNIC_USER} - -install-shared-libraries: .FORCE - @echo "Copying required shared libraries" - @if test -d /lib64; then libdir=/lib64; else libdir=/lib; fi; \ - shared_libraries="${RCYNIC_DIR}/bin/rcynic 
${RCYNIC_DIR}/bin/rsync $$(/usr/bin/find $${libdir} -name 'libnss*.so*' -print)"; \ - while true; \ - do \ - closure="$$(/usr/bin/ldd $${shared_libraries} | \ - ${AWK} ' \ - { sub(/:$/, "") } \ - $$0 == "${RCYNIC_DIR}/bin/rcynic" { next } \ - $$0 == "${RCYNIC_DIR}/bin/rsync" { next } \ - $$1 ~ /\/ld-linux\.so/ { next } \ - { for (i = 1; i <= NF; i++) if ($$i ~ /^\//) print $$i } \ - ' | \ - ${SORT} -u)"; \ - if test "X$$shared_libraries" = "X$$closure"; \ - then \ - break; \ - else \ - shared_libraries="$$closure"; \ - fi; \ - done; \ - if test -f $${libdir}/libresolv.so.2; \ - then \ - shared_libraries="$${shared_libraries} $${libdir}/libresolv.so.2"; - fi; \ - for shared in $${libdir}/*ld*.so* $$shared_libraries; \ - do \ - if test ! -r "${RCYNIC_DIR}/$${shared}"; \ - then \ - ${INSTALL} -m 555 -d `dirname "${RCYNIC_DIR}$${shared}"` && \ - ${INSTALL} -m 555 -p "$${shared}" "${RCYNIC_DIR}$${shared}"; \ - fi; \ - done - -# No devfs, so no rc script - -install-rc-scripts: - @true diff --git a/rp/rcynic/rules.unknown.mk b/rp/rcynic/rules.unknown.mk index 6ce3ea18..03cbd858 100644 --- a/rp/rcynic/rules.unknown.mk +++ b/rp/rcynic/rules.unknown.mk @@ -1,4 +1,4 @@ # $Id$ -install-user-and-group install-shared-libraries install-rc-scripts: .FORCE +install-user-and-group: .FORCE @echo "Don't know how to make $@ on this platform"; exit 1 diff --git a/rp/rcynic/sample-trust-anchors/apnic-testbed.tal b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal.disabled index f87a3bf3..f87a3bf3 100644 --- a/rp/rcynic/sample-trust-anchors/apnic-testbed.tal +++ b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal.disabled diff --git a/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal.disabled index 1e466300..1e466300 100644 --- a/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal +++ b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal.disabled diff --git a/rp/rcynic/static-rsync/Makefile.in b/rp/rcynic/static-rsync/Makefile.in deleted file mode 100644 index 8a433c7b..00000000 --- a/rp/rcynic/static-rsync/Makefile.in +++ /dev/null @@ -1,44 +0,0 @@ -# $Id$ - -VERSION = 2.6.9 - -CFG_ENV = CFLAGS='@CFLAGS@' LDFLAGS='@LDFLAGS@ @LD_STATIC_FLAG@' -CFG_ARG = - -TARBALL = rsync-${VERSION}.tar.gz -DIRNAME = rsync-${VERSION} - -CFG_LOG = > ../config.log 2>&1 -BIN_LOG = > ../build.log 2>&1 - -BIN = rsync - -abs_top_srcdir = @abs_top_srcdir@ -abs_top_builddir = @abs_top_builddir@ - -all: ${BIN} - -${BIN}: ${DIRNAME}/${BIN} - ln ${DIRNAME}/${BIN} $@ - file $@ - -${DIRNAME}/${BIN}: configured.stamp - cd ${DIRNAME} && ${MAKE} ${BIN_LOG} - -extracted.stamp: ${TARBALL} - gzip -c -d ${TARBALL} | tar -xf - - touch $@ - -patched.stamp: extracted.stamp - for i in patches/patch-*; do if test -f "$$i"; then patch -d ${DIRNAME} <"$$i"; else :; fi; done - touch $@ - -configured.stamp: patched.stamp - cd ${DIRNAME} && ${CFG_ENV} ./configure ${CFG_ARG} ${CFG_LOG} - touch $@ - -clean: - rm -rf ${BIN} ${DIRNAME} *.stamp *.log - -distclean: clean - rm -f Makefile diff --git a/rp/rcynic/static-rsync/README b/rp/rcynic/static-rsync/README deleted file mode 100644 index 9ff5afa8..00000000 --- a/rp/rcynic/static-rsync/README +++ /dev/null @@ -1,15 +0,0 @@ -$Id$ - -Hack to build a static rsync binary suitable for use in a chroot jail. - -The default configuration is for gcc, since that's the most widely -used compiler on the platforms we use. 
I've provided hooks intended -to make it simple to support other compilers just by overriding make -variables on the command line: if you need to do something more -drastic than this to get your compiler working, please tell me. - -If your platform doesn't support static binaries at all, you're on -your own (and should whine at your OS vendor, as this is nuts). - -We try to stick with rsync release code, but apply security patches -when necessary. diff --git a/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091 b/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091 deleted file mode 100644 index 201af96a..00000000 --- a/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091 +++ /dev/null @@ -1,60 +0,0 @@ ---- sender.c 2006-09-20 03:53:32.000000000 +0200 -+++ sender.c 2007-07-25 15:33:05.000000000 +0200 -@@ -123,6 +123,7 @@ - char fname[MAXPATHLEN]; - struct file_struct *file; - unsigned int offset; -+ size_t l = 0; - - if (ndx < 0 || ndx >= the_file_list->count) - return; -@@ -133,6 +134,20 @@ - file->dir.root, "/", NULL); - } else - offset = 0; -+ -+ l = offset + 1; -+ if (file) { -+ if (file->dirname) -+ l += strlen(file->dirname); -+ if (file->basename) -+ l += strlen(file->basename); -+ } -+ -+ if (l >= sizeof(fname)) { -+ rprintf(FERROR, "Overlong pathname\n"); -+ exit_cleanup(RERR_FILESELECT); -+ } -+ - f_name(file, fname + offset); - if (remove_source_files) { - if (do_unlink(fname) == 0) { -@@ -224,6 +239,7 @@ - enum logcode log_code = log_before_transfer ? FLOG : FINFO; - int f_xfer = write_batch < 0 ? batch_fd : f_out; - int i, j; -+ size_t l = 0; - - if (verbose > 2) - rprintf(FINFO, "send_files starting\n"); -@@ -259,6 +275,20 @@ - fname[offset++] = '/'; - } else - offset = 0; -+ -+ l = offset + 1; -+ if (file) { -+ if (file->dirname) -+ l += strlen(file->dirname); -+ if (file->basename) -+ l += strlen(file->basename); -+ } -+ -+ if (l >= sizeof(fname)) { -+ rprintf(FERROR, "Overlong pathname\n"); -+ exit_cleanup(RERR_FILESELECT); -+ } -+ - fname2 = f_name(file, fname + offset); - - if (verbose > 2) diff --git a/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz b/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz Binary files differ deleted file mode 100644 index 6377f639..00000000 --- a/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz +++ /dev/null diff --git a/rp/rcynic/validation_status b/rp/rcynic/validation_status index a3ee36f1..d8e2c8ae 100755 --- a/rp/rcynic/validation_status +++ b/rp/rcynic/validation_status @@ -23,14 +23,13 @@ Flat text listing of <validation_status/> elements from rcynic.xml. import sys try: - from lxml.etree import ElementTree + from lxml.etree import ElementTree except ImportError: - from xml.etree.ElementTree import ElementTree + from xml.etree.ElementTree import ElementTree for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]): - for elt in ElementTree(file = filename).findall("validation_status"): - print "%s %8s %-40s %s" % ( - elt.get("timestamp"), - elt.get("generation"), - elt.get("status"), - elt.text.strip()) + for elt in ElementTree(file = filename).findall("validation_status"): + print "%s %-40s %s" % ( + elt.get("timestamp"), + elt.get("status"), + elt.text.strip())
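
ZipFile.fetch() above avoids re-downloading unchanged snapshots with a standard HTTP conditional GET: the mtime of the local copy goes out as If-Modified-Since, and a 304 response means there is nothing to do. Below is the same pattern in isolation, a minimal sketch assuming Python 2's urllib2 and a hypothetical url/filename pair supplied by the caller.

    import email.utils
    import os
    import urllib2

    def open_if_modified(url, filename):
        # Hypothetical standalone helper mirroring ZipFile.fetch() above.
        # (fetch() routes the request through its TLS-validating opener
        # instead of plain urlopen().)
        headers = {"User-Agent": "rpki-torrent"}
        try:
            # Format the local file's mtime as an RFC 2822 GMT date.
            headers["If-Modified-Since"] = email.utils.formatdate(
                os.path.getmtime(filename), False, True)
        except OSError:
            pass                        # no local copy yet: unconditional GET
        try:
            return urllib2.urlopen(urllib2.Request(url, None, headers))
        except urllib2.HTTPError, e:
            if e.code == 304:
                return None             # local copy is still current
            raise

fetch() goes further than this sketch: it refuses redirects (UnexpectedRedirect) and stamps the downloaded file with the server's Last-Modified time, so the next conditional request reflects what the server actually sent.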
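
The security of that fetch rests on build_opener() and check_subjectAltNames(): the TLS handshake is forced to validate the server certificate against the configured trust anchor (zip_ta), and the peer certificate's DNS subjectAltName entries are then compared with the hostname in the URL, since Python 2's urllib2 performs neither check by itself. The helper below is a minimal sketch of the hostname comparison, assuming a peercert dict of the shape returned by ssl.SSLSocket.getpeercert() on a verified connection.

    import urlparse

    def peer_matches_url(url, peercert):
        # getpeercert() returns subjectAltName as a tuple of (type, value)
        # pairs, e.g. (("DNS", "mirror.example.org"),); the URL's hostname
        # must match one of the DNS entries or we reached the wrong server.
        hostname = urlparse.urlparse(url).hostname
        names = set(value for kind, value in peercert.get("subjectAltName", ())
                    if kind == "DNS")
        return hostname in names

One detail worth knowing: getpeercert() only returns certificate contents when the socket was opened with cert_reqs = ssl.CERT_REQUIRED, which is why build_opener() asserts that a trust anchor is configured before doing anything else.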
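
The manifest that travels inside each zip is nothing more than a dictionary mapping relative path to SHA-256 digest, serialized one entry per line with the hash first; generator_main() writes it as "%s %s\n" % (hash, path) and get_manifest() parses it back. A round-trip sketch under those same conventions (the sorted() call is an addition here, for reproducible output, not something the code above does):

    def serialize_manifest(manifest):
        # manifest maps relative path -> sha256 hex digest of file contents
        return "".join("%s %s\n" % (digest, path)
                       for path, digest in sorted(manifest.iteritems()))

    def parse_manifest(text):
        result = {}
        for line in text.splitlines():
            digest, path = line.split()
            result[path] = digest
        return result

run_rcynic() recomputes the manifest from the download directory with create_manifest(), sets aside (and later deletes) files the zip's manifest never mentioned, and refuses to run rcynic at all if the two dictionaries still disagree.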
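
Seeding policy is the one piece of Transmission state the script manages explicitly: a freshly added torrent would otherwise stop at the session's seed-ratio limit, and a snapshot distribution network wants every mirror seeding indefinitely. In Transmission's RPC protocol, a per-torrent seedRatioMode of 2 means "seed regardless of ratio", which is what unlimited_seeding() sets. A minimal standalone sketch, assuming a local Transmission daemon with RPC enabled, the same transmissionrpc library, and a hypothetical torrent name:

    import transmissionrpc

    client = transmissionrpc.client.Client(address = "127.0.0.1")

    # find_torrents() above does the same scan: ids of torrents whose
    # name matches ("global" is a hypothetical torrent name)
    ids = [i for i, t in client.list().iteritems() if t.name == "global"]
    if ids:
        # seedRatioMode: 0 = follow the global setting, 1 = use this
        # torrent's own limit, 2 = seed regardless of ratio
        client.change(ids, seedRatioMode = 2)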
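
On the generator side, the upload in generator_main() depends on the posix-rename@openssh.com SFTP extension: the standard SFTP RENAME request fails when the target already exists, so the zip is written under a ".new" temporary name and renamed over the published name in one atomic step, and mirrors polling the URL never see a half-written file. The subclass below reproduces the one in the diff; treat it as a sketch that assumes a paramiko version whose private SFTPClient helpers (_adjust_cwd, _request, _log) behave as they do here, plus an OpenSSH server, since the extension is OpenSSH-specific.

    import paramiko

    class AtomicSFTPClient(paramiko.SFTPClient):
        def atomic_rename(self, oldpath, newpath):
            # posix-rename@openssh.com overwrites an existing target
            # atomically, unlike the standard SFTP RENAME request
            oldpath = self._adjust_cwd(oldpath)
            newpath = self._adjust_cwd(newpath)
            self._log(paramiko.common.DEBUG, "atomic_rename(%r, %r)" % (oldpath, newpath))
            self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath)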
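
Finally, MyConfigParser.multioption_iter() implements a small convention for repeatable options in a flat INI file: a base option name plus numbered variants (zip_url, zip_url.1, zip_url.2, ...), yielded base first and then in numeric order. A minimal sketch of the convention, assuming MyConfigParser from the script above is importable and using hypothetical URLs:

    from StringIO import StringIO

    sample = StringIO("""\
    [rpki-torrent]
    zip_url = https://mirror.example.org/one.zip
    zip_url.1 = https://mirror.example.org/two.zip
    zip_url.2 = https://mirror.example.org/three.zip
    """)

    cfg = MyConfigParser()
    cfg.readfp(sample)
    print list(cfg.zip_urls)   # one.zip, two.zip, three.zip, in that order

find_url() leans on the same iterator to map a completed torrent's name back to exactly one configured zip URL, raising TorrentNameDoesNotMatchURL if the match is not unique.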