aboutsummaryrefslogtreecommitdiff
path: root/rp
diff options
context:
space:
mode:
Diffstat (limited to 'rp')
-rw-r--r--rp/Makefile.in2
-rw-r--r--rp/config/Makefile.in88
l---------rp/config/rpki1
-rwxr-xr-xrp/config/rpki-confgen281
-rw-r--r--rp/config/rpki-confgen.xml1062
-rwxr-xr-xrp/config/rpki-generate-root-certificate77
-rwxr-xr-xrp/config/rpki-manage46
-rwxr-xr-xrp/config/rpki-sql-backup63
-rwxr-xr-xrp/config/rpki-sql-setup348
-rw-r--r--rp/rcynic/Makefile.in96
-rwxr-xr-xrp/rcynic/rc-scripts/darwin/RCynic42
-rw-r--r--rp/rcynic/rc-scripts/darwin/StartupParameters.plist19
-rwxr-xr-xrp/rcynic/rc-scripts/freebsd/rc.d.rcynic44
-rwxr-xr-xrp/rcynic/rcynic-cron90
-rwxr-xr-xrp/rcynic/rcynic-dump95
-rwxr-xr-xrp/rcynic/rcynic-html934
-rwxr-xr-xrp/rcynic/rcynic-svn107
-rwxr-xr-xrp/rcynic/rcynic-text144
-rw-r--r--rp/rcynic/rcynic.c4
-rwxr-xr-xrp/rcynic/rcynicng1478
-rw-r--r--rp/rcynic/rpki-torrent.py1166
-rw-r--r--rp/rcynic/rules.darwin.mk114
-rw-r--r--rp/rcynic/rules.freebsd.mk51
-rw-r--r--rp/rcynic/rules.linux.mk85
-rw-r--r--rp/rcynic/rules.unknown.mk2
-rw-r--r--rp/rcynic/sample-trust-anchors/apnic-testbed.tal.disabled (renamed from rp/rcynic/sample-trust-anchors/apnic-testbed.tal)0
-rw-r--r--rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal.disabled (renamed from rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal)0
-rw-r--r--rp/rcynic/static-rsync/Makefile.in44
-rw-r--r--rp/rcynic/static-rsync/README15
-rw-r--r--rp/rcynic/static-rsync/patches/patch-CVE-2007-409160
-rw-r--r--rp/rcynic/static-rsync/rsync-2.6.9.tar.gzbin811841 -> 0 bytes
-rwxr-xr-xrp/rcynic/validation_status15
-rwxr-xr-xrp/rpki-rtr/rpki-rtr4
-rw-r--r--rp/rpki-rtr/rules.freebsd.mk4
-rw-r--r--rp/rpki-rtr/rules.linux.mk2
-rwxr-xr-xrp/utils/find_roa233
-rwxr-xr-xrp/utils/hashdir60
-rwxr-xr-xrp/utils/print_roa89
-rwxr-xr-xrp/utils/print_rpki_manifest44
-rwxr-xr-xrp/utils/scan_roas67
-rwxr-xr-xrp/utils/scan_routercerts39
-rwxr-xr-xrp/utils/uri80
42 files changed, 5151 insertions, 2044 deletions
diff --git a/rp/Makefile.in b/rp/Makefile.in
index 2c770a46..d22ddbcb 100644
--- a/rp/Makefile.in
+++ b/rp/Makefile.in
@@ -1,6 +1,6 @@
# $Id$
-SUBDIRS = rcynic rpki-rtr utils
+SUBDIRS = config rcynic rpki-rtr utils
all clean test distclean install deinstall uninstall::
@for i in ${SUBDIRS}; do echo "Making $@ in $$i"; (cd $$i && ${MAKE} $@); done
diff --git a/rp/config/Makefile.in b/rp/config/Makefile.in
new file mode 100644
index 00000000..c6050f3e
--- /dev/null
+++ b/rp/config/Makefile.in
@@ -0,0 +1,88 @@
+# $Id$
+
+PYTHON = @PYTHON@
+
+INSTALL = @INSTALL@ -m 555
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+datarootdir = @datarootdir@
+datadir = @datadir@
+localstatedir = @localstatedir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+sysconfdir = @sysconfdir@
+
+abs_builddir = @abs_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir= @abs_top_builddir@
+srcdir = @srcdir@
+
+CFG_INSTALL_TARGETS = @CFG_INSTALL_TARGETS@
+
+all:: rpki.rp.xml rpki.rp.conf.sample
+
+clean::
+ @true
+
+install:: ${CFG_INSTALL_TARGETS}
+
+install-always:: all
+ @echo
+ @echo "== Default configuration file location is ${sysconfdir}/rpki.conf =="
+ @echo
+ ${INSTALL} -d ${DESTDIR}${sysconfdir}/rpki
+ ${INSTALL} rpki.rp.xml rpki.rp.conf.sample ${DESTDIR}${sysconfdir}/rpki
+
+test uninstall deinstall::
+ @true
+
+distclean:: clean
+ rm -f Makefile
+
+rpki.rp.xml: ${abs_top_srcdir}/rpki/autoconf.py rpki-confgen rpki-confgen.xml
+ ${PYTHON} rpki-confgen \
+ --read-xml rpki-confgen.xml \
+ --autoconf \
+ --set myrpki::handle=`hostname -f | sed 's/[.]/_/g'` \
+ --set myrpki::rpkid_server_host=`hostname -f` \
+ --set myrpki::pubd_server_host=`hostname -f` \
+ --pwgen myrpki::shared_sql_password \
+ --pwgen web_portal::secret-key \
+ --set myrpki::run_rpkid=no \
+ --set myrpki::run_pubd=no \
+ --write-xml $@
+
+rpki.rp.conf.sample: rpki.rp.xml
+ ${PYTHON} rpki-confgen \
+ --read-xml rpki.rp.xml \
+ --write-conf $@
+
+clean::
+ rm -f rpki.rp.xml rpki.rp.conf.sample
+
+install-postconf: \
+ install-user install-conf install-sql install-django
+
+# This should create user "rpki" and group "rpki", but rcynic already
+# does that...but we probably need to do it here instead, bother.
+
+install-user:
+ @true
+
+install-conf:
+ test -f ${DESTDIR}${sysconfdir}/rpki.conf ||\
+ cp -p ${DESTDIR}${sysconfdir}/rpki/rpki.rp.conf.sample ${DESTDIR}${sysconfdir}/rpki.conf
+
+#uninstall deinstall::
+# rm -f ${DESTDIR}${sysconfdir}/rpki/rpki.rp.xml ${DESTDIR}${sysconfdir}/rpki/rpki.rp.conf.sample
+
+install-sql:
+ ${sbindir}/rpki-sql-setup create
+
+install-django:
+ ${sbindir}/rpki-manage syncdb --noinput
+ ${sbindir}/rpki-manage migrate app
diff --git a/rp/config/rpki b/rp/config/rpki
new file mode 120000
index 00000000..d39d05b6
--- /dev/null
+++ b/rp/config/rpki
@@ -0,0 +1 @@
+../../rpki \ No newline at end of file
diff --git a/rp/config/rpki-confgen b/rp/config/rpki-confgen
new file mode 100755
index 00000000..7fac9eab
--- /dev/null
+++ b/rp/config/rpki-confgen
@@ -0,0 +1,281 @@
+#!/usr/bin/env python
+
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2013 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+import os
+import sys
+import argparse
+import base64
+import textwrap
+
+from lxml.etree import Element, SubElement, ElementTree, Comment
+
+space4 = " " * 4
+space6 = " " * 6
+space8 = " " * 8
+star78 = "*" * 78
+
+wiki_wrapper = textwrap.TextWrapper()
+conf_wrapper = textwrap.TextWrapper(initial_indent = "# ", subsequent_indent = "# ")
+xml6_wrapper = textwrap.TextWrapper(initial_indent = space6, subsequent_indent = space6)
+xml8_wrapper = textwrap.TextWrapper(initial_indent = space8, subsequent_indent = space8)
+
+class Option(object):
+
+ def __init__(self, name, value, doc):
+ self.name = name
+ self.value = value
+ self.doc = doc
+
+ @property
+ def width(self):
+ return len(self.name)
+
+ def to_xml(self):
+ x = Element("option", name = self.name)
+ if self.value is not None:
+ x.set("value", self.value)
+ for d in self.doc:
+ SubElement(x, "doc").text = "\n" + xml8_wrapper.fill(d) + "\n" + space6
+ return x
+
+ def to_wiki(self, f):
+ f.write("\n== {0.name} == #{0.name}\n".format(self))
+ for d in self.doc:
+ f.write("\n{0}\n".format(wiki_wrapper.fill(d)))
+ if self.value is None:
+ f.write("\n{0}\n".format(wiki_wrapper.fill("No default value.")))
+ else:
+ f.write("\n{{{{{{\n#!ini\n{0.name} = {0.value}\n}}}}}}\n".format(self))
+
+ def to_conf(self, f, width):
+ for i, d in enumerate(self.doc):
+ f.write("{}\n{}\n".format("" if i == 0 else "#",
+ conf_wrapper.fill(d)))
+ if self.value is None:
+ f.write("\n#{1.name:{0}} = ???\n".format(width - 1, self))
+ else:
+ f.write("\n{1.name:{0}} = {1.value}\n".format(width, self))
+
+class Section(object):
+
+ def __init__(self, name):
+ self.name = name
+ self.doc = []
+ self.options = []
+
+ @property
+ def width(self):
+ return max(o.width for o in self.options)
+
+ @classmethod
+ def from_xml(cls, elt):
+ self = cls(name = elt.get("name"))
+ for x in elt.iterchildren("doc"):
+ self.doc.append(" ".join(x.text.split()))
+ for x in elt.iterchildren("option"):
+ self.options.append(Option(name = x.get("name"), value = x.get("value"),
+ doc = [" ".join(d.text.split())
+ for d in x.iterchildren("doc")]))
+ return self
+
+ def to_xml(self):
+ x = Element("section", name = self.name)
+ for d in self.doc:
+ SubElement(x, "doc").text = "\n" + xml6_wrapper.fill(d) + "\n" + space4
+ x.extend(o.to_xml() for o in self.options)
+ return x
+
+ def to_wiki(self, f):
+ f.write("\n= [{0}] section = #{0}\n".format(self.name))
+ for d in self.doc:
+ f.write("\n{0}\n".format(wiki_wrapper.fill(d)))
+ for o in self.options:
+ o.to_wiki(f)
+
+ def to_conf(self, f, width):
+ f.write("\n" + "#" * 78 + "\n\n[" + self.name + "]\n")
+ if self.doc:
+ f.write("\n##")
+ for i, d in enumerate(self.doc):
+ f.write("{}\n{}\n".format("" if i == 0 else "#",
+ conf_wrapper.fill(d)))
+ f.write("##\n")
+ for o in self.options:
+ o.to_conf(f, width)
+
+def wiki_header(f, ident, toc):
+ f.write(textwrap.dedent('''\
+ {{{{{{
+ #!comment
+
+ {star78}
+ THIS PAGE WAS GENERATED AUTOMATICALLY, DO NOT EDIT.
+
+ Generated from {ident}
+ by $Id$
+ {star78}
+
+ }}}}}}
+ '''.format(star78 = star78,
+ ident = ident)))
+ if toc is not None:
+ f.write("[[TracNav({})]]\n".format(toc))
+ f.write("[[PageOutline]]\n")
+
+def conf_header(f, ident):
+ f.write(textwrap.dedent('''\
+ # Automatically generated. Edit as needed, but be careful of overwriting.
+ #
+ # Generated from {ident}
+ # by $Id$
+
+ '''.format(ident = ident)))
+
+
+# http://stackoverflow.com/questions/9027028/argparse-argument-order
+
+class CustomAction(argparse.Action):
+
+ def __call__(self, parser, namespace, values, option_string = None):
+ if not "ordered_args" in namespace:
+ namespace.ordered_args = []
+ namespace.ordered_args.append((self.dest, values))
+
+class CustomFlagAction(CustomAction):
+
+ def __init__(self, option_strings, dest, default = None,
+ required = False, help = None): # pylint: disable=W0622
+ super(CustomFlagAction, self).__init__(
+ option_strings = option_strings,
+ dest = dest,
+ nargs = 0,
+ const = None,
+ default = default,
+ required = required,
+ help = help)
+
+
+class main(object):
+
+ def __init__(self):
+ self.sections = []
+ self.section_map = None
+ self.option_map = None
+ self.ident = None
+ self.toc = None
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("--read-xml", type = argparse.FileType("r"), metavar = "FILE", action = CustomAction, help = "XML input file defining sections and options", required = True)
+ parser.add_argument("--write-xml", type = argparse.FileType("w"), metavar = "FILE", action = CustomAction, help = "XML output file to snapshot configuration")
+ parser.add_argument("--write-conf", type = argparse.FileType("w"), metavar = "FILE", action = CustomAction, help = "rpki.conf configuration file to write")
+ parser.add_argument("--write-wiki", type = argparse.FileType("w"), metavar = "FILE", action = CustomAction, help = "TracWiki file to write (monolithic)")
+ parser.add_argument("--write-wiki-pages", metavar = "PATTERN", action = CustomAction, help = "TracWiki filenames (pattern) to write (one section per page)")
+ parser.add_argument("--set", metavar = "VARVAL", action = CustomAction, help = "variable setting in form \"VAR=VAL\"")
+ parser.add_argument("--pwgen", metavar = "VAR", action = CustomAction, help = "set variable to generated password")
+ parser.add_argument("--toc", metavar = "TOCVAL", action = CustomAction, help = "set TOC value to use with TracNav plugin")
+ parser.add_argument("--autoconf", action = CustomFlagAction, help = "configure [autoconf] section")
+ args = parser.parse_args()
+
+ for cmd, arg in args.ordered_args:
+ getattr(self, "do_" + cmd)(arg)
+
+ def do_read_xml(self, arg):
+ self.option_map = None
+ root = ElementTree(file = arg).getroot()
+ self.ident = root.get("ident")
+ self.sections.extend(Section.from_xml(x) for x in root.iterchildren("section"))
+ self.option_map = {}
+ self.section_map = {}
+ for section in self.sections:
+ if section.name in self.section_map:
+ sys.exit("Duplicate section {}".format(section.name))
+ self.section_map[section.name] = section
+ for option in section.options:
+ name = (section.name, option.name)
+ if name in self.option_map:
+ sys.exit("Duplicate option {}::{}".format(*name))
+ self.option_map[name] = option
+
+ def do_set(self, arg):
+ try:
+ name, value = arg.split("=", 1)
+ section, option = name.split("::")
+ except ValueError:
+ sys.exit("Couldn't parse --set specification \"{}\"".format(arg))
+ name = (section, option)
+ if name not in self.option_map:
+ sys.exit("Couldn't find option {}::{}".format(*name))
+ self.option_map[name].value = value
+
+    def do_pwgen(self, arg):
+        try:
+            section, option = arg.split("::")
+        except ValueError:
+            sys.exit("Couldn't parse --pwgen specification \"{}\"".format(arg))
+        name = (section, option)
+        if name not in self.option_map:
+            sys.exit("Couldn't find option {}::{}".format(*name))
+        self.option_map[name].value = base64.urlsafe_b64encode(os.urandom(66))
+
+ def do_autoconf(self, ignored):
+ try:
+ import rpki.autoconf
+ for option in self.section_map["autoconf"].options:
+ try:
+ option.value = getattr(rpki.autoconf, option.name)
+ except AttributeError:
+ pass
+ except ImportError:
+ sys.exit("rpki.autoconf module is not available")
+ except KeyError:
+ sys.exit("Couldn't find autoconf section")
+
+ def do_write_xml(self, arg):
+ x = Element("configuration", ident = self.ident)
+ x.append(Comment(" Machine-editable configuration snapshot, generated automatically, do not touch "))
+ x.extend(s.to_xml() for s in self.sections)
+ ElementTree(x).write(arg, pretty_print = True, encoding = "us-ascii")
+
+ def do_write_wiki(self, arg):
+ for i, section in enumerate(self.sections):
+ if i == 0:
+ wiki_header(arg, self.ident, self.toc)
+ else:
+ arg.write("\f\n")
+ section.to_wiki(arg)
+
+ def do_write_wiki_pages(self, arg):
+ for section in self.sections:
+ with open(arg % section.name, "w") as f:
+ wiki_header(f, self.ident, self.toc)
+ section.to_wiki(f)
+
+ def do_write_conf(self, arg):
+ conf_header(arg, self.ident)
+ width = max(s.width for s in self.sections)
+ for section in self.sections:
+ section.to_conf(arg, width)
+
+ def do_toc(self, arg):
+ self.toc = arg
+
+
+if __name__ == "__main__":
+ main()
diff --git a/rp/config/rpki-confgen.xml b/rp/config/rpki-confgen.xml
new file mode 100644
index 00000000..b7bc2f62
--- /dev/null
+++ b/rp/config/rpki-confgen.xml
@@ -0,0 +1,1062 @@
+<!-- -*- SGML -*-
+ $Id$
+
+ Documented option definitions for rpki-confgen to use in generating
+ rpki.conf and TracWiki documentation.
+
+ Copyright (C) 2009-2013 Internet Systems Consortium ("ISC")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<configuration ident = "$Id$">
+
+ <section name = "myrpki">
+
+ <doc>
+ The "`[myrpki]`" section contains all the parameters that you
+ really need to configure. The name "`myrpki`" is historical and
+ may change in the future.
+ </doc>
+
+ <option name = "handle">
+ <doc>
+ Every resource-holding or server-operating entity needs a
+ "handle", which is just an identifier by which the entity
+ calls itself. Handles do not need to be globally unique, but
+ should be chosen with an eye towards debugging operational
+ problems: it's best if you use a handle that your parents and
+ children will recognize as being you.
+ </doc>
+ <doc>
+ The "`handle`" option in the "`[myrpki]`" section specifies the
+ default handle for this installation. Previous versions of
+ the CA tools required a separate configuration file, each with
+ its own handle setting, for each hosted entity. The current
+ code allows the current handle to be selected at runtime in
+ both the GUI and command line user interface tools, so the
+ handle setting here is just the default when you don't set one
+        explicitly. In the long run, this option may go away entirely,
+ but for now you need to set this.
+ </doc>
+ <doc>
+ Syntax is an identifier (ASCII letters, digits, hyphen,
+ underscore -- no whitespace, non-ASCII characters, or other
+ punctuation).
+ </doc>
+ </option>
+
+ <option name = "bpki_servers_directory"
+ value = "${autoconf::datarootdir}/rpki/bpki">
+ <doc>
+ Directory for BPKI files generated by rpkic and used by rpkid
+ and pubd. You will not normally need to change this.
+ </doc>
+ </option>
+
+ <option name = "run_rpkid"
+ value = "yes">
+ <doc>
+ Whether you want to run your own copy of rpkid (and irdbd).
+ Leave this alone unless you're doing something unusual like
+ running a pubd-only installation.
+ </doc>
+ </option>
+
+ <option name = "rpkid_server_host">
+ <doc>
+ DNS hostname for rpkid. In most cases, this must resolve to a
+ publicly-reachable address to be useful, as your RPKI children
+ will need to contact your rpkid at this address.
+ </doc>
+ </option>
+
+ <option name = "rpkid_server_port"
+ value = "4404">
+ <doc>
+ Server port number for rpkid. This can be any legal TCP port
+ number that you're not using for something else.
+ </doc>
+ </option>
+
+ <option name = "irdbd_server_host"
+ value = "localhost">
+ <doc>
+ DNS hostname for irdbd, or "`localhost`". This should be
+ "`localhost`" unless you really know what you are doing.
+ </doc>
+ </option>
+
+ <option name = "irdbd_server_port"
+ value = "4403">
+ <doc>
+ Server port number for irdbd. This can be any legal TCP port
+ number that you're not using for something else.
+ </doc>
+ </option>
+
+ <option name = "run_pubd"
+ value = "yes">
+ <doc>
+ Whether you want to run your own copy of pubd. In general,
+ it's best to use your parent's pubd if your parent allows you
+ to do so, because this will reduce the overall number of
+ publication sites from which relying parties will need to
+ retrieve data. However, not all parents offer publication
+ service, or you may need to run pubd yourself for reliability
+ reasons, or because you're certifying private address space or
+ private Autonomous System Numbers.
+ </doc>
+ <doc>
+ The out of band setup protocol will attempt to negotiate
+ publication service for you with whatever publication service
+ your parent is using, if it can and if you let it.
+ </doc>
+ </option>
+
+ <option name = "pubd_server_host">
+ <doc>
+ DNS hostname for pubd, if you're running it. This must
+ resolve to a publicly reachable address to be useful.
+ </doc>
+ </option>
+
+ <option name = "pubd_server_port"
+ value = "4402">
+ <doc>
+ Server port number for pubd. This can be any legal TCP port
+ number that you're not using for something else.
+ </doc>
+ </option>
+
+ <option name = "pubd_contact_info">
+ <doc>
+ Contact information to include in offers of repository
+ service. This only matters when you're running pubd. This
+ should be a human readable string, perhaps containing an email
+ address or URL.
+ </doc>
+ </option>
+
+ <option name = "publication_base_directory"
+ value = "${autoconf::datarootdir}/rpki/publication">
+ <doc>
+ Root of local directory tree where pubd should write out published
+ data. You need to configure this, and the configuration should
+ match up with the directory where you point rsyncd. Neither pubd
+ nor rsyncd much cares //where// you tell it to put this stuff, the
+ important thing is that the rsync URIs in generated
+ certificates match up with the published objects so that relying
+ parties can find and verify rpkid's published outputs.
+ </doc>
+ </option>
+
+ <option name = "rrdp_publication_base_directory"
+ value = "${autoconf::datarootdir}/rpki/rrdp-publication">
+ <doc>
+ Root of local directory tree where pubd should write out RRDP
+ files. You need to configure this, and the configuration
+ should match up with the directory where you point the web
+ server (usually Apache) that serves the RRDP files. Neither
+ pubd nor Apache much cares //where// you tell it to put this
+ stuff, the important thing is that all the URIs match up so
+ that relying parties can find and verify rpkid's published
+ outputs.
+ </doc>
+ </option>
+
+ <option name = "publication_rsync_module"
+ value = "rpki">
+ <doc>
+ rsyncd module name corresponding to publication_base_directory.
+ This has to match the module you configured into `rsyncd.conf`.
+ Leave this alone unless you have some need to change it.
+ </doc>
+ </option>
+
+ <option name = "publication_rsync_server"
+ value = "${myrpki::pubd_server_host}">
+ <doc>
+ Hostname and optional port number for rsync URIs. In most cases
+ this should just be the same value as pubd_server_host.
+ </doc>
+ </option>
+
+ <option name = "publication_rrdp_base_uri"
+ value = "https://${myrpki::pubd_server_host}/rrdp/">
+ <doc>
+ Base URI for RRDP notification, snapshot, and delta files.
+ In most cases this should be a HTTPS URL for the directory
+ on the publication server where the notify.xml lives.
+ </doc>
+ </option>
+
+ <option name = "publication_rrdp_notification_uri"
+ value = "${myrpki::publication_rrdp_base_uri}notify.xml">
+ <doc>
+ URI for RRDP notification file. You shouldn't need to change this.
+ </doc>
+ </option>
+
+ <option name = "start_rpkid"
+ value = "${myrpki::run_rpkid}">
+ <doc>
+ rpkid startup control. This should usually have the same value as
+ run_rpkid: the only case where you would want to change this is
+ when you are running the back-end code on a different machine from
+ one or more of the daemons, in which case you need finer control
+ over which daemons to start on which machines. In such cases,
+ run_rpkid controls whether the back-end code is doing things to
+ manage rpkid, while start_rpkid controls whether
+ rpki-start-servers attempts to start rpkid on this machine.
+ </doc>
+ </option>
+
+ <option name = "start_irdbd"
+ value = "${myrpki::run_rpkid}">
+ <doc>
+ irdbd startup control. This should usually have the same value as
+ run_rpkid: the only case where you would want to change this is
+ when you are running the back-end code on a different machine from
+ one or more of the daemons, in which case you need finer control
+ over which daemons to start on which machines. In such cases,
+ run_rpkid controls whether the back-end code is doing things to
+ manage rpkid, while start_irdbd controls whether
+ rpki-start-servers attempts to start irdbd on this machine.
+ </doc>
+ </option>
+
+ <option name = "start_pubd"
+ value = "${myrpki::run_pubd}">
+ <doc>
+ pubd startup control. This should usually have the same value as
+ run_pubd: the only case where you would want to change this is
+ when you are running the back-end code on a different machine from
+ one or more of the daemons, in which case you need finer control
+ over which daemons to start on which machines. In such cases,
+ run_pubd controls whether the back-end code is doing things to
+ manage pubd, while start_pubd controls whether
+ rpki-start-servers attempts to start pubd on this machine.
+ </doc>
+ </option>
+
+ <option name = "shared_sql_engine"
+ value = "mysql">
+ <doc>
+ Database engine to use. Default is MySQL, because that's what
+ we've been using for years. Now that all runtime database
+ access is via Django ORM, changing to another engine supported
+ by Django is just a configuration issue.
+ </doc>
+ <doc>
+ Current supported values are "mysql" (the default), "sqlite3",
+ and "postgresql". In theory it should be straightforward to
+ add support for any SQL engine Django supports.
+ </doc>
+ </option>
+
+ <option name = "shared_sql_username"
+ value = "rpki">
+ <doc>
+ If you're comfortable with having all of the databases use the
+ same SQL username, set that value here. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "shared_sql_password">
+ <doc>
+ If you're comfortable with having all of the databases use the
+ same SQL password, set that value here. You should use a
+ locally generated password either here or in the individual
+ settings below. The installation process generates a random
+ value for this option, which satisfies this requirement, so
+ ordinarily you should have no need to change this option.
+ </doc>
+ </option>
+
+ <option name = "rcynic_sql_engine"
+ value = "${myrpki::shared_sql_engine}">
+ <doc>
+ SQL engine to use for rcynic's database. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "rcynic_sql_database"
+ value = "rcynic">
+ <doc>
+ SQL database name for rcynic's database. The default setting of
+ this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "rcynic_sql_username"
+ value = "${myrpki::shared_sql_username}">
+ <doc>
+ If you want to use a separate SQL username for rcynic's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "rcynic_sql_password"
+ value = "${myrpki::shared_sql_password}">
+ <doc>
+ If you want to use a separate SQL password for rcynic's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "rpkid_sql_engine"
+ value = "${myrpki::shared_sql_engine}">
+ <doc>
+ SQL engine to use for rpkid's database. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "rpkid_sql_database"
+ value = "rpkid">
+ <doc>
+ SQL database name for rpkid's database. The default setting of
+ this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "rpkid_sql_username"
+ value = "${myrpki::shared_sql_username}">
+ <doc>
+ If you want to use a separate SQL username for rpkid's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "rpkid_sql_password"
+ value = "${myrpki::shared_sql_password}">
+ <doc>
+ If you want to use a separate SQL password for rpkid's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "irdbd_sql_engine"
+ value = "${myrpki::shared_sql_engine}">
+ <doc>
+ SQL engine to use for irdbd's database. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "irdbd_sql_database"
+ value = "irdbd">
+ <doc>
+ SQL database for irdbd's database. The default setting of this
+ variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "irdbd_sql_username"
+ value = "${myrpki::shared_sql_username}">
+ <doc>
+ If you want to use a separate SQL username for irdbd's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "irdbd_sql_password"
+ value = "${myrpki::shared_sql_password}">
+ <doc>
+ If you want to use a separate SQL password for irdbd's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "pubd_sql_engine"
+ value = "${myrpki::shared_sql_engine}">
+ <doc>
+ SQL engine to use for pubd's database. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "pubd_sql_database"
+ value = "pubd">
+ <doc>
+ SQL database name for pubd's database. The default setting of
+ this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "pubd_sql_username"
+ value = "${myrpki::shared_sql_username}">
+ <doc>
+ If you want to use a separate SQL username for pubd's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "pubd_sql_password"
+ value = "${myrpki::shared_sql_password}">
+ <doc>
+ If you want to use a separate SQL password for pubd's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "log-destination"
+ value = "file">
+ <doc>
+ Default logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-directory"
+ value = "/var/log/rpki">
+ <doc>
+ Where to write log files when logging to files.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "info">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "3">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "56">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
+ </section>
+
+ <section name = "rcynic">
+
+ <doc>
+      rcynicng, unlike its predecessor, uses the same `rpki.conf`
+ file as all the other programs in the RPKI toolkit. Start
+ rcynicng with "`-c filename`" to choose a different
+ configuration file. All options are in the "`[rcynic]`"
+ section.
+ </doc>
+
+ <option name = "sql-engine"
+ value = "${myrpki::rcynic_sql_engine}">
+ <doc>
+ SQL engine for rcynic.
+ </doc>
+ </option>
+
+ <option name = "sql-database"
+ value = "${myrpki::rcynic_sql_database}">
+ <doc>
+ SQL database name for rcynic.
+ </doc>
+ </option>
+
+ <option name = "sql-username"
+ value = "${myrpki::rcynic_sql_username}">
+ <doc>
+ SQL user name for rcynic.
+ </doc>
+ </option>
+
+ <option name = "sql-password"
+ value = "${myrpki::rcynic_sql_password}">
+ <doc>
+ SQL password for rcynic.
+ </doc>
+ </option>
+
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
+ <doc>
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/rcynic.log">
+ <doc>
+ Where to write log file when logging to a file.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
+ </section>
+
+ <section name = "rpkid">
+
+ <doc>
+ rpkid's default config file is the system `rpki.conf` file.
+ Start rpkid with "`-c filename`" to choose a different config
+ file. All options are in the "`[rpkid]`" section. BPKI
+ Certificates and keys may be in either DER or PEM format.
+ </doc>
+
+ <option name = "sql-engine"
+ value = "${myrpki::rpkid_sql_engine}">
+ <doc>
+ SQL engine for rpkid.
+ </doc>
+ </option>
+
+ <option name = "sql-database"
+ value = "${myrpki::rpkid_sql_database}">
+ <doc>
+ SQL database name for rpkid.
+ </doc>
+ </option>
+
+ <option name = "sql-username"
+ value = "${myrpki::rpkid_sql_username}">
+ <doc>
+ SQL user name for rpkid.
+ </doc>
+ </option>
+
+ <option name = "sql-password"
+ value = "${myrpki::rpkid_sql_password}">
+ <doc>
+ SQL password for rpkid.
+ </doc>
+ </option>
+
+ <option name = "server-host"
+ value = "${myrpki::rpkid_server_host}">
+ <doc>
+ Host on which rpkid should listen for HTTP service requests.
+ </doc>
+ </option>
+
+ <option name = "server-port"
+ value = "${myrpki::rpkid_server_port}">
+ <doc>
+ Port on which rpkid should listen for HTTP service requests.
+ </doc>
+ </option>
+
+ <option name = "irdb-url"
+ value = "http://${myrpki::irdbd_server_host}:${myrpki::irdbd_server_port}/">
+ <doc>
+ HTTP service URL rpkid should use to contact irdbd. If irdbd is
+ running on the same machine as rpkid, this can and probably should
+ be a loopback URL, since nobody but rpkid needs to talk to irdbd.
+ </doc>
+ </option>
+
+ <option name = "bpki-ta"
+ value = "${myrpki::bpki_servers_directory}/ca.cer">
+ <doc>
+ Where rpkid should look for the BPKI trust anchor. All BPKI
+ certificate verification within rpkid traces back to this
+ trust anchor. Don't change this unless you really know what
+ you are doing.
+ </doc>
+ </option>
+
+ <option name = "rpkid-cert"
+ value = "${myrpki::bpki_servers_directory}/rpkid.cer">
+ <doc>
+ Where rpkid should look for its own BPKI EE certificate. Don't
+ change this unless you really know what you are doing.
+ </doc>
+ </option>
+
+ <option name = "rpkid-key"
+ value = "${myrpki::bpki_servers_directory}/rpkid.key">
+ <doc>
+ Where rpkid should look for the private key corresponding to its
+ own BPKI EE certificate. Don't change this unless you really know
+ what you are doing.
+ </doc>
+ </option>
+
+ <option name = "irdb-cert"
+ value = "${myrpki::bpki_servers_directory}/irdbd.cer">
+ <doc>
+ Where rpkid should look for irdbd's BPKI EE certificate.
+ Don't change this unless you really know what you are doing.
+ </doc>
+ </option>
+
+ <option name = "irbe-cert"
+ value = "${myrpki::bpki_servers_directory}/irbe.cer">
+ <doc>
+ Where rpkid should look for the back-end control client's BPKI EE
+ certificate. Don't change this unless you really know what you
+ are doing.
+ </doc>
+ </option>
+
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
+ <doc>
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/rpkid.log">
+ <doc>
+ Where to write log file when logging to a file.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
+ </section>
+
+ <section name = "irdbd">
+
+ <doc>
+ irdbd's default configuration file is the system `rpki.conf`
+ file. Start irdbd with "`-c filename`" to choose a different
+ configuration file. All options are in the "`[irdbd]`" section.
+ </doc>
+
+ <doc>
+ Since irdbd is part of the back-end system, it has direct access to
+ the back-end's SQL database, and thus is able to pull its own BPKI
+ configuration directly from the database, and thus needs a bit less
+ configuration than the other daemons.
+ </doc>
+
+ <option name = "sql-engine"
+ value = "${myrpki::irdbd_sql_engine}">
+ <doc>
+ SQL engine for irdbd.
+ </doc>
+ </option>
+
+ <option name = "sql-database"
+ value = "${myrpki::irdbd_sql_database}">
+ <doc>
+ SQL database name for irdbd.
+ </doc>
+ </option>
+
+ <option name = "sql-username"
+ value = "${myrpki::irdbd_sql_username}">
+ <doc>
+ SQL user name for irdbd.
+ </doc>
+ </option>
+
+ <option name = "sql-password"
+ value = "${myrpki::irdbd_sql_password}">
+ <doc>
+ SQL password for irdbd.
+ </doc>
+ </option>
+
+ <option name = "server-host"
+ value = "${myrpki::irdbd_server_host}">
+ <doc>
+ Host on which irdbd should listen for HTTP service requests.
+ </doc>
+ </option>
+
+ <option name = "server-port"
+ value = "${myrpki::irdbd_server_port}">
+ <doc>
+ Port on which irdbd should listen for HTTP service requests.
+ </doc>
+ </option>
+
+ <option name = "startup-message">
+ <doc>
+ String to log on startup, useful when debugging a collection
+ of irdbd instances at once.
+ </doc>
+ </option>
+
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
+ <doc>
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/irdbd.log">
+ <doc>
+ Where to write log file when logging to a file.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
+ </section>
+
+ <section name = "pubd">
+
+ <doc>
+ pubd's default configuration file is the system `rpki.conf`
+ file. Start pubd with "`-c filename`" to choose a different
+ configuration file. All options are in the "`[pubd]`" section.
+ BPKI certificates and keys may be either DER or PEM format.
+ </doc>
+
+ <option name = "sql-engine"
+ value = "${myrpki::pubd_sql_engine}">
+ <doc>
+ SQL engine for pubd.
+ </doc>
+ </option>
+
+ <option name = "sql-database"
+ value = "${myrpki::pubd_sql_database}">
+ <doc>
+ SQL database name for pubd.
+ </doc>
+ </option>
+
+ <option name = "sql-username"
+ value = "${myrpki::pubd_sql_username}">
+ <doc>
+ SQL user name for pubd.
+ </doc>
+ </option>
+
+ <option name = "sql-password"
+ value = "${myrpki::pubd_sql_password}">
+ <doc>
+ SQL password for pubd.
+ </doc>
+ </option>
+
+ <option name = "publication-base"
+ value = "${myrpki::publication_base_directory}">
+ <doc>
+ Root of directory tree where pubd should write out published data.
+ You need to configure this, and the configuration should match up
+ with the directory where you point rsyncd. Neither pubd nor rsyncd
+    much cares //where// you tell them to put this stuff, the important
+ thing is that the rsync URIs in generated certificates match up
+ with the published objects so that relying parties can find and
+ verify rpkid's published outputs.
+ </doc>
+ </option>
+
+ <option name = "rrdp-publication-base"
+ value = "${myrpki::rrdp_publication_base_directory}">
+ <doc>
+ Root of local directory tree where pubd should write out RRDP
+ files. You need to configure this, and the configuration
+ should match up with the directory where you point the web
+ server (usually Apache) that serves the RRDP files. Neither
+ pubd nor Apache much cares //where// you tell it to put this
+ stuff, the important thing is that all the URIs match up so
+ that relying parties can find and verify rpkid's published
+ outputs.
+ </doc>
+ </option>
+
+ <option name = "server-host"
+ value = "${myrpki::pubd_server_host}">
+ <doc>
+ Host on which pubd should listen for HTTP service requests.
+ </doc>
+ </option>
+
+ <option name = "server-port"
+ value = "${myrpki::pubd_server_port}">
+ <doc>
+ Port on which pubd should listen for HTTP service requests.
+ </doc>
+ </option>
+
+ <option name = "bpki-ta"
+ value = "${myrpki::bpki_servers_directory}/ca.cer">
+ <doc>
+ Where pubd should look for the BPKI trust anchor. All BPKI
+ certificate verification within pubd traces back to this
+ trust anchor. Don't change this unless you really know what
+ you are doing.
+ </doc>
+ </option>
+
+ <option name = "pubd-cert"
+ value = "${myrpki::bpki_servers_directory}/pubd.cer">
+ <doc>
+ Where pubd should look for its own BPKI EE certificate. Don't
+ change this unless you really know what you are doing.
+ </doc>
+ </option>
+
+ <option name = "pubd-key"
+ value = "${myrpki::bpki_servers_directory}/pubd.key">
+ <doc>
+ Where pubd should look for the private key corresponding to its
+ own BPKI EE certificate. Don't change this unless you really know
+ what you are doing.
+ </doc>
+ </option>
+
+ <option name = "pubd-crl"
+ value = "${myrpki::bpki_servers_directory}/ca.crl">
+ <doc>
+ Where pubd should look for the CRL covering its own BPKI EE
+ certificate. Don't change this unless you really know what
+ you are doing.
+ </doc>
+ </option>
+
+ <option name = "irbe-cert"
+ value = "${myrpki::bpki_servers_directory}/irbe.cer">
+ <doc>
+ Where pubd should look for the back-end control client's BPKI EE
+ certificate. Don't change this unless you really know what you
+ are doing.
+ </doc>
+ </option>
+
+ <option name = "rrdp-base-uri"
+ value = "${myrpki::publication_rrdp_base_uri}">
+ <doc>
+ RRDP base URI for naming snapshots and deltas.
+ </doc>
+ </option>
+
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
+ <doc>
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/pubd.log">
+ <doc>
+ Where to write log file when logging to a file.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
+ </section>
+
+ <section name = "rpki-nanny">
+
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
+ <doc>
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/rpki-nanny.log">
+ <doc>
+ Where to write log file when logging to a file.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
+ </section>
+
+ <section name = "web_portal">
+
+ <doc>
+ Glue to allow Django to pull user configuration from this file
+ rather than requiring the user to edit settings.py.
+ </doc>
+
+ <!--
+ We used to have SQL settings for the GUI here, but since
+ they're pretty much required to be identical to the ones for
+ irdbd at this point, the duplicate entries were just another
+ chance to misconfigure something, so I removed them. Not yet
+ sure whether this was the right approach. Too much historical
+ baggage in this file.
+ -->
+
+ <option name = "secret-key">
+ <doc>
+ Site-specific secret key for Django.
+ </doc>
+ </option>
+
+ <option name = "allowed-hosts">
+ <doc>
+ Name of virtual host that runs the Django GUI, if this is not
+ the same as the system hostname. Django's security code wants
+ to know the name of the virtual host on which Django is
+ running, and will fail when it thinks it's running on a
+ disallowed host.
+ </doc>
+ <doc>
+ If you get an error like "Invalid HTTP_HOST header (you may
+ need to set ALLOWED_HOSTS)", you will need to set this option.
+ </doc>
+ </option>
+
+ <option name = "download-directory"
+ value = "/var/tmp">
+ <doc>
+ A directory large enough to hold the RouteViews.org routing table dump
+ fetched by the rpkigui-import-routes script.
+ </doc>
+ </option>
+
+ </section>
+
+ <section name = "autoconf">
+
+ <doc>
+ rpki-confgen --autoconf records the current autoconf settings
+ here, so that other options can refer to them. The section name
+ "autoconf" is magic, don't change it.
+ </doc>
+
+ <option name = "bindir">
+ <doc>
+ Usually /usr/bin or /usr/local/bin.
+ </doc>
+ </option>
+
+ <option name = "datarootdir">
+ <doc>
+ Usually /usr/share or /usr/local/share.
+ </doc>
+ </option>
+
+ <option name = "sbindir">
+ <doc>
+ Usually /usr/sbin or /usr/local/sbin.
+ </doc>
+ </option>
+
+ <option name = "sysconfdir">
+ <doc>
+ Usually /etc or /usr/local/etc.
+ </doc>
+ </option>
+
+ </section>
+
+</configuration>
diff --git a/rp/config/rpki-generate-root-certificate b/rp/config/rpki-generate-root-certificate
new file mode 100755
index 00000000..10b8b194
--- /dev/null
+++ b/rp/config/rpki-generate-root-certificate
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+"""
+Generate an RPKI root certificate for rootd. In most cases you should
+not need to do this; see caveats in the manual about running rootd if
+you think you need this. This script does nothing that can't also be
+done with the OpenSSL command line tool, but on some platforms the
+installed copy of openssl doesn't understand the RFC 3779 extensions.
+"""
+
+import os
+import sys
+import pwd
+import time
+import rpki.x509
+import rpki.config
+import rpki.sundial
+import rpki.autoconf
+import rpki.resource_set
+
+os.environ["TZ"] = "UTC"
+time.tzset()
+
+cfg = rpki.config.argparser(section = "rootd", doc = __doc__)
+
+default_certfile = cfg.get("rpki-root-cert-file", "root.cer")
+default_keyfile = cfg.get("rpki-root-key-file", "root.key")
+default_talfile = os.path.splitext(default_certfile)[0] + ".tal"
+
+cfg.argparser.add_argument("-a", "--asns", help = "ASN resources", default = "0-4294967295")
+cfg.argparser.add_argument("-4", "--ipv4", help = "IPv4 resources", default = "0.0.0.0/0")
+cfg.argparser.add_argument("-6", "--ipv6", help = "IPv6 resources", default = "::/0")
+cfg.argparser.add_argument("--certificate", help = "certificate file", default = default_certfile)
+cfg.argparser.add_argument("--key", help = "key file", default = default_keyfile)
+cfg.argparser.add_argument("--tal", help = "TAL file", default = default_talfile)
+
+args = cfg.argparser.parse_args()
+
+resources = rpki.resource_set.resource_bag(
+ asn = args.asns,
+ v4 = args.ipv4,
+ v6 = args.ipv6)
+
+keypair = rpki.x509.RSA.generate(quiet = True)
+
+sia = (cfg.get("rpki_base_uri") + "/",
+ cfg.get("rpki-root-manifest-uri"),
+ None,
+ cfg.get("publication_rrdp_notification_uri", section = "myrpki"))
+
+uris = (cfg.get("rpki-root-cert-uri"),
+ cfg.get("publication_rrdp_base_uri", section = "myrpki") + "root.cer")
+
+cert = rpki.x509.X509.self_certify(
+ keypair = keypair,
+ subject_key = keypair.get_public(),
+ serial = 1,
+ sia = sia,
+ notAfter = rpki.sundial.now() + rpki.sundial.timedelta(days = 365),
+ resources = resources)
+
+with open(args.certificate, "wb") as f:
+ f.write(cert.get_DER())
+
+with open(args.tal, "w") as f:
+ for uri in uris:
+ f.write(uri + "\n")
+ f.write(keypair.get_public().get_Base64())
+
+with os.fdopen(os.open(args.key, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0400), "w") as f:
+ f.write(keypair.get_DER())
+
+try:
+ pw = pwd.getpwnam(rpki.autoconf.RPKI_USER)
+ os.chown(args.key, pw.pw_uid, pw.pw_gid)
+except:
+ pass
diff --git a/rp/config/rpki-manage b/rp/config/rpki-manage
new file mode 100755
index 00000000..ac3cc967
--- /dev/null
+++ b/rp/config/rpki-manage
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Using a Python script to run sudo to run a Python script is a bit
+# silly, but it lets us use rpki.autoconf to locate sudo, lets us
+# avoid needing a custom setuid wrapper, lets us avoid another pass
+# through the adventures of shell quoting and tokenization, and
+# generally is just a lot simpler to implement correctly.
+#
+# OK, it's probably a few milliseconds slower. Big deal.
+
+if __name__ == "__main__":
+
+ import os
+ import pwd
+ import sys
+ import rpki.autoconf
+
+ try:
+ uid = pwd.getpwnam(rpki.autoconf.RPKI_USER).pw_uid
+ except:
+ uid = None
+
+ if uid is None or uid == os.geteuid():
+
+ # django-admin seems to have problems creating the superuser account when
+ # $LANG is unset or is set to something totally incompatible with UTF-8.
+
+ if os.environ.get("LANG") in (None, "", "C"):
+ os.environ["LANG"] = "en_US.UTF-8"
+
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rpki.django_settings.gui")
+
+ from django.core.management import execute_from_command_line
+
+ execute_from_command_line()
+
+ else:
+
+ try:
+ argv = [rpki.autoconf.SUDO, "-u", rpki.autoconf.RPKI_USER, sys.executable]
+ argv.extend(os.path.abspath(a) if i == 0 else a for i, a in enumerate(sys.argv))
+ os.execv(argv[0], argv)
+ sys.exit("rpki-manage startup failure, no exception so don't know why, sorry")
+
+ except Exception as e:
+ sys.exit("Couldn't exec sudo python rpki-manage: {!s}".format(e))
diff --git a/rp/config/rpki-sql-backup b/rp/config/rpki-sql-backup
new file mode 100755
index 00000000..09e5856e
--- /dev/null
+++ b/rp/config/rpki-sql-backup
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2010-2013 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Back up data from SQL databases, looking at config file to figure out
+which databases and what credentials to use with them, and eliminating
+duplicates in cases where we've configured multiple applications to
+share a single database.
+"""
+
+import os
+import sys
+import time
+import argparse
+import subprocess
+import rpki.config
+
+os.environ["TZ"] = "UTC"
+time.tzset()
+
+cfg = rpki.config.argparser(doc = __doc__, section = "myrpki")
+cfg.argparser.add_argument("-o", "--output", type = argparse.FileType("wb"), default = sys.stdout,
+ help = "destination for SQL dump (default: stdout)")
+cfg.argparser.add_argument("-v", "--verbose", action = "store_true",
+ help = "whistle while you work")
+args = cfg.argparser.parse_args()
+
+templates = dict(mysql = "mysqldump --add-drop-database -u{username} -p{password} -B{database}",
+ sqlite3 = "sqlite3 {database} .dump",
+ postgresql = "sudo -u {username} pg_dump {database}")
+
+cmds = []
+
+for name in ("rpkid", "irdbd", "pubd"):
+ if cfg.getboolean("start_" + name, False):
+ cmd = templates[cfg.get("sql-engine", section = name)]
+ cmd = cmd.format(database = cfg.get("sql-database", section = name),
+ username = cfg.get("sql-username", section = name),
+ password = cfg.get("sql-password", section = name))
+ if cmd not in cmds:
+ cmds.append(cmd)
+
+for cmd in cmds:
+ if args.verbose:
+ sys.stderr.write("[Running \"{}\"]\n".format(cmd))
+ subprocess.check_call(cmd.split(), stdout = args.output)
diff --git a/rp/config/rpki-sql-setup b/rp/config/rpki-sql-setup
new file mode 100755
index 00000000..6fd64588
--- /dev/null
+++ b/rp/config/rpki-sql-setup
@@ -0,0 +1,348 @@
+#!/usr/bin/env python
+
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2009-2013 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Automated setup of SQL stuff used by the RPKI tools. Pulls
+configuration from rpki.conf, prompts for SQL password when needed.
+"""
+
+import os
+import pwd
+import sys
+import getpass
+import textwrap
+import argparse
+import rpki.config
+
+
+class Abstract_Driver(object):
+
+ # Kludge to make classes derived from this into singletons. Net
+ # of a Million Lies says this is Not Pythonic, but it seems to
+ # work, so long as one doesn't attempt to subclass the resulting
+ # driver classes. For our purposes, it will do.
+
+ __instance = None
+
+ def __new__(cls, *args, **kwargs):
+ if cls.__instance is None:
+ cls.__instance = object.__new__(cls, *args, **kwargs)
+ return cls.__instance
+
+ def db_accessible(self, udb):
+ try:
+ self._db_accessible_test(udb)
+ except:
+ return False
+ else:
+ return True
+
+ def fetchone(self):
+ return self._cur.fetchone()
+
+ def fetchall(self):
+ return self._cur.fetchall()
+
+ def close(self):
+ self._cur.close()
+ self._db.close()
+
+ def log(self, msg):
+ if self.args.verbose:
+ sys.stderr.write(msg + "\n")
+
+
+class MySQL_Driver(Abstract_Driver):
+
+ _initialized = False
+
+ def __init__(self, args):
+ try:
+ self.driver
+ except AttributeError:
+ from rpki.mysql_import import MySQLdb
+ self.driver = MySQLdb
+ self.args = args
+
+ def _db_accessible_test(self, udb):
+ self.driver.connect(db = udb.database, user = udb.username, passwd = udb.password).close()
+
+ def db_exists(self, udb):
+ self.execute("SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = '{0.database}'".format(udb))
+ return bool(self.fetchone()[0])
+
+ def execute(*args):
+ try:
+ self._cur
+ except AttributeError:
+ self.log("MySQL driver initializing root connection")
+ if self.args.mysql_defaults:
+ mysql_cfg = rpki.config.parser(set_filename = self.args.mysql_defaults, section = "client")
+ self._db = self.driver.connect(db = "mysql",
+ user = mysql_cfg.get("user"),
+ passwd = mysql_cfg.get("password"))
+ else:
+ self._db = self.driver.connect(db = "mysql",
+ user = "root",
+ passwd = getpass.getpass("Please enter your MySQL root password: "))
+ self._db.autocommit(True)
+ self._cur = self._db.cursor()
+ self.log("MySQL driver executing {}".format(", ".join(args)))
+ return self._cur.execute(*args)
+
+ def create(self, udb):
+ self.execute("CREATE DATABASE IF NOT EXISTS {0.database}".format(udb))
+ self.fix_grants(udb)
+
+ def drop(self, udb):
+ self.execute("DROP DATABASE IF EXISTS {0.database}".format(udb))
+
+ def script_drop(self, udb):
+ self.args.script_output.write("DROP DATABASE IF EXISTS {};\n".format(udb.database))
+
+ def fix_grants(self, udb):
+ self.execute("GRANT ALL ON {0.database}.* TO {0.username}@localhost IDENTIFIED BY %s".format(udb),
+ (udb.password,))
+
+class SQLite3_Driver(Abstract_Driver):
+
+ def __init__(self, args):
+ try:
+ self.driver
+ except AttributeError:
+ import sqlite3
+ self.driver = sqlite3
+ self.args = args
+
+ def _db_accessible_test(self, udb):
+ self.driver.connect(udb.database).close()
+
+ def db_exists(self, udb):
+ return os.path.exists(udb.database)
+
+ def _grant(self, udb):
+ if udb.username and os.geteuid() == 0:
+ pw = pwd.getpwnam(udb.username)
+ os.chown(udb.database, pw.pw_uid, pw.pw_gid)
+
+ def create(self, udb):
+ self._db_accessible_test(udb.database)
+ self._grant(udb)
+
+ def drop(self, udb):
+ os.unlink(udb.database)
+
+ def script_drop(self, udb):
+ self.args.script_output.write("rm {}\n".format(udb.database))
+
+ def fix_grants(self, udb):
+ self._grant(udb)
+
+
+class PostgreSQL_Driver(Abstract_Driver):
+
+ def __init__(self, args):
+ try:
+ self.driver
+ except AttributeError:
+ import psycopg2
+ self.driver = psycopg2
+ self.args = args
+ if args.postgresql_root_username and (os.getuid() == 0 or os.geteuid() == 0):
+ self._pw = pwd.getpwnam(args.postgresql_root_username)
+ else:
+ self._pw = None
+ self.log("Initialized PostgreSQL driver, pw {!r}".format(self._pw))
+
+ def _seteuid(self, new_uid):
+ old_uid = os.geteuid()
+ if new_uid != old_uid:
+ self.log("PostgreSQL driver changing EUID from {} to {}".format(old_uid, new_uid))
+ os.seteuid(new_uid)
+ return old_uid
+
+ def execute(self, *args):
+ try:
+ self._cur
+ except AttributeError:
+ self.log("PostgreSQL driver opening connection to database {}".format(self.args.postgresql_root_database))
+ if self._pw is not None:
+ euid = self._seteuid(self._pw.pw_uid)
+ try:
+ self._db = self.driver.connect(database = self.args.postgresql_root_database)
+ self._db.autocommit = True
+ self._cur = self._db.cursor()
+ finally:
+ if self._pw is not None:
+ self._seteuid(euid)
+ self.log("PostgreSQL driver executing {}".format(", ".join(args)))
+ return self._cur.execute(*args)
+
+ def _db_accessible_test(self, udb):
+ pw = pwd.getpwnam(udb.username)
+ uid = self._seteuid(pw.pw_uid)
+ try:
+ self.driver.connect(database = udb.database, user = udb.username , password = udb.password).close()
+ finally:
+ self._seteuid(uid)
+
+ def db_exists(self, udb):
+ self.execute("SELECT COUNT(*) FROM pg_database WHERE datname = '{0.database}'".format(udb))
+ return bool(self.fetchone()[0])
+
+ def role_in_use(self, udb):
+ self.execute(textwrap.dedent('''\
+ SELECT COUNT(*) FROM pg_database
+ JOIN pg_roles ON pg_database.datdba = pg_roles.oid
+ WHERE pg_roles.rolname = '{0.username}'
+ '''.format(udb)))
+ return bool(self.fetchone()[0])
+
+ def create(self, udb):
+ if not self.role_in_use(udb):
+ self.execute("CREATE ROLE {0.username} LOGIN PASSWORD '{0.password}'".format(udb))
+ if not self.db_exists(udb):
+ self.execute("CREATE DATABASE {0.database} OWNER {0.username}".format(udb))
+
+ def drop(self, udb):
+ self.execute("DROP DATABASE IF EXISTS {0.database}".format(udb))
+ if not self.role_in_use(udb):
+ self.execute("DROP ROLE IF EXISTS {0.username}".format(udb))
+
+ def script_drop(self, udb):
+ self.args.script_output.write(textwrap.dedent('''\
+ DROP DATABASE IF EXISTS {0.database};
+ DO $$ BEGIN
+ IF NOT EXISTS (SELECT * FROM pg_database JOIN pg_roles
+ ON pg_database.datdba = pg_roles.oid
+ WHERE pg_roles.rolname = '{0.username}')
+ THEN
+ DROP ROLE IF EXISTS {0.username};
+ END IF;
+ END $$;
+ '''.format(udb)))
+
+ def fix_grants(self, udb):
+ self.execute("ALTER DATABASE {0.database} OWNER TO {0.username}".format(udb))
+ self.execute("ALTER ROLE {0.username} WITH PASSWORD '{0.password}".format(udb))
+
+
+class UserDB(object):
+ """
+ Class to wrap access parameters for a particular database.
+ """
+
+ drivers = dict(sqlite3 = SQLite3_Driver,
+ mysql = MySQL_Driver,
+ postgresql = PostgreSQL_Driver)
+
+ def __init__(self, args, name):
+ self.database = cfg.get("sql-database", section = name)
+ self.username = cfg.get("sql-username", section = name)
+ self.password = cfg.get("sql-password", section = name)
+ self.engine = cfg.get("sql-engine", section = name)
+ self.driver = self.drivers[self.engine](args)
+ self.args = args
+
+ def drop(self):
+ if self.args.force or self.driver.db_accessible(self):
+ self.driver.drop(self)
+
+ def create(self):
+ if self.args.force or not self.driver.db_accessible(self):
+ self.driver.create(self)
+
+ def script_drop(self):
+ self.driver.script_drop(self)
+
+ def drop_and_create(self):
+ if self.args.force or self.driver.db_accessible(self):
+ self.driver.drop(self)
+ self.driver.create(self)
+
+ def fix_grants(self):
+ if self.args.force or not self.driver.db_accessible(self):
+ self.driver.fix_grants(self)
+
+
+parser = argparse.ArgumentParser(description = __doc__)
+parser.add_argument("-c", "--config",
+ help = "specify alternate location for rpki.conf")
+parser.add_argument("-d", "--debug", action = "store_true",
+ help = "enable debugging (eg, Python backtraces)")
+parser.add_argument("-v", "--verbose", action = "store_true",
+ help = "whistle while you work")
+parser.add_argument("-f", "--force", action = "store_true",
+ help = "force database create, drop, or grant regardless of current state")
+
+parser.add_argument("--mysql-defaults",
+ help = "specify MySQL root access credentials via a configuration file")
+
+
+parser.add_argument("--postgresql-root-database", default = "postgres",
+ help = "name of PostgreSQL control database")
+parser.add_argument("--postgresql-root-username",
+ help = "username of PostgreSQL control role")
+
+subparsers = parser.add_subparsers(title = "Commands", metavar = "", dest = "dispatch")
+
+subparsers.add_parser("create",
+ help = "create databases and load schemas")
+
+subparsers.add_parser("drop",
+ help = "drop databases")
+
+subparser = subparsers.add_parser("script-drop",
+ help = "show SQL commands to drop databases")
+subparser.add_argument("script_output",
+ nargs = "?", type = argparse.FileType("w"), default = "-",
+ help = "destination for drop script")
+
+subparsers.add_parser("drop-and-create",
+ help = "drop databases then recreate them and load schemas")
+
+subparsers.add_parser("fix-grants",
+ help = "whack database to match configuration file")
+
+args = parser.parse_args()
+
+try:
+
+ cfg = rpki.config.parser(set_filename = args.config, section = "myrpki")
+
+ names = [name for name in ("irdbd", "rpkid", "pubd")
+ if cfg.getboolean("start_" + name, False)]
+ names.append("rcynic")
+
+ # For now, we quietly ignore missing sections rather than throwing an exception.
+ # I could make a case either way for this, but ignoring missing sections is a
+ # lot easier to clean up while debugging the installation scripts.
+
+ for name in names:
+ if cfg.has_section(name):
+ udb = UserDB(args = args, name = name)
+ method = args.dispatch.replace("-", "_")
+ getattr(udb, method)()
+
+except Exception, e:
+ if args.debug:
+ raise
+ else:
+ sys.exit(str(e))
diff --git a/rp/rcynic/Makefile.in b/rp/rcynic/Makefile.in
index a2d844bd..52c67fde 100644
--- a/rp/rcynic/Makefile.in
+++ b/rp/rcynic/Makefile.in
@@ -1,17 +1,7 @@
# $Id$
-NAME = rcynic
-
-BIN = ${NAME}
-SRC = ${NAME}.c
-OBJ = ${NAME}.o
-
-GEN = defstack.h
-
-OBJS = ${OBJ} bio_f_linebreak.o
-
CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror-implicit-function-declaration
-LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@
+LDFLAGS = @LDFLAGS@
LIBS = @LIBS@
AWK = @AWK@
@@ -40,53 +30,48 @@ abs_top_srcdir = @abs_top_srcdir@
abs_top_builddir = @abs_top_builddir@
srcdir = @srcdir@
-RCYNIC_BIN_RCYNIC = @RCYNIC_BIN_RCYNIC@
+RCYNIC_BIN_RCYNIC = ${DESTDIR}${bindir}/rcynic
RCYNIC_CONF_DATA = @RCYNIC_CONF_DATA@
-RCYNIC_CONF_FILE = @RCYNIC_CONF_FILE@
-RCYNIC_CONF_RSYNC = @RCYNIC_CONF_RSYNC@
-RCYNIC_CONF_TA_DIR = @RCYNIC_CONF_TA_DIR@
-RCYNIC_CRON_USER = @RCYNIC_CRON_USER@
-RCYNIC_DATA_DIR = ${RCYNIC_DIR}/data
+RCYNIC_CONF_FILE = ${DESTDIR}${sysconfdir}/rcynic.conf
+RCYNIC_CONF_RSYNC = @RSYNC@
+RCYNIC_CONF_TA_DIR = ${sysconfdir}/rpki/trust-anchors
+RCYNIC_CRON_USER = ${RPKI_USER}
+RCYNIC_DATA_DIR = ${DESTDIR}${RCYNIC_DIR}/data
RCYNIC_DIR = @RCYNIC_DIR@
-RCYNIC_DIRS = ${RCYNIC_TA_DIR} ${RCYNIC_JAIL_DIRS} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
-RCYNIC_GECOS = RPKI Validation System
-RCYNIC_GROUP = @RCYNIC_GROUP@
+RCYNIC_DIRS = ${RCYNIC_TA_DIR} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
+RPKI_GECOS = RPKI System Software
+RPKI_GROUP = @RPKI_GROUP@
RCYNIC_HTML_DIR = @RCYNIC_HTML_DIR@
RCYNIC_INSTALL_TARGETS = @RCYNIC_INSTALL_TARGETS@
-RCYNIC_JAIL_DIRS = @RCYNIC_JAIL_DIRS@
-RCYNIC_STATIC_RSYNC = @RCYNIC_STATIC_RSYNC@
-RCYNIC_TA_DIR = @RCYNIC_TA_DIR@
-RCYNIC_USER = @RCYNIC_USER@
-RPKIRTR_DIR = ${RCYNIC_DIR}/rpki-rtr
-RPKIRTR_GECOS = RPKI router server
-RPKIRTR_GROUP = rpkirtr
-RPKIRTR_MODE = 775
-RPKIRTR_USER = rpkirtr
-
-all: ${BIN} ${RCYNIC_STATIC_RSYNC}
+RCYNIC_TA_DIR = ${DESTDIR}${sysconfdir}/rpki/trust-anchors
+RPKI_USER = @RPKI_USER@
+RPKIRTR_DIR = ${DESTDIR}${RCYNIC_DIR}/rpki-rtr
+
+OBJS = rcynic.o bio_f_linebreak.o
+
+all: rcynicng
clean:
- if test -r static-rsync/Makefile; then cd static-rsync; ${MAKE} $@; fi
- rm -f ${BIN} ${OBJS}
+ rm -f rcynic ${OBJS}
-${OBJ}: ${SRC} ${GEN}
+rcynic.o: rcynic.c defstack.h
-${BIN}: ${OBJS}
+rcynic: ${OBJS}
${CC} ${CFLAGS} -o $@ ${OBJS} ${LDFLAGS} ${LIBS}
-${GEN}: ${SRC}
- ${PYTHON} ${abs_top_srcdir}/buildtools/defstack.py ${SRC} >$@.tmp
+defstack.h: rcynic.c
+ ${PYTHON} ${abs_top_srcdir}/buildtools/defstack.py rcynic.c >$@.tmp
mv $@.tmp $@
tags: TAGS
-TAGS: ${SRC} ${GEN}
- etags ${SRC} ${GEN}
+TAGS: rcynic.c defstack.h
+ etags rcynic.c defstack.h
-test: ${BIN}
+test: rcynic
if test -r rcynic.conf; \
then \
- ./${BIN} -j 0 && \
+ ./rcynic -j 0 && \
test -r rcynic.xml && \
echo && \
./rcynic-text rcynic.xml; \
@@ -108,33 +93,31 @@ static-rsync/rsync:
install: all ${RCYNIC_INSTALL_TARGETS}
install-always: \
- install-directories install-rcynic install-rcynic-conf
+ install-directories install-rcynic install-tals
install-postconf: \
install-user-and-group install-directory-ownership install-crontab
-install-jailed: \
- install-static-rsync install-shared-libraries install-rc-scripts
-
install-directories: ${RCYNIC_DIRS}
${RCYNIC_DIRS} ${DESTDIR}${bindir} ${DESTDIR}${sysconfdir}:
${INSTALL} -v -d $@
install-directory-ownership: ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
- chown ${RCYNIC_USER}:${RCYNIC_GROUP} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR}
- chown ${RPKIRTR_USER}:${RCYNIC_GROUP} ${RPKIRTR_DIR}/sockets
- chmod ${RPKIRTR_MODE} ${RPKIRTR_DIR}/sockets
+ chown ${RPKI_USER}:${RPKI_GROUP} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
-install-rcynic-conf: ${RCYNIC_CONF_FILE}
+install-tals:
+ ${INSTALL} -v -d ${RCYNIC_TA_DIR}
+ ${INSTALL} -v -p -m 444 sample-trust-anchors/*.tal ${RCYNIC_TA_DIR}
+
+# We don't install rcynic.conf anymore. Keep this for now as internal documentation,
+# clean up later.
${RCYNIC_CONF_FILE}:
@echo
- @echo Found no ${RCYNIC_CONF_FILE}, creating basic config and installing default trust anchor locators.
+ @echo Found no ${RCYNIC_CONF_FILE}, creating basic configuration.
@echo You might want to edit this.
@echo
- ${INSTALL} -v -d ${RCYNIC_TA_DIR}
- ${INSTALL} -v -p -m 444 sample-trust-anchors/*.tal ${RCYNIC_TA_DIR}
@echo > $@.tmp '# Basic rcynic configuration file with default trust anchors.'
@echo >>$@.tmp '# See documentation for details.'
@echo >>$@.tmp ''
@@ -153,13 +136,8 @@ ${RCYNIC_CONF_FILE}:
install-rcynic: ${RCYNIC_BIN_RCYNIC}
-${RCYNIC_BIN_RCYNIC}: ${BIN}
- ${INSTALL} -p -m 555 ${BIN} $@
-
-install-static-rsync: ${RCYNIC_DIR}/bin/rsync
-
-${RCYNIC_DIR}/bin/rsync: static-rsync/rsync
- ${INSTALL} -p -m 555 static-rsync/rsync $@
+${RCYNIC_BIN_RCYNIC}: rcynicng
+ ${INSTALL} -p -m 555 rcynicng $@
.FORCE:
diff --git a/rp/rcynic/rc-scripts/darwin/RCynic b/rp/rcynic/rc-scripts/darwin/RCynic
deleted file mode 100755
index d486a3c3..00000000
--- a/rp/rcynic/rc-scripts/darwin/RCynic
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh -
-#
-# $Id$
-#
-. /etc/rc.common
-
-name="rcynic"
-start_cmd="rcynic_start"
-stop_cmd="rcynic_stop"
-
-: ${rcynic_dir="/var/rcynic"}
-
-StartService()
-{
- /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
-
- if ! /sbin/mount_devfs devfs "${rcynic_dir}/dev"; then
- echo "Mounting devfs on ${rcynic_dir}/dev failed..."
- exit 1
- fi
-
- for i in /etc/localtime /etc/resolv.conf; do
- j="${rcynic_dir}${i}"
- if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- fi
- done
-
- /bin/ln -f /var/run/mDNSResponder "${rcynic_dir}/var/run/mDNSResponder"
-}
-
-StopService()
-{
- /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
-}
-
-RestartService()
-{
- StartService
-}
-
-RunService "$1"
diff --git a/rp/rcynic/rc-scripts/darwin/StartupParameters.plist b/rp/rcynic/rc-scripts/darwin/StartupParameters.plist
deleted file mode 100644
index ca46b676..00000000
--- a/rp/rcynic/rc-scripts/darwin/StartupParameters.plist
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
- <key>Description</key>
- <string>RCynic Setup</string>
- <key>OrderPreference</key>
- <string>None</string>
- <key>Provides</key>
- <array>
- <string>RCynic</string>
- </array>
- <key>Uses</key>
- <array>
- <string>Network</string>
- <string>Resolver</string>
- </array>
- </dict>
-</plist>
diff --git a/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic b/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic
deleted file mode 100755
index 9b7aa545..00000000
--- a/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/sh -
-#
-# $Id$
-#
-# PROVIDE: rcynic
-# REQUIRE: DAEMON
-# KEYWORD: nojail
-
-. /etc/rc.subr
-
-name="rcynic"
-start_cmd="rcynic_start"
-stop_cmd="rcynic_stop"
-
-: ${rcynic_dir="/var/rcynic"}
-
-rcynic_start()
-{
- /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
-
- if ! /sbin/mount -t devfs dev "${rcynic_dir}/dev"; then
- echo "Mounting devfs on ${rcynic_dir}/dev failed..."
- exit 1
- fi
-
- /sbin/devfs -m "${rcynic_dir}/dev" rule apply hide
- /sbin/devfs -m "${rcynic_dir}/dev" rule apply path null unhide
- /sbin/devfs -m "${rcynic_dir}/dev" rule apply path random unhide
-
- for i in /etc/localtime /etc/resolv.conf; do
- j="${rcynic_dir}${i}"
- if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- fi
- done
-}
-
-rcynic_stop()
-{
- /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/rp/rcynic/rcynic-cron b/rp/rcynic/rcynic-cron
index 53bfea9f..e7e564b3 100755
--- a/rp/rcynic/rcynic-cron
+++ b/rp/rcynic/rcynic-cron
@@ -28,83 +28,51 @@ our purposes. In theory this is portable to any Unix-like system.
import os
import sys
-import pwd
import fcntl
import errno
-import argparse
import rpki.autoconf
def run(*cmd, **kwargs):
- chroot_this = kwargs.pop("chroot_this", False)
- cwd = kwargs.pop("cwd", None)
- pid = os.fork()
- if pid == 0:
- if chroot_this:
- os.chdir(rpki.autoconf.RCYNIC_DIR)
- elif cwd is not None:
- os.chdir(cwd)
- if we_are_root:
- os.initgroups(pw.pw_name, pw.pw_gid)
- if chroot_this:
- os.chroot(rpki.autoconf.RCYNIC_DIR)
- if we_are_root:
- os.setgid(pw.pw_gid)
- os.setuid(pw.pw_uid)
- os.closerange(3, os.sysconf("SC_OPEN_MAX"))
- os.execvp(cmd[0], cmd)
- os._exit(1)
- else:
- status = os.waitpid(pid, 0)[1]
- if status == 0:
- return
- elif os.WIFSIGNALED(status):
- sys.exit("Process %s exited with signal %s" % (" ".join(cmd), os.WTERMSIG(status)))
- elif os.WIFEXITED(status):
- sys.exit("Program %s exited with status %s" % (" ".join(cmd), os.WEXITSTATUS(status)))
+ cwd = kwargs.pop("cwd", None)
+ pid = os.fork()
+ if pid == 0:
+ if cwd is not None:
+ os.chdir(cwd)
+ os.closerange(3, os.sysconf("SC_OPEN_MAX"))
+ os.execvp(cmd[0], cmd)
+ os._exit(1) # pylint: disable=W0212
else:
- sys.exit("Program %s exited for unknown reason %s" % (" ".join(cmd), status))
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("--chroot", action = "store_true", help = "run chrooted")
-args = parser.parse_args()
-
-we_are_root = os.getuid() == 0
-
-if args.chroot and not we_are_root:
- sys.exit("Only root can --chroot")
+ status = os.waitpid(pid, 0)[1]
+ if status == 0:
+ return
+ elif os.WIFSIGNALED(status):
+ sys.exit("Process %s exited with signal %s" % (" ".join(cmd), os.WTERMSIG(status)))
+ elif os.WIFEXITED(status):
+ sys.exit("Program %s exited with status %s" % (" ".join(cmd), os.WEXITSTATUS(status)))
+ else:
+ sys.exit("Program %s exited for unknown reason %s" % (" ".join(cmd), status))
try:
- pw = pwd.getpwnam(rpki.autoconf.RCYNIC_USER)
-except KeyError:
- sys.exit("Could not find passwd entry for user %s" % rpki.autoconf.RCYNIC_USER)
-
-try:
- lock = os.open(os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock"),
- os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
- fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
- if we_are_root:
- os.fchown(lock, pw.pw_uid, pw.pw_gid)
+ lock = os.open(os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "lock"),
+ os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
+ fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except (IOError, OSError), e:
- if e.errno == errno.EAGAIN:
- sys.exit(0) # Another instance of this script is already running, exit silently
- else:
- sys.exit("Error %r opening lock %r" % (e.strerror, os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock")))
+ if e.errno == errno.EAGAIN:
+ sys.exit(0) # Another instance of this script is already running, exit silently
+ else:
+ sys.exit("Error %r opening lock %r" % (e.strerror, os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock")))
-if args.chroot:
- run("/bin/rcynic", "-c", "/etc/rcynic.conf", chroot_this = True)
-else:
- run(os.path.join(rpki.autoconf.bindir, "rcynic"), "-c", os.path.join(rpki.autoconf.sysconfdir, "rcynic.conf"))
+run(os.path.join(rpki.autoconf.bindir, "rcynic"))
run(os.path.join(rpki.autoconf.bindir, "rpki-rtr"),
"cronjob",
- os.path.join(rpki.autoconf.RCYNIC_DIR, "data/authenticated"),
cwd = os.path.join(rpki.autoconf.RCYNIC_DIR, "rpki-rtr"))
prog = os.path.join(rpki.autoconf.libexecdir, "rpkigui-rcynic")
if os.path.exists(prog):
- run(prog)
+ run(prog)
if rpki.autoconf.RCYNIC_HTML_DIR and os.path.exists(os.path.dirname(rpki.autoconf.RCYNIC_HTML_DIR)):
- run(os.path.join(rpki.autoconf.bindir, "rcynic-html"),
- os.path.join(rpki.autoconf.RCYNIC_DIR, "data/rcynic.xml"),
- rpki.autoconf.RCYNIC_HTML_DIR)
+ run(os.path.join(rpki.autoconf.bindir, "rcynic-html"),
+ os.path.join(rpki.autoconf.RCYNIC_DIR, "data/rcynic.xml"),
+ rpki.autoconf.RCYNIC_HTML_DIR)
diff --git a/rp/rcynic/rcynic-dump b/rp/rcynic/rcynic-dump
new file mode 100755
index 00000000..0c7f898f
--- /dev/null
+++ b/rp/rcynic/rcynic-dump
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+
+# $Id$
+
+"""
+Dump rcynicng database to old-style disk files.
+
+This is a slow operation due to blocking operations in the underlying
+filesystem, so in the long run we will almost certainly want to
+rewrite the RP toolkit to use the database directly, but it's (much)
+easier to compare results between the old and new validation engines
+when they use the same data representation.
+"""
+
+import os
+import sys
+import time
+import shutil
+import logging
+import argparse
+
+import rpki.config
+import rpki.autoconf
+
+logger = logging.getLogger("rcynic-dump")
+
+os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.rcynic")
+time.tzset()
+
+logging.basicConfig(level = logging.DEBUG, format = "%(asctime)s %(message)s", datefmt = "%Y-%m-%d %H:%M:%S")
+
+parser = argparse.ArgumentParser(description = __doc__)
+parser.add_argument("-c", "--config")
+parser.add_argument("output_tree", nargs = "?", default = "rcynic-data")
+args = parser.parse_args()
+
+rpki.config.parser(set_filename = args.config, section = "rcynic")
+
+import django
+django.setup()
+
+import rpki.rcynicdb
+
+def uri_to_filename(obj, base):
+ return os.path.join(args.output_tree, base, obj.uri[obj.uri.index("://") + 3:])
+
+def sha256_to_filename(obj):
+ return os.path.join(args.output_tree, "sha256", obj.sha256[:2], obj.sha256 + obj.uri[-4:])
+
+def authenticated_to_dirname(authenticated):
+ return "authenticated-{}".format(authenticated.started.strftime("%Y-%m-%dT%H:%M:%SZ"))
+
+seen = set()
+
+def check_der(fn, der):
+ with open(fn, "rb") as f:
+ return der == f.read()
+
+def mkdir_maybe(fn):
+ dn = os.path.dirname(fn)
+ if not os.path.exists(dn):
+ os.makedirs(dn)
+
+for obj in rpki.rcynicdb.models.RPKIObject.objects.all():
+
+ hfn = sha256_to_filename(obj)
+ ufn = uri_to_filename(obj, "unauthenticated")
+
+ if not os.path.exists(hfn) or not check_der(hfn, obj.der):
+ mkdir_maybe(hfn)
+ with open(hfn, "wb") as f:
+ f.write(obj.der)
+
+ seen.add(hfn)
+ seen.add(ufn)
+
+ for auth in obj.authenticated.all():
+ afn = uri_to_filename(obj, authenticated_to_dirname(auth))
+ mkdir_maybe(afn)
+ if not os.path.exists(afn):
+ os.link(hfn, afn)
+ elif not check_der(afn, obj.der):
+ os.unlink(afn)
+ os.link(hfn, afn)
+ seen.add(afn)
+
+auth = rpki.rcynicdb.models.Authenticated.objects.order_by("-started").first()
+
+if auth is not None:
+ src = authenticated_to_dirname(auth)
+ dst = os.path.join(args.output_tree, "authenticated")
+ if os.path.exists(dst):
+ os.unlink(dst)
+ os.symlink(src, dst)
diff --git a/rp/rcynic/rcynic-html b/rp/rcynic/rcynic-html
index ef566440..154193b2 100755
--- a/rp/rcynic/rcynic-html
+++ b/rp/rcynic/rcynic-html
@@ -32,361 +32,363 @@ import copy
import rpki.autoconf
try:
- from lxml.etree import (ElementTree, Element, SubElement, Comment)
+ from lxml.etree import (ElementTree, Element, SubElement, Comment)
except ImportError:
- from xml.etree.ElementTree import (ElementTree, Element, SubElement, Comment)
+ from xml.etree.ElementTree import (ElementTree, Element, SubElement, Comment)
session = None
args = None
def parse_options():
- global args
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("--refresh", type = int, default = 1800,
- help = "refresh interval for generated HTML")
- parser.add_argument("--hide-problems", action = "store_true",
- help = "don't generate \"problems\" page")
- parser.add_argument("--hide-graphs", action = "store_true",
- help = "don't generate graphs")
- parser.add_argument("--hide-object-counts", action = "store_true",
- help = "don't display object counts")
- parser.add_argument("--dont-update-rrds", action = "store_true",
- help = "don't add new data to RRD databases")
- parser.add_argument("--png-height", type = int, default = 190,
- help = "height of PNG images")
- parser.add_argument("--png-width", type = int, default = 1350,
- help = "width of PNG images")
- parser.add_argument("--svg-height", type = int, default = 600,
- help = "height of SVG images")
- parser.add_argument("--svg-width", type = int, default = 1200,
- help = "width of SVG images")
- parser.add_argument("--eps-height", type = int, default = 0,
- help = "height of EPS images")
- parser.add_argument("--eps-width", type = int, default = 0,
- help = "width of EPS images")
- parser.add_argument("--rrdtool-binary", default = rpki.autoconf.RRDTOOL,
- help = "location of rrdtool binary")
- parser.add_argument("input_file", type = argparse.FileType("r"),
- help = "XML input file")
- parser.add_argument("output_directory",
- help = "output directory")
- args = parser.parse_args()
+ global args # pylint: disable=W0603
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("--refresh", type = int, default = 1800,
+ help = "refresh interval for generated HTML")
+ parser.add_argument("--hide-problems", action = "store_true",
+ help = "don't generate \"problems\" page")
+ parser.add_argument("--hide-graphs", action = "store_true",
+ help = "don't generate graphs")
+ parser.add_argument("--hide-object-counts", action = "store_true",
+ help = "don't display object counts")
+ parser.add_argument("--dont-update-rrds", action = "store_true",
+ help = "don't add new data to RRD databases")
+ parser.add_argument("--png-height", type = int, default = 190,
+ help = "height of PNG images")
+ parser.add_argument("--png-width", type = int, default = 1350,
+ help = "width of PNG images")
+ parser.add_argument("--svg-height", type = int, default = 600,
+ help = "height of SVG images")
+ parser.add_argument("--svg-width", type = int, default = 1200,
+ help = "width of SVG images")
+ parser.add_argument("--eps-height", type = int, default = 0,
+ help = "height of EPS images")
+ parser.add_argument("--eps-width", type = int, default = 0,
+ help = "width of EPS images")
+ parser.add_argument("--rrdtool-binary", default = rpki.autoconf.RRDTOOL,
+ help = "location of rrdtool binary")
+ parser.add_argument("input_file", type = argparse.FileType("r"),
+ help = "XML input file")
+ parser.add_argument("output_directory",
+ help = "output directory")
+ args = parser.parse_args()
def parse_utc(s):
- return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ")))
+ return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ")))
class Label(object):
- moods = ["bad", "warn", "good"]
+ moods = ["bad", "warn", "good"]
- def __init__(self, elt):
- self.code = elt.tag
- self.mood = elt.get("kind")
- self.text = elt.text.strip()
- self.count = 0
+ def __init__(self, elt):
+ self.code = elt.tag
+ self.mood = elt.get("kind")
+ self.text = elt.text.strip()
+ self.count = 0
- def get_count(self):
- return self.count
+ def get_count(self):
+ return self.count
- @property
- def sort_key(self):
- try:
- return self.moods.index(self.mood)
- except ValueError:
- return len(self.moods)
+ @property
+ def sort_key(self):
+ try:
+ return self.moods.index(self.mood)
+ except ValueError:
+ return len(self.moods)
class Validation_Status(object):
- def __init__(self, elt, label_map):
- self.uri = elt.text.strip()
- self.timestamp = elt.get("timestamp")
- self.generation = elt.get("generation")
- self.hostname = urlparse.urlparse(self.uri).hostname or "[None]"
- self.fn2 = os.path.splitext(self.uri)[1] or None if self.generation else None
- self.label = label_map[elt.get("status")]
+ def __init__(self, elt, label_map):
+ self.uri = elt.text.strip()
+ self.timestamp = elt.get("timestamp")
+ self.generation = elt.get("generation")
+ self.hostname = urlparse.urlparse(self.uri).hostname or "[None]"
+ self.fn2 = os.path.splitext(self.uri)[1] or None if self.generation else None
+ self.label = label_map[elt.get("status")]
- def sort_key(self):
- return (self.label.sort_key, self.timestamp, self.hostname, self.fn2, self.generation)
+ def sort_key(self):
+ return (self.label.sort_key, self.timestamp, self.hostname, self.fn2, self.generation)
- @property
- def code(self):
- return self.label.code
+ @property
+ def code(self):
+ return self.label.code
- @property
- def mood(self):
- return self.label.mood
+ @property
+ def mood(self):
+ return self.label.mood
- @property
- def accepted(self):
- return self.label.code == "object_accepted"
+ @property
+ def accepted(self):
+ return self.label.code == "object_accepted"
- @property
- def rejected(self):
- return self.label.code == "object_rejected"
+ @property
+ def rejected(self):
+ return self.label.code == "object_rejected"
- @property
- def is_current(self):
- return self.generation == "current"
+ @property
+ def is_current(self):
+ return self.generation == "current"
- @property
- def is_backup(self):
- return self.generation == "backup"
+ @property
+ def is_backup(self):
+ return self.generation == "backup"
- @property
- def is_problem(self):
- return self.label.mood != "good"
+ @property
+ def is_problem(self):
+ return self.label.mood != "good"
- @property
- def is_connection_problem(self):
- return self.label.mood != "good" and self.label.code.startswith("rsync_transfer_")
+ @property
+ def is_connection_problem(self):
+ return self.label.mood != "good" and self.label.code.startswith("rsync_transfer_")
- @property
- def is_object_problem(self):
- return self.label.mood != "good" and not self.label.code.startswith("rsync_transfer_")
+ @property
+ def is_object_problem(self):
+ return self.label.mood != "good" and not self.label.code.startswith("rsync_transfer_")
- @property
- def is_connection_detail(self):
- return self.label.code.startswith("rsync_transfer_")
+ @property
+ def is_connection_detail(self):
+ return self.label.code.startswith("rsync_transfer_")
- @property
- def is_object_detail(self):
- return not self.label.code.startswith("rsync_transfer_")
+ @property
+ def is_object_detail(self):
+ return not self.label.code.startswith("rsync_transfer_")
class Problem_Mixin(object):
- @property
- def connection_problems(self):
- result = [v for v in self.validation_status if v.is_connection_problem]
- result.sort(key = Validation_Status.sort_key)
- return result
+ # pylint: disable=E1101
- @property
- def object_problems(self):
- result = [v for v in self.validation_status if v.is_object_problem]
- result.sort(key = Validation_Status.sort_key)
- return result
+ @property
+ def connection_problems(self):
+ result = [v for v in self.validation_status if v.is_connection_problem]
+ result.sort(key = Validation_Status.sort_key)
+ return result
+
+ @property
+ def object_problems(self):
+ result = [v for v in self.validation_status if v.is_object_problem]
+ result.sort(key = Validation_Status.sort_key)
+ return result
class Host(Problem_Mixin):
- def __init__(self, hostname, timestamp):
- self.hostname = hostname
- self.timestamp = timestamp
- self.elapsed = 0
- self.connections = 0
- self.failures = 0
- self.uris = set()
- self.graph = None
- self.counters = {}
- self.totals = {}
- self.validation_status = []
-
- def add_connection(self, elt):
- self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
- self.connections += 1
- if elt.get("error") is not None:
- self.failures += 1
-
- def add_validation_status(self, v):
- self.validation_status.append(v)
- if v.generation == "current":
- self.uris.add(v.uri)
- self.counters[(v.fn2, v.generation, v.label)] = self.get_counter(v.fn2, v.generation, v.label) + 1
- self.totals[v.label] = self.get_total(v.label) + 1
- v.label.count += 1
-
- def get_counter(self, fn2, generation, label):
- return self.counters.get((fn2, generation, label), 0)
-
- def get_total(self, label):
- return self.totals.get(label, 0)
-
- @property
- def failed(self):
- return 1 if self.failures > 0 else 0
-
- @property
- def objects(self):
- return len(self.uris)
-
- field_table = (("connections", "GAUGE"),
- ("objects", "GAUGE"),
- ("elapsed", "GAUGE"),
- ("failed", "ABSOLUTE"))
-
- rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps
- for steps in (1, 4, 24))
-
- @classmethod
- def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"):
- return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum)
- for field in cls.field_table]
-
- @property
- def field_values(self):
- return tuple(str(getattr(self, field[0])) for field in self.field_table)
-
- @classmethod
- def field_defs(cls, filebase):
- return ["DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], filebase, field[0])
- for field in cls.field_table]
-
- graph_opts = (
- "--vertical-label", "Sync time (seconds)",
- "--right-axis-label", "Objects (count)",
- "--lower-limit", "0",
- "--right-axis", "1:0",
- "--full-size-mode" )
-
- graph_cmds = (
-
- # Split elapsed into separate data sets, so we can color
- # differently to indicate how succesful transfer was. Intent is
- # that exactly one of these be defined for every value in elapsed.
-
- r"CDEF:success=failed,UNKN,elapsed,IF",
- r"CDEF:failure=connections,1,EQ,failed,*,elapsed,UNKN,IF",
- r"CDEF:partial=connections,1,NE,failed,*,elapsed,UNKN,IF",
-
- # Show connection timing first, as color-coded semi-transparent
- # areas with opaque borders. Intent is to make the colors stand
- # out, since they're a major health indicator. Transparency is
- # handled via an alpha channel (fourth octet of color code). We
- # draw this stuff first so that later lines can overwrite it.
-
- r"AREA:success#00FF0080:Sync time (success)",
- r"AREA:partial#FFA50080:Sync time (partial failure)",
- r"AREA:failure#FF000080:Sync time (total failure)",
-
- r"LINE1:success#00FF00", # Green
- r"LINE1:partial#FFA500", # Orange
- r"LINE1:failure#FF0000", # Red
-
- # Now show object counts, as a simple black line.
-
- r"LINE1:objects#000000:Objects", # Black
-
- # Add averages over period to chart legend.
-
- r"VDEF:avg_elapsed=elapsed,AVERAGE",
- r"VDEF:avg_connections=connections,AVERAGE",
- r"VDEF:avg_objects=objects,AVERAGE",
- r"COMMENT:\j",
- r"GPRINT:avg_elapsed:Average sync time (seconds)\: %5.2lf",
- r"GPRINT:avg_connections:Average connection count\: %5.2lf",
- r"GPRINT:avg_objects:Average object count\: %5.2lf" )
-
- graph_periods = (("week", "-1w"),
- ("month", "-31d"),
- ("year", "-1y"))
-
- def rrd_run(self, cmd):
- try:
- cmd = [str(i) for i in cmd]
- cmd.insert(0, args.rrdtool_binary)
- subprocess.check_call(cmd, stdout = open("/dev/null", "w"))
- except OSError, e:
- sys.exit("Problem running %s, perhaps you need to set --rrdtool-binary? (%s)" % (args.rrdtool_binary, e))
- except subprocess.CalledProcessError, e:
- sys.exit("Failure running %s: %s" % (args.rrdtool_binary, e))
-
- def rrd_update(self):
- filename = os.path.join(args.output_directory, self.hostname) + ".rrd"
- if not os.path.exists(filename):
- cmd = ["create", filename, "--start", self.timestamp - 1, "--step", "3600"]
- cmd.extend(self.field_ds_specifiers())
- cmd.extend(self.rras)
- self.rrd_run(cmd)
- self.rrd_run(["update", filename,
- "%s:%s" % (self.timestamp, ":".join(str(v) for v in self.field_values))])
-
- def rrd_graph(self, html):
- # pylint: disable=W0622
- filebase = os.path.join(args.output_directory, self.hostname)
- formats = [format for format in ("png", "svg", "eps")
- if getattr(args, format + "_width") and getattr(args, format + "_height")]
- for period, start in self.graph_periods:
- for format in formats:
- cmds = [ "graph", "%s_%s.%s" % (filebase, period, format),
- "--title", "%s last %s" % (self.hostname, period),
- "--start", start,
- "--width", getattr(args, format + "_width"),
- "--height", getattr(args, format + "_height"),
- "--imgformat", format.upper() ]
- cmds.extend(self.graph_opts)
- cmds.extend(self.field_defs(filebase))
- cmds.extend(self.graph_cmds)
- self.rrd_run(cmds)
- img = Element("img", src = "%s_%s.png" % (self.hostname, period),
- width = str(args.png_width),
- height = str(args.png_height))
- if self.graph is None:
- self.graph = copy.copy(img)
- html.BodyElement("h2").text = "%s over last %s" % (self.hostname, period)
- html.BodyElement("a", href = "%s_%s_svg.html" % (self.hostname, period)).append(img)
- html.BodyElement("br")
- svg_html = HTML("%s over last %s" % (self.hostname, period),
- "%s_%s_svg" % (self.hostname, period))
- svg_html.BodyElement("img", src = "%s_%s.svg" % (self.hostname, period))
- svg_html.close()
+ def __init__(self, hostname, timestamp):
+ self.hostname = hostname
+ self.timestamp = timestamp
+ self.elapsed = 0
+ self.connections = 0
+ self.failures = 0
+ self.uris = set()
+ self.graph = None
+ self.counters = {}
+ self.totals = {}
+ self.validation_status = []
+
+ def add_connection(self, elt):
+ self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
+ self.connections += 1
+ if elt.get("error") is not None:
+ self.failures += 1
+
+ def add_validation_status(self, v):
+ self.validation_status.append(v)
+ if v.generation == "current":
+ self.uris.add(v.uri)
+ self.counters[(v.fn2, v.generation, v.label)] = self.get_counter(v.fn2, v.generation, v.label) + 1
+ self.totals[v.label] = self.get_total(v.label) + 1
+ v.label.count += 1
+
+ def get_counter(self, fn2, generation, label):
+ return self.counters.get((fn2, generation, label), 0)
+
+ def get_total(self, label):
+ return self.totals.get(label, 0)
+
+ @property
+ def failed(self):
+ return 1 if self.failures > 0 else 0
+
+ @property
+ def objects(self):
+ return len(self.uris)
+
+ field_table = (("connections", "GAUGE"),
+ ("objects", "GAUGE"),
+ ("elapsed", "GAUGE"),
+ ("failed", "ABSOLUTE"))
+
+ rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps
+ for steps in (1, 4, 24))
+
+ @classmethod
+ def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"):
+ return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum)
+ for field in cls.field_table]
+
+ @property
+ def field_values(self):
+ return tuple(str(getattr(self, field[0])) for field in self.field_table)
+
+ @classmethod
+ def field_defs(cls, filebase):
+ return ["DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], filebase, field[0])
+ for field in cls.field_table]
+
+ graph_opts = (
+ "--vertical-label", "Sync time (seconds)",
+ "--right-axis-label", "Objects (count)",
+ "--lower-limit", "0",
+ "--right-axis", "1:0",
+ "--full-size-mode" )
+
+ graph_cmds = (
+
+ # Split elapsed into separate data sets, so we can color
+    # differently to indicate how successful transfer was.  Intent is
+ # that exactly one of these be defined for every value in elapsed.
+
+ r"CDEF:success=failed,UNKN,elapsed,IF",
+ r"CDEF:failure=connections,1,EQ,failed,*,elapsed,UNKN,IF",
+ r"CDEF:partial=connections,1,NE,failed,*,elapsed,UNKN,IF",
+
+ # Show connection timing first, as color-coded semi-transparent
+ # areas with opaque borders. Intent is to make the colors stand
+ # out, since they're a major health indicator. Transparency is
+ # handled via an alpha channel (fourth octet of color code). We
+ # draw this stuff first so that later lines can overwrite it.
+
+ r"AREA:success#00FF0080:Sync time (success)",
+ r"AREA:partial#FFA50080:Sync time (partial failure)",
+ r"AREA:failure#FF000080:Sync time (total failure)",
+
+ r"LINE1:success#00FF00", # Green
+ r"LINE1:partial#FFA500", # Orange
+ r"LINE1:failure#FF0000", # Red
+
+ # Now show object counts, as a simple black line.
+
+ r"LINE1:objects#000000:Objects", # Black
+
+ # Add averages over period to chart legend.
+
+ r"VDEF:avg_elapsed=elapsed,AVERAGE",
+ r"VDEF:avg_connections=connections,AVERAGE",
+ r"VDEF:avg_objects=objects,AVERAGE",
+ r"COMMENT:\j",
+ r"GPRINT:avg_elapsed:Average sync time (seconds)\: %5.2lf",
+ r"GPRINT:avg_connections:Average connection count\: %5.2lf",
+ r"GPRINT:avg_objects:Average object count\: %5.2lf" )
+
+ graph_periods = (("week", "-1w"),
+ ("month", "-31d"),
+ ("year", "-1y"))
+
+ def rrd_run(self, cmd):
+ try:
+ cmd = [str(i) for i in cmd]
+ cmd.insert(0, args.rrdtool_binary)
+ subprocess.check_call(cmd, stdout = open("/dev/null", "w"))
+ except OSError, e:
+ sys.exit("Problem running %s, perhaps you need to set --rrdtool-binary? (%s)" % (args.rrdtool_binary, e))
+ except subprocess.CalledProcessError, e:
+ sys.exit("Failure running %s: %s" % (args.rrdtool_binary, e))
+
+ def rrd_update(self):
+ filename = os.path.join(args.output_directory, self.hostname) + ".rrd"
+ if not os.path.exists(filename):
+ cmd = ["create", filename, "--start", self.timestamp - 1, "--step", "3600"]
+ cmd.extend(self.field_ds_specifiers())
+ cmd.extend(self.rras)
+ self.rrd_run(cmd)
+ self.rrd_run(["update", filename,
+ "%s:%s" % (self.timestamp, ":".join(str(v) for v in self.field_values))])
+
+ def rrd_graph(self, html):
+ # pylint: disable=W0622
+ filebase = os.path.join(args.output_directory, self.hostname)
+ formats = [format for format in ("png", "svg", "eps")
+ if getattr(args, format + "_width") and getattr(args, format + "_height")]
+ for period, start in self.graph_periods:
+ for format in formats:
+ cmds = [ "graph", "%s_%s.%s" % (filebase, period, format),
+ "--title", "%s last %s" % (self.hostname, period),
+ "--start", start,
+ "--width", getattr(args, format + "_width"),
+ "--height", getattr(args, format + "_height"),
+ "--imgformat", format.upper() ]
+ cmds.extend(self.graph_opts)
+ cmds.extend(self.field_defs(filebase))
+ cmds.extend(self.graph_cmds)
+ self.rrd_run(cmds)
+ img = Element("img", src = "%s_%s.png" % (self.hostname, period),
+ width = str(args.png_width),
+ height = str(args.png_height))
+ if self.graph is None:
+ self.graph = copy.copy(img)
+ html.BodyElement("h2").text = "%s over last %s" % (self.hostname, period)
+ html.BodyElement("a", href = "%s_%s_svg.html" % (self.hostname, period)).append(img)
+ html.BodyElement("br")
+ svg_html = HTML("%s over last %s" % (self.hostname, period),
+ "%s_%s_svg" % (self.hostname, period))
+ svg_html.BodyElement("img", src = "%s_%s.svg" % (self.hostname, period))
+ svg_html.close()
class Session(Problem_Mixin):
- def __init__(self):
- self.hosts = {}
+ def __init__(self):
+ self.hosts = {}
- self.root = ElementTree(file = args.input_file).getroot()
+ self.root = ElementTree(file = args.input_file).getroot()
- self.rcynic_version = self.root.get("rcynic-version")
- self.rcynic_date = self.root.get("date")
- self.timestamp = parse_utc(self.rcynic_date)
+ self.rcynic_version = self.root.get("rcynic-version")
+ self.rcynic_date = self.root.get("date")
+ self.timestamp = parse_utc(self.rcynic_date)
- self.labels = [Label(elt) for elt in self.root.find("labels")]
- self.load_validation_status()
+ self.labels = [Label(elt) for elt in self.root.find("labels")]
+ self.load_validation_status()
- for elt in self.root.findall("rsync_history"):
- self.get_host(urlparse.urlparse(elt.text.strip()).hostname).add_connection(elt)
+ for elt in self.root.findall("rsync_history"):
+ self.get_host(urlparse.urlparse(elt.text.strip()).hostname).add_connection(elt)
- generations = set()
- fn2s = set()
+ generations = set()
+ fn2s = set()
- for v in self.validation_status:
- self.get_host(v.hostname).add_validation_status(v)
- generations.add(v.generation)
- fn2s.add(v.fn2)
+ for v in self.validation_status:
+ self.get_host(v.hostname).add_validation_status(v)
+ generations.add(v.generation)
+ fn2s.add(v.fn2)
- self.labels = [l for l in self.labels if l.count > 0]
+ self.labels = [l for l in self.labels if l.count > 0]
- self.hostnames = sorted(self.hosts)
- self.generations = sorted(generations)
- self.fn2s = sorted(fn2s)
+ self.hostnames = sorted(self.hosts)
+ self.generations = sorted(generations)
+ self.fn2s = sorted(fn2s)
- def load_validation_status(self):
- label_map = dict((label.code, label) for label in self.labels)
- full_validation_status = [Validation_Status(elt, label_map)
- for elt in self.root.findall("validation_status")]
- accepted_current = set(v.uri for v in full_validation_status
- if v.is_current and v.accepted)
- self.validation_status = [v for v in full_validation_status
- if not v.is_backup
- or v.uri not in accepted_current]
+ def load_validation_status(self):
+ label_map = dict((label.code, label) for label in self.labels)
+ full_validation_status = [Validation_Status(elt, label_map)
+ for elt in self.root.findall("validation_status")]
+ accepted_current = set(v.uri for v in full_validation_status
+ if v.is_current and v.accepted)
+ self.validation_status = [v for v in full_validation_status
+ if not v.is_backup
+ or v.uri not in accepted_current]
- def get_host(self, hostname):
- if hostname not in self.hosts:
- self.hosts[hostname] = Host(hostname, self.timestamp)
- return self.hosts[hostname]
+ def get_host(self, hostname):
+ if hostname not in self.hosts:
+ self.hosts[hostname] = Host(hostname, self.timestamp)
+ return self.hosts[hostname]
- def get_sum(self, fn2, generation, label):
- return sum(h.get_counter(fn2, generation, label)
- for h in self.hosts.itervalues())
+ def get_sum(self, fn2, generation, label):
+ return sum(h.get_counter(fn2, generation, label)
+ for h in self.hosts.itervalues())
- def rrd_update(self):
- if not args.dont_update_rrds:
- for h in self.hosts.itervalues():
- h.rrd_update()
+ def rrd_update(self):
+ if not args.dont_update_rrds:
+ for h in self.hosts.itervalues():
+ h.rrd_update()
css = '''
th, td {
@@ -475,183 +477,183 @@ css = '''
class HTML(object):
- def __init__(self, title, filebase):
+ def __init__(self, title, filebase):
+
+ self.filename = os.path.join(args.output_directory, filebase + ".html")
+
+ self.html = Element("html")
+ self.html.append(Comment(" Generators:\n" +
+ " " + session.rcynic_version + "\n" +
+ " $Id$\n"))
+ self.head = SubElement(self.html, "head")
+ self.body = SubElement(self.html, "body")
+
+ title += " " + session.rcynic_date
+ SubElement(self.head, "title").text = title
+ SubElement(self.body, "h1").text = title
+ SubElement(self.head, "style", type = "text/css").text = css
+
+ if args.refresh:
+ SubElement(self.head, "meta", { "http-equiv" : "Refresh", "content" : str(args.refresh) })
+
+ hostwidth = max(len(hostname) for hostname in session.hostnames)
+
+ toc = SubElement(self.body, "ul", id = "nav")
+ SubElement(SubElement(toc, "li"), "a", href = "index.html").text = "Overview"
+ li = SubElement(toc, "li")
+ SubElement(li, "span").text = "Repositories"
+ ul = SubElement(li, "ul", style = "width: %sem" % hostwidth)
+ for hostname in session.hostnames:
+ SubElement(SubElement(ul, "li"), "a", href = "%s.html" % hostname).text = hostname
+ SubElement(SubElement(toc, "li"), "a", href = "problems.html").text = "Problems"
+ li = SubElement(toc, "li")
+ SubElement(li, "span").text = "All Details"
+ ul = SubElement(li, "ul", style = "width: 15em")
+ SubElement(SubElement(ul, "li"), "a", href = "connections.html").text = "All Connections"
+ SubElement(SubElement(ul, "li"), "a", href = "objects.html").text = "All Objects"
+ SubElement(self.body, "br")
+
+ def close(self):
+ ElementTree(element = self.html).write(self.filename)
+
+ def BodyElement(self, tag, **attrib):
+ return SubElement(self.body, tag, **attrib)
+
+ def counter_table(self, data_func, total_func):
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tfoot = SubElement(table, "tfoot")
+ tbody = SubElement(table, "tbody")
+ tr = SubElement(thead, "tr")
+ SubElement(tr, "th")
+ for label in session.labels:
+ SubElement(tr, "th").text = label.text
+ for fn2 in session.fn2s:
+ for generation in session.generations:
+ counters = [data_func(fn2, generation, label) for label in session.labels]
+ if sum(counters) > 0:
+ tr = SubElement(tbody, "tr")
+ SubElement(tr, "td").text = ((generation or "") + " " + (fn2 or "")).strip()
+ for label, count in zip(session.labels, counters):
+ td = SubElement(tr, "td")
+ if count > 0:
+ td.set("class", label.mood)
+ td.text = str(count)
+ tr = SubElement(tfoot, "tr")
+ SubElement(tr, "td").text = "Total"
+ counters = [total_func(label) for label in session.labels]
+ for label, count in zip(session.labels, counters):
+ td = SubElement(tr, "td")
+ if count > 0:
+ td.set("class", label.mood)
+ td.text = str(count)
+ return table
+
+ def object_count_table(self, session): # pylint: disable=W0621
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tbody = SubElement(table, "tbody")
+ tfoot = SubElement(table, "tfoot")
+ fn2s = [fn2 for fn2 in session.fn2s if fn2 is not None]
+ total = dict((fn2, 0) for fn2 in fn2s)
+ for hostname in session.hostnames:
+ tr = SubElement(tbody, "tr")
+ SubElement(tr, "td").text = hostname
+ for fn2 in fn2s:
+ td = SubElement(tr, "td")
+ count = sum(uri.endswith(fn2) for uri in session.hosts[hostname].uris)
+ total[fn2] += count
+ if count > 0:
+ td.text = str(count)
+ trhead = SubElement(thead, "tr")
+ trfoot = SubElement(tfoot, "tr")
+ SubElement(trhead, "th").text = "Repository"
+ SubElement(trfoot, "td").text = "Total"
+ for fn2 in fn2s:
+ SubElement(trhead, "th").text = fn2
+ SubElement(trfoot, "td").text = str(total[fn2])
+ return table
+
+ def detail_table(self, records):
+ if records:
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tbody = SubElement(table, "tbody")
+ tr = SubElement(thead, "tr")
+ SubElement(tr, "th").text = "Timestamp"
+ SubElement(tr, "th").text = "Generation"
+ SubElement(tr, "th").text = "Status"
+ SubElement(tr, "th").text = "URI"
+ for v in records:
+ tr = SubElement(tbody, "tr", { "class" : v.mood })
+ SubElement(tr, "td").text = v.timestamp
+ SubElement(tr, "td").text = v.generation
+ SubElement(tr, "td").text = v.label.text
+ SubElement(tr, "td", { "class" : "uri"}).text = v.uri
+ return table
+ else:
+ self.BodyElement("p").text = "None found"
+ return None
- self.filename = os.path.join(args.output_directory, filebase + ".html")
+def main():
- self.html = Element("html")
- self.html.append(Comment(" Generators:\n" +
- " " + session.rcynic_version + "\n" +
- " $Id$\n"))
- self.head = SubElement(self.html, "head")
- self.body = SubElement(self.html, "body")
+ global session # pylint: disable=W0603
- title += " " + session.rcynic_date
- SubElement(self.head, "title").text = title
- SubElement(self.body, "h1").text = title
- SubElement(self.head, "style", type = "text/css").text = css
+ os.putenv("TZ", "UTC")
+ time.tzset()
- if args.refresh:
- SubElement(self.head, "meta", { "http-equiv" : "Refresh", "content" : str(args.refresh) })
+ parse_options()
- hostwidth = max(len(hostname) for hostname in session.hostnames)
+ session = Session()
+ session.rrd_update()
- toc = SubElement(self.body, "ul", id = "nav")
- SubElement(SubElement(toc, "li"), "a", href = "index.html").text = "Overview"
- li = SubElement(toc, "li")
- SubElement(li, "span").text = "Repositories"
- ul = SubElement(li, "ul", style = "width: %sem" % hostwidth)
for hostname in session.hostnames:
- SubElement(SubElement(ul, "li"), "a", href = "%s.html" % hostname).text = hostname
- SubElement(SubElement(toc, "li"), "a", href = "problems.html").text = "Problems"
- li = SubElement(toc, "li")
- SubElement(li, "span").text = "All Details"
- ul = SubElement(li, "ul", style = "width: 15em")
- SubElement(SubElement(ul, "li"), "a", href = "connections.html").text = "All Connections"
- SubElement(SubElement(ul, "li"), "a", href = "objects.html").text = "All Objects"
- SubElement(self.body, "br")
-
- def close(self):
- ElementTree(element = self.html).write(self.filename)
-
- def BodyElement(self, tag, **attrib):
- return SubElement(self.body, tag, **attrib)
-
- def counter_table(self, data_func, total_func):
- table = self.BodyElement("table", rules = "all", border = "1")
- thead = SubElement(table, "thead")
- tfoot = SubElement(table, "tfoot")
- tbody = SubElement(table, "tbody")
- tr = SubElement(thead, "tr")
- SubElement(tr, "th")
- for label in session.labels:
- SubElement(tr, "th").text = label.text
- for fn2 in session.fn2s:
- for generation in session.generations:
- counters = [data_func(fn2, generation, label) for label in session.labels]
- if sum(counters) > 0:
- tr = SubElement(tbody, "tr")
- SubElement(tr, "td").text = ((generation or "") + " " + (fn2 or "")).strip()
- for label, count in zip(session.labels, counters):
- td = SubElement(tr, "td")
- if count > 0:
- td.set("class", label.mood)
- td.text = str(count)
- tr = SubElement(tfoot, "tr")
- SubElement(tr, "td").text = "Total"
- counters = [total_func(label) for label in session.labels]
- for label, count in zip(session.labels, counters):
- td = SubElement(tr, "td")
- if count > 0:
- td.set("class", label.mood)
- td.text = str(count)
- return table
-
- def object_count_table(self, session): # pylint: disable=W0621
- table = self.BodyElement("table", rules = "all", border = "1")
- thead = SubElement(table, "thead")
- tbody = SubElement(table, "tbody")
- tfoot = SubElement(table, "tfoot")
- fn2s = [fn2 for fn2 in session.fn2s if fn2 is not None]
- total = dict((fn2, 0) for fn2 in fn2s)
+ html = HTML("Repository details for %s" % hostname, hostname)
+ html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
+ if not args.hide_graphs:
+ session.hosts[hostname].rrd_graph(html)
+ if not args.hide_problems:
+ html.BodyElement("h2").text = "Connection Problems"
+ html.detail_table(session.hosts[hostname].connection_problems)
+ html.BodyElement("h2").text = "Object Problems"
+ html.detail_table(session.hosts[hostname].object_problems)
+ html.close()
+
+ html = HTML("rcynic summary", "index")
+ html.BodyElement("h2").text = "Grand totals for all repositories"
+ html.counter_table(session.get_sum, Label.get_count)
+ if not args.hide_object_counts:
+ html.BodyElement("br")
+ html.BodyElement("hr")
+ html.BodyElement("br")
+ html.BodyElement("h2").text = "Current total object counts (distinct URIs)"
+ html.object_count_table(session)
for hostname in session.hostnames:
- tr = SubElement(tbody, "tr")
- SubElement(tr, "td").text = hostname
- for fn2 in fn2s:
- td = SubElement(tr, "td")
- count = sum(uri.endswith(fn2) for uri in session.hosts[hostname].uris)
- total[fn2] += count
- if count > 0:
- td.text = str(count)
- trhead = SubElement(thead, "tr")
- trfoot = SubElement(tfoot, "tr")
- SubElement(trhead, "th").text = "Repository"
- SubElement(trfoot, "td").text = "Total"
- for fn2 in fn2s:
- SubElement(trhead, "th").text = fn2
- SubElement(trfoot, "td").text = str(total[fn2])
- return table
-
- def detail_table(self, records):
- if records:
- table = self.BodyElement("table", rules = "all", border = "1")
- thead = SubElement(table, "thead")
- tbody = SubElement(table, "tbody")
- tr = SubElement(thead, "tr")
- SubElement(tr, "th").text = "Timestamp"
- SubElement(tr, "th").text = "Generation"
- SubElement(tr, "th").text = "Status"
- SubElement(tr, "th").text = "URI"
- for v in records:
- tr = SubElement(tbody, "tr", { "class" : v.mood })
- SubElement(tr, "td").text = v.timestamp
- SubElement(tr, "td").text = v.generation
- SubElement(tr, "td").text = v.label.text
- SubElement(tr, "td", { "class" : "uri"}).text = v.uri
- return table
- else:
- self.BodyElement("p").text = "None found"
- return None
-
-def main():
-
- global session
-
- os.putenv("TZ", "UTC")
- time.tzset()
-
- parse_options()
+ html.BodyElement("br")
+ html.BodyElement("hr")
+ html.BodyElement("br")
+ html.BodyElement("h2").text = "Overview for repository %s" % hostname
+ html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
+ if not args.hide_graphs:
+ html.BodyElement("br")
+ html.BodyElement("a", href = "%s.html" % hostname).append(session.hosts[hostname].graph)
+ html.close()
- session = Session()
- session.rrd_update()
+ html = HTML("Problems", "problems")
+ html.BodyElement("h2").text = "Connection Problems"
+ html.detail_table(session.connection_problems)
+ html.BodyElement("h2").text = "Object Problems"
+ html.detail_table(session.object_problems)
+ html.close()
- for hostname in session.hostnames:
- html = HTML("Repository details for %s" % hostname, hostname)
- html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
- if not args.hide_graphs:
- session.hosts[hostname].rrd_graph(html)
- if not args.hide_problems:
- html.BodyElement("h2").text = "Connection Problems"
- html.detail_table(session.hosts[hostname].connection_problems)
- html.BodyElement("h2").text = "Object Problems"
- html.detail_table(session.hosts[hostname].object_problems)
+ html = HTML("All connections", "connections")
+ html.detail_table([v for v in session.validation_status if v.is_connection_detail])
html.close()
- html = HTML("rcynic summary", "index")
- html.BodyElement("h2").text = "Grand totals for all repositories"
- html.counter_table(session.get_sum, Label.get_count)
- if not args.hide_object_counts:
- html.BodyElement("br")
- html.BodyElement("hr")
- html.BodyElement("br")
- html.BodyElement("h2").text = "Current total object counts (distinct URIs)"
- html.object_count_table(session)
- for hostname in session.hostnames:
- html.BodyElement("br")
- html.BodyElement("hr")
- html.BodyElement("br")
- html.BodyElement("h2").text = "Overview for repository %s" % hostname
- html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
- if not args.hide_graphs:
- html.BodyElement("br")
- html.BodyElement("a", href = "%s.html" % hostname).append(session.hosts[hostname].graph)
- html.close()
-
- html = HTML("Problems", "problems")
- html.BodyElement("h2").text = "Connection Problems"
- html.detail_table(session.connection_problems)
- html.BodyElement("h2").text = "Object Problems"
- html.detail_table(session.object_problems)
- html.close()
-
- html = HTML("All connections", "connections")
- html.detail_table([v for v in session.validation_status if v.is_connection_detail])
- html.close()
-
- html = HTML("All objects", "objects")
- html.detail_table([v for v in session.validation_status if v.is_object_detail])
- html.close()
+ html = HTML("All objects", "objects")
+ html.detail_table([v for v in session.validation_status if v.is_object_detail])
+ html.close()
if __name__ == "__main__":
- main()
+ main()
diff --git a/rp/rcynic/rcynic-svn b/rp/rcynic/rcynic-svn
index 28b24672..3c59116a 100755
--- a/rp/rcynic/rcynic-svn
+++ b/rp/rcynic/rcynic-svn
@@ -27,50 +27,50 @@ import fcntl
import os
try:
- from lxml.etree import ElementTree
+ from lxml.etree import ElementTree
except ImportError:
- from xml.etree.ElementTree import ElementTree
+ from xml.etree.ElementTree import ElementTree
mime_types = (
- ("html", "application/xhtml+xml"),
- ("cer", "application/pkix-cert"),
- ("crl", "application/pkix-crl"),
- ("mft", "application/rpki-manifest"),
- ("mnf", "application/rpki-manifest"),
- ("roa", "application/rpki-roa"),
- ("gbr", "application/rpki-ghostbusters"))
+ ("html", "application/xhtml+xml"),
+ ("cer", "application/pkix-cert"),
+ ("crl", "application/pkix-crl"),
+ ("mft", "application/rpki-manifest"),
+ ("mnf", "application/rpki-manifest"),
+ ("roa", "application/rpki-roa"),
+ ("gbr", "application/rpki-ghostbusters"))
def run(*argv, **kwargs):
- """
- Run a program, displaying timing data when appropriate.
- """
+ """
+ Run a program, displaying timing data when appropriate.
+ """
- _t0 = datetime.datetime.utcnow()
- subprocess.check_call(argv, **kwargs)
- if args.show_timing:
- _t1 = datetime.datetime.utcnow()
- print _t1, (_t1 - _t0), " ".join(argv)
+ _t0 = datetime.datetime.utcnow()
+ subprocess.check_call(argv, **kwargs)
+ if args.show_timing:
+ _t1 = datetime.datetime.utcnow()
+ print _t1, (_t1 - _t0), " ".join(argv)
def runxml(*argv):
- """
-
- Run a program which produces XML output, displaying timing data when
- appropriate and returning an ElementTree constructed from the
- program's output.
- """
- _t0 = datetime.datetime.utcnow()
- p = subprocess.Popen(argv, stdout = subprocess.PIPE)
- x = ElementTree(file = p.stdout)
- s = p.wait()
- if s:
- raise subprocess.CalledProcessError(s, argv[0])
- if args.show_timing:
- _t1 = datetime.datetime.utcnow()
- print _t1, (_t1 - _t0), " ".join(argv)
- return x
+ """
+
+ Run a program which produces XML output, displaying timing data when
+ appropriate and returning an ElementTree constructed from the
+ program's output.
+ """
+ _t0 = datetime.datetime.utcnow()
+ p = subprocess.Popen(argv, stdout = subprocess.PIPE)
+ x = ElementTree(file = p.stdout)
+ s = p.wait()
+ if s:
+ raise subprocess.CalledProcessError(s, argv[0])
+ if args.show_timing:
+ _t1 = datetime.datetime.utcnow()
+ print _t1, (_t1 - _t0), " ".join(argv)
+ return x
# Main program.
@@ -120,8 +120,8 @@ parser.add_argument("working_directory", help = \
args = parser.parse_args()
if args.show_timing:
- t0 = datetime.datetime.utcnow()
- print t0, "Starting"
+ t0 = datetime.datetime.utcnow()
+ print t0, "Starting"
# Lock out other instances of this program. We may want some more
# sophsiticated approach when combining this with other programs, but
@@ -141,18 +141,18 @@ run("svn", "update", "--quiet", args.working_directory)
if args.files_to_archive:
- if args.verbatim:
- cmd = ["rsync", "--archive", "--quiet", "--delete"]
- cmd.extend(args.files_to_archive)
- cmd.append(args.working_directory)
- run(*cmd)
+ if args.verbatim:
+ cmd = ["rsync", "--archive", "--quiet", "--delete"]
+ cmd.extend(args.files_to_archive)
+ cmd.append(args.working_directory)
+ run(*cmd)
- else:
- for src in args.files_to_archive:
- cmd = ["rsync", "--archive", "--quiet", "--delete", "--copy-links"]
- cmd.append(src.rstrip("/"))
- cmd.append(args.working_directory.rstrip("/") + "/")
- run(*cmd)
+ else:
+ for src in args.files_to_archive:
+ cmd = ["rsync", "--archive", "--quiet", "--delete", "--copy-links"]
+ cmd.append(src.rstrip("/"))
+ cmd.append(args.working_directory.rstrip("/") + "/")
+ run(*cmd)
# Ask Subversion to add any new files, trying hard to get the MIME
# types right.
@@ -160,8 +160,8 @@ if args.files_to_archive:
cmd = ["svn", "add", "--quiet", "--force", "--auto-props"]
for fn2, mime_type in mime_types:
- cmd.append("--config-option")
- cmd.append("config:auto-props:*.%s=svn:mime-type=%s" % (fn2, mime_type))
+ cmd.append("--config-option")
+ cmd.append("config:auto-props:*.%s=svn:mime-type=%s" % (fn2, mime_type))
cmd.append(".")
@@ -171,15 +171,16 @@ run(*cmd, cwd = args.working_directory)
# files have been deleted, and tell Subversion that we deleted them
# intentionally.
+# pylint: disable=E1101
missing = sorted(entry.get("path")
for entry in runxml("svn", "status", "--xml", args.working_directory).find("target").findall("entry")
if entry.find("wc-status").get("item") == "missing")
deleted = []
for path in missing:
- if not any(path.startswith(r) for r in deleted):
- run("svn", "delete", "--quiet", path)
- deleted.append(path + "/")
+ if not any(path.startswith(r) for r in deleted):
+ run("svn", "delete", "--quiet", path)
+ deleted.append(path + "/")
# Commit our changes and update the working tree.
@@ -187,5 +188,5 @@ run("svn", "commit", "--quiet", "--message", "Auto update.", args.working_direct
run("svn", "update", "--quiet", args.working_directory)
if args.show_timing:
- t1 = datetime.datetime.utcnow()
- print t1, t1 - t0, "total runtime"
+ t1 = datetime.datetime.utcnow()
+ print t1, t1 - t0, "total runtime"
diff --git a/rp/rcynic/rcynic-text b/rp/rcynic/rcynic-text
index db4126ce..d4a5b23e 100755
--- a/rp/rcynic/rcynic-text
+++ b/rp/rcynic/rcynic-text
@@ -25,96 +25,96 @@ import urlparse
import textwrap
try:
- from lxml.etree import ElementTree
+ from lxml.etree import ElementTree
except ImportError:
- from xml.etree.ElementTree import ElementTree
+ from xml.etree.ElementTree import ElementTree
class Label(object):
- def __init__(self, elt):
- self.tag = elt.tag
- self.width = max(len(s) for s in elt.text.split())
- self.lines = textwrap.wrap(elt.text.strip(), width = self.width)
- self.counter = 0
+ def __init__(self, elt):
+ self.tag = elt.tag
+ self.width = max(len(s) for s in elt.text.split())
+ self.lines = textwrap.wrap(elt.text.strip(), width = self.width)
+ self.counter = 0
- def line(self, n):
- try:
- return " " + self.lines[n].center(self.width) + " "
- except IndexError:
- return " " * (self.width + 2)
+ def line(self, n):
+ try:
+ return " " + self.lines[n].center(self.width) + " "
+ except IndexError:
+ return " " * (self.width + 2)
- def add(self):
- self.counter += 1
+ def add(self):
+ self.counter += 1
- @property
- def total(self):
- return " " + str(self.counter).rjust(self.width) + " "
+ @property
+ def total(self):
+ return " " + str(self.counter).rjust(self.width) + " "
- @property
- def visible(self):
- return self.counter > 0
+ @property
+ def visible(self):
+ return self.counter > 0
class Host(object):
- def __init__(self):
- self.counters = {}
+ def __init__(self):
+ self.counters = {}
- def add(self, label):
- self.counters[label] = self.counters.get(label, 0) + 1
- label.add()
+ def add(self, label):
+ self.counters[label] = self.counters.get(label, 0) + 1
+ label.add()
- def total(self, label):
- if label in self.counters:
- return " " + str(self.counters[label]).rjust(label.width) + " "
- else:
- return " " * (label.width + 2)
+ def total(self, label):
+ if label in self.counters:
+ return " " + str(self.counters[label]).rjust(label.width) + " "
+ else:
+ return " " * (label.width + 2)
class Session(object):
- def __init__(self, labels):
- self.hosts = {}
- self.labels = labels
- self.map = dict((label.tag, label) for label in labels)
-
- def add(self, elt):
- label = self.map[elt.get("status")]
- hostname = urlparse.urlparse(elt.text.strip()).hostname
- if hostname not in self.hosts:
- self.hosts[hostname] = Host()
- self.hosts[hostname].add(label)
-
- def show(self):
- visible = [label for label in self.labels if label.visible]
- hostnames = sorted(hostname for hostname in self.hosts if hostname is not None)
- hostwidth = max(len(hostname) for hostname in hostnames + ["Hostname"])
- separator = "+-%s-+-%s-+" % (
- "-" * hostwidth,
- "-+-".join("-" * label.width for label in visible))
- print separator
- for i in xrange(max(len(label.lines) for label in visible)):
- print "| %s |%s|" % (
- ("Hostname" if i == 0 else "").ljust(hostwidth),
- "|".join(label.line(i) for label in visible))
- print separator
- for hostname in hostnames:
- print "| %s |%s|" % (
- hostname.ljust(hostwidth),
- "|".join(self.hosts[hostname].total(label) for label in visible))
- if hostnames:
- print separator
- print "| %s |%s|" % (
- "Total".ljust(hostwidth),
- "|".join(label.total for label in visible))
- print separator
+ def __init__(self, labels):
+ self.hosts = {}
+ self.labels = labels
+ self.map = dict((label.tag, label) for label in labels)
+
+ def add(self, elt):
+ label = self.map[elt.get("status")]
+ hostname = urlparse.urlparse(elt.text.strip()).hostname
+ if hostname not in self.hosts:
+ self.hosts[hostname] = Host()
+ self.hosts[hostname].add(label)
+
+ def show(self):
+ visible = [label for label in self.labels if label.visible]
+ hostnames = sorted(hostname for hostname in self.hosts if hostname is not None)
+ hostwidth = max(len(hostname) for hostname in hostnames + ["Hostname"])
+ separator = "+-%s-+-%s-+" % (
+ "-" * hostwidth,
+ "-+-".join("-" * label.width for label in visible))
+ print separator
+ for i in xrange(max(len(label.lines) for label in visible)):
+ print "| %s |%s|" % (
+ ("Hostname" if i == 0 else "").ljust(hostwidth),
+ "|".join(label.line(i) for label in visible))
+ print separator
+ for hostname in hostnames:
+ print "| %s |%s|" % (
+ hostname.ljust(hostwidth),
+ "|".join(self.hosts[hostname].total(label) for label in visible))
+ if hostnames:
+ print separator
+ print "| %s |%s|" % (
+ "Total".ljust(hostwidth),
+ "|".join(label.total for label in visible))
+ print separator
def main():
- for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
- etree = ElementTree(file = filename)
- session = Session([Label(elt) for elt in etree.find("labels")])
- for elt in etree.findall("validation_status"):
- session.add(elt)
- session.show()
+ for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
+ etree = ElementTree(file = filename)
+ session = Session([Label(elt) for elt in etree.find("labels")])
+ for elt in etree.findall("validation_status"):
+ session.add(elt)
+ session.show()
if __name__ == "__main__":
- main()
+ main()
diff --git a/rp/rcynic/rcynic.c b/rp/rcynic/rcynic.c
index d0da40f5..36c1950f 100644
--- a/rp/rcynic/rcynic.c
+++ b/rp/rcynic/rcynic.c
@@ -3190,7 +3190,7 @@ static int extract_access_uri(rcynic_ctx_t *rc,
if (OBJ_obj2nid(a->method) != nid)
continue;
++*count;
- if (!relevant((char *) a->location->d.uniformResourceIdentifier->data))
+ if (relevant && !relevant((char *) a->location->d.uniformResourceIdentifier->data))
continue;
if (sizeof(result->s) <= a->location->d.uniformResourceIdentifier->length)
log_validation_status(rc, uri, uri_too_long, generation);
@@ -3707,7 +3707,7 @@ static int check_x509(rcynic_ctx_t *rc,
int n_caIssuers = 0;
ex_count--;
if (!extract_access_uri(rc, uri, generation, aia, NID_ad_ca_issuers,
- &certinfo->aia, &n_caIssuers, is_rsync) ||
+ &certinfo->aia, &n_caIssuers, NULL) ||
!certinfo->aia.s[0] ||
sk_ACCESS_DESCRIPTION_num(aia) != n_caIssuers) {
log_validation_status(rc, uri, malformed_aia_extension, generation);
diff --git a/rp/rcynic/rcynicng b/rp/rcynic/rcynicng
new file mode 100755
index 00000000..eccd247f
--- /dev/null
+++ b/rp/rcynic/rcynicng
@@ -0,0 +1,1478 @@
+#!/usr/bin/env python
+
+# $Id$
+
+"""
+Reimplementation of rcynic in Python. Work in progress.
+"""
+
+import os
+import sys
+import ssl
+import time
+import copy
+import errno
+import shutil
+import socket
+import logging
+import argparse
+import tempfile
+import urlparse
+import subprocess
+
+import tornado.gen
+import tornado.locks
+import tornado.ioloop
+import tornado.queues
+import tornado.process
+import tornado.httpclient
+
+import rpki.POW
+import rpki.log
+import rpki.config
+import rpki.sundial
+import rpki.relaxng
+import rpki.autoconf
+
+from rpki.oids import id_kp_bgpsec_router
+
+from lxml.etree import (ElementTree, Element, SubElement, Comment,
+ XML, DocumentInvalid, XMLSyntaxError, iterparse)
+
+logger = logging.getLogger("rcynicng")
+
+xmlns = rpki.relaxng.rrdp.xmlns
+
+tag_delta = xmlns + "delta"
+tag_notification = xmlns + "notification"
+tag_publish = xmlns + "publish"
+tag_snapshot = xmlns + "snapshot"
+tag_withdraw = xmlns + "withdraw"
+
+codes = rpki.POW.validation_status
+
+
+class Status(object):
+ """
+ Validation status database, like validation_status_t in rcynic:tos.
+
+ rcynic:tos version of this data structure is stored as an AVL
+ tree, because the OpenSSL STACK_OF() sort-and-bsearch turned out
+ to be a very poor choice for the input data. Remains to be seen
+ whether we need to do something like that here too.
+ """
+
+ db = dict()
+
+ def __init__(self, uri):
+ self.uri = uri
+ self._timestamp = None
+ self.status = set()
+
+ def __str__(self):
+ return "{my.timestamp} {my.uri} {status}".format(
+ my = self, status = ",".join(str(s) for s in sorted(self.status)))
+
+ @property
+ def timestamp(self):
+ return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self._timestamp))
+
+ @classmethod
+ def get(cls, uri):
+ try:
+ return cls.db[uri].status
+ except KeyError:
+ return None
+
+ @classmethod
+ def update(cls, uri):
+ try:
+ self = cls.db[uri]
+ except KeyError:
+ self = cls.db[uri] = cls(uri)
+ self._timestamp = time.time()
+ return self.status
+
+ @classmethod
+ def add(cls, uri, *codes):
+ status = cls.update(uri)
+ for code in codes:
+ status.add(code)
+
+ @classmethod
+ def remove(cls, uri, *codes):
+ if uri in cls.db:
+ for code in codes:
+ cls.db[uri].status.discard(code)
+
+ @classmethod
+ def test(cls, uri, code):
+ return uri in cls.db and code in cls.db[uri].status
+
+
+def install_object(obj):
+ obj.obj.authenticated.add(authenticated)
+ obj.obj.save()
+
+
+class X509StoreCTX(rpki.POW.X509StoreCTX):
+
+ @classmethod
+ def subclass(cls, **kwargs):
+ return type(cls.__name__, (cls,), kwargs)
+
+ status = None
+
+ def verify_callback(self, ok):
+ err = self.getError()
+ if err in (codes.X509_V_OK.code, codes.X509_V_ERR_SUBJECT_ISSUER_MISMATCH.code):
+ return ok
+ elif err == codes.X509_V_ERR_CRL_HAS_EXPIRED.code:
+ return True
+ elif err == codes.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT.code:
+ self.status.add(codes.TRUST_ANCHOR_NOT_SELF_SIGNED)
+ return ok
+ else:
+ self.status.add(codes.find(err))
+ return ok
+
+
+class POW_Mixin(object):
+
+ @classmethod
+ def store_if_new(cls, der, uri, retrieval):
+ self = cls.derRead(der)
+ ski, aki = self.get_hex_SKI_AKI()
+ return RPKIObject.objects.get_or_create(
+ der = der,
+ defaults = dict(uri = uri,
+ aki = aki,
+ ski = ski,
+ sha256 = sha256hex(der),
+ retrieved = retrieval))
+
+ def get_hex_SKI_AKI(self):
+ cer = self.certs()[0]
+ ski = cer.getSKI()
+ aki = cer.getAKI()
+ return ski.encode("hex") if ski else "", aki.encode("hex") if aki else ""
+
+ @property
+ def uri(self):
+ return self.obj.uri
+
+ @property
+ def aki(self):
+ return self.obj.aki
+
+ @property
+ def ski(self):
+ return self.obj.ski
+
+
class X509(rpki.POW.X509, POW_Mixin):
    # Certificate wrapper adding RPKI profile checks on top of
    # rpki.POW.X509.

    def __repr__(self):
        try:
            return "<X509 \"{}\" at 0x{:x}>".format(self.uri, id(self))
        except:
            return "<X509 at 0x{:x}>".format(id(self))

    def get_hex_SKI_AKI(self):
        # Plain certificate: read SKI/AKI directly rather than via the
        # CMS default in POW_Mixin.
        ski = self.getSKI()
        aki = self.getAKI()
        return ski.encode("hex") if ski else "", aki.encode("hex") if aki else ""

    @classmethod
    def load(cls, obj, cms = None):
        """
        Construct an X509 wrapper from an RPKIObject row.  When cms is
        given, the DER comes from the first certificate inside that CMS
        object (the EE certificate) rather than from the row itself.
        Caches frequently used extension values as attributes.
        """
        if cms is not None:
            # XXX Kludge to work around lack of subclass support in rpki.POW.CMS.certs().
            der = cms.certs()[0].derWrite()
        else:
            der = obj.der
        self = cls.derRead(der)
        self.obj = obj
        self.bc = self.getBasicConstraints()
        self.eku = self.getEKU()
        self.aia = self.getAIA()
        self.sia = self.getSIA()
        self.crldp = self.getCRLDP()
        self.is_ca = self.bc is not None and self.bc[0]
        # SIA decomposes into four URI sequences (all None when absent).
        self.caDirectory, self.rpkiManifest, self.signedObjectRepository, self.rpkiNotify \
            = self.sia or (None, None, None, None)
        return self

    @staticmethod
    def count_uris(uris, scheme = "rsync://"):
        # Number of URIs in the (possibly None) sequence that use the
        # given scheme.
        count = 0
        if uris is not None:
            for uri in uris:
                if uri.startswith(scheme):
                    count += 1
        return count

    def check(self, trusted, crl):
        """
        Run RPKI profile checks on this certificate, then verify it
        against the given trusted chain and CRL.  trusted is None when
        checking a trust anchor (which then verifies against itself).
        Records status codes; returns True iff nothing "bad" was found.
        """
        #logger.debug("Starting checks for %r", self)
        status = Status.update(self.uri)
        is_ta = trusted is None
        is_routercert = (self.eku is not None and id_kp_bgpsec_router in self.eku and
                         not self.is_ca and self.uri.endswith(".cer"))
        if self.eku is not None and (self.is_ca or not self.uri.endswith(".cer")):
            status.add(codes.INAPPROPRIATE_EKU_EXTENSION)
        if is_ta and not self.is_ca:
            status.add(codes.MALFORMED_TRUST_ANCHOR)
        if is_ta and self.aia is not None:
            status.add(codes.AIA_EXTENSION_FORBIDDEN)
        if not is_ta and self.aia is None:
            status.add(codes.AIA_EXTENSION_MISSING)
        if is_routercert and self.sia is not None:
            status.add(codes.SIA_EXTENSION_FORBIDDEN)
        if not is_routercert and self.sia is None:
            status.add(codes.SIA_EXTENSION_MISSING)
        if is_ta and self.crldp is not None:
            status.add(codes.CRLDP_EXTENSION_FORBIDDEN)
        if not is_ta and self.crldp is None:
            status.add(codes.CRLDP_EXTENSION_MISSING)
        if not is_ta and not self.aki:
            status.add(codes.AKI_EXTENSION_MISSING)
        elif not is_ta and self.aki != trusted[0].ski:
            status.add(codes.AKI_EXTENSION_ISSUER_MISMATCH)
        # Serial must be positive and within the 20-octet bound used here.
        serial = self.getSerial()
        if serial <= 0 or serial > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:
            status.add(codes.BAD_CERTIFICATE_SERIAL_NUMBER)
        if self.getVersion() != 2:
            # X.509 encodes the version field zero-based, so 2 is "v3".
            status.add(codes.WRONG_OBJECT_VERSION)
        n_rsync_caIssuers = self.count_uris(self.aia)
        n_rsync_caDirectory = self.count_uris(self.caDirectory)
        n_rsync_rpkiManifest = self.count_uris(self.rpkiManifest)
        n_rsync_signedObjectRepository = self.count_uris(self.signedObjectRepository)
        if n_rsync_caIssuers > 1 or n_rsync_caDirectory > 1 or n_rsync_rpkiManifest > 1 or n_rsync_signedObjectRepository > 1:
            status.add(codes.MULTIPLE_RSYNC_URIS_IN_EXTENSION)
        if self.aia is not None and n_rsync_caIssuers == 0:
            status.add(codes.MALFORMED_AIA_EXTENSION)
        # Which SIA access methods are permitted depends on what kind
        # of certificate this is (CA, EE, or BGPsec router cert).
        if self.is_ca:
            ok = n_rsync_caDirectory != 0 and n_rsync_rpkiManifest != 0 and n_rsync_signedObjectRepository == 0
        elif not is_routercert:
            ok = n_rsync_caDirectory == 0 and n_rsync_rpkiManifest == 0 and n_rsync_signedObjectRepository != 0
        else:
            ok = self.sia is None
        if not ok:
            status.add(codes.MALFORMED_SIA_EXTENSION)
        if not is_ta and self.count_uris(self.crldp) == 0:
            status.add(codes.MALFORMED_CRLDP_EXTENSION)
        self.checkRPKIConformance(status = status, eku = id_kp_bgpsec_router if is_routercert else None)
        try:
            self.verify(trusted = [self] if trusted is None else trusted, crl = crl, policy = "1.3.6.1.5.5.7.14.2",
                        context_class = X509StoreCTX.subclass(status = status))
        except rpki.POW.ValidationError as e:
            logger.debug("%r rejected: %s", self, e)
            status.add(codes.OBJECT_REJECTED)
        codes.normalize(status)
        #logger.debug("Finished checks for %r", self)
        return not any(s.kind == "bad" for s in status)
+
+
class CRL(rpki.POW.CRL, POW_Mixin):
    # CRL wrapper adding RPKI validation checks on top of rpki.POW.CRL.

    def __repr__(self):
        try:
            return "<CRL \"{}\" at 0x{:x}>".format(self.uri, id(self))
        except:
            return "<CRL at 0x{:x}>".format(id(self))

    def get_hex_SKI_AKI(self):
        # CRLs carry only an AKI; the SKI slot is always empty.
        aki = self.getAKI()
        return "", aki.encode("hex") if aki else ""

    @classmethod
    def load(cls, obj):
        # Construct a CRL wrapper from an RPKIObject database row,
        # caching the fields we sort and check on.
        self = cls.derRead(obj.der)
        self.obj = obj
        self.thisUpdate = self.getThisUpdate()
        self.nextUpdate = self.getNextUpdate()
        self.number = self.getCRLNumber()
        return self

    def check(self, issuer):
        """
        Check this CRL against its issuing certificate: signature,
        conformance, freshness, CRL number, and issuer/AKI match.
        Records status codes; returns True iff nothing "bad" was found.
        """
        status = Status.update(self.uri)
        self.checkRPKIConformance(status = status, issuer = issuer)
        try:
            self.verify(issuer)
        except rpki.POW.ValidationError as e:
            logger.debug("%r rejected: %s", self, e)
            status.add(codes.OBJECT_REJECTED)
        codes.normalize(status)
        if self.getVersion() != 1:
            # CRL version field is zero-based, so 1 is "v2".
            status.add(codes.WRONG_OBJECT_VERSION)
        now = rpki.sundial.now()
        if self.thisUpdate > now:
            status.add(codes.CRL_NOT_YET_VALID)
        if self.nextUpdate < now:
            status.add(codes.STALE_CRL_OR_MANIFEST)
        # The three CRL number conditions are mutually exclusive, so
        # chain them: in Python 2, None < 0 is True, so the original
        # independent "if" tests flagged a missing number as negative too.
        if self.number is None:
            status.add(codes.CRL_NUMBER_EXTENSION_MISSING)
        elif self.number < 0:
            status.add(codes.CRL_NUMBER_IS_NEGATIVE)
        elif self.number > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:
            status.add(codes.CRL_NUMBER_OUT_OF_RANGE)
        if self.getIssuer() != issuer.getSubject():
            status.add(codes.CRL_ISSUER_NAME_MISMATCH)
        if not self.aki:
            status.add(codes.AKI_EXTENSION_MISSING)
        elif self.aki != issuer.ski:
            status.add(codes.AKI_EXTENSION_ISSUER_MISMATCH)

        return not any(s.kind == "bad" for s in status)
+
+
class Ghostbuster(rpki.POW.CMS, POW_Mixin):
    # Ghostbusters record: a CMS object whose payload is a vCard.

    def __repr__(self):
        try:
            return "<Ghostbuster \"{}\" at 0x{:x}>".format(self.uri, id(self))
        except:
            return "<Ghostbuster at 0x{:x}>".format(id(self))

    @classmethod
    def load(cls, obj):
        # Construct from an RPKIObject row; .vcard is filled in by .check().
        self = cls.derRead(obj.der)
        self.obj = obj
        self.ee = X509.load(obj, self)
        self.vcard = None
        return self

    def check(self, trusted, crl):
        # EE certificate checks, then CMS verification (which also
        # extracts the vCard payload), then conformance checks.
        # Returns True iff nothing "bad" was recorded.
        status = Status.update(self.uri)
        self.ee.check(trusted = trusted, crl = crl)
        try:
            self.vcard = self.verify()
        except rpki.POW.ValidationError as e:
            logger.debug("%r rejected: %s", self, e)
            status.add(codes.OBJECT_REJECTED)
        self.checkRPKIConformance(status)
        codes.normalize(status)
        return not any(s.kind == "bad" for s in status)
+
+
class Manifest(rpki.POW.Manifest, POW_Mixin):
    # Manifest: CMS-signed list of (filename, hash) pairs describing
    # the contents of a publication point.

    def __repr__(self):
        try:
            return "<Manifest \"{}\" at 0x{:x}>".format(self.uri, id(self))
        except:
            return "<Manifest at 0x{:x}>".format(id(self))

    @classmethod
    def load(cls, obj):
        # Construct from an RPKIObject row; content attributes are
        # populated by .check().
        self = cls.derRead(obj.der)
        self.obj = obj
        self.ee = X509.load(obj, self)
        self.fah = None
        self.thisUpdate = None
        self.nextUpdate = None
        self.number = None
        return self

    def check(self, trusted, crl):
        """
        Validate this manifest: EE certificate checks, CMS signature,
        RPKI conformance, interval-vs-EE-validity, and freshness.
        Records status codes; returns True iff nothing "bad" was found.
        """
        status = Status.update(self.uri)
        self.ee.check(trusted = trusted, crl = crl)
        try:
            self.verify()
        except rpki.POW.ValidationError as e:
            logger.debug("%r rejected: %s", self, e)
            status.add(codes.OBJECT_REJECTED)
        self.checkRPKIConformance(status)
        self.thisUpdate = self.getThisUpdate()
        self.nextUpdate = self.getNextUpdate()
        self.number = self.getManifestNumber()
        self.fah = self.getFiles()
        self.notBefore = self.ee.getNotBefore()
        self.notAfter = self.ee.getNotAfter()
        # The manifest's validity interval must sit inside the EE
        # certificate's validity interval.
        if self.thisUpdate < self.notBefore or self.nextUpdate > self.notAfter:
            status.add(codes.MANIFEST_INTERVAL_OVERRUNS_CERT)
        now = rpki.sundial.now()
        if self.thisUpdate > now:
            status.add(codes.MANIFEST_NOT_YET_VALID)
        if self.nextUpdate < now:
            status.add(codes.STALE_CRL_OR_MANIFEST)
        codes.normalize(status)
        return not any(s.kind == "bad" for s in status)

    def find_crl_candidate_hashes(self):
        # Hex SHA-256 hashes of every .crl entry listed on this manifest.
        for fn, digest in self.fah:
            if fn.endswith(".crl"):
                yield digest.encode("hex")
+
+
class ROA(rpki.POW.ROA, POW_Mixin):
    # Route Origin Authorization wrapper.

    def __repr__(self):
        try:
            return "<ROA \"{}\" at 0x{:x}>".format(self.uri, id(self))
        except:
            return "<ROA at 0x{:x}>".format(id(self))

    @classmethod
    def load(cls, obj):
        # Construct from an RPKIObject row; .asn and .prefixes are
        # populated by .check().
        self = cls.derRead(obj.der)
        self.obj = obj
        self.ee = X509.load(obj, self)
        self.asn = None
        self.prefixes = None
        return self

    def check(self, trusted, crl):
        """
        Validate this ROA: EE certificate checks, CMS signature, and
        RPKI conformance.  Caches the ASN and prefix list.  Records
        status codes; returns True iff nothing "bad" was found.
        """
        status = Status.update(self.uri)
        self.ee.check(trusted = trusted, crl = crl)
        try:
            # Original bound the result to an unused local named
            # "vcard" (copy-paste from Ghostbuster); ROA payload is
            # not needed here, so just verify.  Also log rejections,
            # consistent with the other object classes.
            self.verify()
        except rpki.POW.ValidationError as e:
            logger.debug("%r rejected: %s", self, e)
            status.add(codes.OBJECT_REJECTED)
        self.checkRPKIConformance(status)
        self.asn = self.getASID()
        self.prefixes = self.getPrefixes()
        codes.normalize(status)
        return not any(s.kind == "bad" for s in status)
+
+
# Dispatch table mapping three-letter filename extension to the
# wrapper class that handles that object type.
class_dispatch = dict(cer = X509,
                      crl = CRL,
                      gbr = Ghostbuster,
                      mft = Manifest,
                      roa = ROA)

def uri_to_class(uri):
    # Map a URI to its object wrapper class by filename extension.
    # Records UNKNOWN_OBJECT_TYPE_SKIPPED and returns None for
    # anything we don't recognize.
    cls = class_dispatch.get(uri[-3:]) if len(uri) > 4 and uri[-4] == "." else None
    if cls is None:
        Status.add(uri, None, codes.UNKNOWN_OBJECT_TYPE_SKIPPED)
    return cls
+
+
+# If we find ourselves using this same ordering for every retrieval from the RPKIObjects model, we
+# can add it as a Meta option for the model and omit it in the query expressions, like this:
+#
+# class RPKIObjects(models.Model):
+# ...
+# class Meta:
+# ordering = ["-retrieved__started"]
+#
+# https://docs.djangoproject.com/en/1.8/ref/models/querysets/#order-by
+# https://docs.djangoproject.com/en/1.8/ref/models/options/#django.db.models.Options.ordering
+
def fetch_objects(**kwargs):
    # Yield loaded wrapper objects for every RPKIObject row matching
    # the given filter, most recently retrieved first.  Rows whose URI
    # doesn't map to a known object type are skipped (and flagged by
    # uri_to_class()).
    for obj in RPKIObject.objects.filter(**kwargs).order_by("-retrieved__started"):
        cls = uri_to_class(obj.uri)
        if cls is not None:
            yield cls.load(obj)
+
+
class WalkFrame(object):
    """
    Certificate tree walk stack frame.  This is basically just a
    preamble and a loop, broken out into several separate methods so
    that we can fork new tasks in the middle then resume processing of
    the current state machine (ie, this frame) when appropriate (eg,
    after an rsync or RRDP fetch completes).
    """

    def __init__(self, cer):
        # cer: the CA certificate whose products this frame walks.
        # .state holds the bound method to run next (initial ->
        # [fetch ->] ready -> loop).
        self.cer = cer
        self.state = self.initial

    def __repr__(self):
        try:
            return "<WalkFrame \"{}\" at 0x{:x}>".format(self.cer.uri, id(self))
        except:
            return "<WalkFrame at 0x{:x}>".format(id(self))

    @tornado.gen.coroutine
    def __call__(self, wsk):
        # Dispatch to whatever state this frame is currently in.
        yield self.state(wsk)

    @tornado.gen.coroutine
    def initial(self, wsk):
        # Pick a publication point URI (RRDP vs rsync per the
        # --prefer-rsync flag) and decide whether a fetch is needed.

        rsync_uri = first_rsync_uri(self.cer.caDirectory)
        rrdp_uri = first_https_uri(self.cer.rpkiNotify)

        if args.prefer_rsync:
            uri = rsync_uri or rrdp_uri
        else:
            uri = rrdp_uri or rsync_uri

        self.fetcher = Fetcher(uri)

        if not self.fetcher.needed():
            self.state = self.ready
        elif not args.spawn_on_fetch:
            self.state = self.fetch
        else:
            # Fork a clone of this stack to do the fetch so the
            # worker pool can make progress elsewhere; this stack
            # resumes in the .fetch state on the cloned task.
            self.state = self.fetch
            yield task_queue.put(wsk.clone())
            wsk.pop()

    @tornado.gen.coroutine
    def fetch(self, wsk):
        yield self.fetcher.fetch()
        self.state = self.ready

    @tornado.gen.coroutine
    def ready(self, wsk):
        # Fetch complete: pick the manifest and CRL for this CA,
        # then set up the product scan loop.
        self.trusted = wsk.trusted()

        logger.debug("%r scanning products", self)

        # NB: CRL checks on manifest EE certificates deferred until we've picked a CRL.

        mft_candidates = []
        crl_candidates = []
        crl_candidate_hashes = set()

        for mft in fetch_objects(aki = self.cer.ski, uri__endswith = ".mft"):
            if mft.check(trusted = self.trusted, crl = None):
                mft_candidates.append(mft)
                crl_candidate_hashes.update(mft.find_crl_candidate_hashes())

        if not mft_candidates:
            wsk.pop()
            return

        for crl in fetch_objects(aki = self.cer.ski, uri__endswith = ".crl", sha256__in = crl_candidate_hashes):
            if crl.check(self.trusted[0]):
                crl_candidates.append(crl)

        # Prefer highest number, then latest thisUpdate, then most
        # recent retrieval.
        mft_candidates.sort(reverse = True, key = lambda x: (x.number, x.thisUpdate, x.obj.retrieved.started))
        crl_candidates.sort(reverse = True, key = lambda x: (x.number, x.thisUpdate, x.obj.retrieved.started))

        if not crl_candidates:
            wsk.pop()
            return

        self.crl = crl_candidates[0]

        install_object(self.crl)
        Status.add(self.crl.uri, codes.OBJECT_ACCEPTED)

        #logger.debug("Picked CRL %s", self.crl.uri)

        # First manifest candidate whose EE certificate isn't on the
        # chosen CRL wins.
        for mft in mft_candidates:
            if self.crl.isRevoked(mft.ee):
                Status.add(mft.obj.uri, codes.MANIFEST_EE_REVOKED)
                continue
            self.mft = mft
            break
        else:
            wsk.pop()
            return

        install_object(self.mft)
        Status.add(self.mft.obj.uri, codes.OBJECT_ACCEPTED)

        self.stale_crl = Status.test(self.crl.uri, codes.STALE_CRL_OR_MANIFEST)
        self.stale_mft = Status.test(self.mft.uri, codes.STALE_CRL_OR_MANIFEST)

        # Issue warnings on mft and crl URI mismatches?

        # Use an explicit iterator so we can resume it; run loop in separate method, same reason.

        self.mft_iterator = iter(self.mft.getFiles())
        self.state = self.loop

    @tornado.gen.coroutine
    def loop(self, wsk):
        # Process manifest entries one at a time.  On finding a CA
        # certificate we push a new frame and return; the walk resumes
        # this iterator when the child frame is done.

        #logger.debug("Processing %s", self.mft.uri)

        for fn, digest in self.mft_iterator:

            yield tornado.gen.moment

            uri = self.mft.uri[:self.mft.uri.rindex("/") + 1] + fn

            # Need general URI validator here?

            if uri == self.crl.uri:
                continue

            cls = uri_to_class(uri)

            if cls is None:
                continue

            if cls in (Manifest, CRL):
                Status.add(uri, None, codes.INAPPROPRIATE_OBJECT_TYPE_SKIPPED)
                continue

            # Consider every stored copy matching this manifest hash,
            # newest retrieval first; first acceptable copy wins.
            for obj in fetch_objects(sha256 = digest.encode("hex")):

                if self.stale_crl:
                    Status.add(uri, codes.TAINTED_BY_STALE_CRL)
                if self.stale_mft:
                    Status.add(uri, codes.TAINTED_BY_STALE_MANIFEST)

                if not obj.check(trusted = self.trusted, crl = self.crl):
                    Status.add(uri, codes.OBJECT_REJECTED)
                    continue

                install_object(obj)
                Status.add(uri, codes.OBJECT_ACCEPTED)

                if cls is not X509 or not obj.is_ca:
                    break

                # Descend into the new CA; return so that this
                # frame's iterator resumes afterwards.
                wsk.push(obj)
                return

        wsk.pop()
+
+
class WalkTask(object):
    """
    One certificate-tree walk: a stack of WalkFrame objects, roughly
    analogous to STACK_OF(walk_ctx_t) in rcynic:tos.
    """

    def __init__(self, wsk = None, cer = None):
        # Either adopt an existing frame stack or start a fresh one;
        # optionally seed it with an initial CA certificate.
        self.wsk = wsk if wsk is not None else []
        if cer is not None:
            self.push(cer)

    def __repr__(self):
        try:
            return "<WalkTask \"{}\" at 0x{:x}>".format(self.wsk[-1].cer.uri, id(self))
        except:
            return "<WalkTask at 0x{:x}>".format(id(self))

    @tornado.gen.coroutine
    def __call__(self):
        # Keep running the top frame until the stack empties out.
        while self.wsk:
            yield self.wsk[-1](wsk = self)

    def push(self, cer):
        # Begin walking the tree below the given CA certificate.
        self.wsk.append(WalkFrame(cer))

    def pop(self):
        return self.wsk.pop()

    def clone(self):
        # Shallow copy: the frames are shared, the stack itself is not.
        return WalkTask(wsk = self.wsk[:])

    def trusted(self):
        # Certification chain for the current frame, deepest CA first.
        return [frame.cer for frame in reversed(self.wsk)]
+
+
def read_tals():
    """
    Scan the trust anchor locator directory, yielding a (uris, key)
    pair for each parseable .tal file found.  Unreadable or
    unparseable TALs are flagged UNREADABLE_TRUST_ANCHOR_LOCATOR.
    """
    for head, dirs, files in os.walk(args.trust_anchor_locators):
        for fn in files:
            if fn.endswith(".tal"):
                furi = "file://" + os.path.abspath(os.path.join(head, fn))
                try:
                    # TAL format: URI lines, a blank line, then the
                    # base64-encoded DER public key.
                    with open(os.path.join(head, fn), "r") as f:
                        lines = [line.strip() for line in f]
                    blank = lines.index("")
                    uris = lines[:blank]
                    key = rpki.POW.Asymmetric.derReadPublic("".join(lines[blank:]).decode("base64"))
                    if not uris or not all(uri.endswith(".cer") for uri in uris):
                        Status.add(furi, None, codes.MALFORMED_TAL_URI)
                    # NOTE(review): a TAL flagged MALFORMED_TAL_URI is
                    # still yielded here -- confirm that's intentional.
                    yield uris, key
                except:
                    # Covers I/O errors, missing blank line (ValueError
                    # from index), bad base64, and unparseable keys.
                    Status.add(furi, None, codes.UNREADABLE_TRUST_ANCHOR_LOCATOR)
+
+
def uri_to_filename(uri, base = None):
    """
    Strip the scheme from a URI to obtain a relative filename,
    optionally joined under a base directory.  Raises ValueError when
    the URI contains no "://" separator.
    """
    path = uri[uri.index("://") + 3:]
    return path if base is None else os.path.join(base, path)
+
def first_uri(uris, scheme):
    # First URI in the (possibly None) sequence using the given
    # scheme, or None when there is no match.
    for candidate in uris or ():
        if candidate.startswith(scheme):
            return candidate
    return None

def first_rsync_uri(uris):
    # First rsync:// URI in the sequence, if any.
    return first_uri(uris, "rsync://")

def first_https_uri(uris):
    # First https:// URI in the sequence, if any.
    return first_uri(uris, "https://")
+
def sha256hex(bytes):
    # Hex-encoded SHA-256 digest of a byte string.
    # NB: parameter name shadows the "bytes" builtin; harmless here
    # but worth renaming if this is ever touched again.
    d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
    d.update(bytes)
    return d.digest().encode("hex")
+
+
# Exceptions specific to the fetch machinery below.

class RRDP_ParseFailure(Exception):
    "Failure parsing RRDP message."

class DeadHost(Exception):
    "Host recently tried and known to be unavailable."
+
+
class Fetcher(object):
    """
    Network transfer methods and history database.

    Handles rsync, plain HTTPS (trust anchor certificates), and RRDP
    transfers; the per-protocol history tables let concurrent fetches
    of the same target coalesce into a single transfer.
    """

    # Internal protocol:
    #
    # - Instances which have just gotten to the query stage are not registered
    #
    # - Instances which are in progress are listed in .history and
    #   have a Condition object in .pending; instances which depend on
    #   this should wait for the condition, then return.
    #
    # - Instances which have completed are listed in .history and have
    #   .pending set to None.

    # Hosts we've marked unreachable, and fetch history, per protocol.
    # _rsync_history is keyed by URI path tuples (see
    # _rsync_split_uri()); _https_history is keyed by URI strings.
    _rsync_deadhosts = set()
    _rsync_history = dict()

    _https_deadhosts = set()
    _https_history = dict()

    def __init__(self, uri, ta = False):
        # uri: target of this fetch.
        # ta:  True when fetching a trust anchor certificate directly.
        self.uri = uri
        self.ta = ta
        self.pending = None     # tornado Condition while fetch in progress
        self.status = None      # rsync exit status, once collected
+
    def _rsync_split_uri(self):
        # "rsync://host/mod/a/b/" -> ("host", "mod", "a", "b").
        return tuple(self.uri.rstrip("/").split("/")[2:])

    def _rsync_find(self, path):
        # Find a history entry covering this path or any ancestor of
        # it (directory fetches are recursive, so a fetch of a parent
        # directory also covers this URI).  Returns None if no entry.
        for i in xrange(1, len(path)):
            target = path[:i+1]
            try:
                return self._rsync_history[target]
            except KeyError:
                continue
        return None
+
    def needed(self):
        # Does this fetch actually need to happen?  Always False when
        # --fetch is disabled; otherwise dispatch on URI scheme.
        if not args.fetch:
            return False
        if self.uri.startswith("rsync://"):
            return self._rsync_needed()
        if self.uri.startswith("https://"):
            return self._https_needed()
        raise ValueError

    def _rsync_needed(self):
        # Dead host: don't bother.  Otherwise needed when we've never
        # fetched this URI (or a covering ancestor), or when a fetch
        # is still in progress (in which case .fetch() will wait on it).
        path = self._rsync_split_uri()
        if path[0] in self._rsync_deadhosts:
            return False
        entry = self._rsync_find(path)
        return entry is None or entry.pending is not None

    def _https_needed(self):
        # Same idea for HTTPS: dead-hosted by netloc, history keyed by
        # the exact URI string.
        netloc = urlparse.urlparse(self.uri).netloc
        if netloc in self._https_deadhosts:
            return False
        entry = self._https_history.get(self.uri)
        return entry is None or entry.pending is not None
+
    def fetch(self):
        # Dispatch to the right transfer method for this URI: rsync,
        # direct HTTPS (trust anchors only), or RRDP.
        if self.uri.startswith("rsync://"):
            return self._rsync_fetch()
        if self.uri.startswith("https://"):
            return self._https_fetch_ta() if self.ta else self._rrdp_fetch()
        raise ValueError
+
    @tornado.gen.coroutine
    def _rsync_fetch(self):
        """
        Run rsync to mirror self.uri into the unauthenticated tree,
        then store any objects found there in the database, recording
        a Retrieval row for the transfer.
        """
        assert self.uri.startswith("rsync://") and (self.uri.endswith(".cer") if self.ta else self.uri.endswith("/"))

        if not args.fetch:
            return
        path = self._rsync_split_uri()
        dead = path[0] in self._rsync_deadhosts
        other = self._rsync_find(path)
        if not dead and other is not None and other.pending is not None:
            # Another coroutine is already fetching this; wait for it.
            yield other.pending.wait()
        if dead or other is not None:
            return

        # Register as the in-progress fetch for this path.
        self.pending = tornado.locks.Condition()
        self._rsync_history[path] = self

        try:
            path = uri_to_filename(self.uri, args.unauthenticated)
            cmd = ["rsync", "--update", "--times", "--copy-links", "--itemize-changes"]
            if self.uri.endswith("/"):
                cmd.append("--recursive")
                cmd.append("--delete")
            cmd.append(self.uri)
            cmd.append(path)

            dn = os.path.dirname(path)
            if not os.path.exists(dn):
                os.makedirs(dn)

            # We use the stdout close from rsync to detect when the subprocess has finished.
            # There's a lovely tornado.process.Subprocess.wait_for_exit() method which does
            # exactly what one would think we'd want -- but Unix signal handling still hasn't
            # caught up to the software interrupt architecture ITS had forty years ago, so
            # signals still cause random "system call interrupted" failures in other libraries.
            # Nothing Tornado can do about this, so we avoid signals entirely and collect the
            # process exit status directly from the operating system.  In theory, the WNOHANG
            # isn't necessary here, we use it anyway to be safe in case theory is wrong.

            # If we need to add a timeout here to guard against rsync processes taking too long
            # (which has happened in the past with, eg, LACNIC), see tornado.gen.with_timeout()
            # (documented in the utility functions section of the tornado.gen page), which wraps
            # any future in a timeout.

            t0 = time.time()
            rsync = tornado.process.Subprocess(cmd, stdout = tornado.process.Subprocess.STREAM, stderr = subprocess.STDOUT)
            logger.debug("rsync[%s] started \"%s\"", rsync.pid, " ".join(cmd))
            output = yield rsync.stdout.read_until_close()
            pid, self.status = os.waitpid(rsync.pid, os.WNOHANG)
            t1 = time.time()
            if (pid, self.status) == (0, 0):
                logger.warn("rsync[%s] Couldn't get real exit status without blocking, sorry", rsync.pid)
            for line in output.splitlines():
                logger.debug("rsync[%s] %s", rsync.pid, line)
            logger.debug("rsync[%s] finished after %s seconds with status 0x%x", rsync.pid, t1 - t0, self.status)

            # Should do something with rsync result and validation status database here.

            retrieval = Retrieval.objects.create(
                uri = self.uri,
                started = rpki.sundial.datetime.fromtimestamp(t0),
                finished = rpki.sundial.datetime.fromtimestamp(t1),
                successful = self.status == 0)

            # Sweep the mirrored tree, storing anything new.
            for fn in self._rsync_walk(path):
                yield tornado.gen.moment
                uri = "rsync://" + fn[len(args.unauthenticated):].lstrip("/")
                cls = uri_to_class(uri)
                if cls is not None:
                    try:
                        with open(fn, "rb") as f:
                            cls.store_if_new(f.read(), uri, retrieval)
                    except:
                        Status.add(uri, codes.UNREADABLE_OBJECT)
                        logger.exception("Couldn't read %s from rsync tree", uri)

        finally:
            # Wake anybody waiting on this fetch, success or failure.
            pending = self.pending
            self.pending = None
            pending.notify_all()

    def _rsync_walk(self, path):
        # Yield filenames covered by this fetch: the whole tree for a
        # directory fetch, just the one file for a TA certificate fetch.
        if self.uri.endswith("/"):
            for head, dirs, files in os.walk(path):
                for fn in files:
                    yield os.path.join(head, fn)
        elif os.path.exists(path):
            yield path
+
    @tornado.gen.coroutine
    def _https_fetch_url(self, url, streaming_callback = None):
        """
        Fetch one HTTPS URL.  On success, delivers a (retrieval,
        response) pair via tornado.gen.Return; on failure, logs and
        re-raises.  A Retrieval row is recorded either way.  Raises
        DeadHost immediately for hosts already known unreachable.
        """

        if urlparse.urlparse(url).netloc in self._https_deadhosts:
            raise DeadHost

        # Should do something with deadhost processing below.  Looks
        # like errors such as HTTP timeout show up as
        # tornado.httpclient.HTTPError exceptions (which we could
        # suppress if we wanted to do so, but we probably don't).
        # HTTP timeout shows up in the logs as "HTTP 599".  See doc for:
        #
        #   tornado.httpclient.AsyncHTTPClient.fetch()
        #   tornado.httpclient.HTTPError

        # Might need to do something with If-Modified-Since support
        # See if_modified_since argument to
        # http://www.tornadoweb.org/en/stable/httpclient.html#request-objects
        # (which we can pass to client.fetch(), below).  Not sure how
        # "you don't need to retrieve this" result comes back,
        # probably a benign exception we need to catch.  Supporting
        # this means adding another null-able timestamp field to the
        # RRDPSnapshot model (which probably should be named the
        # RRDPZone model instead), and storing a datetime there.
        # Would also need to pull timestamp from the Last-Modified
        # header in the response object.

        try:
            ok = False
            t0 = time.time()
            client = tornado.httpclient.AsyncHTTPClient(max_body_size = args.max_https_body_size)
            response = yield client.fetch(url,
                                          streaming_callback = streaming_callback,
                                          validate_cert = args.validate_https,
                                          connect_timeout = args.https_timeout,
                                          request_timeout = args.https_timeout)
            # Might want to check response Content-Type here
            ok = True

        except tornado.httpclient.HTTPError as e:
            # Might want to check e.response here to figure out whether to add to _https_deadhosts.
            logger.info("HTTP error for %s: %s", url, e)
            raise

        except (socket.error, IOError, ssl.SSLError) as e:
            # Might want to check e.errno here to figure out whether to add to _https_deadhosts.
            logger.info("Network I/O error for %s: %s", url, e)
            raise

        except Exception as e:
            logger.exception("Error (%r) for %s", type(e), url)
            raise

        finally:
            # Record the transfer whether it worked or not; on success
            # the Return raised here is how the result is delivered.
            t1 = time.time()
            logger.debug("Fetch of %s finished after %s seconds", url, t1 - t0)
            retrieval = Retrieval.objects.create(
                uri = url,
                started = rpki.sundial.datetime.fromtimestamp(t0),
                finished = rpki.sundial.datetime.fromtimestamp(t1),
                successful = ok)
            if ok:
                raise tornado.gen.Return((retrieval, response))
+
+ @tornado.gen.coroutine
+ def _https_fetch_ta(self):
+
+ if not args.fetch:
+ return
+
+ other = self._https_history.get(self.uri)
+ if other is not None and other.pending is not None:
+ yield other.pending.wait()
+ return
+
+ self.pending = tornado.locks.Condition()
+ self._rsync_history[self.uri] = self
+
+ try:
+ retrieval, response = yield self._https_fetch_url(self.uri)
+ X509.store_if_new(response.body, self.uri, retrieval)
+ except:
+ logger.exception("Couldn't load %s", self.uri)
+
+ finally:
+ pending = self.pending
+ self.pending = None
+ pending.notify_all()
+
    @tornado.gen.coroutine
    def _rrdp_fetch_notification(self, url):
        # Fetch and schema-validate an RRDP notification file.
        # Delivers (retrieval, parsed XML root) via tornado.gen.Return;
        # raises RRDP_ParseFailure when the root element is wrong.

        retrieval, response = yield self._https_fetch_url(url)

        notification = ElementTree(file = response.buffer).getroot()

        rpki.relaxng.rrdp.schema.assertValid(notification)

        if notification.tag != tag_notification:
            raise RRDP_ParseFailure("Expected RRDP notification for {}, got {}".format(url, notification.tag))

        raise tornado.gen.Return((retrieval, notification))

    @tornado.gen.coroutine
    def _rrdp_fetch_data_file(self, url, expected_hash):
        # Fetch an RRDP snapshot or delta file into a spooled temporary
        # file, hashing it on the fly and checking the SHA-256 against
        # the hash advertised in the notification file.  Delivers
        # (retrieval, response, xml_file) via tornado.gen.Return, with
        # xml_file rewound to the start.

        sha256 = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
        xml_file = tempfile.SpooledTemporaryFile()

        retrieval, response = yield self._https_fetch_url(url, lambda data: (sha256.update(data), xml_file.write(data)))

        received_hash = sha256.digest().encode("hex")
        xml_file.seek(0)

        if received_hash != expected_hash.lower():
            raise RRDP_ParseFailure("Expected RRDP hash {} for {}, got {}".format(expected_hash.lower(), url, received_hash))

        raise tornado.gen.Return((retrieval, response, xml_file))
+
    @tornado.gen.coroutine
    def _rrdp_bulk_create(self, new_objs, existing_objs):
        # Bulk-insert a batch of new RPKIObject rows.  On
        # IntegrityError (some rows already exist in SQL), weed out
        # the duplicates -- moving their primary keys onto
        # existing_objs -- then retry the insert.  Empties new_objs
        # in place when done.
        from django.db import IntegrityError

        #logger.debug("Bulk creation of new RPKIObjects")

        try:
            RPKIObject.objects.bulk_create(new_objs)

        except IntegrityError:
            #logger.debug("Some objects already existed, weeding and retrying")
            i = 0
            while i < len(new_objs):
                yield tornado.gen.moment
                try:
                    existing_objs.append(RPKIObject.objects.values_list("pk", flat = True).get(der = new_objs[i].der))
                    logger.debug("Object existed in SQL but, apparently, not in prior copy of snapshot: uri %s sha256 %s",
                                 new_objs[i].uri, new_objs[i].sha256)
                except RPKIObject.DoesNotExist:
                    i += 1
                else:
                    del new_objs[i]
            RPKIObject.objects.bulk_create(new_objs)

        del new_objs[:]
+
    @tornado.gen.coroutine
    def _rrdp_fetch(self):
        """
        Fetch RRDP data for self.uri: read the notification file, then
        either load a complete snapshot or apply consecutive delta
        files to the snapshot we already have.
        """
        from django.db import transaction

        if not args.fetch:
            return

        # If an identical fetch is already in progress, wait for it.
        other = self._https_history.get(self.uri)
        if other is not None and other.pending is not None:
            yield other.pending.wait()
            return

        self.pending = tornado.locks.Condition()
        self._https_history[self.uri] = self

        try:
            retrieval, notification = yield self._rrdp_fetch_notification(url = self.uri)

            session_id = notification.get("session_id")
            serial = long(notification.get("serial"))

            snapshot = RRDPSnapshot.objects.filter(
                session_id = session_id).order_by("-retrieved__started").first()

            logger.debug("RRDP notification for %s session_id %s serial %s current snapshot %r",
                         self.uri, session_id, serial, snapshot)

            if snapshot is not None and snapshot.serial == serial:
                logger.debug("RRDP data for %s is up-to-date, nothing to do", self.uri)
                return

            deltas = dict((long(delta.get("serial")), (delta.get("uri"), delta.get("hash")))
                          for delta in notification.iterchildren(tag_delta))

            if snapshot is None or snapshot.serial + 1 not in deltas:
                # No usable delta chain: (re)load from the full snapshot.

                existing_rpkiobject_map = dict()

                if snapshot is not None:
                    logger.debug("RRDP %s no deltas available for serial %s", self.uri, snapshot.serial)
                    existing_rpkiobject_map.update(snapshot.rpkiobject_set.values_list("sha256", "pk"))

                x = notification.find(tag_snapshot)

                url, hash = x.get("uri"), x.get("hash")

                logger.debug("RRDP %s loading from snapshot %s serial %s", self.uri, url, serial)

                retrieval, response, xml_file = yield self._rrdp_fetch_data_file(url, hash)

                snapshot = RRDPSnapshot.objects.create(session_id = session_id, serial = serial)

                # Value of "chunk" here may need to be configurable.  Larger numbers batch more objects in
                # a single bulk addition, which is faster ... unless one or more of them isn't really new, in
                # which case we have to check everything in that batch when we get the IntegrityError, so
                # the smaller the batch, the faster that check.  No single good answer.

                root = None
                existing_rpkiobjects = []
                new_rpkiobjects = []
                chunk = 2000

                for event, node in iterparse(xml_file):
                    if node is root:
                        continue

                    if root is None:
                        # First element seen: sanity-check the document root.
                        root = node.getparent()
                        if root is None or root.tag != tag_snapshot \
                           or root.get("version") != "1" \
                           or any(a not in ("version", "session_id", "serial") for a in root.attrib):
                            raise RRDP_ParseFailure("{} doesn't look like an RRDP snapshot file".format(url))
                        if root.get("session_id") != session_id:
                            raise RRDP_ParseFailure("Expected RRDP session_id {} for {}, got {}".format(
                                session_id, url, root.get("session_id")))
                        if long(root.get("serial")) != long(serial):
                            raise RRDP_ParseFailure("Expected RRDP serial {} for {}, got {}".format(
                                serial, url, root.get("serial")))

                    if node.tag != tag_publish or node.getparent() is not root \
                       or any(a != "uri" for a in node.attrib):
                        raise RRDP_ParseFailure("{} doesn't look like an RRDP snapshot file".format(url))

                    uri = node.get("uri")
                    cls = uri_to_class(uri)
                    if cls is None:
                        raise RRDP_ParseFailure("Unexpected URI {}".format(uri))

                    der = node.text.decode("base64")
                    sha256 = sha256hex(der)
                    try:
                        existing_rpkiobjects.append(existing_rpkiobject_map[sha256])
                    except KeyError:
                        ski, aki = cls.derRead(der).get_hex_SKI_AKI()
                        new_rpkiobjects.append(RPKIObject(der = der, uri = uri, ski = ski, aki = aki,
                                                          retrieved = retrieval, sha256 = sha256))

                    # Release parsed elements to keep memory bounded.
                    node.clear()
                    while node.getprevious() is not None:
                        del root[0]

                    if len(new_rpkiobjects) > chunk:
                        yield self._rrdp_bulk_create(new_rpkiobjects, existing_rpkiobjects)

                    yield tornado.gen.moment

                if len(new_rpkiobjects) > 0:
                    yield self._rrdp_bulk_create(new_rpkiobjects, existing_rpkiobjects)

                # Link both newly stored and already known objects to
                # the new snapshot via the M2M through table.
                RPKIObject.snapshot.through.objects.bulk_create([
                    RPKIObject.snapshot.through(rrdpsnapshot_id = snapshot.id, rpkiobject_id = i)
                    for i in retrieval.rpkiobject_set.values_list("pk", flat = True)])

                RPKIObject.snapshot.through.objects.bulk_create([
                    RPKIObject.snapshot.through(rrdpsnapshot_id = snapshot.id, rpkiobject_id = i)
                    for i in existing_rpkiobjects])

                snapshot.retrieved = retrieval
                snapshot.save()

                xml_file.close()

            else:
                logger.debug("RRDP %s %s deltas (%s--%s)", self.uri,
                             (serial - snapshot.serial), snapshot.serial, serial)

                deltas = [(serial, deltas[serial][0], deltas[serial][1])
                          for serial in xrange(snapshot.serial + 1, serial + 1)]
                futures = []

                while deltas or futures:

                    # Keep a few delta fetches in flight ahead of processing.
                    while deltas and len(futures) < args.fetch_ahead_goal:
                        serial, url, hash = deltas.pop(0)
                        logger.debug("RRDP %s serial %s fetching %s", self.uri, serial, url)
                        futures.append(self._rrdp_fetch_data_file(url, hash))

                    retrieval, response, xml_file = yield futures.pop(0)

                    root = None

                    # Apply each delta atomically: the serial bump and
                    # object changes commit together or not at all.
                    with transaction.atomic():
                        snapshot.serial += 1
                        snapshot.save()
                        logger.debug("RRDP %s serial %s loading", self.uri, snapshot.serial)

                        for event, node in iterparse(xml_file):
                            if node is root:
                                continue

                            if root is None:
                                # First element seen: sanity-check the document root.
                                root = node.getparent()
                                if root is None or root.tag != tag_delta \
                                   or root.get("version") != "1" \
                                   or any(a not in ("version", "session_id", "serial") for a in root.attrib):
                                    raise RRDP_ParseFailure("{} doesn't look like an RRDP delta file".format(url))
                                if root.get("session_id") != session_id:
                                    raise RRDP_ParseFailure("Expected RRDP session_id {} for {}, got {}".format(
                                        session_id, url, root.get("session_id")))
                                if long(root.get("serial")) != snapshot.serial:
                                    raise RRDP_ParseFailure("Expected RRDP serial {} for {}, got {}".format(
                                        snapshot.serial, url, root.get("serial")))

                            hash = node.get("hash")

                            if node.getparent() is not root or node.tag not in (tag_publish, tag_withdraw) \
                               or (node.tag == tag_withdraw and hash is None) \
                               or any(a not in ("uri", "hash") for a in node.attrib):
                                raise RRDP_ParseFailure("{} doesn't look like an RRDP delta file".format(url))

                            # A withdraw, or a publish that replaces an
                            # existing object, detaches the old object
                            # from this snapshot.
                            if node.tag == tag_withdraw or node.get("hash") is not None:
                                snapshot.rpkiobject_set.remove(snapshot.rpkiobject_set.get(sha256 = node.get("hash").lower()))

                            if node.tag == tag_publish:
                                uri = node.get("uri")
                                cls = uri_to_class(uri)
                                if cls is None:
                                    raise RRDP_ParseFailure("Unexpected URI %s" % uri)
                                obj, created = cls.store_if_new(node.text.decode("base64"), uri, retrieval)
                                obj.snapshot.add(snapshot)

                            # Release parsed elements to keep memory bounded.
                            node.clear()
                            while node.getprevious() is not None:
                                del root[0]

                            #yield tornado.gen.moment

                    xml_file.close()

                logger.debug("RRDP %s done processing deltas", self.uri)

        except (tornado.httpclient.HTTPError, socket.error, IOError, ssl.SSLError):
            pass                        # Already logged

        except RRDP_ParseFailure as e:
            logger.info("RRDP parse failure: %s", e)

        except:
            logger.exception("Couldn't load %s", self.uri)

        finally:
            # Wake anybody waiting on this fetch, success or failure.
            pending = self.pending
            self.pending = None
            pending.notify_all()
+
+
class CheckTALTask(object):
    # Task which fetches and validates one trust anchor, then queues a
    # WalkTask for the certificate tree beneath it.

    def __init__(self, uris, key):
        # uris: candidate TA URIs from the TAL; key: the TAL's public key.
        # Pick between rsync and HTTPS per the --prefer-rsync flag.
        rsync_uri = first_rsync_uri(uris)
        https_uri = first_https_uri(uris)

        if args.prefer_rsync:
            self.uri = rsync_uri or https_uri
        else:
            self.uri = https_uri or rsync_uri

        self.key = key

    def __repr__(self):
        return "<CheckTALTask: \"{}\">".format(self.uri)

    @tornado.gen.coroutine
    def __call__(self):
        # Fetch the TA certificate, then walk from the first stored
        # copy that passes the checks; flag the TAL if none does.
        yield Fetcher(self.uri, ta = True).fetch()
        for cer in fetch_objects(uri = self.uri):
            if self.check(cer):
                yield task_queue.put(WalkTask(cer = cer))
                break
        else:
            Status.add(self.uri, codes.TRUST_ANCHOR_SKIPPED)

    def check(self, cer):
        # A trust anchor must match the TAL's public key and pass the
        # (self-signed) certificate checks.  Records accept/reject
        # status; returns True iff accepted.
        if self.key.derWritePublic() != cer.getPublicKey().derWritePublic():
            Status.add(self.uri, codes.TRUST_ANCHOR_KEY_MISMATCH)
            ok = False
        else:
            ok = cer.check(trusted = None, crl = None)
        if ok:
            install_object(cer)
            Status.add(self.uri, codes.OBJECT_ACCEPTED)
        else:
            Status.add(self.uri, codes.OBJECT_REJECTED)
        return ok
+
+
+@tornado.gen.coroutine
+def worker(meself):
+ #
+ # NB: This particular style of control loop REQUIRES an except
+ # clause, even if that except clause is just a pass statement.
+ #
+ while True:
+ task = yield task_queue.get()
+ name = repr(task)
+ try:
+ logger.debug("Worker %s starting %s, queue length %s", meself, name, task_queue.qsize())
+ yield task()
+ except:
+ logger.exception("Worker %s caught unhandled exception from %s", meself, name)
+ finally:
+ task_queue.task_done()
+ logger.debug("Worker %s finished %s, queue length %s", meself, name, task_queue.qsize())
+
+
+def final_report():
+ # Clean up a bit to avoid confusing the user unnecessarily.
+ for s in Status.db.itervalues():
+ if codes.OBJECT_ACCEPTED in s.status:
+ s.status.discard(codes.OBJECT_REJECTED)
+ doc = Element("rcynic-summary", date = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()))
+ doc.set("reporting-hostname", socket.getfqdn())
+ doc.set("rcynic-version", "rcynicng")
+ doc.set("summary-version", "1")
+ labels = SubElement(doc, "labels")
+ for code in codes.all():
+ SubElement(labels, code.name, kind = code.kind).text = code.text
+ for uri in Status.db:
+ for sym in sorted(Status.db[uri].status):
+ SubElement(doc, "validation_status",
+ timestamp = str(Status.db[uri].timestamp),
+ status = str(sym),
+ generation = "None" # Historical relic, remove eventually
+ ).text = uri
+ #
+ # Should generate <rsync_history/> elements here too, later
+ #
+ ElementTree(doc).write(file = argparse.FileType("w")(args.xml_file),
+ pretty_print = True)
+
+
+def final_cleanup():
+ from django.db import transaction, models
+
+ def report(when):
+ logger.debug("Database %s cleanup: %s Authenticated %s RRDPSnapshot %s RPKIObject %s Retrieval", when,
+ Authenticated.objects.all().count(), RRDPSnapshot.objects.all().count(),
+ RPKIObject.objects.all().count(), Retrieval.objects.all().count())
+
+ report("before")
+
+ with transaction.atomic():
+
+ #logger.debug("Flushing incomplete RRDP snapshots")
+
+ q = RRDPSnapshot.objects
+ q = q.filter(retrieved__isnull = True)
+ q.delete()
+
+ #logger.debug("Flushing old authenticated sets")
+
+ q = Authenticated.objects
+ q = q.exclude(id = authenticated.id)
+ q.delete()
+
+ #logger.debug("Flushing RRDP snapshots which don't contain anything in the (remaining) authenticated set")
+
+ q = RPKIObject.objects
+ q = q.filter(authenticated = authenticated.id)
+ q = q.exclude(snapshot = None)
+ q = q.order_by("snapshot__id")
+ q = q.values_list("snapshot__id", flat = True)
+ q = q.distinct()
+ q = RRDPSnapshot.objects.exclude(id__in = q)
+ q.delete()
+
+ #logger.debug("Flushing RPKI objects which are in neither current authenticated set nor current RRDP snapshot")
+
+ q = RPKIObject.objects
+ q = q.filter(authenticated = None) # was: q = q.exclude(authenticated = authenticated.id)
+ q = q.filter(snapshot = None)
+ q.delete()
+
+ #logger.debug("Flushing retrieval objects which are no longer related to any RPKI objects or RRDP snapshot")
+
+ q = RPKIObject.objects
+ q = q.order_by("retrieved__id")
+ q = q.values_list("retrieved__id", flat = True)
+ q = q.distinct()
+ q = Retrieval.objects.exclude(id__in = q)
+ q = q.filter(rrdpsnapshot = None)
+ q.delete()
+
+ report("after")
+
+
+@tornado.gen.coroutine
+def launcher():
+ for i in xrange(args.workers):
+ tornado.ioloop.IOLoop.current().spawn_callback(worker, i)
+
+ yield [task_queue.put(CheckTALTask(uris, key)) for uris, key in read_tals()]
+ yield task_queue.join()
+
+
+class posint(int):
+ def __init__(self, value):
+ if self <= 0:
+ raise ValueError
+
+
+def main():
+ global rpki
+
+ os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.rcynic")
+ time.tzset()
+
+ cfg = rpki.config.argparser(section = "rcynic", doc = __doc__, cfg_optional = True)
+
+ cfg.add_logging_arguments()
+
+ cfg.add_argument("-u", "--unauthenticated",
+ help = "where to store unauthenticated data retrieved via rsycnc",
+ default = os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "unauthenticated"))
+
+ cfg.add_argument("-x", "--xml-file",
+ help = "where to write XML log of validation results",
+ default = os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "rcynic.xml"))
+
+ cfg.add_argument("-t", "--trust-anchor-locators", "--tals",
+ help = "where to find trust anchor locators",
+ default = os.path.join(rpki.autoconf.sysconfdir, "rpki", "trust-anchors"))
+
+ cfg.add_argument("-w", "--workers", type = posint,
+ help = "number of worker pseudo-threads to allow",
+ default = 10)
+
+ cfg.add_argument("--fetch-ahead-goal", type = posint,
+ help = "how many deltas we want in the fetch-ahead pipe",
+ default = 2)
+
+ cfg.add_argument("--https-timeout", type = posint,
+ help = "HTTPS connection timeout, in seconds",
+ default = 300)
+
+ cfg.add_argument("--max-https-body-size", type = posint,
+ help = "upper limit on byte length of HTTPS message body",
+ default = 512 * 1024 * 1024)
+
+ cfg.add_boolean_argument("--fetch", default = True,
+ help = "whether to fetch data at all")
+
+ cfg.add_boolean_argument("--spawn-on-fetch", default = True,
+ help = "whether to spawn new pseudo-threads on fetch")
+
+ cfg.add_boolean_argument("--migrate", default = True,
+ help = "whether to migrate the ORM database on startup")
+
+ cfg.add_boolean_argument("--prefer-rsync", default = False,
+ help = "whether to prefer rsync over RRDP")
+
+ cfg.add_boolean_argument("--validate-https", default = False,
+ help = "whether to validate HTTPS server certificates")
+
+ global args
+ args = cfg.argparser.parse_args()
+
+ cfg.configure_logging(args = args, ident = "rcynic")
+
+ import django
+ django.setup()
+
+ if args.migrate:
+ # Not sure we should be doing this on every run, but sure simplifies things.
+ import django.core.management
+ django.core.management.call_command("migrate", verbosity = 0, interactive = False)
+
+ import rpki.rcynicdb
+ global Retrieval
+ global Authenticated
+ global RRDPSnapshot
+ global RPKIObject
+ Retrieval = rpki.rcynicdb.models.Retrieval
+ Authenticated = rpki.rcynicdb.models.Authenticated
+ RRDPSnapshot = rpki.rcynicdb.models.RRDPSnapshot
+ RPKIObject = rpki.rcynicdb.models.RPKIObject
+
+
+ global authenticated
+ authenticated = Authenticated.objects.create(started = rpki.sundial.datetime.now())
+
+ global task_queue
+ task_queue = tornado.queues.Queue()
+ tornado.ioloop.IOLoop.current().run_sync(launcher)
+
+ authenticated.finished = rpki.sundial.datetime.now()
+ authenticated.save()
+
+ final_report()
+
+ final_cleanup()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/rp/rcynic/rpki-torrent.py b/rp/rcynic/rpki-torrent.py
index 2c6aa64d..f9a3d620 100644
--- a/rp/rcynic/rpki-torrent.py
+++ b/rp/rcynic/rpki-torrent.py
@@ -46,688 +46,688 @@ import transmissionrpc
tr_env_vars = ("TR_TORRENT_DIR", "TR_TORRENT_ID", "TR_TORRENT_NAME")
class WrongServer(Exception):
- "Hostname not in X.509v3 subjectAltName extension."
+ "Hostname not in X.509v3 subjectAltName extension."
class UnexpectedRedirect(Exception):
- "Unexpected HTTP redirect."
+ "Unexpected HTTP redirect."
class WrongMode(Exception):
- "Wrong operation for mode."
+ "Wrong operation for mode."
class BadFormat(Exception):
- "Zip file does not match our expectations."
+ "Zip file does not match our expectations."
class InconsistentEnvironment(Exception):
- "Environment variables received from Transmission aren't consistent."
+ "Environment variables received from Transmission aren't consistent."
class TorrentNotReady(Exception):
- "Torrent is not ready for checking."
+ "Torrent is not ready for checking."
class TorrentDoesNotMatchManifest(Exception):
- "Retrieved torrent does not match manifest."
+ "Retrieved torrent does not match manifest."
class TorrentNameDoesNotMatchURL(Exception):
- "Torrent name doesn't uniquely match a URL."
+ "Torrent name doesn't uniquely match a URL."
class CouldNotFindTorrents(Exception):
- "Could not find torrent(s) with given name(s)."
+ "Could not find torrent(s) with given name(s)."
class UseTheSourceLuke(Exception):
- "Use The Source, Luke."
+ "Use The Source, Luke."
cfg = None
def main():
- try:
- syslog_flags = syslog.LOG_PID
- if os.isatty(sys.stderr.fileno()):
- syslog_flags |= syslog.LOG_PERROR
- syslog.openlog("rpki-torrent", syslog_flags)
-
- # If I seriously expected this script to get a lot of further use,
- # I might rewrite this using subparsers, but it'd be a bit tricky
- # as argparse doesn't support making the subparser argument
- # optional and transmission gives no sane way to provide arguments
- # when running a completion script. So, for the moment, let's
- # just fix the bugs accidently introduced while converting the
- # universe to argparse without making any radical changes to the
- # program structure here, even if the result looks kind of klunky.
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "configuration file")
- parser.add_argument("action", choices = ("poll", "generate", "mirror"), nargs = "?",
- help = "action to take")
- args = parser.parse_args()
-
- global cfg
- cfg = MyConfigParser()
- cfg.read(args.config or
- [os.path.join(dn, fn)
- for fn in ("rcynic.conf", "rpki.conf")
- for dn in ("/var/rcynic/etc", "/usr/local/etc", "/etc")])
-
- if cfg.act_as_generator:
- if args.action == "generate":
- generator_main()
- elif args.action == "mirror":
- mirror_main()
- else:
- raise UseTheSourceLuke
- else:
- if args.action is None and all(v in os.environ for v in tr_env_vars):
- torrent_completion_main()
- elif args.action == "poll":
- poll_main()
- else:
- raise UseTheSourceLuke
-
- except:
- for line in traceback.format_exc().splitlines():
- syslog.syslog(line)
- sys.exit(1)
+ try:
+ syslog_flags = syslog.LOG_PID
+ if os.isatty(sys.stderr.fileno()):
+ syslog_flags |= syslog.LOG_PERROR
+ syslog.openlog("rpki-torrent", syslog_flags)
+
+ # If I seriously expected this script to get a lot of further use,
+ # I might rewrite this using subparsers, but it'd be a bit tricky
+ # as argparse doesn't support making the subparser argument
+ # optional and transmission gives no sane way to provide arguments
+ # when running a completion script. So, for the moment, let's
+ # just fix the bugs accidently introduced while converting the
+ # universe to argparse without making any radical changes to the
+ # program structure here, even if the result looks kind of klunky.
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("-c", "--config",
+ help = "configuration file")
+ parser.add_argument("action", choices = ("poll", "generate", "mirror"), nargs = "?",
+ help = "action to take")
+ args = parser.parse_args()
+
+ global cfg
+ cfg = MyConfigParser()
+ cfg.read(args.config or
+ [os.path.join(dn, fn)
+ for fn in ("rcynic.conf", "rpki.conf")
+ for dn in ("/var/rcynic/etc", "/usr/local/etc", "/etc")])
+
+ if cfg.act_as_generator:
+ if args.action == "generate":
+ generator_main()
+ elif args.action == "mirror":
+ mirror_main()
+ else:
+ raise UseTheSourceLuke
+ else:
+ if args.action is None and all(v in os.environ for v in tr_env_vars):
+ torrent_completion_main()
+ elif args.action == "poll":
+ poll_main()
+ else:
+ raise UseTheSourceLuke
+
+ except:
+ for line in traceback.format_exc().splitlines():
+ syslog.syslog(line)
+ sys.exit(1)
def generator_main():
- import paramiko
-
- class SFTPClient(paramiko.SFTPClient):
- def atomic_rename(self, oldpath, newpath):
- oldpath = self._adjust_cwd(oldpath)
- newpath = self._adjust_cwd(newpath)
- self._log(paramiko.common.DEBUG, 'atomic_rename(%r, %r)' % (oldpath, newpath))
- self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath)
-
- z = ZipFile(url = cfg.generate_url, dn = cfg.zip_dir)
- client = TransmissionClient()
-
- client.remove_torrents(z.torrent_name)
-
- download_dir = client.get_session().download_dir
- torrent_dir = os.path.join(download_dir, z.torrent_name)
- torrent_file = os.path.join(cfg.zip_dir, z.torrent_name + ".torrent")
-
-
- syslog.syslog("Synchronizing local data from %s to %s" % (cfg.unauthenticated, torrent_dir))
- subprocess.check_call((cfg.rsync_prog, "--archive", "--delete",
- os.path.normpath(cfg.unauthenticated) + "/",
- os.path.normpath(torrent_dir) + "/"))
-
- syslog.syslog("Creating %s" % torrent_file)
- try:
- os.unlink(torrent_file)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- ignore_output_for_now = subprocess.check_output( # pylint: disable=W0612
- (cfg.mktorrent_prog,
- "-a", cfg.tracker_url,
- "-c", "RPKI unauthenticated data snapshot generated by rpki-torrent",
- "-o", torrent_file,
- torrent_dir))
-
- syslog.syslog("Generating manifest")
- manifest = create_manifest(download_dir, z.torrent_name)
-
- syslog.syslog("Loading %s with unlimited seeding" % torrent_file)
- f = open(torrent_file, "rb")
- client.add(base64.b64encode(f.read()))
- f.close()
- client.unlimited_seeding(z.torrent_name)
-
- syslog.syslog("Creating upload connection")
- ssh = paramiko.Transport((cfg.sftp_host, cfg.sftp_port))
- try:
- hostkeys = paramiko.util.load_host_keys(cfg.sftp_hostkey_file)[cfg.sftp_host]["ssh-rsa"]
- except ConfigParser.Error:
- hostkeys = None
- ssh.connect(
- username = cfg.sftp_user,
- hostkey = hostkeys,
- pkey = paramiko.RSAKey.from_private_key_file(cfg.sftp_private_key_file))
- sftp = SFTPClient.from_transport(ssh)
-
- zip_filename = os.path.join("data", os.path.basename(z.filename))
- zip_tempname = zip_filename + ".new"
-
- syslog.syslog("Creating %s" % zip_tempname)
- f = sftp.open(zip_tempname, "wb")
- z.set_output_stream(f)
-
- syslog.syslog("Writing %s to zip" % torrent_file)
- z.write(
- torrent_file,
- arcname = os.path.basename(torrent_file),
- compress_type = zipfile.ZIP_DEFLATED)
-
- manifest_name = z.torrent_name + ".manifest"
-
- syslog.syslog("Writing %s to zip" % manifest_name)
- zi = zipfile.ZipInfo(manifest_name, time.gmtime()[:6])
- zi.external_attr = (stat.S_IFREG | 0644) << 16
- zi.internal_attr = 1 # Text, not binary
- z.writestr(zi,
- "".join("%s %s\n" % (v, k) for k, v in manifest.iteritems()),
- zipfile.ZIP_DEFLATED)
-
- syslog.syslog("Closing %s and renaming to %s" % (zip_tempname, zip_filename))
- z.close()
- f.close()
- sftp.atomic_rename(zip_tempname, zip_filename)
-
- syslog.syslog("Closing upload connection")
- ssh.close()
-
-def mirror_main():
- client = TransmissionClient()
- torrent_names = []
-
- for zip_url in cfg.zip_urls:
- if zip_url != cfg.generate_url:
- z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta)
- if z.fetch():
- client.remove_torrents(z.torrent_name)
- syslog.syslog("Mirroring torrent %s" % z.torrent_name)
- client.add(z.get_torrent())
- torrent_names.append(z.torrent_name)
-
- if torrent_names:
- client.unlimited_seeding(*torrent_names)
+ import paramiko
+ class SFTPClient(paramiko.SFTPClient):
+ def atomic_rename(self, oldpath, newpath):
+ oldpath = self._adjust_cwd(oldpath)
+ newpath = self._adjust_cwd(newpath)
+ self._log(paramiko.common.DEBUG, 'atomic_rename(%r, %r)' % (oldpath, newpath))
+ self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath)
-def poll_main():
- for zip_url in cfg.zip_urls:
-
- z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta)
+ z = ZipFile(url = cfg.generate_url, dn = cfg.zip_dir)
client = TransmissionClient()
- if z.fetch():
- client.remove_torrents(z.torrent_name)
- syslog.syslog("Adding torrent %s" % z.torrent_name)
- client.add(z.get_torrent())
-
- elif cfg.run_rcynic_anyway:
- run_rcynic(client, z)
-
-
-def torrent_completion_main():
- torrent_name = os.getenv("TR_TORRENT_NAME")
- torrent_id = int(os.getenv("TR_TORRENT_ID"))
-
- z = ZipFile(url = cfg.find_url(torrent_name), dn = cfg.zip_dir, ta = cfg.zip_ta)
- client = TransmissionClient()
- torrent = client.info([torrent_id]).popitem()[1]
+ client.remove_torrents(z.torrent_name)
- if torrent.name != torrent_name:
- raise InconsistentEnvironment("Torrent name %s does not match ID %d" % (torrent_name, torrent_id))
+ download_dir = client.get_session().download_dir
+ torrent_dir = os.path.join(download_dir, z.torrent_name)
+ torrent_file = os.path.join(cfg.zip_dir, z.torrent_name + ".torrent")
- if z.torrent_name != torrent_name:
- raise InconsistentEnvironment("Torrent name %s does not match torrent name in zip file %s" % (torrent_name, z.torrent_name))
- if torrent is None or torrent.progress != 100:
- raise TorrentNotReady("Torrent %s not ready for checking, how did I get here?" % torrent_name)
+ syslog.syslog("Synchronizing local data from %s to %s" % (cfg.unauthenticated, torrent_dir))
+ subprocess.check_call((cfg.rsync_prog, "--archive", "--delete",
+ os.path.normpath(cfg.unauthenticated) + "/",
+ os.path.normpath(torrent_dir) + "/"))
- log_email("Download complete %s" % z.url)
-
- run_rcynic(client, z)
-
-
-def run_rcynic(client, z):
- """
- Run rcynic and any post-processing we might want.
- """
-
- if cfg.lockfile is not None:
- syslog.syslog("Acquiring lock %s" % cfg.lockfile)
- lock = os.open(cfg.lockfile, os.O_WRONLY | os.O_CREAT, 0600)
- fcntl.flock(lock, fcntl.LOCK_EX)
- else:
- lock = None
-
- syslog.syslog("Checking manifest against disk")
-
- download_dir = client.get_session().download_dir
-
- manifest_from_disk = create_manifest(download_dir, z.torrent_name)
- manifest_from_zip = z.get_manifest()
+ syslog.syslog("Creating %s" % torrent_file)
+ try:
+ os.unlink(torrent_file)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ ignore_output_for_now = subprocess.check_output( # pylint: disable=W0612
+ (cfg.mktorrent_prog,
+ "-a", cfg.tracker_url,
+ "-c", "RPKI unauthenticated data snapshot generated by rpki-torrent",
+ "-o", torrent_file,
+ torrent_dir))
+
+ syslog.syslog("Generating manifest")
+ manifest = create_manifest(download_dir, z.torrent_name)
+
+ syslog.syslog("Loading %s with unlimited seeding" % torrent_file)
+ f = open(torrent_file, "rb")
+ client.add(base64.b64encode(f.read()))
+ f.close()
+ client.unlimited_seeding(z.torrent_name)
- excess_files = set(manifest_from_disk) - set(manifest_from_zip)
- for fn in excess_files:
- del manifest_from_disk[fn]
+ syslog.syslog("Creating upload connection")
+ ssh = paramiko.Transport((cfg.sftp_host, cfg.sftp_port))
+ try:
+ hostkeys = paramiko.util.load_host_keys(cfg.sftp_hostkey_file)[cfg.sftp_host]["ssh-rsa"]
+ except ConfigParser.Error:
+ hostkeys = None
+ ssh.connect(
+ username = cfg.sftp_user,
+ hostkey = hostkeys,
+ pkey = paramiko.RSAKey.from_private_key_file(cfg.sftp_private_key_file))
+ sftp = SFTPClient.from_transport(ssh)
+
+ zip_filename = os.path.join("data", os.path.basename(z.filename))
+ zip_tempname = zip_filename + ".new"
+
+ syslog.syslog("Creating %s" % zip_tempname)
+ f = sftp.open(zip_tempname, "wb")
+ z.set_output_stream(f)
+
+ syslog.syslog("Writing %s to zip" % torrent_file)
+ z.write(
+ torrent_file,
+ arcname = os.path.basename(torrent_file),
+ compress_type = zipfile.ZIP_DEFLATED)
+
+ manifest_name = z.torrent_name + ".manifest"
+
+ syslog.syslog("Writing %s to zip" % manifest_name)
+ zi = zipfile.ZipInfo(manifest_name, time.gmtime()[:6])
+ zi.external_attr = (stat.S_IFREG | 0644) << 16
+ zi.internal_attr = 1 # Text, not binary
+ z.writestr(zi,
+ "".join("%s %s\n" % (v, k) for k, v in manifest.iteritems()),
+ zipfile.ZIP_DEFLATED)
+
+ syslog.syslog("Closing %s and renaming to %s" % (zip_tempname, zip_filename))
+ z.close()
+ f.close()
+ sftp.atomic_rename(zip_tempname, zip_filename)
- if manifest_from_disk != manifest_from_zip:
- raise TorrentDoesNotMatchManifest("Manifest for torrent %s does not match what we got" %
- z.torrent_name)
+ syslog.syslog("Closing upload connection")
+ ssh.close()
- if excess_files:
- syslog.syslog("Cleaning up excess files")
- for fn in excess_files:
- os.unlink(os.path.join(download_dir, fn))
+def mirror_main():
+ client = TransmissionClient()
+ torrent_names = []
- syslog.syslog("Running rcynic")
- log_email("Starting rcynic %s" % z.url)
- subprocess.check_call((cfg.rcynic_prog,
- "-c", cfg.rcynic_conf,
- "-u", os.path.join(client.get_session().download_dir, z.torrent_name)))
- log_email("Completed rcynic %s" % z.url)
+ for zip_url in cfg.zip_urls:
+ if zip_url != cfg.generate_url:
+ z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta)
+ if z.fetch():
+ client.remove_torrents(z.torrent_name)
+ syslog.syslog("Mirroring torrent %s" % z.torrent_name)
+ client.add(z.get_torrent())
+ torrent_names.append(z.torrent_name)
- for cmd in cfg.post_rcynic_commands:
- syslog.syslog("Running post-rcynic command: %s" % cmd)
- subprocess.check_call(cmd, shell = True)
+ if torrent_names:
+ client.unlimited_seeding(*torrent_names)
- if lock is not None:
- syslog.syslog("Releasing lock %s" % cfg.lockfile)
- os.close(lock)
-# See http://www.minstrel.org.uk/papers/sftp/ for details on how to
-# set up safe upload-only SFTP directories on the server. In
-# particular http://www.minstrel.org.uk/papers/sftp/builtin/ is likely
-# to be the right path.
+def poll_main():
+ for zip_url in cfg.zip_urls:
+ z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta)
+ client = TransmissionClient()
-class ZipFile(object):
- """
- Augmented version of standard python zipfile.ZipFile class, with
- some extra methods and specialized capabilities.
-
- All methods of the standard zipfile.ZipFile class are supported, but
- the constructor arguments are different, and opening the zip file
- itself is deferred until a call which requires this, since the file
- may first need to be fetched via HTTPS.
- """
-
- def __init__(self, url, dn, ta = None, verbose = True):
- self.url = url
- self.dir = dn
- self.ta = ta
- self.verbose = verbose
- self.filename = os.path.join(dn, os.path.basename(url))
- self.changed = False
- self.zf = None
- self.peercert = None
- self.torrent_name, zip_ext = os.path.splitext(os.path.basename(url))
- if zip_ext != ".zip":
- raise BadFormat
-
-
- def __getattr__(self, name):
- if self.zf is None:
- self.zf = zipfile.ZipFile(self.filename)
- return getattr(self.zf, name)
-
-
- def build_opener(self):
- """
- Voodoo to create a urllib2.OpenerDirector object with TLS
- certificate checking enabled and a hook to set self.peercert so
- our caller can check the subjectAltName field.
+ if z.fetch():
+ client.remove_torrents(z.torrent_name)
+ syslog.syslog("Adding torrent %s" % z.torrent_name)
+ client.add(z.get_torrent())
- You probably don't want to look at this if you can avoid it.
- """
+ elif cfg.run_rcynic_anyway:
+ run_rcynic(client, z)
- assert self.ta is not None
- # Yes, we're constructing one-off classes. Look away, look away.
+def torrent_completion_main():
+ torrent_name = os.getenv("TR_TORRENT_NAME")
+ torrent_id = int(os.getenv("TR_TORRENT_ID"))
- class HTTPSConnection(httplib.HTTPSConnection):
- zip = self
- def connect(self):
- sock = socket.create_connection((self.host, self.port), self.timeout)
- if getattr(self, "_tunnel_host", None):
- self.sock = sock
- self._tunnel()
- self.sock = ssl.wrap_socket(sock,
- keyfile = self.key_file,
- certfile = self.cert_file,
- cert_reqs = ssl.CERT_REQUIRED,
- ssl_version = ssl.PROTOCOL_TLSv1,
- ca_certs = self.zip.ta)
- self.zip.peercert = self.sock.getpeercert()
+ z = ZipFile(url = cfg.find_url(torrent_name), dn = cfg.zip_dir, ta = cfg.zip_ta)
+ client = TransmissionClient()
+ torrent = client.info([torrent_id]).popitem()[1]
- class HTTPSHandler(urllib2.HTTPSHandler):
- def https_open(self, req):
- return self.do_open(HTTPSConnection, req)
+ if torrent.name != torrent_name:
+ raise InconsistentEnvironment("Torrent name %s does not match ID %d" % (torrent_name, torrent_id))
- return urllib2.build_opener(HTTPSHandler)
+ if z.torrent_name != torrent_name:
+ raise InconsistentEnvironment("Torrent name %s does not match torrent name in zip file %s" % (torrent_name, z.torrent_name))
+ if torrent is None or torrent.progress != 100:
+ raise TorrentNotReady("Torrent %s not ready for checking, how did I get here?" % torrent_name)
- def check_subjectAltNames(self):
- """
- Check self.peercert against URL to make sure we were talking to
- the right HTTPS server.
- """
+ log_email("Download complete %s" % z.url)
- hostname = urlparse.urlparse(self.url).hostname
- subjectAltNames = set(i[1]
- for i in self.peercert.get("subjectAltName", ())
- if i[0] == "DNS")
- if hostname not in subjectAltNames:
- raise WrongServer
+ run_rcynic(client, z)
- def download_file(self, r, bufsize = 4096):
+def run_rcynic(client, z):
"""
- Downloaded file to disk.
+ Run rcynic and any post-processing we might want.
"""
- tempname = self.filename + ".new"
- f = open(tempname, "wb")
- n = int(r.info()["Content-Length"])
- for i in xrange(0, n - bufsize, bufsize): # pylint: disable=W0612
- f.write(r.read(bufsize))
- f.write(r.read())
- f.close()
- mtime = email.utils.mktime_tz(email.utils.parsedate_tz(r.info()["Last-Modified"]))
- os.utime(tempname, (mtime, mtime))
- os.rename(tempname, self.filename)
+ if cfg.lockfile is not None:
+ syslog.syslog("Acquiring lock %s" % cfg.lockfile)
+ lock = os.open(cfg.lockfile, os.O_WRONLY | os.O_CREAT, 0600)
+ fcntl.flock(lock, fcntl.LOCK_EX)
+ else:
+ lock = None
+ syslog.syslog("Checking manifest against disk")
- def set_output_stream(self, stream):
- """
- Set up this zip file for writing to a network stream.
- """
+ download_dir = client.get_session().download_dir
- assert self.zf is None
- self.zf = zipfile.ZipFile(stream, "w")
+ manifest_from_disk = create_manifest(download_dir, z.torrent_name)
+ manifest_from_zip = z.get_manifest()
+ excess_files = set(manifest_from_disk) - set(manifest_from_zip)
+ for fn in excess_files:
+ del manifest_from_disk[fn]
- def fetch(self):
- """
- Fetch zip file from URL given to constructor.
- """
+ if manifest_from_disk != manifest_from_zip:
+ raise TorrentDoesNotMatchManifest("Manifest for torrent %s does not match what we got" %
+ z.torrent_name)
- headers = { "User-Agent" : "rpki-torrent" }
- try:
- headers["If-Modified-Since"] = email.utils.formatdate(
- os.path.getmtime(self.filename), False, True)
- except OSError:
- pass
+ if excess_files:
+ syslog.syslog("Cleaning up excess files")
+ for fn in excess_files:
+ os.unlink(os.path.join(download_dir, fn))
- syslog.syslog("Checking %s..." % self.url)
- try:
- r = self.build_opener().open(urllib2.Request(self.url, None, headers))
- syslog.syslog("%s has changed, starting download" % self.url)
- self.changed = True
- log_email("Downloading %s" % self.url)
- except urllib2.HTTPError, e:
- if e.code == 304:
- syslog.syslog("%s has not changed" % self.url)
- elif e.code == 404:
- syslog.syslog("%s does not exist" % self.url)
- else:
- raise
- r = None
-
- self.check_subjectAltNames()
+ syslog.syslog("Running rcynic")
+ log_email("Starting rcynic %s" % z.url)
+ subprocess.check_call((cfg.rcynic_prog,
+ "-c", cfg.rcynic_conf,
+ "-u", os.path.join(client.get_session().download_dir, z.torrent_name)))
+ log_email("Completed rcynic %s" % z.url)
- if r is not None and r.geturl() != self.url:
- raise UnexpectedRedirect
+ for cmd in cfg.post_rcynic_commands:
+ syslog.syslog("Running post-rcynic command: %s" % cmd)
+ subprocess.check_call(cmd, shell = True)
- if r is not None:
- self.download_file(r)
- r.close()
+ if lock is not None:
+ syslog.syslog("Releasing lock %s" % cfg.lockfile)
+ os.close(lock)
- return self.changed
+# See http://www.minstrel.org.uk/papers/sftp/ for details on how to
+# set up safe upload-only SFTP directories on the server. In
+# particular http://www.minstrel.org.uk/papers/sftp/builtin/ is likely
+# to be the right path.
- def check_format(self):
- """
- Make sure that format of zip file matches our preconceptions: it
- should contain two files, one of which is the .torrent file, the
- other is the manifest, with names derived from the torrent name
- inferred from the URL.
+class ZipFile(object):
"""
+ Augmented version of standard python zipfile.ZipFile class, with
+ some extra methods and specialized capabilities.
- if set(self.namelist()) != set((self.torrent_name + ".torrent", self.torrent_name + ".manifest")):
- raise BadFormat
-
-
- def get_torrent(self):
- """
- Extract torrent file from zip file, encoded in Base64 because
- that's what the transmisionrpc library says it wants.
+ All methods of the standard zipfile.ZipFile class are supported, but
+ the constructor arguments are different, and opening the zip file
+ itself is deferred until a call which requires this, since the file
+ may first need to be fetched via HTTPS.
"""
- self.check_format()
- return base64.b64encode(self.read(self.torrent_name + ".torrent"))
+ def __init__(self, url, dn, ta = None, verbose = True):
+ self.url = url
+ self.dir = dn
+ self.ta = ta
+ self.verbose = verbose
+ self.filename = os.path.join(dn, os.path.basename(url))
+ self.changed = False
+ self.zf = None
+ self.peercert = None
+ self.torrent_name, zip_ext = os.path.splitext(os.path.basename(url))
+ if zip_ext != ".zip":
+ raise BadFormat
+
+
+ def __getattr__(self, name):
+ if self.zf is None:
+ self.zf = zipfile.ZipFile(self.filename)
+ return getattr(self.zf, name)
+
+
+ def build_opener(self):
+ """
+ Voodoo to create a urllib2.OpenerDirector object with TLS
+ certificate checking enabled and a hook to set self.peercert so
+ our caller can check the subjectAltName field.
+
+ You probably don't want to look at this if you can avoid it.
+ """
+
+ assert self.ta is not None
+
+ # Yes, we're constructing one-off classes. Look away, look away.
+
+ class HTTPSConnection(httplib.HTTPSConnection):
+ zip = self
+ def connect(self):
+ sock = socket.create_connection((self.host, self.port), self.timeout)
+ if getattr(self, "_tunnel_host", None):
+ self.sock = sock
+ self._tunnel()
+ self.sock = ssl.wrap_socket(sock,
+ keyfile = self.key_file,
+ certfile = self.cert_file,
+ cert_reqs = ssl.CERT_REQUIRED,
+ ssl_version = ssl.PROTOCOL_TLSv1,
+ ca_certs = self.zip.ta)
+ self.zip.peercert = self.sock.getpeercert()
+
+ class HTTPSHandler(urllib2.HTTPSHandler):
+ def https_open(self, req):
+ return self.do_open(HTTPSConnection, req)
+
+ return urllib2.build_opener(HTTPSHandler)
+
+
+ def check_subjectAltNames(self):
+ """
+ Check self.peercert against URL to make sure we were talking to
+ the right HTTPS server.
+ """
+
+ hostname = urlparse.urlparse(self.url).hostname
+ subjectAltNames = set(i[1]
+ for i in self.peercert.get("subjectAltName", ())
+ if i[0] == "DNS")
+ if hostname not in subjectAltNames:
+ raise WrongServer
+
+
+ def download_file(self, r, bufsize = 4096):
+ """
+ Downloaded file to disk.
+ """
+
+ tempname = self.filename + ".new"
+ f = open(tempname, "wb")
+ n = int(r.info()["Content-Length"])
+ for i in xrange(0, n - bufsize, bufsize): # pylint: disable=W0612
+ f.write(r.read(bufsize))
+ f.write(r.read())
+ f.close()
+ mtime = email.utils.mktime_tz(email.utils.parsedate_tz(r.info()["Last-Modified"]))
+ os.utime(tempname, (mtime, mtime))
+ os.rename(tempname, self.filename)
+
+
+ def set_output_stream(self, stream):
+ """
+ Set up this zip file for writing to a network stream.
+ """
+
+ assert self.zf is None
+ self.zf = zipfile.ZipFile(stream, "w")
+
+
+ def fetch(self):
+ """
+ Fetch zip file from URL given to constructor.
+ """
+
+ headers = { "User-Agent" : "rpki-torrent" }
+ try:
+ headers["If-Modified-Since"] = email.utils.formatdate(
+ os.path.getmtime(self.filename), False, True)
+ except OSError:
+ pass
+
+ syslog.syslog("Checking %s..." % self.url)
+ try:
+ r = self.build_opener().open(urllib2.Request(self.url, None, headers))
+ syslog.syslog("%s has changed, starting download" % self.url)
+ self.changed = True
+ log_email("Downloading %s" % self.url)
+ except urllib2.HTTPError, e:
+ if e.code == 304:
+ syslog.syslog("%s has not changed" % self.url)
+ elif e.code == 404:
+ syslog.syslog("%s does not exist" % self.url)
+ else:
+ raise
+ r = None
+
+ self.check_subjectAltNames()
+
+ if r is not None and r.geturl() != self.url:
+ raise UnexpectedRedirect
+
+ if r is not None:
+ self.download_file(r)
+ r.close()
+
+ return self.changed
+
+
+ def check_format(self):
+ """
+ Make sure that format of zip file matches our preconceptions: it
+ should contain two files, one of which is the .torrent file, the
+ other is the manifest, with names derived from the torrent name
+ inferred from the URL.
+ """
+
+ if set(self.namelist()) != set((self.torrent_name + ".torrent", self.torrent_name + ".manifest")):
+ raise BadFormat
+
+
+ def get_torrent(self):
+ """
+ Extract torrent file from zip file, encoded in Base64 because
+ that's what the transmisionrpc library says it wants.
+ """
+
+ self.check_format()
+ return base64.b64encode(self.read(self.torrent_name + ".torrent"))
+
+
+ def get_manifest(self):
+ """
+ Extract manifest from zip file, as a dictionary.
+
+ For the moment we're fixing up the internal file names from the
+ format that the existing shell-script prototype uses, but this
+ should go away once this program both generates and checks the
+ manifests.
+ """
+
+ self.check_format()
+ result = {}
+ for line in self.open(self.torrent_name + ".manifest"):
+ h, fn = line.split()
+ #
+ # Fixup for earlier manifest format, this should go away
+ if not fn.startswith(self.torrent_name):
+ fn = os.path.normpath(os.path.join(self.torrent_name, fn))
+ #
+ result[fn] = h
+ return result
- def get_manifest(self):
+def create_manifest(topdir, torrent_name):
"""
- Extract manifest from zip file, as a dictionary.
-
- For the moment we're fixing up the internal file names from the
- format that the existing shell-script prototype uses, but this
- should go away once this program both generates and checks the
- manifests.
+ Generate a manifest, expressed as a dictionary.
"""
- self.check_format()
result = {}
- for line in self.open(self.torrent_name + ".manifest"):
- h, fn = line.split()
- #
- # Fixup for earlier manifest format, this should go away
- if not fn.startswith(self.torrent_name):
- fn = os.path.normpath(os.path.join(self.torrent_name, fn))
- #
- result[fn] = h
+ topdir = os.path.abspath(topdir)
+ for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, torrent_name)): # pylint: disable=W0612
+ for filename in filenames:
+ filename = os.path.join(dirpath, filename)
+ f = open(filename, "rb")
+ result[os.path.relpath(filename, topdir)] = hashlib.sha256(f.read()).hexdigest()
+ f.close()
return result
-def create_manifest(topdir, torrent_name):
- """
- Generate a manifest, expressed as a dictionary.
- """
-
- result = {}
- topdir = os.path.abspath(topdir)
- for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, torrent_name)): # pylint: disable=W0612
- for filename in filenames:
- filename = os.path.join(dirpath, filename)
- f = open(filename, "rb")
- result[os.path.relpath(filename, topdir)] = hashlib.sha256(f.read()).hexdigest()
- f.close()
- return result
-
-
def log_email(msg, subj = None):
- try:
- if not msg.endswith("\n"):
- msg += "\n"
- if subj is None:
- subj = msg.partition("\n")[0]
- m = email.mime.text.MIMEText(msg)
- m["Date"] = time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime())
- m["From"] = cfg.log_email
- m["To"] = cfg.log_email
- m["Subject"] = subj
- s = smtplib.SMTP("localhost")
- s.sendmail(cfg.log_email, [cfg.log_email], m.as_string())
- s.quit()
- except ConfigParser.Error:
- pass
+ try:
+ if not msg.endswith("\n"):
+ msg += "\n"
+ if subj is None:
+ subj = msg.partition("\n")[0]
+ m = email.mime.text.MIMEText(msg)
+ m["Date"] = time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime())
+ m["From"] = cfg.log_email
+ m["To"] = cfg.log_email
+ m["Subject"] = subj
+ s = smtplib.SMTP("localhost")
+ s.sendmail(cfg.log_email, [cfg.log_email], m.as_string())
+ s.quit()
+ except ConfigParser.Error:
+ pass
class TransmissionClient(transmissionrpc.client.Client):
- """
- Extension of transmissionrpc.client.Client.
- """
-
- def __init__(self, **kwargs):
- kwargs.setdefault("address", "127.0.0.1")
- kwargs.setdefault("user", cfg.transmission_username)
- kwargs.setdefault("password", cfg.transmission_password)
- transmissionrpc.client.Client.__init__(self, **kwargs)
-
-
- def find_torrents(self, *names):
- """
- Find torrents with given name(s), return id(s).
- """
-
- result = [i for i, t in self.list().iteritems() if t.name in names]
- if not result:
- raise CouldNotFindTorrents
- return result
-
-
- def remove_torrents(self, *names):
"""
- Remove any torrents with the given name(s).
+ Extension of transmissionrpc.client.Client.
"""
- try:
- ids = self.find_torrents(*names)
- except CouldNotFindTorrents:
- pass
- else:
- syslog.syslog("Removing torrent%s %s (%s)" % (
- "" if len(ids) == 1 else "s",
- ", ".join(names),
- ", ".join("#%s" % i for i in ids)))
- self.remove(ids)
+ def __init__(self, **kwargs):
+ kwargs.setdefault("address", "127.0.0.1")
+ kwargs.setdefault("user", cfg.transmission_username)
+ kwargs.setdefault("password", cfg.transmission_password)
+ transmissionrpc.client.Client.__init__(self, **kwargs)
- def unlimited_seeding(self, *names):
- """
- Set unlimited seeding for specified torrents.
- """
- # Apparently seedRatioMode = 2 means "no limit"
- try:
- self.change(self.find_torrents(*names), seedRatioMode = 2)
- except CouldNotFindTorrents:
- syslog.syslog("Couldn't tweak seedRatioMode, blundering onwards")
+ def find_torrents(self, *names):
+ """
+ Find torrents with given name(s), return id(s).
+ """
+ result = [i for i, t in self.list().iteritems() if t.name in names]
+ if not result:
+ raise CouldNotFindTorrents
+ return result
-class MyConfigParser(ConfigParser.RawConfigParser):
- rpki_torrent_section = "rpki-torrent"
+ def remove_torrents(self, *names):
+ """
+ Remove any torrents with the given name(s).
+ """
- @property
- def zip_dir(self):
- return self.get(self.rpki_torrent_section, "zip_dir")
+ try:
+ ids = self.find_torrents(*names)
+ except CouldNotFindTorrents:
+ pass
+ else:
+ syslog.syslog("Removing torrent%s %s (%s)" % (
+ "" if len(ids) == 1 else "s",
+ ", ".join(names),
+ ", ".join("#%s" % i for i in ids)))
+ self.remove(ids)
- @property
- def zip_ta(self):
- return self.get(self.rpki_torrent_section, "zip_ta")
+ def unlimited_seeding(self, *names):
+ """
+ Set unlimited seeding for specified torrents.
+ """
- @property
- def rcynic_prog(self):
- return self.get(self.rpki_torrent_section, "rcynic_prog")
+ # Apparently seedRatioMode = 2 means "no limit"
+ try:
+ self.change(self.find_torrents(*names), seedRatioMode = 2)
+ except CouldNotFindTorrents:
+ syslog.syslog("Couldn't tweak seedRatioMode, blundering onwards")
- @property
- def rcynic_conf(self):
- return self.get(self.rpki_torrent_section, "rcynic_conf")
- @property
- def run_rcynic_anyway(self):
- return self.getboolean(self.rpki_torrent_section, "run_rcynic_anyway")
-
- @property
- def generate_url(self):
- return self.get(self.rpki_torrent_section, "generate_url")
-
- @property
- def act_as_generator(self):
- try:
- return self.get(self.rpki_torrent_section, "generate_url") != ""
- except ConfigParser.Error:
- return False
-
- @property
- def rsync_prog(self):
- return self.get(self.rpki_torrent_section, "rsync_prog")
-
- @property
- def mktorrent_prog(self):
- return self.get(self.rpki_torrent_section, "mktorrent_prog")
-
- @property
- def tracker_url(self):
- return self.get(self.rpki_torrent_section, "tracker_url")
-
- @property
- def sftp_host(self):
- return self.get(self.rpki_torrent_section, "sftp_host")
-
- @property
- def sftp_port(self):
- try:
- return self.getint(self.rpki_torrent_section, "sftp_port")
- except ConfigParser.Error:
- return 22
-
- @property
- def sftp_user(self):
- return self.get(self.rpki_torrent_section, "sftp_user")
-
- @property
- def sftp_hostkey_file(self):
- return self.get(self.rpki_torrent_section, "sftp_hostkey_file")
-
- @property
- def sftp_private_key_file(self):
- return self.get(self.rpki_torrent_section, "sftp_private_key_file")
-
- @property
- def lockfile(self):
- try:
- return self.get(self.rpki_torrent_section, "lockfile")
- except ConfigParser.Error:
- return None
-
- @property
- def unauthenticated(self):
- try:
- return self.get(self.rpki_torrent_section, "unauthenticated")
- except ConfigParser.Error:
- return self.get("rcynic", "unauthenticated")
-
- @property
- def log_email(self):
- return self.get(self.rpki_torrent_section, "log_email")
-
- @property
- def transmission_username(self):
- try:
- return self.get(self.rpki_torrent_section, "transmission_username")
- except ConfigParser.Error:
- return None
+class MyConfigParser(ConfigParser.RawConfigParser):
- @property
- def transmission_password(self):
- try:
- return self.get(self.rpki_torrent_section, "transmission_password")
- except ConfigParser.Error:
- return None
-
- def multioption_iter(self, name, getter = None):
- if getter is None:
- getter = self.get
- if self.has_option(self.rpki_torrent_section, name):
- yield getter(self.rpki_torrent_section, name)
- name += "."
- names = [i for i in self.options(self.rpki_torrent_section) if i.startswith(name) and i[len(name):].isdigit()]
- names.sort(key = lambda s: int(s[len(name):])) # pylint: disable=W0631
- for name in names:
- yield getter(self.rpki_torrent_section, name)
-
- @property
- def zip_urls(self):
- return self.multioption_iter("zip_url")
-
- @property
- def post_rcynic_commands(self):
- return self.multioption_iter("post_rcynic_command")
-
- def find_url(self, torrent_name):
- urls = [u for u in self.zip_urls
- if os.path.splitext(os.path.basename(u))[0] == torrent_name]
- if len(urls) != 1:
- raise TorrentNameDoesNotMatchURL("Can't find URL matching torrent name %s" % torrent_name)
- return urls[0]
+ rpki_torrent_section = "rpki-torrent"
+
+ @property
+ def zip_dir(self):
+ return self.get(self.rpki_torrent_section, "zip_dir")
+
+ @property
+ def zip_ta(self):
+ return self.get(self.rpki_torrent_section, "zip_ta")
+
+ @property
+ def rcynic_prog(self):
+ return self.get(self.rpki_torrent_section, "rcynic_prog")
+
+ @property
+ def rcynic_conf(self):
+ return self.get(self.rpki_torrent_section, "rcynic_conf")
+
+ @property
+ def run_rcynic_anyway(self):
+ return self.getboolean(self.rpki_torrent_section, "run_rcynic_anyway")
+
+ @property
+ def generate_url(self):
+ return self.get(self.rpki_torrent_section, "generate_url")
+
+ @property
+ def act_as_generator(self):
+ try:
+ return self.get(self.rpki_torrent_section, "generate_url") != ""
+ except ConfigParser.Error:
+ return False
+
+ @property
+ def rsync_prog(self):
+ return self.get(self.rpki_torrent_section, "rsync_prog")
+
+ @property
+ def mktorrent_prog(self):
+ return self.get(self.rpki_torrent_section, "mktorrent_prog")
+
+ @property
+ def tracker_url(self):
+ return self.get(self.rpki_torrent_section, "tracker_url")
+
+ @property
+ def sftp_host(self):
+ return self.get(self.rpki_torrent_section, "sftp_host")
+
+ @property
+ def sftp_port(self):
+ try:
+ return self.getint(self.rpki_torrent_section, "sftp_port")
+ except ConfigParser.Error:
+ return 22
+
+ @property
+ def sftp_user(self):
+ return self.get(self.rpki_torrent_section, "sftp_user")
+
+ @property
+ def sftp_hostkey_file(self):
+ return self.get(self.rpki_torrent_section, "sftp_hostkey_file")
+
+ @property
+ def sftp_private_key_file(self):
+ return self.get(self.rpki_torrent_section, "sftp_private_key_file")
+
+ @property
+ def lockfile(self):
+ try:
+ return self.get(self.rpki_torrent_section, "lockfile")
+ except ConfigParser.Error:
+ return None
+
+ @property
+ def unauthenticated(self):
+ try:
+ return self.get(self.rpki_torrent_section, "unauthenticated")
+ except ConfigParser.Error:
+ return self.get("rcynic", "unauthenticated")
+
+ @property
+ def log_email(self):
+ return self.get(self.rpki_torrent_section, "log_email")
+
+ @property
+ def transmission_username(self):
+ try:
+ return self.get(self.rpki_torrent_section, "transmission_username")
+ except ConfigParser.Error:
+ return None
+
+ @property
+ def transmission_password(self):
+ try:
+ return self.get(self.rpki_torrent_section, "transmission_password")
+ except ConfigParser.Error:
+ return None
+
+ def multioption_iter(self, name, getter = None):
+ if getter is None:
+ getter = self.get
+ if self.has_option(self.rpki_torrent_section, name):
+ yield getter(self.rpki_torrent_section, name)
+ name += "."
+ names = [i for i in self.options(self.rpki_torrent_section) if i.startswith(name) and i[len(name):].isdigit()]
+ names.sort(key = lambda s: int(s[len(name):])) # pylint: disable=W0631
+ for name in names:
+ yield getter(self.rpki_torrent_section, name)
+
+ @property
+ def zip_urls(self):
+ return self.multioption_iter("zip_url")
+
+ @property
+ def post_rcynic_commands(self):
+ return self.multioption_iter("post_rcynic_command")
+
+ def find_url(self, torrent_name):
+ urls = [u for u in self.zip_urls
+ if os.path.splitext(os.path.basename(u))[0] == torrent_name]
+ if len(urls) != 1:
+ raise TorrentNameDoesNotMatchURL("Can't find URL matching torrent name %s" % torrent_name)
+ return urls[0]
if __name__ == "__main__":
- main()
+ main()
diff --git a/rp/rcynic/rules.darwin.mk b/rp/rcynic/rules.darwin.mk
index d37b0e75..f1eed3ce 100644
--- a/rp/rcynic/rules.darwin.mk
+++ b/rp/rcynic/rules.darwin.mk
@@ -1,108 +1,38 @@
# $Id$
install-user-and-group: .FORCE
- @if /usr/bin/dscl . -read "/Groups/${RCYNIC_GROUP}" >/dev/null 2>&1; \
+ @if /usr/bin/dscl . -read "/Groups/${RPKI_GROUP}" >/dev/null 2>&1; \
then \
- echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
+ echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \
elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" RealName "${RCYNIC_GECOS}" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" PrimaryGroupID "$$gid" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" Password "*"; \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" && \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" RealName "${RPKI_GECOS}" && \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" PrimaryGroupID "$$gid" && \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" Password "*"; \
then \
- echo "Added group \"${RCYNIC_GROUP}\"."; \
+ echo "Added group \"${RPKI_GROUP}\"."; \
else \
- echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Adding group \"${RPKI_GROUP}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi; \
- if /usr/bin/dscl . -read "/Users/${RCYNIC_USER}" >/dev/null 2>&1; \
+ if /usr/bin/dscl . -read "/Users/${RPKI_USER}" >/dev/null 2>&1; \
then \
- echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
+ echo "You already have a user \"${RPKI_USER}\", so I will use it."; \
elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UserShell "/usr/bin/false" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" RealName "${RCYNIC_GECOS}" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UniqueID "$$uid" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" PrimaryGroupID "$$gid" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" NFSHomeDirectory "/var/empty" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" Password "*"; \
- then \
- echo "Added user \"${RCYNIC_USER}\"."; \
- else \
- echo "Adding user \"${RCYNIC_USER}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
- @if /usr/bin/dscl . -read "/Groups/${RPKIRTR_GROUP}" >/dev/null 2>&1; \
- then \
- echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
- elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" RealName "${RPKIRTR_GECOS}" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" PrimaryGroupID "$$gid" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" Password "*"; \
- then \
- echo "Added group \"${RPKIRTR_GROUP}\"."; \
- else \
- echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi; \
- if /usr/bin/dscl . -read "/Users/${RPKIRTR_USER}" >/dev/null 2>&1; \
- then \
- echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
- elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UserShell "/usr/bin/false" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" RealName "${RPKIRTR_GECOS}" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UniqueID "$$uid" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" PrimaryGroupID "$$gid" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" NFSHomeDirectory "/var/empty" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" Password "*"; \
- then \
- echo "Added user \"${RPKIRTR_USER}\"."; \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" UserShell "/usr/bin/false" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" RealName "${RPKI_GECOS}" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" UniqueID "$$uid" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" PrimaryGroupID "$$gid" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" NFSHomeDirectory "/var/empty" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" Password "*"; \
+ then \
+ echo "Added user \"${RPKI_USER}\"."; \
else \
- echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
+ echo "Adding user \"${RPKI_USER}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
-
-
-install-shared-libraries: .FORCE
- @echo "Copying required shared libraries"
- @shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync"; \
- while true; \
- do \
- closure="$$(/usr/bin/otool -L $${shared_libraries} | /usr/bin/awk '/:$$/ {next} {print $$1}' | /usr/bin/sort -u)"; \
- if test "x$$shared_libraries" = "x$$closure";
- then \
- break; \
- else \
- shared_libraries="$$closure"; \
- fi; \
- done; \
- for shared in /usr/lib/dyld $$shared_libraries; \
- do \
- if /bin/test -r "${RCYNIC_DIR}/$${shared}"; \
- then \
- echo "You already have a \"${RCYNIC_DIR}/$${shared}\", so I will use it"; \
- elif /usr/bin/install -m 555 -o root -g wheel -p "$${shared}" "${RCYNIC_DIR}/$${shared}"; \
- then \
- echo "Copied $${shared} into ${RCYNIC_DIR}"; \
- else \
- echo "Unable to copy $${shared} into ${RCYNIC_DIR}"; \
- exit 1; \
- fi; \
- done
-
-install-rc-scripts:
- ${INSTALL} -o root -g wheel -d ${DESTDIR}/Library/StartupItems/RCynic
- ${INSTALL} -o root -g wheel -m 555 \
- rc-scripts/darwin/RCynic \
- rc-scripts/darwin/StartupParameters.plist \
- ${DESTDIR}/Library/Startup/RCynic
diff --git a/rp/rcynic/rules.freebsd.mk b/rp/rcynic/rules.freebsd.mk
index 5233386e..0f022a2e 100644
--- a/rp/rcynic/rules.freebsd.mk
+++ b/rp/rcynic/rules.freebsd.mk
@@ -1,56 +1,25 @@
# $Id$
install-user-and-group: .FORCE
- @if /usr/sbin/pw groupshow "${RCYNIC_GROUP}" 2>/dev/null; \
+ @if /usr/sbin/pw groupshow "${RPKI_GROUP}" 2>/dev/null; \
then \
- echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
- elif /usr/sbin/pw groupadd ${RCYNIC_GROUP}; \
+ echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \
+ elif /usr/sbin/pw groupadd ${RPKI_GROUP}; \
then \
- echo "Added group \"${RCYNIC_GROUP}\"."; \
+ echo "Added group \"${RPKI_GROUP}\"."; \
else \
- echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Adding group \"${RPKI_GROUP}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
- @if /usr/sbin/pw groupshow "${RPKIRTR_GROUP}" 2>/dev/null; \
+ @if /usr/sbin/pw usershow "${RPKI_USER}" 2>/dev/null; \
then \
- echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
- elif /usr/sbin/pw groupadd ${RPKIRTR_GROUP}; \
+ echo "You already have a user \"${RPKI_USER}\", so I will use it."; \
+ elif /usr/sbin/pw useradd ${RPKI_USER} -g ${RPKI_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RPKI_GECOS}"; \
then \
- echo "Added group \"${RPKIRTR_GROUP}\"."; \
+ echo "Added user \"${RPKI_USER}\"."; \
else \
- echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
+ echo "Adding user \"${RPKI_USER}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
- @if /usr/sbin/pw usershow "${RCYNIC_USER}" 2>/dev/null; \
- then \
- echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
- elif /usr/sbin/pw useradd ${RCYNIC_USER} -g ${RCYNIC_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RCYNIC_GECOS}" -G "${RPKIRTR_GROUP}"; \
- then \
- echo "Added user \"${RCYNIC_USER}\"."; \
- else \
- echo "Adding user \"${RCYNIC_USER}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
- @if /usr/sbin/pw usershow "${RPKIRTR_USER}" 2>/dev/null; \
- then \
- echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
- elif /usr/sbin/pw useradd ${RPKIRTR_USER} -g ${RPKIRTR_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RPKIRTR_GECOS}"; \
- then \
- echo "Added user \"${RPKIRTR_USER}\"."; \
- else \
- echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
-
-
-# We use static compilation on FreeBSD, so no need for shared libraries
-
-install-shared-libraries:
- @true
-
-install-rc-scripts:
- ${INSTALL} -m 555 -o root -g wheel -p rc-scripts/freebsd/rc.d.rcynic ${DESTDIR}/usr/local/etc/rc.d/rcynic
diff --git a/rp/rcynic/rules.linux.mk b/rp/rcynic/rules.linux.mk
index 6a962cef..c116f75c 100644
--- a/rp/rcynic/rules.linux.mk
+++ b/rp/rcynic/rules.linux.mk
@@ -1,92 +1,27 @@
# $Id$
install-user-and-group: .FORCE
- @if getent group ${RCYNIC_GROUP} >/dev/null; \
+ @if getent group ${RPKI_GROUP} >/dev/null; \
then \
- echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
- elif /usr/sbin/groupadd ${RCYNIC_GROUP}; \
+ echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \
+ elif /usr/sbin/groupadd ${RPKI_GROUP}; \
then \
- echo "Added group \"${RCYNIC_GROUP}\"."; \
+ echo "Added group \"${RPKI_GROUP}\"."; \
else \
- echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Adding group \"${RPKI_GROUP}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
@nogroup='-N'; \
if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \
- if getent passwd ${RCYNIC_USER} >/dev/null; \
+ if getent passwd ${RPKI_USER} >/dev/null; \
then \
- echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
- elif /usr/sbin/useradd -g ${RCYNIC_GROUP} -M $$nogroup -d "${RCYNIC_DIR}" -s /sbin/nologin -c "${RCYNIC_GECOS}" ${RCYNIC_USER}; \
+ echo "You already have a user \"${RPKI_USER}\", so I will use it."; \
+ elif /usr/sbin/useradd -g ${RPKI_GROUP} -M $$nogroup -d "${DESTDIR}${RCYNIC_DIR}" -s /sbin/nologin -c "${RPKI_GECOS}" ${RPKI_USER}; \
then \
- echo "Added user \"${RCYNIC_USER}\"."; \
+ echo "Added user \"${RPKI_USER}\"."; \
else \
- echo "Adding user \"${RCYNIC_USER}\" failed..."; \
+ echo "Adding user \"${RPKI_USER}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
- @if getent group ${RPKIRTR_GROUP} >/dev/null; \
- then \
- echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
- elif /usr/sbin/groupadd ${RPKIRTR_GROUP}; \
- then \
- echo "Added group \"${RPKIRTR_GROUP}\"."; \
- else \
- echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
- @nogroup='-N'; \
- if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \
- if getent passwd ${RPKIRTR_USER} >/dev/null; \
- then \
- echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
- elif /usr/sbin/useradd -g ${RPKIRTR_GROUP} -M $$nogroup -d "${RPKIRTR_DIR}" -s /sbin/nologin -c "${RPKIRTR_GECOS}" ${RPKIRTR_USER}; \
- then \
- echo "Added user \"${RPKIRTR_USER}\"."; \
- else \
- echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
- usermod -a -G ${RPKIRTR_GROUP} ${RCYNIC_USER}
-
-install-shared-libraries: .FORCE
- @echo "Copying required shared libraries"
- @if test -d /lib64; then libdir=/lib64; else libdir=/lib; fi; \
- shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync $$(/usr/bin/find $${libdir} -name 'libnss*.so*' -print)"; \
- while true; \
- do \
- closure="$$(/usr/bin/ldd $${shared_libraries} | \
- ${AWK} ' \
- { sub(/:$/, "") } \
- $$0 == "${RCYNIC_DIR}/bin/rcynic" { next } \
- $$0 == "${RCYNIC_DIR}/bin/rsync" { next } \
- $$1 ~ /\/ld-linux\.so/ { next } \
- { for (i = 1; i <= NF; i++) if ($$i ~ /^\//) print $$i } \
- ' | \
- ${SORT} -u)"; \
- if test "X$$shared_libraries" = "X$$closure"; \
- then \
- break; \
- else \
- shared_libraries="$$closure"; \
- fi; \
- done; \
- if test -f $${libdir}/libresolv.so.2; \
- then \
- shared_libraries="$${shared_libraries} $${libdir}/libresolv.so.2";
- fi; \
- for shared in $${libdir}/*ld*.so* $$shared_libraries; \
- do \
- if test ! -r "${RCYNIC_DIR}/$${shared}"; \
- then \
- ${INSTALL} -m 555 -d `dirname "${RCYNIC_DIR}$${shared}"` && \
- ${INSTALL} -m 555 -p "$${shared}" "${RCYNIC_DIR}$${shared}"; \
- fi; \
- done
-
-# No devfs, so no rc script
-
-install-rc-scripts:
- @true
diff --git a/rp/rcynic/rules.unknown.mk b/rp/rcynic/rules.unknown.mk
index 6ce3ea18..03cbd858 100644
--- a/rp/rcynic/rules.unknown.mk
+++ b/rp/rcynic/rules.unknown.mk
@@ -1,4 +1,4 @@
# $Id$
-install-user-and-group install-shared-libraries install-rc-scripts: .FORCE
+install-user-and-group: .FORCE
@echo "Don't know how to make $@ on this platform"; exit 1
diff --git a/rp/rcynic/sample-trust-anchors/apnic-testbed.tal b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal.disabled
index f87a3bf3..f87a3bf3 100644
--- a/rp/rcynic/sample-trust-anchors/apnic-testbed.tal
+++ b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal.disabled
diff --git a/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal.disabled
index 1e466300..1e466300 100644
--- a/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal
+++ b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal.disabled
diff --git a/rp/rcynic/static-rsync/Makefile.in b/rp/rcynic/static-rsync/Makefile.in
deleted file mode 100644
index 8a433c7b..00000000
--- a/rp/rcynic/static-rsync/Makefile.in
+++ /dev/null
@@ -1,44 +0,0 @@
-# $Id$
-
-VERSION = 2.6.9
-
-CFG_ENV = CFLAGS='@CFLAGS@' LDFLAGS='@LDFLAGS@ @LD_STATIC_FLAG@'
-CFG_ARG =
-
-TARBALL = rsync-${VERSION}.tar.gz
-DIRNAME = rsync-${VERSION}
-
-CFG_LOG = > ../config.log 2>&1
-BIN_LOG = > ../build.log 2>&1
-
-BIN = rsync
-
-abs_top_srcdir = @abs_top_srcdir@
-abs_top_builddir = @abs_top_builddir@
-
-all: ${BIN}
-
-${BIN}: ${DIRNAME}/${BIN}
- ln ${DIRNAME}/${BIN} $@
- file $@
-
-${DIRNAME}/${BIN}: configured.stamp
- cd ${DIRNAME} && ${MAKE} ${BIN_LOG}
-
-extracted.stamp: ${TARBALL}
- gzip -c -d ${TARBALL} | tar -xf -
- touch $@
-
-patched.stamp: extracted.stamp
- for i in patches/patch-*; do if test -f "$$i"; then patch -d ${DIRNAME} <"$$i"; else :; fi; done
- touch $@
-
-configured.stamp: patched.stamp
- cd ${DIRNAME} && ${CFG_ENV} ./configure ${CFG_ARG} ${CFG_LOG}
- touch $@
-
-clean:
- rm -rf ${BIN} ${DIRNAME} *.stamp *.log
-
-distclean: clean
- rm -f Makefile
diff --git a/rp/rcynic/static-rsync/README b/rp/rcynic/static-rsync/README
deleted file mode 100644
index 9ff5afa8..00000000
--- a/rp/rcynic/static-rsync/README
+++ /dev/null
@@ -1,15 +0,0 @@
-$Id$
-
-Hack to build a static rsync binary suitable for use in a chroot jail.
-
-The default configuration is for gcc, since that's the most widely
-used compiler on the platforms we use. I've provided hooks intended
-to make it simple to support other compilers just by overriding make
-variables on the command line: if you need to do something more
-drastic than this to get your compiler working, please tell me.
-
-If your platform doesn't support static binaries at all, you're on
-your own (and should whine at your OS vendor, as this is nuts).
-
-We try to stick with rsync release code, but apply security patches
-when necessary.
diff --git a/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091 b/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091
deleted file mode 100644
index 201af96a..00000000
--- a/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091
+++ /dev/null
@@ -1,60 +0,0 @@
---- sender.c 2006-09-20 03:53:32.000000000 +0200
-+++ sender.c 2007-07-25 15:33:05.000000000 +0200
-@@ -123,6 +123,7 @@
- char fname[MAXPATHLEN];
- struct file_struct *file;
- unsigned int offset;
-+ size_t l = 0;
-
- if (ndx < 0 || ndx >= the_file_list->count)
- return;
-@@ -133,6 +134,20 @@
- file->dir.root, "/", NULL);
- } else
- offset = 0;
-+
-+ l = offset + 1;
-+ if (file) {
-+ if (file->dirname)
-+ l += strlen(file->dirname);
-+ if (file->basename)
-+ l += strlen(file->basename);
-+ }
-+
-+ if (l >= sizeof(fname)) {
-+ rprintf(FERROR, "Overlong pathname\n");
-+ exit_cleanup(RERR_FILESELECT);
-+ }
-+
- f_name(file, fname + offset);
- if (remove_source_files) {
- if (do_unlink(fname) == 0) {
-@@ -224,6 +239,7 @@
- enum logcode log_code = log_before_transfer ? FLOG : FINFO;
- int f_xfer = write_batch < 0 ? batch_fd : f_out;
- int i, j;
-+ size_t l = 0;
-
- if (verbose > 2)
- rprintf(FINFO, "send_files starting\n");
-@@ -259,6 +275,20 @@
- fname[offset++] = '/';
- } else
- offset = 0;
-+
-+ l = offset + 1;
-+ if (file) {
-+ if (file->dirname)
-+ l += strlen(file->dirname);
-+ if (file->basename)
-+ l += strlen(file->basename);
-+ }
-+
-+ if (l >= sizeof(fname)) {
-+ rprintf(FERROR, "Overlong pathname\n");
-+ exit_cleanup(RERR_FILESELECT);
-+ }
-+
- fname2 = f_name(file, fname + offset);
-
- if (verbose > 2)
diff --git a/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz b/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz
deleted file mode 100644
index 6377f639..00000000
--- a/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz
+++ /dev/null
Binary files differ
diff --git a/rp/rcynic/validation_status b/rp/rcynic/validation_status
index a3ee36f1..d8e2c8ae 100755
--- a/rp/rcynic/validation_status
+++ b/rp/rcynic/validation_status
@@ -23,14 +23,13 @@ Flat text listing of <validation_status/> elements from rcynic.xml.
import sys
try:
- from lxml.etree import ElementTree
+ from lxml.etree import ElementTree
except ImportError:
- from xml.etree.ElementTree import ElementTree
+ from xml.etree.ElementTree import ElementTree
for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
- for elt in ElementTree(file = filename).findall("validation_status"):
- print "%s %8s %-40s %s" % (
- elt.get("timestamp"),
- elt.get("generation"),
- elt.get("status"),
- elt.text.strip())
+ for elt in ElementTree(file = filename).findall("validation_status"):
+ print "%s %-40s %s" % (
+ elt.get("timestamp"),
+ elt.get("status"),
+ elt.text.strip())
diff --git a/rp/rpki-rtr/rpki-rtr b/rp/rpki-rtr/rpki-rtr
index 5ad4cf26..7f3e6b4f 100755
--- a/rp/rpki-rtr/rpki-rtr
+++ b/rp/rpki-rtr/rpki-rtr
@@ -19,5 +19,5 @@
# PERFORMANCE OF THIS SOFTWARE.
if __name__ == "__main__":
- from rpki.rtr.main import main
- main()
+ from rpki.rtr.main import main
+ main()
diff --git a/rp/rpki-rtr/rules.freebsd.mk b/rp/rpki-rtr/rules.freebsd.mk
index f4d214a3..0f1546b2 100644
--- a/rp/rpki-rtr/rules.freebsd.mk
+++ b/rp/rpki-rtr/rules.freebsd.mk
@@ -18,7 +18,7 @@ install-listener: .FORCE
@if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp[ ]" /etc/inetd.conf; \
then \
echo "You already have an inetd.conf entry for rpki-rtr on TCPv4, so I will use it."; \
- elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpkirtr /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"; \
+ elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpki /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"; \
then \
echo "Added rpki-rtr for TCPv4 to /etc/inetd.conf."; \
else \
@@ -28,7 +28,7 @@ install-listener: .FORCE
@if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp6[ ]" /etc/inetd.conf; \
then \
echo "You already have an inetd.conf entry for rpki-rtr on TCPv6, so I will use it."; \
- elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpkirtr /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"; \
+ elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpki /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"; \
then \
echo "Added rpki-rtr for TCPv6 to /etc/inetd.conf."; \
else \
diff --git a/rp/rpki-rtr/rules.linux.mk b/rp/rpki-rtr/rules.linux.mk
index d9b21590..bad35ace 100644
--- a/rp/rpki-rtr/rules.linux.mk
+++ b/rp/rpki-rtr/rules.linux.mk
@@ -19,7 +19,7 @@ ${DESTDIR}/etc/xinetd.d/rpki-rtr:
print " protocol = tcp"; \
print " port = ${RPKI_RTR_PORT}"; \
print " wait = no"; \
- print " user = rpkirtr"; \
+ print " user = rpki"; \
print " server = ${bindir}/${BIN}"; \
print " server_args = server /var/rcynic/rpki-rtr"; \
print "}"; \
diff --git a/rp/utils/find_roa b/rp/utils/find_roa
index 4cfcccac..9a387c6a 100755
--- a/rp/utils/find_roa
+++ b/rp/utils/find_roa
@@ -25,134 +25,137 @@ import os
import argparse
import rpki.POW
import rpki.oids
+import rpki.config
def check_dir(s):
- if os.path.isdir(s):
- return os.path.abspath(s)
- else:
- raise argparse.ArgumentTypeError("%r is not a directory" % s)
+ if os.path.isdir(s):
+ return os.path.abspath(s)
+ else:
+ raise argparse.ArgumentTypeError("%r is not a directory" % s)
def filename_to_uri(filename):
- if not filename.startswith(args.rcynic_dir):
- raise ValueError
- return "rsync://" + filename[len(args.rcynic_dir):].lstrip("/")
+ if not filename.startswith(args.rcynic_dir):
+ raise ValueError
+ return "rsync://" + filename[len(args.rcynic_dir):].lstrip("/")
def uri_to_filename(uri):
- if not uri.startswith("rsync://"):
- raise ValueError
- return os.path.join(args.rcynic_dir, uri[len("rsync://"):])
+ if not uri.startswith("rsync://"):
+ raise ValueError
+ return os.path.join(args.rcynic_dir, uri[len("rsync://"):])
class Prefix(object):
- """
- One prefix parsed from the command line.
- """
-
- def __init__(self, val):
- addr, length = val.split("/")
- length, sep, maxlength = length.partition("-") # pylint: disable=W0612
- self.prefix = rpki.POW.IPAddress(addr)
- self.length = int(length)
- self.maxlength = int(maxlength) if maxlength else self.length
- if self.maxlength < self.length or self.length < 0 or self.length > self.prefix.bits:
- raise ValueError
- if self.prefix & ((1 << (self.prefix.bits - self.length)) - 1) != 0:
- raise ValueError
-
- def matches(self, roa):
- return any(self.prefix == prefix and
- self.length == length and
- (not args.match_maxlength or
- self.maxlength == maxlength or
- (maxlength is None and
- self.length == self.maxlength))
- for prefix, length, maxlength in roa.prefixes)
-
-
-class ROA(rpki.POW.ROA):
- """
- Aspects of a ROA that we care about.
- """
-
- @classmethod
- def parse(cls, fn):
- assert fn.startswith(args.rcynic_dir)
- self = cls.derReadFile(fn)
- self.fn = fn
- self.extractWithoutVerifying()
- v4, v6 = self.getPrefixes()
- self.prefixes = (v4 or ()) + (v6 or ())
- return self
-
- @property
- def uri(self):
- return filename_to_uri(self.fn)
-
- @property
- def formatted_prefixes(self):
- for prefix in self.prefixes:
- if prefix[2] is None or prefix[1] == prefix[2]:
- yield "%s/%d" % (prefix[0], prefix[1])
- else:
- yield "%s/%d-%d" % (prefix[0], prefix[1], prefix[2])
-
- def __str__(self):
- prefixes = " ".join(self.formatted_prefixes)
- plural = "es" if " " in prefixes else ""
- if args.show_inception:
- return "signingTime %s ASN %s prefix%s %s" % (self.signingTime(), self.getASID(), plural, prefixes)
- else:
- return "ASN %s prefix%s %s" % (self.getASID(), plural, prefixes)
-
- def show(self):
- print "%s %s" % (self, self.fn if args.show_filenames else self.uri)
-
- def show_expiration(self):
- print self
- x = self.certs()[0]
- fn = self.fn
- uri = self.uri
- while uri is not None:
- name = fn if args.show_filenames else uri
- if args.show_inception:
- print "notBefore", x.getNotBefore(), "notAfter", x.getNotAfter(), name
- else:
- print x.getNotAfter(), name
- for uri in x.getAIA() or ():
- if uri.startswith("rsync://"):
- break
- else:
- break
- fn = uri_to_filename(uri)
- if not os.path.exists(fn):
- print "***** MISSING ******", uri
- break
- x = rpki.POW.X509.derReadFile(fn)
- print
-
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-a", "--all", action = "store_true", help = "show all ROAs, do no prefix matching at all")
-parser.add_argument("-m", "--match-maxlength", action = "store_true", help = "pay attention to maxLength values")
-parser.add_argument("-e", "--show-expiration", action = "store_true", help = "show ROA chain expiration dates")
-parser.add_argument("-f", "--show-filenames", action = "store_true", help = "show filenames instead of URIs")
-parser.add_argument("-i", "--show-inception", action = "store_true", help = "show inception dates")
-parser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated output directory")
-parser.add_argument("prefixes", type = Prefix, nargs = "*", help = "ROA prefix(es) to match")
-args = parser.parse_args()
+ """
+ One prefix parsed from the command line.
+ """
+
+ def __init__(self, val):
+ addr, length = val.split("/")
+ length, sep, maxlength = length.partition("-") # pylint: disable=W0612
+ self.prefix = rpki.POW.IPAddress(addr)
+ self.length = int(length)
+ self.maxlength = int(maxlength) if maxlength else self.length
+ if self.maxlength < self.length or self.length < 0 or self.length > self.prefix.bits:
+ raise ValueError
+ if self.prefix & ((1 << (self.prefix.bits - self.length)) - 1) != 0:
+ raise ValueError
+
+ def matches(self, roa): # pylint: disable=W0621
+ return any(self.prefix == prefix and
+ self.length == length and
+ (not args.match_maxlength or
+ self.maxlength == maxlength or
+ (maxlength is None and
+ self.length == self.maxlength))
+ for prefix, length, maxlength in roa.prefixes)
+
+
+class ROA(rpki.POW.ROA): # pylint: disable=W0232
+ """
+ Aspects of a ROA that we care about.
+ """
+
+ @classmethod
+ def parse(cls, fn): # pylint: disable=W0621
+ assert fn.startswith(args.rcynic_dir)
+ self = cls.derReadFile(fn) # pylint: disable=E1101
+ self.fn = fn
+ self.extractWithoutVerifying()
+ v4, v6 = self.getPrefixes()
+ self.prefixes = (v4 or ()) + (v6 or ())
+ return self
+
+ @property
+ def uri(self):
+ return filename_to_uri(self.fn) # pylint: disable=E1101
+
+ @property
+ def formatted_prefixes(self):
+ for prefix in self.prefixes: # pylint: disable=E1101
+ if prefix[2] is None or prefix[1] == prefix[2]:
+ yield "%s/%d" % (prefix[0], prefix[1])
+ else:
+ yield "%s/%d-%d" % (prefix[0], prefix[1], prefix[2])
+
+ def __str__(self):
+ # pylint: disable=E1101
+ prefixes = " ".join(self.formatted_prefixes)
+ plural = "es" if " " in prefixes else ""
+ if args.show_inception:
+ return "signingTime %s ASN %s prefix%s %s" % (self.signingTime(), self.getASID(), plural, prefixes)
+ else:
+ return "ASN %s prefix%s %s" % (self.getASID(), plural, prefixes)
+
+ def show(self):
+ # pylint: disable=E1101
+ print "%s %s" % (self, self.fn if args.show_filenames else self.uri)
+
+ def show_expiration(self):
+ print self
+ x = self.certs()[0] # pylint: disable=E1101
+ fn = self.fn # pylint: disable=E1101,W0621
+ uri = self.uri
+ while uri is not None:
+ name = fn if args.show_filenames else uri
+ if args.show_inception:
+ print "notBefore", x.getNotBefore(), "notAfter", x.getNotAfter(), name
+ else:
+ print x.getNotAfter(), name
+ for uri in x.getAIA() or ():
+ if uri.startswith("rsync://"):
+ break
+ else:
+ break
+ fn = uri_to_filename(uri)
+ if not os.path.exists(fn):
+ print "***** MISSING ******", uri
+ break
+ x = rpki.POW.X509.derReadFile(fn)
+ print
+
+
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("-a", "--all", action = "store_true", help = "show all ROAs, do no prefix matching at all")
+cfg.argparser.add_argument("-m", "--match-maxlength", action = "store_true", help = "pay attention to maxLength values")
+cfg.argparser.add_argument("-e", "--show-expiration", action = "store_true", help = "show ROA chain expiration dates")
+cfg.argparser.add_argument("-f", "--show-filenames", action = "store_true", help = "show filenames instead of URIs")
+cfg.argparser.add_argument("-i", "--show-inception", action = "store_true", help = "show inception dates")
+cfg.argparser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated output directory")
+cfg.argparser.add_argument("prefixes", type = Prefix, nargs = "*", help = "ROA prefix(es) to match")
+args = cfg.argparser.parse_args()
# If there's some way to automate this in the parser, I don't know what it is, so just catch it here.
if args.all != (not args.prefixes):
- parser.error("--all and prefix list are mutually exclusive")
+ parser.error("--all and prefix list are mutually exclusive")
for root, dirs, files in os.walk(args.rcynic_dir):
- for fn in files:
- if fn.endswith(".roa"):
- roa = ROA.parse(os.path.join(root, fn))
- if args.all or any(prefix.matches(roa) for prefix in args.prefixes):
- if args.show_expiration:
- roa.show_expiration()
- else:
- roa.show()
+ for fn in files:
+ if fn.endswith(".roa"):
+ roa = ROA.parse(os.path.join(root, fn))
+ if args.all or any(prefix.matches(roa) for prefix in args.prefixes):
+ if args.show_expiration:
+ roa.show_expiration()
+ else:
+ roa.show()
diff --git a/rp/utils/hashdir b/rp/utils/hashdir
index d3fe393c..c2c100b8 100755
--- a/rp/utils/hashdir
+++ b/rp/utils/hashdir
@@ -26,42 +26,40 @@ distributed as part of the repository system.
import os
import sys
-import argparse
-import rpki.POW
+import rpki.config
+
+from rpki.rcynicdb.iterator import authenticated_objects
def check_dir(s):
- if os.path.isdir(s):
- return os.path.abspath(s)
- else:
- raise argparse.ArgumentTypeError("%r is not a directory" % s)
+ if os.path.isdir(s):
+ return os.path.abspath(s)
+ else:
+ raise argparse.ArgumentTypeError("{!r} is not a directory".format(s))
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-v", "--verbose", action = "store_true", help = "whistle while you work")
-parser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated output directory")
-parser.add_argument("output_dir", help = "name of output directory to create")
-args = parser.parse_args()
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("-v", "--verbose", action = "store_true", help = "whistle while you work")
+cfg.argparser.add_argument("rcynic_dir", nargs = "?", type = check_dir, help = "rcynic authenticated output directory")
+cfg.argparser.add_argument("output_dir", help = "name of output directory to create")
+args = cfg.argparser.parse_args()
if not os.path.isdir(args.output_dir):
- os.makedirs(args.output_dir)
+ os.makedirs(args.output_dir)
-for root, dirs, files in os.walk(args.rcynic_dir):
- for ifn in files:
- ifn = os.path.join(root, ifn)
- if ifn.endswith(".cer"):
- obj = rpki.POW.X509.derReadFile(ifn)
- fmt = "%08x.%%d" % obj.getSubjectHash()
- elif ifn.endswith(".crl"):
- obj = rpki.POW.CRL.derReadFile(ifn)
- fmt = "%08x.r%%d" % obj.getIssuerHash()
- else:
- continue
+def store(uri, obj, fmt):
for i in xrange(1000000):
- ofn = os.path.join(args.output_dir, fmt % i)
- if not os.path.exists(ofn):
- with open(ofn, "w") as f:
- f.write(obj.pemWrite())
- if args.verbose:
- print ofn, "<=", ifn
- break
+ fn = os.path.join(args.output_dir, fmt.format(i))
+ if os.path.exists(fn):
+ continue
+ with open(fn, "w") as f:
+ f.write(obj.pemWrite())
+ if args.verbose:
+ print fn, "<=", uri
+ return
else:
- sys.exit("No path name available for %s (%s)" % (ifn, ofn))
+ sys.exit("No path name available for {} ({})".format(uri, fn))
+
+for uri, cer in authenticated_objects(uri_suffix = ".cer"):
+ store(uri, cer, "{:08x}.{{:d}}".format(cer.getSubjectHash()))
+
+for uri, crl in authenticated_objects(uri_suffix = ".crl"):
+ store(uri, crl, "{:08x}.r{{:d}}".format(crl.getIssuerHash()))
diff --git a/rp/utils/print_roa b/rp/utils/print_roa
index d5db0c3c..c5b7793a 100755
--- a/rp/utils/print_roa
+++ b/rp/utils/print_roa
@@ -21,53 +21,56 @@ Pretty-print the content of a ROA. Does NOT attempt to verify the
signature.
"""
-import argparse
+import rpki.config
import rpki.POW
-class ROA(rpki.POW.ROA):
+class ROA(rpki.POW.ROA): # pylint: disable=W0232
- @staticmethod
- def _format_prefix(prefix):
- if prefix[2] is None or prefix[1] == prefix[2]:
- return "%s/%d" % (prefix[0], prefix[1])
- else:
- return "%s/%d-%d" % (prefix[0], prefix[1], prefix[2])
+ v4_prefixes = None
+ v6_prefixes = None
+
+ @staticmethod
+ def _format_prefix(p):
+ if p[2] in (None, p[1]):
+ return "%s/%d" % (p[0], p[1])
+ else:
+ return "%s/%d-%d" % (p[0], p[1], p[2])
- def parse(self):
- self.extractWithoutVerifying()
- v4, v6 = self.getPrefixes()
- self.v4_prefixes = [self._format_prefix(p) for p in (v4 or ())]
- self.v6_prefixes = [self._format_prefix(p) for p in (v6 or ())]
+ def parse(self):
+ self.extractWithoutVerifying() # pylint: disable=E1101
+ v4, v6 = self.getPrefixes() # pylint: disable=E1101
+ self.v4_prefixes = [self._format_prefix(p) for p in (v4 or ())]
+ self.v6_prefixes = [self._format_prefix(p) for p in (v6 or ())]
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-b", "--brief", action = "store_true", help = "show only ASN and prefix(es)")
-parser.add_argument("-c", "--cms", action = "store_true", help = "print text representation of entire CMS blob")
-parser.add_argument("-s", "--signing-time", action = "store_true", help = "show SigningTime in brief mode")
-parser.add_argument("roas", nargs = "+", type = ROA.derReadFile, help = "ROA(s) to print")
-args = parser.parse_args()
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("--brief", action = "store_true", help = "show only ASN and prefix(es)")
+cfg.argparser.add_argument("--cms", action = "store_true", help = "print text representation of entire CMS blob")
+cfg.argparser.add_argument("--signing-time", action = "store_true", help = "show SigningTime in brief mode")
+cfg.argparser.add_argument("roas", nargs = "+", type = ROA.derReadFile, help = "ROA(s) to print") # pylint: disable=E1101
+args = cfg.argparser.parse_args()
for roa in args.roas:
- roa.parse()
- if args.brief:
- if args.signing_time:
- print roa.signingTime(),
- print roa.getASID(), " ".join(roa.v4_prefixes + roa.v6_prefixes)
- else:
- print "ROA Version: ", roa.getVersion()
- print "SigningTime: ", roa.signingTime()
- print "asID: ", roa.getASID()
- if roa.v4_prefixes:
- print " addressFamily:", 1
- for p in roa.v4_prefixes:
- print " IPAddress:", p
- if roa.v6_prefixes:
- print " addressFamily:", 2
- for p in roa.v6_prefixes:
- print " IPAddress:", p
- if args.cms:
- print roa.pprint()
- for cer in roa.certs():
- print cer.pprint()
- for crl in roa.crls():
- print crl.pprint()
- print
+ roa.parse()
+ if args.brief:
+ if args.signing_time:
+ print roa.signingTime(),
+ print roa.getASID(), " ".join(roa.v4_prefixes + roa.v6_prefixes)
+ else:
+ print "ROA Version: ", roa.getVersion()
+ print "SigningTime: ", roa.signingTime()
+ print "asID: ", roa.getASID()
+ if roa.v4_prefixes:
+ print " addressFamily:", 1
+ for prefix in roa.v4_prefixes:
+ print " IPAddress:", prefix
+ if roa.v6_prefixes:
+ print " addressFamily:", 2
+ for prefix in roa.v6_prefixes:
+ print " IPAddress:", prefix
+ if args.cms:
+ print roa.pprint()
+ for cer in roa.certs():
+ print cer.pprint()
+ for crl in roa.crls():
+ print crl.pprint()
+ print
diff --git a/rp/utils/print_rpki_manifest b/rp/utils/print_rpki_manifest
index 5ebc6356..74a3fbd4 100755
--- a/rp/utils/print_rpki_manifest
+++ b/rp/utils/print_rpki_manifest
@@ -21,30 +21,30 @@ Pretty-print the content of a manifest. Does NOT attempt to verify the
signature.
"""
-import argparse
+import rpki.config
import rpki.POW
import rpki.oids
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-c", "--cms", action = "store_true", help = "print text representation of entire CMS blob")
-parser.add_argument("manifests", nargs = "+", type = rpki.POW.Manifest.derReadFile, help = "manifest(s) to print")
-args = parser.parse_args()
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("--cms", action = "store_true", help = "print text representation of entire CMS blob")
+cfg.argparser.add_argument("manifests", nargs = "+", type = rpki.POW.Manifest.derReadFile, help = "manifest(s) to print")
+args = cfg.argparser.parse_args()
for mft in args.manifests:
- mft.extractWithoutVerifying()
- print "Manifest Version:", mft.getVersion()
- print "SigningTime: ", mft.signingTime()
- print "Number: ", mft.getManifestNumber()
- print "thisUpdate: ", mft.getThisUpdate()
- print "nextUpdate: ", mft.getNextUpdate()
- print "fileHashAlg: ", rpki.oids.oid2name(mft.getAlgorithm())
- for i, fah in enumerate(mft.getFiles()):
- name, hash = fah
- print "fileList[%3d]: %s %s" % (i, ":".join(("%02X" % ord(h) for h in hash)), name)
- if args.cms:
- print mft.pprint()
- for cer in mft.certs():
- print cer.pprint()
- for crl in mft.crls():
- print crl.pprint()
- print
+ mft.extractWithoutVerifying()
+ print "Manifest Version:", mft.getVersion()
+ print "SigningTime: ", mft.signingTime()
+ print "Number: ", mft.getManifestNumber()
+ print "thisUpdate: ", mft.getThisUpdate()
+ print "nextUpdate: ", mft.getNextUpdate()
+ print "fileHashAlg: ", rpki.oids.oid2name(mft.getAlgorithm())
+ for i, fah in enumerate(mft.getFiles()):
+ name, obj_hash = fah
+ print "fileList[%3d]: %s %s" % (i, ":".join(("%02X" % ord(h) for h in obj_hash)), name)
+ if args.cms:
+ print mft.pprint()
+ for cer in mft.certs():
+ print cer.pprint()
+ for crl in mft.crls():
+ print crl.pprint()
+ print
diff --git a/rp/utils/scan_roas b/rp/utils/scan_roas
index a1b64f01..510fd7a0 100755
--- a/rp/utils/scan_roas
+++ b/rp/utils/scan_roas
@@ -24,40 +24,39 @@ per line.
import os
import argparse
+
+import rpki.config
import rpki.POW
+from rpki.rcynicdb.iterator import authenticated_objects
+
def check_dir(d):
- if not os.path.isdir(d):
- raise argparse.ArgumentTypeError("%r is not a directory" % d)
- return d
-
-class ROA(rpki.POW.ROA):
-
- @classmethod
- def parse(cls, fn):
- self = cls.derReadFile(fn)
- self.extractWithoutVerifying()
- return self
-
- @property
- def prefixes(self):
- v4, v6 = self.getPrefixes()
- for prefix, length, maxlength in (v4 or ()) + (v6 or ()):
- if maxlength is None or length == maxlength:
- yield "%s/%d" % (prefix, length)
- else:
- yield "%s/%d-%d" % (prefix, length, maxlength)
-
- def __str__(self):
- return "%s %s %s" % (self.signingTime(), self.getASID(), " ".join(self.prefixes))
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("rcynic_dir", nargs = "+", type = check_dir,
- help = "rcynic authenticated output directory")
-args = parser.parse_args()
-
-for rcynic_dir in args.rcynic_dir:
- for root, dirs, files in os.walk(rcynic_dir):
- for fn in files:
- if fn.endswith(".roa"):
- print ROA.parse(os.path.join(root, fn))
+ if not os.path.isdir(d):
+ raise argparse.ArgumentTypeError("%r is not a directory" % d)
+ return d
+
+class ROA(rpki.POW.ROA): # pylint: disable=W0232
+
+ @property
+ def prefixes(self):
+ v4, v6 = self.getPrefixes() # pylint: disable=E1101
+ for prefix, length, maxlength in (v4 or ()) + (v6 or ()):
+ if maxlength is None or length == maxlength:
+ yield "%s/%d" % (prefix, length)
+ else:
+ yield "%s/%d-%d" % (prefix, length, maxlength)
+
+ def __str__(self):
+ # pylint: disable=E1101
+ return "%s %s %s" % (self.signingTime(), self.getASID(), " ".join(self.prefixes))
+
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("rcynic_dir", nargs = "?", type = check_dir,
+ help = "rcynic authenticated output directory")
+args = cfg.argparser.parse_args()
+
+for uri, roa in authenticated_objects(args.rcynic_dir,
+ uri_suffix = ".roa",
+ class_map = dict(roa = ROA)):
+ roa.extractWithoutVerifying()
+ print roa
diff --git a/rp/utils/scan_routercerts b/rp/utils/scan_routercerts
index 081a6293..540a8e25 100755
--- a/rp/utils/scan_routercerts
+++ b/rp/utils/scan_routercerts
@@ -26,32 +26,29 @@ import base64
import argparse
import rpki.POW
import rpki.oids
+import rpki.config
-def check_dir(s):
- if not os.path.isdir(s):
- raise argparse.ArgumentTypeError("%r is not a directory" % s)
- return s
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated output directory")
-args = parser.parse_args()
+from rpki.rcynicdb.iterator import authenticated_objects
-for root, dirs, files in os.walk(args.rcynic_dir):
-
- for fn in files:
+def check_dir(s):
+ if not os.path.isdir(s):
+ raise argparse.ArgumentTypeError("{!r} is not a directory".format(s))
+ return s
- if not fn.endswith(".cer"):
- continue
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("rcynic_dir", nargs = "?", type = check_dir,
+ help = "rcynic authenticated output directory")
+args = cfg.argparser.parse_args()
- x = rpki.POW.X509.derReadFile(os.path.join(root, fn))
+for uri, cer in authenticated_objects(args.rcynic_dir, uri_suffix = ".cer"):
- if rpki.oids.id_kp_bgpsec_router not in (x.getEKU() or ()):
- continue
+ if rpki.oids.id_kp_bgpsec_router not in (cer.getEKU() or ()):
+ continue
- sys.stdout.write(base64.urlsafe_b64encode(x.getSKI()).rstrip("="))
+ sys.stdout.write(base64.urlsafe_b64encode(cer.getSKI()).rstrip("="))
- for min_asn, max_asn in x.getRFC3779()[0]:
- for asn in xrange(min_asn, max_asn + 1):
- sys.stdout.write(" %s" % asn)
+ for min_asn, max_asn in cer.getRFC3779()[0]:
+ for asn in xrange(min_asn, max_asn + 1):
+ sys.stdout.write(" {}".format(asn))
- sys.stdout.write(" %s\n" % base64.b64encode(x.getPublicKey().derWritePublic()))
+ sys.stdout.write(" {}\n".format(base64.b64encode(cer.getPublicKey().derWritePublic())))
diff --git a/rp/utils/uri b/rp/utils/uri
index e72d5e0d..d3d9eebb 100755
--- a/rp/utils/uri
+++ b/rp/utils/uri
@@ -24,47 +24,57 @@ Input files must be in DER format and may be either X.509v3 certificates
or CMS objects which contain X.509v3 certificates in the CMS wrapper.
"""
-import argparse
+import rpki.config
import rpki.POW
class Certificate(object):
- @staticmethod
- def first_rsync(uris):
- if uris is not None:
- for uri in uris:
- if uri.startswith("rsync://"):
- return uri
- return None
+ @staticmethod
+ def first_whatever(uris, prefix):
+ if uris is not None:
+ for uri in uris:
+ if uri.startswith(prefix):
+ return uri
+ return None
- def __init__(self, fn):
- try:
- x = rpki.POW.X509.derReadFile(fn)
- except: # pylint: disable=W0702
- try:
- cms = rpki.POW.CMS.derReadFile(fn)
- cms.extractWithoutVerifying()
- x = cms.certs()[0]
- except:
- raise ValueError
- sia = x.getSIA() or (None, None, None)
- self.fn = fn
- self.uris = (
- ("AIA:caIssuers", self.first_rsync(x.getAIA())),
- ("SIA:caRepository", self.first_rsync(sia[0])),
- ("SIA:rpkiManifest", self.first_rsync(sia[1])),
- ("SIA:signedObject", self.first_rsync(sia[2])),
- ("CRLDP", self.first_rsync(x.getCRLDP())))
+ def first_rsync(self, uris):
+ return self.first_whatever(uris, "rsync://")
- def __str__(self):
- words = [self.fn] if args.single_line else ["File: " + self.fn]
- words.extend(" %s: %s" % (tag, uri) for tag, uri in self.uris if uri is not None)
- return ("" if args.single_line else "\n").join(words)
+ def first_https(self, uris):
+ return self.first_whatever(uris, "https://")
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-s", "--single-line", action = "store_true", help = "single output line per object")
-parser.add_argument("certs", nargs = "+", type = Certificate, help = "RPKI objects to examine")
-args = parser.parse_args()
+ def first_http(self, uris):
+ return self.first_whatever(uris, "http://")
+
+ def __init__(self, fn):
+ try:
+ x = rpki.POW.X509.derReadFile(fn)
+ except:
+ try:
+ cms = rpki.POW.CMS.derReadFile(fn)
+ cms.extractWithoutVerifying()
+ x = cms.certs()[0]
+ except:
+ raise ValueError
+ sia = x.getSIA() or (None, None, None, None)
+ self.fn = fn
+ self.uris = (
+ ("AIA:caIssuers", self.first_rsync(x.getAIA())),
+ ("SIA:caRepository", self.first_rsync(sia[0])),
+ ("SIA:rpkiManifest", self.first_rsync(sia[1])),
+ ("SIA:signedObject", self.first_rsync(sia[2])),
+ ("SIA:rpkiNotify", self.first_https(sia[3]) or self.first_http(sia[3])),
+ ("CRLDP", self.first_rsync(x.getCRLDP())))
+
+ def __str__(self):
+ words = [self.fn] if args.single_line else ["File: " + self.fn]
+ words.extend(" %s: %s" % (tag, uri) for tag, uri in self.uris if uri is not None)
+ return ("" if args.single_line else "\n").join(words)
+
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("-s", "--single-line", action = "store_true", help = "single output line per object")
+cfg.argparser.add_argument("certs", nargs = "+", type = Certificate, help = "RPKI objects to examine")
+args = cfg.argparser.parse_args()
for cert in args.certs:
- print cert
+ print cert