Diffstat (limited to 'rpkid.without_tls/rpki')
-rw-r--r--  rpkid.without_tls/rpki/__doc__.py.in      2380
-rw-r--r--  rpkid.without_tls/rpki/__init__.py           2
-rw-r--r--  rpkid.without_tls/rpki/adns.py             374
-rw-r--r--  rpkid.without_tls/rpki/async.py            411
-rw-r--r--  rpkid.without_tls/rpki/cli.py              176
-rw-r--r--  rpkid.without_tls/rpki/config.py           224
-rw-r--r--  rpkid.without_tls/rpki/exceptions.py       328
-rw-r--r--  rpkid.without_tls/rpki/http.py             979
-rw-r--r--  rpkid.without_tls/rpki/ipaddrs.py          114
-rw-r--r--  rpkid.without_tls/rpki/left_right.py      1149
-rw-r--r--  rpkid.without_tls/rpki/log.py              111
-rw-r--r--  rpkid.without_tls/rpki/manifest.py          54
-rw-r--r--  rpkid.without_tls/rpki/myrpki.py          1835
-rw-r--r--  rpkid.without_tls/rpki/oids.py              58
-rw-r--r--  rpkid.without_tls/rpki/publication.py      401
-rw-r--r--  rpkid.without_tls/rpki/relaxng.py         1773
-rw-r--r--  rpkid.without_tls/rpki/resource_set.py    1107
-rw-r--r--  rpkid.without_tls/rpki/roa.py               76
-rw-r--r--  rpkid.without_tls/rpki/rpki_engine.py     1411
-rw-r--r--  rpkid.without_tls/rpki/sql.py              352
-rw-r--r--  rpkid.without_tls/rpki/sundial.py          287
-rw-r--r--  rpkid.without_tls/rpki/up_down.py          689
-rw-r--r--  rpkid.without_tls/rpki/x509.py            1242
-rw-r--r--  rpkid.without_tls/rpki/xml_utils.py        470
24 files changed, 16003 insertions, 0 deletions
diff --git a/rpkid.without_tls/rpki/__doc__.py.in b/rpkid.without_tls/rpki/__doc__.py.in
new file mode 100644
index 00000000..7b2b280b
--- /dev/null
+++ b/rpkid.without_tls/rpki/__doc__.py.in
@@ -0,0 +1,2380 @@
+## @file
+# @details
+# Documentation source, expressed as Python comments to make Doxygen happy.
+#
+# $Id$
+#
+# Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+## @mainpage RPKI Engine Reference Manual
+#
+# This collection of Python modules implements a prototype of the
+# RPKI Engine. This is a work in progress.
+#
+# See http://viewvc.hactrn.net/subvert-rpki.hactrn.net/ for code and
+# design documents.
+#
+# The RPKI Engine is an implementation of the production-side tools
+# for generating certificates, CRLs, and ROAs. The
+# <a href="http://viewvc.hactrn.net/subvert-rpki.hactrn.net/rcynic/">relying party tools</a>
+# are a separate (and much simpler) package.
+#
+# The Subversion repository for the entire project is available for
+# (read-only) anonymous access at http://subvert-rpki.hactrn.net/.
+#
+# The documentation you're reading is generated automatically by
+# Doxygen from comments and documentation in
+# <a href="http://viewvc.hactrn.net/subvert-rpki.hactrn.net/rpkid/rpki/">the code</a>.
+#
+# Besides the automatically-generated code documentation, this manual
+# also includes documentation of the overall package:
+#
+# @li @subpage Overview "Overview of the tools"
+#
+# @li @subpage Installation "Installation instructions"
+#
+# @li @subpage Configuration "Configuration instructions"
+#
+# @li @subpage MySQL-Setup "MySQL setup instructions"
+#
+# @li The @subpage MyRPKI "myrpki tool"
+#
+# @li A description of the @subpage Left-Right "left-right protocol"
+#
+# @li A description of the @subpage Publication "publication protocol"
+#
+# @li A description of the @subpage bpki-model "BPKI model"
+# used to secure the up-down, left-right, and %publication protocols
+#
+# @li A description of the several @subpage sql-schemas "SQL database schemas"
+#
+# This work was funded from 2006 through 2008 by <a
+# href="http://www.arin.net/">ARIN</a>, in collaboration with the
+# other Regional Internet Registries. Current work is funded by DHS.
+
+## @page Overview Overview
+#
+# @section Terminology Terminology
+#
+# A few special terms that appear often enough in code or
+# documentation that they need explaining.
+#
+# @todo
+# These explanations should be fleshed out properly.
+#
+# @par IRBE:
+# Internet Registry Back End.
+#
+# @par IRDB:
+# Internet Registry Data Base.
+#
+# @par BPKI:
+# Business PKI.
+#
+# @par RPKI:
+# Resource PKI.
+#
+#
+# @section Programs Programs
+#
+# At present the package is intended to be run out of the @c rpkid/
+# directory.
+#
+# In addition to the library routines in the @c rpkid/rpki/ directory,
+# the package includes the following programs:
+#
+# @li @ref rpkid "@c rpkid":
+# The main RPKI engine daemon.
+#
+# @li @ref pubd "@c pubd":
+# The publication engine daemon.
+#
+# @li @ref rootd "@c rootd":
+# A separate daemon for handling the root of an RPKI
+# certificate tree. This is essentially a stripped down
+# version of rpkid with no SQL database, no left-right
+# protocol implementation, and only the parent side of
+# the up-down protocol. It's separate because the root
+# is a special case in several ways and it was simpler
+# to keep the special cases out of the main daemon.
+#
+# @li @ref irdbd "@c irdbd":
+# A sample implementation of an IR database daemon.
+# rpkid calls into this to perform lookups via the
+# left-right protocol.
+#
+# @li @ref smoketest "@c smoketest":
+# A test tool for running a collection of rpkid and irdb
+# instances under common control, driven by a unified
+# test script.
+#
+# @li @ref yamltest "@c yamltest":
+# Another test tool which takes the same input format as
+# @c smoketest.py, but with slightly different purpose.
+# @c smoketest.py is intended to support regression tests,
+# while @c yamltest.py is intended for automated testing
+# of something closer to a real operational environment.
+# There's a fair amount of code duplication between the
+# two, and at some point they will probably be merged
+# into a single program that supports both modes of
+# operation.
+#
+# Most of these programs take configuration files in a common format
+# similar to that used by the OpenSSL command line tool. The test
+# programs also take input in YAML format to drive the tests. Runs of
+# the @c yamltest test tool will generate a fairly complete set of
+# configuration files, which may be useful as examples.
+#
+# Basic operation consists of creating the appropriate MySQL databases
+# (see @ref MySQL-Setup "MySQL Setup"), configuring relationships
+# between parents and children and between publication clients and
+# repositories (see @ref MyRPKI "The myrpki tool"), starting @c rpkid,
+# @c pubd, @c rootd, and @c irdbd, and using the left-right and
+# publication control protocols (see @ref MyRPKI "The myrpki tool") to
+# set up rpkid's and pubd's internal state. All other operations
+# should occur either as a result of cron events or as a result of
+# incoming left-right and up-down protocol requests.
+#
+# The core programs are all event-driven, and are (in theory) capable
+# of supporting an arbitrary number of hosted RPKI engines running in a
+# single rpkid instance, up to the performance limits of the underlying
+# hardware.
+#
+# At present the daemon programs all run in the foreground; that is, the
+# daemons themselves make no attempt to put themselves in the background.
+# The easiest way to run the servers is to run the @c start_servers
+# script, which examines your @c myrpki.conf file and starts the
+# appropriate servers in background using @c myrpki.conf as the
+# configuration file for each server as well.
+#
+# If you prefer, you can run each server by hand instead of using the
+# script, eg, using Bourne shell syntax to run rpkid in background:
+#
+# @verbatim
+# $ python rpkid.py &
+# $ echo >rpkid.pid "$!"
+# @endverbatim
+#
+# All of the daemons use syslog by default. You can change this by
+# running either the servers themselves or the @c start_servers script
+# with the "-d" option. Used as an argument to a server directly,
+# "-d" causes that server to log to @c stderr instead of to syslog.
+# Used as an argument to @c start_servers, "-d" starts each of the
+# servers with "-d" while redirecting @c stderr from each server to a
+# separate log file. This is intended primarily for debugging.
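+#
+# For example, to run rpkid by hand with "-d" and capture its log
+# output in a file (Bourne shell syntax; the log file name is just an
+# example):
+#
+# @verbatim
+# $ python rpkid.py -d 2>rpkid.log &
+# $ echo >rpkid.pid "$!"
+# @endverbatim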
+#
+# Some of the options that the several daemons take are common to all
+# daemons. Which daemon they affect depends only on which sections of
+# which config files they are in. See
+# @ref CommonOptions "Common Options"
+# for details.
+#
+# @subsection rpkid rpkid
+#
+# rpkid is the main RPKI engine daemon. Configuration of rpkid is a
+# two step process: a %config file to bootstrap rpkid to the point
+# where it can speak using the @ref Left-Right "left-right protocol",
+# followed by dynamic configuration via the left-right protocol. The
+# latter stage is handled by the @c myrpki tool.
+#
+# rpkid stores dynamic data in an SQL database, which must have been
+# created for it, as explained in the
+# @ref Installation "Installation Guide".
+#
+#
+# @subsection pubd pubd
+#
+# pubd is the publication daemon. It implements the server side of
+# the publication protocol, and is used by rpkid to publish the
+# certificates and other objects that rpkid generates.
+#
+# pubd is separate from rpkid for two reasons:
+#
+# @li The hosting model allows entities which choose to run their own
+# copies of rpkid to publish their output under a common
+# publication point. In general, encouraging shared publication
+# services where practical is a good thing for relying parties,
+# as it will speed up rcynic synchronization time.
+#
+# @li The publication server has to run on (or at least close to) the
+# publication point itself, which in turn must be on a publically
+# reachable server to be useful. rpkid, on the other hand, need
+# only be reachable by the IRBE and its children in the RPKI tree.
+# rpkid is a much more complex piece of software than pubd, so in
+# some situations it might make sense to wrap tighter firewall
+# constraints around rpkid than would be practical if rpkid and
+# pubd were a single program.
+#
+# pubd stores dynamic data in an SQL database, which must have been
+# created for it, as explained in the
+# @ref Installation "Installation Guide". pubd also
+# stores the published objects themselves as disk files in a
+# configurable location which should correspond to an appropriate
+# module definition in rsync.conf; see the
+# @ref Configuration "Configuration Guide"
+# for details.
+#
+#
+# @subsection rootd rootd
+#
+# rootd is a stripped-down implementation of (only) the server side of
+# the up-down protocol. It's a separate program because the root
+# certificate of an RPKI certificate tree requires special handling
+# and may also require a special handling policy. rootd is a simple
+# implementation intended for test use; it's not suitable for use in a
+# production system. All configuration comes via the %config file;
+# see the
+# @ref Configuration "Configuration Guide"
+# for details.
+#
+#
+# @subsection irdbd irdbd
+#
+# irdbd is a sample implementation of the server side of the IRDB
+# callback subset of the left-right protocol. In production use this
+# service is a function of the IRBE stub; irdbd may be suitable for
+# production use in simple cases, but an IR with a complex IRDB may need
+# to extend or rewrite irdbd.
+#
+# irdbd requires a pre-populated database to represent the IR's
+# customers. irdbd expects this database to use
+# @ref irdbd-sql "the SQL schema defined in rpkid/irdbd.sql".
+# Once this database has been populated, the IRBE stub needs to create
+# the appropriate objects in rpkid's database via the control subset
+# of the left-right protocol, and store the linkage handles (foreign
+# keys into rpkid's database) in the IRDB. See the
+# @ref Installation "Installation Guide"
+# and the
+# @ref MySQL-Setup "MySQL setup instructions"
+# for details.
+#
+#
+# @subsection smoketest smoketest
+#
+# smoketest is a test harness to set up and run a collection of rpkid and
+# irdbd instances under scripted control.
+#
+# Unlike the programs described above, smoketest takes two configuration
+# files in different languages. The first configuration file uses the
+# same syntax as the above configuration files but is completely
+# optional. The second configuration file is the test script, which is
+# encoded using the YAML serialization language (see
+# http://www.yaml.org/ for more information on YAML). The YAML script
+# is not optional, as it describes the test layout. smoketest is designed
+# to support running a fairly wide set of test configurations as canned
+# scripts without writing any new control code. The intent is to make
+# it possible to write meaningful regression tests.
+#
+# See @ref smoketestconf "smoketest.conf" for what can go into the
+# (optional) first configuration file.
+#
+# See @ref smoketestyaml "smoketest.yaml" for what goes into the
+# (required) second configuration file.
+#
+#
+# @subsection yamltest yamltest
+#
+# yamltest is another test harness to set up and run a collection of
+# rpkid and irdbd instances under scripted control. It is similar in
+# many ways to @ref smoketest "@c smoketest", and in fact uses the
+# same YAML test description language, but its purpose is different:
+# @c smoketest runs a particular test scenario through a series of
+# changes, then shuts it down; @c yamltest, on the other hand, sets up
+# a test network using the same tools that a real user would
+# (principally the @c myrpki tool), and leaves the test running
+# indefinitely.
+#
+# @c yamltest grew out of @c smoketest and the two probably should be
+# merged back into a single tool which supports both kinds of testing.
+#
+#
+# @section further-reading Further Reading
+#
+# If you're interested in this package you might also be interested
+# in:
+#
+# @li <a href="http://track-rpki.hactrn.net/browser/rcynic/">The rcynic validation tool</a>
+#
+# @li <a href="http://www.hactrn.net/opaque/rcynic.html">A live sample of rcynic's summary output</a>
+#
+#
+# @section getting-started Getting Started
+#
+# The first step to bringing up rpkid and friends is installing the code,
+# which is described in the @ref Installation "Installation Guide".
+
+## @page Installation Installation Guide
+#
+# Installation instructions for rpkid et al. These are the
+# production-side RPKI tools, for Internet Registries (RIRs, LIRs,
+# etc). See the "rcynic" program for relying party tools.
+#
+# rpkid is a set of Python modules supporting generation and maintenance
+# of resource certificates. Most of the code is in the rpkid/rpki/
+# directory. rpkid itself is a relatively small program that calls the
+# library modules. There are several other programs that make use of
+# the same libraries, as well as a collection of test programs.
+#
+# At present the package is intended to be run out of its build
+# directory. Setting up proper installation in a system area using the
+# Python distutils package would likely not be very hard but has not yet
+# been done.
+#
+# Note that initial development of this code has been on FreeBSD, so
+# installation will probably be easiest on FreeBSD.
+#
+# Before attempting to build the package, you need to install any
+# missing prerequisites. Note that the Python code requires Python
+# version 2.5 or 2.6. rpkid et al are mostly self-contained, but do
+# require a small number of external packages to run.
+#
+# <ul>
+# <li>
+# If your Python installation does not already include the sources
+# files needed to compile new Python extension modules, you will
+# need to install whatever package does include those source
+# files. The need for and name of this package varies from system
+# to system. On FreeBSD, the base Python interpreter package
+# includes the development sources; on at least some Linux
+# distributions, you have to install a separate "python-devel"
+# package or something similar. If you get compilation errors
+# trying to build the POW code (below) and the error message says
+# something about the file "Python.h" being missing, this is
+# almost certainly your problem.
+# </li>
+#
+# <li>
+# <a href="http://codespeak.net/lxml/">http://codespeak.net/lxml/</a>,
+# a Pythonic interface to the Gnome LibXML2 libraries.
+# lxml in turn requires the LibXML2 C libraries.
+# <ul>
+# <li>FreeBSD: /usr/ports/devel/py-lxml</li>
+# <li>Fedora: python-lxml.i386</li>
+# <li>Ubuntu: python-lxml</li>
+# </ul>
+# </li>
+#
+# <li>
+# <a href="http://sourceforge.net/projects/mysql-python/">http://sourceforge.net/projects/mysql-python/</a>,
+# the Python "db" interface to MySQL. MySQLdb in turn requires MySQL client and server. rpkid et al have
+# been tested with MySQL 5.0 and 5.1.
+# <ul>
+# <li>FreeBSD: /usr/ports/databases/py-MySQLdb</li>
+# <li>Fedora: MySQL-python.i386</li>
+# <li>Ubuntu: python-mysqldb</li>
+# </ul>
+# </li>
+# </ul>
+#
+# rpkid et al also make heavy use of a modified copy of the Python
+# OpenSSL Wrappers (POW) package, but this copy has enough modifications
+# and additions that it's included in the subversion tree.
+#
+# The next step is to build the OpenSSL and POW binaries. At present
+# the OpenSSL code is just a snapshot of the OpenSSL development
+# sources, compiled with special options to enable RFC 3779 support
+# that ISC wrote under previous contract to ARIN. The POW (Python
+# OpenSSL Wrapper) library is an extended copy of the stock POW
+# release.
+#
+# To build these, cd to the top-level directory in the distribution,
+# run the configure script, then run "make":
+#
+# @verbatim
+# $ cd $top
+# $ ./configure
+# $ make
+# @endverbatim
+#
+# This should automatically build everything, in the right order,
+# including linking the POW extension module with the OpenSSL library
+# to provide RFC 3779 support. If you get errors building POW, see
+# the above discussion of Python development sources.
+#
+# The architecture is intended to support hardware signing modules
+# (HSMs), but the code to support them has not been written.
+#
+# At this point, you should have all the necessary software installed
+# to run the core programs, but you will probably want to test it.
+# The test suite requires a few more external packages, only one of
+# which is Python code.
+#
+# <ul>
+# <li>
+# <a href="http://pyyaml.org/">http://pyyaml.org/</a>.
+# Several of the test programs use PyYAML to parse a YAML
+# description of a simulated allocation hierarchy to test.
+# <ul>
+# <li>FreeBSD: /usr/ports/devel/py-yaml</li>
+# <li>Ubuntu: python-yaml</li>
+# </ul>
+# </li>
+#
+# <li>
+# <a href="http://xmlsoft.org/XSLT/">http://xmlsoft.org/XSLT/</a>.
+# Some of the test code uses xsltproc, from the Gnome LibXSLT
+# package.
+# <ul>
+# <li>FreeBSD: /usr/ports/textproc/libxslt</li>
+# <li>Ubuntu: xsltproc</li>
+# </ul>
+# </li>
+# </ul>
+#
+# All tests should be run from the rpkid/ directories.
+#
+# Some of the tests require MySQL databases to store their data. To
+# set up all the databases that the tests will need, run the SQL
+# commands in rpkid/tests/smoketest.setup.sql. The MySQL command line
+# client is usually the easiest way to do this, eg:
+#
+# @verbatim
+# $ cd $top/rpkid
+# $ mysql -u root -p <tests/smoketest.setup.sql
+# @endverbatim
+#
+# To run the tests, run "make all-tests":
+#
+# @verbatim
+# $ cd $top/rpkid
+# $ make all-tests
+# @endverbatim
+#
+# If nothing explodes, your installation is probably ok. Any Python
+# backtraces in the output indicate a problem.
+#
+# There's a last set of tools that only developers should need, as
+# they're only used when modifying schemas or regenerating the
+# documentation. These tools are listed here for completeness.
+#
+# <ul>
+# <li>
+# <a href="http://www.doxygen.org/">http://www.doxygen.org/</a>.
+# Doxygen in turn pulls in several other tools, notably Graphviz,
+# pdfLaTeX, and Ghostscript.
+# <ul>
+# <li>FreeBSD: /usr/ports/devel/doxygen</li>
+# <li>Ubuntu: doxygen</li>
+# </ul>
+# </li>
+#
+# <li>
+# <a href="http://www.mbayer.de/html2text/">http://www.mbayer.de/html2text/</a>.
+# The documentation build process uses xsltproc and html2text to dump
+# flat text versions of a few critical documentation pages.
+# <ul>
+# <li>FreeBSD: /usr/ports/textproc/html2text</li>
+# </ul>
+# </li>
+#
+# <li>
+# <a href="http://www.thaiopensource.com/relaxng/trang.html">http://www.thaiopensource.com/relaxng/trang.html</a>.
+# Trang is used to convert RelaxNG schemas from the human-readable
+# "compact" form to the XML form that LibXML2 understands. Trang in
+# turn requires Java.
+# <ul>
+# <li>FreeBSD: /usr/ports/textproc/trang</li>
+# </ul>
+# </li>
+#
+# <li>
+# <a href="http://search.cpan.org/dist/SQL-Translator/">http://search.cpan.org/dist/SQL-Translator/</a>.
+# SQL-Translator, also known as "SQL Fairy", includes code to parse
+# an SQL schema and dump a description of it as Graphviz input.
+# SQL Fairy in turn requires Perl.
+# <ul>
+# <li>FreeBSD: /usr/ports/databases/p5-SQL-Translator</li>
+# </ul>
+# </li>
+# </ul>
+#
+# Once you've finished with installation, the next thing you should
+# read is the @ref Configuration "Configuration Guide".
+
+## @page Configuration Configuration Guide
+#
+# This section describes the configuration file syntax and settings.
+#
+# Each of the programs that make up the RPKI toolkit can potentially
+# take its own configuration file, but for most uses this is
+# unnecessarily complicated. The recommended approach is to use a
+# single configuration file, and to put all of the parameters that a
+# normal user might need to change into a single section of that
+# configuration file, then reference these common settings from the
+# program-specific sections of the configuration file via macro
+# expansion. The configuration file parser supports a limited version
+# of the macro facility used in OpenSSL's configuration parser. An
+# expression such as @verbatim foo = ${bar::baz} @endverbatim sets foo
+# to the value of the @c baz variable from section @c bar. The section
+# name @c ENV is special: it refers to environment variables.
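+#
+# As a purely illustrative example (these section and variable names
+# are hypothetical, not taken from any real configuration file):
+#
+# @verbatim
+# [bar]
+# baz = some-value
+#
+# [fred]
+# # fred's foo gets the value of baz from section bar, ie, "some-value"
+# foo  = ${bar::baz}
+# # and this picks up the HOME environment variable
+# home = ${ENV::HOME}
+# @endverbatim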
+#
+# @section myrpkiconf myrpki.conf
+#
+# The default name for the shared configuration file is @c myrpki.conf.
+# Unless you really know what you're doing, you should start by
+# copying the @c $top/myrpki.conf from the @c rpkid/examples directory and
+# modifying it, as the sample configuration file already includes all
+# the additional settings necessary to use the simplified configuration.
+#
+# @dontinclude myrpki.conf
+# @skipline [myrpki]
+#
+# The @c [myrpki] section of @c myrpki.conf contains all the
+# parameters that you really need to configure.
+#
+# @skip #
+# @until =
+#
+# Every resource-holding or server-operating entity needs a "handle",
+# which is just an identifier by which the entity calls itself.
+# Handles do not need to be globally unique, but should be chosen with
+# an eye towards debugging operational problems: it's best if you use
+# a handle that your parents and children will recognize as being you.
+#
+# @skip #
+# @until bpki/servers
+#
+# The myrpki tool requires filenames for several input data files, the
+# "business PKI" databases used to secure CMS and TLS communications,
+# and the XML intermediate format that it uses. Rather than
+# hardwiring the names into the code, they're configured here. You
+# can change the names if you must, but the defaults should be fine in
+# most cases.
+#
+# @skip #
+# @until irdbd_server_port
+#
+# If you're hosting RPKI service for others, or are self-hosting, you
+# want this on. If somebody else is running rpkid on your behalf and
+# you're just shipping them your @c myrpki.xml file, you can turn this
+# off.
+#
+# If you're running @c rpkid at all, you'll need to set at least the
+# @c rpkid_server_host parameter here. You may be able to use the
+# default port numbers, or may need to pick different ones. Unless
+# you plan to run @c irdbd on a different machine from @c rpkid, you
+# should leave @c irdbd_server_host alone.
+#
+# @skip #
+# @until pubd_contact_info
+#
+# The myrpki tool will attempt to negotiate publication service for
+# you with whatever publication service your parent is using, if you
+# let it, so in most cases you should not need to run @c pubd unless
+# you need to issue certificates for private IP address space or
+# private Autonomous System Numbers.
+#
+# If you do run @c pubd, you will need to set @c pubd_server_host.
+# You may also need to set @c pubd_server_port, and you should provide
+# something helpful as contact information in @c pubd_contact_info if
+# you plan to offer publication service to your RPKI children, so that
+# grandchildren (or descendents even further down the tree) who
+# receive referrals to your service will know how to contact you.
+#
+# @skip #
+# @until rootd_server_port
+#
+# You shouldn't run rootd unless you're the root of an RPKI tree. Who
+# gets to be the root of the public RPKI tree is a political issue
+# outside the scope of this document. For everybody else, the only
+# reason for running @c rootd (other than test purposes) would be to
+# support certification of private IP addresses and ASNs. The core
+# tools can do this without any problem, but the simplified
+# configuration mechanism does not (yet) make this easy to do.
+#
+# @skip #
+# @until publication_rsync_server
+#
+# These parameters control the mapping between the rsync URIs
+# presented by @c rsyncd and the local filesystem on the machine where
+# @c pubd and @c rsyncd run. Any changes here must also be reflected
+# as changes in @c rsyncd.conf. In most cases you should not change
+# the value of @c publication_rsync_module from the default; since
+# pubd can't (and should not) rewrite @c rsyncd.conf, it's best to use
+# a static rsync module name here and let @c pubd do its work
+# underneath that name.  In most cases @c publication_rsync_server
+# should simply name the host on which @c pubd and @c rsyncd run,
+# which is what the macro invocation in the default setting does.  @c
+# publication_base_directory, like other pathnames in @c myrpki.conf,
+# can be either a relative or absolute pathname; if relative, it's
+# interpreted with respect to the directory in which the programs in
+# question were started. In this specific case, it's probably better
+# to use an absolute pathname, since this pathname must also appear in
+# @c rsyncd.conf.
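+#
+# As a sketch of what the corresponding @c rsyncd.conf entry might look
+# like (the module name and path here are hypothetical; see
+# $top/rpkid/examples/rsyncd.conf for a real sample):
+#
+# @verbatim
+# [rpki]
+#     path      = /some/where/publication
+#     comment   = RPKI publication
+#     read only = yes
+# @endverbatim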
+#
+# @skip #
+# @until pubd_sql_password
+#
+# These settings control how @c rpkid, @c irdbd, and @c pubd talk to
+# the MySQL server. At minimum, each daemon needs its own database;
+# in the simplest configuration, the username and password can be
+# shared, which is what the macro references in the default
+# configuration does. If for some reason you need to set different
+# usernames and passwords for different daemons, you can do so by
+# changing the daemon-specific variables.
+#
+# @skip #
+# @until = openssl
+#
+# The @c myrpki tool uses the @c openssl command line tool for most of
+# its BPKI operations, for two reasons:
+#
+# @li To avoid duplicating CA-management functionality already
+# provided by the command line tool, and
+#
+# @li To ease portability of the @c myrpki tool, so that a "hosted"
+#     resource holder can use it without needing to install the entire toolkit.
+#
+# The @c myrpki tool's use of OpenSSL does not require exotic features
+# like RFC 3779 support, but it does require a version of the tool
+# recent enough to support CMS and the @c -ss_cert argument to the @c
+# ca command. Depending on the platform on which you are running this
+# code, you may or may not have a system copy of the @c openssl tool
+# installed that meets these criteria; if not, the @c openssl binary
+# built when you compile the toolkit will suffice. This parameter
+# allows you to tell @c myrpki where to find the binary, if necessary;
+# the default just uses the system search path.
+#
+# @section otherconf Other configuration files and options
+#
+# In most cases the simplified configuration in the @c [myrpki]
+# section of @c myrpki.conf should suffice, but in case you need to
+# tinker, here are details on the rest of the configuration
+# options. In most cases the default name of the configuration file
+# for a program is the name of the program followed by @c ".conf", and
+# the section name is also named for the program, so that you can
+# combine sections into a single configuration file as shown with @c
+# myrpki.conf.
+#
+# @li @subpage CommonOptions "Common configuration options"
+#
+# @li @subpage rpkidconf "rpkid configuration"
+#
+# @li @subpage irdbdconf "irdbd configuration"
+#
+# @li @subpage pubdconf "pubd configuration"
+#
+# @li @subpage rootdconf "rootd configuration"
+#
+# @li @subpage smoketestconf "configuration of the smoketest test harness"
+#
+# @li @subpage smoketestyaml "test description language for the smoketest test harness"
+#
+# Once you've finished with configuration, the next thing you should
+# read is the @ref MySQL-Setup "MySQL setup instructions".
+
+## @page MySQL-Setup MySQL Setup
+#
+# You need to install MySQL and set up the relevant databases before
+# starting @c rpkid, @c irdbd, or @c pubd.
+#
+# See the @ref Installation "Installation Guide" for details on where
+# to download MySQL and find documentation on installing it.
+#
+# See the @ref Configuration "Configuration Guide" for details on the
+# configuration file settings the daemons will use to find and
+# authenticate themselves to their respective databases.
+#
+# Before you can (usefully) start any of the daemons, you will need to
+# set up the MySQL databases they use. You can do this by hand, or
+# you can use the @c sql-setup.py script, which prompts you for your
+# MySQL root password then attempts to do everything else
+# automatically using values from myrpki.conf.
+#
+# Using the script is simple:
+#
+# @verbatim
+# $ python sql-setup.py
+# Please enter your MySQL root password:
+# @endverbatim
+#
+# The script should tell you what databases it creates. You can use
+# the -v option if you want to see more details about what it's doing.
+#
+# If you'd prefer to do the SQL setup manually, perhaps because you
+# have valuable data in other MySQL databases and you don't want to
+# trust some random setup script with your MySQL root password, you'll
+# need to use the MySQL command line tool, as follows:
+#
+# @verbatim
+# $ mysql -u root -p
+#
+# mysql> CREATE DATABASE irdb_database;
+# mysql> GRANT all ON irdb_database.* TO irdb_user@localhost IDENTIFIED BY 'irdb_password';
+# mysql> USE irdb_database;
+# mysql> SOURCE $top/rpkid/irdbd.sql;
+# mysql> CREATE DATABASE rpki_database;
+# mysql> GRANT all ON rpki_database.* TO rpki_user@localhost IDENTIFIED BY 'rpki_password';
+# mysql> USE rpki_database;
+# mysql> SOURCE $top/rpkid/rpkid.sql;
+# mysql> COMMIT;
+# mysql> quit
+# @endverbatim
+#
+# where @c irdb_database, @c irdb_user, @c irdb_password, @c
+# rpki_database, @c rpki_user, and @c rpki_password match the values
+# you used in your configuration file.
+#
+# If you are running pubd and are doing manual SQL setup, you'll also
+# have to do:
+#
+# @verbatim
+# $ mysql -u root -p
+# mysql> CREATE DATABASE pubd_database;
+# mysql> GRANT all ON pubd_database.* TO pubd_user@localhost IDENTIFIED BY 'pubd_password';
+# mysql> USE pubd_database;
+# mysql> SOURCE $top/rpkid/pubd.sql;
+# mysql> COMMIT;
+# mysql> quit
+# @endverbatim
+#
+# where @c pubd_database, @c pubd_user, and @c pubd_password match the
+# values you used in your configuration file.
+#
+# Once you've finished configuring MySQL, the next thing you should
+# read is the instructions for the @ref MyRPKI "myrpki tool".
+
+
+## @page MyRPKI The myrpki tool
+#
+# The design of rpkid and friends assumes that certain tasks can be
+# thrown over the wall to the registry's back end operation. This was
+# a deliberate design decision to allow rpkid et al to remain
+# independent of existing database schema, business PKIs, and so forth
+# that a registry might already have. All very nice, but it leaves
+# someone who just wants to test the tools or who has no existing back
+# end with a fairly large programming project. The @c myrpki tool
+# attempts to fill that gap.
+#
+# @c myrpki is a basic implementation of what a registry back end
+# would need to use rpkid and friends. @c myrpki does not use every
+# available option in the other programs, nor is it necessarily as
+# efficient as possible. Large registries will almost certainly want
+# to roll their own tools, perhaps using these as a starting point.
+# Nevertheless, we hope that @c myrpki will at least provide a useful
+# example, and may be adequate for simple use.
+#
+# @c myrpki is (currently) implemented as a single command line Python
+# program. It has a number of commands, most of which are used for
+# initial setup, some of which are used on an ongoing basis. @c
+# myrpki can be run either in an interactive mode or by passing a
+# single command on the command line when starting the program; the
+# former mode is intended to be somewhat human-friendly, the latter
+# mode is useful in scripting, cron jobs, and automated testing.
+#
+# @c myrpki use has two distinct phases: setup and data maintenance.
+# The setup phase is primarily about constructing the "business PKI"
+# (BPKI) certificates that the daemons use to authenticate CMS
+# messages and obtaining the service URLs needed to configure
+# the daemons. The data maintenance phase is about configuring local
+# data into the daemons.
+#
+# @c myrpki uses the OpenSSL command line tool for almost all
+# operations on keys and certificates; the one exception to this is
+# the command which talks directly to the daemons, as this command
+# uses the same communication libraries as the daemons themselves do.
+# The intent behind using the OpenSSL command line tool for everything
+# else is to allow all the other commands to be run without requiring
+# all the auxiliary packages upon which the daemons depend; this can
+# be useful, eg, if one wants to run the back-end on a laptop while
+# running the daemons on a server, in which case one might prefer not
+# to have to install a bunch of unnecessary packages on the laptop.
+#
+# During setup phase @c myrpki generates and processes small XML
+# messages which it expects the user to ship to and from its parents,
+# children, etc via some out-of-band means (email, perhaps with PGP
+# signatures, USB stick, we really don't care). During data
+# maintenance phase, @c myrpki does something similar with another XML
+# file, to allow hosting of RPKI services; in the degenerate case
+# where an entity is just self-hosting (ie, is running the daemons for
+# itself, and only for itself), this latter XML file need not be sent
+# anywhere.
+#
+# The basic idea here is that a user who has resources maintains a set
+# of .csv files containing a text representation of the data needed by
+# the back-end, along with a configuration file containing other
+# parameters. The intent is that these be very simple files that are
+# easy to generate either by hand or as a dump from a relational
+# database, spreadsheet, awk script, or whatever works in your
+# environment. Given these files, the user then runs @c myrpki to
+# extract the relevant information and encode everything about its
+# back end state into an XML file, which can then be shipped to the
+# appropriate other party.
+#
+# Many of the @c myrpki commands which process XML input write out a
+# new XML file, either in place or as an entirely new file; in
+# general, these files need to be sent back to the party that sent the
+# original file. Think of all this as a very slow packet-based
+# communication channel, where each XML file is a single packet. In
+# setup phase, there's generally a single round-trip per setup
+# conversation; in the data maintenance phase, the same XML file keeps
+# bouncing back and forth between hosted entity and hosting entity.
+#
+# Note that, as certificates and CRLs have expiration and nextUpdate
+# values, a low-level cycle of updates passing between resource holder
+# and rpkid operator will be necessary as a part of steady state
+# operation. [The current version of these tools does not yet
+# regenerate these expiring objects, but fixing this will be a
+# relatively minor matter.]
+#
+# The third important kind of file in this system is the
+# @ref Configuration "configuration file"
+# for @c myrpki. This contains a number of sections, some of which
+# are for myrpki, others of which are for the OpenSSL command line
+# tool, still others of which are for the various RPKI daemon
+# programs. The examples/ subdirectory contains a commented version
+# of the configuration file that explains the various parameters.
+#
+# The .csv files read by myrpki are (now) misnamed: formerly, they
+# used the "excel-tab" format from the Python csv library, but early
+# users kept trying to make the columns line up, which didn't do what
+# the users expected.  So now these files are just
+# whitespace-delimited, in a form that a program like "awk" would understand.
+#
+# Keep reading, and don't panic.
+#
+# The default configuration file name for @c myrpki is
+# @ref Configuration "@c myrpki.conf".
+# You can change this using the "-c" option when invoking myrpki, or
+# by setting the environment variable MYRPKI_CONF.
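+#
+# For example, assuming the tool is invoked as @c myrpki.py, either of
+# the following (Bourne shell syntax, hypothetical pathname) selects an
+# alternate configuration file:
+#
+# @verbatim
+# $ python myrpki.py -c /some/where/myrpki.conf
+#
+# $ MYRPKI_CONF=/some/where/myrpki.conf; export MYRPKI_CONF
+# $ python myrpki.py
+# @endverbatim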
+#
+# See examples/*.csv for commented examples of the several CSV files.
+# Note that the comments themselves are not legal CSV, they're just
+# present to make it easier to understand the examples.
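+#
+# As a purely hypothetical illustration of the whitespace-delimited
+# format (the real file names and column meanings are documented in the
+# commented examples), a file allocating ASNs to children might look
+# something like:
+#
+# @verbatim
+# # child handle        ASN allocated to that child
+# Alice                 64496
+# Bob                   64497
+# @endverbatim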
+#
+# @section myrpkioverview myrpki overview
+#
+# Which process you need to follow depends on whether you are running
+# rpkid yourself or will be hosted by somebody else. We call the first
+# case "self-hosted", because the software treats running rpkid to
+# handle resources that you yourself hold as if you are an rpkid
+# operator who is hosting an entity that happens to be yourself.
+#
+# "$top" in the following refers to wherever you put the
+# subvert-rpki.hactrn.net code. Once we have autoconf and "make
+# install" targets, this will be some system directory or another; for
+# now, it's wherever you checked out a copy of the code from the
+# subversion repository or unpacked a tarball of the code.
+#
+# Most of the setup process looks the same for any resource holder,
+# regardless of whether they are self-hosting or not. The differences
+# come in the data maintenance phase.
+#
+# The steps needed during setup phase are:
+#
+# @li Write a configuration file (copy $top/rpkid/examples/myrpki.conf
+# and edit as needed). You need to configure the @c [myrpki] section;
+# in theory, the rest of the file should be ok as it is, at least for
+# simple use. You also need to create (either by hand or by dumping
+# from a database, spreadsheet, whatever) the CSV files describing
+# prefixes and ASNs you want to allocate to your children and ROAs
+# you want created.
+#
+# @li Initialization ("initialize" command). This creates the local BPKI
+# and other data structures that can be constructed just based on
+# local data such as the config file. Other than some internal data
+# structures, the main output of this step is the "identity.xml" file,
+# which is used as input to later stages.
+#
+# In theory it should be safe to run the "initialize" command more
+#    than once; in practice this has not (yet) been tested.
+#
+# @li Send (email, USB stick, carrier pigeon) identity.xml to each of your
+# parents. This tells each of your parents what you call yourself,
+# and supplies each parent with a trust anchor for your
+# resource-holding BPKI.
+#
+# @li Each of your parents runs the "configure_child" command, giving
+# the identity.xml you supplied as input. This registers your
+# data with the parent, including BPKI cross-registration, and
+# generates a return message containing your parent's BPKI trust
+# anchors, a service URL for contacting your parent via the
+# "up-down" protocol, and (usually) either an offer of publication
+# service (if your parent operates a repository) or a referral
+# from your parent to whatever publication service your parent
+# does use. Referrals include a CMS-signed authorization token
+# that the repository operator can use to determine that your
+# parent has given you permission to home underneath your parent
+# in the publication tree.
+#
+# @li Each of your parents sends (...) back the response XML file
+# generated by the "configure_child" command.
+#
+# @li You feed the response message you just got into myrpki using the
+# "configure_parent" command. This registers the parent's
+# information in your database, including BPKI
+# cross-certification, and processes the repository offer or
+# referral to generate a publication request message.
+#
+# @li You send (...) the publication request message to the
+# repository. The @c contact_info element in the request message
+# should (in theory) provide some clue as to where you should send
+# this.
+#
+# @li The repository operator processes your request using myrpki's
+# "configure_publication_client" command. This registers your
+# information, including BPKI cross-certification, and generates a
+# response message containing the repository's BPKI trust anchor
+# and service URL.
+#
+# @li Repository operator sends (...) the publication confirmation message
+# back to you.
+#
+# @li You process the publication confirmation message using myrpki's
+# "configure_repository" command.
+#
+# At this point you should, in theory, have established relationships,
+# exchanged trust anchors, and obtained service URLs from all of your
+# parents and repositories. The last setup step is establishing a
+# relationship with your RPKI service host, if you're not self-hosted,
+# but as this is really just the first message of an ongoing exchange
+# with your host, it's handled by the data maintenance commands.
+#
+# The two commands used in the data maintenance phase are
+# "configure_resources" and "configure_daemons". The first is used by
+# the resource holder, the second is used by the host. In the
+# self-hosted case, it is not necessary to run "configure_resources" at
+# all; myrpki will run it for you automatically.
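+#
+# As a hypothetical illustration of the resource holder's side of the
+# setup conversation (assuming the tool is invoked as @c myrpki.py, and
+# with made-up names for the XML files exchanged with your parent and
+# repository):
+#
+# @verbatim
+# $ python myrpki.py initialize
+#     (send identity.xml to your parent)
+# $ python myrpki.py configure_parent parent-response.xml
+#     (send the resulting publication request to your repository)
+# $ python myrpki.py configure_repository repository-response.xml
+# @endverbatim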
+#
+# @section myrpkihosted Hosted case
+#
+# The basic steps involved in getting started for a resource holder who
+# is being hosted by somebody else are:
+#
+# @li Run through steps listed in
+# @ref myrpkioverview "the myrpki overview section".
+#
+# @li Run the configure_resources command to generate myrpki.xml.
+#
+# @li Send myrpki.xml to the rpkid operator who will be hosting you.
+#
+# @li Wait for your rpkid operator to ship you back an updated XML
+# file containing a PKCS #10 certificate request for the BPKI
+# signing context (BSC) created by rpkid.
+#
+# @li Run configure_resources again with the XML file you just
+# received, to issue the BSC certificate and update the XML file
+# again to contain the newly issued BSC certificate.
+#
+# @li Send the updated XML file back to your rpkid operator.
+#
+# At this point you're done with initial setup. You will need to run
+# configure_resources again whenever you make any changes to your
+# configuration file or CSV files.
+#
+# @warning Once myrpki knows how to update
+# BPKI CRLs, you will also need to run configure_resources periodically
+# to keep your BPKI CRLs up to date.
+#
+# Any time you run myrpki's configure_resources command, you should send the
+# updated XML file to your rpkid operator, who should send you a
+# further updated XML file in response.
+#
+# @section myrpkiselfhosted Self-hosted case
+#
+# The first few steps involved in getting started for a self-hosted
+# resource holder (that is, a resource holder that runs its own copy
+# of rpkid) are the same as in the @ref myrpkihosted "hosted case"
+# above; after that the process diverges.
+#
+# The [current] steps are:
+#
+# @li Follow the basic installation instructions in
+# @ref Installation "the Installation Guide" to build the
+# RFC-3779-aware OpenSSL code and associated Python extension
+# module.
+#
+# @li Run through steps listed in
+# @ref myrpkioverview "the myrpki overview section".
+#
+# @li Set up the MySQL databases that rpkid et al will use. The
+# package includes a tool to do this for you, you can use that or
+# do the job by hand. See
+# @ref MySQL-Setup "MySQL database setup"
+# for details.
+#
+# @li If you are running your own publication repository (that is, if
+# you are running pubd), you will also need to set up an rsyncd
+# server or configure your existing one to serve pubd's output.
+# There's a sample configuration file in
+# $top/rpkid/examples/rsyncd.conf, but you may need to do
+# something more complicated if you are already running rsyncd for
+# other purposes. See the rsync(1) and rsyncd.conf(5) manual
+# pages for more details.
+#
+# @li Start the daemons. You can use $top/rpkid/start-servers.py to
+# do this, or write your own script. If you intend to run pubd,
+# you should make sure that the directory you specified as
+# publication_base_directory exists and is writable by the userid
+# that will be running pubd, and should also make sure to start
+# rsyncd.
+#
+# @li Run myrpki's configure_daemons command, twice, with no
+# arguments. You need to run the command twice because myrpki has
+# to ask rpkid to create a keypair and generate a certification
+# request for the BSC. The first pass does this, the second
+# processes the certification request, issues the BSC, and loads
+# the result into rpkid. [Yes, we could automate this somehow, if
+# necessary.]
+#
+# At this point, if everything went well, rpkid should be up,
+# configured, and starting to obtain resource certificates from its
+# parents, generate CRLs and manifests, and so forth. At this point you
+# should go figure out how to use the relying party tool, rcynic: see
+# $top/rcynic/README if you haven't already done so.
+#
+# If and when you change your CSV files, you should run
+# configure_daemons again to feed the changes into the daemons.
+#
+# @section myrpkihosting Hosting case
+#
+# If you are running rpkid not just for your own resources but also to
+# host other resource holders (see @ref myrpkihosted "hosted case"
+# above), your setup will be almost the same as in the self-hosted
+# case (see @ref myrpkiselfhosted "self-hosted case", above), with one
+# procedural change: you will need to tell @c configure_daemons to
+# process the XML files produced by the resource holders you are
+# hosting. You do this by specifying the names of all those XML files
+# as arguments to the @c configure_daemons command.  So, if you are
+# hosting two friends, Alice and Bob, then, everywhere the
+# instructions for the self-hosted case say to run @c
+# configure_daemons with no arguments, you will instead run it with
+# the names of Alice's and Bob's XML files as arguments.
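+#
+# Continuing the Alice and Bob example (and again assuming the tool is
+# invoked as @c myrpki.py, with made-up file names), that would look
+# something like:
+#
+# @verbatim
+# $ python myrpki.py configure_daemons alice.xml bob.xml
+# @endverbatim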
+#
+# Note that @c configure_daemons sometimes modifies these XML files,
+# in which case it will write them back to the same filenames. While
+# it is possible to figure out the set of circumstances in which this
+# will happen (at present, only when @c myrpki has to ask @c rpkid to
+# create a new BSC keypair and PKCS #10 certificate request), it may
+# be easiest just to ship back an updated copy of the XML file after
+# every run of @c configure_daemons.
+#
+# @section myrpkipurehosting "Pure" hosting case
+#
+# In general we assume that anybody who bothers to run @c rpkid is
+# also a resource holder, but the software does not insist on this.
+#
+# @todo
+# Er, well, rpkid doesn't, but myrpki now does -- "pure" hosting was an
+# unused feature that fell by the wayside while simplifying the user
+# interface. It would be relatively straightforward to add it back if
+# we ever need it for anything, but the mechanism it used to use no
+# longer exists -- the old [myirbe] section of the config file has been
+# collapsed into the [myrpki] section, so testing for existence of the
+# [myrpki] section no longer works. So we'll need an explicit
+# configuration option, no big deal, just not worth chasing now.
+#
+# A (perhaps) plausible use for this capability would be if you are an
+# rpkid-running resource holder who wants for some reason to keep the
+# resource-holding side of your operation completely separate from the
+# rpkid-running side of your operation. This is essentially the
+# pure-hosting model, just with an internal hosted entity within a
+# different part of your own organization.
+#
+# @section myrpkitroubleshooting Troubleshooting
+#
+# If you run into trouble setting up this package, the first thing to do
+# is categorize the kind of trouble you are having. If you've gotten
+# far enough to be running the daemons, check their log files. If
+# you're seeing Python exceptions, read the error messages. If you're
+# getting TLS errors, check to make sure that you're using all the right
+# BPKI certificates and service contact URLs.
+#
+# TLS configuration errors are, unfortunately, notoriously difficult to
+# debug, because connection failures due to misconfiguration happen
+# early, deep in the guts of the OpenSSL TLS code, where there isn't
+# enough application context available to provide useful error messages.
+#
+# If you've completed the steps above, everything appears to have gone
+# OK, but nothing seems to be happening, the first thing to do is
+# check the logs to confirm that nothing is actively broken. @c
+# rpkid's log should include messages telling you when it starts and
+# finishes its internal "cron" cycle. It can take several cron cycles
+# for resources to work their way down from your parent into a full
+# set of certificates and ROAs, so have a little patience. @c rpkid's
+# log should also include messages showing every time it contacts its
+# parent(s) or attempts to publish anything.
+#
+# @c rcynic in fully verbose mode provides a fairly detailed
+# explanation of what it's doing and why objects that fail have
+# failed.
+#
+# You can use @c rsync (sic) to examine the contents of a publication
+# repository one directory at a time, without attempting validation,
+# by running rsync with just the URI of the directory on its command
+# line:
+#
+# @verbatim
+# $ rsync rsync://rpki.example.org/where/ever/
+# @endverbatim
+#
+# @section myrpkiknownissues Known Issues
+#
+# The lxml package provides a Python interface to the Gnome libxml2
+# and libxslt C libraries. This code has been quite stable for
+# several years, but initial testing with lxml compiled and linked
+# against a newer version of libxml2 ran into problems (specifically,
+# gratuitous RelaxNG schema validation failures). libxml2 2.7.3
+# worked; libxml2 2.7.5 did not work on the test machine in question.
+# Reverting to libxml2 2.7.3 fixed the problem. Rewriting the two
+# lines of Python code that were triggering the lxml bug appears to
+# have solved the problem, so the code now works properly with libxml
+# 2.7.5, but if you start seeing weird XML validation failures, it
+# might be another variation of this lxml bug.
+#
+# An earlier version of this code ran into problems with what appears to
+# be an implementation restriction in the GNU linker ("ld") on
+# 64-bit hardware, resulting in obscure build failures. The workaround
+# for this required use of shared libraries and is somewhat less
+# portable than the original code, but without it the code simply would
+# not build in 64-bit environments with the GNU tools. The current
+# workaround appears to behave properly, but the workaround requires
+# that the pathname to the RFC-3779-aware OpenSSL shared libraries be
+# built into the _POW.so Python extension module. At the moment, in the
+# absence of "make install" targets for the Python code and libraries,
+# this means the build directory; eventually, once we're using autoconf
+# and installation targets, this will be the installation directory. If
+# necessary, you can override this by setting the LD_LIBRARY_PATH
+# environment variable, see the ld.so man page for details. This is a
+# relatively minor variation on the usual build issues for shared
+# libraries, it's just annoying because shared libraries should not be
+# needed here and would not be if not for this GNU linker issue.
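+#
+# For example, if the shared libraries end up somewhere other than the
+# location built into _POW.so, something like the following (Bourne
+# shell syntax, hypothetical pathname) works around it:
+#
+# @verbatim
+# $ LD_LIBRARY_PATH=/some/where/openssl; export LD_LIBRARY_PATH
+# $ python rpkid.py &
+# @endverbatim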
+
+## @page CommonOptions Common Configuration Options
+#
+# Some of the options that the several daemons take are common to all
+# daemons. Which daemon they affect depends only on which sections of
+# which config files they are in.
+#
+# The first group of options are debugging flags, which can be set to
+# "true" or "false". If not specified, default values will be chosen
+# (generally false).
+#
+# @par @c debug_http:
+# Enable verbose http debug logging.
+#
+# @par @c debug_tls_certs:
+# Enable verbose logging about tls certs.
+#
+# @par @c want_persistent_client:
+# Enable http 1.1 persistence, client side.
+#
+# @par @c want_persistent_server:
+# Enable http 1.1 persistence, server side.
+#
+# @par @c debug_cms_certs:
+# Enable verbose logging about cms certs.
+#
+# @par @c sql_debug:
+# Enable verbose logging about sql operations.
+#
+# @par @c gc_debug:
+# Enable scary garbage collector debugging.
+#
+# @par @c timer_debug:
+# Enable verbose logging of timer system.
+#
+# There are also a few options that allow you to save CMS messages for
+# audit or debugging. The save format is a simple MIME encoding in a
+# Maildir-format mailbox. The current options are very crude, at some
+# point we may provide finer grain controls.
+#
+# @par @c dump_outbound_cms:
+# Dump messages we send to this mailbox.
+#
+# @par @c dump_inbound_cms:
+# Dump messages we receive to this mailbox.
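+#
+# A hypothetical fragment showing a few of these options in one
+# daemon's section of a config file (the values shown are examples,
+# not recommendations):
+#
+# @verbatim
+# [rpkid]
+# debug_http        = false
+# sql_debug         = true
+# dump_outbound_cms = cms-audit/outbound
+# dump_inbound_cms  = cms-audit/inbound
+# @endverbatim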
+
+## @page rpkidconf rpkid.conf
+#
+# rpkid's default %config file is rpkid.conf, start rpkid with "-c
+# filename" to choose a different %config file. All options are in
+# the section "[rpkid]". Certificates, keys, and trust anchors may be
+# in either DER or PEM format.
+#
+# %Config file options:
+#
+# @par @c startup-message:
+# String to %log on startup, useful when
+# debugging a collection of rpkid instances at
+# once.
+#
+# @par @c sql-username:
+# Username to hand to MySQL when connecting to
+# rpkid's database.
+#
+# @par @c sql-database:
+# MySQL's database name for rpkid's database.
+#
+# @par @c sql-password:
+# Password to hand to MySQL when connecting to
+# rpkid's database.
+#
+# @par @c bpki-ta:
+# Name of file containing BPKI trust anchor.
+# All BPKI certificate verification within rpkid
+# traces back to this trust anchor.
+#
+# @par @c rpkid-cert:
+# Name of file containing rpkid's own BPKI EE
+# certificate.
+#
+# @par @c rpkid-key:
+# Name of file containing RSA key corresponding
+# to rpkid-cert.
+#
+# @par @c irbe-cert:
+# Name of file containing BPKI certificate used
+# by IRBE when talking to rpkid.
+#
+# @par @c irdb-cert:
+# Name of file containing BPKI certificate used
+# by irdbd.
+#
+# @par @c irdb-url:
+# Service URL for irdbd. Must be a %http:// URL.
+#
+# @par @c server-host:
+# Hostname or IP address on which to listen for
+# HTTP connections. Current default is
+# INADDR_ANY (IPv4 0.0.0.0); this will need to
+# be hacked to support IPv6 for production.
+#
+# @par @c server-port:
+# TCP port on which to listen for HTTP
+# connections.
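+#
+# A minimal sketch of what an "[rpkid]" section might look like (all
+# filenames, credentials, hostnames, and port numbers here are
+# hypothetical):
+#
+# @verbatim
+# [rpkid]
+# sql-username = rpki_user
+# sql-database = rpki_database
+# sql-password = rpki_password
+# bpki-ta      = bpki/servers/ca.cer
+# rpkid-cert   = bpki/servers/rpkid.cer
+# rpkid-key    = bpki/servers/rpkid.key
+# irbe-cert    = bpki/servers/irbe.cer
+# irdb-cert    = bpki/servers/irdbd.cer
+# irdb-url     = http://localhost:4434/
+# server-host  = rpkid.example.org
+# server-port  = 4433
+# @endverbatim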
+
+## @page pubdconf pubd.conf
+#
+# pubd's default %config file is pubd.conf, start pubd with "-c
+# filename" to choose a different %config file. All options are in
+# the section "[pubd]".  Certificates, keys, and trust anchors may be
+# in either DER or PEM format.
+#
+# %Config file options:
+#
+# @par @c sql-username:
+# Username to hand to MySQL when connecting to
+# pubd's database.
+#
+# @par @c sql-database:
+# MySQL's database name for pubd's database.
+#
+# @par @c sql-password:
+# Password to hand to MySQL when connecting to
+# pubd's database.
+#
+# @par @c bpki-ta:
+# Name of file containing master BPKI trust
+# anchor for pubd. All BPKI validation in pubd
+# traces back to this trust anchor.
+#
+# @par @c irbe-cert:
+# Name of file containing BPKI certificate used
+# by IRBE when talking to pubd.
+#
+# @par @c pubd-cert:
+# Name of file containing BPKI certificate used
+# by pubd.
+#
+# @par @c pubd-key:
+# Name of file containing RSA key corresponding
+# to @c pubd-cert.
+#
+# @par @c server-host:
+# Hostname or IP address on which to listen for
+# HTTP connections. Current default is
+# INADDR_ANY (IPv4 0.0.0.0); this will need to
+# be hacked to support IPv6 for production.
+#
+# @par @c server-port:
+# TCP port on which to listen for HTTP
+# connections.
+#
+# @par @c publication-base:
+# Path to base of filesystem tree where pubd
+# should store publishable objects. Default is
+# "publication/".
+
+## @page rootdconf rootd.conf
+#
+# rootd's default %config file is rootd.conf; start rootd with "-c
+# filename" to choose a different %config file. All options are in
+# the section "[rootd]". Certificates, keys, and trust anchors may be
+# in either DER or PEM format.
+#
+# %Config file options:
+#
+# @par @c bpki-ta:
+# Name of file containing BPKI trust anchor. All
+# BPKI certificate validation in rootd traces
+# back to this trust anchor.
+#
+# @par @c rootd-bpki-cert:
+# Name of file containing rootd's own BPKI
+# certificate.
+#
+# @par @c rootd-bpki-key:
+# Name of file containing RSA key corresponding to
+# rootd-bpki-cert.
+#
+# @par @c rootd-bpki-crl:
+# Name of file containing BPKI CRL that would
+# cover rootd-bpki-cert had it been revoked.
+#
+# @par @c child-bpki-cert:
+# Name of file containing BPKI certificate for
+# rootd's one and only child (RPKI engine to
+# which rootd issues an RPKI certificate).
+#
+# @par @c server-host:
+# Hostname or IP address on which to listen for
+# HTTP connections. Default is localhost.
+#
+# @par @c server-port:
+# TCP port on which to listen for HTTP
+# connections.
+#
+# @par @c rpki-root-key:
+# Name of file containing RSA key to use in
+# signing resource certificates.
+#
+# @par @c rpki-root-cert:
+# Name of file containing self-signed root
+# resource certificate corresponding to
+# rpki-root-key.
+#
+# @par @c rpki-root-dir:
+# Name of directory where rootd should write
+# RPKI subject certificate, manifest, and CRL.
+#
+# @par @c rpki-subject-cert:
+# Name of file that rootd should use to save the
+# one and only certificate it issues.
+# Default is "Subroot.cer".
+#
+# @par @c rpki-root-crl:
+# Name of file to which rootd should save its
+# RPKI CRL. Default is "Root.crl".
+#
+# @par @c rpki-root-manifest:
+# Name of file to which rootd should save its
+# RPKI manifest. Default is "Root.mnf".
+#
+# @par @c rpki-subject-pkcs10:
+# Name of file that rootd should use when saving
+# a copy of the received PKCS #10 request for a
+# resource certificate. This is only used for
+# debugging. Default is not to save the PKCS
+# #10 request.
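+#
+# A hypothetical rootd.conf fragment (filenames and port below are
+# invented; the last three defaults are as described above):
+#
+# @verbatim
+# [rootd]
+# bpki-ta            = bpki.ta.cer
+# rootd-bpki-cert    = rootd.cer
+# rootd-bpki-key     = rootd.key
+# rootd-bpki-crl     = rootd.crl
+# child-bpki-cert    = child.cer
+# server-host        = localhost
+# server-port        = 4401
+# rpki-root-key      = root.key
+# rpki-root-cert     = root.cer
+# rpki-root-dir      = publication/
+# rpki-subject-cert  = Subroot.cer
+# rpki-root-crl      = Root.crl
+# rpki-root-manifest = Root.mnf
+# @endverbatim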
+
+## @page irdbdconf irdbd.conf
+#
+# irdbd's default %config file is irdbd.conf; start irdbd with "-c
+# filename" to choose a different %config file. All options are in the
+# section "[irdbd]". Certificates, keys, and trust anchors may be in
+# either DER or PEM format.
+#
+# %Config file options:
+#
+# @par @c startup-message:
+# String to %log on startup, useful when
+# debugging a collection of irdbd instances at
+# once.
+#
+# @par @c sql-username:
+# Username to hand to MySQL when connecting to
+# irdbd's database.
+#
+# @par @c sql-database:
+# MySQL's database name for irdbd's database.
+#
+# @par @c sql-password:
+# Password to hand to MySQL when connecting to
+# irdbd's database.
+#
+# @par @c bpki-ta:
+# Name of file containing BPKI trust anchor. All
+# BPKI certificate validation in irdbd traces
+# back to this trust anchor.
+#
+# @par @c irdbd-cert:
+# Name of file containing irdbd's own BPKI
+# certificate.
+#
+# @par @c irdbd-key:
+# Name of file containing RSA key corresponding
+# to irdbd-cert.
+#
+# @par @c rpkid-cert:
+# Name of file containing the BPKI certificate
+# used by the one and only rpkid instance
+# authorized to contact this irdbd instance.
+#
+# @par @c http-url:
+# Service URL for irdbd. Must be a %http:// URL.
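+#
+# A hypothetical irdbd.conf fragment (filenames, credentials, and URL
+# below are invented):
+#
+# @verbatim
+# [irdbd]
+# startup-message = irdbd starting
+# sql-username    = rpki
+# sql-database    = irdbd
+# sql-password    = SECRET
+# bpki-ta         = bpki.ta.cer
+# irdbd-cert      = irdbd.cer
+# irdbd-key       = irdbd.key
+# rpkid-cert      = rpkid.cer
+# http-url        = http://localhost:8081/
+# @endverbatim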
+
+## @page smoketestconf smoketest.conf
+#
+# All of the options in smoketest's (optional) configuration file are
+# overrides for wired-in default values. In almost all cases the
+# defaults will suffice. There are a ridiculous number of options,
+# most of which no one will ever need; see the code for details.  The
+# default name for this configuration file is smoketest.conf; run
+# smoketest with "-c filename" to change it.
+
+## @page smoketestyaml smoketest.yaml
+#
+# smoketest's second configuration file is named smoketest.yaml by
+# default; run smoketest with "-y filename" to change it.  The YAML
+# file contains multiple YAML "documents". The first document
+# describes the initial test layout and resource allocations,
+# subsequent documents describe modifications to the initial
+# allocations and other parameters. Resources listed in the initial
+# layout are aggregated automatically, so that a node in the resource
+# hierarchy automatically receives the resources it needs to issue
+# whatever its children are listed as holding. Actions in the
+# subsequent documents are modifications to the current resource set,
+# modifications to validity dates or other non-resource parameters, or
+# special commands like "sleep".
+#
+# Here's an example of current usage:
+#
+# @verbatim
+# name: Alice
+# valid_for: 2d
+# sia_base: "rsync://alice.example/rpki/"
+# kids:
+# - name: Bob
+# kids:
+# - name: Carol
+# ipv4: 192.0.2.1-192.0.2.33
+# asn: 64533
+# ---
+# - name: Carol
+# valid_add: 10
+# ---
+# - name: Carol
+# add_as: 33
+# valid_add: 2d
+# ---
+# - name: Carol
+# valid_sub: 2d
+# ---
+# - name: Carol
+# valid_for: 10d
+# @endverbatim
+#
+# This specifies an initial layout consisting of an RPKI engine named
+# "Alice", with one child "Bob", which in turn has one child "Carol".
+# Carol has a set of assigned resources, and all resources in the system
+# are initially set to be valid for two days from the time at which the
+# test is started. The first subsequent document adds ten seconds to
+# the validity interval for Carol's resources and makes no other
+# modifications. The second subsequent document grants Carol additional
+# resources and adds another two days to the validity interval for
+# Carol's resources. The next document subtracts two days from the
+# validity interval for Carol's resources. The final document sets the
+# validity interval for Carol's resources to ten days.
+#
+# Operators in subsequent (update) documents:
+#
+# @par @c add_as:
+# Add ASN resources.
+#
+# @par @c add_v4:
+# Add IPv4 resources.
+#
+# @par @c add_v6:
+# Add IPv6 resources.
+#
+# @par @c sub_as:
+# Subtract ASN resources.
+#
+# @par @c sub_v4:
+# Subtract IPv4 resources.
+#
+# @par @c sub_v6:
+# Subtract IPv6 resources.
+#
+# @par @c valid_until:
+# Set an absolute expiration date.
+#
+# @par @c valid_for:
+# Set a relative expiration date.
+#
+# @par @c valid_add:
+# Add to validity interval.
+#
+# @par @c valid_sub:
+# Subtract from validity interval.
+#
+# @par @c sleep [interval]:
+# Sleep for specified interval, or until smoketest receives a SIGALRM signal.
+#
+# @par @c shell cmd...:
+# Pass rest of line verbatim to /bin/sh and block until the shell returns.
+#
+# Absolute timestamps (@c valid_until) should be expressed in the UTC
+# timestamp format used in XML (an xsd:dateTime with the trailing "Z").
+#
+# Intervals (@c valid_add, @c valid_sub, @c valid_for, @c sleep) are either
+# integers, in which case they're interpreted as seconds, or are a
+# string of the form "wD xH yM zS" where w, x, y, and z are integers and
+# D, H, M, and S indicate days, hours, minutes, and seconds. In the
+# latter case all of the fields are optional, but at least one must be
+# specified. For example, "3D4H" means "three days plus four hours".
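+#
+# As a rough illustration of how such interval strings can be turned
+# into seconds (this is a sketch, not the parser smoketest actually
+# uses, and the function name is invented):
+#
+# @verbatim
+# import re
+#
+# _interval_re = re.compile(
+#   r"^\s*(?:(\d+)D)?\s*(?:(\d+)H)?\s*(?:(\d+)M)?\s*(?:(\d+)S)?\s*$", re.I)
+#
+# def parse_interval(text):
+#   if text.strip().isdigit():
+#     return int(text)                  # bare integer: seconds
+#   m = _interval_re.match(text)
+#   if m is None or not any(m.groups()):
+#     raise ValueError("Bad interval %r" % text)
+#   d, h, mins, s = (int(g) if g else 0 for g in m.groups())
+#   return ((d * 24 + h) * 60 + mins) * 60 + s
+#
+# parse_interval("3D4H")                # => 273600 seconds
+# @endverbatim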
+
+
+## @page Left-Right Left-Right Protocol
+#
+# The left-right protocol is really two separate client/server
+# protocols over separate channels between the RPKI engine and the IR
+# back end (IRBE). The IRBE is the client for one of the
+# subprotocols, the RPKI engine is the client for the other.
+#
+# @section irbe_operations Operations initiated by the IRBE
+#
+# This part of the protocol uses a kind of message-passing.  Each %object
+# that the RPKI engine knows about takes five messages: "create", "set",
+# "get", "list", and "destroy". Actions which are not just data
+# operations on %objects are handled via an SNMP-like mechanism, as if
+# they were fields to be set. For example, to generate a keypair one
+# "sets" the "generate-keypair" field of a BSC %object, even though there
+# is no such field in the %object itself as stored in SQL. This is a bit
+# of a kludge, but the reason for doing it as if these were variables
+# being set is to allow composite operations such as creating a BSC,
+# populating all of its data fields, and generating a keypair, all as a
+# single operation. With this model, that's trivial, otherwise it's at
+# least two round trips.
+#
+# Fields can be set in either "create" or "set" operations, the
+# difference just being whether the %object already exists. A "get"
+# operation returns all visible fields of the %object. A "list"
+# operation returns a %list containing what "get" would have returned on
+# each of those %objects.
+#
+# Left-right protocol %objects are encoded as signed CMS messages
+# containing XML as eContent and using an eContentType OID of @c id-ct-xml
+# (1.2.840.113549.1.9.16.1.28). These CMS messages are in turn passed
+# as the data for HTTP POST operations, with an HTTP content type of
+# "application/x-rpki" for both the POST data and the response data.
+#
+# All operations allow an optional "tag" attribute which can be any
+# alphanumeric token. The main purpose of the tag attribute is to allow
+# batching of multiple requests into a single PDU.
+#
+# @subsection self_obj <self/> object
+#
+# A @c &lt;self/&gt; %object represents one virtual RPKI engine. In simple cases
+# where the RPKI engine operator operates the engine only on their own
+# behalf, there will only be one @c &lt;self/&gt; %object, representing the engine
+# operator's organization, but in environments where the engine operator
+# hosts other entities, there will be one @c &lt;self/&gt; %object per hosted
+# entity (probably including the engine operator's own organization,
+# considered as a hosted customer of itself).
+#
+# Some of the RPKI engine's configured parameters and data are shared by
+# all hosted entities, but most are tied to a specific @c &lt;self/&gt; %object.
+# Data which are shared by all hosted entities are referred to as
+# "per-engine" data, data which are specific to a particular @c &lt;self/&gt;
+# %object are "per-self" data.
+#
+# Since all other RPKI engine %objects refer to a @c &lt;self/&gt; %object via a
+# "self_handle" value, one must create a @c &lt;self/&gt; %object before one can
+# usefully configure any other left-right protocol %objects.
+#
+# Every @c &lt;self/&gt; %object has a self_handle attribute, which must be specified
+# for the "create", "set", "get", and "destroy" actions.
+#
+# Payload data which can be configured in a @c &lt;self/&gt; %object:
+#
+# @par @c use_hsm (attribute):
+# Whether to use a Hardware Signing Module. At present this option
+# has no effect, as the implementation does not yet support HSMs.
+#
+# @par @c crl_interval (attribute):
+# Positive integer representing the planned lifetime of an RPKI CRL
+# for this @c &lt;self/&gt;, measured in seconds.
+#
+# @par @c regen_margin (attribute):
+# Positive integer representing how long before expiration of an
+# RPKI certificate a new one should be generated, measured in
+# seconds. At present this only affects the one-off EE
+# certificates associated with ROAs. This parameter also controls
+# how long before the nextUpdate time of CRL or manifest the CRL
+# or manifest should be updated.
+#
+# @par @c bpki_cert (element):
+# BPKI CA certificate for this @c &lt;self/&gt;. This is used as part of the
+# certificate chain when validating incoming TLS and CMS messages,
+# and should be the issuer of cross-certification BPKI certificates
+# used in @c &lt;repository/&gt;, @c &lt;parent/&gt;, and @c &lt;child/&gt; %objects. If the
+# bpki_glue certificate is in use (below), the bpki_cert certificate
+# should be issued by the bpki_glue certificate; otherwise, the
+# bpki_cert certificate should be issued by the per-engine bpki_ta
+# certificate.
+#
+# @par @c bpki_glue (element):
+# Another BPKI CA certificate for this @c &lt;self/&gt;, usually not needed.
+# Certain pathological cross-certification cases require a
+# two-certificate chain due to issuer name conflicts. If used, the
+# bpki_glue certificate should be the issuer of the bpki_cert
+# certificate and should be issued by the per-engine bpki_ta
+# certificate; if not needed, the bpki_glue certificate should be
+# left unset.
+#
+# Control attributes that can be set to "yes" to force actions:
+#
+# @par @c rekey:
+# Start a key rollover for every RPKI CA associated with every
+# @c &lt;parent/&gt; %object associated with this @c &lt;self/&gt; %object. This is the
+# first phase of a key rollover operation.
+#
+# @par @c revoke:
+# Revoke any remaining certificates for any expired key associated
+# with any RPKI CA for any @c &lt;parent/&gt; %object associated with this
+# @c &lt;self/&gt; %object. This is the second (cleanup) phase for a key
+# rollover operation; it's separate from the first phase to leave
+# time for new RPKI certificates to propagate and be installed.
+#
+# @par @c reissue:
+# Not implemented, may be removed from protocol. Original theory
+# was that this operation would force reissuance of any %object with
+# a changed key, but as that happens automatically as part of the
+# key rollover mechanism this operation seems unnecessary.
+#
+# @par @c run_now:
+# Force immediate processing for all tasks associated with this
+# @c &lt;self/&gt; %object that would ordinarily be performed under cron. Not
+# currently implemented.
+#
+# @par @c publish_world_now:
+# Force (re)publication of every publishable %object for this @c &lt;self/&gt;
+# %object. Not currently implemented. Intended to aid in recovery
+# if RPKI engine and publication engine somehow get out of sync.
+#
+#
+# @subsection bsc_obj <bsc/> object
+#
+# The @c &lt;bsc/&gt; ("business signing context") %object represents all the BPKI
+# data needed to sign outgoing CMS messages. Various other
+# %objects include pointers to a @c &lt;bsc/&gt; %object. Whether a particular
+# @c &lt;self/&gt; uses only one @c &lt;bsc/&gt; or multiple is a configuration decision
+# based on external requirements: the RPKI engine code doesn't care, it
+# just cares that, for any %object representing a relationship for which
+# it must sign messages, there be a @c &lt;bsc/&gt; %object that it can use to
+# produce that signature.
+#
+# Every @c &lt;bsc/&gt; %object has a bsc_handle, which must be specified for the
+# "create", "get", "set", and "destroy" actions. Every @c &lt;bsc/&gt; also has a self_handle
+# attribute which indicates the @c &lt;self/&gt; %object with which this @c &lt;bsc/&gt;
+# %object is associated.
+#
+# Payload data which can be configured in a @c &lt;bsc/&gt; %object:
+#
+# @par @c signing_cert (element):
+# BPKI certificate to use when generating a signature.
+#
+# @par @c signing_cert_crl (element):
+# CRL which would %list signing_cert if it had been revoked.
+#
+# Control attributes that can be set to "yes" to force actions:
+#
+# @par @c generate_keypair:
+# Generate a new BPKI keypair and return a PKCS #10 certificate
+# request. The resulting certificate, once issued, should be
+# configured as this @c &lt;bsc/&gt; %object's signing_cert.
+#
+# Additional attributes which may be specified when specifying
+# "generate_keypair":
+#
+# @par @c key_type:
+# Type of BPKI keypair to generate. "rsa" is both the default and,
+# at the moment, the only allowed value.
+#
+# @par @c hash_alg:
+# Cryptographic hash algorithm to use with this keypair. "sha256"
+# is both the default and, at the moment, the only allowed value.
+#
+# @par @c key_length:
+# Length in bits of the keypair to be generated. "2048" is both the
+# default and, at the moment, the only allowed value.
+#
+# Replies to "create" and "set" actions that specify "generate-keypair"
+# include a &lt;bsc_pkcs10/> element, as do replies to "get" and "list"
+# actions for a @c &lt;bsc/&gt; %object for which a "generate-keypair" command has
+# been issued. The RPKI engine stores the PKCS #10 request, which
+# allows the IRBE to reuse the request if and when it needs to reissue
+# the corresponding BPKI signing certificate.
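+#
+# Putting the pieces above together, a single "create" query for a
+# @c &lt;bsc/&gt; %object could populate its data fields and request a keypair
+# in one operation.  The sketch below is purely illustrative: the element
+# and attribute names come from this description, but the message
+# framing, namespace, and exact encoding are defined by the left-right
+# RelaxNG schema, and the whole message is CMS-signed as described above:
+#
+# @verbatim
+# <bsc action="create" tag="bsc-1" self_handle="Alice"
+#      bsc_handle="Alice-bsc" generate_keypair="yes"
+#      key_type="rsa" hash_alg="sha256" key_length="2048"/>
+# @endverbatim
+#
+# The corresponding reply would include a &lt;bsc_pkcs10/&gt; element carrying
+# the PKCS #10 request for the newly generated keypair.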
+#
+# @subsection parent_obj <parent/> object
+#
+# The @c &lt;parent/&gt; %object represents the RPKI engine's view of a particular
+# parent of the current @c &lt;self/&gt; %object in the up-down protocol. Due to
+# the way that the resource hierarchy works, a given @c &lt;self/&gt; may obtain
+# resources from multiple parents, but it will always have at least one;
+# in the case of IANA or an RIR, the parent RPKI engine may be a trivial
+# stub.
+#
+# Every @c &lt;parent/&gt; %object has a parent_handle, which must be specified for
+# the "create", "get", "set", and "destroy" actions. Every @c &lt;parent/&gt; also has a
+# self_handle attribute which indicates the @c &lt;self/&gt; %object with which this
+# @c &lt;parent/&gt; %object is associated, a bsc_handle attribute indicating the @c &lt;bsc/&gt;
+# %object to be used when signing messages sent to this parent, and a
+# repository_handle indicating the @c &lt;repository/&gt; %object to be used when
+# publishing %objects issued under the certificate issued by this parent.
+#
+# Payload data which can be configured in a @c &lt;parent/&gt; %object:
+#
+# @par @c peer_contact_uri (attribute):
+# HTTP URI used to contact this parent.
+#
+# @par @c sia_base (attribute):
+# The leading portion of an rsync URI that the RPKI engine should
+# use when composing the publication URI for %objects issued by the
+# RPKI certificate issued by this parent.
+#
+# @par @c sender_name (attribute):
+# Sender name to use in the up-down protocol when talking to this
+# parent. The RPKI engine doesn't really care what this value is,
+# but other implementations of the up-down protocol do care.
+#
+# @par @c recipient_name (attribute):
+# Recipient name to use in the up-down protocol when talking to this
+# parent. The RPKI engine doesn't really care what this value is,
+# but other implementations of the up-down protocol do care.
+#
+# @par @c bpki_cms_cert (element):
+# BPKI CMS CA certificate for this @c &lt;parent/&gt;. This is used as part
+# of the certificate chain when validating incoming CMS messages.  If
+# the bpki_cms_glue certificate is in use (below), the bpki_cms_cert
+# certificate should be issued by the bpki_cms_glue certificate;
+# otherwise, the bpki_cms_cert certificate should be issued by the
+# bpki_cert certificate in the @c &lt;self/&gt; %object.
+#
+# @par @c bpki_cms_glue (element):
+# Another BPKI CMS CA certificate for this @c &lt;parent/&gt;, usually not
+# needed. Certain pathological cross-certification cases require a
+# two-certificate chain due to issuer name conflicts. If used, the
+# bpki_cms_glue certificate should be the issuer of the
+# bpki_cms_cert certificate and should be issued by the bpki_cert
+# certificate in the @c &lt;self/&gt; %object; if not needed, the
+# bpki_cms_glue certificate should be left unset.
+#
+# Control attributes that can be set to "yes" to force actions:
+#
+# @par @c rekey:
+# This is like the rekey command in the @c &lt;self/&gt; %object, but limited
+# to RPKI CAs under this parent.
+#
+# @par @c reissue:
+# This is like the reissue command in the @c &lt;self/&gt; %object, but limited
+# to RPKI CAs under this parent.
+#
+# @par @c revoke:
+# This is like the revoke command in the @c &lt;self/&gt; %object, but limited
+# to RPKI CAs under this parent.
+#
+# @subsection child_obj <child/> object
+#
+# The @c &lt;child/&gt; %object represents the RPKI engine's view of a particular
+# child of the current @c &lt;self/&gt; in the up-down protocol.
+#
+# Every @c &lt;child/&gt; %object has a child_handle, which must be specified for the
+# "create", "get", "set", and "destroy" actions. Every @c &lt;child/&gt; also has a
+# self_handle attribute which indicates the @c &lt;self/&gt; %object with which this
+# @c &lt;child/&gt; %object is associated.
+#
+# Payload data which can be configured in a @c &lt;child/&gt; %object:
+#
+# @par @c bpki_cert (element):
+# BPKI CA certificate for this @c &lt;child/&gt;. This is used as part of
+# the certificate chain when validating incoming TLS and CMS
+# messages. If the bpki_glue certificate is in use (below), the
+# bpki_cert certificate should be issued by the bpki_glue
+# certificate; otherwise, the bpki_cert certificate should be issued
+# by the bpki_cert certificate in the @c &lt;self/&gt; %object.
+#
+# @par @c bpki_glue (element):
+# Another BPKI CA certificate for this @c &lt;child/&gt;, usually not needed.
+# Certain pathological cross-certification cases require a
+# two-certificate chain due to issuer name conflicts. If used, the
+# bpki_glue certificate should be the issuer of the bpki_cert
+# certificate and should be issued by the bpki_cert certificate in
+# the @c &lt;self/&gt; %object; if not needed, the bpki_glue certificate
+# should be left unset.
+#
+# Control attributes that can be set to "yes" to force actions:
+#
+# @par @c reissue:
+# Not implemented, may be removed from protocol.
+#
+# @subsection repository_obj <repository/> object
+#
+# The @c &lt;repository/&gt; %object represents the RPKI engine's view of a
+# particular publication repository used by the current @c &lt;self/&gt; %object.
+#
+# Every @c &lt;repository/&gt; %object has a repository_handle, which must be
+# specified for the "create", "get", "set", and "destroy" actions. Every
+# @c &lt;repository/&gt; also has a self_handle attribute which indicates the @c &lt;self/&gt;
+# %object with which this @c &lt;repository/&gt; %object is associated.
+#
+# Payload data which can be configured in a @c &lt;repository/&gt; %object:
+#
+# @par @c peer_contact_uri (attribute):
+# HTTP URI used to contact this repository.
+#
+# @par @c bpki_cms_cert (element):
+# BPKI CMS CA certificate for this @c &lt;repository/&gt;. This is used as part
+# of the certificate chain when validating incoming CMS messages.  If
+# the bpki_cms_glue certificate is in use (below), the bpki_cms_cert
+# certificate should be issued by the bpki_cms_glue certificate;
+# otherwise, the bpki_cms_cert certificate should be issued by the
+# bpki_cert certificate in the @c &lt;self/&gt; %object.
+#
+# @par @c bpki_cms_glue (element):
+# Another BPKI CMS CA certificate for this @c &lt;repository/&gt;, usually not
+# needed. Certain pathological cross-certification cases require a
+# two-certificate chain due to issuer name conflicts. If used, the
+# bpki_cms_glue certificate should be the issuer of the
+# bpki_cms_cert certificate and should be issued by the bpki_cert
+# certificate in the @c &lt;self/&gt; %object; if not needed, the
+# bpki_cms_glue certificate should be left unset.
+#
+# At present there are no control attributes for @c &lt;repository/&gt; %objects.
+#
+# @subsection route_origin_obj <route_origin/> object
+#
+# This section is out-of-date. The @c &lt;route_origin/&gt; %object
+# has been replaced by the @c &lt;list_roa_requests/&gt; IRDB query,
+# but the documentation for that hasn't been written yet.
+#
+# The @c &lt;route_origin/&gt; %object is a kind of prototype for a ROA. It
+# contains all the information needed to generate a ROA once the RPKI
+# engine obtains the appropriate RPKI certificates from its parent(s).
+#
+# Note that a @c &lt;route_origin/&gt; %object represents a ROA to be generated on
+# behalf of @c &lt;self/&gt;, not on behalf of a @c &lt;child/&gt;. Thus, a hosted entity
+# that has no children but which does need to generate ROAs would be
+# represented by a hosted @c &lt;self/&gt; with no @c &lt;child/&gt; %objects but one or
+# more @c &lt;route_origin/&gt; %objects. While lumping ROA generation in with
+# the other RPKI engine activities may seem a little odd at first, it's
+# a natural consequence of the design requirement that the RPKI daemon
+# never transmit private keys across the network in any form; given this
+# requirement, the RPKI engine that holds the private keys for an RPKI
+# certificate must also be the engine which generates any ROAs that
+# derive from that RPKI certificate.
+#
+# The precise content of the @c &lt;route_origin/&gt; has changed over time as
+# the underlying ROA specification has changed. The current
+# implementation as of this writing matches what we expect to see in
+# draft-ietf-sidr-roa-format-03, once it is issued. In particular, note
+# that the exactMatch boolean from the -02 draft has been replaced by
+# the prefix and maxLength encoding used in the -03 draft.
+#
+# Payload data which can be configured in a @c &lt;route_origin/&gt; %object:
+#
+# @par @c asn (attribute):
+# Autonomous System Number (ASN) to place in the generated ROA. A
+# single ROA can only grant authorization to a single ASN; multiple
+# ASNs require multiple ROAs, thus multiple @c &lt;route_origin/&gt; %objects.
+#
+# @par @c ipv4 (attribute):
+# %List of IPv4 prefix and maxLength values, see below for format.
+#
+# @par @c ipv6 (attribute):
+# %List of IPv6 prefix and maxLength values, see below for format.
+#
+# Control attributes that can be set to "yes" to force actions:
+#
+# @par @c suppress_publication:
+# Not implemented, may be removed from protocol.
+#
+# The lists of IPv4 and IPv6 prefix and maxLength values are represented
+# as comma-separated text strings, with no whitespace permitted. Each
+# entry in such a string represents a single prefix/maxLength pair.
+#
+# ABNF for these address lists:
+#
+# @verbatim
+#
+# <ROAIPAddress> ::= <address> "/" <prefixlen> [ "-" <max_prefixlen> ]
+# ; Where <max_prefixlen> defaults to the same
+# ; value as <prefixlen>.
+#
+# <ROAIPAddressList> ::= <ROAIPAddress> *( "," <ROAIPAddress> )
+#
+# @endverbatim
+#
+# For example, @c "10.0.1.0/24-32,10.0.2.0/24", which is a shorthand
+# form of @c "10.0.1.0/24-32,10.0.2.0/24-24".
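+#
+# A rough sketch (not the parser the RPKI code actually uses; the
+# function name is invented) of turning such a %list into
+# (prefix, prefixlen, max_prefixlen) tuples:
+#
+# @verbatim
+# def parse_roa_prefix_list(text):
+#   result = []
+#   for item in text.split(","):
+#     addr, slash, lengths = item.partition("/")
+#     prefixlen, dash, max_prefixlen = lengths.partition("-")
+#     result.append((addr, int(prefixlen),
+#                    int(max_prefixlen) if max_prefixlen else int(prefixlen)))
+#   return result
+#
+# parse_roa_prefix_list("10.0.1.0/24-32,10.0.2.0/24")
+# # => [("10.0.1.0", 24, 32), ("10.0.2.0", 24, 24)]
+# @endverbatim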
+#
+# @section irdb_queries Operations initiated by the RPKI engine
+#
+# The left-right protocol also includes queries from the RPKI engine
+# back to the IRDB. These queries do not follow the message-passing
+# pattern used in the IRBE-initiated part of the protocol. Instead,
+# there's a single query back to the IRDB, with a corresponding
+# response.  The CMS encoding is the same as in the rest of
+# the protocol, but the BPKI certificates will be different as the
+# back-queries and responses form a separate communication channel.
+#
+# @subsection list_resources_msg <list_resources/> messages
+#
+# The @c &lt;list_resources/&gt; query and response allow the RPKI engine to ask
+# the IRDB for information about resources assigned to a particular
+# child. The query must include both a @c "self_handle" attribute naming
+# the @c &lt;self/&gt; that is making the request and also a @c "child_handle"
+# attribute naming the child that is the subject of the query. The
+# query and response also allow an optional @c "tag" attribute of the
+# same form used elsewhere in this protocol, to allow batching.
+#
+# A @c &lt;list_resources/&gt; response includes the following attributes, along
+# with the @c tag (if specified), @c self_handle, and @c child_handle copied
+# from the request:
+#
+# @par @c valid_until:
+# A timestamp indicating the date and time at which certificates
+# generated by the RPKI engine for these data should expire. The
+# timestamp is expressed as an XML @c xsd:dateTime, must be
+# expressed in UTC, and must carry the "Z" suffix indicating UTC.
+#
+# @par @c asn:
+# A %list of autonomous system numbers (ASNs), expressed as a
+# comma-separated sequence of decimal integers with no whitespace.
+#
+# @par @c ipv4:
+# A %list of IPv4 address prefixes and ranges, expressed as a
+# comma-separated %list of prefixes and ranges with no whitespace.
+# See below for format details.
+#
+# @par @c ipv6:
+# A %list of IPv6 address prefixes and ranges, expressed as a
+# comma-separated %list of prefixes and ranges with no whitespace.
+# See below for format details.
+#
+# Entries in a %list of address prefixes and ranges can be either
+# prefixes, which are written in the usual address/prefixlen notation,
+# or ranges, which are expressed as a pair of addresses denoting the
+# beginning and end of the range, written in ascending order separated
+# by a single "-" character. This format is superficially similar to
+# the format used for prefix and maxLength values in the @c &lt;route_origin/&gt;
+# %object, but the semantics differ: note in particular that
+# @c &lt;route_origin/&gt; %objects don't allow ranges, while @c &lt;list_resources/&gt;
+# messages don't allow a maxLength specification.
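+#
+# A purely illustrative sketch of this exchange (attribute names are as
+# described above; message framing, namespace, and CMS wrapping are
+# omitted, and all values are invented):
+#
+# @verbatim
+# <list_resources tag="q1" self_handle="Alice" child_handle="Bob"/>
+#
+# <list_resources tag="q1" self_handle="Alice" child_handle="Bob"
+#                 valid_until="2011-01-01T00:00:00Z"
+#                 asn="64496,64497"
+#                 ipv4="192.0.2.0/24,198.51.100.1-198.51.100.33"
+#                 ipv6="2001:db8::/32"/>
+# @endverbatim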
+#
+# @section left_right_error_handling Error handling
+#
+# Errors in this protocol are handled at two levels.
+#
+# Since all messages in this protocol are conveyed over HTTP
+# connections, basic errors are indicated via the HTTP response code.
+# 4xx and 5xx responses indicate that something bad happened. Errors
+# that make it impossible to decode a query or encode a response are
+# handled in this way.
+#
+# Where possible, errors will result in a @c &lt;report_error/&gt; message which
+# takes the place of the expected protocol response message.
+# @c &lt;report_error/&gt; messages are CMS-signed XML messages like the rest of
+# this protocol, and thus can be archived to provide an audit trail.
+#
+# @c &lt;report_error/&gt; messages only appear in replies, never in queries.
+# The @c &lt;report_error/&gt; message can appear on either the "forward" (IRBE
+# as client of RPKI engine) or "back" (RPKI engine as client of IRDB)
+# communication channel.
+#
+# The @c &lt;report_error/&gt; message includes an optional @c "tag" attribute to
+# assist in matching the error with a particular query when using
+# batching, and also includes a @c "self_handle" attribute indicating the
+# @c &lt;self/&gt; that issued the error.
+#
+# The error itself is conveyed in the @c error_code (attribute). The
+# value of this attribute is a token indicating the specific error that
+# occurred. At present this will be the name of a Python exception; the
+# production version of this protocol will nail down the allowed error
+# tokens here, probably in the RelaxNG schema.
+#
+# The body of the @c &lt;report_error/&gt; element itself is an optional text
+# string; if present, this is debugging information. At present this
+# capability is not used; debugging information goes to syslog.
+
+## @page Publication Publication protocol
+#
+# The %publication protocol is really two separate client/server
+# protocols, between different parties. The first is a configuration
+# protocol for an IRBE to use to configure a %publication engine,
+# the second is the interface by which authorized clients request
+# %publication of specific objects.
+#
+# Much of the architecture of the %publication protocol is borrowed
+# from the @ref Left-Right "left-right protocol": like the
+# left-right protocol, the %publication protocol uses CMS-wrapped XML
+# over HTTP with the same eContentType OID and the same HTTP
+# content-type, and the overall style of the XML messages is very
+# similar to the left-right protocol. All operations allow an
+# optional "tag" attribute to allow batching.
+#
+# The %publication engine operates a single HTTP server which serves
+# both of these subprotocols. The two subprotocols share a single
+# server port, but use distinct URLs to allow demultiplexing.
+#
+# @section Publication-control Publication control subprotocol
+#
+# The control subprotocol reuses the message-passing design of the
+# left-right protocol. Configured objects support the "create", "set",
+# "get", "list", and "destroy" actions, or a subset thereof when the
+# full set of actions doesn't make sense.
+#
+# @subsection config_obj <config/> object
+#
+# The &lt;config/&gt; %object allows configuration of data that apply to the
+# entire %publication server rather than a particular client.
+#
+# There is exactly one &lt;config/&gt; %object in the %publication server, and
+# it only supports the "set" and "get" actions -- it cannot be created
+# or destroyed.
+#
+# Payload data which can be configured in a &lt;config/&gt; %object:
+#
+# @par @c bpki_crl (element):
+# This is the BPKI CRL used by the %publication server when
+# signing the CMS wrapper on responses in the %publication
+# subprotocol. As the CRL must be updated at regular intervals,
+# it's not practical to restart the %publication server when the
+# BPKI CRL needs to be updated. The BPKI model doesn't require
+# use of a BPKI CRL between the IRBE and the %publication server,
+# so we can use the %publication control subprotocol to update the
+# BPKI CRL.
+#
+# @subsection client_obj <client/> object
+#
+# The &lt;client/&gt; %object represents one client authorized to use the
+# %publication server.
+#
+# The &lt;client/&gt; %object supports the full set of "create", "set", "get",
+# "list", and "destroy" actions. Each client has a "client_handle"
+# attribute, which is used in responses and must be specified in "create", "set",
+# "get", or "destroy" actions.
+#
+# Payload data which can be configured in a &lt;client/&gt; %object:
+#
+# @par @c base_uri (attribute):
+# This is the base URI below which this client is allowed to publish
+# data. The %publication server may impose additional constraints in
+# the case of a child publishing beneath its parent.
+#
+# @par @c bpki_cert (element):
+# BPKI CA certificate for this &lt;client/&gt;. This is used as part of
+# the certificate chain when validating incoming TLS and CMS
+# messages. If the bpki_glue certificate is in use (below), the
+# bpki_cert certificate should be issued by the bpki_glue
+# certificate; otherwise, the bpki_cert certificate should be issued
+# by the %publication engine's bpki_ta certificate.
+#
+# @par @c bpki_glue (element):
+# Another BPKI CA certificate for this &lt;client/&gt;, usually not
+# needed. Certain pathological cross-certification cases require a
+# two-certificate chain due to issuer name conflicts. If used, the
+# bpki_glue certificate should be the issuer of the bpki_cert
+# certificate and should be issued by the %publication engine's
+# bpki_ta certificate; if not needed, the bpki_glue certificate
+# should be left unset.
+#
+# @section Publication-publication Publication subprotocol
+#
+# The %publication subprotocol is structured somewhat differently from
+# the %publication control protocol. Objects in the %publication
+# subprotocol represent objects to be published or objects to be
+# withdrawn from %publication. Each kind of %object supports two actions:
+# "publish" and "withdraw". In each case the XML element representing
+# the %object to be published or withdrawn has a "uri" attribute which
+# contains the %publication URI. For "publish" actions, the XML element
+# body contains the DER %object to be published, encoded in Base64; for
+# "withdraw" actions, the XML element body is empty.
+#
+# In theory, the detailed access control for each kind of %object might
+# be different. In practice, as of this writing, access control for all
+# objects is a simple check that the client's @c "base_uri" is a leading
+# substring of the %publication URI. Details of why access control might
+# need to become more complicated are discussed in a later section.
+#
+# @subsection certificate_obj <certificate/> object
+#
+# The &lt;certificate/&gt; %object represents an RPKI certificate to be
+# published or withdrawn.
+#
+# @subsection crl_obj <crl/> object
+#
+# The &lt;crl/&gt; %object represents an RPKI CRL to be published or withdrawn.
+#
+# @subsection manifest_obj <manifest/> object
+#
+# The &lt;manifest/&gt; %object represents an RPKI %publication %manifest to be
+# published or withdrawn.
+#
+# Note that part of the reason for the batching support in the
+# %publication protocol is because @em every %publication or withdrawal
+# action requires a new %manifest, thus every %publication or withdrawal
+# action will involve at least two objects.
+#
+# @subsection roa_obj <roa/> object
+#
+# The &lt;roa/&gt; %object represents a ROA to be published or withdrawn.
+#
+# @section publication_error_handling Error handling
+#
+# Errors in this protocol are handled at two levels.
+#
+# Since all messages in this protocol are conveyed over HTTP
+# connections, basic errors are indicated via the HTTP response code.
+# 4xx and 5xx responses indicate that something bad happened. Errors
+# that make it impossible to decode a query or encode a response are
+# handled in this way.
+#
+# Where possible, errors will result in a &lt;report_error/&gt; message which
+# takes the place of the expected protocol response message.
+# &lt;report_error/&gt; messages are CMS-signed XML messages like the rest of
+# this protocol, and thus can be archived to provide an audit trail.
+#
+# &lt;report_error/&gt; messages only appear in replies, never in
+# queries. The &lt;report_error/&gt; message can appear in both the
+# control and publication subprotocols.
+#
+# The &lt;report_error/&gt; message includes an optional @c "tag" attribute to
+# assist in matching the error with a particular query when using
+# batching.
+#
+# The error itself is conveyed in the @c error_code (attribute). The
+# value of this attribute is a token indicating the specific error that
+# occurred. At present this will be the name of a Python exception; the
+# production version of this protocol will nail down the allowed error
+# tokens here, probably in the RelaxNG schema.
+#
+# The body of the &lt;report_error/&gt; element itself is an optional text
+# string; if present, this is debugging information. At present this
+# capability is not used; debugging information goes to syslog.
+#
+# @section publication_access_control Additional access control considerations.
+#
+# As detailed above, the %publication protocol is trivially simple. This
+# glosses over two bits of potential complexity:
+#
+# @li In the case where parent and child are sharing a repository, we'd
+# like to nest child under parent, because testing has demonstrated
+# that even on relatively slow hardware the delays involved in
+# setting up separate rsync connections tend to dominate
+# synchronization time for relying parties.
+#
+# @li The repository operator might also want to do some checks to
+# assure itself that what it's about to allow the RPKI engine to
+# publish is not dangerous toxic waste.
+#
+# The up-down protocol includes a mechanism by which a parent can
+# suggest a %publication URI to each of its children. The children are
+# not required to accept this hint, and the children must make separate
+# arrangements with the repository operator (who might or might not be
+# the same as the entity that hosts the children's RPKI engine
+# operations) to use the suggested %publication point, but if everything
+# works out, this allows children to nest cleanly under their parents
+# %publication points, which helps reduce synchronization time for
+# relying parties.
+#
+# In this case, one could argue that the %publication server is
+# responsible for preventing one of its clients (the child in the above
+# description) from stomping on data published by another of its clients
+# (the parent in the above description). This goes beyond the basic
+# access check and requires the %publication server to determine whether
+# the parent has given its consent for the child to publish under the
+# parent. Since the RPKI certificate profile requires the child's
+# %publication point to be indicated in an SIA extension in a certificate
+# issued by the parent to the child, the %publication engine can infer
+# this permission from the parent's issuance of a certificate to the
+# child. Since, by definition, the parent also uses this %publication
+# server, this is an easy check, as the %publication server should
+# already have the parent's certificate available by the time it needs
+# to check the child's certificate.
+#
+# The previous paragraph only covers a "publish" action for a
+# &lt;certificate/&gt; %object. For "publish" actions on other
+# objects, the %publication server would need to trace permission back
+# to the certificate issued by the parent; for "withdraw" actions,
+# the %publication server would have to perform the same checks it
+# would perform for a "publish" action, using the current published
+# data before withdrawing it. The latter in turn implies an ordering
+# constraint on "withdraw" actions in order to preserve the data
+# necessary for these access control decisions; as this may prove
+# impractical, the %publication server will probably need to make
+# periodic sweeps over its published data looking for orphaned
+# objects, but that's probably a good idea anyway.
+#
+# Note that, in this %publication model, any agreement that the
+# repository makes to publish the RPKI engine's output is conditional
+# upon the %object to be published passing whatever access control checks
+# the %publication server imposes.
+
+## @page sql-schemas SQL database schemas
+#
+# @li @subpage rpkid-sql "rpkid database schema"
+# @li @subpage pubd-sql "pubd database schema"
+# @li @subpage irdbd-sql "irdbd database schema"
+
+## @page rpkid-sql rpkid SQL schema
+#
+# @image html @abs_top_builddir@/rpkid/doc/rpkid.png "Diagram of rpkid.sql"
+# @image latex @abs_top_builddir@/rpkid/doc/rpkid.eps "Diagram of rpkid.sql" height=\textheight
+#
+# @verbinclude rpkid.sql
+
+## @page pubd-sql pubd SQL Schema
+#
+# @image html @abs_top_builddir@/rpkid/doc/pubd.png "Diagram of pubd.sql"
+# @image latex @abs_top_builddir@/rpkid/doc/pubd.eps "Diagram of pubd.sql" width=\textwidth
+#
+# @verbinclude @abs_top_builddir@/rpkid/pubd.sql
+
+## @page irdbd-sql irdbd SQL Schema
+#
+# @image html @abs_top_builddir@/rpkid/doc/irdbd.png "Diagram of irdbd.sql"
+# @image latex @abs_top_builddir@/rpkid/doc/irdbd.eps "Diagram of irdbd.sql" width=\textwidth
+#
+# @verbinclude @abs_top_builddir@/rpkid/irdbd.sql
+
+## @page bpki-model BPKI model
+#
+# The "business PKI" (BPKI) is the PKI used to authenticate
+# communication on the up-down, left-right, and %publication protocols.
+# BPKI certificates are @em not resource PKI (RPKI) certificates. The
+# BPKI is a separate PKI that represents relationships between the
+# various entities involved in the production side of the RPKI system.
+# In most cases the BPKI tree will follow existing business
+# relationships, hence the "B" (Business) in "BPKI".
+#
+# Setup of the BPKI is handled by the back end; for the most part,
+# rpkid and pubd just use the result. The one place where the engines
+# are directly involved in creation of new BPKI certificates is in the
+# production of end-entity certificates for use by the engines.
+#
+# For the most part an ordinary user of this package need not worry
+# about the details explained here, as the
+# @ref MyRPKI "myrpki tool"
+# takes care of all of this. However, users who want to understand
+# what's going on behind the scenes or who have needs too complex for
+# the myrpki tool to handle might want to understand the underlying
+# model.
+#
+# There are a few design principles that underlie the chosen BPKI model:
+#
+# @li Each engine should rely on a single BPKI trust anchor which is
+# controlled by the back end entity that runs the engine; all
+# other trust material should be cross-certified into the engine's
+# BPKI tree.
+#
+# @li Private keys must never transit the network.
+#
+# @li Except for end entity certificates, the engine should only have
+# access to the BPKI certificates; in particular, the private key
+# for the BPKI trust anchor should not be accessible to the engine.
+#
+# @li The number of BPKI keys and certificates that the engine has to
+# manage should be no larger than is necessary.
+#
+# rpkid's hosting model adds an additional constraint: rpkid's BPKI
+# trust anchor belongs to the entity operating rpkid, but the entities
+# hosted by rpkid should have control of their own BPKI private keys.
+# This implies the need for an additional layer of BPKI certificate
+# hierarchy within rpkid.
+#
+# Here is a simplified picture of what the BPKI might look like for an
+# rpkid operator that hosts two entities, "Alice" and "Ellen":
+#
+# @image html @abs_top_builddir@/rpkid/doc/rpkid-bpki.png
+# @image latex @abs_top_builddir@/rpkid/doc/rpkid-bpki.eps width=\textwidth
+#
+# Black objects belong to the hosting entity, blue objects belong to
+# the hosted entities, red objects are cross-certified objects from
+# the hosted entities' peers. The arrows indicate certificate
+# issuance: solid arrows are the ones that rpkid will care about
+# during certificate validation, dotted arrows show the origin of the
+# EE certificates that rpkid uses to sign CMS and TLS messages.
+#
+# The certificate tree looks complicated, but the set of certificates
+# needed to build any particular validation chain is obvious.
+#
+# Detailed instructions on how to build a BPKI are beyond the scope of
+# this document, but one can handle simple cases using the OpenSSL
+# command line tool and cross_certify; the latter is a tool
+# designed specifically for the purpose of generating the
+# cross-certification certificates needed to splice foreign trust
+# material into a BPKI tree.
+#
+# The BPKI tree for a pubd instance is similar to the BPKI tree for
+# an rpkid instance, but is a bit simpler, as pubd does not provide
+# hosting in the same sense that rpkid does: pubd is a relatively
+# simple server that publishes objects as instructed by its clients.
+#
+# Here's a simplified picture of what the BPKI might look like for a
+# pubd operator that serves two clients, "Alice" and "Bob":
+#
+# @image html @abs_top_builddir@/rpkid/doc/pubd-bpki.png
+# @image latex @abs_top_builddir@/rpkid/doc/pubd-bpki.eps width=\textwidth
+#
+# While it is likely that RIRs (at least) will operate both rpkid and
+# pubd instances, the two functions are conceptually separate. As far
+# as pubd is concerned, it doesn't matter who operates the rpkid
+# instance: pubd just has clients, each of which has trust material
+# that has been cross-certified into pubd's BPKI. Similarly, rpkid
+# doesn't really care who operates a pubd instance that it's been
+# configured to use, it just treats that pubd as a foreign BPKI whose
+# trust material has to be cross-certified into its own BPKI. Cross
+# certification itself is done by the back end operator, using
+# cross_certify or some equivalent tool; the resulting BPKI
+# certificates are configured into rpkid and pubd via the left-right
+# protocol and the control subprotocol of the publication protocol,
+# respectively.
+#
+# Because the BPKI tree is almost entirely controlled by the operating
+# entity, CRLs are not necessary for most of the BPKI. The one
+# exception to this is the EE certificates issued under the
+# cross-certification points. These EE certificates are generated by
+# the peer, not the local operator, and thus require CRLs. Because of
+# this, both rpkid and pubd require regular updates of certain BPKI
+# CRLs, again via the left-right and publication control protocols.
+#
+# Because the left-right protocol and the publication control
+# subprotocol are used to configure BPKI certificates and CRLs, they
+# cannot themselves use certificates and CRLs configured in this way.
+# This is why the configuration files for rpkid and pubd require
+# static configuration of the left-right and publication control
+# certificates.
+
+# Local Variables:
+# mode:python
+# compile-command: "cd ../.. && ./config.status && cd rpkid && make docs"
+# End:
diff --git a/rpkid.without_tls/rpki/__init__.py b/rpkid.without_tls/rpki/__init__.py
new file mode 100644
index 00000000..9e090f63
--- /dev/null
+++ b/rpkid.without_tls/rpki/__init__.py
@@ -0,0 +1,2 @@
+# This file exists to tell Python that the contents of this
+# directory constitute a Python package.
diff --git a/rpkid.without_tls/rpki/adns.py b/rpkid.without_tls/rpki/adns.py
new file mode 100644
index 00000000..f627ac7a
--- /dev/null
+++ b/rpkid.without_tls/rpki/adns.py
@@ -0,0 +1,374 @@
+"""
+Basic asynchronous DNS code, using asyncore and Bob Halley's excellent
+dnspython package.
+
+$Id$
+
+Copyright (C) 2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2003--2007, 2009, 2010 Nominum, Inc.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose with or without fee is hereby granted,
+provided that the above copyright notice and this permission notice
+appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import asyncore, socket, time, sys
+import rpki.async, rpki.sundial, rpki.log
+
+try:
+ import dns.resolver, dns.rdatatype, dns.rdataclass, dns.name, dns.message
+ import dns.inet, dns.exception, dns.query, dns.rcode, dns.ipv4, dns.ipv6
+except ImportError:
+ if __name__ == "__main__":
+ sys.exit("DNSPython not available, skipping rpki.adns unit test")
+ else:
+ raise
+
+## @var resolver
+# Resolver object, shared by everything using this module
+
+resolver = dns.resolver.Resolver()
+if resolver.cache is None:
+ resolver.cache = dns.resolver.Cache()
+
+## @var nameservers
+# Nameservers from resolver.nameservers converted to (af, address)
+# pairs. The latter turns out to be a more useful form for us to use
+# internally, because it simplifies the checks we need to make upon
+# packet reception.
+
+nameservers = []
+
+for ns in resolver.nameservers:
+ try:
+ nameservers.append((socket.AF_INET, dns.ipv4.inet_aton(ns)))
+ continue
+ except:
+ pass
+ try:
+ nameservers.append((socket.AF_INET6, dns.ipv6.inet_aton(ns)))
+ continue
+ except:
+ pass
+ rpki.log.error("Couldn't parse nameserver address %r" % ns)
+
+class dispatcher(asyncore.dispatcher):
+ """
+ Basic UDP socket reader for use with asyncore.
+ """
+
+ def __init__(self, cb, eb, af, bufsize = 65535):
+ asyncore.dispatcher.__init__(self)
+ self.cb = cb
+ self.eb = eb
+ self.af = af
+ self.bufsize = bufsize
+ self.create_socket(af, socket.SOCK_DGRAM)
+
+ def handle_read(self):
+ """
+ Receive a packet, hand it off to query class callback.
+ """
+ wire, from_address = self.recvfrom(self.bufsize)
+ self.cb(self.af, from_address[0], from_address[1], wire)
+
+ def handle_error(self):
+ """
+ Pass errors to query class errback.
+ """
+ self.eb(sys.exc_info()[1])
+
+ def handle_connect(self):
+ """
+ Quietly ignore UDP "connection" events.
+ """
+ pass
+
+ def writable(self):
+ """
+ We don't need to hear about UDP socket becoming writable.
+ """
+ return False
+
+
+class query(object):
+ """
+ Simplified (no search paths) asynchronous adaptation of
+ dns.resolver.Resolver.query() (q.v.).
+ """
+
+ def __init__(self, cb, eb, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
+ if isinstance(qname, (str, unicode)):
+ qname = dns.name.from_text(qname)
+ if isinstance(qtype, str):
+ qtype = dns.rdatatype.from_text(qtype)
+ if isinstance(qclass, str):
+ qclass = dns.rdataclass.from_text(qclass)
+ assert qname.is_absolute()
+ self.cb = cb
+ self.eb = eb
+ self.qname = qname
+ self.qtype = qtype
+ self.qclass = qclass
+ self.start = time.time()
+ rpki.async.defer(self.go)
+
+ def go(self):
+ """
+ Start running the query. Check our cache before doing network
+ query; if we find an answer there, just return it. Otherwise
+ start the network query.
+ """
+ if resolver.cache:
+ answer = resolver.cache.get((self.qname, self.qtype, self.qclass))
+ else:
+ answer = None
+ if answer:
+ self.cb(self, answer)
+ else:
+ self.timer = rpki.async.timer()
+ self.sockets = {}
+ self.request = dns.message.make_query(self.qname, self.qtype, self.qclass)
+ if resolver.keyname is not None:
+ self.request.use_tsig(resolver.keyring, resolver.keyname, resolver.keyalgorithm)
+ self.request.use_edns(resolver.edns, resolver.ednsflags, resolver.payload)
+ self.response = None
+ self.backoff = 0.10
+ self.nameservers = nameservers[:]
+ self.loop1()
+
+ def loop1(self):
+ """
+ Outer loop. If we haven't got a response yet and still have
+ nameservers to check, start inner loop. Otherwise, we're done.
+ """
+ self.timer.cancel()
+ if self.response is None and self.nameservers:
+ self.iterator = rpki.async.iterator(self.nameservers[:], self.loop2, self.done2)
+ else:
+ self.done1()
+
+ def loop2(self, iterator, nameserver):
+ """
+ Inner loop. Send query to next nameserver in our list, unless
+ we've hit the overall timeout for this query.
+ """
+ self.timer.cancel()
+ try:
+ timeout = resolver._compute_timeout(self.start)
+ except dns.resolver.Timeout, e:
+ self.lose(e)
+ else:
+ af, addr = nameserver
+ if af not in self.sockets:
+ self.sockets[af] = dispatcher(self.socket_cb, self.socket_eb, af)
+ self.sockets[af].sendto(self.request.to_wire(),
+ (dns.inet.inet_ntop(af, addr), resolver.port))
+ self.timer.set_handler(self.socket_timeout)
+ self.timer.set_errback(self.socket_eb)
+ self.timer.set(rpki.sundial.timedelta(seconds = timeout))
+
+ def socket_timeout(self):
+ """
+ No answer from nameserver, move on to next one (inner loop).
+ """
+ self.response = None
+ self.iterator()
+
+ def socket_eb(self, e):
+ """
+ UDP socket signaled error. If it really is some kind of socket
+ error, handle as if we've timed out on this nameserver; otherwise,
+ pass error back to caller.
+ """
+ self.timer.cancel()
+ if isinstance(e, socket.error):
+ self.response = None
+ self.iterator()
+ else:
+ self.lose(e)
+
+ def socket_cb(self, af, from_host, from_port, wire):
+ """
+ Received a packet that might be a DNS message. If it doesn't look
+ like it came from one of our nameservers, just drop it and leave
+ the timer running. Otherwise, try parsing it: if it's an answer,
+ we're done, otherwise handle error appropriately and move on to
+ next nameserver.
+ """
+ sender = (af, dns.inet.inet_pton(af, from_host))
+ if from_port != resolver.port or sender not in self.nameservers:
+ return
+ self.timer.cancel()
+ try:
+ self.response = dns.message.from_wire(wire, keyring = self.request.keyring, request_mac = self.request.mac, one_rr_per_rrset = False)
+ except dns.exception.FormError:
+ self.nameservers.remove(sender)
+ else:
+ rcode = self.response.rcode()
+ if rcode in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN):
+ self.done1()
+ return
+ if rcode != dns.rcode.SERVFAIL:
+ self.nameservers.remove(sender)
+ self.response = None
+ self.iterator()
+
+ def done2(self):
+ """
+ Done with inner loop. If we still haven't got an answer and
+ haven't (yet?) eliminated all of our nameservers, wait a little
+ while before starting the cycle again, unless we've hit the
+ timeout threshold for the whole query.
+ """
+ if self.response is None and self.nameservers:
+ try:
+ delay = rpki.sundial.timedelta(seconds = min(resolver._compute_timeout(self.start), self.backoff))
+ self.backoff *= 2
+ self.timer.set_handler(self.loop1)
+ self.timer.set_errback(self.lose)
+ self.timer.set(delay)
+ except dns.resolver.Timeout, e:
+ self.lose(e)
+ else:
+ self.loop1()
+
+ def cleanup(self):
+ """
+ Shut down our timer and sockets.
+ """
+ self.timer.cancel()
+ for s in self.sockets.itervalues():
+ s.close()
+
+ def lose(self, e):
+ """
+ Something bad happened. Clean up, then pass error back to caller.
+ """
+ self.cleanup()
+ self.eb(self, e)
+
+ def done1(self):
+ """
+ Done with outer loop. If we got a useful answer, cache it, then
+ pass it back to caller; if we got an error, pass the appropriate
+ exception back to caller.
+ """
+ self.cleanup()
+ try:
+ if not self.nameservers:
+ raise dns.resolver.NoNameservers
+ if self.response.rcode() == dns.rcode.NXDOMAIN:
+ raise dns.resolver.NXDOMAIN
+ answer = dns.resolver.Answer(self.qname, self.qtype, self.qclass, self.response)
+ if resolver.cache:
+ resolver.cache.put((self.qname, self.qtype, self.qclass), answer)
+ self.cb(self, answer)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ self.lose(e)
+
+class getaddrinfo(object):
+
+ typemap = { dns.rdatatype.A : socket.AF_INET,
+ dns.rdatatype.AAAA : socket.AF_INET6 }
+
+ def __init__(self, cb, eb, host, address_families = typemap.values()):
+ self.cb = cb
+ self.eb = eb
+ self.host = host
+ self.result = []
+ self.queries = [query(self.done, self.lose, host, qtype)
+ for qtype in self.typemap
+ if self.typemap[qtype] in address_families]
+
+ def done(self, q, answer):
+ if answer is not None:
+ for a in answer:
+ self.result.append((self.typemap[a.rdtype], a.address))
+ self.queries.remove(q)
+ if not self.queries:
+ self.cb(self.result)
+
+ def lose(self, q, e):
+ if isinstance(e, dns.resolver.NoAnswer):
+ self.done(q, None)
+ else:
+ for q in self.queries:
+ q.cleanup()
+ self.eb(e)
+
+if __name__ == "__main__":
+
+ rpki.log.use_syslog = False
+ print "Some adns tests may take a minute or two, please be patient"
+
+ class test_getaddrinfo(object):
+
+ def __init__(self, qname):
+ self.qname = qname
+ getaddrinfo(self.done, self.lose, qname)
+
+ def done(self, result):
+ print "getaddrinfo(%s) returned: %s" % (
+ self.qname,
+ ", ".join(str(r) for r in result))
+
+ def lose(self, e):
+ print "getaddrinfo(%s) failed: %r" % (self.qname, e)
+
+ class test_query(object):
+
+ def __init__(self, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
+ self.qname = qname
+ self.qtype = qtype
+ self.qclass = qclass
+ query(self.done, self.lose, qname, qtype = qtype, qclass = qclass)
+
+ def done(self, q, result):
+ print "query(%s, %s, %s) returned: %s" % (
+ self.qname,
+ dns.rdatatype.to_text(self.qtype),
+ dns.rdataclass.to_text(self.qclass),
+ ", ".join(str(r) for r in result))
+
+ def lose(self, q, e):
+ print "getaddrinfo(%s, %s, %s) failed: %r" % (
+ self.qname,
+ dns.rdatatype.to_text(self.qtype),
+ dns.rdataclass.to_text(self.qclass),
+ e)
+
+ if True:
+ for qtype in (dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.HINFO):
+ test_query("subvert-rpki.hactrn.net", qtype)
+ test_query("nonexistant.rpki.net")
+ test_query("subvert-rpki.hactrn.net", qclass = dns.rdataclass.CH)
+
+ for host in ("subvert-rpki.hactrn.net", "nonexistant.rpki.net"):
+ test_getaddrinfo(host)
+
+ rpki.async.event_loop()
diff --git a/rpkid.without_tls/rpki/async.py b/rpkid.without_tls/rpki/async.py
new file mode 100644
index 00000000..5bff4d45
--- /dev/null
+++ b/rpkid.without_tls/rpki/async.py
@@ -0,0 +1,411 @@
+"""
+Utilities for event-driven programming.
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import asyncore, signal, traceback, gc, sys
+import rpki.log, rpki.sundial
+
+ExitNow = asyncore.ExitNow
+
+class iterator(object):
+ """
+ Iteration construct for event-driven code. Takes three
+ arguments:
+
+ - Some kind of iterable object
+
+ - A callback to call on each item in the iteration
+
+ - A callback to call after the iteration terminates.
+
+ The item callback receives two arguments: the callable iterator
+ object and the current value of the iteration. It should call the
+ iterator (or arrange for the iterator to be called) when it is time
+ to continue to the next item in the iteration.
+
+ The termination callback receives no arguments.
+ """
+
+ def __init__(self, iterable, item_callback, done_callback, unwind_stack = True):
+ self.item_callback = item_callback
+ self.done_callback = done_callback
+ self.caller_file, self.caller_line, self.caller_function = traceback.extract_stack(limit = 2)[0][0:3]
+ self.unwind_stack = unwind_stack
+ try:
+ self.iterator = iter(iterable)
+ except (ExitNow, SystemExit):
+ raise
+ except:
+ rpki.log.debug("Problem constructing iterator for %r" % (iterable,))
+ raise
+ self.doit()
+
+ def __repr__(self):
+ return ("<%s created at %s:%s %s at 0x%x>" %
+ (self.__class__.__name__,
+ self.caller_file, self.caller_line, self.caller_function, id(self)))
+
+ def __call__(self):
+ if self.unwind_stack:
+ defer(self.doit)
+ else:
+ self.doit()
+
+ def doit(self):
+ """
+ Implement the iterator protocol: attempt to call the item handler
+ with the next iteration value, call the termination handler if the
+ iterator signaled StopIteration.
+ """
+ try:
+ self.item_callback(self, self.iterator.next())
+ except StopIteration:
+ if self.done_callback is not None:
+ self.done_callback()
+
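+# Illustrative sketch (not part of the library): driving the iterator class
+# above over a plain Python list.  The item handler must call the iterator
+# object to advance; with the default unwind_stack setting each subsequent
+# step runs on a later pass through the event loop, so event_loop() (defined
+# below) is what actually finishes the iteration.
+
+def _example_iterator_usage():
+  def item_handler(it, item):
+    print "item:", item
+    it()                                # schedule the next iteration step
+  def done_handler():
+    print "iteration done"
+  iterator([1, 2, 3], item_handler, done_handler)
+  event_loop()
+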
+class timer(object):
+ """
+ Timer construct for event-driven code. It can be used in either of two ways:
+
+ - As a virtual class, in which case the subclass should provide a
+    handler() method to receive the wakeup event when the timer expires; or
+
+ - By setting an explicit handler callback, either via the
+ constructor or the set_handler() method.
+
+  Subclassing is probably more Pythonic, but setting an explicit
+  handler turns out to be very convenient when combined with bound
+  methods of other objects.
+ """
+
+ ## @var gc_debug
+  # Verbose chatter about timer states and garbage collection.
+ gc_debug = False
+
+ ## @var run_debug
+ # Verbose chatter about timers being run.
+ run_debug = False
+
+ ## @var queue
+ # Timer queue, shared by all timer instances (there can be only one queue).
+ queue = []
+
+ def __init__(self, handler = None, errback = None):
+ if handler is not None:
+ self.set_handler(handler)
+ if errback is not None:
+ self.set_errback(errback)
+ self.when = None
+ if self.gc_debug:
+ self.trace("Creating %r" % self)
+
+ def trace(self, msg):
+ """
+ Debug logging.
+ """
+ if self.gc_debug:
+ bt = traceback.extract_stack(limit = 3)
+ rpki.log.debug("%s from %s:%d" % (msg, bt[0][0], bt[0][1]))
+
+ def set(self, when):
+ """
+    Set a timer.  The argument can be a datetime, to specify an absolute
+    time, or a timedelta, to specify an offset relative to the current time.
+ """
+ if self.gc_debug:
+ self.trace("Setting %r to %r" % (self, when))
+ if isinstance(when, rpki.sundial.timedelta):
+ self.when = rpki.sundial.now() + when
+ else:
+ self.when = when
+ assert isinstance(self.when, rpki.sundial.datetime), "%r: Expecting a datetime, got %r" % (self, self.when)
+ if self not in self.queue:
+ self.queue.append(self)
+ self.queue.sort(key = lambda x: x.when)
+
+ def __cmp__(self, other):
+ return cmp(id(self), id(other))
+
+ if gc_debug:
+ def __del__(self):
+ rpki.log.debug("Deleting %r" % self)
+
+ def cancel(self):
+ """
+ Cancel a timer, if it was set.
+ """
+ if self.gc_debug:
+ self.trace("Canceling %r" % self)
+ try:
+ while True:
+ self.queue.remove(self)
+ except ValueError:
+ pass
+
+ def is_set(self):
+ """Test whether this timer is currently set."""
+ return self in self.queue
+
+ def handler(self):
+ """
+    Handle a timer that has expired.  This must either be overridden by
+ a subclass or set dynamically by set_handler().
+ """
+ raise NotImplementedError
+
+ def set_handler(self, handler):
+ """
+ Set timer's expiration handler. This is an alternative to
+ subclassing the timer class, and may be easier to use when
+    integrating timers into other classes (e.g., the handler can be a
+    bound method of an object representing a network connection).
+ """
+ self.handler = handler
+
+ def errback(self, e):
+ """
+ Error callback. May be overridden, or set with set_errback().
+ """
+ rpki.log.error("Unhandled exception from timer: %s" % e)
+ rpki.log.traceback()
+
+ def set_errback(self, errback):
+ """Set a timer's errback. Like set_handler(), for errbacks."""
+ self.errback = errback
+
+ @classmethod
+ def runq(cls):
+ """
+ Run the timer queue: for each timer whose call time has passed,
+ pull the timer off the queue and call its handler() method.
+ """
+ while cls.queue and rpki.sundial.now() >= cls.queue[0].when:
+ t = cls.queue.pop(0)
+ if cls.run_debug:
+ rpki.log.debug("Running %r" % t)
+ try:
+ t.handler()
+ except (ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ t.errback(e)
+
+ def __repr__(self):
+ return "<%s %r %r at 0x%x>" % (self.__class__.__name__, self.when, self.handler, id(self))
+
+ @classmethod
+ def seconds_until_wakeup(cls):
+ """
+ Calculate delay until next timer expires, or None if no timers are
+ set and we should wait indefinitely. Rounds up to avoid spinning
+ in select() or poll(). We could calculate fractional seconds in
+ the right units instead, but select() and poll() don't even take
+ the same units (argh!), and we're not doing anything that
+ hair-triggered, so rounding up is simplest.
+ """
+ if not cls.queue:
+ return None
+ now = rpki.sundial.now()
+ if now >= cls.queue[0].when:
+ return 0
+ delay = cls.queue[0].when - now
+ seconds = delay.convert_to_seconds()
+ if delay.microseconds:
+ seconds += 1
+ return seconds
+
+ @classmethod
+ def clear(cls):
+ """
+ Cancel every timer on the queue. We could just throw away the
+ queue content, but this way we can notify subclasses that provide
+ their own cancel() method.
+ """
+ while cls.queue:
+ cls.queue.pop(0).cancel()
+
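+# Illustrative sketch (not part of the library): a one-shot timer using the
+# set-a-handler style described above rather than subclassing.  The one
+# second delay is arbitrary.
+
+def _example_timer_usage():
+  def wakeup():
+    print "timer fired at", rpki.sundial.now()
+  t = timer(handler = wakeup)
+  t.set(rpki.sundial.timedelta(seconds = 1))
+  event_loop()
+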
+## @var deferred_queue
+# List to hold deferred actions. We used to do this with the timer
+# queue, but that appears to confuse the garbage collector, and is
+# overengineering for simple deferred actions in any case.
+
+deferred_queue = []
+
+def defer(thunk):
+ """
+ Defer an action until the next pass through the event loop.
+ """
+ deferred_queue.append(thunk)
+
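+# Illustrative sketch (not part of the library): defer a single action, then
+# let event_loop() (defined below) drain the deferred queue and return.
+
+def _example_defer_usage():
+  defer(lambda: sys.stdout.write("deferred action ran\n"))
+  event_loop()
+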
+def run_deferred():
+ """
+ Run deferred actions.
+ """
+ while deferred_queue:
+ try:
+ deferred_queue.pop(0)()
+ except (ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ rpki.log.error("Unhandled exception from deferred action: %s" % e)
+ rpki.log.traceback()
+
+def _raiseExitNow(signum, frame):
+ """Signal handler for event_loop()."""
+ raise ExitNow
+
+def event_loop(catch_signals = (signal.SIGINT, signal.SIGTERM)):
+ """
+ Replacement for asyncore.loop(), adding timer and signal support.
+ """
+ while True:
+ old_signal_handlers = {}
+ try:
+ for sig in catch_signals:
+ old_signal_handlers[sig] = signal.signal(sig, _raiseExitNow)
+ while asyncore.socket_map or deferred_queue or timer.queue:
+ run_deferred()
+ asyncore.poll(timer.seconds_until_wakeup(), asyncore.socket_map)
+ run_deferred()
+ timer.runq()
+ if timer.gc_debug:
+ gc.collect()
+ if gc.garbage:
+ for i in gc.garbage:
+ rpki.log.debug("GC-cycle %r" % i)
+ del gc.garbage[:]
+ except ExitNow:
+ break
+ except SystemExit:
+ raise
+ except Exception, e:
+ rpki.log.error("event_loop() exited with exception %r, this is not supposed to happen, restarting" % e)
+ else:
+ break
+ finally:
+ for sig in old_signal_handlers:
+ signal.signal(sig, old_signal_handlers[sig])
+
+class sync_wrapper(object):
+ """
+ Synchronous wrapper around asynchronous functions. Running in
+ asynchronous mode at all times makes sense for event-driven daemons,
+ but is kind of tedious for simple scripts, hence this wrapper.
+
+ The wrapped function should take at least two arguments: a callback
+ function and an errback function. If any arguments are passed to
+ the wrapper, they will be passed as additional arguments to the
+ wrapped function.
+ """
+
+ res = None
+ err = None
+
+ def __init__(self, func):
+ self.func = func
+
+ def cb(self, res = None):
+ """
+ Wrapped code has requested normal termination. Store result, and
+ exit the event loop.
+ """
+ self.res = res
+ raise ExitNow
+
+ def eb(self, err):
+ """
+ Wrapped code raised an exception. Store exception data, then exit
+ the event loop.
+ """
+ exc_info = sys.exc_info()
+ self.err = exc_info if exc_info[1] is err else err
+ raise ExitNow
+
+ def __call__(self, *args, **kwargs):
+
+ def thunk():
+ """
+      Deferred action to call the wrapped code once the event system is
+ running.
+ """
+ try:
+ self.func(self.cb, self.eb, *args, **kwargs)
+ except ExitNow:
+ raise
+ except Exception, e:
+ self.eb(e)
+
+ defer(thunk)
+ event_loop()
+ if self.err is not None:
+ if isinstance(self.err, tuple):
+ raise self.err[0], self.err[1], self.err[2]
+ else:
+ raise self.err
+ else:
+ return self.res
+
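+# Illustrative sketch (not part of the library): wrapping a toy asynchronous
+# function so that a simple script can call it synchronously.  async_add is
+# a made-up stand-in for a real callback/errback-style function; sync_wrapper
+# supplies the cb and eb arguments itself.
+
+def _example_sync_wrapper_usage():
+  def async_add(cb, eb, x, y):
+    # A real asynchronous function would defer its work; here we just
+    # deliver the result immediately via the callback.
+    cb(x + y)
+  add = sync_wrapper(async_add)
+  print "2 + 3 =", add(2, 3)
+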
+def exit_event_loop():
+ """Force exit from event_loop()."""
+ raise ExitNow
+
+class gc_summary(object):
+ """
+ Periodic summary of GC state, for tracking down memory bloat.
+ """
+
+ def __init__(self, interval, threshold = 0):
+ if isinstance(interval, (int, long)):
+ interval = rpki.sundial.timedelta(seconds = interval)
+ self.interval = interval
+ self.threshold = threshold
+ self.timer = timer(handler = self.handler)
+ self.timer.set(self.interval)
+
+ def handler(self):
+ """
+ Collect and log GC state for this period, reset timer.
+ """
+ rpki.log.debug("gc_summary: Running gc.collect()")
+ gc.collect()
+ rpki.log.debug("gc_summary: Summarizing (threshold %d)" % self.threshold)
+ total = {}
+ tuples = {}
+ for g in gc.get_objects():
+ k = type(g).__name__
+ total[k] = total.get(k, 0) + 1
+ if isinstance(g, tuple):
+ k = ", ".join(type(x).__name__ for x in g)
+ tuples[k] = tuples.get(k, 0) + 1
+ rpki.log.debug("gc_summary: Sorting result")
+ total = total.items()
+ total.sort(reverse = True, key = lambda x: x[1])
+ tuples = tuples.items()
+ tuples.sort(reverse = True, key = lambda x: x[1])
+ rpki.log.debug("gc_summary: Object type counts in descending order")
+ for name, count in total:
+ if count > self.threshold:
+ rpki.log.debug("gc_summary: %8d %s" % (count, name))
+ rpki.log.debug("gc_summary: Tuple content type signature counts in descending order")
+ for types, count in tuples:
+ if count > self.threshold:
+ rpki.log.debug("gc_summary: %8d (%s)" % (count, types))
+ rpki.log.debug("gc_summary: Scheduling next cycle")
+ self.timer.set(self.interval)
diff --git a/rpkid.without_tls/rpki/cli.py b/rpkid.without_tls/rpki/cli.py
new file mode 100644
index 00000000..d31f7204
--- /dev/null
+++ b/rpkid.without_tls/rpki/cli.py
@@ -0,0 +1,176 @@
+"""
+Utilities for writing command line tools.
+
+$Id$
+
+Copyright (C) 2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import cmd, glob, os.path, traceback
+
+try:
+ import readline
+ have_readline = True
+except ImportError:
+ have_readline = False
+
+class Cmd(cmd.Cmd):
+ """
+  Customized subclass of the Python cmd module.
+ """
+
+ emptyline_repeats_last_command = False
+
+ EOF_exits_command_loop = True
+
+ identchars = cmd.IDENTCHARS + "/-."
+
+ histfile = None
+
+ def __init__(self, argv = None):
+ cmd.Cmd.__init__(self)
+ if argv:
+ self.onecmd(" ".join(argv))
+ else:
+ self.cmdloop_with_history()
+
+ def onecmd(self, line):
+ """
+ Wrap error handling around cmd.Cmd.onecmd(). Might want to do
+ something kinder than showing a traceback, eventually.
+ """
+ try:
+ return cmd.Cmd.onecmd(self, line)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+
+ def do_EOF(self, arg):
+ """
+ Exit program.
+ """
+ if self.EOF_exits_command_loop and self.prompt:
+ print
+ return self.EOF_exits_command_loop
+
+ def do_exit(self, arg):
+ """
+ Exit program.
+ """
+ return True
+
+ do_quit = do_exit
+
+ def emptyline(self):
+ """
+    Handle an empty line.  The cmd module default is to repeat the last
+    command, which I find to be a violation of the principle of least
+    astonishment, so my preference is that an empty line does nothing.
+ """
+ if self.emptyline_repeats_last_command:
+ cmd.Cmd.emptyline(self)
+
+ def filename_complete(self, text, line, begidx, endidx):
+ """
+ Filename completion handler, with hack to restore what I consider
+ the normal (bash-like) behavior when one hits the completion key
+ and there's only one match.
+ """
+ result = glob.glob(text + "*")
+ if len(result) == 1:
+ path = result.pop()
+ if os.path.isdir(path) or (os.path.islink(path) and os.path.isdir(os.path.join(path, "."))):
+ result.append(path + os.path.sep)
+ else:
+ result.append(path + " ")
+ return result
+
+ def completenames(self, text, *ignored):
+ """
+ Command name completion handler, with hack to restore what I
+ consider the normal (bash-like) behavior when one hits the
+ completion key and there's only one match.
+ """
+ result = set(cmd.Cmd.completenames(self, text, *ignored))
+ if len(result) == 1:
+ result.add(result.pop() + " ")
+ return list(result)
+
+ def help_help(self):
+ """
+ Type "help [topic]" for help on a command,
+ or just "help" for a list of commands.
+ """
+ self.stdout.write(self.help_help.__doc__ + "\n")
+
+ def complete_help(self, *args):
+ """
+ Better completion function for help command arguments.
+ """
+ text = args[0]
+ names = self.get_names()
+ result = []
+ for prefix in ("do_", "help_"):
+ result.extend(s[len(prefix):] for s in names if s.startswith(prefix + text))
+ return result
+
+ if have_readline:
+
+ def cmdloop_with_history(self):
+ """
+ Better command loop, with history file and tweaked readline
+ completion delimiters.
+ """
+ old_completer_delims = readline.get_completer_delims()
+ if self.histfile is not None:
+ try:
+ readline.read_history_file(self.histfile)
+ except IOError:
+ pass
+ try:
+ readline.set_completer_delims("".join(set(old_completer_delims) - set(self.identchars)))
+ self.cmdloop()
+ finally:
+ if self.histfile is not None and readline.get_current_history_length():
+ readline.write_history_file(self.histfile)
+ readline.set_completer_delims(old_completer_delims)
+
+ else:
+
+ cmdloop_with_history = cmd.Cmd.cmdloop
+
+
+
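+# Illustrative sketch (not part of the library): a minimal subclass of the
+# Cmd class above.  Each do_* method becomes a command and its docstring is
+# the help text; the class and command names here are made up.
+
+class _example_cli(Cmd):
+
+  prompt = "example> "
+
+  def do_echo(self, arg):
+    """Echo back whatever arguments were given."""
+    print arg
+
+# Running _example_cli() with no arguments enters the interactive loop;
+# _example_cli(["echo", "hello"]) runs a single command and returns.
+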
+def yes_or_no(prompt, default = None, require_full_word = False):
+ """
+ Ask a yes-or-no question.
+ """
+ prompt = prompt.rstrip() + _yes_or_no_prompts[default]
+ while True:
+ answer = raw_input(prompt).strip().lower()
+ if not answer and default is not None:
+ return default
+ if answer == "yes" or (not require_full_word and answer.startswith("y")):
+ return True
+ if answer == "no" or (not require_full_word and answer.startswith("n")):
+ return False
+ print 'Please answer "yes" or "no"'
+
+_yes_or_no_prompts = {
+ True : ' ("yes" or "no" ["yes"]) ',
+ False : ' ("yes" or "no" ["no"]) ',
+ None : ' ("yes" or "no") ' }
+
diff --git a/rpkid.without_tls/rpki/config.py b/rpkid.without_tls/rpki/config.py
new file mode 100644
index 00000000..2bdc160c
--- /dev/null
+++ b/rpkid.without_tls/rpki/config.py
@@ -0,0 +1,224 @@
+"""
+Configuration file parsing utilities, layered on top of stock Python
+ConfigParser module.
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import ConfigParser, os, re
+
+class parser(object):
+ """
+ Extensions to stock Python ConfigParser:
+
+ Read config file and set default section while initializing parser object.
+
+ Support for OpenSSL-style subscripted options and a limited form of
+ OpenSSL-style indirect variable references (${section::option}).
+
+ get-methods with default values and default section name.
+ """
+
+ def __init__(self, filename, section = None, allow_missing = False):
+ """
+ Initialize this parser.
+ """
+
+ self.filename = filename
+ self.cfg = ConfigParser.RawConfigParser()
+ try:
+ self.cfg.readfp(open(filename), filename)
+ except IOError:
+ if not allow_missing:
+ raise
+ self.default_section = section
+
+ def has_section(self, section):
+ """
+ Test whether a section exists.
+ """
+
+ return self.cfg.has_section(section)
+
+ def has_option(self, option, section = None):
+ """
+ Test whether an option exists.
+ """
+
+ if section is None:
+ section = self.default_section
+ return self.cfg.has_option(section, option)
+
+ def multiget(self, option, section = None):
+ """
+ Parse OpenSSL-style foo.0, foo.1, ... subscripted options.
+
+ Returns a list of values matching the specified option name.
+ """
+
+ matches = []
+ if section is None:
+ section = self.default_section
+ if self.cfg.has_option(section, option):
+ matches.append((-1, self.get(option, section = section)))
+ for key, value in self.cfg.items(section):
+ s = key.rsplit(".", 1)
+ if len(s) == 2 and s[0] == option and s[1].isdigit():
+        matches.append((int(s[1]), self.get(key, section = section)))
+ matches.sort()
+ return [match[1] for match in matches]
+
+ _regexp = re.compile("\\${(.*?)::(.*?)}")
+
+ def _repl(self, m):
+ """
+ Replacement function for indirect variable substitution.
+ This is intended for use with re.subn().
+ """
+ section, option = m.group(1, 2)
+ if section == "ENV":
+ return os.getenv(option, "")
+ else:
+ return self.cfg.get(section, option)
+
+ def get(self, option, default = None, section = None):
+ """
+ Get an option, perhaps with a default value.
+ """
+ if section is None:
+ section = self.default_section
+ if default is not None and not self.cfg.has_option(section, option):
+ return default
+ val = self.cfg.get(section, option)
+ while True:
+ val, modified = self._regexp.subn(self._repl, val, 1)
+ if not modified:
+ return val
+
+ def getboolean(self, option, default = None, section = None):
+ """
+ Get a boolean option, perhaps with a default value.
+ """
+ v = self.get(option, default, section)
+ if isinstance(v, str):
+ v = v.lower()
+ if v not in self.cfg._boolean_states:
+ raise ValueError, "Not a boolean: %s" % v
+ v = self.cfg._boolean_states[v]
+ return v
+
+ def getint(self, option, default = None, section = None):
+ """
+ Get an integer option, perhaps with a default value.
+ """
+ return int(self.get(option, default, section))
+
+ def getlong(self, option, default = None, section = None):
+ """
+ Get a long integer option, perhaps with a default value.
+ """
+ return long(self.get(option, default, section))
+
+ def set_global_flags(self):
+ """
+ Consolidated control for all the little global control flags
+ scattered through the libraries. This isn't a particularly good
+ place for this function to live, but it has to live somewhere and
+ making it a method of the config parser from which it gets all of
+ its data is less silly than the available alternatives.
+ """
+
+ import rpki.http, rpki.x509, rpki.sql, rpki.async
+
+ try:
+ rpki.http.debug_http = self.getboolean("debug_http")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.http.want_persistent_client = self.getboolean("want_persistent_client")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.http.want_persistent_server = self.getboolean("want_persistent_server")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.http.use_adns = self.getboolean("use_adns")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.http.enable_ipv6_clients = self.getboolean("enable_ipv6_clients")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.http.enable_ipv6_servers = self.getboolean("enable_ipv6_servers")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.sql.sql_persistent.sql_debug = self.getboolean("sql_debug")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.async.timer.gc_debug = self.getboolean("gc_debug")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.async.timer.run_debug = self.getboolean("timer_debug")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms"))
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms"))
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.async.gc_summary(self.getint("gc_summary"), self.getint("gc_summary_threshold", 0))
+ except ConfigParser.NoOptionError:
+ pass
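+
+# Illustrative sketch (not part of the library): exercising the subscripted
+# option and ${section::option} indirection support described in the class
+# docstring above.  The section and option names are made up for the example.
+
+def _example_parser_usage():
+  import tempfile
+  tmp = tempfile.NamedTemporaryFile(suffix = ".conf")
+  tmp.write("[example]\n"
+            "server-host  = localhost\n"
+            "rsync-uri    = rsync://repo.example.org/\n"
+            "rsync-uri.0  = rsync://repo0.example.org/\n"
+            "rsync-uri.1  = rsync://repo1.example.org/\n"
+            "log-file     = ${ENV::HOME}/example.log\n")
+  tmp.flush()
+  cfg = parser(tmp.name, "example")
+  print cfg.get("server-host")                    # "localhost"
+  print cfg.get("port", default = "443")          # falls back to the default
+  print cfg.multiget("rsync-uri")                 # base value, then .0 and .1 in order
+  print cfg.get("log-file")                       # ${ENV::HOME} expanded from the environment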
diff --git a/rpkid.without_tls/rpki/exceptions.py b/rpkid.without_tls/rpki/exceptions.py
new file mode 100644
index 00000000..f57c679d
--- /dev/null
+++ b/rpkid.without_tls/rpki/exceptions.py
@@ -0,0 +1,328 @@
+"""
+Exception definitions for RPKI modules.
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+class RPKI_Exception(Exception):
+ """
+ Base class for RPKI exceptions.
+ """
+
+class NotInDatabase(RPKI_Exception):
+ """
+ Lookup failed for an object expected to be in the database.
+ """
+
+class BadURISyntax(RPKI_Exception):
+ """
+ Illegal syntax for a URI.
+ """
+
+class BadStatusCode(RPKI_Exception):
+ """
+ Unrecognized protocol status code.
+ """
+
+class BadQuery(RPKI_Exception):
+ """
+ Unexpected protocol query.
+ """
+
+class DBConsistancyError(RPKI_Exception):
+ """
+  Found multiple matches for a database query that should never
+  return more than one match.
+ """
+
+class CMSVerificationFailed(RPKI_Exception):
+ """
+ Verification of a CMS message failed.
+ """
+
+class HTTPRequestFailed(RPKI_Exception):
+ """
+ HTTP request failed.
+ """
+
+class DERObjectConversionError(RPKI_Exception):
+ """
+ Error trying to convert a DER-based object from one representation
+ to another.
+ """
+
+class NotACertificateChain(RPKI_Exception):
+ """
+ Certificates don't form a proper chain.
+ """
+
+class BadContactURL(RPKI_Exception):
+ """
+ Error trying to parse contact URL.
+ """
+
+class BadClassNameSyntax(RPKI_Exception):
+ """
+ Illegal syntax for a class_name.
+ """
+
+class BadIssueResponse(RPKI_Exception):
+ """
+ issue_response PDU with wrong number of classes or certificates.
+ """
+
+class NotImplementedYet(RPKI_Exception):
+ """
+ Internal error -- not implemented yet.
+ """
+
+class BadPKCS10(RPKI_Exception):
+ """
+ Bad PKCS #10 object.
+ """
+
+class UpstreamError(RPKI_Exception):
+ """
+ Received an error from upstream.
+ """
+
+class ChildNotFound(RPKI_Exception):
+ """
+ Could not find specified child in database.
+ """
+
+class BSCNotFound(RPKI_Exception):
+ """
+ Could not find specified BSC in database.
+ """
+
+class BadSender(RPKI_Exception):
+ """
+ Unexpected XML sender value.
+ """
+
+class ClassNameMismatch(RPKI_Exception):
+ """
+ class_name does not match child context.
+ """
+
+class ClassNameUnknown(RPKI_Exception):
+ """
+ Unknown class_name.
+ """
+
+class SKIMismatch(RPKI_Exception):
+ """
+ SKI value in response does not match request.
+ """
+
+class SubprocessError(RPKI_Exception):
+ """
+ Subprocess returned unexpected error.
+ """
+
+class BadIRDBReply(RPKI_Exception):
+ """
+ Unexpected reply to IRDB query.
+ """
+
+class NotFound(RPKI_Exception):
+ """
+ Object not found in database.
+ """
+
+class MustBePrefix(RPKI_Exception):
+ """
+ Resource range cannot be expressed as a prefix.
+ """
+
+class TLSValidationError(RPKI_Exception):
+ """
+ TLS certificate validation error.
+ """
+
+class MultipleTLSEECert(TLSValidationError):
+ """
+ Received more than one TLS EE certificate.
+ """
+
+class ReceivedTLSCACert(TLSValidationError):
+ """
+ Received CA certificate via TLS.
+ """
+
+class WrongEContentType(RPKI_Exception):
+ """
+ Received wrong CMS eContentType.
+ """
+
+class EmptyPEM(RPKI_Exception):
+ """
+ Couldn't find PEM block to convert.
+ """
+
+class UnexpectedCMSCerts(RPKI_Exception):
+ """
+ Received CMS certs when not expecting any.
+ """
+
+class UnexpectedCMSCRLs(RPKI_Exception):
+ """
+ Received CMS CRLs when not expecting any.
+ """
+
+class MissingCMSEEcert(RPKI_Exception):
+ """
+ Didn't receive CMS EE cert when expecting one.
+ """
+
+class MissingCMSCRL(RPKI_Exception):
+ """
+ Didn't receive CMS CRL when expecting one.
+ """
+
+class UnparsableCMSDER(RPKI_Exception):
+ """
+ Alleged CMS DER wasn't parsable.
+ """
+
+class CMSCRLNotSet(RPKI_Exception):
+ """
+ CMS CRL has not been configured.
+ """
+
+class ServerShuttingDown(RPKI_Exception):
+ """
+ Server is shutting down.
+ """
+
+class NoActiveCA(RPKI_Exception):
+ """
+ No active ca_detail for specified class.
+ """
+
+class BadClientURL(RPKI_Exception):
+ """
+ URL given to HTTP client does not match profile.
+ """
+
+class ClientNotFound(RPKI_Exception):
+ """
+ Could not find specified client in database.
+ """
+
+class BadExtension(RPKI_Exception):
+ """
+ Forbidden X.509 extension.
+ """
+
+class ForbiddenURI(RPKI_Exception):
+ """
+ Forbidden URI, does not start with correct base URI.
+ """
+
+class HTTPClientAborted(RPKI_Exception):
+ """
+ HTTP client connection closed while in request-sent state.
+ """
+
+class BadPublicationReply(RPKI_Exception):
+ """
+ Unexpected reply to publication query.
+ """
+
+class DuplicateObject(RPKI_Exception):
+ """
+ Attempt to create an object that already exists.
+ """
+
+class EmptyROAPrefixList(RPKI_Exception):
+ """
+ Can't create ROA with an empty prefix list.
+ """
+
+class NoCoveringCertForROA(RPKI_Exception):
+ """
+ Couldn't find a covering certificate to generate ROA.
+ """
+
+class BSCNotReady(RPKI_Exception):
+ """
+ BSC not yet in a usable state, signing_cert not set.
+ """
+
+class HTTPUnexpectedState(RPKI_Exception):
+ """
+ HTTP event occurred in an unexpected state.
+ """
+
+class HTTPBadVersion(RPKI_Exception):
+ """
+ HTTP couldn't parse HTTP version.
+ """
+
+class HandleTranslationError(RPKI_Exception):
+ """
+ Internal error translating protocol handle -> SQL id.
+ """
+
+class NoObjectAtURI(RPKI_Exception):
+ """
+ No object published at specified URI.
+ """
+
+class CMSContentNotSet(RPKI_Exception):
+ """
+ Inner content of a CMS_object has not been set. If object is known
+ to be valid, the .extract() method should be able to set the
+ content; otherwise, only the .verify() method (which checks
+ signatures) is safe.
+ """
+
+class HTTPTimeout(RPKI_Exception):
+ """
+ HTTP connection timed out.
+ """
+
+class BadIPResource(RPKI_Exception):
+ """
+ Parse failure for alleged IP resource string.
+ """
+
+class BadROAPrefix(RPKI_Exception):
+ """
+ Parse failure for alleged ROA prefix string.
+ """
+
+class CommandParseFailure(RPKI_Exception):
+ """
+ Failed to parse command line.
+ """
diff --git a/rpkid.without_tls/rpki/http.py b/rpkid.without_tls/rpki/http.py
new file mode 100644
index 00000000..ec6200bb
--- /dev/null
+++ b/rpkid.without_tls/rpki/http.py
@@ -0,0 +1,979 @@
+"""
+HTTP utilities, both client and server.
+
+$Id$
+
+Copyright (C) 2009-2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import time, socket, asyncore, asynchat, urlparse, sys, random
+import rpki.async, rpki.sundial, rpki.x509, rpki.exceptions, rpki.log
+import POW
+
+## @var rpki_content_type
+# HTTP content type used for all RPKI messages.
+rpki_content_type = "application/x-rpki"
+
+## @var debug_http
+# Verbose chatter about HTTP streams.
+debug_http = False
+
+## @var want_persistent_client
+# Whether we want persistent HTTP client streams, when server also supports them.
+want_persistent_client = False
+
+## @var want_persistent_server
+# Whether we want persistent HTTP server streams, when client also supports them.
+want_persistent_server = False
+
+## @var default_client_timeout
+# Default HTTP client connection timeout.
+default_client_timeout = rpki.sundial.timedelta(minutes = 15)
+
+## @var default_server_timeout
+# Default HTTP server connection timeout.  Given our druthers, we'd
+# prefer that the client close the connection, as this avoids the
+# problem of the client starting to reuse a connection just as the
+# server closes it, so this should be longer than the client timeout.
+default_server_timeout = rpki.sundial.timedelta(minutes = 20)
+
+## @var default_http_version
+# Preferred HTTP version.
+default_http_version = (1, 0)
+
+## @var default_tcp_port
+# Default port for clients and servers that don't specify one.
+default_tcp_port = 443
+
+## @var enable_ipv6_servers
+# Whether to enable IPv6 listeners. Enabled by default, as it should
+# be harmless. Has no effect if kernel doesn't support IPv6.
+enable_ipv6_servers = True
+
+## @var enable_ipv6_clients
+# Whether to consider IPv6 addresses when making connections.
+# Disabled by default, as IPv6 connectivity is still a bad joke in
+# far too much of the world.
+enable_ipv6_clients = False
+
+## @var use_adns
+# Whether to use rpki.adns code. This is still experimental, so it's
+# not (yet) enabled by default.
+use_adns = False
+
+## @var have_ipv6
+# Whether the current machine claims to support IPv6. Note that just
+# because the kernel supports it doesn't mean that the machine has
+# usable IPv6 connectivity. I don't know of a simple portable way to
+# probe for connectivity at runtime (the old test of "can you ping
+# SRI-NIC.ARPA?" seems a bit dated...). Don't set this, it's set
+# automatically by probing using the socket() system call at runtime.
+try:
+ socket.socket(socket.AF_INET6).close()
+ socket.IPPROTO_IPV6
+ socket.IPV6_V6ONLY
+except:
+ have_ipv6 = False
+else:
+ have_ipv6 = True
+
+def supported_address_families(enable_ipv6):
+ """
+ IP address families on which servers should listen, and to consider
+ when selecting addresses for client connections.
+ """
+ if enable_ipv6 and have_ipv6:
+ return (socket.AF_INET, socket.AF_INET6)
+ else:
+ return (socket.AF_INET,)
+
+def localhost_addrinfo():
+ """
+ Return pseudo-getaddrinfo results for localhost.
+ """
+ result = [(socket.AF_INET, "127.0.0.1")]
+ if enable_ipv6_clients and have_ipv6:
+ result.append((socket.AF_INET6, "::1"))
+ return result
+
+class http_message(object):
+ """
+  Virtual class representing one HTTP message.
+ """
+
+ software_name = "ISC RPKI library"
+
+ def __init__(self, version = None, body = None, headers = None):
+ self.version = version
+ self.body = body
+ self.headers = headers
+ self.normalize_headers()
+
+ def normalize_headers(self, headers = None):
+ """
+ Clean up (some of) the horrible messes that HTTP allows in its
+ headers.
+ """
+ if headers is None:
+ headers = () if self.headers is None else self.headers.items()
+ translate_underscore = True
+ else:
+ translate_underscore = False
+ result = {}
+ for k, v in headers:
+ if translate_underscore:
+ k = k.replace("_", "-")
+ k = "-".join(s.capitalize() for s in k.split("-"))
+ v = v.strip()
+ if k in result:
+ result[k] += ", " + v
+ else:
+ result[k] = v
+ self.headers = result
+
+ @classmethod
+ def parse_from_wire(cls, headers):
+ """
+ Parse and normalize an incoming HTTP message.
+ """
+ self = cls()
+ headers = headers.split("\r\n")
+ self.parse_first_line(*headers.pop(0).split(None, 2))
+ for i in xrange(len(headers) - 2, -1, -1):
+ if headers[i + 1][0].isspace():
+ headers[i] += headers[i + 1]
+ del headers[i + 1]
+ self.normalize_headers([h.split(":", 1) for h in headers])
+ return self
+
+ def format(self):
+ """
+ Format an outgoing HTTP message.
+ """
+ s = self.format_first_line()
+ if self.body is not None:
+ assert isinstance(self.body, str)
+ self.headers["Content-Length"] = len(self.body)
+ for kv in self.headers.iteritems():
+ s += "%s: %s\r\n" % kv
+ s += "\r\n"
+ if self.body is not None:
+ s += self.body
+ return s
+
+ def __str__(self):
+ return self.format()
+
+ def parse_version(self, version):
+ """
+ Parse HTTP version, raise an exception if we can't.
+ """
+ if version[:5] != "HTTP/":
+ raise rpki.exceptions.HTTPBadVersion, "Couldn't parse version %s" % version
+ self.version = tuple(int(i) for i in version[5:].split("."))
+
+ def persistent(self):
+ """
+ Figure out whether this HTTP message encourages a persistent connection.
+ """
+ c = self.headers.get("Connection")
+ if self.version == (1, 1):
+ return c is None or "close" not in c.lower()
+ elif self.version == (1, 0):
+ return c is not None and "keep-alive" in c.lower()
+ else:
+ return False
+
+class http_request(http_message):
+ """
+ HTTP request message.
+ """
+
+ def __init__(self, cmd = None, path = None, version = default_http_version, body = None, callback = None, errback = None, **headers):
+ assert cmd == "POST" or body is None
+ http_message.__init__(self, version = version, body = body, headers = headers)
+ self.cmd = cmd
+ self.path = path
+ self.callback = callback
+ self.errback = errback
+ self.retried = False
+
+ def parse_first_line(self, cmd, path, version):
+ """
+ Parse first line of HTTP request message.
+ """
+ self.parse_version(version)
+ self.cmd = cmd
+ self.path = path
+
+ def format_first_line(self):
+ """
+ Format first line of HTTP request message, and set up the
+ User-Agent header.
+ """
+ self.headers.setdefault("User-Agent", self.software_name)
+ return "%s %s HTTP/%d.%d\r\n" % (self.cmd, self.path, self.version[0], self.version[1])
+
+class http_response(http_message):
+ """
+ HTTP response message.
+ """
+
+ def __init__(self, code = None, reason = None, version = default_http_version, body = None, **headers):
+ http_message.__init__(self, version = version, body = body, headers = headers)
+ self.code = code
+ self.reason = reason
+
+ def parse_first_line(self, version, code, reason):
+ """
+ Parse first line of HTTP response message.
+ """
+ self.parse_version(version)
+ self.code = int(code)
+ self.reason = reason
+
+ def format_first_line(self):
+ """
+ Format first line of HTTP response message, and set up Date and
+ Server headers.
+ """
+ self.headers.setdefault("Date", time.strftime("%a, %d %b %Y %T GMT"))
+ self.headers.setdefault("Server", self.software_name)
+ return "HTTP/%d.%d %s %s\r\n" % (self.version[0], self.version[1], self.code, self.reason)
+
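+# Illustrative sketch (not part of the library): building an outbound POST
+# with the request class above, then re-parsing its header block the way a
+# stream parser would after seeing the blank line.  The path and body here
+# are made up.
+
+def _example_http_message_roundtrip():
+  req = http_request(cmd = "POST", path = "/left-right", body = "<msg/>",
+                     Content_Type = rpki_content_type)
+  wire = req.format()
+  head, sep, body = wire.partition("\r\n\r\n")
+  parsed = http_request.parse_from_wire(head)
+  print parsed.cmd, parsed.path
+  print parsed.headers["Content-Type"], parsed.headers["Content-Length"]
+  print repr(body)
+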
+def log_method(self, msg, logger = rpki.log.debug):
+ """
+ Logging method used in several different classes.
+ """
+ assert isinstance(logger, rpki.log.logger)
+ if debug_http or logger is not rpki.log.debug:
+ logger("%r: %s" % (self, msg))
+
+class http_stream(asynchat.async_chat):
+ """
+ Virtual class representing an HTTP message stream.
+ """
+
+ log = log_method
+
+ def __init__(self, sock = None):
+ asynchat.async_chat.__init__(self, sock)
+ self.buffer = []
+ self.timer = rpki.async.timer(self.handle_timeout)
+ self.restart()
+
+ def restart(self):
+ """
+ (Re)start HTTP message parser, reset timer.
+ """
+ assert not self.buffer
+ self.chunk_handler = None
+ self.set_terminator("\r\n\r\n")
+ self.update_timeout()
+
+ def update_timeout(self):
+ """
+ Put this stream's timer in known good state: set it to the
+ stream's timeout value if we're doing timeouts, otherwise clear
+ it.
+ """
+ if self.timeout is not None:
+ self.log("Setting timeout %r" % self.timeout)
+ self.timer.set(self.timeout)
+ else:
+ self.log("Clearing timeout")
+ self.timer.cancel()
+
+ def collect_incoming_data(self, data):
+ """
+ Buffer incoming data from asynchat.
+ """
+ self.buffer.append(data)
+ self.update_timeout()
+
+ def get_buffer(self):
+ """
+ Consume data buffered from asynchat.
+ """
+ val = "".join(self.buffer)
+ self.buffer = []
+ return val
+
+ def found_terminator(self):
+ """
+ Asynchat reported that it found whatever terminator we set, so
+ figure out what to do next. This can be messy, because we can be
+ in any of several different states:
+
+ @li We might be handling chunked HTTP, in which case we have to
+ initialize the chunk decoder;
+
+ @li We might have found the end of the message body, in which case
+ we can (finally) process it; or
+
+ @li We might have just gotten to the end of the message headers,
+ in which case we have to parse them to figure out which of three
+ separate mechanisms (chunked, content-length, TCP close) is going
+ to tell us how to find the end of the message body.
+ """
+ self.update_timeout()
+ if self.chunk_handler:
+ self.chunk_handler()
+ elif not isinstance(self.get_terminator(), str):
+ self.handle_body()
+ else:
+ self.msg = self.parse_type.parse_from_wire(self.get_buffer())
+ if self.msg.version == (1, 1) and "chunked" in self.msg.headers.get("Transfer-Encoding", "").lower():
+ self.msg.body = []
+ self.chunk_handler = self.chunk_header
+ self.set_terminator("\r\n")
+ elif "Content-Length" in self.msg.headers:
+ self.set_terminator(int(self.msg.headers["Content-Length"]))
+ else:
+ self.handle_no_content_length()
+
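+  # For reference (illustrative, not used by the code): the chunk decoder
+  # below consumes a chunked body that looks like this on the wire, where
+  # each chunk is a hexadecimal length line, that many bytes of data, and a
+  # CRLF, and a zero-length chunk followed by an empty trailer ends the body:
+  #
+  #   5\r\n
+  #   hello\r\n
+  #   0\r\n
+  #   \r\n
+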
+ def chunk_header(self):
+ """
+ Asynchat just handed us what should be the header of one chunk of
+ a chunked encoding stream. If this chunk has a body, set the
+ stream up to read it; otherwise, this is the last chunk, so start
+ the process of exiting the chunk decoder.
+ """
+ n = int(self.get_buffer().partition(";")[0], 16)
+ self.log("Chunk length %s" % n)
+ if n:
+ self.chunk_handler = self.chunk_body
+ self.set_terminator(n)
+ else:
+ self.msg.body = "".join(self.msg.body)
+ self.chunk_handler = self.chunk_discard_trailer
+
+ def chunk_body(self):
+ """
+ Asynchat just handed us what should be the body of a chunk of the
+ body of a chunked message (sic). Save it, and prepare to move on
+ to the next chunk.
+ """
+ self.log("Chunk body")
+ self.msg.body += self.buffer
+ self.buffer = []
+ self.chunk_handler = self.chunk_discard_crlf
+ self.set_terminator("\r\n")
+
+ def chunk_discard_crlf(self):
+ """
+ Consume the CRLF that terminates a chunk, reinitialize chunk
+ decoder to be ready for the next chunk.
+ """
+ self.log("Chunk CRLF")
+ s = self.get_buffer()
+ assert s == "", "%r: Expected chunk CRLF, got '%s'" % (self, s)
+ self.chunk_handler = self.chunk_header
+
+ def chunk_discard_trailer(self):
+ """
+ Consume chunk trailer, which should be empty, then (finally!) exit
+ the chunk decoder and hand complete message off to the application.
+ """
+ self.log("Chunk trailer")
+ s = self.get_buffer()
+ assert s == "", "%r: Expected end of chunk trailers, got '%s'" % (self, s)
+ self.chunk_handler = None
+ self.handle_message()
+
+ def handle_body(self):
+ """
+ Hand normal (not chunked) message off to the application.
+ """
+ self.msg.body = self.get_buffer()
+ self.handle_message()
+
+ def handle_error(self):
+ """
+ Asynchat (or asyncore, or somebody) raised an exception. See
+ whether it's one we should just pass along, otherwise log a stack
+ trace and close the stream.
+ """
+ etype = sys.exc_info()[0]
+ if etype in (SystemExit, rpki.async.ExitNow):
+ self.log("Caught %s, propagating" % etype.__name__)
+ raise
+ self.log("Error in HTTP stream handler", rpki.log.warn)
+ rpki.log.traceback()
+ if etype not in (rpki.exceptions.HTTPClientAborted,):
+ self.log("Closing due to error", rpki.log.warn)
+ self.close(force = True)
+
+ def handle_timeout(self):
+ """
+ Inactivity timer expired, close connection with prejudice.
+ """
+ self.log("Timeout, closing")
+ self.close(force = True)
+
+ def handle_close(self):
+ """
+ Wrapper around asynchat connection close handler, so that we can
+ log the event.
+ """
+ self.log("Close event in HTTP stream handler")
+ asynchat.async_chat.handle_close(self)
+
+class http_server(http_stream):
+ """
+ HTTP(S) server stream.
+ """
+
+ ## @var parse_type
+ # Stream parser should look for incoming HTTP request messages.
+ parse_type = http_request
+
+ ## @var timeout
+ # Use the default server timeout value set in the module header.
+ timeout = default_server_timeout
+
+ def __init__(self, sock, handlers):
+ self.log("Starting")
+ self.handlers = handlers
+ http_stream.__init__(self, sock = sock)
+ self.expect_close = not want_persistent_server
+
+ def handle_no_content_length(self):
+ """
+ Handle an incoming message that used neither chunking nor a
+ Content-Length header (that is: this message will be the last one
+ in this server stream). No special action required.
+ """
+ self.handle_message()
+
+ def find_handler(self, path):
+ """
+ Helper method to search self.handlers.
+ """
+ for s, h in self.handlers:
+ if path.startswith(s):
+ return h
+ return None
+
+ def handle_message(self):
+ """
+ HTTP layer managed to deliver a complete HTTP request to
+ us, figure out what to do with it. Check the command and
+ Content-Type, look for a handler, and if everything looks right,
+ pass the message body, path, and a reply callback to the handler.
+ """
+ self.log("Received request %s %s" % (self.msg.cmd, self.msg.path))
+ if not self.msg.persistent():
+ self.expect_close = True
+ handler = self.find_handler(self.msg.path)
+ error = None
+ if self.msg.cmd != "POST":
+ error = 501, "No handler for method %s" % self.msg.cmd
+ elif self.msg.headers["Content-Type"] != rpki_content_type:
+      error = 415, "No handler for Content-Type %s" % self.msg.headers["Content-Type"]
+ elif handler is None:
+ error = 404, "No handler for URL %s" % self.msg.path
+ if error is None:
+ try:
+ handler(self.msg.body, self.msg.path, self.send_reply)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ rpki.log.traceback()
+ self.send_error(500, "Unhandled exception %s" % e)
+ else:
+ self.send_error(code = error[0], reason = error[1])
+
+ def send_error(self, code, reason):
+ """
+ Send an error response to this request.
+ """
+ self.send_message(code = code, reason = reason)
+
+ def send_reply(self, code, body):
+ """
+ Send a reply to this request.
+ """
+ self.send_message(code = code, body = body)
+
+ def send_message(self, code, reason = "OK", body = None):
+ """
+    Queue up reply message.  If both parties agree that the connection is
+    persistent, and if no error occurred, restart this stream to
+    listen for the next message; otherwise, queue up a close event for
+ this stream so it will shut down once the reply has been sent.
+ """
+ self.log("Sending response %s %s" % (code, reason))
+ if code >= 400:
+ self.expect_close = True
+ msg = http_response(code = code, reason = reason, body = body,
+ Content_Type = rpki_content_type,
+ Connection = "Close" if self.expect_close else "Keep-Alive")
+ self.push(msg.format())
+ if self.expect_close:
+ self.log("Closing")
+ self.timer.cancel()
+ self.close_when_done()
+ else:
+ self.log("Listening for next message")
+ self.restart()
+
+class http_listener(asyncore.dispatcher):
+ """
+ Listener for incoming HTTP(S) connections.
+ """
+
+ log = log_method
+
+ def __init__(self, handlers, addrinfo):
+ self.log("Listener")
+ asyncore.dispatcher.__init__(self)
+ self.handlers = handlers
+ try:
+ af, socktype, proto, canonname, sockaddr = addrinfo
+ self.create_socket(af, socktype)
+ self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except AttributeError:
+ pass
+ if have_ipv6 and af == socket.AF_INET6:
+ self.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
+ self.bind(sockaddr)
+ self.listen(5)
+ except:
+ self.log("Couldn't set up HTTP listener", rpki.log.warn)
+ rpki.log.traceback()
+ self.close()
+ self.log("Listening on %r, handlers %r" % (sockaddr, handlers))
+
+ def handle_accept(self):
+ """
+ Asyncore says we have an incoming connection, spawn an http_server
+ stream for it and pass along all of our handler data.
+ """
+ self.log("Accepting connection")
+ try:
+ s, client = self.accept()
+ self.log("Accepting connection from %r" % (client,))
+ http_server(sock = s, handlers = self.handlers)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except:
+ self.handle_error()
+
+ def handle_error(self):
+ """
+ Asyncore signaled an error, pass it along or log it.
+ """
+ if sys.exc_info()[0] in (SystemExit, rpki.async.ExitNow):
+ raise
+ self.log("Error in HTTP listener", rpki.log.warn)
+ rpki.log.traceback()
+
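+# Illustrative sketch (not part of the library): standing up a listener on
+# localhost with a single handler.  Per http_server.handle_message() above,
+# a handler receives the request body, the path, and a reply callback taking
+# (code, body); requests must be POSTs with the rpki_content_type defined at
+# the top of this module.  The port number here is arbitrary.
+
+def _example_http_listener():
+  def handler(body, path, cb):
+    cb(200, "received %d bytes for %s" % (len(body), path))
+  addrinfo = (socket.AF_INET, socket.SOCK_STREAM, 0, "",
+              ("127.0.0.1", 8080))
+  http_listener(handlers = (("/", handler),), addrinfo = addrinfo)
+  rpki.async.event_loop()
+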
+class http_client(http_stream):
+ """
+ HTTP(S) client stream.
+ """
+
+ ## @var parse_type
+ # Stream parser should look for incoming HTTP response messages.
+ parse_type = http_response
+
+ ## @var timeout
+ # Use the default client timeout value set in the module header.
+ timeout = default_client_timeout
+
+ def __init__(self, queue, hostport):
+ self.log("Creating new connection to %r" % (hostport,))
+ http_stream.__init__(self)
+ self.queue = queue
+ self.host = hostport[0]
+ self.port = hostport[1]
+ self.state = "opening"
+ self.expect_close = not want_persistent_client
+
+ def start(self):
+ """
+ Create socket and request a connection.
+ """
+ if not use_adns:
+ self.gotaddrinfo([(socket.AF_INET, self.host)])
+ elif self.host == "localhost":
+ self.gotaddrinfo(localhost_addrinfo())
+ else:
+ import rpki.adns # This should move to start of file once we've decided to inflict it on all users
+ rpki.adns.getaddrinfo(self.gotaddrinfo, self.dns_error, self.host, supported_address_families(enable_ipv6_clients))
+
+ def dns_error(self, e):
+ """
+ Handle DNS lookup errors. For now, just whack the connection.
+ Undoubtedly we should do something better with diagnostics here.
+ """
+ self.handle_error()
+
+ def gotaddrinfo(self, addrinfo):
+ """
+ Got address data from DNS, create socket and request connection.
+ """
+ try:
+ self.af, self.addr = random.choice(addrinfo)
+ self.create_socket(self.af, socket.SOCK_STREAM)
+ self.connect((self.addr, self.port))
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except:
+ self.handle_error()
+
+ def handle_connect(self):
+ """
+ Asyncore says socket has connected.
+ """
+ self.log("Socket connected")
+ self.set_state("idle")
+ self.queue.send_request()
+
+ def set_state(self, state):
+ """
+ Set HTTP client connection state.
+ """
+ self.log("State transition %s => %s" % (self.state, state))
+ self.state = state
+
+ def handle_no_content_length(self):
+ """
+ Handle response message that used neither chunking nor a
+    Content-Length header (that is: this message will be the last one
+    on this connection).  In this case we want to read until we
+ reach the end of the data stream.
+ """
+ self.set_terminator(None)
+
+ def send_request(self, msg):
+ """
+ Queue up request message and kickstart connection.
+ """
+ self.log("Sending request %r" % msg)
+ assert self.state == "idle", "%r: state should be idle, is %s" % (self, self.state)
+ self.set_state("request-sent")
+ msg.headers["Connection"] = "Close" if self.expect_close else "Keep-Alive"
+ self.push(msg.format())
+ self.restart()
+
+ def handle_message(self):
+ """
+ Handle incoming HTTP response message. Make sure we're in a state
+ where we expect to see such a message (and allow the mysterious
+ empty messages that Apache sends during connection close, no idea
+ what that is supposed to be about). If everybody agrees that the
+ connection should stay open, put it into an idle state; otherwise,
+ arrange for the stream to shut down.
+ """
+
+ self.log("Message received, state %s" % self.state)
+
+ if not self.msg.persistent():
+ self.expect_close = True
+
+ if self.state != "request-sent":
+ if self.state == "closing":
+ assert not self.msg.body
+ self.log("Ignoring empty response received while closing")
+ return
+ raise rpki.exceptions.HTTPUnexpectedState, "%r received message while in unexpected state %s" % (self, self.state)
+
+ if self.expect_close:
+ self.log("Closing")
+ self.set_state("closing")
+ self.queue.detach(self)
+ self.close_when_done()
+ else:
+ self.log("Idling")
+ self.set_state("idle")
+ self.update_timeout()
+
+ if self.msg.code != 200:
+ raise rpki.exceptions.HTTPRequestFailed, "HTTP request failed with status %s, reason %s, response %s" % (self.msg.code, self.msg.reason, self.msg.body)
+ self.queue.return_result(self.msg)
+
+ def handle_close(self):
+ """
+ Asyncore signaled connection close. If we were waiting for that
+ to find the end of a response message, process the resulting
+ message now; if we were waiting for the response to a request we
+ sent, signal the error.
+ """
+ http_stream.handle_close(self)
+ self.log("State %s" % self.state)
+ self.queue.detach(self)
+ if self.get_terminator() is None:
+ self.handle_body()
+ elif self.state == "request-sent":
+ raise rpki.exceptions.HTTPClientAborted, "HTTP request aborted by close event"
+
+ def handle_timeout(self):
+ """
+ Connection idle timer has expired. Shut down connection in any
+ case, noisily if we weren't idle.
+ """
+ if self.state != "idle":
+ self.log("Timeout while in state %s" % self.state, rpki.log.warn)
+ http_stream.handle_timeout(self)
+ self.queue.detach(self)
+ if self.state != "idle":
+ try:
+ raise rpki.exceptions.HTTPTimeout
+ except rpki.exceptions.HTTPTimeout, e:
+ self.queue.return_result(e)
+
+ def handle_error(self):
+ """
+ Asyncore says something threw an exception. Log it, then shut
+ down the connection and pass back the exception.
+ """
+ eclass, edata = sys.exc_info()[0:2]
+ self.log("Error on HTTP client connection %s:%s: %s %s" % (self.host, self.port, eclass, edata), rpki.log.warn)
+ http_stream.handle_error(self)
+ self.queue.detach(self)
+ self.queue.return_result(edata)
+
+class http_queue(object):
+ """
+ Queue of pending HTTP requests for a single destination. This class
+ is very tightly coupled to http_client; http_client handles the HTTP
+ stream itself, this class provides a slightly higher-level API.
+ """
+
+ log = log_method
+
+ def __init__(self, hostport):
+ self.log("Creating queue for %r" % (hostport,))
+ self.hostport = hostport
+ self.client = None
+ self.queue = []
+
+ def request(self, *requests):
+ """
+ Append http_request object(s) to this queue.
+ """
+ self.log("Adding requests %r" % requests)
+ self.queue.extend(requests)
+
+ def restart(self):
+ """
+ Send next request for this queue, if we can. This may involve
+ starting a new http_client stream, reusing an existing idle
+ stream, or just ignoring this request if there's an active client
+ stream already; in the last case, handling of the response (or
+ exception, or timeout) for the query currently in progress will
+ call this method when it's time to kick out the next query.
+ """
+ try:
+ if self.client is None:
+ self.client = http_client(self, self.hostport)
+ self.log("Attached client %r" % self.client)
+ self.client.start()
+ elif self.client.state == "idle":
+ self.log("Sending request to existing client %r" % self.client)
+ self.send_request()
+ else:
+ self.log("Client %r exists in state %r" % (self.client, self.client.state))
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ self.return_result(e)
+
+ def send_request(self):
+ """
+ Kick out the next query in this queue, if any.
+ """
+ if self.queue:
+ self.client.send_request(self.queue[0])
+
+ def detach(self, client_):
+ """
+    Detach a client from this queue. Silently ignores attempts to
+ detach a client that is not attached to this queue, to simplify
+ handling of what otherwise would be a nasty set of race
+ conditions.
+ """
+ if client_ is self.client:
+ self.log("Detaching client %r" % client_)
+ self.client = None
+
+ def return_result(self, result):
+ """
+ Client stream has returned a result, which we need to pass along
+ to the original caller. Result may be either an HTTP response
+ message or an exception. In either case, once we're done
+ processing this result, kick off next message in the queue, if any.
+ """
+
+    if not self.queue:
+      self.log("No caller, this should not happen. Dropping result %r" % result)
+      return
+
+ req = self.queue.pop(0)
+ self.log("Dequeuing request %r" % req)
+
+ try:
+ if isinstance(result, http_response):
+ self.log("Returning result %r to caller" % result)
+ req.callback(result.body)
+ else:
+ assert isinstance(result, Exception)
+ self.log("Returning exception %r to caller: %s" % (result, result), rpki.log.warn)
+ req.errback(result)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except:
+ self.log("Unhandled exception from callback")
+ rpki.log.traceback()
+
+ self.log("Queue: %r" % self.queue)
+
+ if self.queue:
+ self.restart()
+
+## @var client_queues
+# Map of (host, port) tuples to http_queue objects.
+client_queues = {}
+
+def client(msg, url, callback, errback):
+ """
+ Open client HTTP connection, send a message, set up callbacks to
+ handle response.
+ """
+
+ u = urlparse.urlparse(url)
+
+ if (u.scheme not in ("", "http") or
+ u.username is not None or
+ u.password is not None or
+ u.params != "" or
+ u.query != "" or
+ u.fragment != ""):
+ raise rpki.exceptions.BadClientURL, "Unusable URL %s" % url
+
+ if debug_http:
+ rpki.log.debug("Contacting %s" % url)
+
+ request = http_request(
+ cmd = "POST",
+ path = u.path,
+ body = msg,
+ callback = callback,
+ errback = errback,
+ Host = u.hostname,
+ Content_Type = rpki_content_type)
+
+ hostport = (u.hostname or "localhost", u.port or default_tcp_port)
+
+ if debug_http:
+ rpki.log.debug("Created request %r for %r" % (request, hostport))
+ if hostport not in client_queues:
+ client_queues[hostport] = http_queue(hostport)
+ client_queues[hostport].request(request)
+
+ # Defer connection attempt until after we've had time to process any
+ # pending I/O events, in case connections have closed.
+
+ if debug_http:
+ rpki.log.debug("Scheduling connection startup for %r" % request)
+ rpki.async.defer(client_queues[hostport].restart)
+
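+# Minimal sketch of client() usage (der_blob, done, and failed are
+# hypothetical names; assumes an rpki.async event loop is running):
+#
+#   def done(body):
+#     rpki.log.debug("Got %d byte response" % len(body))
+#
+#   def failed(e):
+#     rpki.log.warn("HTTP request failed: %s" % e)
+#
+#   client(msg = der_blob, url = "http://localhost:4404/left-right",
+#          callback = done, errback = failed)
+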
+def server(handlers, port, host = ""):
+ """
+ Run an HTTP server and wait (forever) for connections.
+ """
+
+ if not isinstance(handlers, (tuple, list)):
+ handlers = (("/", handlers),)
+
+ # Yes, this is sick. So is getaddrinfo() returning duplicate
+ # records, which RedHat has the gall to claim is a feature.
+ ai = []
+ for af in supported_address_families(enable_ipv6_servers):
+ try:
+ if host:
+ h = host
+ elif have_ipv6 and af == socket.AF_INET6:
+ h = "::"
+ else:
+ h = "0.0.0.0"
+ for a in socket.getaddrinfo(h, port, af, socket.SOCK_STREAM):
+ if a not in ai:
+ ai.append(a)
+ except socket.gaierror:
+ pass
+
+ for a in ai:
+ http_listener(addrinfo = a, handlers = handlers)
+
+ rpki.async.event_loop()
+
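+# Minimal sketch of server() usage (handler names and port are
+# hypothetical; note that server() runs the event loop and never returns):
+#
+#   server(handlers = (("/left-right", left_right_handler),
+#                      ("/up-down/", up_down_handler)),
+#          port = 4404)
+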
+class caller(object):
+ """
+ Handle client-side mechanics for protocols based on HTTP, CMS, and
+ rpki.xml_utils. Calling sequence is intended to nest within
+ rpki.async.sync_wrapper.
+ """
+
+ debug = False
+
+ def __init__(self, proto, client_key, client_cert, server_ta, server_cert, url, debug = None):
+ self.proto = proto
+ self.client_key = client_key
+ self.client_cert = client_cert
+ self.server_ta = server_ta
+ self.server_cert = server_cert
+ self.url = url
+ if debug is not None:
+ self.debug = debug
+
+ def __call__(self, cb, eb, *pdus):
+
+ def done(r_der):
+ """
+ Handle CMS-wrapped XML response message.
+ """
+ r_cms = self.proto.cms_msg(DER = r_der)
+ r_msg = r_cms.unwrap((self.server_ta, self.server_cert))
+ if self.debug:
+ print "<!-- Reply -->"
+ print r_cms.pretty_print_content()
+ cb(r_msg)
+
+ q_msg = self.proto.msg.query(*pdus)
+ q_cms = self.proto.cms_msg()
+ q_der = q_cms.wrap(q_msg, self.client_key, self.client_cert)
+ if self.debug:
+ print "<!-- Query -->"
+ print q_cms.pretty_print_content()
+
+ client(url = self.url, msg = q_der, callback = done, errback = eb)
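+
+# Minimal sketch of how caller is typically used (key, certificate, and
+# URL names are hypothetical); per the docstring above, this normally
+# nests inside rpki.async.sync_wrapper rather than being driven directly:
+#
+#   call_rpkid = caller(rpki.left_right, irbe_key, irbe_cert,
+#                       rpkid_ta, rpkid_cert,
+#                       "http://localhost:4404/left-right")
+#   call_rpkid(handle_reply, handle_error, some_left_right_query_pdu)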
diff --git a/rpkid.without_tls/rpki/ipaddrs.py b/rpkid.without_tls/rpki/ipaddrs.py
new file mode 100644
index 00000000..58185cc1
--- /dev/null
+++ b/rpkid.without_tls/rpki/ipaddrs.py
@@ -0,0 +1,114 @@
+"""
+Classes to represent IP addresses.
+
+Given some of the other operations we need to perform on them, it's
+most convenient to represent IP addresses as Python "long" values.
+The classes in this module just wrap suitable read/write syntax around
+the underlying "long" type.
+
+These classes also supply a "bits" attribute for use by other code
+built on these classes; for the most part, IPv6 addresses really are
+just IPv4 addresses with more bits, so we supply the number of bits
+once, here, thus avoiding a lot of duplicate code elsewhere.
+
+$Id$
+
+Copyright (C) 2009 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import socket, struct
+
+class v4addr(long):
+ """
+ IPv4 address.
+
+ Derived from long, but supports IPv4 print syntax.
+ """
+
+ bits = 32
+
+ def __new__(cls, x):
+ """
+ Construct a v4addr object.
+ """
+ if isinstance(x, str):
+ return cls.from_bytes(socket.inet_pton(socket.AF_INET, ".".join(str(int(i)) for i in x.split("."))))
+ else:
+ return long.__new__(cls, x)
+
+ def to_bytes(self):
+ """Convert a v4addr object to a raw byte string."""
+ return struct.pack("!I", long(self))
+
+ @classmethod
+ def from_bytes(cls, x):
+ """Convert from a raw byte string to a v4addr object."""
+ return cls(struct.unpack("!I", x)[0])
+
+ def __str__(self):
+ """Convert a v4addr object to string format."""
+ return socket.inet_ntop(socket.AF_INET, self.to_bytes())
+
+class v6addr(long):
+ """
+ IPv6 address.
+
+ Derived from long, but supports IPv6 print syntax.
+ """
+
+ bits = 128
+
+ def __new__(cls, x):
+ """Construct a v6addr object."""
+ if isinstance(x, str):
+ return cls.from_bytes(socket.inet_pton(socket.AF_INET6, x))
+ else:
+ return long.__new__(cls, x)
+
+ def to_bytes(self):
+ """Convert a v6addr object to a raw byte string."""
+ return struct.pack("!QQ", long(self) >> 64, long(self) & 0xFFFFFFFFFFFFFFFF)
+
+ @classmethod
+ def from_bytes(cls, x):
+ """Convert from a raw byte string to a v6addr object."""
+ x = struct.unpack("!QQ", x)
+ return cls((x[0] << 64) | x[1])
+
+ def __str__(self):
+ """Convert a v6addr object to string format."""
+ return socket.inet_ntop(socket.AF_INET6, self.to_bytes())
+
+def parse(s):
+ """
+ Parse a string as either an IPv4 or IPv6 address, and return object of appropriate class.
+ """
+ if isinstance(s, unicode):
+ s = s.encode("ascii")
+ return v6addr(s) if ":" in s else v4addr(s)
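+
+# Quick illustration of the classes above (Python 2 interactive style;
+# the values shown follow from the code above):
+#
+#   >>> a = parse("10.0.0.44")
+#   >>> long(a), a.bits
+#   (167772204L, 32)
+#   >>> str(v4addr(long(a) + 1))
+#   '10.0.0.45'
+#   >>> parse("2001:db8::1").bits
+#   128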
diff --git a/rpkid.without_tls/rpki/left_right.py b/rpkid.without_tls/rpki/left_right.py
new file mode 100644
index 00000000..ceeda55f
--- /dev/null
+++ b/rpkid.without_tls/rpki/left_right.py
@@ -0,0 +1,1149 @@
+"""
+RPKI "left-right" protocol.
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import rpki.resource_set, rpki.x509, rpki.sql, rpki.exceptions, rpki.xml_utils
+import rpki.http, rpki.up_down, rpki.relaxng, rpki.sundial, rpki.log, rpki.roa
+import rpki.publication, rpki.async
+
+# Enforce strict checking of XML "sender" field in up-down protocol
+enforce_strict_up_down_xml_sender = False
+
+class left_right_namespace(object):
+ """
+ XML namespace parameters for left-right protocol.
+ """
+
+ xmlns = "http://www.hactrn.net/uris/rpki/left-right-spec/"
+ nsmap = { None : xmlns }
+
+class data_elt(rpki.xml_utils.data_elt, rpki.sql.sql_persistent, left_right_namespace):
+ """
+ Virtual class for top-level left-right protocol data elements.
+ """
+
+ handles = ()
+
+ self_id = None
+ self_handle = None
+
+ def self(self):
+ """Fetch self object to which this object links."""
+ return self_elt.sql_fetch(self.gctx, self.self_id)
+
+ def bsc(self):
+ """Return BSC object to which this object links."""
+ return bsc_elt.sql_fetch(self.gctx, self.bsc_id)
+
+ def make_reply_clone_hook(self, r_pdu):
+ """
+ Set handles when cloning, including _id -> _handle translation.
+ """
+ if r_pdu.self_handle is None:
+ r_pdu.self_handle = self.self_handle
+ for tag, elt in self.handles:
+ id_name = tag + "_id"
+ handle_name = tag + "_handle"
+ if getattr(r_pdu, handle_name, None) is None:
+ try:
+ setattr(r_pdu, handle_name, getattr(elt.sql_fetch(self.gctx, getattr(r_pdu, id_name)), handle_name))
+ except AttributeError:
+ continue
+
+ @classmethod
+ def serve_fetch_handle(cls, gctx, self_id, handle):
+ """
+ Find an object based on its handle.
+ """
+ return cls.sql_fetch_where1(gctx, cls.element_name + "_handle = %s AND self_id = %s", (handle, self_id))
+
+ def serve_fetch_one_maybe(self):
+ """
+ Find the object on which a get, set, or destroy method should
+ operate, or which would conflict with a create method.
+ """
+ where = "%s.%s_handle = %%s AND %s.self_id = self.self_id AND self.self_handle = %%s" % ((self.element_name,) * 3)
+ args = (getattr(self, self.element_name + "_handle"), self.self_handle)
+ return self.sql_fetch_where1(self.gctx, where, args, "self")
+
+ def serve_fetch_all(self):
+ """
+ Find the objects on which a list method should operate.
+ """
+ where = "%s.self_id = self.self_id and self.self_handle = %%s" % self.element_name
+ return self.sql_fetch_where(self.gctx, where, (self.self_handle,), "self")
+
+ def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
+ """
+ Hook to do _handle => _id translation before saving.
+
+ self is always the object to be saved to SQL. For create
+    operations, self and q_pdu are the same object; for set
+ operations, self is the pre-existing object from SQL and q_pdu is
+    the set request received from the IRBE.
+ """
+ for tag, elt in self.handles:
+ id_name = tag + "_id"
+ if getattr(self, id_name, None) is None:
+ x = elt.serve_fetch_handle(self.gctx, self.self_id, getattr(q_pdu, tag + "_handle"))
+ if x is None:
+ raise rpki.exceptions.HandleTranslationError, "Could not translate %r %s_handle" % (self, tag)
+ setattr(self, id_name, getattr(x, id_name))
+ cb()
+
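+# Worked example of the "handles" machinery above, using declarations
+# that appear later in this module: parent_elt declares
+#
+#   handles = (("self", self_elt), ("bsc", bsc_elt), ("repository", repository_elt))
+#
+# so when an IRBE create or set PDU arrives carrying, say, bsc_handle,
+# serve_pre_save_hook() looks up the bsc row with that handle under the
+# same self_id and fills in bsc_id before the object is saved, while
+# make_reply_clone_hook() performs the reverse _id -> _handle translation
+# when building the reply.
+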
+class self_elt(data_elt):
+ """
+ <self/> element.
+ """
+
+ element_name = "self"
+ attributes = ("action", "tag", "self_handle", "crl_interval", "regen_margin")
+ elements = ("bpki_cert", "bpki_glue")
+ booleans = ("rekey", "reissue", "revoke", "run_now", "publish_world_now", "revoke_forgotten")
+
+ sql_template = rpki.sql.template("self", "self_id", "self_handle",
+ "use_hsm", "crl_interval", "regen_margin",
+ ("bpki_cert", rpki.x509.X509), ("bpki_glue", rpki.x509.X509))
+ handles = ()
+
+ use_hsm = False
+ crl_interval = None
+ regen_margin = None
+ bpki_cert = None
+ bpki_glue = None
+
+ def bscs(self):
+ """Fetch all BSC objects that link to this self object."""
+ return bsc_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
+
+ def repositories(self):
+ """Fetch all repository objects that link to this self object."""
+ return repository_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
+
+ def parents(self):
+ """Fetch all parent objects that link to this self object."""
+ return parent_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
+
+ def children(self):
+ """Fetch all child objects that link to this self object."""
+ return child_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
+
+ def roas(self):
+ """Fetch all ROA objects that link to this self object."""
+ return rpki.rpki_engine.roa_obj.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
+
+ def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
+ """
+ Extra server actions for self_elt.
+ """
+ rpki.log.trace()
+ self.unimplemented_control("reissue")
+ actions = []
+ if q_pdu.rekey:
+ actions.append(self.serve_rekey)
+ if q_pdu.revoke:
+ actions.append(self.serve_revoke)
+ if q_pdu.revoke_forgotten:
+ actions.append(self.serve_revoke_forgotten)
+ if q_pdu.publish_world_now:
+ actions.append(self.serve_publish_world_now)
+ if q_pdu.run_now:
+ actions.append(self.serve_run_now)
+ def loop(iterator, action):
+ action(iterator, eb)
+ rpki.async.iterator(actions, loop, cb)
+
+ def serve_rekey(self, cb, eb):
+ """
+ Handle a left-right rekey action for this self.
+ """
+ rpki.log.trace()
+ def loop(iterator, parent):
+ parent.serve_rekey(iterator, eb)
+ rpki.async.iterator(self.parents(), loop, cb)
+
+ def serve_revoke(self, cb, eb):
+ """
+ Handle a left-right revoke action for this self.
+ """
+ rpki.log.trace()
+ def loop(iterator, parent):
+ parent.serve_revoke(iterator, eb)
+ rpki.async.iterator(self.parents(), loop, cb)
+
+ def serve_revoke_forgotten(self, cb, eb):
+ """
+ Handle a left-right revoke_forgotten action for this self.
+ """
+ rpki.log.trace()
+ def loop(iterator, parent):
+ parent.serve_revoke_forgotten(iterator, eb)
+ rpki.async.iterator(self.parents(), loop, cb)
+
+ def serve_publish_world_now(self, cb, eb):
+ """
+ Handle a left-right publish_world_now action for this self.
+
+    The publication stuff needs refactoring; right now publication is
+ interleaved with local operations in a way that forces far too
+ many bounces through the task system for any complex update. The
+ whole thing ought to be rewritten to queue up outgoing publication
+ PDUs and only send them when we're all done or when we need to
+ force publication at a particular point in a multi-phase operation.
+
+ Once that reorganization has been done, this method should be
+ rewritten to reuse the low-level publish() methods that each
+ object will have...but we're not there yet. So, for now, we just
+ do this via brute force. Think of it as a trial version to see
+ whether we've identified everything that needs to be republished
+ for this operation.
+ """
+
+ def loop(iterator, parent):
+ q_msg = rpki.publication.msg.query()
+ for ca in parent.cas():
+ ca_detail = ca.fetch_active()
+ if ca_detail is not None:
+ q_msg.append(rpki.publication.crl_elt.make_publish(ca_detail.crl_uri(ca), ca_detail.latest_crl))
+ q_msg.append(rpki.publication.manifest_elt.make_publish(ca_detail.manifest_uri(ca), ca_detail.latest_manifest))
+ q_msg.extend(rpki.publication.certificate_elt.make_publish(c.uri(ca), c.cert) for c in ca_detail.child_certs())
+ q_msg.extend(rpki.publication.roa_elt.make_publish(r.uri(), r.roa) for r in ca_detail.roas() if r.roa is not None)
+ parent.repository().call_pubd(iterator, eb, q_msg)
+
+ rpki.async.iterator(self.parents(), loop, cb)
+
+ def serve_run_now(self, cb, eb):
+ """
+ Handle a left-right run_now action for this self.
+ """
+ rpki.log.debug("Forced immediate run of periodic actions for self %s[%d]" % (self.self_handle, self.self_id))
+ self.cron(cb)
+
+ def serve_fetch_one_maybe(self):
+ """
+ Find the self object upon which a get, set, or destroy action
+ should operate, or which would conflict with a create method.
+ """
+ return self.serve_fetch_handle(self.gctx, None, self.self_handle)
+
+ @classmethod
+ def serve_fetch_handle(cls, gctx, self_id, self_handle):
+ """
+ Find a self object based on its self_handle.
+ """
+ return cls.sql_fetch_where1(gctx, "self_handle = %s", self_handle)
+
+ def serve_fetch_all(self):
+ """
+ Find the self objects upon which a list action should operate.
+ This is different from the list action for all other objects,
+ where list only works within a given self_id context.
+ """
+ return self.sql_fetch_all(self.gctx)
+
+ def cron(self, cb):
+ """
+ Periodic tasks.
+ """
+
+ def one():
+ self.gctx.checkpoint()
+ rpki.log.debug("Self %s[%d] polling parents" % (self.self_handle, self.self_id))
+ self.client_poll(two)
+
+ def two():
+ self.gctx.checkpoint()
+ rpki.log.debug("Self %s[%d] updating children" % (self.self_handle, self.self_id))
+ self.update_children(three)
+
+ def three():
+ self.gctx.checkpoint()
+ rpki.log.debug("Self %s[%d] updating ROAs" % (self.self_handle, self.self_id))
+ self.update_roas(four)
+
+ def four():
+ self.gctx.checkpoint()
+ rpki.log.debug("Self %s[%d] regenerating CRLs and manifests" % (self.self_handle, self.self_id))
+ self.regenerate_crls_and_manifests(cb)
+
+ one()
+
+
+ def client_poll(self, callback):
+ """
+ Run the regular client poll cycle with each of this self's parents
+ in turn.
+ """
+
+ rpki.log.trace()
+
+ def parent_loop(parent_iterator, parent):
+
+ def got_list(r_msg):
+ ca_map = dict((ca.parent_resource_class, ca) for ca in parent.cas())
+ self.gctx.checkpoint()
+
+ def class_loop(class_iterator, rc):
+
+ def class_update_failed(e):
+ rpki.log.traceback()
+ rpki.log.warn("Couldn't update class, skipping: %s" % e)
+ class_iterator()
+
+ def class_create_failed(e):
+ rpki.log.traceback()
+ rpki.log.warn("Couldn't create class, skipping: %s" % e)
+ class_iterator()
+
+ self.gctx.checkpoint()
+ if rc.class_name in ca_map:
+ ca = ca_map[rc.class_name]
+ del ca_map[rc.class_name]
+ ca.check_for_updates(parent, rc, class_iterator, class_update_failed)
+ else:
+ rpki.rpki_engine.ca_obj.create(parent, rc, class_iterator, class_create_failed)
+
+ def class_done():
+
+ def ca_loop(iterator, ca):
+ self.gctx.checkpoint()
+ ca.delete(parent, iterator)
+
+ def ca_done():
+ self.gctx.checkpoint()
+ self.gctx.sql.sweep()
+ parent_iterator()
+
+ rpki.async.iterator(ca_map.values(), ca_loop, ca_done)
+
+ rpki.async.iterator(r_msg.payload.classes, class_loop, class_done)
+
+ def list_failed(e):
+ rpki.log.traceback()
+ rpki.log.warn("Couldn't get resource class list from parent %r, skipping: %s" % (parent, e))
+ parent_iterator()
+
+ rpki.up_down.list_pdu.query(parent, got_list, list_failed)
+
+ rpki.async.iterator(self.parents(), parent_loop, callback)
+
+
+ def update_children(self, cb):
+ """
+ Check for updated IRDB data for all of this self's children and
+ issue new certs as necessary. Must handle changes both in
+ resources and in expiration date.
+ """
+
+ rpki.log.trace()
+ now = rpki.sundial.now()
+ rsn = now + rpki.sundial.timedelta(seconds = self.regen_margin)
+ publisher = rpki.rpki_engine.publication_queue()
+
+ def loop(iterator, child):
+
+ def lose(e):
+ rpki.log.traceback()
+ rpki.log.warn("Couldn't update child %r, skipping: %s" % (child, e))
+ iterator()
+
+ def got_resources(irdb_resources):
+ try:
+ for child_cert in child_certs:
+ ca_detail = child_cert.ca_detail()
+ ca = ca_detail.ca()
+ if ca_detail.state == "active":
+ old_resources = child_cert.cert.get_3779resources()
+ new_resources = irdb_resources.intersection(old_resources).intersection(ca_detail.latest_ca_cert.get_3779resources())
+
+ if new_resources.empty():
+ rpki.log.debug("Resources shrank to the null set, revoking and withdrawing child certificate SKI %s" % child_cert.cert.gSKI())
+ child_cert.revoke(publisher = publisher)
+ ca_detail.generate_crl(publisher = publisher)
+ ca_detail.generate_manifest(publisher = publisher)
+
+ elif old_resources != new_resources or (old_resources.valid_until < rsn and irdb_resources.valid_until > now):
+ rpki.log.debug("Need to reissue child certificate SKI %s" % child_cert.cert.gSKI())
+ child_cert.reissue(
+ ca_detail = ca_detail,
+ resources = new_resources,
+ publisher = publisher)
+
+ elif old_resources.valid_until < now:
+ rpki.log.debug("Child certificate SKI %s has expired: cert.valid_until %s, irdb.valid_until %s"
+ % (child_cert.cert.gSKI(), old_resources.valid_until, irdb_resources.valid_until))
+ child_cert.sql_delete()
+ publisher.withdraw(cls = rpki.publication.certificate_elt, uri = child_cert.uri(ca), obj = child_cert.cert, repository = ca.parent().repository())
+ ca_detail.generate_manifest(publisher = publisher)
+
+ except (SystemExit, rpki.async.ExitNow):
+ raise
+ except Exception, e:
+ self.gctx.checkpoint()
+ lose(e)
+ else:
+ self.gctx.checkpoint()
+ iterator()
+
+ self.gctx.checkpoint()
+ child_certs = child.child_certs()
+ if child_certs:
+ self.gctx.irdb_query_child_resources(child.self().self_handle, child.child_handle, got_resources, lose)
+ else:
+ iterator()
+
+ def done():
+ def lose(e):
+ rpki.log.traceback()
+ rpki.log.warn("Couldn't publish for %s, skipping: %s" % (self.self_handle, e))
+ self.gctx.checkpoint()
+ cb()
+ self.gctx.checkpoint()
+ publisher.call_pubd(cb, lose)
+
+ rpki.async.iterator(self.children(), loop, done)
+
+
+ def regenerate_crls_and_manifests(self, cb):
+ """
+ Generate new CRLs and manifests as necessary for all of this
+ self's CAs. Extracting nextUpdate from a manifest is hard at the
+ moment due to implementation silliness, so for now we generate a
+    new manifest whenever we generate a new CRL.
+
+ This method also cleans up tombstones left behind by revoked
+ ca_detail objects, since we're walking through the relevant
+ portions of the database anyway.
+ """
+
+ rpki.log.trace()
+ now = rpki.sundial.now()
+ regen_margin = rpki.sundial.timedelta(seconds = self.regen_margin)
+ publisher = rpki.rpki_engine.publication_queue()
+
+ for parent in self.parents():
+ for ca in parent.cas():
+ try:
+ for ca_detail in ca.fetch_revoked():
+ if now > ca_detail.latest_crl.getNextUpdate():
+ ca_detail.delete(ca = ca, publisher = publisher)
+ ca_detail = ca.fetch_active()
+          if ca_detail is not None and now + regen_margin > ca_detail.latest_crl.getNextUpdate():
+ ca_detail.generate_crl(publisher = publisher)
+ ca_detail.generate_manifest(publisher = publisher)
+ except (SystemExit, rpki.async.ExitNow):
+ raise
+ except Exception, e:
+ rpki.log.traceback()
+ rpki.log.warn("Couldn't regenerate CRLs and manifests for CA %r, skipping: %s" % (ca, e))
+
+ def lose(e):
+ rpki.log.traceback()
+ rpki.log.warn("Couldn't publish updated CRLs and manifests for self %r, skipping: %s" % (self.self_handle, e))
+ self.gctx.checkpoint()
+ cb()
+
+ self.gctx.checkpoint()
+ publisher.call_pubd(cb, lose)
+
+
+ def update_roas(self, cb):
+ """
+ Generate or update ROAs for this self.
+ """
+
+ def got_roa_requests(roa_requests):
+
+ self.gctx.checkpoint()
+
+ if self.gctx.sql.dirty:
+ rpki.log.warn("Unexpected dirty SQL cache, flushing")
+ self.gctx.sql.sweep()
+
+ roas = {}
+ orphans = []
+ for roa in self.roas():
+ k = (roa.asn, str(roa.ipv4), str(roa.ipv6))
+ if k not in roas:
+ roas[k] = roa
+ elif (roa.roa is not None and roa.cert is not None and roa.ca_detail() is not None and roa.ca_detail().state == "active" and
+ (roas[k].roa is None or roas[k].cert is None or roas[k].ca_detail() is None or roas[k].ca_detail().state != "active")):
+ orphans.append(roas[k])
+ roas[k] = roa
+ else:
+ orphans.append(roa)
+
+ publisher = rpki.rpki_engine.publication_queue()
+ ca_details = set()
+
+ seen = set()
+ for roa_request in roa_requests:
+ try:
+ k = (roa_request.asn, str(roa_request.ipv4), str(roa_request.ipv6))
+ if k in seen:
+ rpki.log.warn("Skipping duplicate ROA request %r for %r" % (k, roa_request))
+ continue
+ seen.add(k)
+ roa = roas.pop(k, None)
+ if roa is None:
+ roa = rpki.rpki_engine.roa_obj(self.gctx, self.self_id, roa_request.asn, roa_request.ipv4, roa_request.ipv6)
+ rpki.log.debug("Couldn't find existing ROA matching %r, created %r" % (k, roa))
+ else:
+ rpki.log.debug("Found existing ROA %r matching %r" % (roa, k))
+ roa.update(publisher = publisher, fast = True)
+ ca_details.add(roa.ca_detail())
+ except (SystemExit, rpki.async.ExitNow):
+ raise
+ except Exception, e:
+ if not isinstance(e, rpki.exceptions.NoCoveringCertForROA):
+ rpki.log.traceback()
+ rpki.log.warn("Could not update ROA %r, %r, skipping: %s" % (roa_request, roa, e))
+
+ orphans.extend(roas.itervalues())
+ for roa in orphans:
+ try:
+ ca_details.add(roa.ca_detail())
+ roa.revoke(publisher = publisher, fast = True)
+ except (SystemExit, rpki.async.ExitNow):
+ raise
+ except Exception, e:
+ rpki.log.traceback()
+ rpki.log.warn("Could not revoke ROA %r: %s" % (roa, e))
+
+ for ca_detail in ca_details:
+ ca_detail.generate_crl(publisher = publisher)
+ ca_detail.generate_manifest(publisher = publisher)
+
+ self.gctx.sql.sweep()
+
+ def publication_failed(e):
+ rpki.log.traceback()
+ rpki.log.warn("Couldn't publish for %s, skipping: %s" % (self.self_handle, e))
+ self.gctx.checkpoint()
+ cb()
+
+ self.gctx.checkpoint()
+ publisher.call_pubd(cb, publication_failed)
+
+ def roa_requests_failed(e):
+ rpki.log.traceback()
+ rpki.log.warn("Could not fetch ROA requests for %s, skipping: %s" % (self.self_handle, e))
+ cb()
+
+ self.gctx.checkpoint()
+ self.gctx.irdb_query_roa_requests(self.self_handle, got_roa_requests, roa_requests_failed)
+
+class bsc_elt(data_elt):
+ """
+ <bsc/> (Business Signing Context) element.
+ """
+
+ element_name = "bsc"
+ attributes = ("action", "tag", "self_handle", "bsc_handle", "key_type", "hash_alg", "key_length")
+ elements = ("signing_cert", "signing_cert_crl", "pkcs10_request")
+ booleans = ("generate_keypair",)
+
+ sql_template = rpki.sql.template("bsc", "bsc_id", "bsc_handle",
+ "self_id", "hash_alg",
+ ("private_key_id", rpki.x509.RSA),
+ ("pkcs10_request", rpki.x509.PKCS10),
+ ("signing_cert", rpki.x509.X509),
+ ("signing_cert_crl", rpki.x509.CRL))
+ handles = (("self", self_elt),)
+
+ private_key_id = None
+ pkcs10_request = None
+ signing_cert = None
+ signing_cert_crl = None
+
+ def repositories(self):
+ """Fetch all repository objects that link to this BSC object."""
+ return repository_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
+
+ def parents(self):
+ """Fetch all parent objects that link to this BSC object."""
+ return parent_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
+
+ def children(self):
+ """Fetch all child objects that link to this BSC object."""
+ return child_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
+
+ def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
+ """
+ Extra server actions for bsc_elt -- handle key generation. For
+ now this only allows RSA with SHA-256.
+ """
+ if q_pdu.generate_keypair:
+ assert q_pdu.key_type in (None, "rsa") and q_pdu.hash_alg in (None, "sha256")
+ self.private_key_id = rpki.x509.RSA.generate(keylength = q_pdu.key_length or 2048)
+ self.pkcs10_request = rpki.x509.PKCS10.create(self.private_key_id)
+ r_pdu.pkcs10_request = self.pkcs10_request
+ data_elt.serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb)
+
+class repository_elt(data_elt):
+ """
+ <repository/> element.
+ """
+
+ element_name = "repository"
+ attributes = ("action", "tag", "self_handle", "repository_handle", "bsc_handle", "peer_contact_uri")
+ elements = ("bpki_cert", "bpki_glue")
+
+ sql_template = rpki.sql.template("repository", "repository_id", "repository_handle",
+ "self_id", "bsc_id", "peer_contact_uri",
+ ("bpki_cert", rpki.x509.X509), ("bpki_glue", rpki.x509.X509))
+ handles = (("self", self_elt), ("bsc", bsc_elt))
+
+ bpki_cert = None
+ bpki_glue = None
+
+ def parents(self):
+ """Fetch all parent objects that link to this repository object."""
+ return parent_elt.sql_fetch_where(self.gctx, "repository_id = %s", (self.repository_id,))
+
+ @staticmethod
+ def default_pubd_handler(pdu):
+ """
+ Default handler for publication response PDUs.
+ """
+ pdu.raise_if_error()
+
+ def call_pubd(self, callback, errback, q_msg, handlers = None):
+ """
+ Send a message to publication daemon and return the response.
+
+ As a convenience, attempting to send an empty message returns
+ immediate success without sending anything.
+
+ Handlers is a dict of handler functions to process the response
+ PDUs. If the tag value in the response PDU appears in the dict,
+ the associated handler is called to process the PDU. If no tag
+ matches, default_pubd_handler() is called. A handler value of
+ False suppresses calling of the default handler.
+ """
+
+ try:
+ rpki.log.trace()
+
+ self.gctx.sql.sweep()
+
+ if not q_msg:
+ return callback()
+
+ if handlers is None:
+ handlers = {}
+
+ for q_pdu in q_msg:
+ rpki.log.info("Sending <%s %r %r> to pubd" % (q_pdu.action, q_pdu.uri, q_pdu.payload))
+
+ bsc = self.bsc()
+ q_der = rpki.publication.cms_msg().wrap(q_msg, bsc.private_key_id, bsc.signing_cert, bsc.signing_cert_crl)
+ bpki_ta_path = (self.gctx.bpki_ta, self.self().bpki_cert, self.self().bpki_glue, self.bpki_cert, self.bpki_glue)
+
+ def done(r_der):
+ try:
+ r_msg = rpki.publication.cms_msg(DER = r_der).unwrap(bpki_ta_path)
+ for r_pdu in r_msg:
+ handler = handlers.get(r_pdu.tag, self.default_pubd_handler)
+ if handler:
+ handler(r_pdu)
+ if len(q_msg) != len(r_msg):
+ raise rpki.exceptions.BadPublicationReply, "Wrong number of response PDUs from pubd: sent %r, got %r" % (q_msg, r_msg)
+ callback()
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ errback(e)
+
+ rpki.http.client(
+ url = self.peer_contact_uri,
+ msg = q_der,
+ callback = done,
+ errback = errback)
+
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ errback(e)
+
+class parent_elt(data_elt):
+ """
+ <parent/> element.
+ """
+
+ element_name = "parent"
+ attributes = ("action", "tag", "self_handle", "parent_handle", "bsc_handle", "repository_handle",
+ "peer_contact_uri", "sia_base", "sender_name", "recipient_name")
+ elements = ("bpki_cms_cert", "bpki_cms_glue")
+ booleans = ("rekey", "reissue", "revoke", "revoke_forgotten")
+
+ sql_template = rpki.sql.template("parent", "parent_id", "parent_handle",
+ "self_id", "bsc_id", "repository_id",
+ ("bpki_cms_cert", rpki.x509.X509), ("bpki_cms_glue", rpki.x509.X509),
+ "peer_contact_uri", "sia_base", "sender_name", "recipient_name")
+ handles = (("self", self_elt), ("bsc", bsc_elt), ("repository", repository_elt))
+
+ bpki_cms_cert = None
+ bpki_cms_glue = None
+
+ def repository(self):
+ """Fetch repository object to which this parent object links."""
+ return repository_elt.sql_fetch(self.gctx, self.repository_id)
+
+ def cas(self):
+ """Fetch all CA objects that link to this parent object."""
+ return rpki.rpki_engine.ca_obj.sql_fetch_where(self.gctx, "parent_id = %s", (self.parent_id,))
+
+ def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
+ """
+ Extra server actions for parent_elt.
+ """
+ self.unimplemented_control("reissue")
+ actions = []
+ if q_pdu.rekey:
+ actions.append(self.serve_rekey)
+ if q_pdu.revoke:
+ actions.append(self.serve_revoke)
+ if q_pdu.revoke_forgotten:
+ actions.append(self.serve_revoke_forgotten)
+ def loop(iterator, action):
+ action(iterator, eb)
+ rpki.async.iterator(actions, loop, cb)
+
+ def serve_rekey(self, cb, eb):
+ """
+ Handle a left-right rekey action for this parent.
+ """
+ def loop(iterator, ca):
+ ca.rekey(iterator, eb)
+ rpki.async.iterator(self.cas(), loop, cb)
+
+ def serve_revoke(self, cb, eb):
+ """
+ Handle a left-right revoke action for this parent.
+ """
+ def loop(iterator, ca):
+ ca.revoke(cb = iterator, eb = eb)
+ rpki.async.iterator(self.cas(), loop, cb)
+
+ def serve_revoke_forgotten(self, cb, eb):
+ """
+ Handle a left-right revoke_forgotten action for this parent.
+
+ This is a bit fiddly: we have to compare the result of an up-down
+ list query with what we have locally and identify the SKIs of any
+ certificates that have gone missing. This should never happen in
+ ordinary operation, but can arise if we have somehow lost a
+ private key, in which case there is nothing more we can do with
+ the issued cert, so we have to clear it. As this really is not
+    supposed to happen, we don't clear it automatically; instead we
+ require an explicit trigger.
+ """
+
+ def got_list(r_msg):
+
+ ca_map = dict((ca.parent_resource_class, ca) for ca in self.cas())
+
+ def rc_loop(rc_iterator, rc):
+
+ if rc.class_name in ca_map:
+
+ def ski_loop(ski_iterator, ski):
+ rpki.log.warn("Revoking certificates missing from our database, class %r, SKI %s" % (rc.class_name, ski))
+ rpki.up_down.revoke_pdu.query(ca, ski, lambda x: ski_iterator(), eb)
+
+ ca = ca_map[rc.class_name]
+ skis_parent_knows_about = set(c.cert.gSKI() for c in rc.certs)
+ skis_ca_knows_about = set(ca_detail.latest_ca_cert.gSKI() for ca_detail in ca.fetch_issue_response_candidates())
+ skis_only_parent_knows_about = skis_parent_knows_about - skis_ca_knows_about
+ rpki.async.iterator(skis_only_parent_knows_about, ski_loop, rc_iterator)
+
+ else:
+ rc_iterator()
+
+ rpki.async.iterator(r_msg.payload.classes, rc_loop, cb)
+
+ rpki.up_down.list_pdu.query(self, got_list, eb)
+
+
+ def query_up_down(self, q_pdu, cb, eb):
+ """
+ Client code for sending one up-down query PDU to this parent.
+ """
+
+ rpki.log.trace()
+
+ bsc = self.bsc()
+ if bsc is None:
+ raise rpki.exceptions.BSCNotFound, "Could not find BSC %s" % self.bsc_id
+
+ if bsc.signing_cert is None:
+ raise rpki.exceptions.BSCNotReady, "BSC %r[%s] is not yet usable" % (bsc.bsc_handle, bsc.bsc_id)
+
+ q_msg = rpki.up_down.message_pdu.make_query(
+ payload = q_pdu,
+ sender = self.sender_name,
+ recipient = self.recipient_name)
+
+ q_der = rpki.up_down.cms_msg().wrap(q_msg, bsc.private_key_id,
+ bsc.signing_cert,
+ bsc.signing_cert_crl)
+
+ def unwrap(r_der):
+ try:
+ r_msg = rpki.up_down.cms_msg(DER = r_der).unwrap((self.gctx.bpki_ta,
+ self.self().bpki_cert,
+ self.self().bpki_glue,
+ self.bpki_cms_cert,
+ self.bpki_cms_glue))
+ r_msg.payload.check_response()
+ except (SystemExit, rpki.async.ExitNow):
+ raise
+ except Exception, e:
+ eb(e)
+ else:
+ cb(r_msg)
+
+ rpki.http.client(
+ msg = q_der,
+ url = self.peer_contact_uri,
+ callback = unwrap,
+ errback = eb)
+
+class child_elt(data_elt):
+ """
+ <child/> element.
+ """
+
+ element_name = "child"
+ attributes = ("action", "tag", "self_handle", "child_handle", "bsc_handle")
+ elements = ("bpki_cert", "bpki_glue")
+ booleans = ("reissue", )
+
+ sql_template = rpki.sql.template("child", "child_id", "child_handle",
+ "self_id", "bsc_id",
+ ("bpki_cert", rpki.x509.X509),
+ ("bpki_glue", rpki.x509.X509))
+
+ handles = (("self", self_elt), ("bsc", bsc_elt))
+
+ bpki_cert = None
+ bpki_glue = None
+
+ def child_certs(self, ca_detail = None, ski = None, unique = False):
+ """Fetch all child_cert objects that link to this child object."""
+ return rpki.rpki_engine.child_cert_obj.fetch(self.gctx, self, ca_detail, ski, unique)
+
+ def parents(self):
+ """Fetch all parent objects that link to self object to which this child object links."""
+ return parent_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
+
+ def ca_from_class_name(self, class_name):
+ """
+ Fetch the CA corresponding to an up-down class_name.
+ """
+ if not class_name.isdigit():
+ raise rpki.exceptions.BadClassNameSyntax, "Bad class name %s" % class_name
+ ca = rpki.rpki_engine.ca_obj.sql_fetch(self.gctx, long(class_name))
+ if ca is None:
+ raise rpki.exceptions.ClassNameUnknown, "Unknown class name %s" % class_name
+ parent = ca.parent()
+ if self.self_id != parent.self_id:
+ raise rpki.exceptions.ClassNameMismatch, "Class name mismatch: child.self_id = %d, parent.self_id = %d" % (self.self_id, parent.self_id)
+ return ca
+
+ def serve_destroy_hook(self, cb, eb):
+ """
+ Extra server actions when destroying a child_elt.
+ """
+ publisher = rpki.rpki_engine.publication_queue()
+ for child_cert in self.child_certs():
+ child_cert.revoke(publisher = publisher,
+ generate_crl_and_manifest = True)
+ publisher.call_pubd(cb, eb)
+
+ def serve_up_down(self, query, callback):
+ """
+ Outer layer of server handling for one up-down PDU from this child.
+ """
+
+ rpki.log.trace()
+
+ bsc = self.bsc()
+ if bsc is None:
+ raise rpki.exceptions.BSCNotFound, "Could not find BSC %s" % self.bsc_id
+ q_msg = rpki.up_down.cms_msg(DER = query).unwrap((self.gctx.bpki_ta,
+ self.self().bpki_cert,
+ self.self().bpki_glue,
+ self.bpki_cert,
+ self.bpki_glue))
+ q_msg.payload.gctx = self.gctx
+ if enforce_strict_up_down_xml_sender and q_msg.sender != str(self.child_id):
+ raise rpki.exceptions.BadSender, "Unexpected XML sender %s" % q_msg.sender
+
+ def done(r_msg):
+ #
+ # Exceptions from this point on are problematic, as we have no
+ # sane way of reporting errors in the error reporting mechanism.
+ # May require refactoring, ignore the issue for now.
+ #
+ reply = rpki.up_down.cms_msg().wrap(r_msg, bsc.private_key_id,
+ bsc.signing_cert, bsc.signing_cert_crl)
+ callback(reply)
+
+ try:
+ q_msg.serve_top_level(self, done)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except rpki.exceptions.NoActiveCA, data:
+ done(q_msg.serve_error(data))
+ except Exception, data:
+ rpki.log.traceback()
+ done(q_msg.serve_error(data))
+
+class list_resources_elt(rpki.xml_utils.base_elt, left_right_namespace):
+ """
+ <list_resources/> element.
+ """
+
+ element_name = "list_resources"
+ attributes = ("self_handle", "tag", "child_handle", "valid_until", "asn", "ipv4", "ipv6")
+ valid_until = None
+
+ def startElement(self, stack, name, attrs):
+ """
+ Handle <list_resources/> element. This requires special handling
+ due to the data types of some of the attributes.
+ """
+ assert name == "list_resources", "Unexpected name %s, stack %s" % (name, stack)
+ self.read_attrs(attrs)
+ if isinstance(self.valid_until, str):
+ self.valid_until = rpki.sundial.datetime.fromXMLtime(self.valid_until)
+ if self.asn is not None:
+ self.asn = rpki.resource_set.resource_set_as(self.asn)
+ if self.ipv4 is not None:
+ self.ipv4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
+ if self.ipv6 is not None:
+ self.ipv6 = rpki.resource_set.resource_set_ipv6(self.ipv6)
+
+ def toXML(self):
+ """
+ Generate <list_resources/> element. This requires special
+ handling due to the data types of some of the attributes.
+ """
+ elt = self.make_elt()
+    if isinstance(self.valid_until, rpki.sundial.datetime):
+ elt.set("valid_until", self.valid_until.toXMLtime())
+ return elt
+
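+# A serialized <list_resources/> PDU looks roughly like this (attribute
+# values hypothetical):
+#
+#   <list_resources self_handle="alice" child_handle="bob"
+#                   valid_until="2011-01-01T00:00:00Z"
+#                   asn="64496-64511" ipv4="192.0.2.0/24" ipv6="2001:db8::/32"/>
+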
+class list_roa_requests_elt(rpki.xml_utils.base_elt, left_right_namespace):
+ """
+ <list_roa_requests/> element.
+ """
+
+ element_name = "list_roa_requests"
+ attributes = ("self_handle", "tag", "asn", "ipv4", "ipv6")
+
+ def startElement(self, stack, name, attrs):
+ """
+ Handle <list_roa_requests/> element. This requires special handling
+ due to the data types of some of the attributes.
+ """
+ assert name == "list_roa_requests", "Unexpected name %s, stack %s" % (name, stack)
+ self.read_attrs(attrs)
+ if self.ipv4 is not None:
+ self.ipv4 = rpki.resource_set.roa_prefix_set_ipv4(self.ipv4)
+ if self.ipv6 is not None:
+ self.ipv6 = rpki.resource_set.roa_prefix_set_ipv6(self.ipv6)
+
+class list_published_objects_elt(rpki.xml_utils.text_elt, left_right_namespace):
+ """
+ <list_published_objects/> element.
+ """
+
+ element_name = "list_published_objects"
+ attributes = ("self_handle", "tag", "uri")
+ text_attribute = "obj"
+
+ obj = None
+
+ def serve_dispatch(self, r_msg, cb, eb):
+ """
+ Handle a <list_published_objects/> query. The method name is a
+    misnomer here: there's no action attribute and no dispatch; we
+ just dump every published object for the specified <self/> and return.
+ """
+ for parent in self_elt.serve_fetch_handle(self.gctx, None, self.self_handle).parents():
+ for ca in parent.cas():
+ ca_detail = ca.fetch_active()
+ if ca_detail is not None:
+ r_msg.append(self.make_reply(ca_detail.crl_uri(ca), ca_detail.latest_crl))
+ r_msg.append(self.make_reply(ca_detail.manifest_uri(ca), ca_detail.latest_manifest))
+ r_msg.extend(self.make_reply(c.uri(ca), c.cert) for c in ca_detail.child_certs())
+ r_msg.extend(self.make_reply(r.uri(), r.roa) for r in ca_detail.roas() if r.roa is not None)
+ cb()
+
+ def make_reply(self, uri, obj):
+ """
+ Generate one reply PDU.
+ """
+ r_pdu = self.make_pdu(tag = self.tag, self_handle = self.self_handle, uri = uri)
+ r_pdu.obj = obj.get_Base64()
+ return r_pdu
+
+class list_received_resources_elt(rpki.xml_utils.base_elt, left_right_namespace):
+ """
+ <list_received_resources/> element.
+ """
+
+ element_name = "list_received_resources"
+ attributes = ("self_handle", "tag", "parent_handle",
+ "notBefore", "notAfter", "uri", "sia_uri", "aia_uri", "asn", "ipv4", "ipv6")
+
+ def serve_dispatch(self, r_msg, cb, eb):
+ """
+ Handle a <list_received_resources/> query. The method name is a
+    misnomer here: there's no action attribute and no dispatch; we
+ just dump a bunch of data about every certificate issued to us by
+ one of our parents, then return.
+ """
+ for parent in self_elt.serve_fetch_handle(self.gctx, None, self.self_handle).parents():
+ for ca in parent.cas():
+ ca_detail = ca.fetch_active()
+ if ca_detail is not None and ca_detail.latest_ca_cert is not None:
+ r_msg.append(self.make_reply(parent.parent_handle, ca_detail.ca_cert_uri, ca_detail.latest_ca_cert))
+ cb()
+
+ def make_reply(self, parent_handle, uri, cert):
+ """
+ Generate one reply PDU.
+ """
+ resources = cert.get_3779resources()
+ return self.make_pdu(
+ tag = self.tag,
+ self_handle = self.self_handle,
+ parent_handle = parent_handle,
+ notBefore = str(cert.getNotBefore()),
+ notAfter = str(cert.getNotAfter()),
+ uri = uri,
+ sia_uri = cert.get_sia_directory_uri(),
+ aia_uri = cert.get_aia_uri(),
+ asn = resources.asn,
+ ipv4 = resources.v4,
+ ipv6 = resources.v6)
+
+class report_error_elt(rpki.xml_utils.text_elt, left_right_namespace):
+ """
+ <report_error/> element.
+ """
+
+ element_name = "report_error"
+ attributes = ("tag", "self_handle", "error_code")
+ text_attribute = "error_text"
+
+ error_text = None
+
+ @classmethod
+ def from_exception(cls, e, self_handle = None, tag = None):
+ """
+ Generate a <report_error/> element from an exception.
+ """
+ self = cls()
+ self.self_handle = self_handle
+ self.tag = tag
+ self.error_code = e.__class__.__name__
+ self.error_text = str(e)
+ return self
+
+class msg(rpki.xml_utils.msg, left_right_namespace):
+ """
+ Left-right PDU.
+ """
+
+ ## @var version
+ # Protocol version
+ version = 1
+
+ ## @var pdus
+ # Dispatch table of PDUs for this protocol.
+ pdus = dict((x.element_name, x)
+ for x in (self_elt, child_elt, parent_elt, bsc_elt, repository_elt,
+ list_resources_elt, list_roa_requests_elt,
+ list_published_objects_elt, list_received_resources_elt,
+ report_error_elt))
+
+ def serve_top_level(self, gctx, cb):
+ """
+ Serve one msg PDU.
+ """
+
+ r_msg = self.__class__.reply()
+
+ def loop(iterator, q_pdu):
+
+ def fail(e):
+ if not isinstance(e, rpki.exceptions.NotFound):
+ rpki.log.traceback()
+ r_msg.append(report_error_elt.from_exception(e, self_handle = q_pdu.self_handle, tag = q_pdu.tag))
+ cb(r_msg)
+
+ try:
+ q_pdu.gctx = gctx
+ q_pdu.serve_dispatch(r_msg, iterator, fail)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ fail(e)
+
+ def done():
+ cb(r_msg)
+
+ rpki.async.iterator(self, loop, done)
+
+class sax_handler(rpki.xml_utils.sax_handler):
+ """
+ SAX handler for Left-Right protocol.
+ """
+
+ pdu = msg
+ name = "msg"
+ version = "1"
+
+class cms_msg(rpki.x509.XML_CMS_object):
+ """
+ Class to hold a CMS-signed left-right PDU.
+ """
+
+ encoding = "us-ascii"
+ schema = rpki.relaxng.left_right
+ saxify = sax_handler.saxify
diff --git a/rpkid.without_tls/rpki/log.py b/rpkid.without_tls/rpki/log.py
new file mode 100644
index 00000000..9d346385
--- /dev/null
+++ b/rpkid.without_tls/rpki/log.py
@@ -0,0 +1,111 @@
+"""
+Logging facilities for RPKI libraries.
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import syslog, sys, os, time
+import traceback as tb
+
+## @var enable_trace
+# Whether call tracing is enabled.
+
+enable_trace = False
+
+## @var use_syslog
+# Whether to use syslog
+
+use_syslog = True
+
+tag = ""
+pid = 0
+
+def init(ident = "rpki", flags = syslog.LOG_PID, facility = syslog.LOG_DAEMON):
+ """
+ Initialize logging system.
+ """
+
+ if use_syslog:
+ return syslog.openlog(ident, flags, facility)
+ else:
+ global tag, pid
+ tag = ident
+ pid = os.getpid()
+
+def set_trace(enable):
+ """
+ Enable or disable call tracing.
+ """
+
+ global enable_trace
+ enable_trace = enable
+
+class logger(object):
+ """
+ Closure for logging.
+ """
+
+ def __init__(self, priority):
+ self.priority = priority
+
+ def __call__(self, message):
+ if use_syslog:
+ return syslog.syslog(self.priority, message)
+ else:
+ sys.stderr.write("%s %s[%d]: %s\n" % (time.strftime("%F %T"), tag, pid, message))
+
+error = logger(syslog.LOG_ERR)
+warn = logger(syslog.LOG_WARNING)
+note = logger(syslog.LOG_NOTICE)
+info = logger(syslog.LOG_INFO)
+debug = logger(syslog.LOG_DEBUG)
+
+def trace():
+ """
+ Execution trace -- where are we now, and whence came we here?
+ """
+
+ if enable_trace:
+ bt = tb.extract_stack(limit = 3)
+ return debug("[%s() at %s:%d from %s:%d]" % (bt[1][2], bt[1][0], bt[1][1], bt[0][0], bt[0][1]))
+
+def traceback():
+ """
+ Consolidated backtrace facility with a bit of extra info.
+ """
+
+ assert sys.exc_info() != (None, None, None), "rpki.log.traceback() called without valid trace on stack, this is a programming error"
+ bt = tb.extract_stack(limit = 3)
+ error("Exception caught in %s() at %s:%d called from %s:%d" % (bt[1][2], bt[1][0], bt[1][1], bt[0][0], bt[0][1]))
+ bt = tb.format_exc()
+ assert bt is not None, "Apparently I'm still not using the right test for null backtrace"
+ for line in bt.splitlines():
+ warn(line)
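+
+# Minimal usage sketch (the "example" ident is hypothetical):
+#
+#   import rpki.log
+#   rpki.log.use_syslog = False   # write to stderr instead of syslog
+#   rpki.log.init("example")
+#   rpki.log.set_trace(True)
+#   rpki.log.info("Starting up")
+#   rpki.log.trace()              # logs caller/callee info when tracing is on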
diff --git a/rpkid.without_tls/rpki/manifest.py b/rpkid.without_tls/rpki/manifest.py
new file mode 100644
index 00000000..8581f3a0
--- /dev/null
+++ b/rpkid.without_tls/rpki/manifest.py
@@ -0,0 +1,54 @@
+"""
+Signed manifests. This is just the ASN.1 encoder, the rest is in
+rpki.x509 with the rest of the DER_object code.
+
+Note that rpki.x509.SignedManifest implements the signed manifest;
+the structures here are just the payload of the CMS eContent field.
+
+$Id$
+
+Copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+from POW._der import *
+
+class FileAndHash(Sequence):
+ def __init__(self, optional=0, default=''):
+ self.file = IA5String()
+ self.hash = AltBitString()
+ contents = [ self.file, self.hash ]
+ Sequence.__init__(self, contents, optional, default)
+
+class FilesAndHashes(SequenceOf):
+ def __init__(self, optional=0, default=''):
+ SequenceOf.__init__(self, FileAndHash, optional, default)
+
+class Manifest(Sequence):
+ def __init__(self, optional=0, default=''):
+ self.version = Integer()
+ self.explicitVersion = Explicit(CLASS_CONTEXT, FORM_CONSTRUCTED, 0, self.version, 0, 'oAMCAQA=')
+ self.manifestNumber = Integer()
+ self.thisUpdate = GeneralizedTime()
+ self.nextUpdate = GeneralizedTime()
+ self.fileHashAlg = Oid()
+ self.fileList = FilesAndHashes()
+
+ contents = [ self.explicitVersion,
+ self.manifestNumber,
+ self.thisUpdate,
+ self.nextUpdate,
+ self.fileHashAlg,
+ self.fileList ]
+ Sequence.__init__(self, contents, optional, default)
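+
+# The classes above mirror, roughly, the ASN.1 structure of the RPKI
+# manifest eContent:
+#
+#   Manifest ::= SEQUENCE {
+#     version        [0] INTEGER DEFAULT 0,
+#     manifestNumber     INTEGER,
+#     thisUpdate         GeneralizedTime,
+#     nextUpdate         GeneralizedTime,
+#     fileHashAlg        OBJECT IDENTIFIER,
+#     fileList           SEQUENCE SIZE (0..MAX) OF FileAndHash }
+#
+#   FileAndHash ::= SEQUENCE {
+#     file  IA5String,
+#     hash  BIT STRING }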
diff --git a/rpkid.without_tls/rpki/myrpki.py b/rpkid.without_tls/rpki/myrpki.py
new file mode 100644
index 00000000..f4fd8218
--- /dev/null
+++ b/rpkid.without_tls/rpki/myrpki.py
@@ -0,0 +1,1835 @@
+"""
+This (oversized) module used to be an (oversized) program.
+Refactoring in progress, some doc still needs updating.
+
+
+This program is now the merger of three different tools: the old
+myrpki.py script, the old myirbe.py script, and the newer setup.py CLI
+tool. As such, it is still in need of some cleanup, but the need to
+provide a saner user interface is more urgent than internal code
+prettiness at the moment. In the long run, 90% of the code in this
+file probably ought to move to well-designed library modules.
+
+Overall goal here is to build up the configuration necessary to run
+rpkid and friends, by reading a config file, a collection of .CSV
+files, and the results of a few out-of-band XML setup messages
+exchanged with one's parents, children, and so forth.
+
+The config file is in an OpenSSL-compatible format, the CSV files are
+simple tab-delimited text. The XML files are all generated by this
+program, either the local instance or an instance being run by another
+player in the system; the mechanism used to exchange these setup
+messages is outside the scope of this program, feel free to use
+PGP-signed mail, a web interface (not provided), USB stick, carrier
+pigeons, whatever works.
+
+With one exception, the commands in this program avoid using any
+third-party Python code other than the rpki libraries themselves; with
+the same one exception, all OpenSSL work is done with the OpenSSL
+command line tool (the one built as a side effect of building rcynic
+will do, if your platform has no system copy or the system copy is too
+old). This is all done in an attempt to make the code more portable,
+so one can run most of the RPKI back end software on a laptop or
+whatever. The one exception is the configure_daemons command, which
+must, of necessity, use the same communication libraries as the
+daemons with which it is conversing. So that one command will not
+work if the correct Python modules are not available.
+
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+from __future__ import with_statement
+
+import subprocess, csv, re, os, getopt, sys, base64, time, glob, copy, warnings
+import rpki.config, rpki.cli, rpki.sundial, rpki.log, rpki.oids
+
+try:
+ from lxml.etree import (Element, SubElement, ElementTree,
+ fromstring as ElementFromString,
+ tostring as ElementToString)
+except ImportError:
+ from xml.etree.ElementTree import (Element, SubElement, ElementTree,
+ fromstring as ElementFromString,
+ tostring as ElementToString)
+
+
+
+# Our XML namespace and protocol version.
+
+namespace = "http://www.hactrn.net/uris/rpki/myrpki/"
+version = "2"
+namespaceQName = "{" + namespace + "}"
+
+# Whether to include incomplete entries when rendering to XML.
+
+allow_incomplete = False
+
+# Whether to whine about incomplete entries while rendering to XML.
+
+whine = False
+
+class comma_set(set):
+ """
+ Minor customization of set(), to provide a print syntax.
+ """
+
+ def __str__(self):
+ return ",".join(self)
+
+class EntityDB(object):
+ """
+ Wrapper for entitydb path lookups. Hmm, maybe some or all of the
+ entitydb glob stuff should end up here too? Later.
+ """
+
+ def __init__(self, cfg):
+ self.dir = cfg.get("entitydb_dir", "entitydb")
+
+ def __call__(self, *args):
+ return os.path.join(self.dir, *args)
+
+ def iterate(self, *args):
+ return glob.iglob(os.path.join(self.dir, *args))
+
+class roa_request(object):
+ """
+ Representation of a ROA request.
+ """
+
+ v4re = re.compile("^([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]+(-[0-9]+)?$", re.I)
+ v6re = re.compile("^([0-9a-f]{0,4}:){0,15}[0-9a-f]{0,4}/[0-9]+(-[0-9]+)?$", re.I)
+
+ def __init__(self, asn, group):
+ self.asn = asn
+ self.group = group
+ self.v4 = comma_set()
+ self.v6 = comma_set()
+
+ def __repr__(self):
+ s = "<%s asn %s group %s" % (self.__class__.__name__, self.asn, self.group)
+ if self.v4:
+ s += " v4 %s" % self.v4
+ if self.v6:
+ s += " v6 %s" % self.v6
+ return s + ">"
+
+ def add(self, prefix):
+ """
+ Add one prefix to this ROA request.
+ """
+ if self.v4re.match(prefix):
+ self.v4.add(prefix)
+ elif self.v6re.match(prefix):
+ self.v6.add(prefix)
+ else:
+ raise RuntimeError, "Bad prefix syntax: %r" % (prefix,)
+
+ def xml(self, e):
+ """
+    Generate an XML element representing this ROA request.
+ """
+ e = SubElement(e, "roa_request",
+ asn = self.asn,
+ v4 = str(self.v4),
+ v6 = str(self.v6))
+ e.tail = "\n"
+
+class roa_requests(dict):
+ """
+ Database of ROA requests.
+ """
+
+ def add(self, asn, group, prefix):
+ """
+ Add one <ASN, group, prefix> set to ROA request database.
+ """
+ key = (asn, group)
+ if key not in self:
+ self[key] = roa_request(asn, group)
+ self[key].add(prefix)
+
+ def xml(self, e):
+ """
+ Render ROA requests as XML elements.
+ """
+ for r in self.itervalues():
+ r.xml(e)
+
+ @classmethod
+ def from_csv(cls, roa_csv_file):
+ """
+ Parse ROA requests from CSV file.
+ """
+ self = cls()
+ # format: p/n-m asn group
+ for pnm, asn, group in csv_reader(roa_csv_file, columns = 3):
+ self.add(asn = asn, group = group, prefix = pnm)
+ return self
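+
+# Illustrative roa.csv content (whitespace-delimited, values made up):
+#
+#   10.0.0.0/16-24    64512   g1
+#   2001:db8::/32     64512   g1
+#
+# that is: prefix/length (optionally -maxlength), ASN, group.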
+
+class child(object):
+ """
+ Representation of one child entity.
+ """
+
+ v4re = re.compile("^(([0-9]{1,3}\.){3}[0-9]{1,3}/[0-9]+)|(([0-9]{1,3}\.){3}[0-9]{1,3}-([0-9]{1,3}\.){3}[0-9]{1,3})$", re.I)
+ v6re = re.compile("^(([0-9a-f]{0,4}:){0,15}[0-9a-f]{0,4}/[0-9]+)|(([0-9a-f]{0,4}:){0,15}[0-9a-f]{0,4}-([0-9a-f]{0,4}:){0,15}[0-9a-f]{0,4})$", re.I)
+
+ def __init__(self, handle):
+ self.handle = handle
+ self.asns = comma_set()
+ self.v4 = comma_set()
+ self.v6 = comma_set()
+ self.validity = None
+ self.bpki_certificate = None
+
+ def __repr__(self):
+ s = "<%s %s" % (self.__class__.__name__, self.handle)
+ if self.asns:
+ s += " asn %s" % self.asns
+ if self.v4:
+ s += " v4 %s" % self.v4
+ if self.v6:
+ s += " v6 %s" % self.v6
+ if self.validity:
+ s += " valid %s" % self.validity
+ if self.bpki_certificate:
+ s += " cert %s" % self.bpki_certificate
+ return s + ">"
+
+ def add(self, prefix = None, asn = None, validity = None, bpki_certificate = None):
+ """
+ Add prefix, autonomous system number, validity date, or BPKI
+ certificate for this child.
+ """
+ if prefix is not None:
+ if self.v4re.match(prefix):
+ self.v4.add(prefix)
+ elif self.v6re.match(prefix):
+ self.v6.add(prefix)
+ else:
+ raise RuntimeError, "Bad prefix syntax: %r" % (prefix,)
+ if asn is not None:
+ self.asns.add(asn)
+ if validity is not None:
+ self.validity = validity
+ if bpki_certificate is not None:
+ self.bpki_certificate = bpki_certificate
+
+ def xml(self, e):
+ """
+ Render this child as an XML element.
+ """
+ complete = self.bpki_certificate and self.validity
+ if whine and not complete:
+ print "Incomplete child entry %s" % self
+ if complete or allow_incomplete:
+ e = SubElement(e, "child",
+ handle = self.handle,
+ valid_until = self.validity,
+ asns = str(self.asns),
+ v4 = str(self.v4),
+ v6 = str(self.v6))
+ e.tail = "\n"
+ if self.bpki_certificate:
+ PEMElement(e, "bpki_certificate", self.bpki_certificate)
+
+class children(dict):
+ """
+ Database of children.
+ """
+
+ def add(self, handle, prefix = None, asn = None, validity = None, bpki_certificate = None):
+ """
+ Add resources to a child, creating the child object if necessary.
+ """
+ if handle not in self:
+ self[handle] = child(handle)
+ self[handle].add(prefix = prefix, asn = asn, validity = validity, bpki_certificate = bpki_certificate)
+
+ def xml(self, e):
+ """
+ Render children database to XML.
+ """
+ for c in self.itervalues():
+ c.xml(e)
+
+ @classmethod
+ def from_csv(cls, prefix_csv_file, asn_csv_file, fxcert, entitydb):
+ """
+ Parse child resources, certificates, and validity dates from CSV files.
+ """
+ self = cls()
+ for f in entitydb.iterate("children", "*.xml"):
+ c = etree_read(f)
+ self.add(handle = os.path.splitext(os.path.split(f)[-1])[0],
+ validity = c.get("valid_until"),
+ bpki_certificate = fxcert(c.findtext("bpki_child_ta")))
+ # childname p/n
+ for handle, pn in csv_reader(prefix_csv_file, columns = 2):
+ self.add(handle = handle, prefix = pn)
+ # childname asn
+ for handle, asn in csv_reader(asn_csv_file, columns = 2):
+ self.add(handle = handle, asn = asn)
+ return self
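+
+# Illustrative prefixes.csv and asns.csv content (values made up), one
+# child handle plus one prefix (or ASN) per line:
+#
+#   prefixes.csv:   Alice   10.0.0.0/8
+#   asns.csv:       Alice   64512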
+
+class parent(object):
+ """
+ Representation of one parent entity.
+ """
+
+ def __init__(self, handle):
+ self.handle = handle
+ self.service_uri = None
+ self.bpki_cms_certificate = None
+ self.myhandle = None
+ self.sia_base = None
+
+ def __repr__(self):
+ s = "<%s %s" % (self.__class__.__name__, self.handle)
+ if self.myhandle:
+ s += " myhandle %s" % self.myhandle
+ if self.service_uri:
+ s += " uri %s" % self.service_uri
+ if self.sia_base:
+ s += " sia %s" % self.sia_base
+ if self.bpki_cms_certificate:
+ s += " cms %s" % self.bpki_cms_certificate
+ return s + ">"
+
+ def add(self, service_uri = None,
+ bpki_cms_certificate = None,
+ myhandle = None,
+ sia_base = None):
+ """
+ Add service URI or BPKI certificates to this parent object.
+ """
+ if service_uri is not None:
+ self.service_uri = service_uri
+ if bpki_cms_certificate is not None:
+ self.bpki_cms_certificate = bpki_cms_certificate
+ if myhandle is not None:
+ self.myhandle = myhandle
+ if sia_base is not None:
+ self.sia_base = sia_base
+
+ def xml(self, e):
+ """
+ Render this parent object to XML.
+ """
+ complete = self.bpki_cms_certificate and self.myhandle and self.service_uri and self.sia_base
+ if whine and not complete:
+ print "Incomplete parent entry %s" % self
+ if complete or allow_incomplete:
+ e = SubElement(e, "parent",
+ handle = self.handle,
+ myhandle = self.myhandle,
+ service_uri = self.service_uri,
+ sia_base = self.sia_base)
+ e.tail = "\n"
+ if self.bpki_cms_certificate:
+ PEMElement(e, "bpki_cms_certificate", self.bpki_cms_certificate)
+
+class parents(dict):
+ """
+ Database of parent objects.
+ """
+
+ def add(self, handle,
+ service_uri = None,
+ bpki_cms_certificate = None,
+ myhandle = None,
+ sia_base = None):
+ """
+ Add service URI or certificates to parent object, creating it if necessary.
+ """
+ if handle not in self:
+ self[handle] = parent(handle)
+ self[handle].add(service_uri = service_uri,
+ bpki_cms_certificate = bpki_cms_certificate,
+ myhandle = myhandle,
+ sia_base = sia_base)
+
+ def xml(self, e):
+ for c in self.itervalues():
+ c.xml(e)
+
+ @classmethod
+ def from_csv(cls, fxcert, entitydb):
+ """
+ Parse parent data from entitydb.
+ """
+ self = cls()
+ for f in entitydb.iterate("parents", "*.xml"):
+ h = os.path.splitext(os.path.split(f)[-1])[0]
+ p = etree_read(f)
+ r = etree_read(f.replace(os.path.sep + "parents" + os.path.sep,
+ os.path.sep + "repositories" + os.path.sep))
+ assert r.get("type") == "confirmed"
+ self.add(handle = h,
+ service_uri = p.get("service_uri"),
+ bpki_cms_certificate = fxcert(p.findtext("bpki_resource_ta")),
+ myhandle = p.get("child_handle"),
+ sia_base = r.get("sia_base"))
+ return self
+
+class repository(object):
+ """
+ Representation of one repository entity.
+ """
+
+ def __init__(self, handle):
+ self.handle = handle
+ self.service_uri = None
+ self.bpki_certificate = None
+
+ def __repr__(self):
+ s = "<%s %s" % (self.__class__.__name__, self.handle)
+ if self.service_uri:
+ s += " uri %s" % self.service_uri
+ if self.bpki_certificate:
+ s += " cert %s" % self.bpki_certificate
+ return s + ">"
+
+ def add(self, service_uri = None, bpki_certificate = None):
+ """
+ Add service URI or BPKI certificates to this repository object.
+ """
+ if service_uri is not None:
+ self.service_uri = service_uri
+ if bpki_certificate is not None:
+ self.bpki_certificate = bpki_certificate
+
+ def xml(self, e):
+ """
+ Render this repository object to XML.
+ """
+ complete = self.bpki_certificate and self.service_uri
+ if whine and not complete:
+ print "Incomplete repository entry %s" % self
+ if complete or allow_incomplete:
+ e = SubElement(e, "repository",
+ handle = self.handle,
+ service_uri = self.service_uri)
+ e.tail = "\n"
+ if self.bpki_certificate:
+ PEMElement(e, "bpki_certificate", self.bpki_certificate)
+
+class repositories(dict):
+ """
+ Database of repository objects.
+ """
+
+ def add(self, handle,
+ service_uri = None,
+ bpki_certificate = None):
+ """
+ Add service URI or certificate to repository object, creating it if necessary.
+ """
+ if handle not in self:
+ self[handle] = repository(handle)
+ self[handle].add(service_uri = service_uri,
+ bpki_certificate = bpki_certificate)
+
+ def xml(self, e):
+ for c in self.itervalues():
+ c.xml(e)
+
+ @classmethod
+ def from_csv(cls, fxcert, entitydb):
+ """
+ Parse repository data from entitydb.
+ """
+ self = cls()
+ for f in entitydb.iterate("repositories", "*.xml"):
+ h = os.path.splitext(os.path.split(f)[-1])[0]
+ r = etree_read(f)
+ assert r.get("type") == "confirmed"
+ self.add(handle = h,
+ service_uri = r.get("service_uri"),
+ bpki_certificate = fxcert(r.findtext("bpki_server_ta")))
+ return self
+
+class csv_reader(object):
+ """
+ Reader for tab-delimited text that's (slightly) friendlier than the
+ stock Python csv module (which isn't intended for direct use by
+ humans anyway, and neither was this package originally, but that
+ seems to be the way that it has evolved...).
+
+ Columns parameter specifies how many columns users of the reader
+ expect to see; lines with fewer columns will be padded with None
+ values.
+
+ Original API design for this class courtesy of Warren Kumari, but
+ don't blame him if you don't like what I did with his ideas.
+ """
+
+ def __init__(self, filename, columns = None, min_columns = None, comment_characters = "#;"):
+ assert columns is None or isinstance(columns, int)
+ assert min_columns is None or isinstance(min_columns, int)
+ if columns is not None and min_columns is None:
+ min_columns = columns
+ self.filename = filename
+ self.columns = columns
+ self.min_columns = min_columns
+ self.comment_characters = comment_characters
+ self.file = open(filename, "r")
+
+ def __iter__(self):
+ line_number = 0
+ for line in self.file:
+ line_number += 1
+ line = line.strip()
+ if not line or line[0] in self.comment_characters:
+ continue
+ fields = line.split()
+ if self.min_columns is not None and len(fields) < self.min_columns:
+ raise RuntimeError, "%s:%d: Not enough columns in line %r" % (self.filename, line_number, line)
+ if self.columns is not None and len(fields) > self.columns:
+ raise RuntimeError, "%s:%d: Too many columns in line %r" % (self.filename, line_number, line)
+ if self.columns is not None and len(fields) < self.columns:
+ fields += tuple(None for i in xrange(self.columns - len(fields)))
+ yield fields
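+
+# Illustrative usage (the filename is made up): with columns = 3 and
+# min_columns = 2, a two-field line is padded out with None, so the
+# missing third column shows up as group = None.
+#
+#   for prefix, asn, group in csv_reader("roa.csv", columns = 3, min_columns = 2):
+#     ...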
+
+class csv_writer(object):
+ """
+ Writer object for tab delimited text. We just use the stock CSV
+ module in excel-tab mode for this.
+ """
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.file = open(filename, "w")
+ self.writer = csv.writer(self.file, dialect = csv.get_dialect("excel-tab"))
+
+ def close(self):
+ """
+ Close this writer.
+ """
+ self.file.close()
+
+ def __getattr__(self, attr):
+ """
+ Fake inheritance from whatever object csv.writer deigns to give us.
+ """
+ return getattr(self.writer, attr)
+
+def PEMElement(e, tag, filename, **kwargs):
+ """
+ Create an XML element containing Base64 encoded data taken from a
+ PEM file.
+ """
+ lines = open(filename).readlines()
+ while lines:
+ if lines.pop(0).startswith("-----BEGIN "):
+ break
+ while lines:
+ if lines.pop(-1).startswith("-----END "):
+ break
+ if e.text is None:
+ e.text = "\n"
+ se = SubElement(e, tag, **kwargs)
+ se.text = "\n" + "".join(lines)
+ se.tail = "\n"
+ return se
+
+class CA(object):
+ """
+ Representation of one certification authority.
+ """
+
+ # Mapping of path restriction values we use to OpenSSL config file
+ # section names.
+
+ path_restriction = { 0 : "ca_x509_ext_xcert0",
+ 1 : "ca_x509_ext_xcert1" }
+
+ def __init__(self, cfg_file, dir):
+ self.cfg = cfg_file
+ self.dir = dir
+ self.cer = dir + "/ca.cer"
+ self.key = dir + "/ca.key"
+ self.req = dir + "/ca.req"
+ self.crl = dir + "/ca.crl"
+ self.index = dir + "/index"
+ self.serial = dir + "/serial"
+ self.crlnum = dir + "/crl_number"
+
+ cfg = rpki.config.parser(cfg_file, "myrpki")
+ self.openssl = cfg.get("openssl", "openssl")
+
+ self.env = { "PATH" : os.environ["PATH"],
+ "BPKI_DIRECTORY" : dir,
+ "RANDFILE" : ".OpenSSL.whines.unless.I.set.this",
+ "OPENSSL_CONF" : cfg_file }
+
+ def run_openssl(self, *cmd, **kwargs):
+ """
+    Run an OpenSSL command, suppressing stderr unless OpenSSL returns
+    failure, and return stdout.
+ """
+ stdin = kwargs.pop("stdin", None)
+ env = self.env.copy()
+ env.update(kwargs)
+ cmd = (self.openssl,) + cmd
+ p = subprocess.Popen(cmd, env = env, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
+ stdout, stderr = p.communicate(stdin)
+ if p.wait() != 0:
+ sys.stderr.write("OpenSSL command failed: " + stderr + "\n")
+ raise subprocess.CalledProcessError(returncode = p.returncode, cmd = cmd)
+ return stdout
+
+ def run_ca(self, *args):
+ """
+ Run OpenSSL "ca" command with common initial arguments.
+ """
+ self.run_openssl("ca", "-batch", "-config", self.cfg, *args)
+
+ def run_req(self, key_file, req_file, log_key = sys.stdout):
+ """
+ Run OpenSSL "genrsa" and "req" commands.
+ """
+ if not os.path.exists(key_file):
+ if log_key:
+ log_key.write("Generating 2048-bit RSA key %s\n" % os.path.realpath(key_file))
+ self.run_openssl("genrsa", "-out", key_file, "2048")
+ if not os.path.exists(req_file):
+ self.run_openssl("req", "-new", "-sha256", "-config", self.cfg, "-key", key_file, "-out", req_file)
+
+ def run_dgst(self, input, algorithm = "md5"):
+ """
+ Run OpenSSL "dgst" command, return cleaned-up result.
+ """
+ hash = self.run_openssl("dgst", "-" + algorithm, stdin = input)
+ #
+ # Twits just couldn't leave well enough alone, grr.
+ hash = "".join(hash.split())
+ if hash.startswith("(stdin)="):
+ hash = hash[len("(stdin)="):]
+ return hash
+
+ @staticmethod
+ def touch_file(filename, content = None):
+ """
+ Create dumb little text files expected by OpenSSL "ca" utility.
+ """
+ if not os.path.exists(filename):
+ f = open(filename, "w")
+ if content is not None:
+ f.write(content)
+ f.close()
+
+ def setup(self, ca_name):
+ """
+ Set up this CA. ca_name is an X.509 distinguished name in
+ /tag=val/tag=val format.
+ """
+
+ modified = False
+
+ if not os.path.exists(self.dir):
+ os.makedirs(self.dir)
+ self.touch_file(self.index)
+ self.touch_file(self.serial, "01\n")
+ self.touch_file(self.crlnum, "01\n")
+
+ self.run_req(key_file = self.key, req_file = self.req)
+
+ if not os.path.exists(self.cer):
+ modified = True
+ self.run_ca("-selfsign", "-extensions", "ca_x509_ext_ca", "-subj", ca_name, "-in", self.req, "-out", self.cer)
+
+ if not os.path.exists(self.crl):
+ modified = True
+ self.run_ca("-gencrl", "-out", self.crl)
+
+ return modified
+
+ def ee(self, ee_name, base_name):
+ """
+    Issue an end-entity certificate.
+ """
+ key_file = "%s/%s.key" % (self.dir, base_name)
+ req_file = "%s/%s.req" % (self.dir, base_name)
+ cer_file = "%s/%s.cer" % (self.dir, base_name)
+ self.run_req(key_file = key_file, req_file = req_file)
+ if not os.path.exists(cer_file):
+ self.run_ca("-extensions", "ca_x509_ext_ee", "-subj", ee_name, "-in", req_file, "-out", cer_file)
+ return True
+ else:
+ return False
+
+ def cms_xml_sign(self, ee_name, base_name, elt):
+ """
+ Sign an XML object with CMS, return Base64 text.
+ """
+ self.ee(ee_name, base_name)
+ return base64.b64encode(self.run_openssl(
+ "cms", "-sign", "-binary", "-outform", "DER",
+ "-keyid", "-md", "sha256", "-nodetach", "-nosmimecap",
+ "-econtent_type", ".".join(str(i) for i in rpki.oids.name2oid["id-ct-xml"]),
+ "-inkey", "%s/%s.key" % (self.dir, base_name),
+ "-signer", "%s/%s.cer" % (self.dir, base_name),
+ stdin = ElementToString(etree_pre_write(elt))))
+
+ def cms_xml_verify(self, b64, ca):
+ """
+ Attempt to verify and extract XML from a Base64-encoded signed CMS
+ object. CA is the filename of a certificate that we expect to be
+ the issuer of the EE certificate bundled with the CMS, and must
+ previously have been cross-certified under our trust anchor.
+ """
+ # In theory, we should be able to use the -certfile parameter to
+ # pass in the CA certificate, but in practice, I have never gotten
+ # this to work, either with the command line tool or in the
+ # OpenSSL C API. Dunno why. Passing both TA and CA via -CAfile
+ # does work, so we do that, using a temporary file, sigh.
+ CAfile = os.path.join(self.dir, "temp.%s.pem" % os.getpid())
+ try:
+ f = open(CAfile, "w")
+ f.write(open(self.cer).read())
+ f.write(open(ca).read())
+ f.close()
+ return etree_post_read(ElementFromString(self.run_openssl(
+ "cms", "-verify", "-inform", "DER", "-CAfile", CAfile,
+ stdin = base64.b64decode(b64))))
+ finally:
+ if os.path.exists(CAfile):
+ os.unlink(CAfile)
+
+ def bsc(self, pkcs10):
+ """
+    Issue a BSC certificate, if we have a PKCS #10 request for it.
+ """
+
+ if pkcs10 is None:
+ return None, None
+
+ pkcs10 = base64.b64decode(pkcs10)
+
+ hash = self.run_dgst(pkcs10)
+
+ req_file = "%s/bsc.%s.req" % (self.dir, hash)
+ cer_file = "%s/bsc.%s.cer" % (self.dir, hash)
+
+ if not os.path.exists(cer_file):
+ self.run_openssl("req", "-inform", "DER", "-out", req_file, stdin = pkcs10)
+ self.run_ca("-extensions", "ca_x509_ext_ee", "-in", req_file, "-out", cer_file)
+
+ return req_file, cer_file
+
+ def fxcert(self, b64, filename = None, path_restriction = 0):
+ """
+ Write PEM certificate to file, then cross-certify.
+ """
+ fn = os.path.join(self.dir, filename or "temp.%s.cer" % os.getpid())
+ try:
+ self.run_openssl("x509", "-inform", "DER", "-out", fn,
+ stdin = base64.b64decode(b64))
+ return self.xcert(fn, path_restriction)
+ finally:
+ if not filename and os.path.exists(fn):
+ os.unlink(fn)
+
+ def xcert_filename(self, cert):
+ """
+ Generate filename for a cross-certification.
+
+    Extracts the public key and subject name from a PEM file and hashes
+    them, so we can use the result as a tag for cross-certifying this cert.
+ """
+
+ if cert and os.path.exists(cert):
+ return "%s/xcert.%s.cer" % (self.dir, self.run_dgst(self.run_openssl(
+ "x509", "-noout", "-pubkey", "-subject", "-in", cert)).strip())
+ else:
+ return None
+
+ def xcert(self, cert, path_restriction = 0):
+ """
+ Cross-certify a certificate represented as a PEM file, if we
+ haven't already. This only works for self-signed certs, due to
+ limitations of the OpenSSL command line tool, but that suffices
+ for our purposes.
+ """
+
+ xcert = self.xcert_filename(cert)
+ if not os.path.exists(xcert):
+ self.run_ca("-ss_cert", cert, "-out", xcert, "-extensions", self.path_restriction[path_restriction])
+ return xcert
+
+ def xcert_revoke(self, cert):
+ """
+ Revoke a cross-certification and regenerate CRL.
+ """
+
+ xcert = self.xcert_filename(cert)
+ if xcert:
+ self.run_ca("-revoke", xcert)
+ self.run_ca("-gencrl", "-out", self.crl)
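+
+# Illustrative usage (directory and DNs made up), roughly what
+# do_initialize() below does with the BPKI CAs:
+#
+#   ca = CA("myrpki.conf", "bpki/resources")
+#   ca.setup("/CN=Example BPKI Resource Trust Anchor")
+#   ca.ee("/CN=Example rpkid server certificate", "rpkid")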
+
+def etree_validate(e):
+  # This is a kludge; the schema should be loaded as a module or
+  # configured in .conf, but it will do as a temporary debugging hack.
+ schema = os.getenv("MYRPKI_RNG")
+ if schema:
+ try:
+ import lxml.etree
+ except ImportError:
+ return
+ try:
+ lxml.etree.RelaxNG(file = schema).assertValid(e)
+ except lxml.etree.RelaxNGParseError:
+ return
+ except lxml.etree.DocumentInvalid:
+ print lxml.etree.tostring(e, pretty_print = True)
+ raise
+
+def etree_write(e, filename, verbose = False, validate = True, msg = None):
+ """
+ Write out an etree to a file, safely.
+
+ I still miss SYSCAL(RENMWO).
+ """
+ filename = os.path.realpath(filename)
+ tempname = filename
+ if not filename.startswith("/dev/"):
+ tempname += ".tmp"
+ if verbose or msg:
+ print "Writing", filename
+ if msg:
+ print msg
+ e = etree_pre_write(e, validate)
+ ElementTree(e).write(tempname)
+ if tempname != filename:
+ os.rename(tempname, filename)
+
+def etree_pre_write(e, validate = True):
+ """
+ Do the namespace frobbing needed on write; broken out of
+ etree_write() because also needed with ElementToString().
+ """
+ e = copy.deepcopy(e)
+ e.set("version", version)
+ for i in e.getiterator():
+ if i.tag[0] != "{":
+ i.tag = namespaceQName + i.tag
+ assert i.tag.startswith(namespaceQName)
+ if validate:
+ etree_validate(e)
+ return e
+
+def etree_read(filename, verbose = False, validate = True):
+ """
+ Read an etree from a file, verifying then stripping XML namespace
+ cruft.
+ """
+ if verbose:
+ print "Reading", filename
+ e = ElementTree(file = filename).getroot()
+ return etree_post_read(e, validate)
+
+def etree_post_read(e, validate = True):
+ """
+ Do the namespace frobbing needed on read; broken out of etree_read()
+  because also needed by ElementFromString().
+ """
+ if validate:
+ etree_validate(e)
+ for i in e.getiterator():
+ if i.tag.startswith(namespaceQName):
+ i.tag = i.tag[len(namespaceQName):]
+ else:
+ raise RuntimeError, "XML tag %r is not in namespace %r" % (i.tag, namespace)
+ return e
+
+def b64_equal(thing1, thing2):
+ """
+ Compare two Base64-encoded values for equality.
+ """
+ return "".join(thing1.split()) == "".join(thing2.split())
+
+
+
+class main(rpki.cli.Cmd):
+
+ prompt = "myrpki> "
+
+ completedefault = rpki.cli.Cmd.filename_complete
+
+ show_xml = False
+
+ def __init__(self):
+ os.environ["TZ"] = "UTC"
+ time.tzset()
+
+ rpki.log.use_syslog = False
+
+ self.cfg_file = os.getenv("MYRPKI_CONF", "myrpki.conf")
+
+ opts, argv = getopt.getopt(sys.argv[1:], "c:h?", ["config=", "help"])
+ for o, a in opts:
+ if o in ("-c", "--config"):
+ self.cfg_file = a
+ elif o in ("-h", "--help", "-?"):
+ argv = ["help"]
+
+ if not argv or argv[0] != "help":
+ rpki.log.init("myrpki")
+ self.read_config()
+
+ rpki.cli.Cmd.__init__(self, argv)
+
+
+ def help_overview(self):
+ """
+ Show program __doc__ string. Perhaps there's some clever way to
+ do this using the textwrap module, but for now something simple
+ and crude will suffice.
+ """
+ for line in __doc__.splitlines(True):
+ self.stdout.write(" " * 4 + line)
+ self.stdout.write("\n")
+
+ def entitydb_complete(self, prefix, text, line, begidx, endidx):
+ """
+ Completion helper for entitydb filenames.
+ """
+ names = []
+ for name in self.entitydb.iterate(prefix, "*.xml"):
+ name = os.path.splitext(os.path.basename(name))[0]
+ if name.startswith(text):
+ names.append(name)
+ return names
+
+ def read_config(self):
+
+ self.cfg = rpki.config.parser(self.cfg_file, "myrpki")
+
+ self.histfile = self.cfg.get("history_file", ".myrpki_history")
+ self.handle = self.cfg.get("handle")
+ self.run_rpkid = self.cfg.getboolean("run_rpkid")
+ self.run_pubd = self.cfg.getboolean("run_pubd")
+ self.run_rootd = self.cfg.getboolean("run_rootd")
+ self.entitydb = EntityDB(self.cfg)
+
+ if self.run_rootd and (not self.run_pubd or not self.run_rpkid):
+ raise RuntimeError, "Can't run rootd unless also running rpkid and pubd"
+
+ self.bpki_resources = CA(self.cfg_file, self.cfg.get("bpki_resources_directory"))
+ if self.run_rpkid or self.run_pubd or self.run_rootd:
+ self.bpki_servers = CA(self.cfg_file, self.cfg.get("bpki_servers_directory"))
+
+ self.pubd_contact_info = self.cfg.get("pubd_contact_info", "")
+
+ self.rsync_module = self.cfg.get("publication_rsync_module")
+ self.rsync_server = self.cfg.get("publication_rsync_server")
+
+
+ def do_initialize(self, arg):
+ """
+ Initialize an RPKI installation. This command reads the
+ configuration file, creates the BPKI and EntityDB directories,
+ generates the initial BPKI certificates, and creates an XML file
+ describing the resource-holding aspect of this RPKI installation.
+ """
+
+ if arg:
+ raise RuntimeError, "This command takes no arguments"
+
+ self.bpki_resources.setup(self.cfg.get("bpki_resources_ta_dn",
+ "/CN=%s BPKI Resource Trust Anchor" % self.handle))
+ if self.run_rpkid or self.run_pubd or self.run_rootd:
+ self.bpki_servers.setup(self.cfg.get("bpki_servers_ta_dn",
+ "/CN=%s BPKI Server Trust Anchor" % self.handle))
+
+ # Create entitydb directories.
+
+ for i in ("parents", "children", "repositories", "pubclients"):
+ d = self.entitydb(i)
+ if not os.path.exists(d):
+ os.makedirs(d)
+
+ if self.run_rpkid or self.run_pubd or self.run_rootd:
+
+ if self.run_rpkid:
+ self.bpki_servers.ee(self.cfg.get("bpki_rpkid_ee_dn",
+ "/CN=%s rpkid server certificate" % self.handle), "rpkid")
+ self.bpki_servers.ee(self.cfg.get("bpki_irdbd_ee_dn",
+ "/CN=%s irdbd server certificate" % self.handle), "irdbd")
+ if self.run_pubd:
+ self.bpki_servers.ee(self.cfg.get("bpki_pubd_ee_dn",
+ "/CN=%s pubd server certificate" % self.handle), "pubd")
+ if self.run_rpkid or self.run_pubd:
+ self.bpki_servers.ee(self.cfg.get("bpki_irbe_ee_dn",
+ "/CN=%s irbe client certificate" % self.handle), "irbe")
+ if self.run_rootd:
+ self.bpki_servers.ee(self.cfg.get("bpki_rootd_ee_dn",
+ "/CN=%s rootd server certificate" % self.handle), "rootd")
+
+ # Build the identity.xml file. Need to check for existing file so we don't
+ # overwrite? Worry about that later.
+
+ e = Element("identity", handle = self.handle)
+ PEMElement(e, "bpki_ta", self.bpki_resources.cer)
+ etree_write(e, self.entitydb("identity.xml"),
+ msg = None if self.run_rootd else 'This is the "identity" file you will need to send to your parent')
+
+ # If we're running rootd, construct a fake parent to go with it,
+ # and cross-certify in both directions so we can talk to rootd.
+
+ if self.run_rootd:
+
+ e = Element("parent", parent_handle = self.handle, child_handle = self.handle,
+ service_uri = "http://localhost:%s/" % self.cfg.get("rootd_server_port"),
+ valid_until = str(rpki.sundial.now() + rpki.sundial.timedelta(days = 365)))
+ PEMElement(e, "bpki_resource_ta", self.bpki_servers.cer)
+ PEMElement(e, "bpki_server_ta", self.bpki_servers.cer)
+ PEMElement(e, "bpki_child_ta", self.bpki_resources.cer)
+ SubElement(e, "repository", type = "offer")
+ etree_write(e, self.entitydb("parents", "%s.xml" % self.handle))
+
+ self.bpki_resources.xcert(self.bpki_servers.cer)
+
+ rootd_child_fn = self.cfg.get("child-bpki-cert", None, "rootd")
+ if not os.path.exists(rootd_child_fn):
+ os.link(self.bpki_servers.xcert(self.bpki_resources.cer), rootd_child_fn)
+
+ repo_file_name = self.entitydb("repositories", "%s.xml" % self.handle)
+
+ try:
+ want_offer = etree_read(repo_file_name).get("type") != "confirmed"
+ except IOError:
+ want_offer = True
+
+ if want_offer:
+ e = Element("repository", type = "offer", handle = self.handle, parent_handle = self.handle)
+ PEMElement(e, "bpki_client_ta", self.bpki_resources.cer)
+ etree_write(e, repo_file_name,
+ msg = 'This is the "repository offer" file for you to use if you want to publish in your own repository')
+
+ def do_configure_child(self, arg):
+ """
+ Configure a new child of this RPKI entity, given the child's XML
+ identity file as an input. This command extracts the child's data
+ from the XML, cross-certifies the child's resource-holding BPKI
+ certificate, and generates an XML file describing the relationship
+ between the child and this parent, including this parent's BPKI
+ data and up-down protocol service URI.
+ """
+
+ child_handle = None
+
+ opts, argv = getopt.getopt(arg.split(), "", ["child_handle="])
+ for o, a in opts:
+ if o == "--child_handle":
+ child_handle = a
+
+ if len(argv) != 1:
+ raise RuntimeError, "Need to specify filename for child.xml"
+
+ c = etree_read(argv[0])
+
+ if child_handle is None:
+ child_handle = c.get("handle")
+
+ try:
+ e = etree_read(self.cfg.get("xml_filename"))
+ service_uri_base = e.get("service_uri")
+ server_ta = e.findtext("bpki_server_ta")
+ except IOError:
+ service_uri_base = None
+ server_ta = None
+
+ if not service_uri_base and self.run_rpkid:
+ service_uri_base = "http://%s:%s/up-down/%s" % (self.cfg.get("rpkid_server_host"),
+ self.cfg.get("rpkid_server_port"),
+ self.handle)
+ if not service_uri_base or not server_ta:
+ print "Sorry, you can't set up children of a hosted config that itself has not yet been set up"
+ return
+
+ print "Child calls itself %r, we call it %r" % (c.get("handle"), child_handle)
+
+ if self.run_rpkid or self.run_pubd or self.run_rootd:
+ self.bpki_servers.fxcert(c.findtext("bpki_ta"))
+
+ e = Element("parent", parent_handle = self.handle, child_handle = child_handle,
+ service_uri = "%s/%s" % (service_uri_base, child_handle),
+ valid_until = str(rpki.sundial.now() + rpki.sundial.timedelta(days = 365)))
+
+ PEMElement(e, "bpki_resource_ta", self.bpki_resources.cer)
+ if self.run_rpkid or self.run_pubd or self.run_rootd:
+ PEMElement(e, "bpki_server_ta", self.bpki_servers.cer)
+ else:
+ assert server_ta is not None
+ SubElement(e, "bpki_server_ta").text = server_ta
+ SubElement(e, "bpki_child_ta").text = c.findtext("bpki_ta")
+
+ try:
+ repo = None
+ for f in self.entitydb.iterate("repositories", "*.xml"):
+ r = etree_read(f)
+ if r.get("type") == "confirmed":
+ if repo is not None:
+ raise RuntimeError, "Too many repositories, I don't know what to do, not giving referral"
+ repo_handle = os.path.splitext(os.path.split(f)[-1])[0]
+ repo = r
+ if repo is None:
+ raise RuntimeError, "Couldn't find any usable repositories, not giving referral"
+
+ if repo_handle == self.handle:
+ SubElement(e, "repository", type = "offer")
+ else:
+ proposed_sia_base = repo.get("sia_base") + child_handle + "/"
+ r = Element("referral", authorized_sia_base = proposed_sia_base)
+ r.text = c.findtext("bpki_ta")
+ auth = self.bpki_resources.cms_xml_sign(
+ "/CN=%s Publication Referral" % self.handle, "referral", r)
+ r = SubElement(e, "repository", type = "referral")
+ SubElement(r, "authorization", referrer = repo.get("client_handle")).text = auth
+ SubElement(r, "contact_info").text = repo.findtext("contact_info")
+
+ except RuntimeError, err:
+ print err
+
+ etree_write(e, self.entitydb("children", "%s.xml" % child_handle),
+ msg = "Send this file back to the child you just configured")
+
+
+ def do_delete_child(self, arg):
+ """
+ Delete a child of this RPKI entity.
+
+ This should check that the XML file it's deleting really is a
+ child, but doesn't, yet.
+ """
+
+ try:
+ os.unlink(self.entitydb("children", "%s.xml" % arg))
+ except OSError:
+ print "No such child \"%s\"" % arg
+
+ def complete_delete_child(self, *args):
+ return self.entitydb_complete("children", *args)
+
+
+ def do_configure_parent(self, arg):
+ """
+ Configure a new parent of this RPKI entity, given the output of
+ the parent's configure_child command as input. This command reads
+ the parent's response XML, extracts the parent's BPKI and service
+ URI information, cross-certifies the parent's BPKI data into this
+ entity's BPKI, and checks for offers or referrals of publication
+ service. If a publication offer or referral is present, we
+ generate a request-for-service message to that repository, in case
+ the user wants to avail herself of the referral or offer.
+ """
+
+ parent_handle = None
+
+ opts, argv = getopt.getopt(arg.split(), "", ["parent_handle="])
+ for o, a in opts:
+ if o == "--parent_handle":
+ parent_handle = a
+
+ if len(argv) != 1:
+ raise RuntimeError, "Need to specify filename for parent.xml on command line"
+
+ p = etree_read(argv[0])
+
+ if parent_handle is None:
+ parent_handle = p.get("parent_handle")
+
+ print "Parent calls itself %r, we call it %r" % (p.get("parent_handle"), parent_handle)
+ print "Parent calls us %r" % p.get("child_handle")
+
+ self.bpki_resources.fxcert(p.findtext("bpki_resource_ta"))
+ self.bpki_resources.fxcert(p.findtext("bpki_server_ta"))
+
+ etree_write(p, self.entitydb("parents", "%s.xml" % parent_handle))
+
+ r = p.find("repository")
+
+ if r is not None and r.get("type") in ("offer", "referral"):
+ r.set("handle", self.handle)
+ r.set("parent_handle", parent_handle)
+ PEMElement(r, "bpki_client_ta", self.bpki_resources.cer)
+ etree_write(r, self.entitydb("repositories", "%s.xml" % parent_handle),
+ msg = 'This is the "repository %s" file to send to the repository operator' % r.get("type"))
+ else:
+ print "Couldn't find repository offer or referral"
+
+
+ def do_delete_parent(self, arg):
+ """
+ Delete a parent of this RPKI entity.
+
+ This should check that the XML file it's deleting really is a
+ parent, but doesn't, yet.
+ """
+
+ try:
+ os.unlink(self.entitydb("parents", "%s.xml" % arg))
+ except OSError:
+ print "No such parent \"%s\"" % arg
+
+ def complete_delete_parent(self, *args):
+ return self.entitydb_complete("parents", *args)
+
+
+ def do_configure_publication_client(self, arg):
+ """
+ Configure publication server to know about a new client, given the
+ client's request-for-service message as input. This command reads
+ the client's request for service, cross-certifies the client's
+ BPKI data, and generates a response message containing the
+ repository's BPKI data and service URI.
+ """
+
+ sia_base = None
+
+ opts, argv = getopt.getopt(arg.split(), "", ["sia_base="])
+ for o, a in opts:
+ if o == "--sia_base":
+ sia_base = a
+
+ if len(argv) != 1:
+ raise RuntimeError, "Need to specify filename for client.xml"
+
+ client = etree_read(argv[0])
+
+ if sia_base is None:
+
+ auth = client.find("authorization")
+ if auth is not None:
+ print "Found <authorization/> element, this looks like a referral"
+ referrer = etree_read(self.entitydb("pubclients", "%s.xml" % auth.get("referrer").replace("/",".")))
+ referrer = self.bpki_servers.fxcert(referrer.findtext("bpki_client_ta"))
+ referral = self.bpki_servers.cms_xml_verify(auth.text, referrer)
+ if not b64_equal(referral.text, client.findtext("bpki_client_ta")):
+ raise RuntimeError, "Referral trust anchor does not match"
+ sia_base = referral.get("authorized_sia_base")
+
+ elif client.get("parent_handle") == self.handle:
+ print "Client claims to be our child, checking"
+ client_ta = client.findtext("bpki_client_ta")
+ assert client_ta
+ for child in self.entitydb.iterate("children", "*.xml"):
+ c = etree_read(child)
+ if b64_equal(c.findtext("bpki_child_ta"), client_ta):
+ sia_base = "rsync://%s/%s/%s/%s/" % (self.rsync_server, self.rsync_module,
+ self.handle, client.get("handle"))
+ break
+
+ # If we still haven't figured out what to do with this client, it
+ # gets a top-level tree of its own, no attempt at nesting.
+
+ if sia_base is None:
+ print "Don't know where to nest this client, defaulting to top-level"
+ sia_base = "rsync://%s/%s/%s/" % (self.rsync_server, self.rsync_module, client.get("handle"))
+
+ assert sia_base.startswith("rsync://")
+
+ client_handle = "/".join(sia_base.rstrip("/").split("/")[4:])
+
+ parent_handle = client.get("parent_handle")
+
+ print "Client calls itself %r, we call it %r" % (client.get("handle"), client_handle)
+ print "Client says its parent handle is %r" % parent_handle
+
+ self.bpki_servers.fxcert(client.findtext("bpki_client_ta"))
+
+ e = Element("repository", type = "confirmed",
+ client_handle = client_handle,
+ parent_handle = parent_handle,
+ sia_base = sia_base,
+ service_uri = "http://%s:%s/client/%s" % (self.cfg.get("pubd_server_host"),
+ self.cfg.get("pubd_server_port"),
+ client_handle))
+
+ PEMElement(e, "bpki_server_ta", self.bpki_servers.cer)
+ SubElement(e, "bpki_client_ta").text = client.findtext("bpki_client_ta")
+ SubElement(e, "contact_info").text = self.pubd_contact_info
+ etree_write(e, self.entitydb("pubclients", "%s.xml" % client_handle.replace("/", ".")),
+ msg = "Send this file back to the publication client you just configured")
+
+
+ def do_delete_publication_client(self, arg):
+ """
+ Delete a publication client of this RPKI entity.
+
+ This should check that the XML file it's deleting really is a
+ client, but doesn't, yet.
+ """
+
+ try:
+ os.unlink(self.entitydb("pubclients", "%s.xml" % arg))
+ except OSError:
+ print "No such client \"%s\"" % arg
+
+ def complete_delete_publication_client(self, *args):
+ return self.entitydb_complete("pubclients", *args)
+
+
+ def do_configure_repository(self, arg):
+ """
+ Configure a publication repository for this RPKI entity, given the
+ repository's response to our request-for-service message as input.
+ This command reads the repository's response, extracts and
+ cross-certifies the BPKI data and service URI, and links the
+ repository data with the corresponding parent data in our local
+ database.
+ """
+
+ argv = arg.split()
+
+ if len(argv) != 1:
+ raise RuntimeError, "Need to specify filename for repository.xml on command line"
+
+ r = etree_read(argv[0])
+
+ parent_handle = r.get("parent_handle")
+
+ print "Repository calls us %r" % (r.get("client_handle"))
+ print "Repository response associated with parent_handle %r" % parent_handle
+
+ etree_write(r, self.entitydb("repositories", "%s.xml" % parent_handle))
+
+
+ def do_delete_repository(self, arg):
+ """
+ Delete a repository of this RPKI entity.
+
+ This should check that the XML file it's deleting really is a
+ repository, but doesn't, yet.
+ """
+
+ try:
+ os.unlink(self.entitydb("repositories", "%s.xml" % arg))
+ except OSError:
+ print "No such repository \"%s\"" % arg
+
+ def complete_delete_repository(self, *args):
+ return self.entitydb_complete("repositories", *args)
+
+
+
+
+ def configure_resources_main(self, msg = None):
+ """
+ Main program of old myrpki.py script. This remains separate
+ because it's called from more than one place.
+ """
+
+ roa_csv_file = self.cfg.get("roa_csv")
+ prefix_csv_file = self.cfg.get("prefix_csv")
+ asn_csv_file = self.cfg.get("asn_csv")
+
+ # This probably should become an argument instead of (or in
+ # addition to a default from?) a config file option.
+ xml_filename = self.cfg.get("xml_filename")
+
+ try:
+ e = etree_read(xml_filename)
+ bsc_req, bsc_cer = self.bpki_resources.bsc(e.findtext("bpki_bsc_pkcs10"))
+ service_uri = e.get("service_uri")
+ server_ta = e.findtext("bpki_server_ta")
+ except IOError:
+ bsc_req, bsc_cer = None, None
+ service_uri = None
+ server_ta = None
+
+ e = Element("myrpki", handle = self.handle)
+
+ if service_uri:
+ e.set("service_uri", service_uri)
+
+ roa_requests.from_csv(roa_csv_file).xml(e)
+
+ children.from_csv(
+ prefix_csv_file = prefix_csv_file,
+ asn_csv_file = asn_csv_file,
+ fxcert = self.bpki_resources.fxcert,
+ entitydb = self.entitydb).xml(e)
+
+ parents.from_csv( fxcert = self.bpki_resources.fxcert, entitydb = self.entitydb).xml(e)
+ repositories.from_csv(fxcert = self.bpki_resources.fxcert, entitydb = self.entitydb).xml(e)
+
+ PEMElement(e, "bpki_ca_certificate", self.bpki_resources.cer)
+ PEMElement(e, "bpki_crl", self.bpki_resources.crl)
+
+ if bsc_cer:
+ PEMElement(e, "bpki_bsc_certificate", bsc_cer)
+
+ if bsc_req:
+ PEMElement(e, "bpki_bsc_pkcs10", bsc_req)
+
+ if server_ta:
+ SubElement(e, "bpki_server_ta").text = server_ta
+
+ etree_write(e, xml_filename, msg = msg)
+
+
+ def do_configure_resources(self, arg):
+ """
+ Read CSV files and all the descriptions of parents and children
+    that we've built up, and package the result as a single XML file to
+ be shipped to a hosting rpkid.
+ """
+
+ if arg:
+ raise RuntimeError, "Unexpected argument %r" % arg
+ self.configure_resources_main(msg = "Send this file to the rpkid operator who is hosting you")
+
+
+
+ def do_configure_daemons(self, arg):
+ """
+ Configure RPKI daemons with the data built up by the other
+ commands in this program.
+
+ The basic model here is that each entity with resources to certify
+ runs the myrpki tool, but not all of them necessarily run their
+ own RPKI engines. The entities that do run RPKI engines get data
+ from the entities they host via the XML files output by the
+ configure_resources command. Those XML files are the input to
+ this command, which uses them to do all the work of configuring
+ daemons, populating SQL databases, and so forth. A few operations
+ (eg, BSC construction) generate data which has to be shipped back
+ to the resource holder, which we do by updating the same XML file.
+
+ In essence, the XML files are a sneakernet (or email, or carrier
+ pigeon) communication channel between the resource holders and the
+ RPKI engine operators.
+
+ As a convenience, for the normal case where the RPKI engine
+ operator is itself a resource holder, this command in effect runs
+ the configure_resources command automatically to process the RPKI
+ engine operator's own resources.
+
+ Note that, due to the back and forth nature of some of these
+    operations, it may take several cycles for data structures to stabilize
+ and everything to reach a steady state. This is normal.
+ """
+
+ argv = arg.split()
+
+ try:
+ import rpki.http, rpki.resource_set, rpki.relaxng, rpki.exceptions
+ import rpki.left_right, rpki.x509, rpki.async
+ if hasattr(warnings, "catch_warnings"):
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ import MySQLdb
+ else:
+ import MySQLdb
+
+ except ImportError, e:
+ print "Sorry, you appear to be missing some of the Python modules needed to run this command"
+ print "[Error: %r]" % e
+
+ def findbase64(tree, name, b64type = rpki.x509.X509):
+ x = tree.findtext(name)
+ return b64type(Base64 = x) if x else None
+
+ # We can use a single BSC for everything -- except BSC key
+ # rollovers. Drive off that bridge when we get to it.
+
+ bsc_handle = "bsc"
+
+ self.cfg.set_global_flags()
+
+ # Default values for CRL parameters are low, for testing. Not
+    # quite as low as they once were; too much whining about expired CRLs.
+
+ self_crl_interval = self.cfg.getint("self_crl_interval", 2 * 60 * 60)
+ self_regen_margin = self.cfg.getint("self_regen_margin", self_crl_interval / 4)
+ pubd_base = "http://%s:%s/" % (self.cfg.get("pubd_server_host"), self.cfg.get("pubd_server_port"))
+ rpkid_base = "http://%s:%s/" % (self.cfg.get("rpkid_server_host"), self.cfg.get("rpkid_server_port"))
+
+ # Wrappers to simplify calling rpkid and pubd.
+
+ call_rpkid = rpki.async.sync_wrapper(rpki.http.caller(
+ proto = rpki.left_right,
+ client_key = rpki.x509.RSA( PEM_file = self.bpki_servers.dir + "/irbe.key"),
+ client_cert = rpki.x509.X509(PEM_file = self.bpki_servers.dir + "/irbe.cer"),
+ server_ta = rpki.x509.X509(PEM_file = self.bpki_servers.cer),
+ server_cert = rpki.x509.X509(PEM_file = self.bpki_servers.dir + "/rpkid.cer"),
+ url = rpkid_base + "left-right",
+ debug = self.show_xml))
+
+ if self.run_pubd:
+
+ call_pubd = rpki.async.sync_wrapper(rpki.http.caller(
+ proto = rpki.publication,
+ client_key = rpki.x509.RSA( PEM_file = self.bpki_servers.dir + "/irbe.key"),
+ client_cert = rpki.x509.X509(PEM_file = self.bpki_servers.dir + "/irbe.cer"),
+ server_ta = rpki.x509.X509(PEM_file = self.bpki_servers.cer),
+ server_cert = rpki.x509.X509(PEM_file = self.bpki_servers.dir + "/pubd.cer"),
+ url = pubd_base + "control",
+ debug = self.show_xml))
+
+ # Make sure that pubd's BPKI CRL is up to date.
+
+ call_pubd(rpki.publication.config_elt.make_pdu(
+ action = "set",
+ bpki_crl = rpki.x509.CRL(PEM_file = self.bpki_servers.crl)))
+
+ irdbd_cfg = rpki.config.parser(self.cfg.get("irdbd_conf", self.cfg_file), "irdbd")
+
+ db = MySQLdb.connect(user = irdbd_cfg.get("sql-username"),
+ db = irdbd_cfg.get("sql-database"),
+ passwd = irdbd_cfg.get("sql-password"))
+
+ cur = db.cursor()
+
+ xmlfiles = []
+
+ # If [myrpki] section includes an "xml_filename" setting, run
+ # myrpki.py internally, as a convenience, and include its output at
+ # the head of our list of XML files to process.
+
+ my_xmlfile = self.cfg.get("xml_filename", "")
+ if my_xmlfile:
+ self.configure_resources_main()
+ xmlfiles.append(my_xmlfile)
+ else:
+ my_xmlfile = None
+
+ # Add any other XML files specified on the command line
+
+ xmlfiles.extend(argv)
+
+ for xmlfile in xmlfiles:
+
+      # Parse XML file and validate it against our schema
+
+ tree = etree_read(xmlfile, validate = True)
+
+ handle = tree.get("handle")
+
+ # Update IRDB with parsed resource and roa-request data.
+
+ cur.execute(
+ """
+ DELETE
+ FROM roa_request_prefix
+ USING roa_request, roa_request_prefix
+ WHERE roa_request.roa_request_id = roa_request_prefix.roa_request_id AND roa_request.roa_request_handle = %s
+ """, (handle,))
+
+ cur.execute("DELETE FROM roa_request WHERE roa_request.roa_request_handle = %s", (handle,))
+
+ for x in tree.getiterator("roa_request"):
+ cur.execute("INSERT roa_request (roa_request_handle, asn) VALUES (%s, %s)", (handle, x.get("asn")))
+ roa_request_id = cur.lastrowid
+ for version, prefix_set in ((4, rpki.resource_set.roa_prefix_set_ipv4(x.get("v4"))), (6, rpki.resource_set.roa_prefix_set_ipv6(x.get("v6")))):
+ if prefix_set:
+ cur.executemany("INSERT roa_request_prefix (roa_request_id, prefix, prefixlen, max_prefixlen, version) VALUES (%s, %s, %s, %s, %s)",
+ ((roa_request_id, p.prefix, p.prefixlen, p.max_prefixlen, version) for p in prefix_set))
+
+ cur.execute(
+ """
+ DELETE
+ FROM registrant_asn
+ USING registrant, registrant_asn
+ WHERE registrant.registrant_id = registrant_asn.registrant_id AND registrant.registry_handle = %s
+ """ , (handle,))
+
+ cur.execute(
+ """
+ DELETE FROM registrant_net USING registrant, registrant_net
+ WHERE registrant.registrant_id = registrant_net.registrant_id AND registrant.registry_handle = %s
+ """ , (handle,))
+
+ cur.execute("DELETE FROM registrant WHERE registrant.registry_handle = %s" , (handle,))
+
+ for x in tree.getiterator("child"):
+ child_handle = x.get("handle")
+ asns = rpki.resource_set.resource_set_as(x.get("asns"))
+ ipv4 = rpki.resource_set.resource_set_ipv4(x.get("v4"))
+ ipv6 = rpki.resource_set.resource_set_ipv6(x.get("v6"))
+
+ cur.execute("INSERT registrant (registrant_handle, registry_handle, registrant_name, valid_until) VALUES (%s, %s, %s, %s)",
+ (child_handle, handle, child_handle, rpki.sundial.datetime.fromXMLtime(x.get("valid_until")).to_sql()))
+ child_id = cur.lastrowid
+ if asns:
+ cur.executemany("INSERT registrant_asn (start_as, end_as, registrant_id) VALUES (%s, %s, %s)",
+ ((a.min, a.max, child_id) for a in asns))
+ if ipv4:
+ cur.executemany("INSERT registrant_net (start_ip, end_ip, version, registrant_id) VALUES (%s, %s, 4, %s)",
+ ((a.min, a.max, child_id) for a in ipv4))
+ if ipv6:
+ cur.executemany("INSERT registrant_net (start_ip, end_ip, version, registrant_id) VALUES (%s, %s, 6, %s)",
+ ((a.min, a.max, child_id) for a in ipv6))
+
+ db.commit()
+
+ # Check for certificates before attempting anything else
+
+ hosted_cacert = findbase64(tree, "bpki_ca_certificate")
+ if not hosted_cacert:
+ print "Nothing else I can do without a trust anchor for the entity I'm hosting."
+ continue
+
+ rpkid_xcert = rpki.x509.X509(PEM_file = self.bpki_servers.fxcert(b64 = hosted_cacert.get_Base64(),
+ #filename = handle + ".cacert.cer",
+ path_restriction = 1))
+
+ # See what rpkid and pubd already have on file for this entity.
+
+ if self.run_pubd:
+ client_pdus = dict((x.client_handle, x)
+ for x in call_pubd(rpki.publication.client_elt.make_pdu(action = "list"))
+ if isinstance(x, rpki.publication.client_elt))
+
+ rpkid_reply = call_rpkid(
+ rpki.left_right.self_elt.make_pdu( action = "get", tag = "self", self_handle = handle),
+ rpki.left_right.bsc_elt.make_pdu( action = "list", tag = "bsc", self_handle = handle),
+ rpki.left_right.repository_elt.make_pdu(action = "list", tag = "repository", self_handle = handle),
+ rpki.left_right.parent_elt.make_pdu( action = "list", tag = "parent", self_handle = handle),
+ rpki.left_right.child_elt.make_pdu( action = "list", tag = "child", self_handle = handle))
+
+ self_pdu = rpkid_reply[0]
+ bsc_pdus = dict((x.bsc_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.bsc_elt))
+ repository_pdus = dict((x.repository_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.repository_elt))
+ parent_pdus = dict((x.parent_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.parent_elt))
+ child_pdus = dict((x.child_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.child_elt))
+
+ pubd_query = []
+ rpkid_query = []
+
+ # There should be exactly one <self/> object per hosted entity, by definition
+
+ if (isinstance(self_pdu, rpki.left_right.report_error_elt) or
+ self_pdu.crl_interval != self_crl_interval or
+ self_pdu.regen_margin != self_regen_margin or
+ self_pdu.bpki_cert != rpkid_xcert):
+ rpkid_query.append(rpki.left_right.self_elt.make_pdu(
+ action = "create" if isinstance(self_pdu, rpki.left_right.report_error_elt) else "set",
+ tag = "self",
+ self_handle = handle,
+ bpki_cert = rpkid_xcert,
+ crl_interval = self_crl_interval,
+ regen_margin = self_regen_margin))
+
+ # In general we only need one <bsc/> per <self/>. BSC objects are a
+ # little unusual in that the PKCS #10 subelement is generated by rpkid
+ # in response to generate_keypair, so there's more of a separation
+ # between create and set than with other objects.
+
+ bsc_cert = findbase64(tree, "bpki_bsc_certificate")
+ bsc_crl = findbase64(tree, "bpki_crl", rpki.x509.CRL)
+
+ bsc_pdu = bsc_pdus.pop(bsc_handle, None)
+
+ if bsc_pdu is None:
+ rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(
+ action = "create",
+ tag = "bsc",
+ self_handle = handle,
+ bsc_handle = bsc_handle,
+ generate_keypair = "yes"))
+ elif bsc_pdu.signing_cert != bsc_cert or bsc_pdu.signing_cert_crl != bsc_crl:
+ rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(
+ action = "set",
+ tag = "bsc",
+ self_handle = handle,
+ bsc_handle = bsc_handle,
+ signing_cert = bsc_cert,
+ signing_cert_crl = bsc_crl))
+
+ rpkid_query.extend(rpki.left_right.bsc_elt.make_pdu(
+ action = "destroy", self_handle = handle, bsc_handle = b) for b in bsc_pdus)
+
+ bsc_req = None
+
+ if bsc_pdu and bsc_pdu.pkcs10_request:
+ bsc_req = bsc_pdu.pkcs10_request
+
+ # At present we need one <repository/> per <parent/>, not because
+ # rpkid requires that, but because pubd does. pubd probably should
+ # be fixed to support a single client allowed to update multiple
+ # trees, but for the moment the easiest way forward is just to
+      # enforce a 1:1 mapping between <parent/> and <repository/> objects.
+
+ for repository in tree.getiterator("repository"):
+
+ repository_handle = repository.get("handle")
+ repository_pdu = repository_pdus.pop(repository_handle, None)
+ repository_uri = repository.get("service_uri")
+ repository_cert = findbase64(repository, "bpki_certificate")
+
+ if (repository_pdu is None or
+ repository_pdu.bsc_handle != bsc_handle or
+ repository_pdu.peer_contact_uri != repository_uri or
+ repository_pdu.bpki_cert != repository_cert):
+ rpkid_query.append(rpki.left_right.repository_elt.make_pdu(
+ action = "create" if repository_pdu is None else "set",
+ tag = repository_handle,
+ self_handle = handle,
+ repository_handle = repository_handle,
+ bsc_handle = bsc_handle,
+ peer_contact_uri = repository_uri,
+ bpki_cert = repository_cert))
+
+ rpkid_query.extend(rpki.left_right.repository_elt.make_pdu(
+ action = "destroy", self_handle = handle, repository_handle = r) for r in repository_pdus)
+
+ # <parent/> setup code currently assumes 1:1 mapping between
+ # <repository/> and <parent/>, and further assumes that the handles
+      # for an associated pair are identical (that is:
+ # parent.repository_handle == parent.parent_handle).
+
+ for parent in tree.getiterator("parent"):
+
+ parent_handle = parent.get("handle")
+ parent_pdu = parent_pdus.pop(parent_handle, None)
+ parent_uri = parent.get("service_uri")
+ parent_myhandle = parent.get("myhandle")
+ parent_sia_base = parent.get("sia_base")
+ parent_cms_cert = findbase64(parent, "bpki_cms_certificate")
+
+ if (parent_pdu is None or
+ parent_pdu.bsc_handle != bsc_handle or
+ parent_pdu.repository_handle != parent_handle or
+ parent_pdu.peer_contact_uri != parent_uri or
+ parent_pdu.sia_base != parent_sia_base or
+ parent_pdu.sender_name != parent_myhandle or
+ parent_pdu.recipient_name != parent_handle or
+ parent_pdu.bpki_cms_cert != parent_cms_cert):
+ rpkid_query.append(rpki.left_right.parent_elt.make_pdu(
+ action = "create" if parent_pdu is None else "set",
+ tag = parent_handle,
+ self_handle = handle,
+ parent_handle = parent_handle,
+ bsc_handle = bsc_handle,
+ repository_handle = parent_handle,
+ peer_contact_uri = parent_uri,
+ sia_base = parent_sia_base,
+ sender_name = parent_myhandle,
+ recipient_name = parent_handle,
+ bpki_cms_cert = parent_cms_cert))
+
+ rpkid_query.extend(rpki.left_right.parent_elt.make_pdu(
+ action = "destroy", self_handle = handle, parent_handle = p) for p in parent_pdus)
+
+      # Children are simpler than parents: they call us, so there is no URL
+      # to construct, and figuring out what certificate to use is their
+      # problem, not ours.
+
+ for child in tree.getiterator("child"):
+
+ child_handle = child.get("handle")
+ child_pdu = child_pdus.pop(child_handle, None)
+ child_cert = findbase64(child, "bpki_certificate")
+
+ if (child_pdu is None or
+ child_pdu.bsc_handle != bsc_handle or
+ child_pdu.bpki_cert != child_cert):
+ rpkid_query.append(rpki.left_right.child_elt.make_pdu(
+ action = "create" if child_pdu is None else "set",
+ tag = child_handle,
+ self_handle = handle,
+ child_handle = child_handle,
+ bsc_handle = bsc_handle,
+ bpki_cert = child_cert))
+
+ rpkid_query.extend(rpki.left_right.child_elt.make_pdu(
+ action = "destroy", self_handle = handle, child_handle = c) for c in child_pdus)
+
+ # Publication setup.
+
+ if self.run_pubd:
+
+ for f in self.entitydb.iterate("pubclients", "*.xml"):
+ c = etree_read(f)
+
+ client_handle = c.get("client_handle")
+ client_base_uri = c.get("sia_base")
+ client_bpki_cert = rpki.x509.X509(PEM_file = self.bpki_servers.fxcert(c.findtext("bpki_client_ta")))
+ client_pdu = client_pdus.pop(client_handle, None)
+
+ if (client_pdu is None or
+ client_pdu.base_uri != client_base_uri or
+ client_pdu.bpki_cert != client_bpki_cert):
+ pubd_query.append(rpki.publication.client_elt.make_pdu(
+ action = "create" if client_pdu is None else "set",
+ client_handle = client_handle,
+ bpki_cert = client_bpki_cert,
+ base_uri = client_base_uri))
+
+ pubd_query.extend(rpki.publication.client_elt.make_pdu(
+ action = "destroy", client_handle = p) for p in client_pdus)
+
+ # If we changed anything, ship updates off to daemons
+
+ failed = False
+
+ if rpkid_query:
+ rpkid_reply = call_rpkid(*rpkid_query)
+ bsc_pdus = dict((x.bsc_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.bsc_elt))
+ if bsc_handle in bsc_pdus and bsc_pdus[bsc_handle].pkcs10_request:
+ bsc_req = bsc_pdus[bsc_handle].pkcs10_request
+ for r in rpkid_reply:
+ if isinstance(r, rpki.left_right.report_error_elt):
+ failed = True
+ print "rpkid reported failure:", r.error_code
+ if r.error_text:
+ print r.error_text
+
+ if failed:
+ raise RuntimeError
+
+ if pubd_query:
+ assert self.run_pubd
+ pubd_reply = call_pubd(*pubd_query)
+ for r in pubd_reply:
+ if isinstance(r, rpki.publication.report_error_elt):
+ failed = True
+ print "pubd reported failure:", r.error_code
+ if r.error_text:
+ print r.error_text
+
+ if failed:
+ raise RuntimeError
+
+ # Rewrite XML.
+
+ e = tree.find("bpki_bsc_pkcs10")
+ if e is not None:
+ tree.remove(e)
+ if bsc_req is not None:
+ SubElement(tree, "bpki_bsc_pkcs10").text = bsc_req.get_Base64()
+
+ tree.set("service_uri", rpkid_base + "up-down/" + handle)
+
+ e = tree.find("bpki_server_ta")
+ if e is not None:
+ tree.remove(e)
+ PEMElement(tree, "bpki_server_ta", self.bpki_resources.cer)
+
+ etree_write(tree, xmlfile, validate = True,
+ msg = None if xmlfile is my_xmlfile else 'Send this file back to the hosted entity ("%s")' % handle)
+
+ db.close()
+
+  # We used to run the event loop again here to give TLS connections a
+  # chance to shut down cleanly.  That seems not to be needed (and
+  # sometimes hangs forever, which is odd) with TLS out of the picture.
+ #rpki.async.event_loop()
diff --git a/rpkid.without_tls/rpki/oids.py b/rpkid.without_tls/rpki/oids.py
new file mode 100644
index 00000000..90e486b7
--- /dev/null
+++ b/rpkid.without_tls/rpki/oids.py
@@ -0,0 +1,58 @@
+"""
+OID database.
+
+$Id$
+
+Copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+## @var oid2name
+# Mapping table of OIDs to conventional string names.
+
+oid2name = {
+ (1, 2, 840, 113549, 1, 1, 11) : "sha256WithRSAEncryption",
+ (1, 2, 840, 113549, 1, 1, 12) : "sha384WithRSAEncryption",
+ (1, 2, 840, 113549, 1, 1, 13) : "sha512WithRSAEncryption",
+ (1, 2, 840, 113549, 1, 7, 1) : "id-data",
+ (1, 2, 840, 113549, 1, 9, 16) : "id-smime",
+ (1, 2, 840, 113549, 1, 9, 16, 1) : "id-ct",
+ (1, 2, 840, 113549, 1, 9, 16, 1, 24) : "id-ct-routeOriginAttestation",
+ (1, 2, 840, 113549, 1, 9, 16, 1, 26) : "id-ct-rpkiManifest",
+ (1, 2, 840, 113549, 1, 9, 16, 1, 28) : "id-ct-xml",
+ (1, 3, 6, 1, 5, 5, 7, 1, 1) : "authorityInfoAccess",
+ (1, 3, 6, 1, 5, 5, 7, 1, 11) : "subjectInfoAccess",
+ (1, 3, 6, 1, 5, 5, 7, 1, 7) : "sbgp-ipAddrBlock",
+ (1, 3, 6, 1, 5, 5, 7, 1, 8) : "sbgp-autonomousSysNum",
+ (1, 3, 6, 1, 5, 5, 7, 14, 2) : "id-cp-ipAddr-asNumber",
+ (1, 3, 6, 1, 5, 5, 7, 48, 2) : "id-ad-caIssuers",
+ (1, 3, 6, 1, 5, 5, 7, 48, 5) : "id-ad-caRepository",
+ (1, 3, 6, 1, 5, 5, 7, 48, 9) : "id-ad-signedObjectRepository",
+ (1, 3, 6, 1, 5, 5, 7, 48, 10) : "id-ad-rpkiManifest",
+ (1, 3, 6, 1, 5, 5, 7, 48, 11) : "id-ad-signedObject",
+ (2, 16, 840, 1, 101, 3, 4, 2, 1) : "id-sha256",
+ (2, 5, 29, 14) : "subjectKeyIdentifier",
+ (2, 5, 29, 15) : "keyUsage",
+ (2, 5, 29, 19) : "basicConstraints",
+ (2, 5, 29, 20) : "cRLNumber",
+ (2, 5, 29, 31) : "cRLDistributionPoints",
+ (2, 5, 29, 32) : "certificatePolicies",
+ (2, 5, 29, 35) : "authorityKeyIdentifier",
+ (2, 5, 4, 3) : "commonName",
+}
+
+## @var name2oid
+# Mapping table of string names to OIDs
+
+name2oid = dict((v, k) for k, v in oid2name.items())
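+
+# Example lookups (illustrative only): both tables are plain dicts, so
+# translation in either direction is an ordinary dictionary lookup.
+#
+#   >>> oid2name[(2, 5, 4, 3)]
+#   'commonName'
+#   >>> name2oid["commonName"]
+#   (2, 5, 4, 3)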
diff --git a/rpkid.without_tls/rpki/publication.py b/rpkid.without_tls/rpki/publication.py
new file mode 100644
index 00000000..b3f42953
--- /dev/null
+++ b/rpkid.without_tls/rpki/publication.py
@@ -0,0 +1,401 @@
+"""
+RPKI "publication" protocol.
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import base64, os, errno
+import rpki.resource_set, rpki.x509, rpki.sql, rpki.exceptions, rpki.xml_utils
+import rpki.http, rpki.up_down, rpki.relaxng, rpki.sundial, rpki.log, rpki.roa
+
+class publication_namespace(object):
+ """
+ XML namespace parameters for publication protocol.
+ """
+
+ xmlns = "http://www.hactrn.net/uris/rpki/publication-spec/"
+ nsmap = { None : xmlns }
+
+class control_elt(rpki.xml_utils.data_elt, rpki.sql.sql_persistent, publication_namespace):
+ """
+ Virtual class for control channel objects.
+ """
+
+ def serve_dispatch(self, r_msg, cb, eb):
+ """
+ Action dispatch handler. This needs special handling because we
+ need to make sure that this PDU arrived via the control channel.
+ """
+ if self.client is not None:
+ raise rpki.exceptions.BadQuery, "Control query received on client channel"
+ rpki.xml_utils.data_elt.serve_dispatch(self, r_msg, cb, eb)
+
+class config_elt(control_elt):
+ """
+ <config/> element. This is a little weird because there should
+ never be more than one row in the SQL config table, but we have to
+ put the BPKI CRL somewhere and SQL is the least bad place available.
+
+ So we reuse a lot of the SQL machinery, but we nail config_id at 1,
+ we don't expose it in the XML protocol, and we only support the get
+ and set actions.
+ """
+
+ attributes = ("action", "tag")
+ element_name = "config"
+ elements = ("bpki_crl",)
+
+ sql_template = rpki.sql.template("config", "config_id", ("bpki_crl", rpki.x509.CRL))
+
+ wired_in_config_id = 1
+
+ def startElement(self, stack, name, attrs):
+ """
+ StartElement() handler for config object. This requires special
+ handling because of the weird way we treat config_id.
+ """
+ control_elt.startElement(self, stack, name, attrs)
+ self.config_id = self.wired_in_config_id
+
+ @classmethod
+ def fetch(cls, gctx):
+ """
+ Fetch the config object from SQL. This requires special handling
+ because of the weird way we treat config_id.
+ """
+ return cls.sql_fetch(gctx, cls.wired_in_config_id)
+
+ def serve_set(self, r_msg, cb, eb):
+ """
+ Handle a set action. This requires special handling because
+ config doesn't support the create method.
+ """
+ if self.sql_fetch(self.gctx, self.config_id) is None:
+ control_elt.serve_create(self, r_msg, cb, eb)
+ else:
+ control_elt.serve_set(self, r_msg, cb, eb)
+
+ def serve_fetch_one_maybe(self):
+ """
+ Find the config object on which a get or set method should
+ operate.
+ """
+ return self.sql_fetch(self.gctx, self.config_id)
+
+class client_elt(control_elt):
+ """
+ <client/> element.
+ """
+
+ element_name = "client"
+ attributes = ("action", "tag", "client_handle", "base_uri")
+ elements = ("bpki_cert", "bpki_glue")
+
+ sql_template = rpki.sql.template("client", "client_id", "client_handle", "base_uri", ("bpki_cert", rpki.x509.X509), ("bpki_glue", rpki.x509.X509))
+
+ base_uri = None
+ bpki_cert = None
+ bpki_glue = None
+
+ def serve_fetch_one_maybe(self):
+ """
+ Find the client object on which a get, set, or destroy method
+ should operate, or which would conflict with a create method.
+ """
+ return self.sql_fetch_where1(self.gctx, "client_handle = %s", self.client_handle)
+
+ def serve_fetch_all(self):
+ """Find client objects on which a list method should operate."""
+ return self.sql_fetch_all(self.gctx)
+
+ def check_allowed_uri(self, uri):
+ if not uri.startswith(self.base_uri):
+ raise rpki.exceptions.ForbiddenURI
+
+class publication_object_elt(rpki.xml_utils.base_elt, publication_namespace):
+ """
+  Virtual class for publishable objects.  These have very similar
+  syntax; the differences lie in the underlying datatype and methods.
+  The XML methods are a little different from the pattern used for
+  objects that support the create/set/get/list/destroy actions, but
+  publishable objects don't go in SQL either, so these classes would be
+  different in any case.
+ """
+
+ attributes = ("action", "tag", "client_handle", "uri")
+ payload_type = None
+ payload = None
+
+ def endElement(self, stack, name, text):
+ """
+    Handle a publishable element.
+ """
+ assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
+ if text:
+ self.payload = self.payload_type(Base64 = text)
+ stack.pop()
+
+ def toXML(self):
+ """
+ Generate XML element for publishable object.
+ """
+ elt = self.make_elt()
+ if self.payload:
+ elt.text = self.payload.get_Base64()
+ return elt
+
+ def serve_dispatch(self, r_msg, cb, eb):
+ """
+ Action dispatch handler.
+ """
+ try:
+ if self.client is None:
+ raise rpki.exceptions.BadQuery, "Client query received on control channel"
+ dispatch = { "publish" : self.serve_publish,
+ "withdraw" : self.serve_withdraw }
+ if self.action not in dispatch:
+ raise rpki.exceptions.BadQuery, "Unexpected query: action %s" % self.action
+ self.client.check_allowed_uri(self.uri)
+ dispatch[self.action]()
+ r_pdu = self.__class__()
+ r_pdu.action = self.action
+ r_pdu.tag = self.tag
+ r_pdu.uri = self.uri
+ r_msg.append(r_pdu)
+ cb()
+ except rpki.exceptions.NoObjectAtURI, e:
+ # This can happen when we're cleaning up from a prior mess, so
+ # we generate a <report_error/> PDU then carry on.
+ r_msg.append(report_error_elt.from_exception(e, self.tag))
+ cb()
+
+ def serve_publish(self):
+ """
+ Publish an object.
+ """
+ rpki.log.info("Publishing %r as %r" % (self.payload, self.uri))
+ filename = self.uri_to_filename()
+ filename_tmp = filename + ".tmp"
+ dirname = os.path.dirname(filename)
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
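+    # Write to a temporary file in the same directory, then rename() it
+    # into place, so readers never see a partially written object.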
+ f = open(filename_tmp, "wb")
+ f.write(self.payload.get_DER())
+ f.close()
+ os.rename(filename_tmp, filename)
+
+ def serve_withdraw(self):
+ """
+ Withdraw an object.
+ """
+ rpki.log.info("Withdrawing %r" % (self.uri,))
+ filename = self.uri_to_filename()
+ try:
+ os.remove(filename)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise rpki.exceptions.NoObjectAtURI, "No object published at %r" % self.uri
+ else:
+ raise
+
+ def uri_to_filename(self):
+ """
+ Convert a URI to a local filename.
+ """
+ if not self.uri.startswith("rsync://"):
+ raise rpki.exceptions.BadURISyntax, self.uri
+ path = self.uri.split("/")[3:]
+ if not self.gctx.publication_multimodule:
+ del path[0]
+ path.insert(0, self.gctx.publication_base.rstrip("/"))
+ filename = "/".join(path)
+ if "/../" in filename or filename.endswith("/.."):
+ raise rpki.exceptions.BadURISyntax, filename
+ return filename
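+
+  # Worked example of the mapping above, with a hypothetical URI and
+  # assuming publication_base is "/var/pubd/" and publication_multimodule
+  # is off:
+  #
+  #   rsync://repo.example.net/module/foo/bar.cer  ->  /var/pubd/foo/bar.cer
+  #
+  # With publication_multimodule set, the module component ("module") is
+  # retained, giving /var/pubd/module/foo/bar.cer.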
+
+ @classmethod
+ def make_publish(cls, uri, obj, tag = None):
+ """
+ Construct a publication PDU.
+ """
+ assert cls.payload_type is not None and type(obj) is cls.payload_type
+ return cls.make_pdu(action = "publish", uri = uri, payload = obj, tag = tag)
+
+ @classmethod
+ def make_withdraw(cls, uri, obj, tag = None):
+ """
+ Construct a withdrawal PDU.
+ """
+ assert cls.payload_type is not None and type(obj) is cls.payload_type
+ return cls.make_pdu(action = "withdraw", uri = uri, tag = tag)
+
+ def raise_if_error(self):
+ """
+ No-op, since this is not a <report_error/> PDU.
+ """
+ pass
+
+class certificate_elt(publication_object_elt):
+ """
+ <certificate/> element.
+ """
+
+ element_name = "certificate"
+ payload_type = rpki.x509.X509
+
+class crl_elt(publication_object_elt):
+ """
+ <crl/> element.
+ """
+
+ element_name = "crl"
+ payload_type = rpki.x509.CRL
+
+class manifest_elt(publication_object_elt):
+ """
+ <manifest/> element.
+ """
+
+ element_name = "manifest"
+ payload_type = rpki.x509.SignedManifest
+
+class roa_elt(publication_object_elt):
+ """
+ <roa/> element.
+ """
+
+ element_name = "roa"
+ payload_type = rpki.x509.ROA
+
+publication_object_elt.obj2elt = dict((e.payload_type, e) for e in (certificate_elt, crl_elt, manifest_elt, roa_elt))
+
+class report_error_elt(rpki.xml_utils.text_elt, publication_namespace):
+ """
+ <report_error/> element.
+ """
+
+ element_name = "report_error"
+ attributes = ("tag", "error_code")
+ text_attribute = "error_text"
+
+ error_text = None
+
+ @classmethod
+ def from_exception(cls, e, tag = None):
+ """
+ Generate a <report_error/> element from an exception.
+ """
+ self = cls()
+ self.tag = tag
+ self.error_code = e.__class__.__name__
+ self.error_text = str(e)
+ return self
+
+ def __str__(self):
+ s = ""
+ if getattr(self, "tag", None) is not None:
+ s += "[%s] " % self.tag
+ s += self.error_code
+ if getattr(self, "error_text", None) is not None:
+ s += ": " + self.error_text
+ return s
+
+ def raise_if_error(self):
+ """
+ Raise exception associated with this <report_error/> PDU.
+ """
+ t = rpki.exceptions.__dict__.get(self.error_code)
+ if isinstance(t, type) and issubclass(t, rpki.exceptions.RPKI_Exception):
+ raise t, getattr(self, "text", None)
+ else:
+ raise rpki.exceptions.BadPublicationReply, "Unexpected response from pubd: %s" % self
+
+class msg(rpki.xml_utils.msg, publication_namespace):
+ """
+ Publication PDU.
+ """
+
+ ## @var version
+ # Protocol version
+ version = 1
+
+ ## @var pdus
+ # Dispatch table of PDUs for this protocol.
+ pdus = dict((x.element_name, x)
+ for x in (config_elt, client_elt, certificate_elt, crl_elt, manifest_elt, roa_elt, report_error_elt))
+
+ def serve_top_level(self, gctx, client, cb):
+ """
+ Serve one msg PDU.
+ """
+ if not self.is_query():
+ raise rpki.exceptions.BadQuery, "Message type is not query"
+ r_msg = self.__class__.reply()
+
+ def loop(iterator, q_pdu):
+
+ def fail(e):
+ if not isinstance(e, rpki.exceptions.NotFound):
+ rpki.log.traceback()
+ r_msg.append(report_error_elt.from_exception(e, q_pdu.tag))
+ cb(r_msg)
+
+ try:
+ q_pdu.gctx = gctx
+ q_pdu.client = client
+ q_pdu.serve_dispatch(r_msg, iterator, fail)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ fail(e)
+
+ def done():
+ cb(r_msg)
+
+ rpki.async.iterator(self, loop, done)
+
+class sax_handler(rpki.xml_utils.sax_handler):
+ """
+ SAX handler for publication protocol.
+ """
+
+ pdu = msg
+ name = "msg"
+ version = "1"
+
+class cms_msg(rpki.x509.XML_CMS_object):
+ """
+ Class to hold a CMS-signed publication PDU.
+ """
+
+ encoding = "us-ascii"
+ schema = rpki.relaxng.publication
+ saxify = sax_handler.saxify
diff --git a/rpkid.without_tls/rpki/relaxng.py b/rpkid.without_tls/rpki/relaxng.py
new file mode 100644
index 00000000..62c5fb41
--- /dev/null
+++ b/rpkid.without_tls/rpki/relaxng.py
@@ -0,0 +1,1773 @@
+# Automatically generated, do not edit.
+
+import lxml.etree
+
+## @var left_right
+## Parsed RelaxNG left_right schema
+left_right = lxml.etree.RelaxNG(lxml.etree.fromstring('''<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: left-right-schema.rnc 3302 2010-06-29 01:51:45Z sra $
+
+ RelaxNG Schema for RPKI left-right protocol.
+
+ libxml2 (including xmllint) only groks the XML syntax of RelaxNG, so
+ run the compact syntax through trang to get XML syntax.
+
+ Copyright (C) 2009-2010 Internet Systems Consortium ("ISC")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+
+ Portions copyright (C) 2007-2008 American Registry for Internet Numbers ("ARIN")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+-->
+<grammar ns="http://www.hactrn.net/uris/rpki/left-right-spec/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <!-- Top level PDU -->
+ <start>
+ <element name="msg">
+ <attribute name="version">
+ <data type="positiveInteger">
+ <param name="maxInclusive">1</param>
+ </data>
+ </attribute>
+ <choice>
+ <group>
+ <attribute name="type">
+ <value>query</value>
+ </attribute>
+ <zeroOrMore>
+ <ref name="query_elt"/>
+ </zeroOrMore>
+ </group>
+ <group>
+ <attribute name="type">
+ <value>reply</value>
+ </attribute>
+ <zeroOrMore>
+ <ref name="reply_elt"/>
+ </zeroOrMore>
+ </group>
+ </choice>
+ </element>
+ </start>
+ <!-- PDUs allowed in a query -->
+ <define name="query_elt" combine="choice">
+ <ref name="self_query"/>
+ </define>
+ <define name="query_elt" combine="choice">
+ <ref name="bsc_query"/>
+ </define>
+ <define name="query_elt" combine="choice">
+ <ref name="parent_query"/>
+ </define>
+ <define name="query_elt" combine="choice">
+ <ref name="child_query"/>
+ </define>
+ <define name="query_elt" combine="choice">
+ <ref name="repository_query"/>
+ </define>
+ <define name="query_elt" combine="choice">
+ <ref name="list_roa_requests_query"/>
+ </define>
+ <define name="query_elt" combine="choice">
+ <ref name="list_resources_query"/>
+ </define>
+ <define name="query_elt" combine="choice">
+ <ref name="list_published_objects_query"/>
+ </define>
+ <define name="query_elt" combine="choice">
+ <ref name="list_received_resources_query"/>
+ </define>
+ <!-- PDUs allowed in a reply -->
+ <define name="reply_elt" combine="choice">
+ <ref name="self_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="bsc_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="parent_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="child_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="repository_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="list_resources_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="list_roa_requests_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="list_published_objects_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="list_received_resources_reply"/>
+ </define>
+ <define name="reply_elt" combine="choice">
+ <ref name="report_error_reply"/>
+ </define>
+ <!-- Tag attributes for bulk operations -->
+ <define name="tag">
+ <optional>
+ <attribute name="tag">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ </optional>
+ </define>
+ <!--
+ Combinations of action and type attributes used in later definitions.
+ The same patterns repeat in most of the elements in this protocol.
+ -->
+ <define name="ctl_create">
+ <attribute name="action">
+ <value>create</value>
+ </attribute>
+ <ref name="tag"/>
+ </define>
+ <define name="ctl_set">
+ <attribute name="action">
+ <value>set</value>
+ </attribute>
+ <ref name="tag"/>
+ </define>
+ <define name="ctl_get">
+ <attribute name="action">
+ <value>get</value>
+ </attribute>
+ <ref name="tag"/>
+ </define>
+ <define name="ctl_list">
+ <attribute name="action">
+ <value>list</value>
+ </attribute>
+ <ref name="tag"/>
+ </define>
+ <define name="ctl_destroy">
+ <attribute name="action">
+ <value>destroy</value>
+ </attribute>
+ <ref name="tag"/>
+ </define>
+ <!-- Base64 encoded DER stuff -->
+ <define name="base64">
+ <data type="base64Binary">
+ <param name="maxLength">512000</param>
+ </data>
+ </define>
+ <!--
+ Base definition for all fields that are really just SQL primary indices
+ sql_id = xsd:nonNegativeInteger
+ -->
+ <!--
+    ...except that fields containing SQL primary indices don't belong
+    in this protocol, so they've been turned into handles.
+ Length restriction is a MySQL implementation issue.
+ Handles are case-insensitive (because SQL is, among other reasons).
+ -->
+ <define name="object_handle">
+ <data type="string">
+ <param name="maxLength">255</param>
+ <param name="pattern">[\-_A-Za-z0-9]*</param>
+ </data>
+ </define>
+ <!-- URIs -->
+ <define name="uri">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </define>
+ <!-- Name fields imported from up-down protocol -->
+ <define name="up_down_name">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </define>
+ <!-- Resource lists -->
+ <define name="asn_list">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,0-9]*</param>
+ </data>
+ </define>
+ <define name="ipv4_list">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,0-9/.]*</param>
+ </data>
+ </define>
+ <define name="ipv6_list">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,0-9/:a-fA-F]*</param>
+ </data>
+ </define>
+ <!-- <self/> element -->
+ <define name="self_bool">
+ <optional>
+ <attribute name="rekey">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="reissue">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="revoke">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="run_now">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="publish_world_now">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="revoke_forgotten">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ </define>
+ <define name="self_payload">
+ <optional>
+ <attribute name="use_hsm">
+ <choice>
+ <value>yes</value>
+ <value>no</value>
+ </choice>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="crl_interval">
+ <data type="positiveInteger"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="regen_margin">
+ <data type="positiveInteger"/>
+ </attribute>
+ </optional>
+ <optional>
+ <element name="bpki_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="bpki_glue">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="self_handle">
+ <attribute name="self_handle">
+ <ref name="object_handle"/>
+ </attribute>
+ </define>
+ <define name="self_query" combine="choice">
+ <element name="self">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="self_bool"/>
+ <ref name="self_payload"/>
+ </element>
+ </define>
+ <define name="self_reply" combine="choice">
+ <element name="self">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="self_query" combine="choice">
+ <element name="self">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="self_bool"/>
+ <ref name="self_payload"/>
+ </element>
+ </define>
+ <define name="self_reply" combine="choice">
+ <element name="self">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="self_query" combine="choice">
+ <element name="self">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="self_reply" combine="choice">
+ <element name="self">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="self_payload"/>
+ </element>
+ </define>
+ <define name="self_query" combine="choice">
+ <element name="self">
+ <ref name="ctl_list"/>
+ </element>
+ </define>
+ <define name="self_reply" combine="choice">
+ <element name="self">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ <ref name="self_payload"/>
+ </element>
+ </define>
+ <define name="self_query" combine="choice">
+ <element name="self">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="self_reply" combine="choice">
+ <element name="self">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <!-- <bsc/> element. Key parameters hardwired for now. -->
+ <define name="bsc_bool">
+ <optional>
+ <attribute name="generate_keypair">
+ <value>yes</value>
+ </attribute>
+ <optional>
+ <attribute name="key_type">
+ <value>rsa</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="hash_alg">
+ <value>sha256</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="key_length">
+ <value>2048</value>
+ </attribute>
+ </optional>
+ </optional>
+ </define>
+ <define name="bsc_handle">
+ <attribute name="bsc_handle">
+ <ref name="object_handle"/>
+ </attribute>
+ </define>
+ <define name="bsc_payload">
+ <optional>
+ <element name="signing_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="signing_cert_crl">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="bsc_pkcs10">
+ <optional>
+ <element name="pkcs10_request">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="bsc_query" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ <ref name="bsc_bool"/>
+ <ref name="bsc_payload"/>
+ </element>
+ </define>
+ <define name="bsc_reply" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ <ref name="bsc_pkcs10"/>
+ </element>
+ </define>
+ <define name="bsc_query" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ <ref name="bsc_bool"/>
+ <ref name="bsc_payload"/>
+ </element>
+ </define>
+ <define name="bsc_reply" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ <ref name="bsc_pkcs10"/>
+ </element>
+ </define>
+ <define name="bsc_query" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ </element>
+ </define>
+ <define name="bsc_reply" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ <ref name="bsc_payload"/>
+ <ref name="bsc_pkcs10"/>
+ </element>
+ </define>
+ <define name="bsc_query" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="bsc_reply" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ <ref name="bsc_payload"/>
+ <ref name="bsc_pkcs10"/>
+ </element>
+ </define>
+ <define name="bsc_query" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ </element>
+ </define>
+ <define name="bsc_reply" combine="choice">
+ <element name="bsc">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ <ref name="bsc_handle"/>
+ </element>
+ </define>
+ <!-- <parent/> element -->
+ <define name="parent_handle">
+ <attribute name="parent_handle">
+ <ref name="object_handle"/>
+ </attribute>
+ </define>
+ <define name="parent_bool">
+ <optional>
+ <attribute name="rekey">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="reissue">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="revoke">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="revoke_forgotten">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ </define>
+ <define name="parent_payload">
+ <optional>
+ <attribute name="peer_contact_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="sia_base">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <optional>
+ <ref name="bsc_handle"/>
+ </optional>
+ <optional>
+ <ref name="repository_handle"/>
+ </optional>
+ <optional>
+ <attribute name="sender_name">
+ <ref name="up_down_name"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="recipient_name">
+ <ref name="up_down_name"/>
+ </attribute>
+ </optional>
+ <optional>
+ <element name="bpki_cms_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="bpki_cms_glue">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="parent_query" combine="choice">
+ <element name="parent">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ <ref name="parent_bool"/>
+ <ref name="parent_payload"/>
+ </element>
+ </define>
+ <define name="parent_reply" combine="choice">
+ <element name="parent">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ </element>
+ </define>
+ <define name="parent_query" combine="choice">
+ <element name="parent">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ <ref name="parent_bool"/>
+ <ref name="parent_payload"/>
+ </element>
+ </define>
+ <define name="parent_reply" combine="choice">
+ <element name="parent">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ </element>
+ </define>
+ <define name="parent_query" combine="choice">
+ <element name="parent">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ </element>
+ </define>
+ <define name="parent_reply" combine="choice">
+ <element name="parent">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ <ref name="parent_payload"/>
+ </element>
+ </define>
+ <define name="parent_query" combine="choice">
+ <element name="parent">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="parent_reply" combine="choice">
+ <element name="parent">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ <ref name="parent_payload"/>
+ </element>
+ </define>
+ <define name="parent_query" combine="choice">
+ <element name="parent">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ </element>
+ </define>
+ <define name="parent_reply" combine="choice">
+ <element name="parent">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ </element>
+ </define>
+ <!-- <child/> element -->
+ <define name="child_handle">
+ <attribute name="child_handle">
+ <ref name="object_handle"/>
+ </attribute>
+ </define>
+ <define name="child_bool">
+ <optional>
+ <attribute name="reissue">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ </define>
+ <define name="child_payload">
+ <optional>
+ <ref name="bsc_handle"/>
+ </optional>
+ <optional>
+ <element name="bpki_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="bpki_glue">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="child_query" combine="choice">
+ <element name="child">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ <ref name="child_bool"/>
+ <ref name="child_payload"/>
+ </element>
+ </define>
+ <define name="child_reply" combine="choice">
+ <element name="child">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ </element>
+ </define>
+ <define name="child_query" combine="choice">
+ <element name="child">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ <ref name="child_bool"/>
+ <ref name="child_payload"/>
+ </element>
+ </define>
+ <define name="child_reply" combine="choice">
+ <element name="child">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ </element>
+ </define>
+ <define name="child_query" combine="choice">
+ <element name="child">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ </element>
+ </define>
+ <define name="child_reply" combine="choice">
+ <element name="child">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ <ref name="child_payload"/>
+ </element>
+ </define>
+ <define name="child_query" combine="choice">
+ <element name="child">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="child_reply" combine="choice">
+ <element name="child">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ <ref name="child_payload"/>
+ </element>
+ </define>
+ <define name="child_query" combine="choice">
+ <element name="child">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ </element>
+ </define>
+ <define name="child_reply" combine="choice">
+ <element name="child">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ </element>
+ </define>
+ <!-- <repository/> element -->
+ <define name="repository_handle">
+ <attribute name="repository_handle">
+ <ref name="object_handle"/>
+ </attribute>
+ </define>
+ <define name="repository_payload">
+ <optional>
+ <attribute name="peer_contact_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <optional>
+ <ref name="bsc_handle"/>
+ </optional>
+ <optional>
+ <element name="bpki_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="bpki_glue">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="repository_query" combine="choice">
+ <element name="repository">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ <ref name="repository_payload"/>
+ </element>
+ </define>
+ <define name="repository_reply" combine="choice">
+ <element name="repository">
+ <ref name="ctl_create"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ </element>
+ </define>
+ <define name="repository_query" combine="choice">
+ <element name="repository">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ <ref name="repository_payload"/>
+ </element>
+ </define>
+ <define name="repository_reply" combine="choice">
+ <element name="repository">
+ <ref name="ctl_set"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ </element>
+ </define>
+ <define name="repository_query" combine="choice">
+ <element name="repository">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ </element>
+ </define>
+ <define name="repository_reply" combine="choice">
+ <element name="repository">
+ <ref name="ctl_get"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ <ref name="repository_payload"/>
+ </element>
+ </define>
+ <define name="repository_query" combine="choice">
+ <element name="repository">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="repository_reply" combine="choice">
+ <element name="repository">
+ <ref name="ctl_list"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ <ref name="repository_payload"/>
+ </element>
+ </define>
+ <define name="repository_query" combine="choice">
+ <element name="repository">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ </element>
+ </define>
+ <define name="repository_reply" combine="choice">
+ <element name="repository">
+ <ref name="ctl_destroy"/>
+ <ref name="self_handle"/>
+ <ref name="repository_handle"/>
+ </element>
+ </define>
+ <!-- <list_resources/> element -->
+ <define name="list_resources_query">
+ <element name="list_resources">
+ <ref name="tag"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ </element>
+ </define>
+ <define name="list_resources_reply">
+ <element name="list_resources">
+ <ref name="tag"/>
+ <ref name="self_handle"/>
+ <ref name="child_handle"/>
+ <attribute name="valid_until">
+ <data type="dateTime">
+ <param name="pattern">.*Z</param>
+ </data>
+ </attribute>
+ <optional>
+ <attribute name="asn">
+ <ref name="asn_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="ipv4">
+ <ref name="ipv4_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="ipv6">
+ <ref name="ipv6_list"/>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+ <!-- <list_roa_requests/> element -->
+ <define name="list_roa_requests_query">
+ <element name="list_roa_requests">
+ <ref name="tag"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="list_roa_requests_reply">
+ <element name="list_roa_requests">
+ <ref name="tag"/>
+ <ref name="self_handle"/>
+ <attribute name="asn">
+ <data type="positiveInteger"/>
+ </attribute>
+ <optional>
+ <attribute name="ipv4">
+ <ref name="ipv4_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="ipv6">
+ <ref name="ipv6_list"/>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+ <!-- <list_published_objects/> element -->
+ <define name="list_published_objects_query">
+ <element name="list_published_objects">
+ <ref name="tag"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="list_published_objects_reply">
+ <element name="list_published_objects">
+ <ref name="tag"/>
+ <ref name="self_handle"/>
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <!-- <list_received_resources/> element -->
+ <define name="list_received_resources_query">
+ <element name="list_received_resources">
+ <ref name="tag"/>
+ <ref name="self_handle"/>
+ </element>
+ </define>
+ <define name="list_received_resources_reply">
+ <element name="list_received_resources">
+ <ref name="tag"/>
+ <ref name="self_handle"/>
+ <ref name="parent_handle"/>
+ <attribute name="notBefore">
+ <data type="dateTime">
+ <param name="pattern">.*Z</param>
+ </data>
+ </attribute>
+ <attribute name="notAfter">
+ <data type="dateTime">
+ <param name="pattern">.*Z</param>
+ </data>
+ </attribute>
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="sia_uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="aia_uri">
+ <ref name="uri"/>
+ </attribute>
+ <optional>
+ <attribute name="asn">
+ <ref name="asn_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="ipv4">
+ <ref name="ipv4_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="ipv6">
+ <ref name="ipv6_list"/>
+ </attribute>
+ </optional>
+ </element>
+ </define>
+ <!-- <report_error/> element -->
+ <define name="error">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </define>
+ <define name="report_error_reply">
+ <element name="report_error">
+ <ref name="tag"/>
+ <optional>
+ <ref name="self_handle"/>
+ </optional>
+ <attribute name="error_code">
+ <ref name="error"/>
+ </attribute>
+ <optional>
+ <data type="string">
+ <param name="maxLength">512000</param>
+ </data>
+ </optional>
+ </element>
+ </define>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ End:
+-->
+'''))
+
+## @var up_down
+## Parsed RelaxNG up_down schema
+up_down = lxml.etree.RelaxNG(lxml.etree.fromstring('''<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: up-down-schema.rnc 2839 2009-10-27 18:53:00Z sra $
+
+  RelaxNG Schema for up-down protocol, extracted from APNIC Wiki.
+
+ libxml2 (including xmllint) only groks the XML syntax of RelaxNG, so
+ run the compact syntax through trang to get XML syntax.
+-->
+<grammar ns="http://www.apnic.net/specs/rescerts/up-down/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <element name="message">
+ <attribute name="version">
+ <data type="positiveInteger">
+ <param name="maxInclusive">1</param>
+ </data>
+ </attribute>
+ <attribute name="sender">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ <attribute name="recipient">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ <ref name="payload"/>
+ </element>
+ </start>
+ <define name="payload" combine="choice">
+ <attribute name="type">
+ <value>list</value>
+ </attribute>
+ <ref name="list_request"/>
+ </define>
+ <define name="payload" combine="choice">
+ <attribute name="type">
+ <value>list_response</value>
+ </attribute>
+ <ref name="list_response"/>
+ </define>
+ <define name="payload" combine="choice">
+ <attribute name="type">
+ <value>issue</value>
+ </attribute>
+ <ref name="issue_request"/>
+ </define>
+ <define name="payload" combine="choice">
+ <attribute name="type">
+ <value>issue_response</value>
+ </attribute>
+ <ref name="issue_response"/>
+ </define>
+ <define name="payload" combine="choice">
+ <attribute name="type">
+ <value>revoke</value>
+ </attribute>
+ <ref name="revoke_request"/>
+ </define>
+ <define name="payload" combine="choice">
+ <attribute name="type">
+ <value>revoke_response</value>
+ </attribute>
+ <ref name="revoke_response"/>
+ </define>
+ <define name="payload" combine="choice">
+ <attribute name="type">
+ <value>error_response</value>
+ </attribute>
+ <ref name="error_response"/>
+ </define>
+ <define name="list_request">
+ <empty/>
+ </define>
+ <define name="list_response">
+ <zeroOrMore>
+ <ref name="class"/>
+ </zeroOrMore>
+ </define>
+ <define name="class">
+ <element name="class">
+ <attribute name="class_name">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ <attribute name="cert_url">
+ <data type="string">
+ <param name="maxLength">4096</param>
+ </data>
+ </attribute>
+ <attribute name="resource_set_as">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,0-9]*</param>
+ </data>
+ </attribute>
+ <attribute name="resource_set_ipv4">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,/.0-9]*</param>
+ </data>
+ </attribute>
+ <attribute name="resource_set_ipv6">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,/:0-9a-fA-F]*</param>
+ </data>
+ </attribute>
+ <optional>
+ <attribute name="resource_set_notafter">
+ <data type="dateTime">
+ <param name="pattern">.*Z</param>
+ </data>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="suggested_sia_head">
+ <data type="anyURI">
+ <param name="maxLength">1024</param>
+ <param name="pattern">rsync://.+</param>
+ </data>
+ </attribute>
+ </optional>
+ <zeroOrMore>
+ <element name="certificate">
+ <attribute name="cert_url">
+ <data type="string">
+ <param name="maxLength">4096</param>
+ </data>
+ </attribute>
+ <optional>
+ <attribute name="req_resource_set_as">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,0-9]*</param>
+ </data>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="req_resource_set_ipv4">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,/.0-9]*</param>
+ </data>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="req_resource_set_ipv6">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,/:0-9a-fA-F]*</param>
+ </data>
+ </attribute>
+ </optional>
+ <data type="base64Binary">
+ <param name="maxLength">512000</param>
+ </data>
+ </element>
+ </zeroOrMore>
+ <element name="issuer">
+ <data type="base64Binary">
+ <param name="maxLength">512000</param>
+ </data>
+ </element>
+ </element>
+ </define>
+ <define name="issue_request">
+ <element name="request">
+ <attribute name="class_name">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ <optional>
+ <attribute name="req_resource_set_as">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,0-9]*</param>
+ </data>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="req_resource_set_ipv4">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,/.0-9]*</param>
+ </data>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="req_resource_set_ipv6">
+ <data type="string">
+ <param name="maxLength">512000</param>
+ <param name="pattern">[\-,/:0-9a-fA-F]*</param>
+ </data>
+ </attribute>
+ </optional>
+ <data type="base64Binary">
+ <param name="maxLength">512000</param>
+ </data>
+ </element>
+ </define>
+ <define name="issue_response">
+ <ref name="class"/>
+ </define>
+ <define name="revoke_request">
+ <ref name="revocation"/>
+ </define>
+ <define name="revoke_response">
+ <ref name="revocation"/>
+ </define>
+ <define name="revocation">
+ <element name="key">
+ <attribute name="class_name">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ <attribute name="ski">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ </element>
+ </define>
+ <define name="error_response">
+ <element name="status">
+ <data type="positiveInteger">
+ <param name="maxInclusive">999999999999999</param>
+ </data>
+ </element>
+ <optional>
+ <element name="description">
+ <attribute name="xml:lang">
+ <data type="language"/>
+ </attribute>
+ <data type="string">
+ <param name="maxLength">1024</param>
+ </data>
+ </element>
+ </optional>
+ </define>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ End:
+-->
+'''))
+
+## @var publication
+## Parsed RelaxNG publication schema
+publication = lxml.etree.RelaxNG(lxml.etree.fromstring('''<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: publication-schema.rnc 3434 2010-08-31 00:31:55Z sra $
+
+ RelaxNG Schema for RPKI publication protocol.
+
+ libxml2 (including xmllint) only groks the XML syntax of RelaxNG, so
+ run the compact syntax through trang to get XML syntax.
+
+  Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+
+  Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+-->
+<grammar ns="http://www.hactrn.net/uris/rpki/publication-spec/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <!-- Top level PDU -->
+ <start>
+ <element name="msg">
+ <attribute name="version">
+ <data type="positiveInteger">
+ <param name="maxInclusive">1</param>
+ </data>
+ </attribute>
+ <choice>
+ <group>
+ <attribute name="type">
+ <value>query</value>
+ </attribute>
+ <zeroOrMore>
+ <ref name="query_elt"/>
+ </zeroOrMore>
+ </group>
+ <group>
+ <attribute name="type">
+ <value>reply</value>
+ </attribute>
+ <zeroOrMore>
+ <ref name="reply_elt"/>
+ </zeroOrMore>
+ </group>
+ </choice>
+ </element>
+ </start>
+ <!-- PDUs allowed in a query -->
+ <define name="query_elt">
+ <choice>
+ <ref name="config_query"/>
+ <ref name="client_query"/>
+ <ref name="certificate_query"/>
+ <ref name="crl_query"/>
+ <ref name="manifest_query"/>
+ <ref name="roa_query"/>
+ </choice>
+ </define>
+ <!-- PDUs allowed in a reply -->
+ <define name="reply_elt">
+ <choice>
+ <ref name="config_reply"/>
+ <ref name="client_reply"/>
+ <ref name="certificate_reply"/>
+ <ref name="crl_reply"/>
+ <ref name="manifest_reply"/>
+ <ref name="roa_reply"/>
+ <ref name="report_error_reply"/>
+ </choice>
+ </define>
+ <!-- Tag attributes for bulk operations -->
+ <define name="tag">
+ <attribute name="tag">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ </define>
+ <!--
+ Base64 encoded DER stuff
+ base64 = xsd:base64Binary { maxLength="512000" }
+
+ Sadly, it turns out that CRLs can in fact get longer than this for an active CA.
+ Remove length limit for now, think about whether to put it back later.
+ -->
+ <define name="base64">
+ <data type="base64Binary"/>
+ </define>
+ <!-- Publication URLs -->
+ <define name="uri_t">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </define>
+ <define name="uri">
+ <attribute name="uri">
+ <ref name="uri_t"/>
+ </attribute>
+ </define>
+ <!--
+ Handles on remote objects (replaces passing raw SQL IDs). NB:
+ Unlike the up-down protocol, handles in this protocol allow "/" as a
+ hierarchy delimiter.
+ -->
+ <define name="object_handle">
+ <data type="string">
+ <param name="maxLength">255</param>
+ <param name="pattern">[\-_A-Za-z0-9/]*</param>
+ </data>
+ </define>
+ <!--
+ <config/> element (use restricted to repository operator)
+ config_handle attribute, create, list, and destroy commands omitted deliberately, see code for details
+ -->
+ <define name="config_payload">
+ <optional>
+ <element name="bpki_crl">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="config_query" combine="choice">
+ <element name="config">
+ <attribute name="action">
+ <value>set</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="config_payload"/>
+ </element>
+ </define>
+ <define name="config_reply" combine="choice">
+ <element name="config">
+ <attribute name="action">
+ <value>set</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ </element>
+ </define>
+ <define name="config_query" combine="choice">
+ <element name="config">
+ <attribute name="action">
+ <value>get</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ </element>
+ </define>
+ <define name="config_reply" combine="choice">
+ <element name="config">
+ <attribute name="action">
+ <value>get</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="config_payload"/>
+ </element>
+ </define>
+ <!-- <client/> element (use restricted to repository operator) -->
+ <define name="client_handle">
+ <attribute name="client_handle">
+ <ref name="object_handle"/>
+ </attribute>
+ </define>
+ <define name="client_payload">
+ <optional>
+ <attribute name="base_uri">
+ <ref name="uri_t"/>
+ </attribute>
+ </optional>
+ <optional>
+ <element name="bpki_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="bpki_glue">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>create</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ <ref name="client_payload"/>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>create</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>set</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ <ref name="client_payload"/>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>set</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>get</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>get</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ <ref name="client_payload"/>
+ </element>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>list</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>list</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ <ref name="client_payload"/>
+ </element>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>destroy</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>destroy</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <!-- <certificate/> element -->
+ <define name="certificate_query" combine="choice">
+ <element name="certificate">
+ <attribute name="action">
+ <value>publish</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <define name="certificate_reply" combine="choice">
+ <element name="certificate">
+ <attribute name="action">
+ <value>publish</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <define name="certificate_query" combine="choice">
+ <element name="certificate">
+ <attribute name="action">
+ <value>withdraw</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <define name="certificate_reply" combine="choice">
+ <element name="certificate">
+ <attribute name="action">
+ <value>withdraw</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <!-- <crl/> element -->
+ <define name="crl_query" combine="choice">
+ <element name="crl">
+ <attribute name="action">
+ <value>publish</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <define name="crl_reply" combine="choice">
+ <element name="crl">
+ <attribute name="action">
+ <value>publish</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <define name="crl_query" combine="choice">
+ <element name="crl">
+ <attribute name="action">
+ <value>withdraw</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <define name="crl_reply" combine="choice">
+ <element name="crl">
+ <attribute name="action">
+ <value>withdraw</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <!-- <manifest/> element -->
+ <define name="manifest_query" combine="choice">
+ <element name="manifest">
+ <attribute name="action">
+ <value>publish</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <define name="manifest_reply" combine="choice">
+ <element name="manifest">
+ <attribute name="action">
+ <value>publish</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <define name="manifest_query" combine="choice">
+ <element name="manifest">
+ <attribute name="action">
+ <value>withdraw</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <define name="manifest_reply" combine="choice">
+ <element name="manifest">
+ <attribute name="action">
+ <value>withdraw</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <!-- <roa/> element -->
+ <define name="roa_query" combine="choice">
+ <element name="roa">
+ <attribute name="action">
+ <value>publish</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <define name="roa_reply" combine="choice">
+ <element name="roa">
+ <attribute name="action">
+ <value>publish</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <define name="roa_query" combine="choice">
+ <element name="roa">
+ <attribute name="action">
+ <value>withdraw</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <define name="roa_reply" combine="choice">
+ <element name="roa">
+ <attribute name="action">
+ <value>withdraw</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <!-- <report_error/> element -->
+ <define name="error">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </define>
+ <define name="report_error_reply">
+ <element name="report_error">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <attribute name="error_code">
+ <ref name="error"/>
+ </attribute>
+ <optional>
+ <data type="string">
+ <param name="maxLength">512000</param>
+ </data>
+ </optional>
+ </element>
+ </define>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ End:
+-->
+'''))
+
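+# For orientation, a publication-protocol <certificate/> "publish" query
+# accepted by the schema above looks roughly like the fragment below.  This
+# is an illustrative sketch only: the enclosing <msg/> wrapper and the exact
+# definitions of "uri", "tag", and "base64" appear earlier in this schema
+# (here they are assumed to be two attributes and base64 element content).
+#
+#   <certificate action="publish" tag="cert-1"
+#                uri="rsync://repository.example.org/pub/example.cer">
+#     ...base64-encoded DER certificate...
+#   </certificate>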
diff --git a/rpkid.without_tls/rpki/resource_set.py b/rpkid.without_tls/rpki/resource_set.py
new file mode 100644
index 00000000..08a577c9
--- /dev/null
+++ b/rpkid.without_tls/rpki/resource_set.py
@@ -0,0 +1,1107 @@
+"""
+Classes dealing with sets of resources.
+
+The basic mechanics of a resource set are the same for any of the
+resources we handle (ASNs, IPv4 addresses, or IPv6 addresses), so we
+can provide the same operations on any of them, even though the
+underlying details vary.
+
+We also provide some basic set operations (union, intersection, etc).
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import re, math
+import rpki.ipaddrs, rpki.oids, rpki.exceptions
+
+## @var inherit_token
+# Token used to indicate inheritance in read and print syntax.
+
+inherit_token = "<inherit>"
+
+re_asn_range = re.compile("^([0-9]+)-([0-9]+)$")
+re_address_range = re.compile("^([0-9:.a-fA-F]+)-([0-9:.a-fA-F]+)$")
+re_prefix_with_maxlen = re.compile("^([0-9:.a-fA-F]+)/([0-9]+)-([0-9]+)$")
+re_prefix = re.compile("^([0-9:.a-fA-F]+)/([0-9]+)$")
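+
+# A minimal usage sketch of the set operations provided below (illustrative
+# only; the values are arbitrary and chosen purely for demonstration):
+#
+#   asns = resource_set_as("1-5,10")
+#   more = resource_set_as("4-12")
+#   str(asns.union(more))          # "1-12"
+#   str(asns.intersection(more))   # "4-5,10"
+#   str(asns.difference(more))     # "1-3"
+#
+#   v4 = resource_set_ipv4("10.0.0.0/24")
+#   v4.contains(rpki.ipaddrs.v4addr("10.0.0.77"))   # True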
+
+class resource_range(object):
+ """
+ Generic resource range type. Assumes underlying type is some kind
+ of integer.
+
+ This is a virtual class. You probably don't want to use this type
+ directly.
+ """
+
+ def __init__(self, min, max):
+ """
+ Initialize and sanity check a resource_range.
+ """
+ assert min.__class__ is max.__class__, "Type mismatch, %r doesn't match %r" % (min.__class__, max.__class__)
+ assert min <= max, "Mis-ordered range: %s before %s" % (min, max)
+ self.min = min
+ self.max = max
+
+ def __cmp__(self, other):
+ """
+ Compare two resource_range objects.
+ """
+ assert self.__class__ is other.__class__, "Type mismatch, comparing %r with %r" % (self.__class__, other.__class__)
+ return cmp(self.min, other.min) or cmp(self.max, other.max)
+
+class resource_range_as(resource_range):
+ """
+ Range of Autonomous System Numbers.
+
+ Denotes a single ASN by a range whose min and max values are
+ identical.
+ """
+
+ ## @var datum_type
+ # Type of underlying data (min and max).
+
+ datum_type = long
+
+ def __str__(self):
+ """
+ Convert a resource_range_as to string format.
+ """
+ if self.min == self.max:
+ return str(self.min)
+ else:
+ return str(self.min) + "-" + str(self.max)
+
+ def to_rfc3779_tuple(self):
+ """
+ Convert a resource_range_as to tuple format for RFC 3779 ASN.1 encoding.
+ """
+ if self.min == self.max:
+ return ("id", self.min)
+ else:
+ return ("range", (self.min, self.max))
+
+ @classmethod
+ def parse_str(cls, x):
+ """
+    Parse an ASN resource range from text (e.g., XML attributes).
+ """
+ r = re_asn_range.match(x)
+ if r:
+ return cls(long(r.group(1)), long(r.group(2)))
+ else:
+ return cls(long(x), long(x))
+
+ @classmethod
+ def from_strings(cls, a, b = None):
+ """
+ Construct ASN range from strings.
+ """
+ if b is None:
+ b = a
+ return cls(long(a), long(b))
+
+class resource_range_ip(resource_range):
+ """
+ Range of (generic) IP addresses.
+
+ Prefixes are converted to ranges on input, and ranges that can be
+ represented as prefixes are written as prefixes on output.
+
+ This is a virtual class. You probably don't want to use it
+ directly.
+ """
+
+ def prefixlen(self):
+ """
+ Determine whether a resource_range_ip can be expressed as a
+    prefix.  Returns the prefix length if it can, otherwise raises a
+    MustBePrefix exception.
+ """
+ mask = self.min ^ self.max
+ if self.min & mask != 0:
+ raise rpki.exceptions.MustBePrefix
+ prefixlen = self.datum_type.bits
+ while mask & 1:
+ prefixlen -= 1
+ mask >>= 1
+ if mask:
+ raise rpki.exceptions.MustBePrefix
+ return prefixlen
+
+  # Backwards compatibility, will go away at some point
+ _prefixlen = prefixlen
+
+ def __str__(self):
+ """
+ Convert a resource_range_ip to string format.
+ """
+ try:
+ return str(self.min) + "/" + str(self.prefixlen())
+ except rpki.exceptions.MustBePrefix:
+ return str(self.min) + "-" + str(self.max)
+
+ def to_rfc3779_tuple(self):
+ """
+ Convert a resource_range_ip to tuple format for RFC 3779 ASN.1
+ encoding.
+ """
+ try:
+ return ("addressPrefix", _long2bs(self.min, self.datum_type.bits,
+ prefixlen = self.prefixlen()))
+ except rpki.exceptions.MustBePrefix:
+ return ("addressRange", (_long2bs(self.min, self.datum_type.bits, strip = 0),
+ _long2bs(self.max, self.datum_type.bits, strip = 1)))
+
+ @classmethod
+ def parse_str(cls, x):
+ """
+    Parse an IP address range or prefix from text (e.g., XML attributes).
+ """
+ r = re_address_range.match(x)
+ if r:
+ return cls(cls.datum_type(r.group(1)), cls.datum_type(r.group(2)))
+ r = re_prefix.match(x)
+ if r:
+ return cls.make_prefix(cls.datum_type(r.group(1)), int(r.group(2)))
+ raise rpki.exceptions.BadIPResource, 'Bad IP resource "%s"' % (x)
+
+ @classmethod
+ def make_prefix(cls, prefix, prefixlen):
+ """
+ Construct a resource range corresponding to a prefix.
+ """
+ assert isinstance(prefix, cls.datum_type) and isinstance(prefixlen, (int, long))
+ assert prefixlen >= 0 and prefixlen <= cls.datum_type.bits, "Nonsensical prefix length: %s" % prefixlen
+ mask = (1 << (cls.datum_type.bits - prefixlen)) - 1
+ assert (prefix & mask) == 0, "Resource not in canonical form: %s/%s" % (prefix, prefixlen)
+ return cls(cls.datum_type(prefix), cls.datum_type(prefix | mask))
+
+ def chop_into_prefixes(self, result):
+ """
+ Chop up a resource_range_ip into ranges that can be represented as
+ prefixes.
+ """
+ try:
+ self.prefixlen()
+ result.append(self)
+ except rpki.exceptions.MustBePrefix:
+ min = self.min
+ max = self.max
+ while max >= min:
+ bits = int(math.log(max - min + 1, 2))
+ while True:
+ mask = ~(~0 << bits)
+ assert min + mask <= max
+ if min & mask == 0:
+ break
+ assert bits > 0
+ bits -= 1
+ result.append(self.make_prefix(min, self.datum_type.bits - bits))
+ min = self.datum_type(min + mask + 1)
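+
+  # Illustrative examples of the two methods above (comments only, not
+  # executed here):
+  #
+  #   resource_range_ipv4.from_strings("10.0.0.0", "10.0.0.255").prefixlen()
+  #     => 24
+  #   resource_range_ipv4.from_strings("10.0.0.0", "10.0.0.2").prefixlen()
+  #     => raises rpki.exceptions.MustBePrefix
+  #
+  #   r = resource_range_ipv4.from_strings("10.0.0.4", "10.0.0.10")
+  #   prefixes = []
+  #   r.chop_into_prefixes(prefixes)
+  #   [str(p) for p in prefixes]
+  #     => ["10.0.0.4/30", "10.0.0.8/31", "10.0.0.10/32"]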
+
+ @classmethod
+ def from_strings(cls, a, b = None):
+ """
+ Construct IP address range from strings.
+ """
+ if b is None:
+ b = a
+ a = rpki.ipaddrs.parse(a)
+ b = rpki.ipaddrs.parse(b)
+ if a.__class__ is not b.__class__:
+ raise TypeError
+ if cls is resource_range_ip:
+ if isinstance(a, rpki.ipaddrs.v4addr):
+ return resource_range_ipv4(a, b)
+ if isinstance(a, rpki.ipaddrs.v6addr):
+ return resource_range_ipv6(a, b)
+ elif isinstance(a, cls.datum_type):
+ return cls(a, b)
+ raise TypeError
+
+class resource_range_ipv4(resource_range_ip):
+ """
+ Range of IPv4 addresses.
+ """
+
+ ## @var datum_type
+ # Type of underlying data (min and max).
+
+ datum_type = rpki.ipaddrs.v4addr
+
+class resource_range_ipv6(resource_range_ip):
+ """
+ Range of IPv6 addresses.
+ """
+
+ ## @var datum_type
+ # Type of underlying data (min and max).
+
+ datum_type = rpki.ipaddrs.v6addr
+
+def _rsplit(rset, that):
+ """
+ Utility function to split a resource range into two resource ranges.
+ """
+ this = rset.pop(0)
+ cell_type = type(this.min)
+ assert type(this) is type(that) and type(this.max) is cell_type and \
+ type(that.min) is cell_type and type(that.max) is cell_type
+ if this.min < that.min:
+ rset.insert(0, type(this)(this.min, cell_type(that.min - 1)))
+ rset.insert(1, type(this)(that.min, this.max))
+ else:
+ assert this.max > that.max
+ rset.insert(0, type(this)(this.min, that.max))
+ rset.insert(1, type(this)(cell_type(that.max + 1), this.max))
+
+class resource_set(list):
+ """
+ Generic resource set, a list subclass containing resource ranges.
+
+ This is a virtual class. You probably don't want to use it
+ directly.
+ """
+
+ ## @var inherit
+ # Boolean indicating whether this resource_set uses RFC 3779 inheritance.
+
+ inherit = False
+
+ ## @var canonical
+ # Whether this resource_set is currently in canonical form.
+
+ canonical = False
+
+ def __init__(self, ini = None):
+ """
+ Initialize a resource_set.
+ """
+ list.__init__(self)
+ if isinstance(ini, (int, long)):
+ ini = str(ini)
+ if ini is inherit_token:
+ self.inherit = True
+ elif isinstance(ini, str) and len(ini):
+ self.extend(self.parse_str(s) for s in ini.split(","))
+ elif isinstance(ini, tuple):
+ self.parse_rfc3779_tuple(ini)
+ elif isinstance(ini, list):
+ self.extend(ini)
+ else:
+ assert ini is None or (isinstance(ini, str) and ini == ""), "Unexpected initializer: %s" % str(ini)
+ self.canonize()
+
+ def canonize(self):
+ """
+ Whack this resource_set into canonical form.
+ """
+ assert not self.inherit or not self
+ if not self.canonical:
+ self.sort()
+ for i in xrange(len(self) - 2, -1, -1):
+ if self[i].max + 1 == self[i+1].min:
+ self[i] = type(self[i])(self[i].min, self[i+1].max)
+ self.pop(i + 1)
+ if __debug__:
+ for i in xrange(0, len(self) - 1):
+ assert self[i].max < self[i+1].min, "Resource overlap: %s %s" % (self[i], self[i+1])
+ self.canonical = True
+
+ def append(self, item):
+ """
+ Wrapper around list.append() (q.v.) to reset canonical flag.
+ """
+ list.append(self, item)
+ self.canonical = False
+
+ def extend(self, item):
+ """
+ Wrapper around list.extend() (q.v.) to reset canonical flag.
+ """
+ list.extend(self, item)
+ self.canonical = False
+
+ def __str__(self):
+ """
+ Convert a resource_set to string format.
+ """
+ if self.inherit:
+ return inherit_token
+ else:
+ return ",".join(str(x) for x in self)
+
+ def _comm(self, other):
+ """
+ Like comm(1), sort of.
+
+ Returns a tuple of three resource sets: resources only in self,
+ resources only in other, and resources in both. Used (not very
+ efficiently) as the basis for most set operations on resource
+ sets.
+ """
+
+ assert not self.inherit
+ assert type(self) is type(other), "Type mismatch %r %r" % (type(self), type(other))
+ set1 = type(self)(self) # clone and whack into canonical form
+ set2 = type(other)(other) # ditto
+ only1, only2, both = [], [], []
+ while set1 or set2:
+ if set1 and (not set2 or set1[0].max < set2[0].min):
+ only1.append(set1.pop(0))
+ elif set2 and (not set1 or set2[0].max < set1[0].min):
+ only2.append(set2.pop(0))
+ elif set1[0].min < set2[0].min:
+ _rsplit(set1, set2[0])
+ elif set2[0].min < set1[0].min:
+ _rsplit(set2, set1[0])
+ elif set1[0].max < set2[0].max:
+ _rsplit(set2, set1[0])
+ elif set2[0].max < set1[0].max:
+ _rsplit(set1, set2[0])
+ else:
+ assert set1[0].min == set2[0].min and set1[0].max == set2[0].max
+ both.append(set1.pop(0))
+ set2.pop(0)
+ return type(self)(only1), type(self)(only2), type(self)(both)
+
+ def union(self, other):
+ """
+ Set union for resource sets.
+ """
+
+ assert not self.inherit
+ assert type(self) is type(other), "Type mismatch: %r %r" % (type(self), type(other))
+ set1 = type(self)(self) # clone and whack into canonical form
+ set2 = type(other)(other) # ditto
+ result = []
+ while set1 or set2:
+ if set1 and (not set2 or set1[0].max < set2[0].min):
+ result.append(set1.pop(0))
+ elif set2 and (not set1 or set2[0].max < set1[0].min):
+ result.append(set2.pop(0))
+ else:
+ this = set1.pop(0)
+ that = set2.pop(0)
+ assert type(this) is type(that)
+ if this.min < that.min: min = this.min
+ else: min = that.min
+ if this.max > that.max: max = this.max
+ else: max = that.max
+ result.append(type(this)(min, max))
+ while set1 and set1[0].max <= max:
+ assert set1[0].min >= min
+ del set1[0]
+ while set2 and set2[0].max <= max:
+ assert set2[0].min >= min
+ del set2[0]
+ return type(self)(result)
+
+ def intersection(self, other):
+ """
+ Set intersection for resource sets.
+ """
+ return self._comm(other)[2]
+
+ def difference(self, other):
+ """
+ Set difference for resource sets.
+ """
+ return self._comm(other)[0]
+
+ def symmetric_difference(self, other):
+ """
+ Set symmetric difference (XOR) for resource sets.
+ """
+ com = self._comm(other)
+ return com[0].union(com[1])
+
+ def contains(self, item):
+ """
+ Set membership test for resource sets.
+ """
+ assert not self.inherit
+ self.canonize()
+ if not self:
+ return False
+ if type(item) is type(self[0]):
+ min = item.min
+ max = item.max
+ else:
+ min = item
+ max = item
+ lo = 0
+ hi = len(self)
+ while lo < hi:
+ mid = (lo + hi) / 2
+ if self[mid].max < max:
+ lo = mid + 1
+ else:
+ hi = mid
+ return lo < len(self) and self[lo].min <= min and self[lo].max >= max
+
+ def issubset(self, other):
+ """
+ Test whether self is a subset (possibly improper) of other.
+ """
+ for i in self:
+ if not other.contains(i):
+ return False
+ return True
+
+ def issuperset(self, other):
+ """
+ Test whether self is a superset (possibly improper) of other.
+ """
+ return other.issubset(self)
+
+ @classmethod
+ def from_sql(cls, sql, query, args = None):
+ """
+ Create resource set from an SQL query.
+
+ sql is an object that supports execute() and fetchall() methods
+ like a DB API 2.0 cursor object.
+
+ query is an SQL query that returns a sequence of (min, max) pairs.
+ """
+
+ sql.execute(query, args)
+ return cls(ini = [cls.range_type(cls.range_type.datum_type(b),
+ cls.range_type.datum_type(e))
+ for (b, e) in sql.fetchall()])
+
+ @classmethod
+ def parse_str(cls, s):
+ """
+    Parse a resource set from a text string (e.g., XML attributes).
+    This is a backwards-compatibility wrapper; the real functionality
+    is now part of the range classes.
+ """
+ return cls.range_type.parse_str(s)
+
+class resource_set_as(resource_set):
+ """
+ Autonomous System Number resource set.
+ """
+
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
+
+ range_type = resource_range_as
+
+ def parse_rfc3779_tuple(self, x):
+ """
+ Parse ASN resource from tuple format generated by RFC 3779 ASN.1
+ decoder.
+ """
+ if x[0] == "asIdsOrRanges":
+ for aor in x[1]:
+ if aor[0] == "range":
+ min = aor[1][0]
+ max = aor[1][1]
+ else:
+ min = aor[1]
+ max = min
+ self.append(resource_range_as(min, max))
+ else:
+ assert x[0] == "inherit"
+ self.inherit = True
+
+ def to_rfc3779_tuple(self):
+ """
+ Convert ASN resource set into tuple format used for RFC 3779 ASN.1
+ encoding.
+ """
+ self.canonize()
+ if self:
+ return ("asIdsOrRanges", tuple(a.to_rfc3779_tuple() for a in self))
+ elif self.inherit:
+ return ("inherit", "")
+ else:
+ return None
+
+class resource_set_ip(resource_set):
+ """
+ (Generic) IP address resource set.
+
+ This is a virtual class. You probably don't want to use it
+ directly.
+ """
+
+ def parse_rfc3779_tuple(self, x):
+ """
+ Parse IP address resource sets from tuple format generated by RFC
+ 3779 ASN.1 decoder.
+ """
+ if x[0] == "addressesOrRanges":
+ for aor in x[1]:
+ if aor[0] == "addressRange":
+ min = _bs2long(aor[1][0], self.range_type.datum_type.bits, 0)
+ max = _bs2long(aor[1][1], self.range_type.datum_type.bits, 1)
+ else:
+ min = _bs2long(aor[1], self.range_type.datum_type.bits, 0)
+ max = _bs2long(aor[1], self.range_type.datum_type.bits, 1)
+ self.append(self.range_type(self.range_type.datum_type(min), self.range_type.datum_type(max)))
+ else:
+ assert x[0] == "inherit"
+ self.inherit = True
+
+ def to_roa_prefix_set(self):
+ """
+ Convert from a resource set to a ROA prefix set.
+ """
+ prefix_ranges = []
+ for r in self:
+ r.chop_into_prefixes(prefix_ranges)
+ return self.roa_prefix_set_type([
+ self.roa_prefix_set_type.prefix_type(r.min, r.prefixlen())
+ for r in prefix_ranges])
+
+ def to_rfc3779_tuple(self):
+ """
+ Convert IP resource set into tuple format used by RFC 3779 ASN.1
+ encoder.
+ """
+ self.canonize()
+ if self:
+ return (self.afi, ("addressesOrRanges", tuple(a.to_rfc3779_tuple() for a in self)))
+ elif self.inherit:
+ return (self.afi, ("inherit", ""))
+ else:
+ return None
+
+class resource_set_ipv4(resource_set_ip):
+ """
+ IPv4 address resource set.
+ """
+
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
+
+ range_type = resource_range_ipv4
+
+ ## @var afi
+ # Address Family Identifier value for IPv4.
+
+ afi = "\x00\x01"
+
+class resource_set_ipv6(resource_set_ip):
+ """
+ IPv6 address resource set.
+ """
+
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
+
+ range_type = resource_range_ipv6
+
+ ## @var afi
+ # Address Family Identifier value for IPv6.
+
+ afi = "\x00\x02"
+
+def _bs2long(bs, addrlen, fill):
+ """
+ Utility function to convert a bitstring (POW.pkix tuple
+ representation) into a Python long.
+ """
+ x = 0L
+ for y in bs:
+ x = (x << 1) | y
+ for y in xrange(addrlen - len(bs)):
+ x = (x << 1) | fill
+ return x
+
+def _long2bs(number, addrlen, prefixlen = None, strip = None):
+ """
+ Utility function to convert a Python long into a POW.pkix tuple
+ bitstring. This is a bit complicated because it supports the
+ fiendishly compact encoding used in RFC 3779.
+ """
+ assert prefixlen is None or strip is None
+ bs = []
+ while number:
+ bs.append(int(number & 1))
+ number >>= 1
+ if addrlen > len(bs):
+ bs.extend((0 for i in xrange(addrlen - len(bs))))
+ bs.reverse()
+ if prefixlen is not None:
+ return tuple(bs[0:prefixlen])
+ if strip is not None:
+ while bs and bs[-1] == strip:
+ bs.pop()
+ return tuple(bs)
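+
+# Worked example (illustrative only): the IPv4 prefix 10.0.0.0/8 encodes as
+# an 8-bit RFC 3779 bitstring, and decoding that bitstring back with the two
+# different fill values recovers the two ends of the corresponding range:
+#
+#   _long2bs(0x0A000000, 32, prefixlen = 8)    # (0, 0, 0, 0, 1, 0, 1, 0)
+#   _bs2long((0, 0, 0, 0, 1, 0, 1, 0), 32, 0)  # 0x0A000000, ie, 10.0.0.0
+#   _bs2long((0, 0, 0, 0, 1, 0, 1, 0), 32, 1)  # 0x0AFFFFFF, ie, 10.255.255.255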
+
+class resource_bag(object):
+ """
+ Container to simplify passing around the usual triple of ASN, IPv4,
+ and IPv6 resource sets.
+ """
+
+ ## @var asn
+ # Set of Autonomous System Number resources.
+
+ ## @var v4
+ # Set of IPv4 resources.
+
+ ## @var v6
+ # Set of IPv6 resources.
+
+ ## @var valid_until
+ # Expiration date of resources, for setting certificate notAfter field.
+
+ def __init__(self, asn = None, v4 = None, v6 = None, valid_until = None):
+ self.asn = asn or resource_set_as()
+ self.v4 = v4 or resource_set_ipv4()
+ self.v6 = v6 or resource_set_ipv6()
+ self.valid_until = valid_until
+
+ def oversized(self, other):
+ """
+ True iff self is oversized with respect to other.
+ """
+ return not self.asn.issubset(other.asn) or \
+ not self.v4.issubset(other.v4) or \
+ not self.v6.issubset(other.v6)
+
+ def undersized(self, other):
+ """
+ True iff self is undersized with respect to other.
+ """
+ return not other.asn.issubset(self.asn) or \
+ not other.v4.issubset(self.v4) or \
+ not other.v6.issubset(self.v6)
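+
+  # Quick illustration of oversized()/undersized() (values are arbitrary):
+  #
+  #   mine   = resource_bag(asn = resource_set_as("1-10"))
+  #   parent = resource_bag(asn = resource_set_as("1-5"))
+  #   mine.oversized(parent)    # True:  1-10 is not a subset of 1-5
+  #   mine.undersized(parent)   # False: 1-5 is a subset of 1-10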
+
+ @classmethod
+ def from_rfc3779_tuples(cls, exts):
+ """
+ Build a resource_bag from intermediate form generated by RFC 3779
+ ASN.1 decoder.
+ """
+ asn = None
+ v4 = None
+ v6 = None
+ for x in exts:
+ if x[0] == rpki.oids.name2oid["sbgp-autonomousSysNum"]:
+ assert len(x[2]) == 1 or x[2][1] is None, "RDI not implemented: %s" % (str(x))
+ assert asn is None
+ asn = resource_set_as(x[2][0])
+ if x[0] == rpki.oids.name2oid["sbgp-ipAddrBlock"]:
+ for fam in x[2]:
+ if fam[0] == resource_set_ipv4.afi:
+ assert v4 is None
+ v4 = resource_set_ipv4(fam[1])
+ if fam[0] == resource_set_ipv6.afi:
+ assert v6 is None
+ v6 = resource_set_ipv6(fam[1])
+ return cls(asn, v4, v6)
+
+ def empty(self):
+ """
+ True iff all resource sets in this bag are empty.
+ """
+ return not self.asn and not self.v4 and not self.v6
+
+ def __eq__(self, other):
+ return self.asn == other.asn and \
+ self.v4 == other.v4 and \
+ self.v6 == other.v6 and \
+ self.valid_until == other.valid_until
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def intersection(self, other):
+ """
+ Compute intersection with another resource_bag. valid_until
+ attribute (if any) inherits from self.
+ """
+ return self.__class__(self.asn.intersection(other.asn),
+ self.v4.intersection(other.v4),
+ self.v6.intersection(other.v6),
+ self.valid_until)
+
+ def union(self, other):
+ """
+ Compute union with another resource_bag. valid_until attribute
+ (if any) inherits from self.
+ """
+ return self.__class__(self.asn.union(other.asn),
+ self.v4.union(other.v4),
+ self.v6.union(other.v6),
+ self.valid_until)
+
+ def __str__(self):
+ s = ""
+ if self.asn:
+ s += "ASN: %s" % self.asn
+ if self.v4:
+ if s:
+ s += ", "
+ s += "V4: %s" % self.v4
+ if self.v6:
+ if s:
+ s += ", "
+ s += "V6: %s" % self.v6
+ return s
+
+# Sadly, there are enough differences between RFC 3779 and the data
+# structures in the latest proposed ROA format that we can't just use
+# the RFC 3779 code for ROAs. So we need a separate set of classes
+# that are similar in concept but different in detail, with conversion
+# functions. Such is life. I suppose it might be possible to do this
+# with multiple inheritance, but that's probably more bother than it's
+# worth.
+
+class roa_prefix(object):
+ """
+ ROA prefix. This is similar to the resource_range_ip class, but
+ differs in that it only represents prefixes, never ranges, and
+ includes the maximum prefix length as an additional value.
+
+    This is a virtual class; you probably don't want to use it directly.
+ """
+
+ ## @var prefix
+ # The prefix itself, an IP address with bits beyond the prefix
+ # length zeroed.
+
+ ## @var prefixlen
+ # (Minimum) prefix length.
+
+ ## @var max_prefixlen
+  # Maximum prefix length.
+
+ def __init__(self, prefix, prefixlen, max_prefixlen = None):
+ """
+ Initialize a ROA prefix. max_prefixlen is optional and defaults
+ to prefixlen. max_prefixlen must not be smaller than prefixlen.
+ """
+ if max_prefixlen is None:
+ max_prefixlen = prefixlen
+ assert max_prefixlen >= prefixlen, "Bad max_prefixlen: %d must not be shorter than %d" % (max_prefixlen, prefixlen)
+ self.prefix = prefix
+ self.prefixlen = prefixlen
+ self.max_prefixlen = max_prefixlen
+
+ def __cmp__(self, other):
+ """
+    Compare two ROA prefix objects.  Comparison is based on prefix,
+ prefixlen, and max_prefixlen, in that order.
+ """
+ assert self.__class__ is other.__class__
+ return (cmp(self.prefix, other.prefix) or
+ cmp(self.prefixlen, other.prefixlen) or
+ cmp(self.max_prefixlen, other.max_prefixlen))
+
+ def __str__(self):
+ """
+ Convert a ROA prefix to string format.
+ """
+ if self.prefixlen == self.max_prefixlen:
+ return str(self.prefix) + "/" + str(self.prefixlen)
+ else:
+ return str(self.prefix) + "/" + str(self.prefixlen) + "-" + str(self.max_prefixlen)
+
+ def to_resource_range(self):
+ """
+    Convert this ROA prefix to the equivalent resource_range_ip
+    object.  This is an irreversible transformation because it loses
+    the max_prefixlen attribute; nothing we can do about that.
+ """
+ return self.range_type.make_prefix(self.prefix, self.prefixlen)
+
+ def min(self):
+ """
+ Return lowest address covered by prefix.
+ """
+ return self.prefix
+
+ def max(self):
+ """
+ Return highest address covered by prefix.
+ """
+ t = self.range_type.datum_type
+ return t(self.prefix | ((1 << (t.bits - self.prefixlen)) - 1))
+
+ def to_roa_tuple(self):
+ """
+ Convert a resource_range_ip to tuple format for ROA ASN.1
+ encoding.
+ """
+ return (_long2bs(self.prefix, self.range_type.datum_type.bits, prefixlen = self.prefixlen),
+ None if self.prefixlen == self.max_prefixlen else self.max_prefixlen)
+
+ @classmethod
+ def parse_str(cls, x):
+ """
+    Parse a ROA prefix from text (e.g., an XML attribute).
+ """
+ r = re_prefix_with_maxlen.match(x)
+ if r:
+ return cls(cls.range_type.datum_type(r.group(1)), int(r.group(2)), int(r.group(3)))
+ r = re_prefix.match(x)
+ if r:
+ return cls(cls.range_type.datum_type(r.group(1)), int(r.group(2)))
+ raise rpki.exceptions.BadROAPrefix, 'Bad ROA prefix "%s"' % (x)
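+
+  # Examples (illustrative only, using the IPv4 subclass defined below):
+  #
+  #   str(roa_prefix_ipv4.parse_str("10.0.0.0/16-24"))   # "10.0.0.0/16-24"
+  #   str(roa_prefix_ipv4.parse_str("10.0.0.0/16"))      # "10.0.0.0/16"
+  #                                      # (max_prefixlen defaults to 16)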
+
+class roa_prefix_ipv4(roa_prefix):
+ """
+ IPv4 ROA prefix.
+ """
+
+ ## @var range_type
+ # Type of corresponding resource_range_ip.
+
+ range_type = resource_range_ipv4
+
+class roa_prefix_ipv6(roa_prefix):
+ """
+ IPv6 ROA prefix.
+ """
+
+ ## @var range_type
+ # Type of corresponding resource_range_ip.
+
+ range_type = resource_range_ipv6
+
+class roa_prefix_set(list):
+ """
+ Set of ROA prefixes, analogous to the resource_set_ip class.
+ """
+
+ def __init__(self, ini = None):
+ """
+ Initialize a ROA prefix set.
+ """
+ list.__init__(self)
+ if isinstance(ini, str) and len(ini):
+ self.extend(self.parse_str(s) for s in ini.split(","))
+ elif isinstance(ini, (list, tuple)):
+ self.extend(ini)
+ else:
+ assert ini is None or ini == "", "Unexpected initializer: %s" % str(ini)
+ self.sort()
+
+ def __str__(self):
+ """
+ Convert a ROA prefix set to string format.
+ """
+ return ",".join(str(x) for x in self)
+
+ @classmethod
+ def parse_str(cls, s):
+ """
+    Parse a ROA prefix from text (e.g., an XML attribute).
+    This method is a backwards-compatibility shim.
+ """
+ return cls.prefix_type.parse_str(s)
+
+ def to_resource_set(self):
+ """
+ Convert a ROA prefix set to a resource set. This is an
+ irreversable transformation. We have to compute a union here
+ because ROA prefix sets can include overlaps, while RFC 3779
+ resource sets cannot. This is ugly, and there is almost certainly
+ a more efficient way to do this, but start by getting the output
+ right before worrying about making it fast or pretty.
+ """
+ r = self.resource_set_type()
+ s = self.resource_set_type()
+ s.append(None)
+ for p in self:
+ s[0] = p.to_resource_range()
+ r = r.union(s)
+ return r
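+
+  # Example (illustrative only): overlapping ROA prefixes collapse into a
+  # single RFC 3779 resource set entry, using the IPv4 subclass defined below:
+  #
+  #   str(roa_prefix_set_ipv4("10.0.0.0/24-32,10.0.0.0/16").to_resource_set())
+  #     => "10.0.0.0/16"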
+
+ @classmethod
+ def from_sql(cls, sql, query, args = None):
+ """
+ Create ROA prefix set from an SQL query.
+
+ sql is an object that supports execute() and fetchall() methods
+ like a DB API 2.0 cursor object.
+
+ query is an SQL query that returns a sequence of (prefix,
+ prefixlen, max_prefixlen) triples.
+ """
+
+ sql.execute(query, args)
+ return cls([cls.prefix_type(cls.prefix_type.range_type.datum_type(x), int(y), int(z))
+ for (x, y, z) in sql.fetchall()])
+
+ def to_roa_tuple(self):
+ """
+ Convert ROA prefix set into tuple format used by ROA ASN.1
+ encoder. This is a variation on the format used in RFC 3779.
+ """
+ if self:
+ return (self.resource_set_type.afi, tuple(a.to_roa_tuple() for a in self))
+ else:
+ return None
+
+class roa_prefix_set_ipv4(roa_prefix_set):
+ """
+ Set of IPv4 ROA prefixes.
+ """
+
+ ## @var prefix_type
+ # Type of underlying roa_prefix.
+
+ prefix_type = roa_prefix_ipv4
+
+ ## @var resource_set_type
+ # Type of corresponding resource_set_ip class.
+
+ resource_set_type = resource_set_ipv4
+
+# Fix back link from resource_set to roa_prefix
+resource_set_ipv4.roa_prefix_set_type = roa_prefix_set_ipv4
+
+class roa_prefix_set_ipv6(roa_prefix_set):
+ """
+ Set of IPv6 ROA prefixes.
+ """
+
+ ## @var prefix_type
+ # Type of underlying roa_prefix.
+
+ prefix_type = roa_prefix_ipv6
+
+ ## @var resource_set_type
+ # Type of corresponding resource_set_ip class.
+
+ resource_set_type = resource_set_ipv6
+
+# Fix back link from resource_set to roa_prefix
+resource_set_ipv6.roa_prefix_set_type = roa_prefix_set_ipv6
+
+# Test suite for set operations.
+
+if __name__ == "__main__":
+
+ def testprefix(v):
+ return " (%s)" % v.to_roa_prefix_set() if isinstance(v, resource_set_ip) else ""
+
+ def test1(t, s1, s2):
+ if isinstance(s1, str) and isinstance(s2, str):
+ print "x: ", s1
+ print "y: ", s2
+ r1 = t(s1)
+ r2 = t(s2)
+ print "x: ", r1, testprefix(r1)
+ print "y: ", r2, testprefix(r2)
+ v1 = r1._comm(r2)
+ v2 = r2._comm(r1)
+ assert v1[0] == v2[1] and v1[1] == v2[0] and v1[2] == v2[2]
+ for i in r1: assert r1.contains(i) and r1.contains(i.min) and r1.contains(i.max)
+ for i in r2: assert r2.contains(i) and r2.contains(i.min) and r2.contains(i.max)
+ for i in v1[0]: assert r1.contains(i) and not r2.contains(i)
+ for i in v1[1]: assert not r1.contains(i) and r2.contains(i)
+ for i in v1[2]: assert r1.contains(i) and r2.contains(i)
+ v1 = r1.union(r2)
+ v2 = r2.union(r1)
+ assert v1 == v2
+ print "x|y:", v1, testprefix(v1)
+ v1 = r1.difference(r2)
+ v2 = r2.difference(r1)
+ print "x-y:", v1, testprefix(v1)
+ print "y-x:", v2, testprefix(v2)
+ v1 = r1.symmetric_difference(r2)
+ v2 = r2.symmetric_difference(r1)
+ assert v1 == v2
+ print "x^y:", v1, testprefix(v1)
+ v1 = r1.intersection(r2)
+ v2 = r2.intersection(r1)
+ assert v1 == v2
+ print "x&y:", v1, testprefix(v1)
+
+ def test2(t, s1, s2):
+ print "x: ", s1
+ print "y: ", s2
+ r1 = t(s1)
+ r2 = t(s2)
+ print "x: ", r1
+ print "y: ", r2
+ print "x>y:", (r1 > r2)
+ print "x<y:", (r1 < r2)
+ test1(t.resource_set_type,
+ r1.to_resource_set(),
+ r2.to_resource_set())
+
+ def test3(t, s1, s2):
+ test1(t, s1, s2)
+ r1 = t(s1).to_roa_prefix_set()
+ r2 = t(s2).to_roa_prefix_set()
+ print "x: ", r1
+ print "y: ", r2
+ print "x>y:", (r1 > r2)
+ print "x<y:", (r1 < r2)
+ test1(t.roa_prefix_set_type.resource_set_type,
+ r1.to_resource_set(),
+ r2.to_resource_set())
+
+ print
+ print "Testing set operations on resource sets"
+ print
+ test1(resource_set_as, "1,2,3,4,5,6,11,12,13,14,15", "1,2,3,4,5,6,111,121,131,141,151")
+ print
+ test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.0.0.0/24")
+ print
+ test1(resource_set_ipv4, "10.0.0.0/24", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test1(resource_set_ipv4, "10.0.0.0/24", "10.0.0.0/32,10.0.0.2/32,10.0.0.4/32")
+ print
+ print "Testing set operations on ROA prefixes"
+ print
+ test2(roa_prefix_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test2(roa_prefix_set_ipv4, "10.0.0.0/24-32,10.6.0.0/24-32", "10.3.0.0/24,10.0.0.0/16-32")
+ print
+ test2(roa_prefix_set_ipv4, "10.3.0.0/24-24,10.0.0.0/16-32", "10.3.0.0/24,10.0.0.0/16-32")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::7/128")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120-128")
+ print
+ test3(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
+ print
+ test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
diff --git a/rpkid.without_tls/rpki/roa.py b/rpkid.without_tls/rpki/roa.py
new file mode 100644
index 00000000..9f7c1849
--- /dev/null
+++ b/rpkid.without_tls/rpki/roa.py
@@ -0,0 +1,76 @@
+"""
+ROA (Route Origin Authorization).
+
+At the moment this is just the ASN.1 encoder.
+
+This corresponds to draft-ietf-sidr-roa-format, which is a work in
+progress, so this may need updating later.
+
+$Id$
+
+Copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+draft-ietf-sidr-roa-format-03 2.1.3.2 specifies:
+
+ RouteOriginAttestation ::= SEQUENCE {
+ version [0] INTEGER DEFAULT 0,
+ asID ASID,
+ ipAddrBlocks SEQUENCE OF ROAIPAddressFamily }
+
+ ASID ::= INTEGER
+
+ ROAIPAddressFamily ::= SEQUENCE {
+ addressFamily OCTET STRING (SIZE (2..3)),
+ addresses SEQUENCE OF ROAIPAddress }
+
+ ROAIPAddress ::= SEQUENCE {
+ address IPAddress,
+ maxLength INTEGER OPTIONAL }
+
+ IPAddress ::= BIT STRING
+"""
+
+from POW._der import *
+
+class ROAIPAddress(Sequence):
+ def __init__(self, optional=0, default=''):
+ self.address = BitString()
+ self.maxLength = Integer(1)
+ contents = [ self.address, self.maxLength ]
+ Sequence.__init__(self, contents, optional, default)
+
+class ROAIPAddresses(SequenceOf):
+ def __init__(self, optional=0, default=''):
+ SequenceOf.__init__(self, ROAIPAddress, optional, default)
+
+class ROAIPAddressFamily(Sequence):
+ def __init__(self, optional=0, default=''):
+ self.addressFamily = OctetString()
+ self.addresses = ROAIPAddresses()
+ contents = [ self.addressFamily, self.addresses ]
+ Sequence.__init__(self, contents, optional, default)
+
+class ROAIPAddressFamilies(SequenceOf):
+ def __init__(self, optional=0, default=''):
+ SequenceOf.__init__(self, ROAIPAddressFamily, optional, default)
+
+class RouteOriginAttestation(Sequence):
+ def __init__(self, optional=0, default=''):
+ self.version = Integer()
+ self.explicitVersion = Explicit(CLASS_CONTEXT, FORM_CONSTRUCTED, 0, self.version, 0, 'oAMCAQA=')
+ self.asID = Integer()
+ self.ipAddrBlocks = ROAIPAddressFamilies()
+ contents = [ self.explicitVersion, self.asID, self.ipAddrBlocks ]
+ Sequence.__init__(self, contents, optional, default)
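+
+# Worked example (illustrative values): a ROA in which AS 64496 authorizes
+# 10.0.0.0/8 with a maximum prefix length of 24 carries:
+#
+#   version       0 (the default, encoded via the explicit [0] tag above)
+#   asID          64496
+#   ipAddrBlocks  one ROAIPAddressFamily with addressFamily 0x0001 (IPv4)
+#                 containing one ROAIPAddress whose address is the 8-bit
+#                 string 00001010 and whose maxLength is 24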
diff --git a/rpkid.without_tls/rpki/rpki_engine.py b/rpkid.without_tls/rpki/rpki_engine.py
new file mode 100644
index 00000000..f31e1df7
--- /dev/null
+++ b/rpkid.without_tls/rpki/rpki_engine.py
@@ -0,0 +1,1411 @@
+"""
+Global context for rpkid.
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import lxml.etree, re, random
+import rpki.resource_set, rpki.up_down, rpki.left_right, rpki.x509, rpki.sql
+import rpki.http, rpki.config, rpki.exceptions, rpki.relaxng, rpki.log, rpki.async
+import rpki.sundial, rpki.publication  # used below; imported explicitly rather than relying on sibling modules to pull them in
+
+class rpkid_context(object):
+ """
+ A container for various global rpkid parameters.
+ """
+
+ def __init__(self, cfg):
+
+ self.sql = rpki.sql.session(cfg)
+
+ self.bpki_ta = rpki.x509.X509(Auto_update = cfg.get("bpki-ta"))
+ self.irdb_cert = rpki.x509.X509(Auto_update = cfg.get("irdb-cert"))
+ self.irbe_cert = rpki.x509.X509(Auto_update = cfg.get("irbe-cert"))
+ self.rpkid_cert = rpki.x509.X509(Auto_update = cfg.get("rpkid-cert"))
+ self.rpkid_key = rpki.x509.RSA( Auto_update = cfg.get("rpkid-key"))
+
+ self.irdb_url = cfg.get("irdb-url")
+
+ self.http_server_host = cfg.get("server-host", "")
+ self.http_server_port = cfg.getint("server-port", 4433)
+
+ self.publication_kludge_base = cfg.get("publication-kludge-base", "publication/")
+
+ self.use_internal_cron = cfg.getboolean("use-internal-cron", True)
+
+ self.initial_delay = random.randint(cfg.getint("initial-delay-min", 10),
+ cfg.getint("initial-delay-max", 120))
+
+ # Should be much longer in production
+ self.cron_period = rpki.sundial.timedelta(seconds = cfg.getint("cron-period", 120))
+ self.cron_keepalive = rpki.sundial.timedelta(seconds = cfg.getint("cron-keepalive", 0))
+ if not self.cron_keepalive:
+ self.cron_keepalive = self.cron_period * 4
+ self.cron_timeout = None
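+
+  # The values read above come from rpkid's configuration file.  A rough
+  # sketch of the corresponding section follows; the section name and file
+  # paths are assumptions for illustration, only the option names and the
+  # defaults shown in the code above are taken from this module:
+  #
+  #   [rpkid]
+  #   bpki-ta           = bpki/ca.cer
+  #   rpkid-cert        = bpki/rpkid.cer
+  #   rpkid-key         = bpki/rpkid.key
+  #   irdb-cert         = bpki/irdbd.cer
+  #   irbe-cert         = bpki/irbe.cer
+  #   irdb-url          = http://localhost:4434/
+  #   server-host       = localhost
+  #   server-port       = 4433
+  #   use-internal-cron = true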
+
+ def start_cron(self):
+ """
+ Start clock for rpkid's internal cron process.
+ """
+
+ if self.use_internal_cron:
+ self.cron_timer = rpki.async.timer(handler = self.cron)
+ when = rpki.sundial.now() + rpki.sundial.timedelta(seconds = self.initial_delay)
+ rpki.log.debug("Scheduling initial cron pass at %s" % when)
+ self.cron_timer.set(when)
+ else:
+ rpki.log.debug("Not using internal clock, start_cron() call ignored")
+
+ def irdb_query(self, q_pdu, callback, errback, expected_pdu_count = None):
+ """
+ Perform an IRDB callback query.
+ """
+
+ rpki.log.trace()
+
+ q_msg = rpki.left_right.msg.query()
+ q_msg.append(q_pdu)
+ q_der = rpki.left_right.cms_msg().wrap(q_msg, self.rpkid_key, self.rpkid_cert)
+
+ def unwrap(r_der):
+ r_cms = rpki.left_right.cms_msg(DER = r_der)
+ r_msg = r_cms.unwrap((self.bpki_ta, self.irdb_cert))
+ if not r_msg.is_reply() or not all(type(r_pdu) is type(q_pdu) for r_pdu in r_msg):
+ raise rpki.exceptions.BadIRDBReply, "Unexpected response to IRDB query: %s" % r_cms.pretty_print_content()
+ if expected_pdu_count is not None and len(r_msg) != expected_pdu_count:
+ assert isinstance(expected_pdu_count, (int, long))
+ raise rpki.exceptions.BadIRDBReply, "Expected exactly %d PDU%s from IRDB: %s" (
+ expected_pdu_count, "" if expected_pdu_count == 1 else "s", r_cms.pretty_print_content())
+ callback(r_msg)
+
+ rpki.http.client(
+ url = self.irdb_url,
+ msg = q_der,
+ callback = unwrap,
+ errback = errback)
+
+ def irdb_query_child_resources(self, self_handle, child_handle, callback, errback):
+ """
+ Ask IRDB about a child's resources.
+ """
+
+ rpki.log.trace()
+
+ q_pdu = rpki.left_right.list_resources_elt()
+ q_pdu.self_handle = self_handle
+ q_pdu.child_handle = child_handle
+
+ def done(r_msg):
+ callback(rpki.resource_set.resource_bag(
+ asn = r_msg[0].asn,
+ v4 = r_msg[0].ipv4,
+ v6 = r_msg[0].ipv6,
+ valid_until = r_msg[0].valid_until))
+
+ self.irdb_query(q_pdu, done, errback, expected_pdu_count = 1)
+
+ def irdb_query_roa_requests(self, self_handle, callback, errback):
+ """
+ Ask IRDB about self's ROA requests.
+ """
+
+ rpki.log.trace()
+
+ q_pdu = rpki.left_right.list_roa_requests_elt()
+ q_pdu.self_handle = self_handle
+
+ self.irdb_query(q_pdu, callback, errback)
+
+ def left_right_handler(self, query, path, cb):
+ """
+ Process one left-right PDU.
+ """
+
+ rpki.log.trace()
+
+ def done(r_msg):
+ reply = rpki.left_right.cms_msg().wrap(r_msg, self.rpkid_key, self.rpkid_cert)
+ self.sql.sweep()
+ cb(200, reply)
+
+ try:
+ self.sql.ping()
+ q_msg = rpki.left_right.cms_msg(DER = query).unwrap((self.bpki_ta, self.irbe_cert))
+ if not q_msg.is_query():
+ raise rpki.exceptions.BadQuery, "Message type is not query"
+ q_msg.serve_top_level(self, done)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, data:
+ rpki.log.traceback()
+ cb(500, "Unhandled exception %s" % data)
+
+ up_down_url_regexp = re.compile("/up-down/([-A-Z0-9_]+)/([-A-Z0-9_]+)$", re.I)
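+
+  # For example (hypothetical handles), a PDU POSTed to a path ending in
+  # "/up-down/Alice/Bob" is routed to self_handle "Alice" acting as parent
+  # of child_handle "Bob" in up_down_handler() below.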
+
+ def up_down_handler(self, query, path, cb):
+ """
+ Process one up-down PDU.
+ """
+
+ rpki.log.trace()
+
+ def done(reply):
+ self.sql.sweep()
+ cb(200, reply)
+
+ try:
+ self.sql.ping()
+ match = self.up_down_url_regexp.search(path)
+ if match is None:
+ raise rpki.exceptions.BadContactURL, "Bad path: %s" % path
+ self_handle, child_handle = match.groups()
+ child = rpki.left_right.child_elt.sql_fetch_where1(self, "self.self_handle = %s AND child.child_handle = %s AND child.self_id = self.self_id",
+ (self_handle, child_handle), "self")
+ if child is None:
+ raise rpki.exceptions.ChildNotFound, "Could not find child %s" % child_handle
+ child.serve_up_down(query, done)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, data:
+ rpki.log.traceback()
+ cb(400, "Could not process PDU: %s" % data)
+
+ def checkpoint(self):
+ """
+    Record that we were still alive when we got here by resetting the
+    keepalive timer.
+ """
+ self.cron_timeout = rpki.sundial.now() + self.cron_keepalive
+
+ def cron(self, cb = None):
+ """
+ Periodic tasks.
+ """
+
+ rpki.log.trace()
+ self.sql.ping()
+
+ now = rpki.sundial.now()
+
+ assert self.use_internal_cron or self.cron_timeout is None
+
+ if self.use_internal_cron:
+
+ if self.cron_timeout and self.cron_timeout < now:
+ rpki.log.warn("cron keepalive threshold %s has expired, breaking lock" % self.cron_timeout)
+ self.cron_timeout = None
+
+ when = now + self.cron_period
+ rpki.log.debug("Scheduling next cron run at %s" % when)
+ self.cron_timer.set(when)
+
+ if self.cron_timeout:
+ rpki.log.warn("cron already running, keepalive will expire at %s" % self.cron_timeout)
+ return
+
+ self.checkpoint()
+
+ def loop(iterator, s):
+ self.checkpoint()
+ s.cron(iterator)
+
+ def done():
+ self.sql.sweep()
+ self.cron_timeout = None
+ rpki.log.info("Finished cron run started at %s" % now)
+ if not self.use_internal_cron:
+ cb()
+
+ def lose(e):
+ self.cron_timeout = None
+ if self.use_internal_cron:
+ rpki.log.traceback()
+ else:
+ raise
+
+ try:
+ rpki.async.iterator(rpki.left_right.self_elt.sql_fetch_all(self), loop, done)
+
+ except (rpki.async.ExitNow, SystemExit):
+ self.cron_timeout = None
+ raise
+
+ except Exception, e:
+ lose(e)
+
+ def cronjob_handler(self, query, path, cb):
+ """
+ External trigger for periodic tasks. This is somewhat obsolete
+ now that we have internal timers, but the test framework still
+ uses it.
+ """
+
+ if self.use_internal_cron:
+ cb(500, "Running cron internally")
+ else:
+ self.cron(lambda: cb(200, "OK"))
+
+class ca_obj(rpki.sql.sql_persistent):
+ """
+ Internal CA object.
+ """
+
+ sql_template = rpki.sql.template(
+ "ca",
+ "ca_id",
+ "last_crl_sn",
+ ("next_crl_update", rpki.sundial.datetime),
+ "last_issued_sn", "last_manifest_sn",
+ ("next_manifest_update", rpki.sundial.datetime),
+ "sia_uri", "parent_id", "parent_resource_class")
+
+ last_crl_sn = 0
+ last_issued_sn = 0
+ last_manifest_sn = 0
+
+ def parent(self):
+ """Fetch parent object to which this CA object links."""
+ return rpki.left_right.parent_elt.sql_fetch(self.gctx, self.parent_id)
+
+ def ca_details(self):
+ """Fetch all ca_detail objects that link to this CA object."""
+ return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s", (self.ca_id,))
+
+ def fetch_pending(self):
+ """Fetch the pending ca_details for this CA, if any."""
+ return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'pending'", (self.ca_id,))
+
+ def fetch_active(self):
+ """Fetch the active ca_detail for this CA, if any."""
+ return ca_detail_obj.sql_fetch_where1(self.gctx, "ca_id = %s AND state = 'active'", (self.ca_id,))
+
+ def fetch_deprecated(self):
+ """Fetch deprecated ca_details for this CA, if any."""
+ return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'deprecated'", (self.ca_id,))
+
+ def fetch_revoked(self):
+ """Fetch revoked ca_details for this CA, if any."""
+ return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'revoked'", (self.ca_id,))
+
+ def fetch_issue_response_candidates(self):
+ """
+ Fetch ca_details which are candidates for consideration when
+ processing an up-down issue_response PDU.
+ """
+ #return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND latest_ca_cert IS NOT NULL AND state != 'revoked'", (self.ca_id,))
+ return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state != 'revoked'", (self.ca_id,))
+
+ def construct_sia_uri(self, parent, rc):
+ """
+ Construct the sia_uri value for this CA given configured
+ information and the parent's up-down protocol list_response PDU.
+ """
+
+ sia_uri = rc.suggested_sia_head and rc.suggested_sia_head.rsync()
+ if not sia_uri or not sia_uri.startswith(parent.sia_base):
+ sia_uri = parent.sia_base
+ if not sia_uri.endswith("/"):
+ raise rpki.exceptions.BadURISyntax, "SIA URI must end with a slash: %s" % sia_uri
+ return sia_uri + str(self.ca_id) + "/"
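+
+  # Example (hypothetical URIs): with parent.sia_base
+  # "rsync://repo.example.org/pub/", ca_id 17, and no usable
+  # suggested_sia_head, this returns "rsync://repo.example.org/pub/17/".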
+
+ def check_for_updates(self, parent, rc, cb, eb):
+ """
+    Parent has signaled continued existence of a resource class we
+ already knew about, so we need to check for an updated
+ certificate, changes in resource coverage, revocation and reissue
+ with the same key, etc.
+ """
+
+ sia_uri = self.construct_sia_uri(parent, rc)
+ sia_uri_changed = self.sia_uri != sia_uri
+ if sia_uri_changed:
+ self.sia_uri = sia_uri
+ self.sql_mark_dirty()
+
+ rc_resources = rc.to_resource_bag()
+ cert_map = dict((c.cert.get_SKI(), c) for c in rc.certs)
+
+ def loop(iterator, ca_detail):
+
+ self.gctx.checkpoint()
+
+ rc_cert = cert_map.pop(ca_detail.public_key.get_SKI(), None)
+
+ if rc_cert is None:
+
+ rpki.log.warn("Certificate in database missing from list_response, class %r, SKI %s, maybe parent certificate went away?"
+ % (rc.class_name, ca_detail.public_key.gSKI()))
+ publisher = publication_queue()
+ ca_detail.delete(ca = ca_detail.ca(), publisher = publisher)
+ return publisher.call_pubd(iterator, eb)
+
+ else:
+
+ if ca_detail.state in ("pending", "active"):
+
+ if ca_detail.state == "pending":
+ current_resources = rpki.resource_set.resource_bag()
+ else:
+ current_resources = ca_detail.latest_ca_cert.get_3779resources()
+
+ if (ca_detail.state == "pending" or
+ sia_uri_changed or
+ ca_detail.latest_ca_cert != rc_cert.cert or
+ current_resources.undersized(rc_resources) or
+ current_resources.oversized(rc_resources)):
+ return ca_detail.update(
+ parent = parent,
+ ca = self,
+ rc = rc,
+ sia_uri_changed = sia_uri_changed,
+ old_resources = current_resources,
+ callback = iterator,
+ errback = eb)
+
+ iterator()
+
+ def done():
+ if cert_map:
+ rpki.log.warn("Certificates in list_response missing from our database, class %r, SKIs %s"
+ % (rc.class_name, ", ".join(c.cert.gSKI() for c in cert_map.values())))
+ self.gctx.checkpoint()
+ cb()
+
+ ca_details = self.fetch_issue_response_candidates()
+
+ if True:
+ for x in cert_map.itervalues():
+ rpki.log.debug("Parent thinks I have %r %s" % (x, x.cert.gSKI()))
+ for x in ca_details:
+ if x.latest_ca_cert is not None:
+ rpki.log.debug("I think I have %r %s" % (x, x.latest_ca_cert.gSKI()))
+
+ if ca_details:
+ rpki.async.iterator(ca_details, loop, done)
+ else:
+ rpki.log.warn("Existing certificate class %r with no certificates, rekeying" % rc.class_name)
+ self.gctx.checkpoint()
+ self.rekey(cb, eb)
+
+ @classmethod
+ def create(cls, parent, rc, cb, eb):
+ """
+    Parent has signaled existence of a new resource class, so we need
+ to create and set up a corresponding CA object.
+ """
+
+ self = cls()
+ self.gctx = parent.gctx
+ self.parent_id = parent.parent_id
+ self.parent_resource_class = rc.class_name
+ self.sql_store()
+ self.sia_uri = self.construct_sia_uri(parent, rc)
+ ca_detail = ca_detail_obj.create(self)
+
+ def done(issue_response):
+ ca_detail.activate(
+ ca = self,
+ cert = issue_response.payload.classes[0].certs[0].cert,
+ uri = issue_response.payload.classes[0].certs[0].cert_url,
+ callback = cb,
+ errback = eb)
+
+ rpki.up_down.issue_pdu.query(parent, self, ca_detail, done, eb)
+
+ def delete(self, parent, callback):
+ """
+ The list of current resource classes received from parent does not
+ include the class corresponding to this CA, so we need to delete
+ it (and its little dog too...).
+
+    All certs published by this CA are now invalid, so we need to
+    withdraw them, the CRL, and the manifest from the repository,
+    delete all child_cert and ca_detail records associated with this
+    CA, and finally delete this CA itself.
+ """
+
+ def lose(e):
+ rpki.log.traceback()
+ rpki.log.warn("Could not delete CA %r, skipping: %s" % (self, e))
+ callback()
+
+ def done():
+ self.sql_delete()
+ callback()
+
+ publisher = publication_queue()
+ for ca_detail in self.ca_details():
+ ca_detail.delete(ca = self, publisher = publisher)
+ publisher.call_pubd(done, lose)
+
+ def next_serial_number(self):
+ """
+ Allocate a certificate serial number.
+ """
+ self.last_issued_sn += 1
+ self.sql_mark_dirty()
+ return self.last_issued_sn
+
+ def next_manifest_number(self):
+ """
+ Allocate a manifest serial number.
+ """
+ self.last_manifest_sn += 1
+ self.sql_mark_dirty()
+ return self.last_manifest_sn
+
+ def next_crl_number(self):
+ """
+ Allocate a CRL serial number.
+ """
+ self.last_crl_sn += 1
+ self.sql_mark_dirty()
+ return self.last_crl_sn
+
+ def rekey(self, cb, eb):
+ """
+ Initiate a rekey operation for this ca. Generate a new keypair.
+ Request cert from parent using new keypair. Mark result as our
+ active ca_detail. Reissue all child certs issued by this ca using
+ the new ca_detail.
+ """
+
+ rpki.log.trace()
+
+ parent = self.parent()
+ old_detail = self.fetch_active()
+ new_detail = ca_detail_obj.create(self)
+
+ def done(issue_response):
+ new_detail.activate(
+ ca = self,
+ cert = issue_response.payload.classes[0].certs[0].cert,
+ uri = issue_response.payload.classes[0].certs[0].cert_url,
+ predecessor = old_detail,
+ callback = cb,
+ errback = eb)
+
+ rpki.up_down.issue_pdu.query(parent, self, new_detail, done, eb)
+
+ def revoke(self, cb, eb):
+ """
+ Revoke deprecated ca_detail objects associated with this ca.
+ """
+
+ rpki.log.trace()
+
+ def loop(iterator, ca_detail):
+ ca_detail.revoke(cb = iterator, eb = eb)
+
+ rpki.async.iterator(self.fetch_deprecated(), loop, cb)
+
+class ca_detail_obj(rpki.sql.sql_persistent):
+ """
+ Internal CA detail object.
+ """
+
+ sql_template = rpki.sql.template(
+ "ca_detail",
+ "ca_detail_id",
+ ("private_key_id", rpki.x509.RSA),
+ ("public_key", rpki.x509.RSApublic),
+ ("latest_ca_cert", rpki.x509.X509),
+ ("manifest_private_key_id", rpki.x509.RSA),
+ ("manifest_public_key", rpki.x509.RSApublic),
+ ("latest_manifest_cert", rpki.x509.X509),
+ ("latest_manifest", rpki.x509.SignedManifest),
+ ("latest_crl", rpki.x509.CRL),
+ ("crl_published", rpki.sundial.datetime),
+ ("manifest_published", rpki.sundial.datetime),
+ "state",
+ "ca_cert_uri",
+ "ca_id")
+
+ crl_published = None
+ manifest_published = None
+ latest_ca_cert = None
+
+ def sql_decode(self, vals):
+ """
+ Extra assertions for SQL decode of a ca_detail_obj.
+ """
+ rpki.sql.sql_persistent.sql_decode(self, vals)
+ assert self.public_key is None or self.private_key_id is None or self.public_key.get_DER() == self.private_key_id.get_public_DER()
+ assert self.manifest_public_key is None or self.manifest_private_key_id is None or self.manifest_public_key.get_DER() == self.manifest_private_key_id.get_public_DER()
+
+ def ca(self):
+ """Fetch CA object to which this ca_detail links."""
+ return ca_obj.sql_fetch(self.gctx, self.ca_id)
+
+ def child_certs(self, child = None, ski = None, unique = False):
+ """Fetch all child_cert objects that link to this ca_detail."""
+ return rpki.rpki_engine.child_cert_obj.fetch(self.gctx, child, self, ski, unique)
+
+ def revoked_certs(self):
+ """Fetch all revoked_cert objects that link to this ca_detail."""
+ return revoked_cert_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
+
+ def roas(self):
+ """Fetch all ROA objects that link to this ca_detail."""
+ return rpki.rpki_engine.roa_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
+
+ def crl_uri(self, ca):
+ """Return publication URI for this ca_detail's CRL."""
+ return ca.sia_uri + self.crl_uri_tail()
+
+ def crl_uri_tail(self):
+ """Return tail (filename portion) of publication URI for this ca_detail's CRL."""
+ return self.public_key.gSKI() + ".crl"
+
+ def manifest_uri(self, ca):
+ """Return publication URI for this ca_detail's manifest."""
+ return ca.sia_uri + self.public_key.gSKI() + ".mnf"
+
+ def activate(self, ca, cert, uri, callback, errback, predecessor = None):
+ """
+ Activate this ca_detail.
+ """
+
+ publisher = publication_queue()
+
+ self.latest_ca_cert = cert
+ self.ca_cert_uri = uri.rsync()
+ self.generate_manifest_cert(ca)
+ self.state = "active"
+ self.generate_crl(publisher = publisher)
+ self.generate_manifest(publisher = publisher)
+ self.sql_mark_dirty()
+
+ if predecessor is not None:
+ predecessor.state = "deprecated"
+ predecessor.sql_mark_dirty()
+ for child_cert in predecessor.child_certs():
+ child_cert.reissue(ca_detail = self, publisher = publisher)
+ for roa in predecessor.roas():
+ roa.regenerate(publisher = publisher)
+
+ publisher.call_pubd(callback, errback)
+
+ def delete(self, ca, publisher, allow_failure = False):
+ """
+ Delete this ca_detail and all of the certs it issued.
+
+ If allow_failure is true, we clean up as much as we can but don't
+ raise an exception.
+ """
+
+ repository = ca.parent().repository()
+ for child_cert in self.child_certs():
+ publisher.withdraw(cls = rpki.publication.certificate_elt, uri = child_cert.uri(ca), obj = child_cert.cert, repository = repository,
+ handler = False if allow_failure else None)
+ for roa in self.roas():
+ roa.revoke(publisher = publisher, allow_failure = allow_failure)
+ try:
+ latest_manifest = self.latest_manifest
+ except AttributeError:
+ latest_manifest = None
+ if latest_manifest is not None:
+ publisher.withdraw(cls = rpki.publication.manifest_elt, uri = self.manifest_uri(ca), obj = self.latest_manifest, repository = repository,
+ handler = False if allow_failure else None)
+ try:
+ latest_crl = self.latest_crl
+ except AttributeError:
+ latest_crl = None
+ if latest_crl is not None:
+ publisher.withdraw(cls = rpki.publication.crl_elt, uri = self.crl_uri(ca), obj = self.latest_crl, repository = repository,
+ handler = False if allow_failure else None)
+ for cert in self.child_certs() + self.revoked_certs():
+ cert.sql_delete()
+ self.sql_delete()
+
+ def revoke(self, cb, eb):
+ """
+ Request revocation of all certificates whose SKI matches the key
+ for this ca_detail.
+
+ Tasks:
+
+ - Request revocation of old keypair by parent.
+
+ - Revoke all child certs issued by the old keypair.
+
+ - Generate a final CRL, signed with the old keypair, listing all
+ the revoked certs, with a nextUpdate time later than the expiration
+ of the last cert or CRL signed by the old keypair.
+
+ - Generate a corresponding final manifest.
+
+ - Destroy old keypairs.
+
+ - Leave the final CRL and manifest in place until their nextUpdate
+ time has passed.
+ """
+
+ ca = self.ca()
+ parent = ca.parent()
+
+ def parent_revoked(r_msg):
+
+ if r_msg.payload.ski != self.latest_ca_cert.gSKI():
+ raise rpki.exceptions.SKIMismatch
+
+ crl_interval = rpki.sundial.timedelta(seconds = parent.self().crl_interval)
+
+ nextUpdate = rpki.sundial.now()
+
+ if self.latest_manifest is not None:
+ try:
+ self.latest_manifest.get_content()
+ except rpki.exceptions.CMSContentNotSet:
+ self.latest_manifest.extract()
+ nextUpdate = nextUpdate.later(self.latest_manifest.getNextUpdate())
+
+ if self.latest_crl is not None:
+ nextUpdate = nextUpdate.later(self.latest_crl.getNextUpdate())
+
+ publisher = publication_queue()
+
+ for child_cert in self.child_certs():
+ nextUpdate = nextUpdate.later(child_cert.cert.getNotAfter())
+ child_cert.revoke(publisher = publisher)
+
+ nextUpdate += crl_interval
+ self.generate_crl(publisher = publisher, nextUpdate = nextUpdate)
+ self.generate_manifest(publisher = publisher, nextUpdate = nextUpdate)
+ self.private_key_id = None
+ self.manifest_private_key_id = None
+ self.manifest_public_key = None
+ self.latest_manifest_cert = None
+ self.state = "revoked"
+ self.sql_mark_dirty()
+ publisher.call_pubd(cb, eb)
+
+ rpki.up_down.revoke_pdu.query(ca, self.latest_ca_cert.gSKI(), parent_revoked, eb)
+
+ def update(self, parent, ca, rc, sia_uri_changed, old_resources, callback, errback):
+ """
+ Need to get a new certificate for this ca_detail and perhaps frob
+ children of this ca_detail.
+ """
+
+ def issued(issue_response):
+ self.latest_ca_cert = issue_response.payload.classes[0].certs[0].cert
+ new_resources = self.latest_ca_cert.get_3779resources()
+ publisher = publication_queue()
+
+ if sia_uri_changed or old_resources.oversized(new_resources):
+ for child_cert in self.child_certs():
+ child_resources = child_cert.cert.get_3779resources()
+ if sia_uri_changed or child_resources.oversized(new_resources):
+ child_cert.reissue(
+ ca_detail = self,
+ resources = child_resources.intersection(new_resources),
+ publisher = publisher)
+
+ publisher.call_pubd(callback, errback)
+
+ rpki.up_down.issue_pdu.query(parent, ca, self, issued, errback)
+
+ @classmethod
+ def create(cls, ca):
+ """
+ Create a new ca_detail object for a specified CA.
+ """
+ self = cls()
+ self.gctx = ca.gctx
+ self.ca_id = ca.ca_id
+ self.state = "pending"
+
+ self.private_key_id = rpki.x509.RSA.generate()
+ self.public_key = self.private_key_id.get_RSApublic()
+
+ self.manifest_private_key_id = rpki.x509.RSA.generate()
+ self.manifest_public_key = self.manifest_private_key_id.get_RSApublic()
+
+ self.sql_store()
+ return self
+
+ def issue_ee(self, ca, resources, subject_key, sia = None):
+ """
+ Issue a new EE certificate.
+ """
+
+ return self.latest_ca_cert.issue(
+ keypair = self.private_key_id,
+ subject_key = subject_key,
+ serial = ca.next_serial_number(),
+ sia = sia,
+ aia = self.ca_cert_uri,
+ crldp = self.crl_uri(ca),
+ resources = resources,
+ notAfter = self.latest_ca_cert.getNotAfter(),
+ is_ca = False)
+
+ def generate_manifest_cert(self, ca):
+ """
+ Generate a new manifest certificate for this ca_detail.
+ """
+
+ resources = rpki.resource_set.resource_bag(
+ asn = rpki.resource_set.resource_set_as(rpki.resource_set.inherit_token),
+ v4 = rpki.resource_set.resource_set_ipv4(rpki.resource_set.inherit_token),
+ v6 = rpki.resource_set.resource_set_ipv6(rpki.resource_set.inherit_token))
+
+ self.latest_manifest_cert = self.issue_ee(ca, resources, self.manifest_public_key)
+
+ def issue(self, ca, child, subject_key, sia, resources, publisher, child_cert = None):
+ """
+ Issue a new certificate to a child. Optional child_cert argument
+ specifies an existing child_cert object to update in place; if not
+ specified, we create a new one. Returns the child_cert object
+ containing the newly issued cert.
+ """
+
+ assert child_cert is None or (child_cert.child_id == child.child_id and
+ child_cert.ca_detail_id == self.ca_detail_id)
+
+ cert = self.latest_ca_cert.issue(
+ keypair = self.private_key_id,
+ subject_key = subject_key,
+ serial = ca.next_serial_number(),
+ aia = self.ca_cert_uri,
+ crldp = self.crl_uri(ca),
+ sia = sia,
+ resources = resources,
+ notAfter = resources.valid_until)
+
+ if child_cert is None:
+ child_cert = rpki.rpki_engine.child_cert_obj(
+ gctx = child.gctx,
+ child_id = child.child_id,
+ ca_detail_id = self.ca_detail_id,
+ cert = cert)
+ rpki.log.debug("Created new child_cert %r" % child_cert)
+ else:
+ child_cert.cert = cert
+ rpki.log.debug("Reusing existing child_cert %r" % child_cert)
+
+ child_cert.ski = cert.get_SKI()
+ child_cert.published = rpki.sundial.now()
+ child_cert.sql_store()
+ publisher.publish(cls = rpki.publication.certificate_elt, uri = child_cert.uri(ca), obj = child_cert.cert, repository = ca.parent().repository(),
+ handler = child_cert.published_callback)
+ self.generate_manifest(publisher = publisher)
+ return child_cert
+
+ def generate_crl(self, publisher, nextUpdate = None):
+ """
+ Generate a new CRL for this ca_detail. At the moment this is
+ unconditional, that is, it is up to the caller to decide whether a
+ new CRL is needed.
+ """
+
+ ca = self.ca()
+ parent = ca.parent()
+ crl_interval = rpki.sundial.timedelta(seconds = parent.self().crl_interval)
+ now = rpki.sundial.now()
+
+ if nextUpdate is None:
+ nextUpdate = now + crl_interval
+
+ certlist = []
+ for revoked_cert in self.revoked_certs():
+ if now > revoked_cert.expires + crl_interval:
+ revoked_cert.sql_delete()
+ else:
+ certlist.append((revoked_cert.serial, revoked_cert.revoked.toASN1tuple(), ()))
+ certlist.sort()
+
+ self.latest_crl = rpki.x509.CRL.generate(
+ keypair = self.private_key_id,
+ issuer = self.latest_ca_cert,
+ serial = ca.next_crl_number(),
+ thisUpdate = now,
+ nextUpdate = nextUpdate,
+ revokedCertificates = certlist)
+
+ self.crl_published = rpki.sundial.now()
+ self.sql_mark_dirty()
+ publisher.publish(cls = rpki.publication.crl_elt, uri = self.crl_uri(ca), obj = self.latest_crl, repository = parent.repository(),
+ handler = self.crl_published_callback)
+
+ def crl_published_callback(self, pdu):
+ """
+ Check result of CRL publication.
+ """
+ pdu.raise_if_error()
+ self.crl_published = None
+ self.sql_mark_dirty()
+
+ def generate_manifest(self, publisher, nextUpdate = None):
+ """
+ Generate a new manifest for this ca_detail.
+ """
+
+ ca = self.ca()
+ parent = ca.parent()
+ crl_interval = rpki.sundial.timedelta(seconds = parent.self().crl_interval)
+ now = rpki.sundial.now()
+
+ if nextUpdate is None:
+ nextUpdate = now + crl_interval
+
+ if self.latest_manifest_cert is None or self.latest_manifest_cert.getNotAfter() < nextUpdate:
+ self.generate_manifest_cert(ca)
+
+ objs = [(self.crl_uri_tail(), self.latest_crl)]
+ objs.extend((c.uri_tail(), c.cert) for c in self.child_certs())
+ objs.extend((r.uri_tail(), r.roa) for r in self.roas() if r.roa is not None)
+
+ self.latest_manifest = rpki.x509.SignedManifest.build(
+ serial = ca.next_manifest_number(),
+ thisUpdate = now,
+ nextUpdate = nextUpdate,
+ names_and_objs = objs,
+ keypair = self.manifest_private_key_id,
+ certs = self.latest_manifest_cert)
+
+ self.manifest_published = rpki.sundial.now()
+ self.sql_mark_dirty()
+ publisher.publish(cls = rpki.publication.manifest_elt, uri = self.manifest_uri(ca), obj = self.latest_manifest, repository = parent.repository(),
+ handler = self.manifest_published_callback)
+
+ def manifest_published_callback(self, pdu):
+ """
+ Check result of manifest publication.
+ """
+ pdu.raise_if_error()
+ self.manifest_published = None
+ self.sql_mark_dirty()
+
+
+class child_cert_obj(rpki.sql.sql_persistent):
+ """
+ Certificate that has been issued to a child.
+ """
+
+ sql_template = rpki.sql.template(
+ "child_cert",
+ "child_cert_id",
+ ("cert", rpki.x509.X509),
+ "child_id",
+ "ca_detail_id",
+ "ski",
+ ("published", rpki.sundial.datetime))
+
+ def __init__(self, gctx = None, child_id = None, ca_detail_id = None, cert = None):
+ """
+ Initialize a child_cert_obj.
+ """
+ rpki.sql.sql_persistent.__init__(self)
+ self.gctx = gctx
+ self.child_id = child_id
+ self.ca_detail_id = ca_detail_id
+ self.cert = cert
+ self.published = None
+ if child_id or ca_detail_id or cert:
+ self.sql_mark_dirty()
+
+ def child(self):
+ """Fetch child object to which this child_cert object links."""
+ return rpki.left_right.child_elt.sql_fetch(self.gctx, self.child_id)
+
+ def ca_detail(self):
+ """Fetch ca_detail object to which this child_cert object links."""
+ return ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+
+ def uri_tail(self):
+ """Return the tail (filename) portion of the URI for this child_cert."""
+ return self.cert.gSKI() + ".cer"
+
+ def uri(self, ca):
+ """Return the publication URI for this child_cert."""
+ return ca.sia_uri + self.uri_tail()
+
+ def revoke(self, publisher, generate_crl_and_manifest = False):
+ """
+ Revoke a child cert.
+ """
+
+ ca_detail = self.ca_detail()
+ ca = ca_detail.ca()
+ rpki.log.debug("Revoking %r %r" % (self, self.uri(ca)))
+ revoked_cert_obj.revoke(cert = self.cert, ca_detail = ca_detail)
+ publisher.withdraw(cls = rpki.publication.certificate_elt, uri = self.uri(ca), obj = self.cert, repository = ca.parent().repository())
+ self.gctx.sql.sweep()
+ self.sql_delete()
+ if generate_crl_and_manifest:
+ ca_detail.generate_crl(publisher = publisher)
+ ca_detail.generate_manifest(publisher = publisher)
+
+ def reissue(self, ca_detail, publisher, resources = None, sia = None):
+ """
+ Reissue an existing child cert, reusing the public key. If the
+ child cert we would generate is identical to the one we already
+ have, we just return the one we already have. If we have to
+ revoke the old child cert when generating the new one, we have to
+ generate a new child_cert_obj, so calling code that needs the
+ updated child_cert_obj must use the return value from this method.
+ """
+
+ ca = ca_detail.ca()
+ child = self.child()
+
+ old_resources = self.cert.get_3779resources()
+ old_sia = self.cert.get_SIA()
+ old_ca_detail = self.ca_detail()
+
+ if resources is None:
+ resources = old_resources
+
+ if sia is None:
+ sia = old_sia
+
+ assert resources.valid_until is not None and old_resources.valid_until is not None
+
+ if resources == old_resources and sia == old_sia and ca_detail == old_ca_detail:
+ rpki.log.debug("No change to %r" % self)
+ return self
+
+ must_revoke = old_resources.oversized(resources) or old_resources.valid_until > resources.valid_until
+ new_issuer = ca_detail != old_ca_detail
+
+ rpki.log.debug("Reissuing %r, must_revoke %s, new_issuer %s" % (self, must_revoke, new_issuer))
+
+ if resources.valid_until != old_resources.valid_until:
+ rpki.log.debug("Validity changed: %s %s" % ( old_resources.valid_until, resources.valid_until))
+
+ if must_revoke:
+ for x in child.child_certs(ca_detail = ca_detail, ski = self.ski):
+ rpki.log.debug("Revoking child_cert %r" % x)
+ x.revoke(publisher = publisher)
+ ca_detail.generate_crl(publisher = publisher)
+ ca_detail.generate_manifest(publisher = publisher)
+
+ child_cert = ca_detail.issue(
+ ca = ca,
+ child = child,
+ subject_key = self.cert.getPublicKey(),
+ sia = sia,
+ resources = resources,
+ child_cert = None if must_revoke or new_issuer else self,
+ publisher = publisher)
+
+ rpki.log.debug("New child_cert %r uri %s" % (child_cert, child_cert.uri(ca)))
+
+ return child_cert
+
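+ # Usage sketch (hypothetical caller-side names, nothing defined here):
+ # because reissue() may create and return a brand new child_cert_obj,
+ # callers must keep the return value rather than the object they
+ # called it on, e.g.:
+ #
+ #   child_cert = child_cert.reissue(ca_detail = new_ca_detail, publisher = publisher)
+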
+ @classmethod
+ def fetch(cls, gctx = None, child = None, ca_detail = None, ski = None, unique = False):
+ """
+ Fetch all child_cert objects matching a particular set of
+ parameters. This is a wrapper to consolidate various queries that
+ would otherwise be inline SQL WHERE expressions. In most cases
+ code calls this indirectly, through methods in other classes.
+ """
+
+ args = []
+ where = []
+
+ if child:
+ where.append("child_id = %s")
+ args.append(child.child_id)
+
+ if ca_detail:
+ where.append("ca_detail_id = %s")
+ args.append(ca_detail.ca_detail_id)
+
+ if ski:
+ where.append("ski = %s")
+ args.append(ski)
+
+ where = " AND ".join(where)
+
+ gctx = gctx or (child and child.gctx) or (ca_detail and ca_detail.gctx) or None
+
+ if unique:
+ return cls.sql_fetch_where1(gctx, where, args)
+ else:
+ return cls.sql_fetch_where(gctx, where, args)
+
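+ # Illustration (a sketch, not a call made anywhere in particular): given
+ # ca_detail and ski values held by the caller,
+ #
+ #   child_cert_obj.fetch(ca_detail = ca_detail, ski = ski, unique = True)
+ #
+ # builds the WHERE expression "ca_detail_id = %s AND ski = %s" and returns
+ # at most one matching object (or None).
+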
+ def published_callback(self, pdu):
+ """
+ Publication callback: check result and mark published.
+ """
+ pdu.raise_if_error()
+ self.published = None
+ self.sql_mark_dirty()
+
+class revoked_cert_obj(rpki.sql.sql_persistent):
+ """
+ Tombstone for a revoked certificate.
+ """
+
+ sql_template = rpki.sql.template(
+ "revoked_cert",
+ "revoked_cert_id",
+ "serial",
+ "ca_detail_id",
+ ("revoked", rpki.sundial.datetime),
+ ("expires", rpki.sundial.datetime))
+
+ def __init__(self, gctx = None, serial = None, revoked = None, expires = None, ca_detail_id = None):
+ """Initialize a revoked_cert_obj."""
+ rpki.sql.sql_persistent.__init__(self)
+ self.gctx = gctx
+ self.serial = serial
+ self.revoked = revoked
+ self.expires = expires
+ self.ca_detail_id = ca_detail_id
+ if serial or revoked or expires or ca_detail_id:
+ self.sql_mark_dirty()
+
+ def ca_detail(self):
+ """Fetch ca_detail object to which this revoked_cert_obj links."""
+ return ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+
+ @classmethod
+ def revoke(cls, cert, ca_detail):
+ """
+ Revoke a certificate.
+ """
+ return cls(
+ serial = cert.getSerial(),
+ expires = cert.getNotAfter(),
+ revoked = rpki.sundial.now(),
+ gctx = ca_detail.gctx,
+ ca_detail_id = ca_detail.ca_detail_id)
+
+class roa_obj(rpki.sql.sql_persistent):
+ """
+ Route Origin Authorization.
+ """
+
+ sql_template = rpki.sql.template(
+ "roa",
+ "roa_id",
+ "ca_detail_id",
+ "self_id",
+ "asn",
+ ("roa", rpki.x509.ROA),
+ ("cert", rpki.x509.X509),
+ ("published", rpki.sundial.datetime))
+
+ ca_detail_id = None
+ cert = None
+ roa = None
+ published = None
+
+ def self(self):
+ """
+ Fetch self object to which this roa_obj links.
+ """
+ return rpki.left_right.self_elt.sql_fetch(self.gctx, self.self_id)
+
+ def ca_detail(self):
+ """
+ Fetch ca_detail object to which this roa_obj links.
+ """
+ return rpki.rpki_engine.ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+
+ def sql_fetch_hook(self):
+ """
+ Extra SQL fetch actions for roa_obj -- handle prefix lists.
+ """
+ for version, datatype, attribute in ((4, rpki.resource_set.roa_prefix_set_ipv4, "ipv4"),
+ (6, rpki.resource_set.roa_prefix_set_ipv6, "ipv6")):
+ setattr(self, attribute, datatype.from_sql(
+ self.gctx.sql,
+ """
+ SELECT prefix, prefixlen, max_prefixlen FROM roa_prefix
+ WHERE roa_id = %s AND version = %s
+ """,
+ (self.roa_id, version)))
+
+ def sql_insert_hook(self):
+ """
+ Extra SQL insert actions for roa_obj -- handle prefix lists.
+ """
+ for version, prefix_set in ((4, self.ipv4), (6, self.ipv6)):
+ if prefix_set:
+ self.gctx.sql.executemany(
+ """
+ INSERT roa_prefix (roa_id, prefix, prefixlen, max_prefixlen, version)
+ VALUES (%s, %s, %s, %s, %s)
+ """,
+ ((self.roa_id, x.prefix, x.prefixlen, x.max_prefixlen, version)
+ for x in prefix_set))
+
+ def sql_delete_hook(self):
+ """
+ Extra SQL delete actions for roa_obj -- handle prefix lists.
+ """
+ self.gctx.sql.execute("DELETE FROM roa_prefix WHERE roa_id = %s", (self.roa_id,))
+
+ def __init__(self, gctx = None, self_id = None, asn = None, ipv4 = None, ipv6 = None):
+ rpki.sql.sql_persistent.__init__(self)
+ self.gctx = gctx
+ self.self_id = self_id
+ self.asn = asn
+ self.ipv4 = ipv4
+ self.ipv6 = ipv6
+
+ # Defer marking new ROA as dirty until .generate() has a chance to
+ # finish setup, otherwise we get SQL consistency errors.
+ #
+ #if self_id or asn or ipv4 or ipv6: self.sql_mark_dirty()
+
+ def update(self, publisher, fast = False):
+ """
+ Bring this roa_obj's ROA up to date if necessary.
+ """
+
+ v4 = self.ipv4.to_resource_set() if self.ipv4 is not None else rpki.resource_set.resource_set_ipv4()
+ v6 = self.ipv6.to_resource_set() if self.ipv6 is not None else rpki.resource_set.resource_set_ipv6()
+
+ me = "<%s %s>" % (self.asn, ("%s,%s" % (v4, v6)).strip(","))
+
+ if self.roa is None:
+ rpki.log.debug("ROA doesn't exist, generating %s" % me)
+ return self.generate(publisher = publisher, fast = fast)
+
+ ca_detail = self.ca_detail()
+
+ if ca_detail is None:
+ rpki.log.debug("ROA has no associated ca_detail, generating %s" % me)
+ return self.generate(publisher = publisher, fast = fast)
+
+ if ca_detail.state != "active":
+ rpki.log.debug("ROA's associated ca_detail not active (state %r), regenerating %s" % (ca_detail.state, me))
+ return self.regenerate(publisher = publisher, fast = fast)
+
+ regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.self().regen_margin)
+
+ if rpki.sundial.now() > regen_time:
+ rpki.log.debug("ROA past threshold %s, regenerating %s" % (regen_time, me))
+ return self.regenerate(publisher = publisher, fast = fast)
+
+ ca_resources = ca_detail.latest_ca_cert.get_3779resources()
+ ee_resources = self.cert.get_3779resources()
+
+ if ee_resources.oversized(ca_resources):
+ rpki.log.debug("ROA oversized with respect to CA, regenerating %s" % me)
+ return self.regenerate(publisher = publisher, fast = fast)
+
+ if ee_resources.v4 != v4 or ee_resources.v6 != v6:
+ rpki.log.debug("ROA resources do not match EE, regenerating %s" % me)
+ return self.regenerate(publisher = publisher, fast = fast)
+
+ def generate(self, publisher, fast = False):
+ """
+ Generate a ROA.
+
+ At present this does not support ROAs with multiple signatures
+ (neither does the current CMS code).
+
+ At present we have no way of performing a direct lookup from a
+ desired set of resources to a covering certificate, so we have to
+ search. This could be quite slow if we have a lot of active
+ ca_detail objects. Punt on the issue for now, revisit if
+ profiling shows this as a hotspot.
+
+ Once we have the right covering certificate, we generate the ROA
+ payload, generate a new EE certificate, use the EE certificate to
+ sign the ROA payload, publish the result, then throw away the
+ private key for the EE cert, all per the ROA specification. This
+ implies that generating a lot of ROAs will tend to thrash
+ /dev/random, but there is not much we can do about that.
+
+ If fast is set, we leave generating the new manifest for our
+ caller to handle, presumably at the end of a bulk operation.
+ """
+
+ if self.ipv4 is None and self.ipv6 is None:
+ raise rpki.exceptions.EmptyROAPrefixList
+
+ # Ugly and expensive search for covering ca_detail, there has to
+ # be a better way, but it would require the ability to test for
+ # resource subsets in SQL.
+
+ v4 = self.ipv4.to_resource_set() if self.ipv4 is not None else rpki.resource_set.resource_set_ipv4()
+ v6 = self.ipv6.to_resource_set() if self.ipv6 is not None else rpki.resource_set.resource_set_ipv6()
+
+ ca_detail = self.ca_detail()
+ if ca_detail is None or ca_detail.state != "active":
+ ca_detail = None
+ for parent in self.self().parents():
+ for ca in parent.cas():
+ ca_detail = ca.fetch_active()
+ if ca_detail is not None:
+ resources = ca_detail.latest_ca_cert.get_3779resources()
+ if v4.issubset(resources.v4) and v6.issubset(resources.v6):
+ break
+ ca_detail = None
+ if ca_detail is not None:
+ break
+
+ if ca_detail is None:
+ raise rpki.exceptions.NoCoveringCertForROA, "generate() could not find a certificate covering %s %s" % (v4, v6)
+
+ ca = ca_detail.ca()
+ resources = rpki.resource_set.resource_bag(v4 = v4, v6 = v6)
+ keypair = rpki.x509.RSA.generate()
+
+ self.ca_detail_id = ca_detail.ca_detail_id
+ self.cert = ca_detail.issue_ee(
+ ca = ca,
+ resources = resources,
+ subject_key = keypair.get_RSApublic(),
+ sia = ((rpki.oids.name2oid["id-ad-signedObject"], ("uri", self.uri(keypair))),))
+ self.roa = rpki.x509.ROA.build(self.asn, self.ipv4, self.ipv6, keypair, (self.cert,))
+ self.published = rpki.sundial.now()
+ self.sql_store()
+
+ rpki.log.debug("Generating ROA %r" % self.uri())
+ publisher.publish(cls = rpki.publication.roa_elt, uri = self.uri(), obj = self.roa, repository = ca.parent().repository(), handler = self.published_callback)
+ if not fast:
+ ca_detail.generate_manifest(publisher = publisher)
+
+ def published_callback(self, pdu):
+ """
+ Check publication result.
+ """
+ pdu.raise_if_error()
+ self.published = None
+ self.sql_mark_dirty()
+
+ def revoke(self, publisher, regenerate = False, allow_failure = False, fast = False):
+ """
+ Withdraw ROA associated with this roa_obj.
+
+ In order to preserve make-before-break properties without
+ duplicating code, this method also handles generating a
+ replacement ROA when requested.
+
+ If allow_failure is set, failing to withdraw the ROA will not be
+ considered an error.
+
+ If fast is set, SQL actions will be deferred, on the assumption
+ that our caller will handle regenerating CRL and manifest and
+ flushing the SQL cache.
+ """
+
+ ca_detail = self.ca_detail()
+ cert = self.cert
+ roa = self.roa
+ uri = self.uri()
+
+ if ca_detail.state != "active":
+ self.ca_detail_id = None
+
+ if regenerate:
+ self.generate(publisher = publisher, fast = fast)
+
+ rpki.log.debug("Withdrawing ROA %r and revoking its EE cert" % uri)
+ rpki.rpki_engine.revoked_cert_obj.revoke(cert = cert, ca_detail = ca_detail)
+ publisher.withdraw(cls = rpki.publication.roa_elt, uri = uri, obj = roa, repository = ca_detail.ca().parent().repository(),
+ handler = False if allow_failure else None)
+ self.sql_mark_deleted()
+ if not fast:
+ ca_detail.generate_crl(publisher = publisher)
+ ca_detail.generate_manifest(publisher = publisher)
+ self.gctx.sql.sweep()
+
+ def regenerate(self, publisher, fast = False):
+ """
+ Reissue ROA associated with this roa_obj.
+ """
+ if self.ca_detail() is None:
+ self.generate(publisher = publisher, fast = fast)
+ else:
+ self.revoke(publisher = publisher, regenerate = True, fast = fast)
+
+ def uri(self, key = None):
+ """
+ Return the publication URI for this roa_obj's ROA.
+ """
+ return self.ca_detail().ca().sia_uri + self.uri_tail(key)
+
+ def uri_tail(self, key = None):
+ """
+ Return the tail (filename portion) of the publication URI for this
+ roa_obj's ROA.
+ """
+ return (key or self.cert).gSKI() + ".roa"
+
+
+class publication_queue(object):
+ """
+ Utility to simplify publication from within rpkid.
+
+ General idea here is to accumulate a collection of objects to be
+ published, in one or more repositories, each potentially with its
+ own completion callback. Eventually we want to publish everything
+ we've accumulated, at which point we need to iterate over the
+ collection and do repository.call_pubd() for each repository.
+ """
+
+ replace = True
+
+ def __init__(self):
+ self.repositories = {}
+ self.msgs = {}
+ self.handlers = {}
+ if self.replace:
+ self.uris = {}
+
+ def _add(self, uri, obj, repository, handler, make_pdu):
+ rid = id(repository)
+ if rid not in self.repositories:
+ self.repositories[rid] = repository
+ self.msgs[rid] = rpki.publication.msg.query()
+ if self.replace and uri in self.uris:
+ rpki.log.debug("Removing publication duplicate <%s %r %r>" % (self.uris[uri].action, self.uris[uri].uri, self.uris[uri].payload))
+ self.msgs[rid].remove(self.uris.pop(uri))
+ pdu = make_pdu(uri = uri, obj = obj)
+ if handler is not None:
+ self.handlers[id(pdu)] = handler
+ pdu.tag = id(pdu)
+ self.msgs[rid].append(pdu)
+ if self.replace:
+ self.uris[uri] = pdu
+
+ def publish(self, cls, uri, obj, repository, handler = None):
+ return self._add(uri, obj, repository, handler, cls.make_publish)
+
+ def withdraw(self, cls, uri, obj, repository, handler = None):
+ return self._add(uri, obj, repository, handler, cls.make_withdraw)
+
+ def call_pubd(self, cb, eb):
+ def loop(iterator, rid):
+ self.repositories[rid].call_pubd(iterator, eb, self.msgs[rid], self.handlers)
+ rpki.async.iterator(self.repositories, loop, cb)
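+
+ # Minimal usage sketch (assumes the caller already holds "repository",
+ # "uri", "obj" and a cb/eb callback pair; none of these are defined here):
+ #
+ #   pq = publication_queue()
+ #   pq.publish(cls = rpki.publication.certificate_elt, uri = uri, obj = obj,
+ #              repository = repository)
+ #   pq.call_pubd(cb, eb)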
diff --git a/rpkid.without_tls/rpki/sql.py b/rpkid.without_tls/rpki/sql.py
new file mode 100644
index 00000000..88ede288
--- /dev/null
+++ b/rpkid.without_tls/rpki/sql.py
@@ -0,0 +1,352 @@
+"""
+SQL interface code.
+
+$Id$
+
+Copyright (C) 2009 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+from __future__ import with_statement
+
+import warnings
+
+# Silence warning while loading MySQLdb in Python 2.6, sigh
+if hasattr(warnings, "catch_warnings"):
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ import MySQLdb
+else:
+ import MySQLdb
+
+import _mysql_exceptions
+import rpki.x509, rpki.resource_set, rpki.sundial, rpki.exceptions, rpki.log
+
+class session(object):
+ """
+ SQL session layer.
+ """
+
+ _exceptions_enabled = False
+
+ def __init__(self, cfg):
+
+ if not self._exceptions_enabled:
+ warnings.simplefilter("error", _mysql_exceptions.Warning)
+ self.__class__._exceptions_enabled = True
+
+ self.username = cfg.get("sql-username")
+ self.database = cfg.get("sql-database")
+ self.password = cfg.get("sql-password")
+
+ self.cache = {}
+ self.dirty = set()
+
+ self.connect()
+
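+ # The cfg object above is expected to supply entries along these lines
+ # (illustrative values, not defaults shipped with the code):
+ #
+ #   sql-username = rpki
+ #   sql-database = rpki
+ #   sql-password = fnord
+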
+ def connect(self):
+ self.db = MySQLdb.connect(user = self.username, db = self.database, passwd = self.password)
+ self.cur = self.db.cursor()
+ self.db.autocommit(True)
+
+ def close(self):
+ if self.cur:
+ self.cur.close()
+ self.cur = None
+ if self.db:
+ self.db.close()
+ self.db = None
+
+ def ping(self):
+ return self.db.ping(True)
+
+ def _wrap_execute(self, func, query, args):
+ try:
+ return func(query, args)
+ except _mysql_exceptions.MySQLError:
+ if self.dirty:
+ rpki.log.warn("MySQL exception with dirty objects in SQL cache!")
+ raise
+
+ def execute(self, query, args = None):
+ return self._wrap_execute(self.cur.execute, query, args)
+
+ def executemany(self, query, args):
+ return self._wrap_execute(self.cur.executemany, query, args)
+
+ def fetchall(self):
+ return self.cur.fetchall()
+
+ def lastrowid(self):
+ return self.cur.lastrowid
+
+ def cache_clear(self):
+ """Clear the object cache."""
+ self.cache.clear()
+
+ def assert_pristine(self):
+ """Assert that there are no dirty objects in the cache."""
+ assert not self.dirty, "Dirty objects in SQL cache: %s" % self.dirty
+
+ def sweep(self):
+ """
+ Write any dirty objects out to SQL.
+ """
+ for s in self.dirty.copy():
+ rpki.log.debug("Sweeping %r" % s)
+ if s.sql_deleted:
+ s.sql_delete()
+ else:
+ s.sql_store()
+ self.assert_pristine()
+
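+# Typical write-back pattern (a sketch; "obj" is any sql_persistent instance
+# and "gctx" is the global context holding this session):
+#
+#   obj.some_attribute = new_value
+#   obj.sql_mark_dirty()
+#   ...
+#   gctx.sql.sweep()    # flushes all dirty objects back to SQL
+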
+class template(object):
+ """
+ SQL template generator.
+ """
+
+ def __init__(self, table_name, index_column, *data_columns):
+ """Build a SQL template."""
+ type_map = dict((x[0], x[1]) for x in data_columns if isinstance(x, tuple))
+ data_columns = tuple(isinstance(x, tuple) and x[0] or x for x in data_columns)
+ columns = (index_column,) + data_columns
+ self.table = table_name
+ self.index = index_column
+ self.columns = columns
+ self.map = type_map
+ self.select = "SELECT %s FROM %s" % (", ".join("%s.%s" % (table_name, c) for c in columns), table_name)
+ self.insert = "INSERT %s (%s) VALUES (%s)" % (table_name,
+ ", ".join(data_columns),
+ ", ".join("%(" + s + ")s" for s in data_columns))
+ self.update = "UPDATE %s SET %s WHERE %s = %%(%s)s" % (table_name,
+ ", ".join(s + " = %(" + s + ")s" for s in data_columns),
+ index_column,
+ index_column)
+ self.delete = "DELETE FROM %s WHERE %s = %%s" % (table_name, index_column)
+
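+# As an illustration (a hypothetical two-column table, not one of rpkid's),
+# template("widget", "widget_id", "name") builds roughly these query strings:
+#
+#   .select: "SELECT widget.widget_id, widget.name FROM widget"
+#   .insert: "INSERT widget (name) VALUES (%(name)s)"
+#   .update: "UPDATE widget SET name = %(name)s WHERE widget_id = %(widget_id)s"
+#   .delete: "DELETE FROM widget WHERE widget_id = %s"
+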
+class sql_persistent(object):
+ """
+ Mixin for persistent class that needs to be stored in SQL.
+ """
+
+ ## @var sql_in_db
+ # Whether this object is already in SQL or not.
+
+ sql_in_db = False
+
+ ## @var sql_deleted
+ # Whether our cached copy of this object has been deleted.
+
+ sql_deleted = False
+
+ ## @var sql_debug
+ # Enable logging of SQL actions
+
+ sql_debug = False
+
+ @classmethod
+ def sql_fetch(cls, gctx, id):
+ """
+ Fetch one object from SQL, based on its primary key.
+
+ Since in this one case we know that the primary index is also the
+ cache key, we check for a cache hit directly in the hope of
+ bypassing the SQL lookup entirely.
+
+ This method is usually called via a one-line class-specific
+ wrapper. As a convenience, we also accept an id of None, and just
+ return None in this case.
+ """
+
+ if id is None:
+ return None
+ assert isinstance(id, (int, long)), "id should be an integer, was %r" % type(id)
+ key = (cls, id)
+ if key in gctx.sql.cache:
+ return gctx.sql.cache[key]
+ else:
+ return cls.sql_fetch_where1(gctx, "%s = %%s" % cls.sql_template.index, (id,))
+
+ @classmethod
+ def sql_fetch_where1(cls, gctx, where, args = None, also_from = None):
+ """
+ Fetch one object from SQL, based on an arbitrary SQL WHERE expression.
+ """
+ results = cls.sql_fetch_where(gctx, where, args, also_from)
+ if len(results) == 0:
+ return None
+ elif len(results) == 1:
+ return results[0]
+ else:
+ raise rpki.exceptions.DBConsistancyError, \
+ "Database contained multiple matches for %s where %s: %r" % \
+ (cls.__name__, where % tuple(repr(a) for a in args), results)
+
+ @classmethod
+ def sql_fetch_all(cls, gctx):
+ """Fetch all objects of this type from SQL."""
+ return cls.sql_fetch_where(gctx, None)
+
+ @classmethod
+ def sql_fetch_where(cls, gctx, where, args = None, also_from = None):
+ """
+ Fetch objects of this type matching an arbitrary SQL WHERE expression.
+ """
+ if where is None:
+ assert args is None and also_from is None
+ if cls.sql_debug:
+ rpki.log.debug("sql_fetch_where(%r)" % cls.sql_template.select)
+ gctx.sql.execute(cls.sql_template.select)
+ else:
+ query = cls.sql_template.select
+ if also_from is not None:
+ query += "," + also_from
+ query += " WHERE " + where
+ if cls.sql_debug:
+ rpki.log.debug("sql_fetch_where(%r, %r)" % (query, args))
+ gctx.sql.execute(query, args)
+ results = []
+ for row in gctx.sql.fetchall():
+ key = (cls, row[0])
+ if key in gctx.sql.cache:
+ results.append(gctx.sql.cache[key])
+ else:
+ results.append(cls.sql_init(gctx, row, key))
+ return results
+
+ @classmethod
+ def sql_init(cls, gctx, row, key):
+ """
+ Initialize one Python object from the result of a SQL query.
+ """
+ self = cls()
+ self.gctx = gctx
+ self.sql_decode(dict(zip(cls.sql_template.columns, row)))
+ gctx.sql.cache[key] = self
+ self.sql_in_db = True
+ self.sql_fetch_hook()
+ return self
+
+ def sql_mark_dirty(self):
+ """Mark this object as needing to be written back to SQL."""
+ self.gctx.sql.dirty.add(self)
+
+ def sql_mark_clean(self):
+ """Mark this object as not needing to be written back to SQL."""
+ self.gctx.sql.dirty.discard(self)
+
+ def sql_is_dirty(self):
+ """Query whether this object needs to be written back to SQL."""
+ return self in self.gctx.sql.dirty
+
+ def sql_mark_deleted(self):
+ """Mark this object as needing to be deleted in SQL."""
+ self.sql_deleted = True
+ self.sql_mark_dirty()
+
+ def sql_store(self):
+ """
+ Store this object to SQL.
+ """
+ args = self.sql_encode()
+ if not self.sql_in_db:
+ if self.sql_debug:
+ rpki.log.debug("sql_fetch_store(%r, %r)" % (self.sql_template.insert, args))
+ self.gctx.sql.execute(self.sql_template.insert, args)
+ setattr(self, self.sql_template.index, self.gctx.sql.lastrowid())
+ self.gctx.sql.cache[(self.__class__, self.gctx.sql.lastrowid())] = self
+ self.sql_insert_hook()
+ else:
+ if self.sql_debug:
+ rpki.log.debug("sql_fetch_store(%r, %r)" % (self.sql_template.update, args))
+ self.gctx.sql.execute(self.sql_template.update, args)
+ self.sql_update_hook()
+ key = (self.__class__, getattr(self, self.sql_template.index))
+ assert key in self.gctx.sql.cache and self.gctx.sql.cache[key] == self
+ self.sql_mark_clean()
+ self.sql_in_db = True
+
+ def sql_delete(self):
+ """
+ Delete this object from SQL.
+ """
+ if self.sql_in_db:
+ id = getattr(self, self.sql_template.index)
+ if self.sql_debug:
+ rpki.log.debug("sql_fetch_delete(%r, %r)" % (self.sql_template.delete, id))
+ self.sql_delete_hook()
+ self.gctx.sql.execute(self.sql_template.delete, id)
+ key = (self.__class__, id)
+ if self.gctx.sql.cache.get(key) == self:
+ del self.gctx.sql.cache[key]
+ self.sql_in_db = False
+ self.sql_mark_clean()
+
+ def sql_encode(self):
+ """
+ Convert object attributes into a dict for use with canned SQL
+ queries. This is a default version that assumes a one-to-one
+ mapping between column names in SQL and attribute names in Python.
+ If you need something fancier, override this.
+ """
+ d = dict((a, getattr(self, a, None)) for a in self.sql_template.columns)
+ for i in self.sql_template.map:
+ if d.get(i) is not None:
+ d[i] = self.sql_template.map[i].to_sql(d[i])
+ return d
+
+ def sql_decode(self, vals):
+ """
+ Initialize an object with values returned by self.sql_fetch().
+ This is a default version that assumes a one-to-one mapping
+ between column names in SQL and attribute names in Python. If you
+ need something fancier, override this.
+ """
+ for a in self.sql_template.columns:
+ if vals.get(a) is not None and a in self.sql_template.map:
+ setattr(self, a, self.sql_template.map[a].from_sql(vals[a]))
+ else:
+ setattr(self, a, vals[a])
+
+ def sql_fetch_hook(self):
+ """Customization hook."""
+ pass
+
+ def sql_insert_hook(self):
+ """Customization hook."""
+ pass
+
+ def sql_update_hook(self):
+ """Customization hook."""
+ self.sql_delete_hook()
+ self.sql_insert_hook()
+
+ def sql_delete_hook(self):
+ """Customization hook."""
+ pass
+
diff --git a/rpkid.without_tls/rpki/sundial.py b/rpkid.without_tls/rpki/sundial.py
new file mode 100644
index 00000000..eef69258
--- /dev/null
+++ b/rpkid.without_tls/rpki/sundial.py
@@ -0,0 +1,287 @@
+"""
+Unified RPKI date/time handling, based on the standard Python datetime module.
+
+Module name chosen to sidestep a nightmare of import-related errors
+that occur with the more obvious module names.
+
+List of arithmetic methods that require result casting was derived by
+inspection of the datetime module, to wit:
+
+ >>> import datetime
+ >>> for t in (datetime.datetime, datetime.timedelta):
+ ... for k in t.__dict__.keys():
+ ... if k.startswith("__"):
+ ... print "%s.%s()" % (t.__name__, k)
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import datetime as pydatetime
+import re
+
+def now():
+ """Get current timestamp."""
+ return datetime.utcnow()
+
+class datetime(pydatetime.datetime):
+ """
+ RPKI extensions to standard datetime.datetime class. All work here
+ is in UTC, so we use naive datetime objects.
+ """
+
+ def totimestamp(self):
+ """
+ Convert to seconds from epoch (like time.time()). Conversion
+ method is a bit silly, but avoids time module timezone whackiness.
+ """
+ return int(self.strftime("%s"))
+
+ @classmethod
+ def fromUTCTime(cls, x):
+ """Convert from ASN.1 UTCTime."""
+ x = str(x)
+ return cls.fromGeneralizedTime(("19" if x[0] >= "5" else "20") + x)
+
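+ # For example (values follow from the two-digit year pivot above, chosen
+ # purely for illustration):
+ #
+ #   fromUTCTime("490101000000Z") -> 2049-01-01T00:00:00Z
+ #   fromUTCTime("500101000000Z") -> 1950-01-01T00:00:00Z
+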
+ def toUTCTime(self):
+ """Convert to ASN.1 UTCTime."""
+ return self.strftime("%y%m%d%H%M%SZ")
+
+ @classmethod
+ def fromGeneralizedTime(cls, x):
+ """Convert from ASN.1 GeneralizedTime."""
+ return cls.strptime(x, "%Y%m%d%H%M%SZ")
+
+ def toGeneralizedTime(self):
+ """Convert to ASN.1 GeneralizedTime."""
+ return self.strftime("%Y%m%d%H%M%SZ")
+
+ @classmethod
+ def fromASN1tuple(cls, x):
+ """
+ Convert from ASN.1 tuple representation.
+ """
+ assert isinstance(x, tuple) and len(x) == 2 and x[0] in ("utcTime", "generalTime")
+ if x[0] == "utcTime":
+ return cls.fromUTCTime(x[1])
+ else:
+ return cls.fromGeneralizedTime(x[1])
+
+ ## @var PKIX_threshhold
+ # Threshold specified in RFC 3280 for switchover from UTCTime to GeneralizedTime.
+
+ PKIX_threshhold = pydatetime.datetime(2050, 1, 1)
+
+ def toASN1tuple(self):
+ """
+ Convert to ASN.1 tuple representation.
+ """
+ if self < self.PKIX_threshhold:
+ return "utcTime", self.toUTCTime()
+ else:
+ return "generalTime", self.toGeneralizedTime()
+
+ @classmethod
+ def fromXMLtime(cls, x):
+ """
+ Convert from XML time representation.
+ """
+ if x is None:
+ return None
+ else:
+ return cls.strptime(x, "%Y-%m-%dT%H:%M:%SZ")
+
+ def toXMLtime(self):
+ """Convert to XML time representation."""
+ return self.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ def __str__(self):
+ return self.toXMLtime()
+
+ @classmethod
+ def fromdatetime(cls, x):
+ """
+ Convert a datetime.datetime object into this subclass. This is
+ whacky due to the weird constructors for datetime.
+ """
+ return cls.combine(x.date(), x.time())
+
+ @classmethod
+ def from_sql(cls, x):
+ """Convert from SQL storage format."""
+ return cls.fromdatetime(x)
+
+ def to_sql(self):
+ """
+ Convert to SQL storage format.
+
+ There's something whacky going on in the MySQLdb module; it throws
+ range errors when storing a derived type into a DATETIME column.
+ Investigate some day, but for now brute force this by copying the
+ relevant fields into a datetime.datetime for MySQLdb's
+ consumption.
+
+ """
+ return pydatetime.datetime(year = self.year, month = self.month, day = self.day,
+ hour = self.hour, minute = self.minute, second = self.second,
+ microsecond = 0, tzinfo = None)
+
+ def later(self, other):
+ """Return the later of two timestamps."""
+ return other if other > self else self
+
+ def earlier(self, other):
+ """Return the earlier of two timestamps."""
+ return other if other < self else self
+
+ def __add__(self, y): return _cast(pydatetime.datetime.__add__(self, y))
+ def __radd__(self, y): return _cast(pydatetime.datetime.__radd__(self, y))
+ def __rsub__(self, y): return _cast(pydatetime.datetime.__rsub__(self, y))
+ def __sub__(self, y): return _cast(pydatetime.datetime.__sub__(self, y))
+
+class timedelta(pydatetime.timedelta):
+ """
+ Timedelta with text parsing. This accepts two input formats:
+
+ - A simple integer, indicating a number of seconds.
+
+ - A string of the form "uY vW wD xH yM zS" where u, v, w, x, y, and z
+ are integers and Y, W, D, H, M, and S indicate years, weeks, days,
+ hours, minutes, and seconds. All of the fields are optional, but
+ at least one must be specified. E.g., "3D4H" means "three days plus
+ four hours".
+
+ There is no "months" format, because the definition of a month is too
+ fuzzy to be useful (what day is six months from August 30th?)
+
+ Similarly, the "years" conversion may produce surprising results, as
+ "one year" in conventional English does not refer to a fixed interval
+ but rather a fixed (and in some cases undefined) offset within the
+ Gregorian calendar (what day is one year from February 29th?) 1Y as
+ implemented by this code refers to a specific number of seconds.
+ If you mean 365 days or 52 weeks, say that instead.
+ """
+
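+ # A few illustrative parses (values follow from the rules above; this is a
+ # sketch, not output captured from running code):
+ #
+ #   timedelta.parse("30")    == timedelta(seconds = 30)
+ #   timedelta.parse("2W3D")  == timedelta(days = 17)
+ #   timedelta.parse("1H30M") == timedelta(hours = 1, minutes = 30)
+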
+ ## @var regexp
+ # Hideously ugly regular expression to parse the complex text form.
+ # Tags are intended for use with re.MatchObject.groupdict() and map
+ # directly to the keywords expected by the timedelta constructor.
+
+ regexp = re.compile("\\s*".join(("^",
+ "(?:(?P<years>\\d+)Y)?",
+ "(?:(?P<weeks>\\d+)W)?",
+ "(?:(?P<days>\\d+)D)?",
+ "(?:(?P<hours>\\d+)H)?",
+ "(?:(?P<minutes>\\d+)M)?",
+ "(?:(?P<seconds>\\d+)S)?",
+ "$")),
+ re.I)
+
+ ## @var years_to_seconds
+ # Conversion factor from years to seconds (value furnished by the
+ # "units" program).
+
+ years_to_seconds = 31556926
+
+ @classmethod
+ def parse(cls, arg):
+ """
+ Parse text into a timedelta object.
+ """
+ if not isinstance(arg, str):
+ return cls(seconds = arg)
+ elif arg.isdigit():
+ return cls(seconds = int(arg))
+ else:
+ match = cls.regexp.match(arg)
+ if match:
+ #return cls(**dict((k, int(v)) for (k, v) in match.groupdict().items() if v is not None))
+ d = match.groupdict("0")
+ for k, v in d.iteritems():
+ d[k] = int(v)
+ d["days"] += d.pop("weeks") * 7
+ d["seconds"] += d.pop("years") * cls.years_to_seconds
+ return cls(**d)
+ else:
+ raise RuntimeError, "Couldn't parse timedelta %r" % (arg,)
+
+ def convert_to_seconds(self):
+ """Convert a timedelta interval to seconds."""
+ return self.days * 24 * 60 * 60 + self.seconds
+
+ @classmethod
+ def fromtimedelta(cls, x):
+ """Convert a datetime.timedelta object into this subclass."""
+ return cls(days = x.days, seconds = x.seconds, microseconds = x.microseconds)
+
+ def __abs__(self): return _cast(pydatetime.timedelta.__abs__(self))
+ def __add__(self, x): return _cast(pydatetime.timedelta.__add__(self, x))
+ def __div__(self, x): return _cast(pydatetime.timedelta.__div__(self, x))
+ def __floordiv__(self, x): return _cast(pydatetime.timedelta.__floordiv__(self, x))
+ def __mul__(self, x): return _cast(pydatetime.timedelta.__mul__(self, x))
+ def __neg__(self): return _cast(pydatetime.timedelta.__neg__(self))
+ def __pos__(self): return _cast(pydatetime.timedelta.__pos__(self))
+ def __radd__(self, x): return _cast(pydatetime.timedelta.__radd__(self, x))
+ def __rdiv__(self, x): return _cast(pydatetime.timedelta.__rdiv__(self, x))
+ def __rfloordiv__(self, x): return _cast(pydatetime.timedelta.__rfloordiv__(self, x))
+ def __rmul__(self, x): return _cast(pydatetime.timedelta.__rmul__(self, x))
+ def __rsub__(self, x): return _cast(pydatetime.timedelta.__rsub__(self, x))
+ def __sub__(self, x): return _cast(pydatetime.timedelta.__sub__(self, x))
+
+def _cast(x):
+ """
+ Cast result of arithmetic operations back into correct subtype.
+ """
+ if isinstance(x, pydatetime.datetime):
+ return datetime.fromdatetime(x)
+ if isinstance(x, pydatetime.timedelta):
+ return timedelta.fromtimedelta(x)
+ return x
+
+if __name__ == "__main__":
+
+ def test(t):
+ print
+ print "str: ", t
+ print "repr: ", repr(t)
+ print "seconds since epoch:", t.strftime("%s")
+ print "UTCTime: ", t.toUTCTime()
+ print "GeneralizedTime: ", t.toGeneralizedTime()
+ print "ASN1tuple: ", t.toASN1tuple()
+ print "XMLtime: ", t.toXMLtime()
+ print
+
+ print
+ print "Testing time conversion routines"
+ test(now())
+ test(now() + timedelta(days = 30))
+ test(now() + timedelta.parse("3d5s"))
+ test(now() + timedelta.parse(" 3d 5s "))
+ test(now() + timedelta.parse("1y3d5h"))
diff --git a/rpkid.without_tls/rpki/up_down.py b/rpkid.without_tls/rpki/up_down.py
new file mode 100644
index 00000000..4320b173
--- /dev/null
+++ b/rpkid.without_tls/rpki/up_down.py
@@ -0,0 +1,689 @@
+"""
+RPKI "up-down" protocol.
+
+$Id$
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import base64, lxml.etree
+import rpki.resource_set, rpki.x509, rpki.exceptions, rpki.log
+import rpki.xml_utils, rpki.relaxng, rpki.sundial, rpki.oids, rpki.async
+
+xmlns = "http://www.apnic.net/specs/rescerts/up-down/"
+
+nsmap = { None : xmlns }
+
+class base_elt(object):
+ """
+ Generic PDU object.
+
+ Virtual class, just provides some default methods.
+ """
+
+ def startElement(self, stack, name, attrs):
+ """
+ Ignore startElement() if there's no specific handler.
+
+ Some elements have no attributes and we only care about their
+ text content.
+ """
+ pass
+
+ def endElement(self, stack, name, text):
+ """
+ Ignore endElement() if there's no specific handler.
+
+ If we don't need to do anything else, just pop the stack.
+ """
+ stack.pop()
+
+ def make_elt(self, name, *attrs):
+ """
+ Construct an element, copying over a set of attributes.
+ """
+ elt = lxml.etree.Element("{%s}%s" % (xmlns, name), nsmap=nsmap)
+ for key in attrs:
+ val = getattr(self, key, None)
+ if val is not None:
+ elt.set(key, str(val))
+ return elt
+
+ def make_b64elt(self, elt, name, value):
+ """
+ Construct a sub-element with Base64 text content.
+ """
+ if value is not None and not value.empty():
+ lxml.etree.SubElement(elt, "{%s}%s" % (xmlns, name), nsmap=nsmap).text = value.get_Base64()
+
+ def serve_pdu(self, q_msg, r_msg, child, callback, errback):
+ """Default PDU handler to catch unexpected types."""
+ raise rpki.exceptions.BadQuery, "Unexpected query type %s" % q_msg.type
+
+ def check_response(self):
+ """Placeholder for response checking."""
+ pass
+
+class multi_uri(list):
+ """
+ Container for a set of URIs.
+ """
+
+ def __init__(self, ini):
+ """
+ Initialize a set of URIs, which includes some basic syntax checking.
+ """
+ list.__init__(self)
+ if isinstance(ini, (list, tuple)):
+ self[:] = ini
+ elif isinstance(ini, str):
+ self[:] = ini.split(",")
+ for s in self:
+ if s.strip() != s or "://" not in s:
+ raise rpki.exceptions.BadURISyntax, "Bad URI \"%s\"" % s
+ else:
+ raise TypeError
+
+ def __str__(self):
+ """Convert a multi_uri back to a string representation."""
+ return ",".join(self)
+
+ def rsync(self):
+ """
+ Find first rsync://... URI in self.
+ """
+ for s in self:
+ if s.startswith("rsync://"):
+ return s
+ return None
+
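+# Illustration (hypothetical URIs): multi_uri("rsync://a.example/x/,http://b.example/x/")
+# holds both URIs, str() turns it back into the comma-separated form, and
+# .rsync() returns "rsync://a.example/x/".
+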
+class certificate_elt(base_elt):
+ """
+ Up-Down protocol representation of an issued certificate.
+ """
+
+ def startElement(self, stack, name, attrs):
+ """
+ Handle attributes of <certificate/> element.
+ """
+ assert name == "certificate", "Unexpected name %s, stack %s" % (name, stack)
+ self.cert_url = multi_uri(attrs["cert_url"])
+ self.req_resource_set_as = rpki.resource_set.resource_set_as(attrs.get("req_resource_set_as"))
+ self.req_resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs.get("req_resource_set_ipv4"))
+ self.req_resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs.get("req_resource_set_ipv6"))
+
+ def endElement(self, stack, name, text):
+ """
+ Handle text content of a <certificate/> element.
+ """
+ assert name == "certificate", "Unexpected name %s, stack %s" % (name, stack)
+ self.cert = rpki.x509.X509(Base64 = text)
+ stack.pop()
+
+ def toXML(self):
+ """
+ Generate a <certificate/> element.
+ """
+ elt = self.make_elt("certificate", "cert_url",
+ "req_resource_set_as", "req_resource_set_ipv4", "req_resource_set_ipv6")
+ elt.text = self.cert.get_Base64()
+ return elt
+
+class class_elt(base_elt):
+ """
+ Up-Down protocol representation of a resource class.
+ """
+
+ issuer = None
+
+ def __init__(self):
+ """Initialize class_elt."""
+ base_elt.__init__(self)
+ self.certs = []
+
+ def startElement(self, stack, name, attrs):
+ """
+ Handle <class/> elements and their children.
+ """
+ if name == "certificate":
+ cert = certificate_elt()
+ self.certs.append(cert)
+ stack.append(cert)
+ cert.startElement(stack, name, attrs)
+ elif name != "issuer":
+ assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
+ self.class_name = attrs["class_name"]
+ self.cert_url = multi_uri(attrs["cert_url"])
+ self.suggested_sia_head = attrs.get("suggested_sia_head")
+ self.resource_set_as = rpki.resource_set.resource_set_as(attrs["resource_set_as"])
+ self.resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs["resource_set_ipv4"])
+ self.resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs["resource_set_ipv6"])
+ self.resource_set_notafter = rpki.sundial.datetime.fromXMLtime(attrs.get("resource_set_notafter"))
+
+ def endElement(self, stack, name, text):
+ """
+ Handle <class/> elements and their children.
+ """
+ if name == "issuer":
+ self.issuer = rpki.x509.X509(Base64 = text)
+ else:
+ assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
+ stack.pop()
+
+ def toXML(self):
+ """
+ Generate a <class/> element.
+ """
+ elt = self.make_elt("class", "class_name", "cert_url", "resource_set_as",
+ "resource_set_ipv4", "resource_set_ipv6",
+ "resource_set_notafter", "suggested_sia_head")
+ elt.extend([i.toXML() for i in self.certs])
+ self.make_b64elt(elt, "issuer", self.issuer)
+ return elt
+
+ def to_resource_bag(self):
+ """
+ Build a resource_bag from this <class/> element.
+ """
+ return rpki.resource_set.resource_bag(self.resource_set_as,
+ self.resource_set_ipv4,
+ self.resource_set_ipv6,
+ self.resource_set_notafter)
+
+ def from_resource_bag(self, bag):
+ """
+ Set resources of this class element from a resource_bag.
+ """
+ self.resource_set_as = bag.asn
+ self.resource_set_ipv4 = bag.v4
+ self.resource_set_ipv6 = bag.v6
+ self.resource_set_notafter = bag.valid_until
+
+class list_pdu(base_elt):
+ """
+ Up-Down protocol "list" PDU.
+ """
+
+ def toXML(self):
+ """Generate (empty) payload of "list" PDU."""
+ return []
+
+ def serve_pdu(self, q_msg, r_msg, child, callback, errback):
+ """
+ Serve one "list" PDU.
+ """
+
+ def handle(irdb_resources):
+
+ r_msg.payload = list_response_pdu()
+
+ for parent in child.parents():
+ for ca in parent.cas():
+ ca_detail = ca.fetch_active()
+ if not ca_detail:
+ continue
+ resources = ca_detail.latest_ca_cert.get_3779resources().intersection(irdb_resources)
+ if resources.empty():
+ continue
+ rc = class_elt()
+ rc.class_name = str(ca.ca_id)
+ rc.cert_url = multi_uri(ca_detail.ca_cert_uri)
+ rc.from_resource_bag(resources)
+ for child_cert in child.child_certs(ca_detail = ca_detail):
+ c = certificate_elt()
+ c.cert_url = multi_uri(child_cert.uri(ca))
+ c.cert = child_cert.cert
+ rc.certs.append(c)
+ rc.issuer = ca_detail.latest_ca_cert
+ r_msg.payload.classes.append(rc)
+ callback()
+
+ self.gctx.irdb_query_child_resources(child.self().self_handle, child.child_handle, handle, errback)
+
+ @classmethod
+ def query(cls, parent, cb, eb):
+ """
+ Send a "list" query to parent.
+ """
+ try:
+ rpki.log.info('Sending "list" request to parent %s' % parent.parent_handle)
+ parent.query_up_down(cls(), cb, eb)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ eb(e)
+
+class class_response_syntax(base_elt):
+ """
+ Syntax for Up-Down protocol "list_response" and "issue_response" PDUs.
+ """
+
+ def __init__(self):
+ """
+ Initialize class_response_syntax.
+ """
+ base_elt.__init__(self)
+ self.classes = []
+
+ def startElement(self, stack, name, attrs):
+ """
+ Handle "list_response" and "issue_response" PDUs.
+ """
+ assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
+ c = class_elt()
+ self.classes.append(c)
+ stack.append(c)
+ c.startElement(stack, name, attrs)
+
+ def toXML(self):
+ """Generate payload of "list_response" and "issue_response" PDUs."""
+ return [c.toXML() for c in self.classes]
+
+class list_response_pdu(class_response_syntax):
+ """
+ Up-Down protocol "list_response" PDU.
+ """
+ pass
+
+class issue_pdu(base_elt):
+ """
+ Up-Down protocol "issue" PDU.
+ """
+
+ def startElement(self, stack, name, attrs):
+ """
+ Handle "issue" PDU.
+ """
+ assert name == "request", "Unexpected name %s, stack %s" % (name, stack)
+ self.class_name = attrs["class_name"]
+ self.req_resource_set_as = rpki.resource_set.resource_set_as(attrs.get("req_resource_set_as"))
+ self.req_resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs.get("req_resource_set_ipv4"))
+ self.req_resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs.get("req_resource_set_ipv6"))
+
+ def endElement(self, stack, name, text):
+ """
+ Handle "issue" PDU.
+ """
+ assert name == "request", "Unexpected name %s, stack %s" % (name, stack)
+ self.pkcs10 = rpki.x509.PKCS10(Base64 = text)
+ stack.pop()
+
+ def toXML(self):
+ """
+ Generate payload of "issue" PDU.
+ """
+ elt = self.make_elt("request", "class_name", "req_resource_set_as",
+ "req_resource_set_ipv4", "req_resource_set_ipv6")
+ elt.text = self.pkcs10.get_Base64()
+ return [elt]
+
+ def serve_pdu(self, q_msg, r_msg, child, callback, errback):
+ """
+ Serve one issue request PDU.
+ """
+
+ # Subsetting is not yet implemented; this is the one place where we
+ # have to handle it, by reporting that we're lame.
+
+ if self.req_resource_set_as or \
+ self.req_resource_set_ipv4 or \
+ self.req_resource_set_ipv6:
+ raise rpki.exceptions.NotImplementedYet, "req_* attributes not implemented yet, sorry"
+
+ # Check the request
+ self.pkcs10.check_valid_rpki()
+ ca = child.ca_from_class_name(self.class_name)
+ ca_detail = ca.fetch_active()
+ if ca_detail is None:
+ raise rpki.exceptions.NoActiveCA, "No active CA for class %r" % self.class_name
+
+ # Check current cert, if any
+
+ def got_resources(irdb_resources):
+
+ resources = irdb_resources.intersection(ca_detail.latest_ca_cert.get_3779resources())
+ req_key = self.pkcs10.getPublicKey()
+ req_sia = self.pkcs10.get_SIA()
+ child_cert = child.child_certs(ca_detail = ca_detail, ski = req_key.get_SKI(), unique = True)
+
+ # Generate new cert or regenerate old one if necessary
+
+ publisher = rpki.rpki_engine.publication_queue()
+
+ if child_cert is None:
+ child_cert = ca_detail.issue(
+ ca = ca,
+ child = child,
+ subject_key = req_key,
+ sia = req_sia,
+ resources = resources,
+ publisher = publisher)
+ else:
+ child_cert = child_cert.reissue(
+ ca_detail = ca_detail,
+ sia = req_sia,
+ resources = resources,
+ publisher = publisher)
+
+ def done():
+ c = certificate_elt()
+ c.cert_url = multi_uri(child_cert.uri(ca))
+ c.cert = child_cert.cert
+ rc = class_elt()
+ rc.class_name = self.class_name
+ rc.cert_url = multi_uri(ca_detail.ca_cert_uri)
+ rc.from_resource_bag(resources)
+ rc.certs.append(c)
+ rc.issuer = ca_detail.latest_ca_cert
+ r_msg.payload = issue_response_pdu()
+ r_msg.payload.classes.append(rc)
+ callback()
+
+ self.gctx.sql.sweep()
+ assert child_cert and child_cert.sql_in_db
+ publisher.call_pubd(done, errback)
+
+ self.gctx.irdb_query_child_resources(child.self().self_handle, child.child_handle, got_resources, errback)
+
+ @classmethod
+ def query(cls, parent, ca, ca_detail, callback, errback):
+ """
+ Send an "issue" request to parent associated with ca.
+ """
+ assert ca_detail is not None and ca_detail.state in ("pending", "active")
+ sia = ((rpki.oids.name2oid["id-ad-caRepository"], ("uri", ca.sia_uri)),
+ (rpki.oids.name2oid["id-ad-rpkiManifest"], ("uri", ca_detail.manifest_uri(ca))))
+ self = cls()
+ self.class_name = ca.parent_resource_class
+ self.pkcs10 = rpki.x509.PKCS10.create_ca(ca_detail.private_key_id, sia)
+ rpki.log.info('Sending "issue" request to parent %s' % parent.parent_handle)
+ parent.query_up_down(self, callback, errback)
+
+class issue_response_pdu(class_response_syntax):
+ """
+ Up-Down protocol "issue_response" PDU.
+ """
+
+ def check_response(self):
+ """
+ Check whether this looks like a reasonable issue_response PDU.
+ XML schema should be tighter for this response.
+ """
+ if len(self.classes) != 1 or len(self.classes[0].certs) != 1:
+ raise rpki.exceptions.BadIssueResponse
+
+class revoke_syntax(base_elt):
+ """
+ Syntax for Up-Down protocol "revoke" and "revoke_response" PDUs.
+ """
+
+ def startElement(self, stack, name, attrs):
+ """Handle "revoke" PDU."""
+ self.class_name = attrs["class_name"]
+ self.ski = attrs["ski"]
+
+ def toXML(self):
+ """Generate payload of "revoke" PDU."""
+ return [self.make_elt("key", "class_name", "ski")]
+
+class revoke_pdu(revoke_syntax):
+ """
+ Up-Down protocol "revoke" PDU.
+ """
+
+ def get_SKI(self):
+ """Convert g(SKI) encoding from PDU back to raw SKI."""
+ return base64.urlsafe_b64decode(self.ski + "=")
+
+ def serve_pdu(self, q_msg, r_msg, child, cb, eb):
+ """
+ Serve one revoke request PDU.
+ """
+
+ def done():
+ r_msg.payload = revoke_response_pdu()
+ r_msg.payload.class_name = self.class_name
+ r_msg.payload.ski = self.ski
+ cb()
+
+ ca = child.ca_from_class_name(self.class_name)
+ publisher = rpki.rpki_engine.publication_queue()
+ for ca_detail in ca.ca_details():
+ for child_cert in child.child_certs(ca_detail = ca_detail, ski = self.get_SKI()):
+ child_cert.revoke(publisher = publisher)
+ self.gctx.sql.sweep()
+ publisher.call_pubd(done, eb)
+
+ @classmethod
+ def query(cls, ca, gski, cb, eb):
+ """
+ Send a "revoke" request for certificate(s) named by gski to parent associated with ca.
+ """
+ parent = ca.parent()
+ self = cls()
+ self.class_name = ca.parent_resource_class
+ self.ski = gski
+ rpki.log.info('Sending "revoke" request for SKI %s to parent %s' % (gski, parent.parent_handle))
+ parent.query_up_down(self, cb, eb)
+
+class revoke_response_pdu(revoke_syntax):
+ """
+ Up-Down protocol "revoke_response" PDU.
+ """
+
+ pass
+
+class error_response_pdu(base_elt):
+ """
+ Up-Down protocol "error_response" PDU.
+ """
+
+ codes = {
+ 1101 : "Already processing request",
+ 1102 : "Version number error",
+ 1103 : "Unrecognised request type",
+ 1201 : "Request - no such resource class",
+ 1202 : "Request - no resources allocated in resource class",
+ 1203 : "Request - badly formed certificate request",
+ 1301 : "Revoke - no such resource class",
+ 1302 : "Revoke - no such key",
+ 2001 : "Internal Server Error - Request not performed" }
+
+ exceptions = {
+ rpki.exceptions.NoActiveCA : 1202 }
+
+ def __init__(self, exception = None):
+ """
+ Initialize an error_response PDU from an exception object.
+ """
+ base_elt.__init__(self)
+ if exception is not None:
+ self.status = self.exceptions.get(type(exception), 2001)
+ self.description = str(exception)
+
+ def endElement(self, stack, name, text):
+ """
+ Handle "error_response" PDU.
+ """
+ if name == "status":
+ code = int(text)
+ if code not in self.codes:
+ raise rpki.exceptions.BadStatusCode, "%s is not a known status code" % code
+ self.status = code
+ elif name == "description":
+ self.description = text
+ else:
+ assert name == "message", "Unexpected name %s, stack %s" % (name, stack)
+ stack.pop()
+ stack[-1].endElement(stack, name, text)
+
+ def toXML(self):
+ """
+ Generate payload of "error_response" PDU.
+ """
+ assert self.status in self.codes
+ elt = self.make_elt("status")
+ elt.text = str(self.status)
+ payload = [elt]
+ if self.description:
+ elt = self.make_elt("description")
+ elt.text = str(self.description)
+ elt.set("{http://www.w3.org/XML/1998/namespace}lang", "en-US")
+ payload.append(elt)
+ return payload
+
+ def check_response(self):
+ """
+ Handle an error response. For now, just raise an exception,
+ perhaps figure out something more clever to do later.
+ """
+ raise rpki.exceptions.UpstreamError, self.codes[self.status]
+
+class message_pdu(base_elt):
+ """
+ Up-Down protocol message wrapper PDU.
+ """
+
+ version = 1
+
+ name2type = {
+ "list" : list_pdu,
+ "list_response" : list_response_pdu,
+ "issue" : issue_pdu,
+ "issue_response" : issue_response_pdu,
+ "revoke" : revoke_pdu,
+ "revoke_response" : revoke_response_pdu,
+ "error_response" : error_response_pdu }
+
+ type2name = dict((v, k) for k, v in name2type.items())
+
+ def toXML(self):
+ """
+ Generate payload of message PDU.
+ """
+ elt = self.make_elt("message", "version", "sender", "recipient", "type")
+ elt.extend(self.payload.toXML())
+ return elt
+
+ def startElement(self, stack, name, attrs):
+ """
+ Handle message PDU.
+
+ Payload of the <message/> element varies depending on the "type"
+ attribute, so after some basic checks we have to instantiate the
+ right class object to handle whatever kind of PDU this is.
+ """
+ assert name == "message", "Unexpected name %s, stack %s" % (name, stack)
+ assert self.version == int(attrs["version"])
+ self.sender = attrs["sender"]
+ self.recipient = attrs["recipient"]
+ self.type = attrs["type"]
+ self.payload = self.name2type[attrs["type"]]()
+ stack.append(self.payload)
+
+ def __str__(self):
+ """Convert a message PDU to a string."""
+    return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "UTF-8")
+
+ def serve_top_level(self, child, callback):
+ """
+ Serve one message request PDU.
+ """
+
+ r_msg = message_pdu()
+ r_msg.sender = self.recipient
+ r_msg.recipient = self.sender
+
+ def done():
+ r_msg.type = self.type2name[type(r_msg.payload)]
+ callback(r_msg)
+
+ def lose(e):
+ rpki.log.traceback()
+ callback(self.serve_error(e))
+
+ try:
+ self.log_query(child)
+ self.payload.serve_pdu(self, r_msg, child, done, lose)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ lose(e)
+
+ def log_query(self, child):
+ """
+ Log query we're handling. Separate method so rootd can override.
+ """
+ rpki.log.info("Serving %s query from child %s [sender %s, recipient %s]" % (self.type, child.child_handle, self.sender, self.recipient))
+
+ def serve_error(self, exception):
+ """
+ Generate an error_response message PDU.
+ """
+ r_msg = message_pdu()
+ r_msg.sender = self.recipient
+ r_msg.recipient = self.sender
+ r_msg.payload = error_response_pdu(exception)
+ r_msg.type = self.type2name[type(r_msg.payload)]
+ return r_msg
+
+ @classmethod
+ def make_query(cls, payload, sender, recipient):
+ """
+ Construct one message PDU.
+ """
+ assert not cls.type2name[type(payload)].endswith("_response")
+ if sender is None:
+ sender = "tweedledee"
+ if recipient is None:
+ recipient = "tweedledum"
+ self = cls()
+ self.sender = sender
+ self.recipient = recipient
+ self.payload = payload
+ self.type = self.type2name[type(payload)]
+ return self
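+
+  # Usage sketch (illustrative only): wrapping a "list" PDU in a query
+  # message.  The handles below are placeholders for whatever child and
+  # parent handles the caller has configured.
+  #
+  #   q_msg = message_pdu.make_query(payload = list_pdu(),
+  #                                  sender = "child-handle",
+  #                                  recipient = "parent-handle")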
+
+class sax_handler(rpki.xml_utils.sax_handler):
+ """
+ SAX handler for Up-Down protocol.
+ """
+
+ pdu = message_pdu
+ name = "message"
+ version = "1"
+
+class cms_msg(rpki.x509.XML_CMS_object):
+ """
+ Class to hold a CMS-signed up-down PDU.
+ """
+
+ encoding = "UTF-8"
+ schema = rpki.relaxng.up_down
+ saxify = sax_handler.saxify
diff --git a/rpkid.without_tls/rpki/x509.py b/rpkid.without_tls/rpki/x509.py
new file mode 100644
index 00000000..d013d247
--- /dev/null
+++ b/rpkid.without_tls/rpki/x509.py
@@ -0,0 +1,1242 @@
+"""
+One X.509 implementation to rule them all...
+
+...and in the darkness hide the twisty maze of partially overlapping
+X.509 support packages in Python.
+
+There are several existing packages, none of which do quite what I
+need, due to age, lack of documentation, specialization, or lack of
+foresight on somebody's part (perhaps mine). This module attempts to
+bring together the functionality I need in a way that hides at least
+some of the nasty details. This involves a lot of format conversion.
+
+$Id$
+
+
+Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import POW, POW.pkix, base64, lxml.etree, os, subprocess, sys
+import email.mime.application, email.utils, mailbox, time
+import rpki.exceptions, rpki.resource_set, rpki.oids, rpki.sundial
+import rpki.manifest, rpki.roa, rpki.log, rpki.async
+
+def base64_with_linebreaks(der):
+ """
+ Encode DER (really, anything) as Base64 text, with linebreaks to
+ keep the result (sort of) readable.
+ """
+ b = base64.b64encode(der)
+ n = len(b)
+ return "\n" + "\n".join(b[i : min(i + 64, n)] for i in xrange(0, n, 64)) + "\n"
+
+def calculate_SKI(public_key_der):
+ """
+ Calculate the SKI value given the DER representation of a public
+ key, which requires first peeling the ASN.1 wrapper off the key.
+ """
+ k = POW.pkix.SubjectPublicKeyInfo()
+ k.fromString(public_key_der)
+ d = POW.Digest(POW.SHA1_DIGEST)
+ d.update(k.subjectPublicKey.get())
+ return d.digest()
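+
+# Usage sketch (illustrative only): the SKI is the SHA-1 digest of the
+# subjectPublicKey BIT STRING.  Given an RSA keypair object (class defined
+# later in this module):
+#
+#   keypair = RSA.generate()
+#   ski = calculate_SKI(keypair.get_public_DER())
+#   hex_ski = ":".join("%02X" % ord(b) for b in ski)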
+
+class PEM_converter(object):
+ """
+ Convert between DER and PEM encodings for various kinds of ASN.1 data.
+ """
+
+ def __init__(self, kind): # "CERTIFICATE", "RSA PRIVATE KEY", ...
+ """
+ Initialize PEM_converter.
+ """
+ self.b = "-----BEGIN %s-----" % kind
+ self.e = "-----END %s-----" % kind
+
+ def looks_like_PEM(self, text):
+ """
+ Guess whether text looks like a PEM encoding.
+ """
+ b = text.find(self.b)
+ return b >= 0 and text.find(self.e) > b + len(self.b)
+
+ def to_DER(self, pem):
+ """
+ Convert from PEM to DER.
+ """
+ lines = [line.strip() for line in pem.splitlines(0)]
+ while lines and lines.pop(0) != self.b:
+ pass
+ while lines and lines.pop(-1) != self.e:
+ pass
+ if not lines:
+ raise rpki.exceptions.EmptyPEM, "Could not find PEM in:\n%s" % pem
+ return base64.b64decode("".join(lines))
+
+ def to_PEM(self, der):
+ """
+ Convert from DER to PEM.
+ """
+ return self.b + base64_with_linebreaks(der) + self.e + "\n"
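+
+  # Usage sketch (illustrative only): round-tripping a certificate between
+  # PEM and DER.  "pem_text" is assumed to be a PEM-format certificate
+  # obtained elsewhere.
+  #
+  #   c = PEM_converter("CERTIFICATE")
+  #   if c.looks_like_PEM(pem_text):
+  #     der = c.to_DER(pem_text)
+  #     assert c.looks_like_PEM(c.to_PEM(der))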
+
+def _find_xia_uri(extension, name):
+ """
+  Find an rsync URI in an SIA or AIA extension.
+ Returns the URI if found, otherwise None.
+ """
+ oid = rpki.oids.name2oid[name]
+
+ for method, location in extension:
+ if method == oid and location[0] == "uri" and location[1].startswith("rsync://"):
+ return location[1]
+ return None
+
+class DER_object(object):
+ """
+ Virtual class to hold a generic DER object.
+ """
+
+ ## Formats supported in this object
+ formats = ("DER",)
+
+ ## PEM converter for this object
+ pem_converter = None
+
+ ## Other attributes that self.clear() should whack
+ other_clear = ()
+
+ ## @var DER
+ ## DER value of this object
+
+ def empty(self):
+ """
+ Test whether this object is empty.
+ """
+ return all(getattr(self, a, None) is None for a in self.formats)
+
+ def clear(self):
+ """
+ Make this object empty.
+ """
+ for a in self.formats + self.other_clear:
+ setattr(self, a, None)
+ self.filename = None
+ self.timestamp = None
+
+ def __init__(self, **kw):
+ """
+ Initialize a DER_object.
+ """
+ self.clear()
+ if len(kw):
+ self.set(**kw)
+
+ def set(self, **kw):
+ """
+ Set this object by setting one of its known formats.
+
+ This method only allows one to set one format at a time.
+ Subsequent calls will clear the object first. The point of all
+ this is to let the object's internal converters handle mustering
+ the object into whatever format you need at the moment.
+ """
+
+ if len(kw) == 1:
+ name = kw.keys()[0]
+ if name in self.formats:
+ self.clear()
+ setattr(self, name, kw[name])
+ return
+ if name == "PEM":
+ self.clear()
+ self.DER = self.pem_converter.to_DER(kw[name])
+ return
+ if name == "Base64":
+ self.clear()
+ self.DER = base64.b64decode(kw[name])
+ return
+ if name == "Auto_update":
+ self.filename = kw[name]
+ self.check_auto_update()
+ return
+ if name in ("PEM_file", "DER_file", "Auto_file"):
+ f = open(kw[name], "rb")
+ value = f.read()
+ f.close()
+ if name == "PEM_file" or (name == "Auto_file" and self.pem_converter.looks_like_PEM(value)):
+ value = self.pem_converter.to_DER(value)
+ self.clear()
+ self.DER = value
+ return
+ raise rpki.exceptions.DERObjectConversionError, "Can't honor conversion request %r" % (kw,)
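+
+  # Usage sketch (illustrative only): the same keyword interface works for
+  # every DER_object subclass, e.g. the X509 class defined later in this
+  # module.  The filenames are placeholders.
+  #
+  #   cert = X509(PEM_file = "ca.cer.pem")     # read and convert PEM
+  #   cert = X509(Auto_file = "ca.cer")        # sniff PEM vs DER
+  #   cert = X509(Base64 = some_base64_text)   # decode Base64 text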
+
+ def check_auto_update(self):
+ """
+ Check for updates to a DER object that auto-updates from a file.
+ """
+ if self.filename is None:
+ return
+ filename = self.filename
+ timestamp = os.stat(self.filename).st_mtime
+ if self.timestamp is None or self.timestamp < timestamp:
+ rpki.log.debug("Updating %s, timestamp %s" % (filename, rpki.sundial.datetime.fromtimestamp(timestamp)))
+ f = open(filename, "rb")
+ value = f.read()
+ f.close()
+ if self.pem_converter.looks_like_PEM(value):
+ value = self.pem_converter.to_DER(value)
+ self.clear()
+ self.DER = value
+ self.filename = filename
+ self.timestamp = timestamp
+
+ def check(self):
+ """
+ Perform basic checks on a DER object.
+ """
+ assert not self.empty()
+ self.check_auto_update()
+
+ def get_DER(self):
+ """
+ Get the DER value of this object.
+
+ Subclasses will almost certainly override this method.
+ """
+ self.check()
+ if self.DER:
+ return self.DER
+ raise rpki.exceptions.DERObjectConversionError, "No conversion path to DER available"
+
+ def get_Base64(self):
+ """Get the Base64 encoding of the DER value of this object."""
+ return base64_with_linebreaks(self.get_DER())
+
+ def get_PEM(self):
+ """Get the PEM representation of this object."""
+ return self.pem_converter.to_PEM(self.get_DER())
+
+ def __cmp__(self, other):
+ """
+ Compare two DER-encoded objects.
+ """
+ if self is None and other is None:
+ return 0
+ elif self is None:
+ return -1
+ elif other is None:
+ return 1
+ else:
+ return cmp(self.get_DER(), other.get_DER())
+
+ def hSKI(self):
+ """
+ Return hexadecimal string representation of SKI for this object.
+    Only works for subclasses that implement get_SKI().
+ """
+ ski = self.get_SKI()
+ return ":".join(("%02X" % ord(i) for i in ski)) if ski else ""
+
+ def gSKI(self):
+ """
+    Calculate g(SKI) for this object. Only works for subclasses
+ that implement get_SKI().
+ """
+ return base64.urlsafe_b64encode(self.get_SKI()).rstrip("=")
+
+ def hAKI(self):
+ """
+ Return hexadecimal string representation of AKI for this
+    object. Only works for subclasses that implement get_AKI().
+ """
+ aki = self.get_AKI()
+ return ":".join(("%02X" % ord(i) for i in aki)) if aki else ""
+
+ def gAKI(self):
+ """
+    Calculate g(AKI) for this object. Only works for subclasses
+ that implement get_AKI().
+ """
+ return base64.urlsafe_b64encode(self.get_AKI()).rstrip("=")
+
+ def get_AKI(self):
+ """
+ Get the AKI extension from this object. Only works for subclasses
+ that support getExtension().
+ """
+ aki = (self.get_POWpkix().getExtension(rpki.oids.name2oid["authorityKeyIdentifier"]) or ((), 0, None))[2]
+ return aki[0] if isinstance(aki, tuple) else aki
+
+ def get_SKI(self):
+ """
+ Get the SKI extension from this object. Only works for subclasses
+ that support getExtension().
+ """
+ return (self.get_POWpkix().getExtension(rpki.oids.name2oid["subjectKeyIdentifier"]) or ((), 0, None))[2]
+
+ def get_SIA(self):
+ """
+ Get the SIA extension from this object. Only works for subclasses
+ that support getExtension().
+ """
+ return (self.get_POWpkix().getExtension(rpki.oids.name2oid["subjectInfoAccess"]) or ((), 0, None))[2]
+
+ def get_sia_directory_uri(self):
+ """
+ Get SIA directory (id-ad-caRepository) URI from this object.
+ Only works for subclasses that support getExtension().
+ """
+ return _find_xia_uri(self.get_SIA(), "id-ad-caRepository")
+
+ def get_sia_manifest_uri(self):
+ """
+ Get SIA manifest (id-ad-rpkiManifest) URI from this object.
+ Only works for subclasses that support getExtension().
+ """
+ return _find_xia_uri(self.get_SIA(), "id-ad-rpkiManifest")
+
+ def get_AIA(self):
+ """
+    Get the AIA extension from this object. Only works for subclasses
+ that support getExtension().
+ """
+ return (self.get_POWpkix().getExtension(rpki.oids.name2oid["authorityInfoAccess"]) or ((), 0, None))[2]
+
+ def get_aia_uri(self):
+ """
+ Get AIA (id-ad-caIssuers) URI from this object.
+ Only works for subclasses that support getExtension().
+ """
+ return _find_xia_uri(self.get_AIA(), "id-ad-caIssuers")
+
+ def get_basicConstraints(self):
+ """
+ Get the basicConstraints extension from this object. Only works
+ for subclasses that support getExtension().
+ """
+ return (self.get_POWpkix().getExtension(rpki.oids.name2oid["basicConstraints"]) or ((), 0, None))[2]
+
+ def is_CA(self):
+ """
+ Return True if and only if object has the basicConstraints
+ extension and its cA value is true.
+ """
+ basicConstraints = self.get_basicConstraints()
+ return basicConstraints and basicConstraints[0] != 0
+
+ def get_3779resources(self):
+ """
+ Get RFC 3779 resources as rpki.resource_set objects. Only works
+ for subclasses that support getExtensions().
+ """
+ resources = rpki.resource_set.resource_bag.from_rfc3779_tuples(self.get_POWpkix().getExtensions())
+ try:
+ resources.valid_until = self.getNotAfter()
+ except AttributeError:
+ pass
+ return resources
+
+ @classmethod
+ def from_sql(cls, x):
+ """Convert from SQL storage format."""
+ return cls(DER = x)
+
+ def to_sql(self):
+ """Convert to SQL storage format."""
+ return self.get_DER()
+
+ def dumpasn1(self):
+ """
+ Pretty print an ASN.1 DER object using cryptlib dumpasn1 tool.
+ Use a temporary file rather than popen4() because dumpasn1 uses
+ seek() when decoding ASN.1 content nested in OCTET STRING values.
+ """
+
+ ret = None
+ fn = "dumpasn1.%d.tmp" % os.getpid()
+ try:
+ f = open(fn, "wb")
+ f.write(self.get_DER())
+ f.close()
+ p = subprocess.Popen(("dumpasn1", "-a", fn), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+ ret = "\n".join(x for x in p.communicate()[0].splitlines() if x.startswith(" "))
+ finally:
+ os.unlink(fn)
+ return ret
+
+class X509(DER_object):
+ """
+ X.509 certificates.
+
+ This class is designed to hold all the different representations of
+ X.509 certs we're using and convert between them. X.509 support in
+Python is a nasty maze of half-cooked stuff (except perhaps for
+ cryptlib, which is just different). Users of this module should not
+ have to care about this implementation nightmare.
+ """
+
+ formats = ("DER", "POW", "POWpkix")
+ pem_converter = PEM_converter("CERTIFICATE")
+
+ def get_DER(self):
+ """
+ Get the DER value of this certificate.
+ """
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ if self.POWpkix:
+ self.DER = self.POWpkix.toString()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError, "No conversion path to DER available"
+
+ def get_POW(self):
+ """
+ Get the POW value of this certificate.
+ """
+ self.check()
+ if not self.POW:
+ self.POW = POW.derRead(POW.X509_CERTIFICATE, self.get_DER())
+ return self.POW
+
+ def get_POWpkix(self):
+ """
+ Get the POW.pkix value of this certificate.
+ """
+ self.check()
+ if not self.POWpkix:
+ cert = POW.pkix.Certificate()
+ cert.fromString(self.get_DER())
+ self.POWpkix = cert
+ return self.POWpkix
+
+ def getIssuer(self):
+ """Get the issuer of this certificate."""
+ return self.get_POW().getIssuer()
+
+ def getSubject(self):
+ """Get the subject of this certificate."""
+ return self.get_POW().getSubject()
+
+ def getNotBefore(self):
+ """Get the inception time of this certificate."""
+ return rpki.sundial.datetime.fromASN1tuple(self.get_POWpkix().tbs.validity.notBefore.get())
+
+ def getNotAfter(self):
+ """Get the expiration time of this certificate."""
+ return rpki.sundial.datetime.fromASN1tuple(self.get_POWpkix().tbs.validity.notAfter.get())
+
+ def getSerial(self):
+ """Get the serial number of this certificate."""
+ return self.get_POW().getSerial()
+
+ def getPublicKey(self):
+ """Extract the public key from this certificate."""
+ return RSApublic(DER = self.get_POWpkix().tbs.subjectPublicKeyInfo.toString())
+
+ def expired(self):
+ """Test whether this certificate has expired."""
+ return self.getNotAfter() <= rpki.sundial.now()
+
+ def issue(self, keypair, subject_key, serial, sia, aia, crldp, notAfter,
+ cn = None, resources = None, is_ca = True):
+ """
+ Issue a certificate.
+ """
+
+ now = rpki.sundial.now()
+ aki = self.get_SKI()
+ ski = subject_key.get_SKI()
+
+ if cn is None:
+ cn = "".join(("%02X" % ord(i) for i in ski))
+
+ # if notAfter is None: notAfter = now + rpki.sundial.timedelta(days = 30)
+
+ cert = POW.pkix.Certificate()
+ cert.setVersion(2)
+ cert.setSerial(serial)
+ cert.setIssuer(self.get_POWpkix().getSubject())
+ cert.setSubject((((rpki.oids.name2oid["commonName"], ("printableString", cn)),),))
+ cert.setNotBefore(now.toASN1tuple())
+ cert.setNotAfter(notAfter.toASN1tuple())
+ cert.tbs.subjectPublicKeyInfo.fromString(subject_key.get_DER())
+
+ exts = [ ["subjectKeyIdentifier", False, ski],
+ ["authorityKeyIdentifier", False, (aki, (), None)],
+ ["cRLDistributionPoints", False, ((("fullName", (("uri", crldp),)), None, ()),)],
+ ["authorityInfoAccess", False, ((rpki.oids.name2oid["id-ad-caIssuers"], ("uri", aia)),)],
+ ["certificatePolicies", True, ((rpki.oids.name2oid["id-cp-ipAddr-asNumber"], ()),)] ]
+
+ if is_ca:
+ exts.append(["basicConstraints", True, (1, None)])
+ exts.append(["keyUsage", True, (0, 0, 0, 0, 0, 1, 1)])
+ else:
+ exts.append(["keyUsage", True, (1,)])
+
+ if sia is not None:
+ exts.append(["subjectInfoAccess", False, sia])
+ else:
+ assert not is_ca
+
+ if resources is not None and resources.asn:
+ exts.append(["sbgp-autonomousSysNum", True, (resources.asn.to_rfc3779_tuple(), None)])
+
+ if resources is not None and (resources.v4 or resources.v6):
+ exts.append(["sbgp-ipAddrBlock", True, [x for x in (resources.v4.to_rfc3779_tuple(), resources.v6.to_rfc3779_tuple()) if x is not None]])
+
+ for x in exts:
+ x[0] = rpki.oids.name2oid[x[0]]
+ cert.setExtensions(exts)
+
+ cert.sign(keypair.get_POW(), POW.SHA256_DIGEST)
+
+ return X509(POWpkix = cert)
+
+ def cross_certify(self, keypair, source_cert, serial, notAfter, now = None, pathLenConstraint = 0):
+ """
+    Issue a certificate with values taken from an existing certificate.
+    This is used to construct some kinds of BPKI certificates.
+ """
+
+ if now is None:
+ now = rpki.sundial.now()
+
+ assert isinstance(pathLenConstraint, int) and pathLenConstraint >= 0
+
+ cert = POW.pkix.Certificate()
+ cert.setVersion(2)
+ cert.setSerial(serial)
+ cert.setIssuer(self.get_POWpkix().getSubject())
+ cert.setSubject(source_cert.get_POWpkix().getSubject())
+ cert.setNotBefore(now.toASN1tuple())
+ cert.setNotAfter(notAfter.toASN1tuple())
+ cert.tbs.subjectPublicKeyInfo.set(
+ source_cert.get_POWpkix().tbs.subjectPublicKeyInfo.get())
+ cert.setExtensions((
+ (rpki.oids.name2oid["subjectKeyIdentifier" ], False, source_cert.get_SKI()),
+ (rpki.oids.name2oid["authorityKeyIdentifier"], False, (self.get_SKI(), (), None)),
+ (rpki.oids.name2oid["basicConstraints" ], True, (1, 0))))
+ cert.sign(keypair.get_POW(), POW.SHA256_DIGEST)
+
+ return X509(POWpkix = cert)
+
+ @classmethod
+ def normalize_chain(cls, chain):
+ """
+ Normalize a chain of certificates into a tuple of X509 objects.
+ Given all the glue certificates needed for BPKI cross
+ certification, it's easiest to allow sloppy arguments to the CMS
+ validation methods and provide a single method that normalizes the
+ allowed cases. So this method allows X509, None, lists, and
+ tuples, and returns a tuple of X509 objects.
+ """
+ if isinstance(chain, cls):
+ chain = (chain,)
+ return tuple(x for x in chain if x is not None)
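+
+  # Usage sketch (illustrative only): each of these yields a tuple of X509
+  # objects, with None entries dropped.
+  #
+  #   X509.normalize_chain(ta_cert)                 # single cert
+  #   X509.normalize_chain([ta_cert, cross_cert])   # list of certs
+  #   X509.normalize_chain((ta_cert, None))         # None entries ignored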
+
+class PKCS10(DER_object):
+ """
+ Class to hold a PKCS #10 request.
+ """
+
+ formats = ("DER", "POWpkix")
+ pem_converter = PEM_converter("CERTIFICATE REQUEST")
+
+ def get_DER(self):
+ """
+ Get the DER value of this certification request.
+ """
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POWpkix:
+ self.DER = self.POWpkix.toString()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError, "No conversion path to DER available"
+
+ def get_POWpkix(self):
+ """
+ Get the POW.pkix value of this certification request.
+ """
+ self.check()
+ if not self.POWpkix:
+ req = POW.pkix.CertificationRequest()
+ req.fromString(self.get_DER())
+ self.POWpkix = req
+ return self.POWpkix
+
+ def getPublicKey(self):
+ """Extract the public key from this certification request."""
+ return RSApublic(DER = self.get_POWpkix().certificationRequestInfo.subjectPublicKeyInfo.toString())
+
+ def check_valid_rpki(self):
+ """
+ Check this certification request to see whether it's a valid
+ request for an RPKI certificate. This is broken out of the
+ up-down protocol code because it's somewhat involved and the
+ up-down code doesn't need to know the details.
+
+ Throws an exception if the request isn't valid, so if this method
+ returns at all, the request is ok.
+ """
+
+ if not self.get_POWpkix().verify():
+ raise rpki.exceptions.BadPKCS10, "Signature check failed"
+
+ if self.get_POWpkix().certificationRequestInfo.version.get() != 0:
+ raise rpki.exceptions.BadPKCS10, \
+ "Bad version number %s" % self.get_POWpkix().certificationRequestInfo.version
+
+ if rpki.oids.oid2name.get(self.get_POWpkix().signatureAlgorithm.algorithm.get()) \
+ not in ("sha256WithRSAEncryption", "sha384WithRSAEncryption", "sha512WithRSAEncryption"):
+ raise rpki.exceptions.BadPKCS10, "Bad signature algorithm %s" % self.get_POWpkix().signatureAlgorithm
+
+ exts = self.get_POWpkix().getExtensions()
+ for oid, critical, value in exts:
+ if rpki.oids.oid2name.get(oid) not in ("basicConstraints", "keyUsage", "subjectInfoAccess"):
+ raise rpki.exceptions.BadExtension, "Forbidden extension %s" % oid
+ req_exts = dict((rpki.oids.oid2name[oid], value) for (oid, critical, value) in exts)
+
+ if "basicConstraints" not in req_exts or not req_exts["basicConstraints"][0]:
+ raise rpki.exceptions.BadPKCS10, "request for EE cert not allowed here"
+
+ if req_exts["basicConstraints"][1] is not None:
+ raise rpki.exceptions.BadPKCS10, "basicConstraints must not specify Path Length"
+
+ if "keyUsage" in req_exts and (not req_exts["keyUsage"][5] or not req_exts["keyUsage"][6]):
+ raise rpki.exceptions.BadPKCS10, "keyUsage doesn't match basicConstraints"
+
+ for method, location in req_exts.get("subjectInfoAccess", ()):
+ if rpki.oids.oid2name.get(method) == "id-ad-caRepository" and \
+ (location[0] != "uri" or (location[1].startswith("rsync://") and not location[1].endswith("/"))):
+ raise rpki.exceptions.BadPKCS10, "Certificate request includes bad SIA component: %r" % location
+
+ # This one is an implementation restriction. I don't yet
+ # understand what the spec is telling me to do in this case.
+ assert "subjectInfoAccess" in req_exts, "Can't (yet) handle PKCS #10 without an SIA extension"
+
+ @classmethod
+ def create_ca(cls, keypair, sia = None):
+ """
+ Create a new request for a given keypair, including given SIA value.
+ """
+ exts = [["basicConstraints", True, (1, None)],
+ ["keyUsage", True, (0, 0, 0, 0, 0, 1, 1)]]
+ if sia is not None:
+ exts.append(["subjectInfoAccess", False, sia])
+ for x in exts:
+ x[0] = rpki.oids.name2oid[x[0]]
+ return cls.create(keypair, exts)
+
+ @classmethod
+ def create(cls, keypair, exts = None):
+ """
+ Create a new request for a given keypair, including given extensions.
+ """
+ cn = "".join(("%02X" % ord(i) for i in keypair.get_SKI()))
+ req = POW.pkix.CertificationRequest()
+ req.certificationRequestInfo.version.set(0)
+ req.certificationRequestInfo.subject.set((((rpki.oids.name2oid["commonName"],
+ ("printableString", cn)),),))
+ if exts is not None:
+ req.setExtensions(exts)
+ req.sign(keypair.get_POW(), POW.SHA256_DIGEST)
+ return cls(POWpkix = req)
+
+class RSA(DER_object):
+ """
+ Class to hold an RSA key pair.
+ """
+
+ formats = ("DER", "POW")
+ pem_converter = PEM_converter("RSA PRIVATE KEY")
+
+ def get_DER(self):
+ """
+ Get the DER value of this keypair.
+ """
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite(POW.RSA_PRIVATE_KEY)
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError, "No conversion path to DER available"
+
+ def get_POW(self):
+ """
+ Get the POW value of this keypair.
+ """
+ self.check()
+ if not self.POW:
+ self.POW = POW.derRead(POW.RSA_PRIVATE_KEY, self.get_DER())
+ return self.POW
+
+ @classmethod
+ def generate(cls, keylength = 2048):
+ """
+ Generate a new keypair.
+ """
+ rpki.log.debug("Generating new %d-bit RSA key" % keylength)
+ return cls(POW = POW.Asymmetric(POW.RSA_CIPHER, keylength))
+
+ def get_public_DER(self):
+ """Get the DER encoding of the public key from this keypair."""
+ return self.get_POW().derWrite(POW.RSA_PUBLIC_KEY)
+
+ def get_SKI(self):
+ """Calculate the SKI of this keypair."""
+ return calculate_SKI(self.get_public_DER())
+
+ def get_RSApublic(self):
+ """Convert the public key of this keypair into a RSApublic object."""
+ return RSApublic(DER = self.get_public_DER())
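+
+  # Usage sketch (illustrative only): generate a keypair, then derive the
+  # public key object and g(SKI) from it.
+  #
+  #   keypair = RSA.generate(keylength = 2048)
+  #   pubkey  = keypair.get_RSApublic()
+  #   gski    = keypair.gSKI()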
+
+class RSApublic(DER_object):
+ """
+ Class to hold an RSA public key.
+ """
+
+ formats = ("DER", "POW")
+ pem_converter = PEM_converter("RSA PUBLIC KEY")
+
+ def get_DER(self):
+ """
+ Get the DER value of this public key.
+ """
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite(POW.RSA_PUBLIC_KEY)
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError, "No conversion path to DER available"
+
+ def get_POW(self):
+ """
+ Get the POW value of this public key.
+ """
+ self.check()
+ if not self.POW:
+ self.POW = POW.derRead(POW.RSA_PUBLIC_KEY, self.get_DER())
+ return self.POW
+
+ def get_SKI(self):
+ """Calculate the SKI of this public key."""
+ return calculate_SKI(self.get_DER())
+
+def POWify_OID(oid):
+ """
+ Utility function to convert tuple form of an OID to the
+ dotted-decimal string form that POW uses.
+ """
+ if isinstance(oid, str):
+ return POWify_OID(rpki.oids.name2oid[oid])
+ else:
+ return ".".join(str(i) for i in oid)
+
+class CMS_object(DER_object):
+ """
+ Class to hold a CMS-wrapped object.
+
+ CMS-wrapped objects are a little different from the other DER_object
+ types because the signed object is CMS wrapping inner content that's
+ also ASN.1, and due to our current minimal support for CMS we can't
+ just handle this as a pretty composite object. So, for now anyway,
+ a CMS_object is the outer CMS wrapped object so that the usual DER
+ and PEM operations do the obvious things, and the inner content is
+  handled via separate methods.
+ """
+
+ formats = ("DER", "POW")
+ other_clear = ("content",)
+ econtent_oid = POWify_OID("id-data")
+ pem_converter = PEM_converter("CMS")
+
+ ## @var dump_on_verify_failure
+ # Set this to True to get dumpasn1 dumps of ASN.1 on CMS verify failures.
+
+ dump_on_verify_failure = True
+
+ ## @var debug_cms_certs
+ # Set this to True to log a lot of chatter about CMS certificates.
+
+ debug_cms_certs = False
+
+ ## @var require_crls
+ # Set this to False to make CMS CRLs optional in the cases where we
+ # would otherwise require them. Some day this option should go away
+  # and CRLs should be unconditionally mandatory in such cases.
+
+ require_crls = False
+
+ ## @var print_on_der_error
+ # Set this to True to log alleged DER when we have trouble parsing
+ # it, in case it's really a Perl backtrace or something.
+
+ print_on_der_error = True
+
+ def get_DER(self):
+ """
+ Get the DER value of this CMS_object.
+ """
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError, "No conversion path to DER available"
+
+ def get_POW(self):
+ """
+ Get the POW value of this CMS_object.
+ """
+ self.check()
+ if not self.POW:
+ self.POW = POW.derRead(POW.CMS_MESSAGE, self.get_DER())
+ return self.POW
+
+ def get_content(self):
+ """
+ Get the inner content of this CMS_object.
+ """
+ if self.content is None:
+ raise rpki.exceptions.CMSContentNotSet, "Inner content of CMS object %r is not set" % self
+ return self.content
+
+ def set_content(self, content):
+ """
+ Set the (inner) content of this CMS_object, clearing the wrapper.
+ """
+ self.clear()
+ self.content = content
+
+ def get_signingTime(self):
+ """
+ Extract signingTime from CMS signed attributes.
+ """
+ return rpki.sundial.datetime.fromGeneralizedTime(self.get_POW().signingTime())
+
+ def verify(self, ta):
+ """
+ Verify CMS wrapper and store inner content.
+ """
+
+ try:
+ cms = self.get_POW()
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except:
+ if self.print_on_der_error:
+ rpki.log.debug("Problem parsing DER CMS message, might not really be DER: %r" % self.get_DER())
+ raise rpki.exceptions.UnparsableCMSDER
+
+ if cms.eContentType() != self.econtent_oid:
+ raise rpki.exceptions.WrongEContentType, "Got CMS eContentType %s, expected %s" % (cms.eContentType(), self.econtent_oid)
+
+ certs = [X509(POW = x) for x in cms.certs()]
+ crls = [CRL(POW = c) for c in cms.crls()]
+
+ if self.debug_cms_certs:
+ for x in certs:
+ rpki.log.debug("Received CMS cert issuer %s subject %s SKI %s" % (x.getIssuer(), x.getSubject(), x.hSKI()))
+ for c in crls:
+ rpki.log.debug("Received CMS CRL issuer %r" % (c.getIssuer(),))
+
+ store = POW.X509Store()
+
+ trusted_ee = None
+
+ for x in X509.normalize_chain(ta):
+ if self.debug_cms_certs:
+ rpki.log.debug("CMS trusted cert issuer %s subject %s SKI %s" % (x.getIssuer(), x.getSubject(), x.hSKI()))
+ if not x.is_CA():
+ assert trusted_ee is None, "Can't have two EE certs in the same validation chain"
+ trusted_ee = x
+ store.addTrust(x.get_POW())
+
+ if trusted_ee:
+ if self.debug_cms_certs:
+ rpki.log.debug("Trusted CMS EE cert issuer %s subject %s SKI %s" % (trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI()))
+ if certs and (len(certs) > 1 or certs[0].getSubject() != trusted_ee.getSubject() or certs[0].getPublicKey() != trusted_ee.getPublicKey()):
+ raise rpki.exceptions.UnexpectedCMSCerts, certs
+ if crls:
+ raise rpki.exceptions.UnexpectedCMSCRLs, crls
+ else:
+ if not certs:
+ raise rpki.exceptions.MissingCMSEEcert, certs
+ if len(certs) > 1 or certs[0].is_CA():
+ raise rpki.exceptions.UnexpectedCMSCerts, certs
+ if not crls:
+ if self.require_crls:
+ raise rpki.exceptions.MissingCMSCRL, crls
+ else:
+ rpki.log.warn("MISSING CMS CRL! Ignoring per self.require_crls setting")
+ if len(crls) > 1:
+ raise rpki.exceptions.UnexpectedCMSCRLs, crls
+
+ try:
+ content = cms.verify(store)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except:
+ if self.dump_on_verify_failure:
+ if True:
+ dbg = self.dumpasn1()
+ else:
+ dbg = cms.pprint()
+ sys.stderr.write("CMS verification failed, dumping ASN.1 (%d octets):\n%s\n" % (len(self.get_DER()), dbg))
+ raise rpki.exceptions.CMSVerificationFailed, "CMS verification failed"
+
+ self.decode(content)
+ return self.get_content()
+
+ def extract(self):
+ """
+ Extract and store inner content from CMS wrapper without verifying
+ the CMS.
+
+ DANGER WILL ROBINSON!!!
+
+ Do not use this method on unvalidated data. Use the verify()
+ method instead.
+
+ If you don't understand this warning, don't use this method.
+ """
+
+ try:
+ cms = self.get_POW()
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except:
+ raise rpki.exceptions.UnparsableCMSDER
+
+ if cms.eContentType() != self.econtent_oid:
+ raise rpki.exceptions.WrongEContentType, "Got CMS eContentType %s, expected %s" % (cms.eContentType(), self.econtent_oid)
+
+ content = cms.verify(POW.X509Store(), None, POW.CMS_NOCRL | POW.CMS_NO_SIGNER_CERT_VERIFY | POW.CMS_NO_ATTR_VERIFY | POW.CMS_NO_CONTENT_VERIFY)
+
+ self.decode(content)
+ return self.get_content()
+
+ def sign(self, keypair, certs, crls = None, no_certs = False):
+ """
+ Sign and wrap inner content.
+ """
+
+ rpki.log.trace()
+
+ if isinstance(certs, X509):
+ cert = certs
+ certs = ()
+ else:
+ cert = certs[0]
+ certs = certs[1:]
+
+ if crls is None:
+ crls = ()
+ elif isinstance(crls, CRL):
+ crls = (crls,)
+
+ if self.debug_cms_certs:
+ rpki.log.debug("Signing with cert issuer %s subject %s SKI %s" % (cert.getIssuer(), cert.getSubject(), cert.hSKI()))
+ for i, c in enumerate(certs):
+ rpki.log.debug("Additional cert %d issuer %s subject %s SKI %s" % (i, c.getIssuer(), c.getSubject(), c.hSKI()))
+
+ cms = POW.CMS()
+
+ cms.sign(cert.get_POW(),
+ keypair.get_POW(),
+ self.encode(),
+ [x.get_POW() for x in certs],
+ [c.get_POW() for c in crls],
+ self.econtent_oid,
+ POW.CMS_NOCERTS if no_certs else 0)
+
+ self.POW = cms
+
+class DER_CMS_object(CMS_object):
+ """
+ Class to hold CMS objects with DER-based content.
+ """
+
+ def encode(self):
+ """Encode inner content for signing."""
+ return self.get_content().toString()
+
+ def decode(self, der):
+ """
+ Decode DER and set inner content.
+ """
+ obj = self.content_class()
+ obj.fromString(der)
+ self.content = obj
+
+class SignedManifest(DER_CMS_object):
+ """
+ Class to hold a signed manifest.
+ """
+
+ pem_converter = PEM_converter("RPKI MANIFEST")
+ content_class = rpki.manifest.Manifest
+ econtent_oid = POWify_OID("id-ct-rpkiManifest")
+
+ def getThisUpdate(self):
+ """Get thisUpdate value from this manifest."""
+ return rpki.sundial.datetime.fromGeneralizedTime(self.get_content().thisUpdate.get())
+
+ def getNextUpdate(self):
+ """Get nextUpdate value from this manifest."""
+ return rpki.sundial.datetime.fromGeneralizedTime(self.get_content().nextUpdate.get())
+
+ @classmethod
+ def build(cls, serial, thisUpdate, nextUpdate, names_and_objs, keypair, certs, version = 0):
+ """
+ Build a signed manifest.
+ """
+ self = cls()
+ filelist = []
+ for name, obj in names_and_objs:
+ d = POW.Digest(POW.SHA256_DIGEST)
+ d.update(obj.get_DER())
+ filelist.append((name.rpartition("/")[2], d.digest()))
+ filelist.sort(key = lambda x: x[0])
+ m = rpki.manifest.Manifest()
+ m.version.set(version)
+ m.manifestNumber.set(serial)
+ m.thisUpdate.set(thisUpdate.toGeneralizedTime())
+ m.nextUpdate.set(nextUpdate.toGeneralizedTime())
+ m.fileHashAlg.set(rpki.oids.name2oid["id-sha256"])
+ m.fileList.set(filelist)
+ self.set_content(m)
+ self.sign(keypair, certs)
+ return self
+
+class ROA(DER_CMS_object):
+ """
+ Class to hold a signed ROA.
+ """
+
+ pem_converter = PEM_converter("ROUTE ORIGIN ATTESTATION")
+ content_class = rpki.roa.RouteOriginAttestation
+ econtent_oid = POWify_OID("id-ct-routeOriginAttestation")
+
+ @classmethod
+ def build(cls, asn, ipv4, ipv6, keypair, certs, version = 0):
+ """
+ Build a ROA.
+ """
+ try:
+ self = cls()
+ r = rpki.roa.RouteOriginAttestation()
+ r.version.set(version)
+ r.asID.set(asn)
+ r.ipAddrBlocks.set((a.to_roa_tuple() for a in (ipv4, ipv6) if a))
+ self.set_content(r)
+ self.sign(keypair, certs)
+ return self
+ except POW.pkix.DerError, e:
+ rpki.log.debug("Encoding error while generating ROA %r: %s" % (self, e))
+ rpki.log.debug("ROA inner content: %r" % (r.get(),))
+ raise
+
+class DeadDrop(object):
+ """
+ Dead-drop utility for storing copies of CMS messages for debugging or
+ audit. At the moment this uses Maildir mailbox format, as it has
+ approximately the right properties and a number of useful tools for
+ manipulating it already exist.
+ """
+
+ def __init__(self, name):
+ self.maildir = mailbox.Maildir(name, factory = None, create = True)
+ self.pid = os.getpid()
+
+ def dump(self, obj):
+ now = time.time()
+ msg = email.mime.application.MIMEApplication(obj.get_DER(), "x-rpki")
+ msg["Date"] = email.utils.formatdate(now)
+ msg["Subject"] = "Process %s dump of %r" % (self.pid, obj)
+ msg["Message-ID"] = email.utils.make_msgid()
+ msg["X-RPKI-PID"] = str(self.pid)
+ msg["X-RPKI-Object"] = repr(obj)
+ msg["X-RPKI-Timestamp"] = "%f" % now
+ self.maildir.add(msg)
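+
+  # Usage sketch (illustrative only): the Maildir path is a placeholder;
+  # any writable directory will do, and the object dumped can be any
+  # DER_object that implements get_DER().
+  #
+  #   drop = DeadDrop("/some/where/outbound-cms")
+  #   drop.dump(cms_object)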
+
+class XML_CMS_object(CMS_object):
+ """
+ Class to hold CMS-wrapped XML protocol data.
+ """
+
+ econtent_oid = POWify_OID("id-ct-xml")
+
+ ## @var dump_outbound_cms
+ # If set, we write all outbound XML-CMS PDUs to disk, for debugging.
+ # If set, value should be a DeadDrop object.
+
+ dump_outbound_cms = None
+
+ ## @var dump_inbound_cms
+ # If set, we write all inbound XML-CMS PDUs to disk, for debugging.
+ # If set, value should be a DeadDrop object.
+
+ dump_inbound_cms = None
+
+ def encode(self):
+ """Encode inner content for signing."""
+ return lxml.etree.tostring(self.get_content(), pretty_print = True, encoding = self.encoding, xml_declaration = True)
+
+ def decode(self, xml):
+ """Decode XML and set inner content."""
+ self.content = lxml.etree.fromstring(xml)
+
+ def pretty_print_content(self):
+ """Pretty print XML content of this message."""
+ return lxml.etree.tostring(self.get_content(), pretty_print = True, encoding = self.encoding, xml_declaration = True)
+
+ def schema_check(self):
+ """
+ Handle XML RelaxNG schema check.
+ """
+ try:
+ self.schema.assertValid(self.get_content())
+ except lxml.etree.DocumentInvalid:
+ rpki.log.error("PDU failed schema check")
+ for line in self.pretty_print_content().splitlines():
+ rpki.log.warn(line)
+ raise
+
+ def dump_to_disk(self, prefix):
+ """
+ Write DER of current message to disk, for debugging.
+ """
+ f = open(prefix + rpki.sundial.now().isoformat() + "Z.cms", "wb")
+ f.write(self.get_DER())
+ f.close()
+
+ def wrap(self, msg, keypair, certs, crls = None):
+ """
+ Wrap an XML PDU in CMS and return its DER encoding.
+ """
+ rpki.log.trace()
+ self.set_content(msg.toXML())
+ self.schema_check()
+ self.sign(keypair, certs, crls)
+ if self.dump_outbound_cms:
+ self.dump_outbound_cms.dump(self)
+ return self.get_DER()
+
+ def unwrap(self, ta):
+ """
+ Unwrap a CMS-wrapped XML PDU and return Python objects.
+ """
+ if self.dump_inbound_cms:
+ self.dump_inbound_cms.dump(self)
+ self.verify(ta)
+ self.schema_check()
+ return self.saxify(self.get_content())
+
+class CRL(DER_object):
+ """
+ Class to hold a Certificate Revocation List.
+ """
+
+ formats = ("DER", "POW", "POWpkix")
+ pem_converter = PEM_converter("X509 CRL")
+
+ def get_DER(self):
+ """
+ Get the DER value of this CRL.
+ """
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ if self.POWpkix:
+ self.DER = self.POWpkix.toString()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError, "No conversion path to DER available"
+
+ def get_POW(self):
+ """
+ Get the POW value of this CRL.
+ """
+ self.check()
+ if not self.POW:
+ self.POW = POW.derRead(POW.X509_CRL, self.get_DER())
+ return self.POW
+
+ def get_POWpkix(self):
+ """
+ Get the POW.pkix value of this CRL.
+ """
+ self.check()
+ if not self.POWpkix:
+ crl = POW.pkix.CertificateList()
+ crl.fromString(self.get_DER())
+ self.POWpkix = crl
+ return self.POWpkix
+
+ def getThisUpdate(self):
+ """Get thisUpdate value from this CRL."""
+ return rpki.sundial.datetime.fromASN1tuple(self.get_POWpkix().getThisUpdate())
+
+ def getNextUpdate(self):
+ """Get nextUpdate value from this CRL."""
+ return rpki.sundial.datetime.fromASN1tuple(self.get_POWpkix().getNextUpdate())
+
+ def getIssuer(self):
+ """Get issuer value of this CRL."""
+ return self.get_POW().getIssuer()
+
+ @classmethod
+ def generate(cls, keypair, issuer, serial, thisUpdate, nextUpdate, revokedCertificates, version = 1, digestType = "sha256WithRSAEncryption"):
+ """
+ Generate a new CRL.
+ """
+ crl = POW.pkix.CertificateList()
+ crl.setVersion(version)
+ crl.setIssuer(issuer.get_POWpkix().getSubject())
+ crl.setThisUpdate(thisUpdate.toASN1tuple())
+ crl.setNextUpdate(nextUpdate.toASN1tuple())
+ if revokedCertificates:
+ crl.setRevokedCertificates(revokedCertificates)
+ crl.setExtensions(
+ ((rpki.oids.name2oid["authorityKeyIdentifier"], False, (issuer.get_SKI(), (), None)),
+ (rpki.oids.name2oid["cRLNumber"], False, serial)))
+ crl.sign(keypair.get_POW(), digestType)
+ return cls(POWpkix = crl)
diff --git a/rpkid.without_tls/rpki/xml_utils.py b/rpkid.without_tls/rpki/xml_utils.py
new file mode 100644
index 00000000..4933cda9
--- /dev/null
+++ b/rpkid.without_tls/rpki/xml_utils.py
@@ -0,0 +1,470 @@
+"""
+XML utilities.
+
+$Id$
+
+Copyright (C) 2009 Internet Systems Consortium ("ISC")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+"""
+
+import xml.sax, lxml.sax, lxml.etree, base64
+import rpki.exceptions
+
+class sax_handler(xml.sax.handler.ContentHandler):
+ """
+ SAX handler for RPKI protocols.
+
+ This class provides some basic amenities for parsing protocol XML of
+ the kind we use in the RPKI protocols, including whacking all the
+ protocol element text into US-ASCII, simplifying accumulation of
+ text fields, and hiding some of the fun relating to XML namespaces.
+
+ General assumption: by the time this parsing code gets invoked, the
+ XML has already passed RelaxNG validation, so we only have to check
+ for errors that the schema can't catch, and we don't have to play as
+ many XML namespace games.
+ """
+
+ def __init__(self):
+ """
+ Initialize SAX handler.
+ """
+ xml.sax.handler.ContentHandler.__init__(self)
+ self.text = ""
+ self.stack = []
+
+ def startElementNS(self, name, qname, attrs):
+ """Redirect startElementNS() events to startElement()."""
+ return self.startElement(name[1], attrs)
+
+ def endElementNS(self, name, qname):
+ """Redirect endElementNS() events to endElement()."""
+ return self.endElement(name[1])
+
+ def characters(self, content):
+ """Accumulate a chuck of element content (text)."""
+ self.text += content
+
+ def startElement(self, name, attrs):
+ """
+ Handle startElement() events.
+
+ We maintain a stack of nested elements under construction so that
+ we can feed events directly to the current element rather than
+ having to pass them through all the nesting elements.
+
+ If the stack is empty, this event is for the outermost element, so
+ we call a virtual method to create the corresponding object and
+ that's the object we'll be returning as our final result.
+ """
+
+ a = dict()
+ for k, v in attrs.items():
+ if isinstance(k, tuple):
+ if k == ("http://www.w3.org/XML/1998/namespace", "lang"):
+ k = "xml:lang"
+ else:
+ assert k[0] is None
+ k = k[1]
+ a[k.encode("ascii")] = v.encode("ascii")
+ if len(self.stack) == 0:
+ assert not hasattr(self, "result")
+ self.result = self.create_top_level(name, a)
+ self.stack.append(self.result)
+ self.stack[-1].startElement(self.stack, name, a)
+
+ def endElement(self, name):
+ """
+ Handle endElement() events. Mostly this means handling any
+ accumulated element text.
+ """
+ text = self.text.encode("ascii").strip()
+ self.text = ""
+ self.stack[-1].endElement(self.stack, name, text)
+
+ @classmethod
+ def saxify(cls, elt):
+ """
+ Create a one-off SAX parser, parse an ETree, return the result.
+ """
+ self = cls()
+ lxml.sax.saxify(elt, self)
+ return self.result
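+
+  # Usage sketch (illustrative only): protocol-specific subclasses (e.g.
+  # rpki.up_down.sax_handler) set the pdu, name, and version class
+  # attributes, after which parsing an already-validated element tree is:
+  #
+  #   result = rpki.up_down.sax_handler.saxify(some_etree_element)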
+
+ def create_top_level(self, name, attrs):
+ """
+ Handle top-level PDU for this protocol.
+ """
+ assert name == self.name and attrs["version"] == self.version
+ return self.pdu()
+
+class base_elt(object):
+ """
+ Virtual base class for XML message elements. The left-right and
+ publication protocols use this. At least for now, the up-down
+ protocol does not, due to different design assumptions.
+ """
+
+ ## @var attributes
+ # XML attributes for this element.
+ attributes = ()
+
+ ## @var elements
+ # XML elements contained by this element.
+ elements = ()
+
+ ## @var booleans
+ # Boolean attributes (value "yes" or "no") for this element.
+ booleans = ()
+
+ def startElement(self, stack, name, attrs):
+ """
+ Default startElement() handler: just process attributes.
+ """
+ if name not in self.elements:
+ assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
+ self.read_attrs(attrs)
+
+ def endElement(self, stack, name, text):
+ """
+ Default endElement() handler: just pop the stack.
+ """
+ assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
+ stack.pop()
+
+ def toXML(self):
+ """
+ Default toXML() element generator.
+ """
+ return self.make_elt()
+
+ def read_attrs(self, attrs):
+ """
+ Template-driven attribute reader.
+ """
+ for key in self.attributes:
+ val = attrs.get(key, None)
+ if isinstance(val, str) and val.isdigit() and not key.endswith("_handle"):
+ val = long(val)
+ setattr(self, key, val)
+ for key in self.booleans:
+ setattr(self, key, attrs.get(key, False))
+
+ def make_elt(self):
+ """
+ XML element constructor.
+ """
+ elt = lxml.etree.Element("{%s}%s" % (self.xmlns, self.element_name), nsmap = self.nsmap)
+ for key in self.attributes:
+ val = getattr(self, key, None)
+ if val is not None:
+ elt.set(key, str(val))
+ for key in self.booleans:
+ if getattr(self, key, False):
+ elt.set(key, "yes")
+ return elt
+
+ def make_b64elt(self, elt, name, value):
+ """
+ Constructor for Base64-encoded subelement.
+ """
+ if value is not None and not value.empty():
+ lxml.etree.SubElement(elt, "{%s}%s" % (self.xmlns, name), nsmap = self.nsmap).text = value.get_Base64()
+
+ def __str__(self):
+ """
+ Convert a base_elt object to string format.
+ """
+    return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "us-ascii")
+
+ @classmethod
+ def make_pdu(cls, **kargs):
+ """
+ Generic PDU constructor.
+ """
+ self = cls()
+ for k, v in kargs.items():
+ if isinstance(v, bool):
+ v = 1 if v else 0
+ setattr(self, k, v)
+ return self
+
+class text_elt(base_elt):
+ """
+ Virtual base class for XML message elements that contain text.
+ """
+
+ ## @var text_attribute
+ # Name of the class attribute that holds the text value.
+ text_attribute = None
+
+ def endElement(self, stack, name, text):
+ """
+ Extract text from parsed XML.
+ """
+ base_elt.endElement(self, stack, name, text)
+ setattr(self, self.text_attribute, text)
+
+ def toXML(self):
+ """
+ Insert text into generated XML.
+ """
+ elt = self.make_elt()
+ elt.text = getattr(self, self.text_attribute) or None
+ return elt
+
+class data_elt(base_elt):
+ """
+ Virtual base class for PDUs that map to SQL objects. These objects
+ all implement the create/set/get/list/destroy action attribute.
+ """
+
+ def endElement(self, stack, name, text):
+ """
+ Default endElement handler for SQL-based objects. This assumes
+ that sub-elements are Base64-encoded using the sql_template
+ mechanism.
+ """
+ if name in self.elements:
+ elt_type = self.sql_template.map.get(name)
+ assert elt_type is not None, "Couldn't find element type for %s, stack %s" % (name, stack)
+ setattr(self, name, elt_type(Base64 = text))
+ else:
+ assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
+ stack.pop()
+
+ def toXML(self):
+ """
+ Default element generator for SQL-based objects. This assumes
+ that sub-elements are Base64-encoded DER objects.
+ """
+ elt = self.make_elt()
+ for i in self.elements:
+ self.make_b64elt(elt, i, getattr(self, i, None))
+ return elt
+
+ def make_reply(self, r_pdu = None):
+ """
+ Construct a reply PDU.
+ """
+ if r_pdu is None:
+ r_pdu = self.__class__()
+ self.make_reply_clone_hook(r_pdu)
+ handle_name = self.element_name + "_handle"
+ setattr(r_pdu, handle_name, getattr(self, handle_name, None))
+ else:
+ self.make_reply_clone_hook(r_pdu)
+ for b in r_pdu.booleans:
+ setattr(r_pdu, b, False)
+ r_pdu.action = self.action
+ r_pdu.tag = self.tag
+ return r_pdu
+
+ def make_reply_clone_hook(self, r_pdu):
+ """Overridable hook."""
+ pass
+
+ def serve_fetch_one(self):
+ """
+ Find the object on which a get, set, or destroy method should
+ operate.
+ """
+ r = self.serve_fetch_one_maybe()
+ if r is None:
+ raise rpki.exceptions.NotFound
+ return r
+
+ def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
+ """Overridable hook."""
+ cb()
+
+ def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
+ """Overridable hook."""
+ cb()
+
+ def serve_create(self, r_msg, cb, eb):
+ """
+ Handle a create action.
+ """
+
+ r_pdu = self.make_reply()
+
+ def one():
+ self.sql_store()
+ setattr(r_pdu, self.sql_template.index, getattr(self, self.sql_template.index))
+ self.serve_post_save_hook(self, r_pdu, two, eb)
+
+ def two():
+ r_msg.append(r_pdu)
+ cb()
+
+ oops = self.serve_fetch_one_maybe()
+ if oops is not None:
+ raise rpki.exceptions.DuplicateObject, "Object already exists: %r[%r] %r[%r]" % (self, getattr(self, self.element_name + "_handle"),
+ oops, getattr(oops, oops.element_name + "_handle"))
+
+ self.serve_pre_save_hook(self, r_pdu, one, eb)
+
+ def serve_set(self, r_msg, cb, eb):
+ """
+ Handle a set action.
+ """
+
+ db_pdu = self.serve_fetch_one()
+ r_pdu = self.make_reply()
+ for a in db_pdu.sql_template.columns[1:]:
+ v = getattr(self, a, None)
+ if v is not None:
+ setattr(db_pdu, a, v)
+ db_pdu.sql_mark_dirty()
+
+ def one():
+ db_pdu.sql_store()
+ db_pdu.serve_post_save_hook(self, r_pdu, two, eb)
+
+ def two():
+ r_msg.append(r_pdu)
+ cb()
+
+ db_pdu.serve_pre_save_hook(self, r_pdu, one, eb)
+
+ def serve_get(self, r_msg, cb, eb):
+ """
+ Handle a get action.
+ """
+ r_pdu = self.serve_fetch_one()
+ self.make_reply(r_pdu)
+ r_msg.append(r_pdu)
+ cb()
+
+ def serve_list(self, r_msg, cb, eb):
+ """
+ Handle a list action for non-self objects.
+ """
+ for r_pdu in self.serve_fetch_all():
+ self.make_reply(r_pdu)
+ r_msg.append(r_pdu)
+ cb()
+
+ def serve_destroy_hook(self, cb, eb):
+ """
+ Overridable hook.
+ """
+ cb()
+
+ def serve_destroy(self, r_msg, cb, eb):
+ """
+ Handle a destroy action.
+ """
+ def done():
+ db_pdu.sql_delete()
+ r_msg.append(self.make_reply())
+ cb()
+ db_pdu = self.serve_fetch_one()
+ db_pdu.serve_destroy_hook(done, eb)
+
+ def serve_dispatch(self, r_msg, cb, eb):
+ """
+ Action dispatch handler.
+ """
+ dispatch = { "create" : self.serve_create,
+ "set" : self.serve_set,
+ "get" : self.serve_get,
+ "list" : self.serve_list,
+ "destroy" : self.serve_destroy }
+ if self.action not in dispatch:
+ raise rpki.exceptions.BadQuery, "Unexpected query: action %s" % self.action
+ dispatch[self.action](r_msg, cb, eb)
+
+ def unimplemented_control(self, *controls):
+ """
+ Uniform handling for unimplemented control operations.
+ """
+ unimplemented = [x for x in controls if getattr(self, x, False)]
+ if unimplemented:
+ raise rpki.exceptions.NotImplementedYet, "Unimplemented control %s" % ", ".join(unimplemented)
+
+class msg(list):
+ """
+ Generic top-level PDU.
+ """
+
+ def startElement(self, stack, name, attrs):
+ """
+ Handle top-level PDU.
+ """
+ if name == "msg":
+ assert self.version == int(attrs["version"])
+ self.type = attrs["type"]
+ else:
+ elt = self.pdus[name]()
+ self.append(elt)
+ stack.append(elt)
+ elt.startElement(stack, name, attrs)
+
+ def endElement(self, stack, name, text):
+ """
+ Handle top-level PDU.
+ """
+ assert name == "msg", "Unexpected name %s, stack %s" % (name, stack)
+ assert len(stack) == 1
+ stack.pop()
+
+ def __str__(self):
+ """Convert msg object to string."""
+    return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "us-ascii")
+
+ def toXML(self):
+ """
+ Generate top-level PDU.
+ """
+ elt = lxml.etree.Element("{%s}msg" % (self.xmlns), nsmap = self.nsmap, version = str(self.version), type = self.type)
+ elt.extend([i.toXML() for i in self])
+ return elt
+
+ @classmethod
+ def query(cls, *args):
+ """Create a query PDU."""
+ self = cls(args)
+ self.type = "query"
+ return self
+
+ @classmethod
+ def reply(cls, *args):
+ """Create a reply PDU."""
+ self = cls(args)
+ self.type = "reply"
+ return self
+
+ def is_query(self):
+ """Is this msg a query?"""
+ return self.type == "query"
+
+ def is_reply(self):
+ """Is this msg a reply?"""
+ return self.type == "reply"