aboutsummaryrefslogtreecommitdiff
path: root/rpki
diff options
context:
space:
mode:
Diffstat (limited to 'rpki')
-rw-r--r--rpki/POW/__init__.py192
-rw-r--r--rpki/adns.py590
-rw-r--r--rpki/async.py433
-rw-r--r--rpki/cli.py423
-rw-r--r--rpki/config.py833
-rw-r--r--rpki/csv_utils.py164
-rw-r--r--rpki/daemonize.py89
-rw-r--r--rpki/django_settings/__init__.py (renamed from rpki/gui/cacheview/__init__.py)0
-rw-r--r--rpki/django_settings/common.py125
-rw-r--r--rpki/django_settings/gui.py159
-rw-r--r--rpki/django_settings/irdb.py47
-rw-r--r--rpki/django_settings/pubd.py45
-rw-r--r--rpki/django_settings/rcynic.py68
-rw-r--r--rpki/django_settings/rpkid.py45
-rw-r--r--rpki/exceptions.py300
-rw-r--r--rpki/fields.py205
-rw-r--r--rpki/gui/app/check_expired.py53
-rw-r--r--rpki/gui/app/forms.py194
-rw-r--r--rpki/gui/app/glue.py88
-rw-r--r--rpki/gui/app/migrations/0001_initial.py439
-rw-r--r--rpki/gui/app/models.py9
-rwxr-xr-xrpki/gui/app/range_list.py2
-rw-r--r--rpki/gui/app/south_migrations/0001_initial.py192
-rw-r--r--rpki/gui/app/south_migrations/0002_auto__add_field_resourcecert_conf.py (renamed from rpki/gui/app/migrations/0002_auto__add_field_resourcecert_conf.py)0
-rw-r--r--rpki/gui/app/south_migrations/0003_set_conf_from_parent.py (renamed from rpki/gui/app/migrations/0003_set_conf_from_parent.py)0
-rw-r--r--rpki/gui/app/south_migrations/0004_auto__chg_field_resourcecert_conf.py (renamed from rpki/gui/app/migrations/0004_auto__chg_field_resourcecert_conf.py)0
-rw-r--r--rpki/gui/app/south_migrations/0005_auto__chg_field_resourcecert_parent.py (renamed from rpki/gui/app/migrations/0005_auto__chg_field_resourcecert_parent.py)0
-rw-r--r--rpki/gui/app/south_migrations/0006_add_conf_acl.py (renamed from rpki/gui/app/migrations/0006_add_conf_acl.py)0
-rw-r--r--rpki/gui/app/south_migrations/0007_default_acls.py (renamed from rpki/gui/app/migrations/0007_default_acls.py)0
-rw-r--r--rpki/gui/app/south_migrations/0008_add_alerts.py (renamed from rpki/gui/app/migrations/0008_add_alerts.py)0
-rw-r--r--rpki/gui/app/south_migrations/__init__.py0
-rw-r--r--rpki/gui/app/views.py135
-rw-r--r--rpki/gui/cacheview/forms.py51
-rw-r--r--rpki/gui/cacheview/misc.py31
-rw-r--r--rpki/gui/cacheview/templates/cacheview/addressrange_detail.html18
-rw-r--r--rpki/gui/cacheview/templates/cacheview/cacheview_base.html10
-rw-r--r--rpki/gui/cacheview/templates/cacheview/cert_detail.html105
-rw-r--r--rpki/gui/cacheview/templates/cacheview/ghostbuster_detail.html13
-rw-r--r--rpki/gui/cacheview/templates/cacheview/global_summary.html26
-rw-r--r--rpki/gui/cacheview/templates/cacheview/query_result.html21
-rw-r--r--rpki/gui/cacheview/templates/cacheview/roa_detail.html18
-rw-r--r--rpki/gui/cacheview/templates/cacheview/search_form.html17
-rw-r--r--rpki/gui/cacheview/templates/cacheview/search_result.html42
-rw-r--r--rpki/gui/cacheview/templates/cacheview/signedobject_detail.html58
-rw-r--r--rpki/gui/cacheview/tests.py23
-rw-r--r--rpki/gui/cacheview/urls.py32
-rw-r--r--rpki/gui/cacheview/util.py441
-rw-r--r--rpki/gui/cacheview/views.py172
-rw-r--r--rpki/gui/default_settings.py188
-rw-r--r--rpki/gui/gui_rpki_cache/__init__.py0
-rw-r--r--rpki/gui/gui_rpki_cache/migrations/0001_initial.py136
-rw-r--r--rpki/gui/gui_rpki_cache/migrations/0002_auto_20160411_2311.py41
-rw-r--r--rpki/gui/gui_rpki_cache/migrations/0003_auto_20160420_2146.py24
-rw-r--r--rpki/gui/gui_rpki_cache/migrations/__init__.py0
-rw-r--r--rpki/gui/gui_rpki_cache/models.py (renamed from rpki/gui/cacheview/models.py)121
-rw-r--r--rpki/gui/gui_rpki_cache/util.py308
-rw-r--r--rpki/gui/models.py110
-rw-r--r--rpki/gui/routeview/api.py2
-rw-r--r--rpki/gui/routeview/models.py8
-rw-r--r--rpki/gui/routeview/util.py56
-rw-r--r--rpki/gui/script_util.py43
-rw-r--r--rpki/gui/urls.py3
-rw-r--r--rpki/http.py1058
-rw-r--r--rpki/http_simple.py138
-rw-r--r--rpki/ipaddrs.py151
-rw-r--r--rpki/irdb/__init__.py3
-rw-r--r--rpki/irdb/migrations/0001_initial.py362
-rw-r--r--rpki/irdb/migrations/__init__.py0
-rw-r--r--rpki/irdb/models.py854
-rw-r--r--rpki/irdb/router.py131
-rw-r--r--rpki/irdb/zookeeper.py2936
-rw-r--r--rpki/irdbd.py444
-rw-r--r--rpki/left_right.py1291
-rw-r--r--rpki/log.py266
-rw-r--r--rpki/myrpki.py4
-rw-r--r--rpki/mysql_import.py8
-rw-r--r--rpki/oids.py26
-rw-r--r--rpki/old_irdbd.py497
-rw-r--r--rpki/pubd.py350
-rw-r--r--rpki/pubdb/__init__.py0
-rw-r--r--rpki/pubdb/migrations/0001_initial.py69
-rw-r--r--rpki/pubdb/migrations/0002_auto_20160221_0617.py22
-rw-r--r--rpki/pubdb/migrations/0003_remove_delta_xml.py18
-rw-r--r--rpki/pubdb/migrations/__init__.py0
-rw-r--r--rpki/pubdb/models.py329
-rw-r--r--rpki/publication.py484
-rw-r--r--rpki/publication_control.py74
-rw-r--r--rpki/rcynic.py447
-rw-r--r--rpki/rcynicdb/__init__.py0
-rw-r--r--rpki/rcynicdb/iterator.py49
-rw-r--r--rpki/rcynicdb/migrations/0001_initial.py58
-rw-r--r--rpki/rcynicdb/migrations/0002_auto_20160227_2003.py29
-rw-r--r--rpki/rcynicdb/migrations/0003_auto_20160301_0333.py24
-rw-r--r--rpki/rcynicdb/migrations/__init__.py0
-rw-r--r--rpki/rcynicdb/models.py81
-rw-r--r--rpki/relaxng.py983
-rw-r--r--rpki/relaxng_parser.py32
-rw-r--r--rpki/resource_set.py1948
-rw-r--r--rpki/rootd.py757
-rw-r--r--rpki/rpkic.py1549
-rw-r--r--rpki/rpkid.py2990
-rw-r--r--rpki/rpkid_tasks.py1265
-rw-r--r--rpki/rpkidb/__init__.py3
-rw-r--r--rpki/rpkidb/migrations/0001_initial.py222
-rw-r--r--rpki/rpkidb/migrations/0002_root.py29
-rw-r--r--rpki/rpkidb/migrations/__init__.py0
-rw-r--r--rpki/rpkidb/models.py2466
-rwxr-xr-xrpki/rtr/bgpdump.py485
-rw-r--r--rpki/rtr/channels.py366
-rw-r--r--rpki/rtr/client.py816
-rw-r--r--rpki/rtr/generator.py959
-rw-r--r--rpki/rtr/main.py85
-rw-r--r--rpki/rtr/pdus.py990
-rw-r--r--rpki/rtr/server.py874
-rw-r--r--rpki/sql.py437
-rw-r--r--rpki/sql_schemas.py319
-rw-r--r--rpki/sundial.py459
-rw-r--r--rpki/up_down.py745
-rw-r--r--rpki/x509.py3494
-rw-r--r--rpki/xml_utils.py494
120 files changed, 19146 insertions, 20772 deletions
diff --git a/rpki/POW/__init__.py b/rpki/POW/__init__.py
index a9371553..b6f15a39 100644
--- a/rpki/POW/__init__.py
+++ b/rpki/POW/__init__.py
@@ -17,13 +17,197 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# pylint: disable=W0622,W0401
+# pylint: disable=W0401,W0622
-from rpki.POW._POW import *
-from rpki.POW._POW import __doc__
+from ._POW import *
+from ._POW import __doc__
-# Set callback to let POW construct rpki.sundial.datetime objects
+
+# Set callback to let POW construct rpki.sundial.datetime objects.
from rpki.sundial import datetime as sundial_datetime
customDatetime(sundial_datetime)
del sundial_datetime
+
+
+# Status code mechanism, (mostly) moved out of POW.c.
+
+class StatusCode(object):
+
+ def __init__(self, name, text, kind, code = None):
+ assert code is None or isinstance(code, int)
+ assert kind in ("good", "bad", "warn")
+ self.code = code
+ self.name = name
+ self.text = text
+ self.kind = kind
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return "<StatusCode object \"{}\" at {}>".format(self.text, id(self))
+
+ def __hash__(self):
+ return hash(self.name)
+
+ def __cmp__(self, other):
+ return cmp(self.name, str(other))
+
+
+class StatusCodeDB(object):
+
+ def __init__(self, bad, warn, good, verification_errors):
+ self._map = dict((name, StatusCode(code = code, name = name, text = text,
+ kind = "bad" if code != 0 else "good"))
+ for code, name, text in verification_errors)
+ self._map.update((k, StatusCode(name = k, text = v, kind = "bad"))
+ for k, v in bad.iteritems())
+ self._map.update((k, StatusCode(name = k, text = v, kind = "warn"))
+ for k, v in warn.iteritems())
+ self._map.update((k, StatusCode(name = k, text = v, kind = "good"))
+ for k, v in good.iteritems())
+ for k, v in self._map.iteritems():
+ setattr(self, k, v)
+ self._map.update((s.code, s) for s in self._map.values() if s.code is not None)
+
+ def all(self):
+ return set(self._map.itervalues())
+
+ def normalize(self, status):
+ for s in [s for s in status if isinstance(s, (int, str)) and s in self._map]:
+ status.remove(s)
+ status.add(self._map[s])
+
+ def find(self, code):
+ return self._map[code]
+
+
+validation_status = StatusCodeDB(
+ bad = dict(
+ AIA_EXTENSION_MISSING = "AIA extension missing",
+ AIA_EXTENSION_FORBIDDEN = "AIA extension forbidden",
+ AIA_URI_MISSING = "AIA URI missing",
+ AKI_EXTENSION_ISSUER_MISMATCH = "AKI extension issuer mismatch",
+ AKI_EXTENSION_MISSING = "AKI extension missing",
+ AKI_EXTENSION_WRONG_FORMAT = "AKI extension is wrong format",
+ BAD_ASIDENTIFIERS = "Bad ASIdentifiers extension",
+ BAD_CERTIFICATE_POLICY = "Bad certificate policy",
+ BAD_CMS_ECONTENTTYPE = "Bad CMS eContentType",
+ BAD_CMS_SI_CONTENTTYPE = "Bad CMS SI ContentType",
+ BAD_CMS_SIGNER = "Bad CMS signer",
+ BAD_CMS_SIGNER_INFOS = "Bad CMS signerInfos",
+ BAD_CRL = "Bad CRL",
+ BAD_IPADDRBLOCKS = "Bad IPAddrBlocks extension",
+ BAD_KEY_USAGE = "Bad keyUsage",
+ BAD_MANIFEST_DIGEST_LENGTH = "Bad manifest digest length",
+ BAD_PUBLIC_KEY = "Bad public key",
+ BAD_ROA_ASID = "Bad ROA asID",
+ BAD_CERTIFICATE_SERIAL_NUMBER = "Bad certificate serialNumber",
+ BAD_MANIFEST_NUMBER = "Bad manifestNumber",
+ CERTIFICATE_BAD_SIGNATURE = "Bad certificate signature",
+ CERTIFICATE_FAILED_VALIDATION = "Certificate failed validation",
+ CMS_ECONTENT_DECODE_ERROR = "CMS eContent decode error",
+ CMS_INCLUDES_CRLS = "CMS includes CRLs",
+ CMS_SIGNER_MISSING = "CMS signer missing",
+ CMS_SKI_MISMATCH = "CMS SKI mismatch",
+ CMS_VALIDATION_FAILURE = "CMS validation failure",
+ CRL_ISSUER_NAME_MISMATCH = "CRL issuer name mismatch",
+ CRL_NOT_IN_MANIFEST = "CRL not listed in manifest",
+ CRL_NOT_YET_VALID = "CRL not yet valid",
+ CRL_NUMBER_EXTENSION_MISSING = "CRL number extension missing",
+ CRL_NUMBER_IS_NEGATIVE = "CRL number is negative",
+ CRL_NUMBER_OUT_OF_RANGE = "CRL number out of range",
+ CRLDP_DOESNT_MATCH_ISSUER_SIA = "CRLDP doesn't match issuer's SIA",
+ CRLDP_EXTENSION_FORBIDDEN = "CRLDP extension forbidden",
+ CRLDP_EXTENSION_MISSING = "CRLDP extension missing",
+ CRLDP_URI_MISSING = "CRLDP URI missing",
+ DISALLOWED_X509V3_EXTENSION = "Disallowed X.509v3 extension",
+ DUPLICATE_NAME_IN_MANIFEST = "Duplicate name in manifest",
+ INAPPROPRIATE_EKU_EXTENSION = "Inappropriate EKU extension",
+ MALFORMED_AIA_EXTENSION = "Malformed AIA extension",
+ MALFORMED_SIA_EXTENSION = "Malformed SIA extension",
+ MALFORMED_BASIC_CONSTRAINTS = "Malformed basicConstraints",
+ MALFORMED_TRUST_ANCHOR = "Malformed trust anchor",
+ MALFORMED_CADIRECTORY_URI = "Malformed caDirectory URI",
+ MALFORMED_CRLDP_EXTENSION = "Malformed CRDLP extension",
+ MALFORMED_CRLDP_URI = "Malformed CRDLP URI",
+ MALFORMED_ROA_ADDRESSFAMILY = "Malformed ROA addressFamily",
+ MALFORMED_TAL_URI = "Malformed TAL URI",
+ MANIFEST_CAREPOSITORY_MISMATCH = "Manifest caRepository mismatch",
+ MANIFEST_INTERVAL_OVERRUNS_CERT = "Manifest interval overruns certificate",
+ MANIFEST_LISTS_MISSING_OBJECT = "Manifest lists missing object",
+ MANIFEST_NOT_YET_VALID = "Manifest not yet valid",
+ MANIFEST_EE_REVOKED = "Manifest EE certificate revoked",
+ MISSING_RESOURCES = "Missing resources",
+ NONCONFORMANT_ASN1_TIME_VALUE = "Nonconformant ASN.1 time value",
+ NONCONFORMANT_PUBLIC_KEY_ALGORITHM = "Nonconformant public key algorithm",
+ NONCONFORMANT_SIGNATURE_ALGORITHM = "Nonconformant signature algorithm",
+ NONCONFORMANT_DIGEST_ALGORITHM = "Nonconformant digest algorithm",
+ NONCONFORMANT_CERTIFICATE_UID = "Nonconformant certificate UID",
+ OBJECT_REJECTED = "Object rejected",
+ RFC3779_INHERITANCE_REQUIRED = "RFC 3779 inheritance required",
+ ROA_CONTAINS_BAD_AFI_VALUE = "ROA contains bad AFI value",
+ ROA_MAX_PREFIXLEN_TOO_SHORT = "ROA maxPrefixlen too short",
+ ROA_RESOURCE_NOT_IN_EE = "ROA resource not in EE",
+ ROA_RESOURCES_MALFORMED = "ROA resources malformed",
+ RSYNC_TRANSFER_FAILED = "rsync transfer failed",
+ RSYNC_TRANSFER_TIMED_OUT = "rsync transfer timed out",
+ SAFI_NOT_ALLOWED = "SAFI not allowed",
+ SIA_CADIRECTORY_URI_MISSING = "SIA caDirectory URI missing",
+ SIA_EXTENSION_FORBIDDEN = "SIA extension forbidden",
+ SIA_EXTENSION_MISSING = "SIA extension missing",
+ SIA_MANIFEST_URI_MISSING = "SIA manifest URI missing",
+ SKI_EXTENSION_MISSING = "SKI extension missing",
+ SKI_PUBLIC_KEY_MISMATCH = "SKI public key mismatch",
+ TRUST_ANCHOR_KEY_MISMATCH = "Trust anchor key mismatch",
+ TRUST_ANCHOR_WITH_CRLDP = "Trust anchor can't have CRLDP",
+ UNKNOWN_AFI = "Unknown AFI",
+ UNKNOWN_OPENSSL_VERIFY_ERROR = "Unknown OpenSSL verify error",
+ UNREADABLE_OBJECT = "Unreadable object",
+ UNREADABLE_TRUST_ANCHOR = "Unreadable trust anchor",
+ UNREADABLE_TRUST_ANCHOR_LOCATOR = "Unreadable trust anchor locator",
+ WRONG_OBJECT_VERSION = "Wrong object version",
+ OBJECT_NOT_FOUND = "Object not found",
+ KEY_USAGE_MISSING = "Key usage missing"),
+
+ warn = dict(
+ AIA_DOESNT_MATCH_ISSUER = "AIA doesn't match issuer",
+ BACKUP_THISUPDATE_NEWER_THAN_CURRENT = "Backup thisUpdate newer than current",
+ BACKUP_NUMBER_HIGHER_THAN_CURRENT = "Backup number higher than current",
+ BAD_THISUPDATE = "Bad CRL thisUpdate",
+ BAD_CMS_SI_SIGNED_ATTRIBUTES = "Bad CMS SI signed attributes",
+ BAD_SIGNED_OBJECT_URI = "Bad signedObject URI",
+ CRLDP_NAMES_NEWER_CRL = "CRLDP names newer CRL",
+ DIGEST_MISMATCH = "Digest mismatch",
+ EE_CERTIFICATE_WITH_1024_BIT_KEY = "EE certificate with 1024 bit key",
+ GRATUITOUSLY_CRITICAL_EXTENSION = "Gratuitously critical extension",
+ INAPPROPRIATE_OBJECT_TYPE_SKIPPED = "Inappropriate object type skipped",
+ ISSUER_USES_MULTIPLE_CRLDP_VALUES = "Issuer uses multiple CRLDP values",
+ MULTIPLE_RSYNC_URIS_IN_EXTENSION = "Multiple rsync URIs in extension",
+ NONCONFORMANT_ISSUER_NAME = "Nonconformant X.509 issuer name",
+ NONCONFORMANT_SUBJECT_NAME = "Nonconformant X.509 subject name",
+ POLICY_QUALIFIER_CPS = "Policy Qualifier CPS",
+ RSYNC_PARTIAL_TRANSFER = "rsync partial transfer",
+ RSYNC_TRANSFER_SKIPPED = "rsync transfer skipped",
+ SIA_EXTENSION_MISSING_FROM_EE = "SIA extension missing from EE",
+ SKIPPED_BECAUSE_NOT_IN_MANIFEST = "Skipped because not in manifest",
+ STALE_CRL_OR_MANIFEST = "Stale CRL or manifest",
+ TAINTED_BY_STALE_CRL = "Tainted by stale CRL",
+ TAINTED_BY_STALE_MANIFEST = "Tainted by stale manifest",
+ TAINTED_BY_NOT_BEING_IN_MANIFEST = "Tainted by not being in manifest",
+ TRUST_ANCHOR_NOT_SELF_SIGNED = "Trust anchor not self-signed",
+ TRUST_ANCHOR_SKIPPED = "Trust anchor skipped",
+ UNKNOWN_OBJECT_TYPE_SKIPPED = "Unknown object type skipped",
+ URI_TOO_LONG = "URI too long",
+ WRONG_CMS_SI_SIGNATURE_ALGORITHM = "Wrong CMS SI signature algorithm",
+ WRONG_CMS_SI_DIGEST_ALGORITHM = "Wrong CMS SI digest algorithm"),
+
+ good = dict(
+ NON_RSYNC_URI_IN_EXTENSION = "Non-rsync URI in extension",
+ OBJECT_ACCEPTED = "Object accepted",
+ RECHECKING_OBJECT = "Rechecking object",
+ RSYNC_TRANSFER_SUCCEEDED = "rsync transfer succeeded",
+ VALIDATION_OK = "OK"),
+
+ verification_errors = _POW.getVerificationErrors())
diff --git a/rpki/adns.py b/rpki/adns.py
index 968684b5..4f8cf7ea 100644
--- a/rpki/adns.py
+++ b/rpki/adns.py
@@ -22,24 +22,25 @@ Basic asynchronous DNS code, using asyncore and Bob Halley's excellent
dnspython package.
"""
+# pylint: skip-file
+
import sys
import time
import socket
import logging
import asyncore
-import rpki.async
import rpki.sundial
import rpki.log
try:
- import dns.resolver, dns.rdatatype, dns.rdataclass, dns.name, dns.message
- import dns.inet, dns.exception, dns.query, dns.rcode, dns.ipv4, dns.ipv6
+ import dns.resolver, dns.rdatatype, dns.rdataclass, dns.name, dns.message
+ import dns.inet, dns.exception, dns.query, dns.rcode, dns.ipv4, dns.ipv6
except ImportError:
- if __name__ == "__main__":
- sys.stderr.write("DNSPython not available, skipping rpki.adns unit test\n")
- sys.exit(0)
- else:
- raise
+ if __name__ == "__main__":
+ sys.stderr.write("DNSPython not available, skipping rpki.adns unit test\n")
+ sys.exit(0)
+ else:
+ raise
logger = logging.getLogger(__name__)
@@ -48,7 +49,7 @@ logger = logging.getLogger(__name__)
resolver = dns.resolver.Resolver()
if resolver.cache is None:
- resolver.cache = dns.resolver.Cache()
+ resolver.cache = dns.resolver.Cache()
## @var nameservers
# Nameservers from resolver.nameservers converted to (af, address)
@@ -59,313 +60,326 @@ if resolver.cache is None:
nameservers = []
for ns in resolver.nameservers:
- try:
- nameservers.append((socket.AF_INET, dns.ipv4.inet_aton(ns)))
- continue
- except Exception:
- pass
- try:
- nameservers.append((socket.AF_INET6, dns.ipv6.inet_aton(ns)))
- continue
- except Exception:
- pass
- logger.error("Couldn't parse nameserver address %r", ns)
+ try:
+ nameservers.append((socket.AF_INET, dns.ipv4.inet_aton(ns)))
+ continue
+ except:
+ pass
+ try:
+ nameservers.append((socket.AF_INET6, dns.ipv6.inet_aton(ns)))
+ continue
+ except:
+ pass
+ logger.error("Couldn't parse nameserver address %r", ns)
class dispatcher(asyncore.dispatcher):
- """
- Basic UDP socket reader for use with asyncore.
- """
-
- def __init__(self, cb, eb, af, bufsize = 65535):
- asyncore.dispatcher.__init__(self)
- self.cb = cb
- self.eb = eb
- self.af = af
- self.bufsize = bufsize
- self.create_socket(af, socket.SOCK_DGRAM)
-
- def handle_read(self):
"""
- Receive a packet, hand it off to query class callback.
+ Basic UDP socket reader for use with asyncore.
"""
- wire, from_address = self.recvfrom(self.bufsize)
- self.cb(self.af, from_address[0], from_address[1], wire)
- def handle_error(self):
- """
- Pass errors to query class errback.
- """
- self.eb(sys.exc_info()[1])
+ def __init__(self, cb, eb, af, bufsize = 65535):
+ asyncore.dispatcher.__init__(self)
+ self.cb = cb
+ self.eb = eb
+ self.af = af
+ self.bufsize = bufsize
+ self.create_socket(af, socket.SOCK_DGRAM)
- def handle_connect(self):
- """
- Quietly ignore UDP "connection" events.
- """
- pass
+ def handle_read(self):
+ """
+ Receive a packet, hand it off to query class callback.
+ """
- def writable(self):
- """
- We don't need to hear about UDP socket becoming writable.
- """
- return False
+ wire, from_address = self.recvfrom(self.bufsize)
+ self.cb(self.af, from_address[0], from_address[1], wire)
+ def handle_error(self):
+ """
+ Pass errors to query class errback.
+ """
-class query(object):
- """
- Simplified (no search paths) asynchronous adaptation of
- dns.resolver.Resolver.query() (q.v.).
- """
-
- def __init__(self, cb, eb, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
- if isinstance(qname, (str, unicode)):
- qname = dns.name.from_text(qname)
- if isinstance(qtype, str):
- qtype = dns.rdatatype.from_text(qtype)
- if isinstance(qclass, str):
- qclass = dns.rdataclass.from_text(qclass)
- assert qname.is_absolute()
- self.cb = cb
- self.eb = eb
- self.qname = qname
- self.qtype = qtype
- self.qclass = qclass
- self.start = time.time()
- rpki.async.event_defer(self.go)
-
- def go(self):
- """
- Start running the query. Check our cache before doing network
- query; if we find an answer there, just return it. Otherwise
- start the network query.
- """
- if resolver.cache:
- answer = resolver.cache.get((self.qname, self.qtype, self.qclass))
- else:
- answer = None
- if answer:
- self.cb(self, answer)
- else:
- self.timer = rpki.async.timer()
- self.sockets = {}
- self.request = dns.message.make_query(self.qname, self.qtype, self.qclass)
- if resolver.keyname is not None:
- self.request.use_tsig(resolver.keyring, resolver.keyname, resolver.keyalgorithm)
- self.request.use_edns(resolver.edns, resolver.ednsflags, resolver.payload)
- self.response = None
- self.backoff = 0.10
- self.nameservers = nameservers[:]
- self.loop1()
-
- def loop1(self):
- """
- Outer loop. If we haven't got a response yet and still have
- nameservers to check, start inner loop. Otherwise, we're done.
- """
- self.timer.cancel()
- if self.response is None and self.nameservers:
- self.iterator = rpki.async.iterator(self.nameservers[:], self.loop2, self.done2)
- else:
- self.done1()
+ self.eb(sys.exc_info()[1])
- def loop2(self, iterator, nameserver):
- """
- Inner loop. Send query to next nameserver in our list, unless
- we've hit the overall timeout for this query.
- """
- self.timer.cancel()
- try:
- timeout = resolver._compute_timeout(self.start)
- except dns.resolver.Timeout, e:
- self.lose(e)
- else:
- af, addr = nameserver
- if af not in self.sockets:
- self.sockets[af] = dispatcher(self.socket_cb, self.socket_eb, af)
- self.sockets[af].sendto(self.request.to_wire(),
- (dns.inet.inet_ntop(af, addr), resolver.port))
- self.timer.set_handler(self.socket_timeout)
- self.timer.set_errback(self.socket_eb)
- self.timer.set(rpki.sundial.timedelta(seconds = timeout))
-
- def socket_timeout(self):
- """
- No answer from nameserver, move on to next one (inner loop).
- """
- self.response = None
- self.iterator()
+ def handle_connect(self):
+ """
+ Quietly ignore UDP "connection" events.
+ """
- def socket_eb(self, e):
- """
- UDP socket signaled error. If it really is some kind of socket
- error, handle as if we've timed out on this nameserver; otherwise,
- pass error back to caller.
- """
- self.timer.cancel()
- if isinstance(e, socket.error):
- self.response = None
- self.iterator()
- else:
- self.lose(e)
+ pass
- def socket_cb(self, af, from_host, from_port, wire):
- """
- Received a packet that might be a DNS message. If it doesn't look
- like it came from one of our nameservers, just drop it and leave
- the timer running. Otherwise, try parsing it: if it's an answer,
- we're done, otherwise handle error appropriately and move on to
- next nameserver.
- """
- sender = (af, dns.inet.inet_pton(af, from_host))
- if from_port != resolver.port or sender not in self.nameservers:
- return
- self.timer.cancel()
- try:
- self.response = dns.message.from_wire(wire, keyring = self.request.keyring, request_mac = self.request.mac, one_rr_per_rrset = False)
- except dns.exception.FormError:
- self.nameservers.remove(sender)
- else:
- rcode = self.response.rcode()
- if rcode in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN):
- self.done1()
- return
- if rcode != dns.rcode.SERVFAIL:
- self.nameservers.remove(sender)
- self.response = None
- self.iterator()
-
- def done2(self):
- """
- Done with inner loop. If we still haven't got an answer and
- haven't (yet?) eliminated all of our nameservers, wait a little
- while before starting the cycle again, unless we've hit the
- timeout threshold for the whole query.
- """
- if self.response is None and self.nameservers:
- try:
- delay = rpki.sundial.timedelta(seconds = min(resolver._compute_timeout(self.start), self.backoff))
- self.backoff *= 2
- self.timer.set_handler(self.loop1)
- self.timer.set_errback(self.lose)
- self.timer.set(delay)
- except dns.resolver.Timeout, e:
- self.lose(e)
- else:
- self.loop1()
+ def writable(self):
+ """
+ We don't need to hear about UDP socket becoming writable.
+ """
- def cleanup(self):
- """
- Shut down our timer and sockets.
- """
- self.timer.cancel()
- for s in self.sockets.itervalues():
- s.close()
+ return False
- def lose(self, e):
- """
- Something bad happened. Clean up, then pass error back to caller.
- """
- self.cleanup()
- self.eb(self, e)
- def done1(self):
+class query(object):
"""
- Done with outer loop. If we got a useful answer, cache it, then
- pass it back to caller; if we got an error, pass the appropriate
- exception back to caller.
+ Simplified (no search paths) asynchronous adaptation of
+ dns.resolver.Resolver.query() (q.v.).
"""
- self.cleanup()
- try:
- if not self.nameservers:
- raise dns.resolver.NoNameservers
- if self.response.rcode() == dns.rcode.NXDOMAIN:
- raise dns.resolver.NXDOMAIN
- answer = dns.resolver.Answer(self.qname, self.qtype, self.qclass, self.response)
- if resolver.cache:
- resolver.cache.put((self.qname, self.qtype, self.qclass), answer)
- self.cb(self, answer)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- self.lose(e)
+
+ def __init__(self, cb, eb, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
+ if isinstance(qname, (str, unicode)):
+ qname = dns.name.from_text(qname)
+ if isinstance(qtype, str):
+ qtype = dns.rdatatype.from_text(qtype)
+ if isinstance(qclass, str):
+ qclass = dns.rdataclass.from_text(qclass)
+ assert qname.is_absolute()
+ self.cb = cb
+ self.eb = eb
+ self.qname = qname
+ self.qtype = qtype
+ self.qclass = qclass
+ self.start = time.time()
+ rpki.async.event_defer(self.go)
+
+ def go(self):
+ """
+ Start running the query. Check our cache before doing network
+ query; if we find an answer there, just return it. Otherwise
+ start the network query.
+ """
+
+ if resolver.cache:
+ answer = resolver.cache.get((self.qname, self.qtype, self.qclass))
+ else:
+ answer = None
+ if answer:
+ self.cb(self, answer)
+ else:
+ self.timer = rpki.async.timer()
+ self.sockets = {}
+ self.request = dns.message.make_query(self.qname, self.qtype, self.qclass)
+ if resolver.keyname is not None:
+ self.request.use_tsig(resolver.keyring, resolver.keyname, resolver.keyalgorithm)
+ self.request.use_edns(resolver.edns, resolver.ednsflags, resolver.payload)
+ self.response = None
+ self.backoff = 0.10
+ self.nameservers = nameservers[:]
+ self.loop1()
+
+ def loop1(self):
+ """
+ Outer loop. If we haven't got a response yet and still have
+ nameservers to check, start inner loop. Otherwise, we're done.
+ """
+
+ self.timer.cancel()
+ if self.response is None and self.nameservers:
+ self.iterator = rpki.async.iterator(self.nameservers[:], self.loop2, self.done2)
+ else:
+ self.done1()
+
+ def loop2(self, iterator, nameserver):
+ """
+ Inner loop. Send query to next nameserver in our list, unless
+ we've hit the overall timeout for this query.
+ """
+
+ self.timer.cancel()
+ try:
+ timeout = resolver._compute_timeout(self.start)
+ except dns.resolver.Timeout, e:
+ self.lose(e)
+ else:
+ af, addr = nameserver
+ if af not in self.sockets:
+ self.sockets[af] = dispatcher(self.socket_cb, self.socket_eb, af)
+ self.sockets[af].sendto(self.request.to_wire(),
+ (dns.inet.inet_ntop(af, addr), resolver.port))
+ self.timer.set_handler(self.socket_timeout)
+ self.timer.set_errback(self.socket_eb)
+ self.timer.set(rpki.sundial.timedelta(seconds = timeout))
+
+ def socket_timeout(self):
+ """
+ No answer from nameserver, move on to next one (inner loop).
+ """
+
+ self.response = None
+ self.iterator()
+
+ def socket_eb(self, e):
+ """
+ UDP socket signaled error. If it really is some kind of socket
+ error, handle as if we've timed out on this nameserver; otherwise,
+ pass error back to caller.
+ """
+
+ self.timer.cancel()
+ if isinstance(e, socket.error):
+ self.response = None
+ self.iterator()
+ else:
+ self.lose(e)
+
+ def socket_cb(self, af, from_host, from_port, wire):
+ """
+ Received a packet that might be a DNS message. If it doesn't look
+ like it came from one of our nameservers, just drop it and leave
+ the timer running. Otherwise, try parsing it: if it's an answer,
+ we're done, otherwise handle error appropriately and move on to
+ next nameserver.
+ """
+
+ sender = (af, dns.inet.inet_pton(af, from_host))
+ if from_port != resolver.port or sender not in self.nameservers:
+ return
+ self.timer.cancel()
+ try:
+ self.response = dns.message.from_wire(wire, keyring = self.request.keyring, request_mac = self.request.mac, one_rr_per_rrset = False)
+ except dns.exception.FormError:
+ self.nameservers.remove(sender)
+ else:
+ rcode = self.response.rcode()
+ if rcode in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN):
+ self.done1()
+ return
+ if rcode != dns.rcode.SERVFAIL:
+ self.nameservers.remove(sender)
+ self.response = None
+ self.iterator()
+
+ def done2(self):
+ """
+ Done with inner loop. If we still haven't got an answer and
+ haven't (yet?) eliminated all of our nameservers, wait a little
+ while before starting the cycle again, unless we've hit the
+ timeout threshold for the whole query.
+ """
+
+ if self.response is None and self.nameservers:
+ try:
+ delay = rpki.sundial.timedelta(seconds = min(resolver._compute_timeout(self.start), self.backoff))
+ self.backoff *= 2
+ self.timer.set_handler(self.loop1)
+ self.timer.set_errback(self.lose)
+ self.timer.set(delay)
+ except dns.resolver.Timeout, e:
+ self.lose(e)
+ else:
+ self.loop1()
+
+ def cleanup(self):
+ """
+ Shut down our timer and sockets.
+ """
+
+ self.timer.cancel()
+ for s in self.sockets.itervalues():
+ s.close()
+
+ def lose(self, e):
+ """
+ Something bad happened. Clean up, then pass error back to caller.
+ """
+
+ self.cleanup()
+ self.eb(self, e)
+
+ def done1(self):
+ """
+ Done with outer loop. If we got a useful answer, cache it, then
+ pass it back to caller; if we got an error, pass the appropriate
+ exception back to caller.
+ """
+
+ self.cleanup()
+ try:
+ if not self.nameservers:
+ raise dns.resolver.NoNameservers
+ if self.response.rcode() == dns.rcode.NXDOMAIN:
+ raise dns.resolver.NXDOMAIN
+ answer = dns.resolver.Answer(self.qname, self.qtype, self.qclass, self.response)
+ if resolver.cache:
+ resolver.cache.put((self.qname, self.qtype, self.qclass), answer)
+ self.cb(self, answer)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ self.lose(e)
class getaddrinfo(object):
- typemap = { dns.rdatatype.A : socket.AF_INET,
- dns.rdatatype.AAAA : socket.AF_INET6 }
-
- def __init__(self, cb, eb, host, address_families = typemap.values()):
- self.cb = cb
- self.eb = eb
- self.host = host
- self.result = []
- self.queries = [query(self.done, self.lose, host, qtype)
- for qtype in self.typemap
- if self.typemap[qtype] in address_families]
-
- def done(self, q, answer):
- if answer is not None:
- for a in answer:
- self.result.append((self.typemap[a.rdtype], a.address))
- self.queries.remove(q)
- if not self.queries:
- self.cb(self.result)
-
- def lose(self, q, e):
- if isinstance(e, dns.resolver.NoAnswer):
- self.done(q, None)
- else:
- for q in self.queries:
- q.cleanup()
- self.eb(e)
+ typemap = { dns.rdatatype.A : socket.AF_INET,
+ dns.rdatatype.AAAA : socket.AF_INET6 }
+
+ def __init__(self, cb, eb, host, address_families = typemap.values()):
+ self.cb = cb
+ self.eb = eb
+ self.host = host
+ self.result = []
+ self.queries = [query(self.done, self.lose, host, qtype)
+ for qtype in self.typemap
+ if self.typemap[qtype] in address_families]
+
+ def done(self, q, answer):
+ if answer is not None:
+ for a in answer:
+ self.result.append((self.typemap[a.rdtype], a.address))
+ self.queries.remove(q)
+ if not self.queries:
+ self.cb(self.result)
+
+ def lose(self, q, e):
+ if isinstance(e, dns.resolver.NoAnswer):
+ self.done(q, None)
+ else:
+ for q in self.queries:
+ q.cleanup()
+ self.eb(e)
if __name__ == "__main__":
- rpki.log.init("test-adns")
- print "Some adns tests may take a minute or two, please be patient"
+ print "Some adns tests may take a minute or two, please be patient"
- class test_getaddrinfo(object):
+ class test_getaddrinfo(object):
- def __init__(self, qname):
- self.qname = qname
- getaddrinfo(self.done, self.lose, qname)
+ def __init__(self, qname):
+ self.qname = qname
+ getaddrinfo(self.done, self.lose, qname)
- def done(self, result):
- print "getaddrinfo(%s) returned: %s" % (
- self.qname,
- ", ".join(str(r) for r in result))
+ def done(self, result):
+ print "getaddrinfo(%s) returned: %s" % (
+ self.qname,
+ ", ".join(str(r) for r in result))
- def lose(self, e):
- print "getaddrinfo(%s) failed: %r" % (self.qname, e)
+ def lose(self, e):
+ print "getaddrinfo(%s) failed: %r" % (self.qname, e)
- class test_query(object):
+ class test_query(object):
- def __init__(self, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
- self.qname = qname
- self.qtype = qtype
- self.qclass = qclass
- query(self.done, self.lose, qname, qtype = qtype, qclass = qclass)
+ def __init__(self, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
+ self.qname = qname
+ self.qtype = qtype
+ self.qclass = qclass
+ query(self.done, self.lose, qname, qtype = qtype, qclass = qclass)
- def done(self, q, result):
- print "query(%s, %s, %s) returned: %s" % (
- self.qname,
- dns.rdatatype.to_text(self.qtype),
- dns.rdataclass.to_text(self.qclass),
- ", ".join(str(r) for r in result))
+ def done(self, q, result):
+ print "query(%s, %s, %s) returned: %s" % (
+ self.qname,
+ dns.rdatatype.to_text(self.qtype),
+ dns.rdataclass.to_text(self.qclass),
+ ", ".join(str(r) for r in result))
- def lose(self, q, e):
- print "getaddrinfo(%s, %s, %s) failed: %r" % (
- self.qname,
- dns.rdatatype.to_text(self.qtype),
- dns.rdataclass.to_text(self.qclass),
- e)
-
- if True:
- for t in (dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.HINFO):
- test_query("subvert-rpki.hactrn.net", t)
- test_query("nonexistant.rpki.net")
- test_query("subvert-rpki.hactrn.net", qclass = dns.rdataclass.CH)
-
- for h in ("subvert-rpki.hactrn.net", "nonexistant.rpki.net"):
- test_getaddrinfo(h)
-
- rpki.async.event_loop()
+ def lose(self, q, e):
+ print "getaddrinfo(%s, %s, %s) failed: %r" % (
+ self.qname,
+ dns.rdatatype.to_text(self.qtype),
+ dns.rdataclass.to_text(self.qclass),
+ e)
+
+ if True:
+ for t in (dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.HINFO):
+ test_query("subvert-rpki.hactrn.net", t)
+ test_query("nonexistant.rpki.net")
+ test_query("subvert-rpki.hactrn.net", qclass = dns.rdataclass.CH)
+
+ for h in ("subvert-rpki.hactrn.net", "nonexistant.rpki.net"):
+ test_getaddrinfo(h)
+
+ rpki.async.event_loop()
diff --git a/rpki/async.py b/rpki/async.py
deleted file mode 100644
index 75b4b656..00000000
--- a/rpki/async.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Utilities for event-driven programming.
-"""
-
-import gc
-import sys
-import signal
-import logging
-import asyncore
-import traceback
-import rpki.log
-import rpki.sundial
-
-logger = logging.getLogger(__name__)
-
-ExitNow = asyncore.ExitNow
-
-class iterator(object):
- """
- Iteration construct for event-driven code. Takes three
- arguments:
-
- - Some kind of iterable object
-
- - A callback to call on each item in the iteration
-
- - A callback to call after the iteration terminates.
-
- The item callback receives two arguments: the callable iterator
- object and the current value of the iteration. It should call the
- iterator (or arrange for the iterator to be called) when it is time
- to continue to the next item in the iteration.
-
- The termination callback receives no arguments.
-
- Special case for memory constrained cases: if keyword argument
- pop_list is True, iterable must be a list, which is modified in
- place, popping items off of it until it's empty.
- """
-
- def __init__(self, iterable, item_callback, done_callback, unwind_stack = True, pop_list = False):
- assert not pop_list or isinstance(iterable, list), "iterable must be a list when using pop_list"
- self.item_callback = item_callback
- self.done_callback = done_callback if done_callback is not None else lambda: None
- self.caller_file, self.caller_line, self.caller_function = traceback.extract_stack(limit = 2)[0][0:3]
- self.unwind_stack = unwind_stack
- self.pop_list = pop_list
- try:
- if self.pop_list:
- self.iterator = iterable
- else:
- self.iterator = iter(iterable)
- except (ExitNow, SystemExit):
- raise
- except Exception:
- logger.debug("Problem constructing iterator for %s", repr(iterable))
- raise
- self.doit()
-
- def __repr__(self):
- return rpki.log.log_repr(self,
- "created at %s:%s" % (self.caller_file,
- self.caller_line),
- self.caller_function)
-
- def __call__(self):
- if self.unwind_stack:
- event_defer(self.doit)
- else:
- self.doit()
-
- def doit(self):
- """
- Implement the iterator protocol: attempt to call the item handler
- with the next iteration value, call the termination handler if the
- iterator signaled StopIteration.
- """
-
- try:
- if self.pop_list:
- val = self.iterator.pop(0)
- else:
- val = self.iterator.next()
- except (IndexError, StopIteration):
- self.done_callback()
- else:
- self.item_callback(self, val)
-
-## @var timer_queue
-# Timer queue.
-
-timer_queue = []
-
-class timer(object):
- """
- Timer construct for event-driven code.
- """
-
- ## @var gc_debug
- # Verbose chatter about timers states and garbage collection.
- gc_debug = False
-
- ## @var run_debug
- # Verbose chatter about timers being run.
- run_debug = False
-
- def __init__(self, handler = None, errback = None):
- self.set_handler(handler)
- self.set_errback(errback)
- self.when = None
- if self.gc_debug:
- self.trace("Creating %r" % self)
-
- def trace(self, msg):
- """
- Debug logging.
- """
- if self.gc_debug:
- bt = traceback.extract_stack(limit = 3)
- logger.debug("%s from %s:%d", msg, bt[0][0], bt[0][1])
-
- def set(self, when):
- """
- Set a timer. Argument can be a datetime, to specify an absolute
- time, or a timedelta, to specify an offset time.
- """
- if self.gc_debug:
- self.trace("Setting %r to %r" % (self, when))
- if isinstance(when, rpki.sundial.timedelta):
- self.when = rpki.sundial.now() + when
- else:
- self.when = when
- assert isinstance(self.when, rpki.sundial.datetime), "%r: Expecting a datetime, got %r" % (self, self.when)
- if self not in timer_queue:
- timer_queue.append(self)
- timer_queue.sort(key = lambda x: x.when)
-
- def __cmp__(self, other):
- return cmp(id(self), id(other))
-
- if gc_debug:
- def __del__(self):
- logger.debug("Deleting %r", self)
-
- def cancel(self):
- """
- Cancel a timer, if it was set.
- """
- if self.gc_debug:
- self.trace("Canceling %r" % self)
- try:
- while True:
- timer_queue.remove(self)
- except ValueError:
- pass
-
- def is_set(self):
- """
- Test whether this timer is currently set.
- """
- return self in timer_queue
-
- def set_handler(self, handler):
- """
- Set timer's expiration handler. This is an alternative to
- subclassing the timer class, and may be easier to use when
- integrating timers into other classes (eg, the handler can be a
- bound method to an object in a class representing a network
- connection).
- """
- self.handler = handler
-
- def set_errback(self, errback):
- """
- Set a timer's errback. Like set_handler(), for errbacks.
- """
- self.errback = errback
-
- @classmethod
- def runq(cls):
- """
- Run the timer queue: for each timer whose call time has passed,
- pull the timer off the queue and call its handler() method.
-
- Comparisions are made against time at which this function was
- called, so that even if new events keep getting scheduled, we'll
- return to the I/O loop reasonably quickly.
- """
- now = rpki.sundial.now()
- while timer_queue and now >= timer_queue[0].when:
- t = timer_queue.pop(0)
- if cls.run_debug:
- logger.debug("Running %r", t)
- try:
- if t.handler is not None:
- t.handler()
- else:
- logger.warning("Timer %r expired with no handler set", t)
- except (ExitNow, SystemExit):
- raise
- except Exception, e:
- if t.errback is not None:
- t.errback(e)
- else:
- logger.exception("Unhandled exception from timer %r", t)
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.when, repr(self.handler))
-
- @classmethod
- def seconds_until_wakeup(cls):
- """
- Calculate delay until next timer expires, or None if no timers are
- set and we should wait indefinitely. Rounds up to avoid spinning
- in select() or poll(). We could calculate fractional seconds in
- the right units instead, but select() and poll() don't even take
- the same units (argh!), and we're not doing anything that
- hair-triggered, so rounding up is simplest.
- """
- if not timer_queue:
- return None
- now = rpki.sundial.now()
- if now >= timer_queue[0].when:
- return 0
- delay = timer_queue[0].when - now
- seconds = delay.convert_to_seconds()
- if delay.microseconds:
- seconds += 1
- return seconds
-
- @classmethod
- def clear(cls):
- """
- Cancel every timer on the queue. We could just throw away the
- queue content, but this way we can notify subclasses that provide
- their own cancel() method.
- """
- while timer_queue:
- timer_queue.pop(0).cancel()
-
-def _raiseExitNow(signum, frame):
- """
- Signal handler for event_loop().
- """
- raise ExitNow
-
-def exit_event_loop():
- """
- Force exit from event_loop().
- """
- raise ExitNow
-
-def event_defer(handler, delay = rpki.sundial.timedelta(seconds = 0)):
- """
- Use a near-term (default: zero interval) timer to schedule an event
- to run after letting the I/O system have a turn.
- """
- timer(handler).set(delay)
-
-## @var debug_event_timing
-# Enable insanely verbose logging of event timing
-
-debug_event_timing = False
-
-def event_loop(catch_signals = (signal.SIGINT, signal.SIGTERM)):
- """
- Replacement for asyncore.loop(), adding timer and signal support.
- """
- old_signal_handlers = {}
- while True:
- save_sigs = len(old_signal_handlers) == 0
- try:
- for sig in catch_signals:
- old = signal.signal(sig, _raiseExitNow)
- if save_sigs:
- old_signal_handlers[sig] = old
- while asyncore.socket_map or timer_queue:
- t = timer.seconds_until_wakeup()
- if debug_event_timing:
- logger.debug("Dismissing to asyncore.poll(), t = %s, q = %r", t, timer_queue)
- asyncore.poll(t, asyncore.socket_map)
- timer.runq()
- if timer.gc_debug:
- gc.collect()
- if gc.garbage:
- for i in gc.garbage:
- logger.debug("GC-cycle %r", i)
- del gc.garbage[:]
- except ExitNow:
- break
- except SystemExit:
- raise
- except ValueError, e:
- if str(e) == "filedescriptor out of range in select()":
- logger.error("Something is badly wrong, select() thinks we gave it a bad file descriptor.")
- logger.error("Content of asyncore.socket_map:")
- for fd in sorted(asyncore.socket_map.iterkeys()):
- logger.error(" fd %s obj %r", fd, asyncore.socket_map[fd])
- logger.error("Not safe to continue due to risk of spin loop on select(). Exiting.")
- sys.exit(1)
- logger.exception("event_loop() exited with exception %r, this is not supposed to happen, restarting")
- except Exception, e:
- logger.exception("event_loop() exited with exception %r, this is not supposed to happen, restarting")
- else:
- break
- finally:
- for sig in old_signal_handlers:
- signal.signal(sig, old_signal_handlers[sig])
-
-class sync_wrapper(object):
- """
- Synchronous wrapper around asynchronous functions. Running in
- asynchronous mode at all times makes sense for event-driven daemons,
- but is kind of tedious for simple scripts, hence this wrapper.
-
- The wrapped function should take at least two arguments: a callback
- function and an errback function. If any arguments are passed to
- the wrapper, they will be passed as additional arguments to the
- wrapped function.
- """
-
- res = None
- err = None
- fin = False
-
- def __init__(self, func, disable_signal_handlers = False):
- self.func = func
- self.disable_signal_handlers = disable_signal_handlers
-
- def cb(self, res = None):
- """
- Wrapped code has requested normal termination. Store result, and
- exit the event loop.
- """
- self.res = res
- self.fin = True
- logger.debug("%r callback with result %r", self, self.res)
- raise ExitNow
-
- def eb(self, err):
- """
- Wrapped code raised an exception. Store exception data, then exit
- the event loop.
- """
- exc_info = sys.exc_info()
- self.err = exc_info if exc_info[1] is err else err
- self.fin = True
- logger.debug("%r errback with exception %r", self, self.err)
- raise ExitNow
-
- def __call__(self, *args, **kwargs):
-
- def thunk():
- try:
- self.func(self.cb, self.eb, *args, **kwargs)
- except ExitNow:
- raise
- except Exception, e:
- self.eb(e)
-
- event_defer(thunk)
- if self.disable_signal_handlers:
- event_loop(catch_signals = ())
- else:
- event_loop()
- if not self.fin:
- logger.warning("%r event_loop terminated without callback or errback", self)
- if self.err is None:
- return self.res
- elif isinstance(self.err, tuple):
- raise self.err[0], self.err[1], self.err[2]
- else:
- raise self.err
-
-class gc_summary(object):
- """
- Periodic summary of GC state, for tracking down memory bloat.
- """
-
- def __init__(self, interval, threshold = 0):
- if isinstance(interval, (int, long)):
- interval = rpki.sundial.timedelta(seconds = interval)
- self.interval = interval
- self.threshold = threshold
- self.timer = timer(handler = self.handler)
- self.timer.set(self.interval)
-
- def handler(self):
- """
- Collect and log GC state for this period, reset timer.
- """
- logger.debug("gc_summary: Running gc.collect()")
- gc.collect()
- logger.debug("gc_summary: Summarizing (threshold %d)", self.threshold)
- total = {}
- tuples = {}
- for g in gc.get_objects():
- k = type(g).__name__
- total[k] = total.get(k, 0) + 1
- if isinstance(g, tuple):
- k = ", ".join(type(x).__name__ for x in g)
- tuples[k] = tuples.get(k, 0) + 1
- logger.debug("gc_summary: Sorting result")
- total = total.items()
- total.sort(reverse = True, key = lambda x: x[1])
- tuples = tuples.items()
- tuples.sort(reverse = True, key = lambda x: x[1])
- logger.debug("gc_summary: Object type counts in descending order")
- for name, count in total:
- if count > self.threshold:
- logger.debug("gc_summary: %8d %s", count, name)
- logger.debug("gc_summary: Tuple content type signature counts in descending order")
- for types, count in tuples:
- if count > self.threshold:
- logger.debug("gc_summary: %8d (%s)", count, types)
- logger.debug("gc_summary: Scheduling next cycle")
- self.timer.set(self.interval)
diff --git a/rpki/cli.py b/rpki/cli.py
index e75b8430..cbd2b1e1 100644
--- a/rpki/cli.py
+++ b/rpki/cli.py
@@ -28,244 +28,265 @@ import argparse
import traceback
try:
- import readline
- have_readline = True
+ import readline
+ have_readline = True
except ImportError:
- have_readline = False
+ have_readline = False
class BadCommandSyntax(Exception):
- "Bad command line syntax."
+ "Bad command line syntax."
class ExitArgparse(Exception):
- "Exit method from ArgumentParser."
+ "Exit method from ArgumentParser."
- def __init__(self, message = None, status = 0):
- super(ExitArgparse, self).__init__()
- self.message = message
- self.status = status
+ def __init__(self, message = None, status = 0):
+ super(ExitArgparse, self).__init__()
+ self.message = message
+ self.status = status
class Cmd(cmd.Cmd):
- """
- Customized subclass of Python cmd module.
- """
+ """
+ Customized subclass of Python cmd module.
+ """
- emptyline_repeats_last_command = False
+ emptyline_repeats_last_command = False
- EOF_exits_command_loop = True
+ EOF_exits_command_loop = True
- identchars = cmd.IDENTCHARS + "/-."
+ identchars = cmd.IDENTCHARS + "/-."
- histfile = None
+ histfile = None
- last_command_failed = False
+ last_command_failed = False
- def onecmd(self, line):
- """
- Wrap error handling around cmd.Cmd.onecmd(). Might want to do
- something kinder than showing a traceback, eventually.
- """
+ def onecmd(self, line):
+ """
+ Wrap error handling around cmd.Cmd.onecmd(). Might want to do
+ something kinder than showing a traceback, eventually.
+ """
- self.last_command_failed = False
- try:
- return cmd.Cmd.onecmd(self, line)
- except SystemExit:
- raise
- except ExitArgparse, e:
- if e.message is not None:
- print e.message
- self.last_command_failed = e.status != 0
- return False
- except BadCommandSyntax, e:
- print e
- except Exception:
- traceback.print_exc()
- self.last_command_failed = True
- return False
-
- def do_EOF(self, arg):
- if self.EOF_exits_command_loop and self.prompt:
- print
- return self.EOF_exits_command_loop
-
- def do_exit(self, arg):
- """
- Exit program.
- """
+ self.last_command_failed = False
+ try:
+ return cmd.Cmd.onecmd(self, line)
+ except SystemExit:
+ raise
+ except ExitArgparse, e:
+ if e.message is not None:
+ print e.message
+ self.last_command_failed = e.status != 0
+ return False
+ except BadCommandSyntax, e:
+ print e
+ except:
+ traceback.print_exc()
+ self.last_command_failed = True
+ return False
+
+ def do_EOF(self, arg):
+ if self.EOF_exits_command_loop and self.prompt:
+ print
+ return self.EOF_exits_command_loop
+
+ def do_exit(self, arg):
+ """
+ Exit program.
+ """
+
+ return True
+
+ do_quit = do_exit
+
+ def emptyline(self):
+ """
+ Handle an empty line. cmd module default is to repeat the last
+ command, which I find to be violation of the principal of least
+ astonishment, so my preference is that an empty line does nothing.
+ """
+
+ if self.emptyline_repeats_last_command:
+ cmd.Cmd.emptyline(self)
+
+ def filename_complete(self, text, line, begidx, endidx):
+ """
+ Filename completion handler, with hack to restore what I consider
+ the normal (bash-like) behavior when one hits the completion key
+ and there's only one match.
+ """
+
+ result = glob.glob(text + "*")
+ if len(result) == 1:
+ path = result.pop()
+ if os.path.isdir(path) or (os.path.islink(path) and os.path.isdir(os.path.join(path, "."))):
+ result.append(path + os.path.sep)
+ else:
+ result.append(path + " ")
+ return result
+
+ def completenames(self, text, *ignored):
+ """
+ Command name completion handler, with hack to restore what I
+ consider the normal (bash-like) behavior when one hits the
+ completion key and there's only one match.
+ """
+
+ result = cmd.Cmd.completenames(self, text, *ignored)
+ if len(result) == 1:
+ result[0] += " "
+ return result
+
+ def help_help(self):
+ """
+ Type "help [topic]" for help on a command,
+ or just "help" for a list of commands.
+ """
+
+ self.stdout.write(self.help_help.__doc__ + "\n")
+
+ def complete_help(self, *args):
+ """
+ Better completion function for help command arguments.
+ """
+
+ text = args[0]
+ names = self.get_names()
+ result = []
+ for prefix in ("do_", "help_"):
+ result.extend(s[len(prefix):] for s in names if s.startswith(prefix + text) and s != "do_EOF")
+ return result
+
+ if have_readline:
+
+ def cmdloop_with_history(self):
+ """
+ Better command loop, with history file and tweaked readline
+ completion delimiters.
+ """
+
+ old_completer_delims = readline.get_completer_delims()
+ if self.histfile is not None:
+ try:
+ self.read_history()
+ except IOError:
+ pass
+ try:
+ readline.set_completer_delims("".join(set(old_completer_delims) - set(self.identchars)))
+ self.cmdloop()
+ finally:
+ if self.histfile is not None and readline.get_current_history_length():
+ try:
+ self.save_history()
+ except IOError:
+ pass
+ readline.set_completer_delims(old_completer_delims)
+
+ def read_history(self):
+ """
+ Read readline history from file.
+
+ This is a separate method so that subclasses can wrap it when necessary.
+ """
+
+ readline.read_history_file(self.histfile)
+
+ def save_history(self):
+ """
+ Save readline history to file.
+
+ This is a separate method so that subclasses can wrap it when necessary.
+ """
+
+ readline.write_history_file(self.histfile)
+
+ else:
+
+ cmdloop_with_history = cmd.Cmd.cmdloop
- return True
- do_quit = do_exit
- def emptyline(self):
+def yes_or_no(prompt, default = None, require_full_word = False):
"""
- Handle an empty line. cmd module default is to repeat the last
- command, which I find to be violation of the principal of least
- astonishment, so my preference is that an empty line does nothing.
+ Ask a yes-or-no question.
"""
- if self.emptyline_repeats_last_command:
- cmd.Cmd.emptyline(self)
+ prompt = prompt.rstrip() + _yes_or_no_prompts[default]
+ while True:
+ answer = raw_input(prompt).strip().lower()
+ if not answer and default is not None:
+ return default
+ if answer == "yes" or (not require_full_word and answer.startswith("y")):
+ return True
+ if answer == "no" or (not require_full_word and answer.startswith("n")):
+ return False
+ print 'Please answer "yes" or "no"'
- def filename_complete(self, text, line, begidx, endidx):
- """
- Filename completion handler, with hack to restore what I consider
- the normal (bash-like) behavior when one hits the completion key
- and there's only one match.
- """
+_yes_or_no_prompts = {
+ True : ' ("yes" or "no" ["yes"]) ',
+ False : ' ("yes" or "no" ["no"]) ',
+ None : ' ("yes" or "no") ' }
- result = glob.glob(text + "*")
- if len(result) == 1:
- path = result.pop()
- if os.path.isdir(path) or (os.path.islink(path) and os.path.isdir(os.path.join(path, "."))):
- result.append(path + os.path.sep)
- else:
- result.append(path + " ")
- return result
- def completenames(self, text, *ignored):
+class NonExitingArgumentParser(argparse.ArgumentParser):
"""
- Command name completion handler, with hack to restore what I
- consider the normal (bash-like) behavior when one hits the
- completion key and there's only one match.
+ ArgumentParser tweaked to throw ExitArgparse exception
+ rather than using sys.exit(), for use with command loop.
"""
- result = cmd.Cmd.completenames(self, text, *ignored)
- if len(result) == 1:
- result[0] += " "
- return result
-
- def help_help(self):
- """
- Type "help [topic]" for help on a command,
- or just "help" for a list of commands.
- """
+ def exit(self, status = 0, message = None):
+ raise ExitArgparse(status = status, message = message)
- self.stdout.write(self.help_help.__doc__ + "\n")
- def complete_help(self, *args):
- """
- Better completion function for help command arguments.
+def parsecmd(subparsers, *arg_clauses):
"""
+ Decorator to combine the argparse and cmd modules.
- text = args[0]
- names = self.get_names()
- result = []
- for prefix in ("do_", "help_"):
- result.extend(s[len(prefix):] for s in names if s.startswith(prefix + text) and s != "do_EOF")
- return result
+ subparsers is an instance of argparse.ArgumentParser (or subclass) which was
+ returned by calling the .add_subparsers() method on an ArgumentParser instance
+ intended to handle parsing for the entire program on the command line.
- if have_readline:
+ arg_clauses is a series of defarg() invocations defining arguments to be parsed
+ by the argparse code.
- def cmdloop_with_history(self):
- """
- Better command loop, with history file and tweaked readline
- completion delimiters.
- """
+ The decorator will use arg_clauses to construct two separate argparse parser
+ instances: one will be attached to the global parser as a subparser, the
+ other will be used to parse arguments for this command when invoked by cmd.
- old_completer_delims = readline.get_completer_delims()
- if self.histfile is not None:
- try:
- readline.read_history_file(self.histfile)
- except IOError:
- pass
- try:
- readline.set_completer_delims("".join(set(old_completer_delims) - set(self.identchars)))
- self.cmdloop()
- finally:
- if self.histfile is not None and readline.get_current_history_length():
- readline.write_history_file(self.histfile)
- readline.set_completer_delims(old_completer_delims)
-
- else:
-
- cmdloop_with_history = cmd.Cmd.cmdloop
-
-
-
-def yes_or_no(prompt, default = None, require_full_word = False):
- """
- Ask a yes-or-no question.
- """
-
- prompt = prompt.rstrip() + _yes_or_no_prompts[default]
- while True:
- answer = raw_input(prompt).strip().lower()
- if not answer and default is not None:
- return default
- if answer == "yes" or (not require_full_word and answer.startswith("y")):
- return True
- if answer == "no" or (not require_full_word and answer.startswith("n")):
- return False
- print 'Please answer "yes" or "no"'
-
-_yes_or_no_prompts = {
- True : ' ("yes" or "no" ["yes"]) ',
- False : ' ("yes" or "no" ["no"]) ',
- None : ' ("yes" or "no") ' }
-
-
-class NonExitingArgumentParser(argparse.ArgumentParser):
- """
- ArgumentParser tweaked to throw ExitArgparse exception
- rather than using sys.exit(), for use with command loop.
- """
+ The decorator will replace the original do_whatever method with a wrapped version
+ which uses the local argparse instance to parse the single string supplied by
+ the cmd module.
- def exit(self, status = 0, message = None):
- raise ExitArgparse(status = status, message = message)
+ The intent is that, from the command's point of view, all of this should work
+ pretty much the same way regardless of whether the command was invoked from
+ the global command line or from within the cmd command loop. Either way,
+ the command method should get an argparse.Namespace object.
+ In theory, we could generate a completion handler from the argparse definitions,
+ much as the separate argcomplete package does. In practice this is a lot of
+ work and I'm not ready to get into that just yet.
+ """
-def parsecmd(subparsers, *arg_clauses):
- """
- Decorator to combine the argparse and cmd modules.
-
- subparsers is an instance of argparse.ArgumentParser (or subclass) which was
- returned by calling the .add_subparsers() method on an ArgumentParser instance
- intended to handle parsing for the entire program on the command line.
-
- arg_clauses is a series of defarg() invocations defining arguments to be parsed
- by the argparse code.
-
- The decorator will use arg_clauses to construct two separate argparse parser
- instances: one will be attached to the global parser as a subparser, the
- other will be used to parse arguments for this command when invoked by cmd.
-
- The decorator will replace the original do_whatever method with a wrapped version
- which uses the local argparse instance to parse the single string supplied by
- the cmd module.
-
- The intent is that, from the command's point of view, all of this should work
- pretty much the same way regardless of whether the command was invoked from
- the global command line or from within the cmd command loop. Either way,
- the command method should get an argparse.Namespace object.
-
- In theory, we could generate a completion handler from the argparse definitions,
- much as the separate argcomplete package does. In practice this is a lot of
- work and I'm not ready to get into that just yet.
- """
-
- def decorate(func):
- assert func.__name__.startswith("do_")
- parser = NonExitingArgumentParser(description = func.__doc__,
- prog = func.__name__[3:],
- add_help = False)
- subparser = subparsers.add_parser(func.__name__[3:],
- description = func.__doc__,
- help = func.__doc__.lstrip().partition("\n")[0])
- for positional, keywords in arg_clauses:
- parser.add_argument(*positional, **keywords)
- subparser.add_argument(*positional, **keywords)
- subparser.set_defaults(func = func)
- def wrapped(self, arg):
- return func(self, parser.parse_args(shlex.split(arg)))
- wrapped.argparser = parser
- wrapped.__doc__ = func.__doc__
- return wrapped
- return decorate
+ def decorate(func):
+ assert func.__name__.startswith("do_")
+ parser = NonExitingArgumentParser(description = func.__doc__,
+ prog = func.__name__[3:],
+ add_help = False)
+ subparser = subparsers.add_parser(func.__name__[3:],
+ description = func.__doc__,
+ help = func.__doc__.lstrip().partition("\n")[0])
+ for positional, keywords in arg_clauses:
+ parser.add_argument(*positional, **keywords)
+ subparser.add_argument(*positional, **keywords)
+ subparser.set_defaults(func = func)
+ def wrapped(self, arg):
+ return func(self, parser.parse_args(shlex.split(arg)))
+ wrapped.argparser = parser
+ wrapped.__doc__ = func.__doc__
+ return wrapped
+ return decorate
def cmdarg(*positional, **keywords):
- """
- Syntactic sugar to let us use keyword arguments normally when constructing
- arguments for deferred calls to argparse.ArgumentParser.add_argument().
- """
+ """
+ Syntactic sugar to let us use keyword arguments normally when constructing
+ arguments for deferred calls to argparse.ArgumentParser.add_argument().
+ """
- return positional, keywords
+ return positional, keywords
diff --git a/rpki/config.py b/rpki/config.py
index 253e56cf..2f507f90 100644
--- a/rpki/config.py
+++ b/rpki/config.py
@@ -23,7 +23,12 @@ ConfigParser module.
"""
import ConfigParser
+import argparse
import logging
+import logging.handlers
+import traceback
+import time
+import sys
import os
import re
@@ -32,280 +37,604 @@ logger = logging.getLogger(__name__)
## @var default_filename
# Default name of config file if caller doesn't specify one explictly.
-default_filename = "rpki.conf"
-
-## @var default_dirname
-# Default name of directory to check for global config file, or None
-# if no global config file. Autoconf-generated code may set this to a
-# non-None value during script startup.
-
try:
- import rpki.autoconf
- default_dirname = rpki.autoconf.sysconfdir
+ import rpki.autoconf
+ default_filename = os.path.join(rpki.autoconf.sysconfdir, "rpki.conf")
except ImportError:
- default_dirname = None
+ default_filename = None
-## @var default_envname
+## @var rpki_conf_envname
# Name of environment variable containing config file name.
-default_envname = "RPKI_CONF"
+rpki_conf_envname = "RPKI_CONF"
+
class parser(object):
- """
- Extensions to stock Python ConfigParser:
-
- Read config file and set default section while initializing parser object.
-
- Support for OpenSSL-style subscripted options and a limited form of
- OpenSSL-style indirect variable references (${section::option}).
-
- get-methods with default values and default section name.
-
- If no filename is given to the constructor (filename = None), we
- check for an environment variable naming the config file, then we
- check for a default filename in the current directory, then finally
- we check for a global config file if autoconf provided a directory
- name to check.
- """
-
- def __init__(self, filename = None, section = None, allow_missing = False):
-
- self.cfg = ConfigParser.RawConfigParser()
- self.default_section = section
-
- filenames = []
- if filename is not None:
- filenames.append(filename)
- else:
- if default_envname in os.environ:
- filenames.append(os.environ[default_envname])
- filenames.append(default_filename)
- if default_dirname is not None:
- filenames.append("%s/%s" % (default_dirname, default_filename))
-
- f = fn = None
-
- for fn in filenames:
- try:
- f = open(fn)
- break
- except IOError:
- f = None
-
- if f is not None:
- self.filename = fn
- self.cfg.readfp(f, fn)
- elif allow_missing:
- self.filename = None
- else:
- raise
-
- def has_section(self, section):
- """
- Test whether a section exists.
"""
+ Extensions to stock Python ConfigParser:
- return self.cfg.has_section(section)
+ Read config file and set default section while initializing parser object.
- def has_option(self, option, section = None):
- """
- Test whether an option exists.
- """
+ Support for OpenSSL-style subscripted options and a limited form of
+ OpenSSL-style indirect variable references (${section::option}).
- if section is None:
- section = self.default_section
- return self.cfg.has_option(section, option)
+ get-methods with default values and default section name.
- def multiget(self, option, section = None):
- """
- Parse OpenSSL-style foo.0, foo.1, ... subscripted options.
+ If no filename is given to the constructor (filename and
+ set_filename both None), we check for an environment variable naming
+ the config file, then finally we check for a global config file if
+ autoconf provided a directory name to check.
- Returns iteration of values matching the specified option name.
+ NB: Programs which accept a configuration filename on the command
+ lines should pass that filename using set_filename so that we can
+ set the magic environment variable. Constraints from some external
+ libraries (principally Django) sometimes require library code to
+ look things up in the configuration file without the knowledge of
+ the controlling program, but setting the environment variable
+    ensures that everybody's reading from the same script, as it were.
"""
- matches = []
- if section is None:
- section = self.default_section
- if self.cfg.has_option(section, option):
- yield self.cfg.get(section, option)
- option += "."
- matches = [o for o in self.cfg.options(section) if o.startswith(option) and o[len(option):].isdigit()]
- matches.sort()
- for option in matches:
- yield self.cfg.get(section, option)
+ # Odd keyword-only calling sequence is a defense against old code
+ # that thinks it knows how __init__() handles positional arguments.
- _regexp = re.compile("\\${(.*?)::(.*?)}")
+ def __init__(self, **kwargs):
+ section = kwargs.pop("section", None)
+ allow_missing = kwargs.pop("allow_missing", False)
+ set_filename = kwargs.pop("set_filename", None)
+ filename = kwargs.pop("filename", set_filename)
+ argparser = kwargs.pop("argparser", None)
- def _repl(self, m):
- """
- Replacement function for indirect variable substitution.
- This is intended for use with re.subn().
- """
- section, option = m.group(1, 2)
- if section == "ENV":
- return os.getenv(option, "")
- else:
- return self.cfg.get(section, option)
+ assert not kwargs, "Unexpected keyword arguments: {}".format(
+ ", ".join("{} = {!r}".format(k, v) for k, v in kwargs.iteritems()))
+
+ if set_filename is not None:
+ os.environ[rpki_conf_envname] = set_filename
+
+ self.cfg = ConfigParser.RawConfigParser()
+ self.default_section = section
+
+ self.filename = filename or os.getenv(rpki_conf_envname) or default_filename
+ self.argparser = argparser
+ self.logging_defaults = None
+
+ try:
+ with open(self.filename, "r") as f:
+ self.cfg.readfp(f)
+ except IOError:
+ if allow_missing:
+ self.filename = None
+ else:
+ raise
+
+
+ def has_section(self, section):
+ """
+ Test whether a section exists.
+ """
+
+ return self.cfg.has_section(section)
+
+
+ def has_option(self, option, section = None):
+ """
+ Test whether an option exists.
+ """
+
+ if section is None:
+ section = self.default_section
+ return self.cfg.has_option(section, option)
+
+
+ def multiget(self, option, section = None):
+ """
+ Parse OpenSSL-style foo.0, foo.1, ... subscripted options.
+
+ Returns iteration of values matching the specified option name.
+ """
+
+ matches = []
+ if section is None:
+ section = self.default_section
+ if self.cfg.has_option(section, option):
+ yield self.cfg.get(section, option)
+ option += "."
+ matches = [o for o in self.cfg.options(section)
+ if o.startswith(option) and o[len(option):].isdigit()]
+ matches.sort()
+ for option in matches:
+ yield self.cfg.get(section, option)
+
+
+ _regexp = re.compile("\\${(.*?)::(.*?)}")
+
+ def _repl(self, m):
+ """
+ Replacement function for indirect variable substitution.
+ This is intended for use with re.subn().
+ """
+
+ section, option = m.group(1, 2)
+ if section == "ENV":
+ return os.getenv(option, "")
+ else:
+ return self.cfg.get(section, option)
+
+
+ def get(self, option, default = None, section = None):
+ """
+ Get an option, perhaps with a default value.
+ """
+
+ if section is None:
+ section = self.default_section
+ if default is not None and not self.cfg.has_option(section, option):
+ return default
+ val = self.cfg.get(section, option)
+ while True:
+ val, modified = self._regexp.subn(self._repl, val, 1)
+ if not modified:
+ return val
+
+
+ def getboolean(self, option, default = None, section = None):
+ """
+ Get a boolean option, perhaps with a default value.
+ """
+
+ # pylint: disable=W0212
+ v = self.get(option, default, section)
+ if isinstance(v, str):
+ v = v.lower()
+ if v not in self.cfg._boolean_states:
+ raise ValueError("Not boolean: {}".format(v))
+ v = self.cfg._boolean_states[v]
+ return v
- def get(self, option, default = None, section = None):
- """
- Get an option, perhaps with a default value.
- """
- if section is None:
- section = self.default_section
- if default is not None and not self.cfg.has_option(section, option):
- return default
- val = self.cfg.get(section, option)
- while True:
- val, modified = self._regexp.subn(self._repl, val, 1)
- if not modified:
- return val
-
- def getboolean(self, option, default = None, section = None):
- """
- Get a boolean option, perhaps with a default value.
- """
- v = self.get(option, default, section)
- if isinstance(v, str):
- v = v.lower()
- if v not in self.cfg._boolean_states:
- raise ValueError("Not a boolean: %s" % v)
- v = self.cfg._boolean_states[v]
- return v
-
- def getint(self, option, default = None, section = None):
- """
- Get an integer option, perhaps with a default value.
- """
- return int(self.get(option, default, section))
- def getlong(self, option, default = None, section = None):
+ def getint(self, option, default = None, section = None):
+ """
+ Get an integer option, perhaps with a default value.
+ """
+
+ return int(self.get(option, default, section))
+
+
+ def getlong(self, option, default = None, section = None):
+ """
+ Get a long integer option, perhaps with a default value.
+ """
+
+ return long(self.get(option, default, section))
+
+
+ def _get_argument_default(self, names, kwargs):
+ section = kwargs.pop("section", None)
+ default = kwargs.pop("default", None)
+
+ for name in names:
+ if name.startswith("--"):
+ name = name[2:]
+ break
+ else:
+ raise ValueError
+
+ if self.has_option(option = name, section = section):
+ default = self.get(option = name, section = section, default = default)
+
+ if "type" in kwargs:
+ default = kwargs["type"](default)
+
+ if "choices" in kwargs and default not in kwargs["choices"]:
+ raise ValueError
+
+ kwargs["default"] = default
+
+ return name, default, kwargs
+
+
+ def add_argument(self, *names, **kwargs):
+ """
+ Combined command line and config file argument. Takes
+ arguments mostly like ArgumentParser.add_argument(), but also
+ looks in config file for option of the same name.
+
+ The "section" and "default" arguments are used for the config file
+ lookup; the resulting value is used as the "default" parameter for
+ the argument parser.
+
+ If a "type" argument is specified, it applies to both the value
+ parsed from the config file and the argument parser.
+ """
+
+ name, default, kwargs = self._get_argument_default(names, kwargs)
+ return self.argparser.add_argument(*names, **kwargs)
+
+
+ def add_boolean_argument(self, name, **kwargs):
+ """
+ Combined command line and config file boolean argument. Takes
+ arguments mostly like ArgumentParser.add_argument(), but also
+ looks in config file for option of the same name.
+
+ The "section" and "default" arguments are used for the config file
+ lookup; the resulting value is used as the default value for
+ the argument parser.
+
+ Usage is a bit different from the normal ArgumentParser boolean
+ handling: because the command line default is controlled by the
+ config file, the "store_true" / "store_false" semantics don't
+ really work for us. So, instead, we use the --foo / --no-foo
+ convention, and generate a pair of command line arguments with
+ those names controlling a single "foo" value in the result.
+ """
+
+ section = kwargs.pop("section", None)
+ default = kwargs.pop("default", None)
+ help = kwargs.pop("help", None)
+
+ if not name.startswith("--"):
+ raise ValueError
+ name = name[2:]
+
+ default = self.getboolean(name, default = default, section = section)
+
+ kwargs["action"] = "store_const"
+ kwargs["dest"] = name.replace("-", "_")
+
+ group = self.argparser.add_mutually_exclusive_group()
+
+ kwargs["const"] = True
+ group.add_argument("--" + name, **kwargs)
+
+ kwargs["const"] = False
+ kwargs["help"] = help
+ group.add_argument("--no-" + name, **kwargs)
+
+ self.argparser.set_defaults(**{ kwargs["dest"] : default })
+
+
+ def _add_logging_argument(self, *names, **kwargs):
+ group = kwargs.pop("group", self.argparser)
+ name, default, kwargs = self._get_argument_default(names, kwargs)
+ setattr(self.logging_defaults, name.replace("-", "_"), default)
+ if group is not None:
+ group.add_argument(*names, **kwargs)
+
+
+ def add_logging_arguments(self, section = None):
+ """
+ Set up standard logging-related arguments. This can be called
+ even when we're not going to parse the command line (eg,
+ because we're a WSGI app and therefore don't have a command
+ line), to handle whacking arguments from the config file into
+ the format that the logging setup code expects to see.
+ """
+
+ self.logging_defaults = argparse.Namespace(
+ default_log_destination = None)
+
+ if self.argparser is not None:
+ self.argparser.set_defaults(
+ default_log_destination = None)
+
+ class non_negative_integer(int):
+ def __init__(self, value):
+ if self < 0:
+ raise ValueError
+
+ class positive_integer(int):
+ def __init__(self, value):
+ if self <= 0:
+ raise ValueError
+
+ if self.argparser is None:
+ limit_group = None
+ else:
+ limit_group = self.argparser.add_mutually_exclusive_group()
+
+ self._add_logging_argument(
+ "--log-level",
+ default = "warning",
+ choices = ("debug", "info", "warning", "error", "critical"),
+ help = "how verbosely to log")
+
+ self._add_logging_argument(
+ "--log-destination",
+ choices = ("syslog", "stdout", "stderr", "file"),
+ help = "logging mechanism to use")
+
+ self._add_logging_argument(
+ "--log-filename",
+ help = "where to log when log destination is \"file\"")
+
+ self._add_logging_argument(
+ "--log-facility",
+ default = "daemon",
+ choices = sorted(logging.handlers.SysLogHandler.facility_names.keys()),
+ help = "syslog facility to use when log destination is \"syslog\"")
+
+ self._add_logging_argument(
+ "--log-count",
+ default = "7",
+ type = positive_integer,
+            help = "how many logs to keep when rotating for log destination \"file\"")
+
+ self._add_logging_argument(
+ "--log-size-limit",
+ group = limit_group,
+ default = 0,
+ type = non_negative_integer,
+ help = "size in kbytes after which to rotate log for destination \"file\"")
+
+ self._add_logging_argument(
+ "--log-time-limit",
+ group = limit_group,
+ default = 0,
+ type = non_negative_integer,
+ help = "hours after which to rotate log for destination \"file\"")
+
+
+ def configure_logging(self, args = None, ident = None):
+ """
+ Configure the logging system, using information from both the
+ config file and the command line; if this particular program
+ doesn't use the command line (eg, a WSGI app), we just use the
+ config file.
+ """
+
+ if self.logging_defaults is None:
+ self.add_logging_arguments()
+
+ if args is None:
+ args = self.logging_defaults
+
+ log_level = getattr(logging, args.log_level.upper())
+
+ log_destination = args.log_destination or args.default_log_destination or "stderr"
+
+ if log_destination == "stderr":
+ log_handler = logging.StreamHandler(
+ stream = sys.stderr)
+
+ elif log_destination == "stdout":
+ log_handler = logging.StreamHandler(
+ stream = sys.stdout)
+
+ elif log_destination == "syslog":
+ log_handler = logging.handlers.SysLogHandler(
+ address = ("/dev/log" if os.path.exists("/dev/log")
+ else ("localhost", logging.handlers.SYSLOG_UDP_PORT)),
+ facility = logging.handlers.SysLogHandler.facility_names[args.log_facility])
+
+ elif log_destination == "file" and (args.log_size_limit == 0 and
+ args.log_time_limit == 0):
+ log_handler = logging.handlers.WatchedFileHandler(
+ filename = args.log_filename)
+
+ elif log_destination == "file" and args.log_time_limit == 0:
+ log_handler = logging.handlers.RotatingFileHandler(
+ filename = args.log_filename,
+ maxBytes = args.log_size_limit * 1024,
+ backupCount = args.log_count)
+
+ elif log_destination == "file" and args.log_size_limit == 0:
+ log_handler = logging.handlers.TimedRotatingFileHandler(
+ filename = args.log_filename,
+ interval = args.log_time_limit,
+ backupCount = args.log_count,
+ when = "H",
+ utc = True)
+
+ else:
+ raise ValueError
+
+ if ident is None:
+ ident = os.path.basename(sys.argv[0])
+
+ log_handler.setFormatter(Formatter(ident, log_handler, log_level))
+
+ root_logger = logging.getLogger()
+ root_logger.addHandler(log_handler)
+ root_logger.setLevel(log_level)
+
+
+ def set_global_flags(self):
+ """
+ Consolidated control for all the little global control flags
+ scattered through the libraries. This isn't a particularly good
+ place for this function to live, but it has to live somewhere and
+ making it a method of the config parser from which it gets all of
+ its data is less silly than the available alternatives.
+ """
+
+ # pylint: disable=W0621
+ import rpki.x509
+ import rpki.daemonize
+
+ for line in self.multiget("configure_logger"):
+ try:
+ name, level = line.split()
+ logging.getLogger(name).setLevel(getattr(logging, level.upper()))
+ except Exception, e:
+ logger.warning("Could not process configure_logger line %r: %s", line, e)
+
+ try:
+ rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(
+ self.get("dump_outbound_cms"))
+ except OSError, e:
+ logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_outbound_cms"), e)
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(
+ self.get("dump_inbound_cms"))
+ except OSError, e:
+ logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_inbound_cms"), e)
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.check_inbound_schema = self.getboolean("check_inbound_schema")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.check_outbound_schema = self.getboolean("check_outbound_schema")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.daemonize.default_pid_directory = self.get("pid_directory")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.daemonize.pid_filename = self.get("pid_filename")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.generate_insecure_debug_only_rsa_key = rpki.x509.insecure_debug_only_rsa_key_generator(*self.get("insecure-debug-only-rsa-key-db").split())
+ except ConfigParser.NoOptionError:
+ pass
+ except:
+ logger.warning("insecure-debug-only-rsa-key-db configured but initialization failed, check for corrupted database file")
+
+ try:
+ rpki.up_down.content_type = self.get("up_down_content_type")
+ except ConfigParser.NoOptionError:
+ pass
+
+
+def argparser(section = None, doc = None, cfg_optional = False):
"""
- Get a long integer option, perhaps with a default value.
+ First cut at a combined configuration mechanism based on ConfigParser and argparse.
+
+ General idea here is to do an initial pass on the arguments to handle the config file,
+ then return the config file and a parser to use for the rest of the arguments.
"""
- return long(self.get(option, default, section))
- def set_global_flags(self):
+ # Basic approach here is a variation on:
+ # http://blog.vwelch.com/2011/04/combining-configparser-and-argparse.html
+
+ # For most of our uses of argparse, this should be a trivial
+ # drop-in, and should reduce the amount of repetitive code. There
+ # are a couple of special cases which will require attention:
+ #
+ # - rpki.rtr: The rpki-rtr modules have their own handling of all
+ # the logging setup, and use an argparse subparser. I -think-
+ # that the way they're already handling the logging setup should
+ # work fine, but there may be a few tricky bits reconciling the
+ # rpki-rtr logging setup with the generalized version in rpki.log.
+ #
+ # - rpki.rpkic: Use of argparse in rpkic is very complicated due to
+ # support for both the external command line and the internal
+ # command loop. Overall it works quite well, but the setup is
+ # tricky. rpki.rpkic.main.top_argparse may need to move outside
+ # the main class, but that may raise its own issues. Maybe we
+ # can get away with just replacing the current setup of
+ # top_argparser with a call to this function and otherwise
+ # leaving the whole structure alone? Try and see, I guess.
+
+ # Setting cfg_optional here doesn't really work, because the cfg
+ # object returned here is separate from the one that the Django
+ # ORM gets when it tries to look for databases. Given that just
+ # about everything which uses this module also uses Django,
+ # perhaps we should just resign ourselves to the config being a
+ # global thing we read exactly once, so we can stop playing this
+ # game.
+
+ topparser = argparse.ArgumentParser(add_help = False)
+ topparser.add_argument("-c", "--config",
+ default = os.getenv(rpki_conf_envname, default_filename),
+ help = "override default location of configuration file")
+
+ cfgparser = argparse.ArgumentParser(parents = [topparser], add_help = False)
+ cfgparser.add_argument("-h", "--help", action = "store_true")
+
+ args, remaining_argv = cfgparser.parse_known_args()
+
+ argparser = argparse.ArgumentParser(parents = [topparser], description = doc)
+
+ cfg = parser(section = section,
+ set_filename = args.config,
+ argparser = argparser,
+ allow_missing = cfg_optional or args.help)
+
+ return cfg
+
+
+class Formatter(object):
"""
- Consolidated control for all the little global control flags
- scattered through the libraries. This isn't a particularly good
- place for this function to live, but it has to live somewhere and
- making it a method of the config parser from which it gets all of
- its data is less silly than the available alternatives.
+ Reimplementation (easier than subclassing in this case) of
+ logging.Formatter.
+
+ It turns out that the logging code only cares about this class's
+ .format(record) method, everything else is internal; so long as
+ .format() converts a record into a properly formatted string, the
+ logging code is happy.
+
+ So, rather than mess around with dynamically constructing and
+ deconstructing and tweaking format strings and ten zillion options
+ we don't use, we just provide our own implementation that supports
+ what we do need.
"""
- # pylint: disable=W0621
- import rpki.http
- import rpki.x509
- import rpki.sql
- import rpki.async
- import rpki.log
- import rpki.daemonize
-
- for line in self.multiget("configure_logger"):
- try:
- name, level = line.split()
- logging.getLogger(name).setLevel(getattr(logging, level.upper()))
- except Exception, e:
- logger.warning("Could not process configure_logger line %r: %s", line, e)
-
- try:
- rpki.http.want_persistent_client = self.getboolean("want_persistent_client")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.http.want_persistent_server = self.getboolean("want_persistent_server")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.http.use_adns = self.getboolean("use_adns")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.http.enable_ipv6_clients = self.getboolean("enable_ipv6_clients")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.http.enable_ipv6_servers = self.getboolean("enable_ipv6_servers")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.sql.sql_persistent.sql_debug = self.getboolean("sql_debug")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.async.timer.gc_debug = self.getboolean("gc_debug")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.async.timer.run_debug = self.getboolean("timer_debug")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms"))
- except OSError, e:
- logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_outbound_cms"), e)
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms"))
- except OSError, e:
- logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_inbound_cms"), e)
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.XML_CMS_object.check_inbound_schema = self.getboolean("check_inbound_schema")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.XML_CMS_object.check_outbound_schema = self.getboolean("check_outbound_schema")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.async.gc_summary(self.getint("gc_summary"), self.getint("gc_summary_threshold", 0))
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.log.enable_tracebacks = self.getboolean("enable_tracebacks")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.daemonize.default_pid_directory = self.get("pid_directory")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.daemonize.pid_filename = self.get("pid_filename")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.generate_insecure_debug_only_rsa_key = rpki.x509.insecure_debug_only_rsa_key_generator(*self.get("insecure-debug-only-rsa-key-db").split())
- except ConfigParser.NoOptionError:
- pass
- except:
- logger.warning("insecure-debug-only-rsa-key-db configured but initialization failed, check for corrupted database file")
-
- try:
- rpki.up_down.content_type = self.get("up_down_content_type")
- except ConfigParser.NoOptionError:
- pass
+ converter = time.gmtime
+
+ def __init__(self, ident, handler, level):
+ self.ident = ident
+ self.is_syslog = isinstance(handler, logging.handlers.SysLogHandler)
+ self.debugging = level == logging.DEBUG
+
+ def format(self, record):
+ return "".join(self.coformat(record)).rstrip("\n")
+
+ def coformat(self, record):
+
+ try:
+ if not self.is_syslog:
+ yield time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime(record.created))
+ except:
+ yield "[$!$Time format failed]"
+
+ try:
+ yield "{}[{:d}]: ".format(self.ident, record.process)
+ except:
+ yield "[$!$ident format failed]"
+
+ try:
+ if isinstance(record.context, (str, unicode)):
+ yield record.context + " "
+ else:
+ yield repr(record.context) + " "
+ except AttributeError:
+ pass
+ except:
+ yield "[$!$context format failed]"
+
+ try:
+ yield record.getMessage()
+ except:
+ yield "[$!$record.getMessage() failed]"
+
+ try:
+ if record.exc_info:
+ if self.is_syslog or not self.debugging:
+ lines = traceback.format_exception_only(
+ record.exc_info[0], record.exc_info[1])
+ lines.insert(0, ": ")
+ else:
+ lines = traceback.format_exception(
+ record.exc_info[0], record.exc_info[1], record.exc_info[2])
+ lines.insert(0, "\n")
+ for line in lines:
+ yield line
+ except:
+ yield "[$!$exception formatting failed]"
diff --git a/rpki/csv_utils.py b/rpki/csv_utils.py
index 9ba04a02..5fa498a1 100644
--- a/rpki/csv_utils.py
+++ b/rpki/csv_utils.py
@@ -22,91 +22,99 @@ import csv
import os
class BadCSVSyntax(Exception):
- """
- Bad CSV syntax.
- """
+ """
+ Bad CSV syntax.
+ """
class csv_reader(object):
- """
- Reader for tab-delimited text that's (slightly) friendlier than the
- stock Python csv module (which isn't intended for direct use by
- humans anyway, and neither was this package originally, but that
- seems to be the way that it has evolved...).
-
- Columns parameter specifies how many columns users of the reader
- expect to see; lines with fewer columns will be padded with None
- values.
-
- Original API design for this class courtesy of Warren Kumari, but
- don't blame him if you don't like what I did with his ideas.
- """
-
- def __init__(self, filename, columns = None, min_columns = None, comment_characters = "#;"):
- assert columns is None or isinstance(columns, int)
- assert min_columns is None or isinstance(min_columns, int)
- if columns is not None and min_columns is None:
- min_columns = columns
- self.filename = filename
- self.columns = columns
- self.min_columns = min_columns
- self.comment_characters = comment_characters
- self.file = open(filename, "r")
-
- def __iter__(self):
- line_number = 0
- for line in self.file:
- line_number += 1
- line = line.strip()
- if not line or line[0] in self.comment_characters:
- continue
- fields = line.split()
- if self.min_columns is not None and len(fields) < self.min_columns:
- raise BadCSVSyntax("%s:%d: Not enough columns in line %r" % (self.filename, line_number, line))
- if self.columns is not None and len(fields) > self.columns:
- raise BadCSVSyntax("%s:%d: Too many columns in line %r" % (self.filename, line_number, line))
- if self.columns is not None and len(fields) < self.columns:
- fields += tuple(None for i in xrange(self.columns - len(fields)))
- yield fields
-
- def __enter__(self):
- return self
-
- def __exit__(self, _type, value, traceback):
- self.file.close()
+ """
+ Reader for tab-delimited text that's (slightly) friendlier than the
+ stock Python csv module (which isn't intended for direct use by
+ humans anyway, and neither was this package originally, but that
+ seems to be the way that it has evolved...).
+
+ Columns parameter specifies how many columns users of the reader
+ expect to see; lines with fewer columns will be padded with None
+ values.
+
+ Original API design for this class courtesy of Warren Kumari, but
+ don't blame him if you don't like what I did with his ideas.
+ """
+
+ def __init__(self, filename, columns = None, min_columns = None, comment_characters = "#;"):
+ assert columns is None or isinstance(columns, int)
+ assert min_columns is None or isinstance(min_columns, int)
+ if columns is not None and min_columns is None:
+ min_columns = columns
+ self.columns = columns
+ self.min_columns = min_columns
+ self.comment_characters = comment_characters
+ if isinstance(filename, (str, unicode)):
+ # Name of a file to open
+ self.filename = filename
+ self.file = open(filename, "r")
+ else:
+ # File-like object, already opened
+ self.filename = None
+ self.file = filename
+
+ def __iter__(self):
+ line_number = 0
+ for line in self.file:
+ line_number += 1
+ line = line.strip()
+ if not line or line[0] in self.comment_characters:
+ continue
+ fields = line.split()
+ if self.min_columns is not None and len(fields) < self.min_columns:
+ raise BadCSVSyntax("%s:%d: Not enough columns in line %r" % (self.filename, line_number, line))
+ if self.columns is not None and len(fields) > self.columns:
+ raise BadCSVSyntax("%s:%d: Too many columns in line %r" % (self.filename, line_number, line))
+ if self.columns is not None and len(fields) < self.columns:
+ fields += tuple(None for i in xrange(self.columns - len(fields)))
+ yield fields
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, _type, value, traceback):
+ self.file.close()
class csv_writer(object):
- """
- Writer object for tab delimited text. We just use the stock CSV
- module in excel-tab mode for this.
+ """
+ Writer object for tab delimited text. We just use the stock CSV
+ module in excel-tab mode for this.
- If "renmwo" is set (default), the file will be written to
- a temporary name and renamed to the real filename after closing.
- """
+ If "renmwo" is set (default), the file will be written to
+ a temporary name and renamed to the real filename after closing.
+ """
- def __init__(self, filename, renmwo = True):
- self.filename = filename
- self.renmwo = "%s.~renmwo%d~" % (filename, os.getpid()) if renmwo else filename
- self.file = open(self.renmwo, "w")
- self.writer = csv.writer(self.file, dialect = csv.get_dialect("excel-tab"))
+ def __init__(self, filename, renmwo = True):
+ self.filename = filename
+ self.renmwo = "%s.~renmwo%d~" % (filename, os.getpid()) if renmwo else filename
+ self.file = open(self.renmwo, "w")
+ self.writer = csv.writer(self.file, dialect = csv.get_dialect("excel-tab"))
- def __enter__(self):
- return self
+ def __enter__(self):
+ return self
- def __exit__(self, _type, value, traceback):
- self.close()
+ def __exit__(self, _type, value, traceback):
+ self.close()
- def close(self):
- """
- Close this writer.
- """
- if self.file is not None:
- self.file.close()
- self.file = None
- if self.filename != self.renmwo:
- os.rename(self.renmwo, self.filename)
+ def close(self):
+ """
+ Close this writer.
+ """
- def __getattr__(self, attr):
- """
- Fake inheritance from whatever object csv.writer deigns to give us.
- """
- return getattr(self.writer, attr)
+ if self.file is not None:
+ self.file.close()
+ self.file = None
+ if self.filename != self.renmwo:
+ os.rename(self.renmwo, self.filename)
+
+ def __getattr__(self, attr):
+ """
+ Fake inheritance from whatever object csv.writer deigns to give us.
+ """
+
+ return getattr(self.writer, attr)
diff --git a/rpki/daemonize.py b/rpki/daemonize.py
index 6a825566..5a1c3979 100644
--- a/rpki/daemonize.py
+++ b/rpki/daemonize.py
@@ -80,56 +80,57 @@ default_pid_directory = "/var/run/rpki"
pid_filename = None
def daemon(nochdir = False, noclose = False, pidfile = None):
- """
- Make this program become a daemon, like 4.4BSD daemon(3), and
- write its pid out to a file with cleanup on exit.
- """
-
- if pidfile is None:
- if pid_filename is None:
- prog = os.path.splitext(os.path.basename(sys.argv[0]))[0]
- pidfile = os.path.join(default_pid_directory, "%s.pid" % prog)
+ """
+ Make this program become a daemon, like 4.4BSD daemon(3), and
+ write its pid out to a file with cleanup on exit.
+ """
+
+ if pidfile is None:
+ if pid_filename is None:
+ prog = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+ pidfile = os.path.join(default_pid_directory, "%s.pid" % prog)
+ else:
+ pidfile = pid_filename
+
+ old_sighup_action = signal.signal(signal.SIGHUP, signal.SIG_IGN)
+
+ try:
+ pid = os.fork()
+ except OSError, e:
+ logging.fatal("fork() failed: %d (%s)", e.errno, e.strerror)
+ sys.exit(1)
else:
- pidfile = pid_filename
+ if pid > 0:
+ os._exit(0) # pylint: disable=W0212
- old_sighup_action = signal.signal(signal.SIGHUP, signal.SIG_IGN)
+ if not nochdir:
+ os.chdir("/")
- try:
- pid = os.fork()
- except OSError, e:
- sys.exit("fork() failed: %d (%s)" % (e.errno, e.strerror))
- else:
- if pid > 0:
- os._exit(0)
+ os.setsid()
- if not nochdir:
- os.chdir("/")
+ if not noclose:
+ sys.stdout.flush()
+ sys.stderr.flush()
+ fd = os.open(os.devnull, os.O_RDWR)
+ os.dup2(fd, 0)
+ os.dup2(fd, 1)
+ os.dup2(fd, 2)
+ if fd > 2:
+ os.close(fd)
- os.setsid()
+ signal.signal(signal.SIGHUP, old_sighup_action)
- if not noclose:
- sys.stdout.flush()
- sys.stderr.flush()
- fd = os.open(os.devnull, os.O_RDWR)
- os.dup2(fd, 0)
- os.dup2(fd, 1)
- os.dup2(fd, 2)
- if fd > 2:
- os.close(fd)
+ def delete_pid_file():
+ try:
+ os.unlink(pidfile)
+ except OSError:
+ pass
- signal.signal(signal.SIGHUP, old_sighup_action)
+ atexit.register(delete_pid_file)
- def delete_pid_file():
try:
- os.unlink(pidfile)
- except OSError:
- pass
-
- atexit.register(delete_pid_file)
-
- try:
- f = open(pidfile, "w")
- f.write("%d\n" % os.getpid())
- f.close()
- except IOError, e:
- logger.warning("Couldn't write PID file %s: %s", pidfile, e.strerror)
+ f = open(pidfile, "w")
+ f.write("%d\n" % os.getpid())
+ f.close()
+ except IOError, e:
+ logger.warning("Couldn't write PID file %s: %s", pidfile, e.strerror)
diff --git a/rpki/gui/cacheview/__init__.py b/rpki/django_settings/__init__.py
index e69de29b..e69de29b 100644
--- a/rpki/gui/cacheview/__init__.py
+++ b/rpki/django_settings/__init__.py
diff --git a/rpki/django_settings/common.py b/rpki/django_settings/common.py
new file mode 100644
index 00000000..2f41fe77
--- /dev/null
+++ b/rpki/django_settings/common.py
@@ -0,0 +1,125 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains common configuration settings for Django libraries.
+
+Most of our CA code uses at least the Django ORM; the web interface
+uses a lot more of Django. We also want to handle all normal user
+configuration via rpki.conf, so some of the code here is just pulling
+settings from rpki.conf and stuffing them into the form Django wants.
+"""
+
+__version__ = "$Id$"
+
+import os
+import rpki.config
+import rpki.autoconf
+
+# Some configuration, including SQL authorization, comes from rpki.conf.
+cfg = rpki.config.parser()
+
+
+# Do -not- turn on DEBUG here except for short-lived tests, otherwise
+# long-running programs like irdbd will eventually run out of memory
+# and crash. This is also why this is controlled by an environment
+# variable rather than by an rpki.conf setting: just because we want
+# debugging enabled in the GUI doesn't mean we want it in irdb.
+#
+# If you must enable debugging, you may need to add code that uses
+# django.db.reset_queries() to clear the query list manually, but it's
+# probably better just to run with debugging disabled, since that's
+# the expectation for production code.
+#
+# https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
+
+if os.getenv("RPKI_DJANGO_DEBUG") == "yes":
+ DEBUG = True
+
+
+# Database configuration differs from program to program, but includes
+# a lot of boilerplate. So we define a class here to handle this,
+# then use it and clean up in the modules that import from this one.
+
+class DatabaseConfigurator(object):
+
+ default_sql_engine = "mysql"
+ cfg = None
+ section = None
+
+ def configure(self, cfg, section): # pylint: disable=W0621
+ self.cfg = cfg
+ self.section = section
+ engine = cfg.get("sql-engine", section = section,
+ default = self.default_sql_engine)
+ return dict(
+ default = getattr(self, engine))
+
+ @property
+ def mysql(self):
+ return dict(
+ ENGINE = "django.db.backends.mysql",
+ NAME = cfg.get("sql-database", section = self.section),
+ USER = cfg.get("sql-username", section = self.section),
+ PASSWORD = cfg.get("sql-password", section = self.section),
+ #
+ # Using "latin1" here is totally evil and wrong, but
+ # without it MySQL 5.6 (and, probably, later versions)
+ # whine incessantly about bad UTF-8 characters in BLOB
+ # columns. Which makes no freaking sense at all, but this
+ # is MySQL, which has the character set management interface
+ # from hell, so good luck with that. If anybody really
+ # understands how to fix this, tell me; for now, we force
+ # MySQL to revert to the default behavior in MySQL 5.5.
+ #
+ OPTIONS = dict(charset = "latin1"))
+
+ @property
+ def sqlite3(self):
+ return dict(
+ ENGINE = "django.db.backends.sqlite3",
+ NAME = cfg.get("sql-database", section = self.section))
+
+ @property
+ def postgresql(self):
+ return dict(
+ ENGINE = "django.db.backends.postgresql_psycopg2",
+ NAME = cfg.get("sql-database", section = self.section),
+ USER = cfg.get("sql-username", section = self.section),
+ PASSWORD = cfg.get("sql-password", section = self.section))
+
+
+# Apps are also handled by the modules that import this one, now that
+# we don't require South.
+
+
+# Silence whining about MIDDLEWARE_CLASSES
+
+MIDDLEWARE_CLASSES = ()
+
+# That would be it if we just need the ORM, but Django throws a hissy
+# fit if SECRET_KEY isn't set, whether we use it for anything or not.
+#
+# Make this unique, and don't share it with anybody.
+if cfg.has_option("secret-key", section = "web_portal"):
+ SECRET_KEY = cfg.get("secret-key", section = "web_portal")
+else:
+ SECRET_KEY = os.urandom(66).encode("hex")
+
+
+# Django defaults to thinking everybody lives in Chicago.
+
+TIME_ZONE = "UTC"
diff --git a/rpki/django_settings/gui.py b/rpki/django_settings/gui.py
new file mode 100644
index 00000000..071d845f
--- /dev/null
+++ b/rpki/django_settings/gui.py
@@ -0,0 +1,159 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains GUI-specific configuration settings for Django libraries.
+"""
+
+# Pull in the irdb configuration, which in turn pulls in the common configuration.
+
+from .irdb import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+import socket
+
+# GUI uses the IRDB database configuration, so we don't need to set
+# anything here.
+
+# Where to put static files.
+STATIC_ROOT = rpki.autoconf.datarootdir + "/rpki/media"
+
+# Must end with a slash!
+STATIC_URL = "/media/"
+
+# Where to email server errors.
+ADMINS = (("Administrator", "root@localhost"),)
+
+LOGGING = {
+ "version": 1,
+ "formatters": {
+ "verbose": {
+ # see http://docs.python.org/2.7/library/logging.html#logging.LogRecord
+ "format": "%(levelname)s %(asctime)s %(name)s %(message)s"
+ },
+ },
+ "handlers": {
+ "stderr": {
+ "class": "logging.StreamHandler",
+ "level": "DEBUG",
+ "formatter": "verbose",
+ },
+ "mail_admins": {
+ "level": "ERROR",
+ "class": "django.utils.log.AdminEmailHandler",
+ },
+ },
+ "loggers": {
+ "django": {
+ "level": "ERROR",
+ "handlers": ["stderr", "mail_admins"],
+ },
+ "rpki.gui": {
+ "level": "WARNING",
+ "handlers": ["stderr"],
+ },
+ },
+}
+
+def select_tz():
+ "Find a supported timezone that looks like UTC"
+ for tz in ("UTC", "GMT", "Etc/UTC", "Etc/GMT"):
+ if os.path.exists("/usr/share/zoneinfo/" + tz):
+ return tz
+ # Can't determine the proper timezone, fall back to UTC and let Django
+ # report the error to the user.
+ return "UTC"
+
+# Local time zone for this installation. Choices can be found here:
+# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
+# although not all choices may be available on all operating systems.
+# If running in a Windows environment this must be set to the same as your
+# system time zone.
+TIME_ZONE = select_tz()
+
+# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
+# for details on why you might need this.
+def get_allowed_hosts():
+ allowed_hosts = set(cfg.multiget("allowed-hosts", section = "web_portal"))
+ allowed_hosts.add(socket.getfqdn())
+ allowed_hosts.add("127.0.0.1")
+ allowed_hosts.add("::1")
+ try:
+ import netifaces
+ for interface in netifaces.interfaces():
+ addresses = netifaces.ifaddresses(interface)
+ for af in (netifaces.AF_INET, netifaces.AF_INET6):
+ if af in addresses:
+ for address in addresses[af]:
+ if "addr" in address:
+ allowed_hosts.add(address["addr"])
+ except ImportError:
+ pass
+ return list(allowed_hosts)
+
+ALLOWED_HOSTS = get_allowed_hosts()
+
+DOWNLOAD_DIRECTORY = cfg.get("download-directory", "/var/tmp", section = "web_portal")
+
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+ "django.template.loaders.filesystem.Loader",
+ "django.template.loaders.app_directories.Loader",
+ "django.template.loaders.eggs.Loader"
+)
+
+MIDDLEWARE_CLASSES = (
+ "django.middleware.common.CommonMiddleware",
+ "django.contrib.sessions.middleware.SessionMiddleware",
+ "django.middleware.csrf.CsrfViewMiddleware",
+ "django.contrib.auth.middleware.AuthenticationMiddleware",
+ "django.contrib.messages.middleware.MessageMiddleware"
+)
+
+ROOT_URLCONF = "rpki.gui.urls"
+
+INSTALLED_APPS.extend((
+ "django.contrib.auth",
+ #"django.contrib.admin",
+ #"django.contrib.admindocs",
+ "django.contrib.contenttypes",
+ "django.contrib.sessions",
+ "django.contrib.staticfiles",
+ "rpki.gui.app",
+ "rpki.gui.gui_rpki_cache",
+ "rpki.gui.routeview",
+ "rpki.rcynicdb"
+))
+
+TEMPLATE_CONTEXT_PROCESSORS = (
+ "django.contrib.auth.context_processors.auth",
+ "django.core.context_processors.debug",
+ "django.core.context_processors.i18n",
+ "django.core.context_processors.media",
+ "django.contrib.messages.context_processors.messages",
+ "django.core.context_processors.request",
+ "django.core.context_processors.static"
+)
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/django_settings/irdb.py b/rpki/django_settings/irdb.py
new file mode 100644
index 00000000..da42a111
--- /dev/null
+++ b/rpki/django_settings/irdb.py
@@ -0,0 +1,47 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains configuration settings for Django libraries. All
+of the back-end programs (rpkic, irdbd, etc) use this configuration;
+the GUI code also uses this but adds a bunch of other stuff, thus has
+its own settings file.
+"""
+
+from .common import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+
+# Database configuration.
+
+DATABASES = DatabaseConfigurator().configure(cfg, "irdbd")
+del DatabaseConfigurator
+
+
+# Apps.
+
+INSTALLED_APPS = ["rpki.irdb"]
+
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/django_settings/pubd.py b/rpki/django_settings/pubd.py
new file mode 100644
index 00000000..6bd9fdc0
--- /dev/null
+++ b/rpki/django_settings/pubd.py
@@ -0,0 +1,45 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains configuration settings for Django libraries for
+the pubd program.
+"""
+
+from .common import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+
+# Database configuration.
+
+DATABASES = DatabaseConfigurator().configure(cfg, "pubd")
+del DatabaseConfigurator
+
+
+# Apps.
+
+INSTALLED_APPS = ["rpki.pubdb"]
+
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/django_settings/rcynic.py b/rpki/django_settings/rcynic.py
new file mode 100644
index 00000000..0845604c
--- /dev/null
+++ b/rpki/django_settings/rcynic.py
@@ -0,0 +1,68 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains configuration settings for Django libraries.
+At present, rcynicng only uses the Django ORM, not the rest of Django.
+Unlike the CA tools rcynicng defaults to using SQLite3 as its database
+engine, so we tweak the defaults a little before instantiating the
+database configuration here.
+"""
+
+from .common import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+
+# Database configuration.
+
+class DBConfigurator(DatabaseConfigurator):
+
+ default_sql_engine = "sqlite3"
+
+ @property
+ def sqlite3(self):
+ return dict(
+ ENGINE = "django.db.backends.sqlite3",
+ NAME = cfg.get("sql-database", section = self.section, default = "rcynic.db"))
+
+
+DATABASES = DBConfigurator().configure(cfg, "rcynic")
+
+del DBConfigurator
+del DatabaseConfigurator
+
+
+# Apps.
+
+INSTALLED_APPS = ["rpki.rcynicdb"]
+
+
+# Debugging
+#
+# DO NOT ENABLE DJANGO DEBUGGING IN PRODUCTION!
+#
+#DEBUG = True
+
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/django_settings/rpkid.py b/rpki/django_settings/rpkid.py
new file mode 100644
index 00000000..e34518bb
--- /dev/null
+++ b/rpki/django_settings/rpkid.py
@@ -0,0 +1,45 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains configuration settings for Django libraries for
+the rpkid program.
+"""
+
+from .common import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+
+# Database configuration.
+
+DATABASES = DatabaseConfigurator().configure(cfg, "rpkid")
+del DatabaseConfigurator
+
+
+# Apps.
+
+INSTALLED_APPS = ["rpki.rpkidb"]
+
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/exceptions.py b/rpki/exceptions.py
index 504c6f28..d66ad00c 100644
--- a/rpki/exceptions.py
+++ b/rpki/exceptions.py
@@ -22,346 +22,228 @@ Exception definitions for RPKI modules.
"""
class RPKI_Exception(Exception):
- """
- Base class for RPKI exceptions.
- """
+ "Base class for RPKI exceptions."
class NotInDatabase(RPKI_Exception):
- """
- Lookup failed for an object expected to be in the database.
- """
+ "Lookup failed for an object expected to be in the database."
class BadURISyntax(RPKI_Exception):
- """
- Illegal syntax for a URI.
- """
+ "Illegal syntax for a URI."
class BadStatusCode(RPKI_Exception):
- """
- Unrecognized protocol status code.
- """
+ "Unrecognized protocol status code."
class BadQuery(RPKI_Exception):
- """
- Unexpected protocol query.
- """
+ "Unexpected protocol query."
class DBConsistancyError(RPKI_Exception):
- """
- Found multiple matches for a database query that shouldn't ever
- return that.
- """
+ "Found multiple matches for a database query that shouldn't ever return that."
class CMSVerificationFailed(RPKI_Exception):
- """
- Verification of a CMS message failed.
- """
+ "Verification of a CMS message failed."
class HTTPRequestFailed(RPKI_Exception):
- """
- HTTP request failed.
- """
+ "HTTP request failed."
class DERObjectConversionError(RPKI_Exception):
- """
- Error trying to convert a DER-based object from one representation
- to another.
- """
+ "Error trying to convert a DER-based object from one representation to another."
class NotACertificateChain(RPKI_Exception):
- """
- Certificates don't form a proper chain.
- """
+ "Certificates don't form a proper chain."
class BadContactURL(RPKI_Exception):
- """
- Error trying to parse contact URL.
- """
+ "Error trying to parse contact URL."
class BadClassNameSyntax(RPKI_Exception):
- """
- Illegal syntax for a class_name.
- """
+ "Illegal syntax for a class_name."
class BadIssueResponse(RPKI_Exception):
- """
- issue_response PDU with wrong number of classes or certificates.
- """
+ "issue_response PDU with wrong number of classes or certificates."
class NotImplementedYet(RPKI_Exception):
- """
- Internal error -- not implemented yet.
- """
+ "Internal error -- not implemented yet."
class BadPKCS10(RPKI_Exception):
- """
- Bad PKCS #10 object.
- """
+ "Bad PKCS #10 object."
class UpstreamError(RPKI_Exception):
- """
- Received an error from upstream.
- """
+ "Received an error from upstream."
class ChildNotFound(RPKI_Exception):
- """
- Could not find specified child in database.
- """
+ "Could not find specified child in database."
class BSCNotFound(RPKI_Exception):
- """
- Could not find specified BSC in database.
- """
+ "Could not find specified BSC in database."
class BadSender(RPKI_Exception):
- """
- Unexpected XML sender value.
- """
+ "Unexpected XML sender value."
class ClassNameMismatch(RPKI_Exception):
- """
- class_name does not match child context.
- """
+ "class_name does not match child context."
class ClassNameUnknown(RPKI_Exception):
- """
- Unknown class_name.
- """
+ "Unknown class_name."
class SKIMismatch(RPKI_Exception):
- """
- SKI value in response does not match request.
- """
+ "SKI value in response does not match request."
class SubprocessError(RPKI_Exception):
- """
- Subprocess returned unexpected error.
- """
+ "Subprocess returned unexpected error."
class BadIRDBReply(RPKI_Exception):
- """
- Unexpected reply to IRDB query.
- """
+ "Unexpected reply to IRDB query."
class NotFound(RPKI_Exception):
- """
- Object not found in database.
- """
+ "Object not found in database."
class MustBePrefix(RPKI_Exception):
- """
- Resource range cannot be expressed as a prefix.
- """
+ "Resource range cannot be expressed as a prefix."
class TLSValidationError(RPKI_Exception):
- """
- TLS certificate validation error.
- """
+ "TLS certificate validation error."
class MultipleTLSEECert(TLSValidationError):
- """
- Received more than one TLS EE certificate.
- """
+ "Received more than one TLS EE certificate."
class ReceivedTLSCACert(TLSValidationError):
- """
- Received CA certificate via TLS.
- """
+ "Received CA certificate via TLS."
class WrongEContentType(RPKI_Exception):
- """
- Received wrong CMS eContentType.
- """
+ "Received wrong CMS eContentType."
class EmptyPEM(RPKI_Exception):
- """
- Couldn't find PEM block to convert.
- """
+ "Couldn't find PEM block to convert."
class UnexpectedCMSCerts(RPKI_Exception):
- """
- Received CMS certs when not expecting any.
- """
+ "Received CMS certs when not expecting any."
class UnexpectedCMSCRLs(RPKI_Exception):
- """
- Received CMS CRLs when not expecting any.
- """
+ "Received CMS CRLs when not expecting any."
class MissingCMSEEcert(RPKI_Exception):
- """
- Didn't receive CMS EE cert when expecting one.
- """
+ "Didn't receive CMS EE cert when expecting one."
class MissingCMSCRL(RPKI_Exception):
- """
- Didn't receive CMS CRL when expecting one.
- """
+ "Didn't receive CMS CRL when expecting one."
class UnparsableCMSDER(RPKI_Exception):
- """
- Alleged CMS DER wasn't parsable.
- """
+ "Alleged CMS DER wasn't parsable."
class CMSCRLNotSet(RPKI_Exception):
- """
- CMS CRL has not been configured.
- """
+ "CMS CRL has not been configured."
class ServerShuttingDown(RPKI_Exception):
- """
- Server is shutting down.
- """
+ "Server is shutting down."
class NoActiveCA(RPKI_Exception):
- """
- No active ca_detail for specified class.
- """
+ "No active ca_detail for specified class."
class BadClientURL(RPKI_Exception):
- """
- URL given to HTTP client does not match profile.
- """
+ "URL given to HTTP client does not match profile."
class ClientNotFound(RPKI_Exception):
- """
- Could not find specified client in database.
- """
+ "Could not find specified client in database."
class BadExtension(RPKI_Exception):
- """
- Forbidden X.509 extension.
- """
+ "Forbidden X.509 extension."
class ForbiddenURI(RPKI_Exception):
- """
- Forbidden URI, does not start with correct base URI.
- """
+ "Forbidden URI, does not start with correct base URI."
class HTTPClientAborted(RPKI_Exception):
- """
- HTTP client connection closed while in request-sent state.
- """
+ "HTTP client connection closed while in request-sent state."
class BadPublicationReply(RPKI_Exception):
- """
- Unexpected reply to publication query.
- """
+ "Unexpected reply to publication query."
class DuplicateObject(RPKI_Exception):
- """
- Attempt to create an object that already exists.
- """
+ "Attempt to create an object that already exists."
class EmptyROAPrefixList(RPKI_Exception):
- """
- Can't create ROA with an empty prefix list.
- """
+ "Can't create ROA with an empty prefix list."
class NoCoveringCertForROA(RPKI_Exception):
- """
- Couldn't find a covering certificate to generate ROA.
- """
+ "Couldn't find a covering certificate to generate ROA."
class BSCNotReady(RPKI_Exception):
- """
- BSC not yet in a usable state, signing_cert not set.
- """
+ "BSC not yet in a usable state, signing_cert not set."
class HTTPUnexpectedState(RPKI_Exception):
- """
- HTTP event occurred in an unexpected state.
- """
+ "HTTP event occurred in an unexpected state."
class HTTPBadVersion(RPKI_Exception):
- """
- HTTP couldn't parse HTTP version.
- """
+ "HTTP couldn't parse HTTP version."
class HandleTranslationError(RPKI_Exception):
- """
- Internal error translating protocol handle -> SQL id.
- """
+ "Internal error translating protocol handle -> SQL id."
class NoObjectAtURI(RPKI_Exception):
- """
- No object published at specified URI.
- """
+ "No object published at specified URI."
+
+class ExistingObjectAtURI(RPKI_Exception):
+ "An object has already been published at specified URI."
+
+class DifferentObjectAtURI(RPKI_Exception):
+ "An object with a different hash exists at specified URI."
class CMSContentNotSet(RPKI_Exception):
- """
- Inner content of a CMS_object has not been set. If object is known
- to be valid, the .extract() method should be able to set the
- content; otherwise, only the .verify() method (which checks
- signatures) is safe.
- """
+ """
+ Inner content of a CMS_object has not been set. If object is known
+ to be valid, the .extract() method should be able to set the
+ content; otherwise, only the .verify() method (which checks
+ signatures) is safe.
+ """
class HTTPTimeout(RPKI_Exception):
- """
- HTTP connection timed out.
- """
+ "HTTP connection timed out."
class BadIPResource(RPKI_Exception):
- """
- Parse failure for alleged IP resource string.
- """
+ "Parse failure for alleged IP resource string."
class BadROAPrefix(RPKI_Exception):
- """
- Parse failure for alleged ROA prefix string.
- """
+ "Parse failure for alleged ROA prefix string."
class CommandParseFailure(RPKI_Exception):
- """
- Failed to parse command line.
- """
+ "Failed to parse command line."
class CMSCertHasExpired(RPKI_Exception):
- """
- CMS certificate has expired.
- """
+ "CMS certificate has expired."
class TrustedCMSCertHasExpired(RPKI_Exception):
- """
- Trusted CMS certificate has expired.
- """
+ "Trusted CMS certificate has expired."
class MultipleCMSEECert(RPKI_Exception):
- """
- Can't have more than one CMS EE certificate in validation chain.
- """
+ "Can't have more than one CMS EE certificate in validation chain."
class ResourceOverlap(RPKI_Exception):
- """
- Overlapping resources in resource_set.
- """
+ "Overlapping resources in resource_set."
class CMSReplay(RPKI_Exception):
- """
- Possible CMS replay attack detected.
- """
+ "Possible CMS replay attack detected."
class PastNotAfter(RPKI_Exception):
- """
- Requested notAfter value is already in the past.
- """
+ "Requested notAfter value is already in the past."
class NullValidityInterval(RPKI_Exception):
- """
- Requested validity interval is null.
- """
+ "Requested validity interval is null."
class BadX510DN(RPKI_Exception):
- """
- X.510 distinguished name does not match profile.
- """
+ "X.510 distinguished name does not match profile."
class BadAutonomousSystemNumber(RPKI_Exception):
- """
- Bad AutonomousSystem number.
- """
+ "Bad AutonomousSystem number."
class WrongEKU(RPKI_Exception):
- """
- Extended Key Usage extension does not match profile.
- """
+ "Extended Key Usage extension does not match profile."
+
+class UnexpectedUpDownResponse(RPKI_Exception):
+ "Up-down message is not of the expected type."
+
+class BadContentType(RPKI_Exception):
+ "Bad HTTP Content-Type."
+
+class ResourceClassMismatch(RPKI_Exception):
+ "Up-down resource class does not match."
+
+class IRDBExpired(RPKI_Exception):
+ "Back-end database record has expired."
diff --git a/rpki/fields.py b/rpki/fields.py
new file mode 100644
index 00000000..6a2dc4d0
--- /dev/null
+++ b/rpki/fields.py
@@ -0,0 +1,205 @@
+# $Id$
+#
+# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2011--2012 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Common Django ORM field classes.
+
+Many of these are complex ASN.1 DER objects stored as binary data,
+since the only sane text representation would just be the Base64
+encoding of the DER and thus would add no value.
+"""
+
+import logging
+
+from django.db import models
+
+import rpki.x509
+import rpki.sundial
+
+logger = logging.getLogger(__name__)
+
+
+class EnumField(models.PositiveSmallIntegerField):
+ """
+ An enumeration type that uses strings in Python and small integers
+ in SQL.
+ """
+
+ description = "An enumeration type"
+
+ def __init__(self, *args, **kwargs):
+ if isinstance(kwargs.get("choices"), (tuple, list)) and isinstance(kwargs["choices"][0], (str, unicode)):
+ kwargs["choices"] = tuple(enumerate(kwargs["choices"], 1))
+ # Might need something here to handle string-valued default parameter
+ models.PositiveSmallIntegerField.__init__(self, *args, **kwargs)
+ self.enum_i2s = dict(self.flatchoices)
+ self.enum_s2i = dict((v, k) for k, v in self.flatchoices)
+
+ def from_db_value(self, value, expression, connection, context):
+ return self.enum_i2s.get(value, value)
+
+ def to_python(self, value):
+ value = super(EnumField, self).to_python(value)
+ return self.enum_i2s.get(value, value)
+
+ def get_prep_value(self, value):
+ return self.enum_s2i.get(value, value)
+
+
+class SundialField(models.DateTimeField):
+ """
+ A field type for our customized datetime objects.
+ """
+
+ description = "A datetime type using our customized datetime objects"
+
+ def from_db_value(self, value, expression, connection, context):
+ return self.to_python(value)
+
+ def to_python(self, value):
+ if isinstance(value, rpki.sundial.pydatetime.datetime):
+ return rpki.sundial.datetime.from_datetime(
+ models.DateTimeField.to_python(self, value))
+ else:
+ return value
+
+ def get_prep_value(self, value):
+ if isinstance(value, rpki.sundial.datetime):
+ return value.to_datetime()
+ else:
+ return value
+
+
+class BlobField(models.Field):
+ """
+ Old BLOB field type, predating Django's BinaryField type.
+
+    Do not use, this is only here for backwards compatibility during migrations.
+ """
+
+ description = "Raw BLOB type without ASN.1 encoding/decoding"
+
+ def __init__(self, *args, **kwargs):
+ self.blob_type = kwargs.pop("blob_type", None)
+ kwargs["serialize"] = False
+ kwargs["blank"] = True
+ kwargs["default"] = None
+ models.Field.__init__(self, *args, **kwargs)
+
+ def deconstruct(self):
+ name, path, args, kwargs = super(BlobField, self).deconstruct()
+ del kwargs["serialize"]
+ del kwargs["blank"]
+ del kwargs["default"]
+ return name, path, args, kwargs
+
+ def db_type(self, connection):
+ if self.blob_type is not None:
+ return self.blob_type
+ elif connection.settings_dict['ENGINE'] == "django.db.backends.mysql":
+ return "LONGBLOB"
+        elif connection.settings_dict['ENGINE'] == "django.db.backends.postgresql":
+ return "bytea"
+ else:
+ return "BLOB"
+
+
+# For reasons which now escape me, I had a few fields in the old
+# hand-coded SQL which used MySQL type BINARY(20) to hold SKIs.
+# Presumably this was so that I could then use those SKIs in indexes
+# and searches, but apparently I never got around to that part.
+#
+# SKIs probably would be better stored as hex strings anyway, so not
+# bothering with a separate binary type model for this. Deal with
+# this if and when it ever becomes an issue.
+
+
+# DERField used to be a subclass of BlobField. Try changing it to be
+# a subclass of BinaryField instead, leave BlobField (for now) for
+# backwards compatibility during migrations.
+
+class DERField(models.BinaryField):
+ """
+ Field class for DER objects, with automatic translation between
+ ASN.1 and Python types. This is an abstract class, concrete field
+ classes are derived from it.
+ """
+
+ rpki_type = rpki.x509.DER_object
+
+ def __init__(self, *args, **kwargs):
+ kwargs["blank"] = True
+ kwargs["default"] = None
+ super(DERField, self).__init__(*args, **kwargs)
+
+ def deconstruct(self):
+ name, path, args, kwargs = super(DERField, self).deconstruct()
+ del kwargs["blank"]
+ del kwargs["default"]
+ return name, path, args, kwargs
+
+ def from_db_value(self, value, expression, connection, context):
+ if value is not None:
+ value = self.rpki_type(DER = str(value))
+ return value
+
+ def to_python(self, value):
+ value = super(DERField, self).to_python(value)
+ if value is not None and not isinstance(value, self.rpki_type):
+ value = self.rpki_type(DER = str(value))
+ return value
+
+ def get_prep_value(self, value):
+ if value is not None:
+ value = value.get_DER()
+ return super(DERField, self).get_prep_value(value)
+
+
+class CertificateField(DERField):
+ description = "X.509 certificate"
+ rpki_type = rpki.x509.X509
+
+class RSAPrivateKeyField(DERField):
+ description = "RSA keypair"
+ rpki_type = rpki.x509.RSA
+
+KeyField = RSAPrivateKeyField
+
+class PublicKeyField(DERField):
+ description = "RSA keypair"
+ rpki_type = rpki.x509.PublicKey
+
+class CRLField(DERField):
+ description = "Certificate Revocation List"
+ rpki_type = rpki.x509.CRL
+
+class PKCS10Field(DERField):
+ description = "PKCS #10 certificate request"
+ rpki_type = rpki.x509.PKCS10
+
+class ManifestField(DERField):
+ description = "RPKI Manifest"
+ rpki_type = rpki.x509.SignedManifest
+
+class ROAField(DERField):
+ description = "ROA"
+ rpki_type = rpki.x509.ROA
+
+class GhostbusterField(DERField):
+ description = "Ghostbuster Record"
+ rpki_type = rpki.x509.Ghostbuster
diff --git a/rpki/gui/app/check_expired.py b/rpki/gui/app/check_expired.py
index a084af79..65f4315f 100644
--- a/rpki/gui/app/check_expired.py
+++ b/rpki/gui/app/check_expired.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2012, 2013, 2014 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2013, 2014, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -21,13 +21,14 @@ from cStringIO import StringIO
import logging
import datetime
-from rpki.gui.cacheview.models import Cert
+from rpki.gui.gui_rpki_cache.models import Cert
from rpki.gui.app.models import Conf, ResourceCert, Timestamp, Alert
from rpki.gui.app.glue import list_received_resources
from rpki.irdb import Zookeeper
-from rpki.left_right import report_error_elt, list_published_objects_elt
from rpki.x509 import X509
+from rpki.left_right import version, nsmap, tag_msg, tag_list_published_objects
+from lxml.etree import Element, SubElement
from django.core.mail import send_mail
logger = logging.getLogger(__name__)
@@ -41,8 +42,8 @@ def check_cert(handle, p, errs):
The displayed object name defaults to the class name, but can be overridden
using the `object_name` argument.
-
"""
+
t = p.certificate.getNotAfter()
if t <= expire_time:
e = 'expired' if t <= now else 'will expire'
@@ -60,8 +61,8 @@ def check_expire(conf, errs):
# get certs for `handle'
cert_set = ResourceCert.objects.filter(conf=conf)
for cert in cert_set:
- # look up cert in cacheview db
- obj_set = Cert.objects.filter(repo__uri=cert.uri)
+ # look up cert in gui_rpki_cache db
+ obj_set = Cert.objects.filter(uri=cert.uri)
if not obj_set:
# since the <list_received_resources/> output is cached, this can
# occur if the cache is out of date as well..
@@ -76,7 +77,7 @@ def check_expire(conf, errs):
f = '*'
else:
f = ' '
- msg.append("%s [%d] uri=%s ski=%s name=%s expires=%s" % (f, n, c.repo.uri, c.keyid, c.name, c.not_after))
+ msg.append("%s [%d] uri=%s expires=%s" % (f, n, c.uri, c.not_after))
# find ghostbuster records attached to this cert
for gbr in c.ghostbusters.all():
@@ -102,30 +103,26 @@ def check_expire(conf, errs):
def check_child_certs(conf, errs):
"""Fetch the list of published objects from rpkid, and inspect the issued
resource certs (uri ending in .cer).
-
"""
+
z = Zookeeper(handle=conf.handle)
- req = list_published_objects_elt.make_pdu(action="list",
- tag="list_published_objects",
- self_handle=conf.handle)
+ req = Element(tag_msg, nsmap=nsmap, type="query", version=version)
+ SubElement(req, tag_list_published_objects,
+ tag="list_published_objects", tenant_handle=conf.handle)
pdus = z.call_rpkid(req)
for pdu in pdus:
- if isinstance(pdu, report_error_elt):
- logger.error("rpkid reported an error: %s", pdu.error_code)
- elif isinstance(pdu, list_published_objects_elt):
- if pdu.uri.endswith('.cer'):
- cert = X509()
- cert.set(Base64=pdu.obj)
- t = cert.getNotAfter()
- if t <= expire_time:
- e = 'expired' if t <= now else 'will expire'
- errs.write("%(handle)s's rescert for Child %(child)s %(expire)s on %(date)s uri=%(uri)s subject=%(subject)s\n" % {
- 'handle': conf.handle,
- 'child': pdu.child_handle,
- 'uri': pdu.uri,
- 'subject': cert.getSubject(),
- 'expire': e,
- 'date': t})
+ if pdu.get("uri").endswith('.cer'):
+ cert = X509(Base64=pdu.text)
+ t = cert.getNotAfter()
+ if t <= expire_time:
+ e = 'expired' if t <= now else 'will expire'
+ errs.write("%(handle)s's rescert for Child %(child)s %(expire)s on %(date)s uri=%(uri)s subject=%(subject)s\n" % {
+ 'handle': conf.handle,
+ 'child': pdu.get("child_handle"),
+ 'uri': pdu.get("uri"),
+ 'subject': cert.getSubject(),
+ 'expire': e,
+ 'date': t})
class NetworkError(Exception):
@@ -139,8 +136,8 @@ def notify_expired(expire_days=14, from_email=None):
expire_days: the number of days ahead of today to warn
from_email: set the From: address for the email
-
"""
+
global expire_time # so i don't have to pass it around
global now
diff --git a/rpki/gui/app/forms.py b/rpki/gui/app/forms.py
index a1214297..4a95c8da 100644
--- a/rpki/gui/app/forms.py
+++ b/rpki/gui/app/forms.py
@@ -170,105 +170,105 @@ def ROARequestFormFactory(conf):
"""
class Cls(forms.Form):
- """Form for entering a ROA request.
-
- Handles both IPv4 and IPv6."""
-
- prefix = forms.CharField(
- widget=forms.TextInput(attrs={
- 'autofocus': 'true', 'placeholder': 'Prefix',
- 'class': 'span4'
- })
- )
- max_prefixlen = forms.CharField(
- required=False,
- widget=forms.TextInput(attrs={
- 'placeholder': 'Max len',
- 'class': 'span1'
- })
- )
- asn = forms.IntegerField(
- widget=forms.TextInput(attrs={
- 'placeholder': 'ASN',
- 'class': 'span1'
- })
- )
+ """Form for entering a ROA request.
+
+ Handles both IPv4 and IPv6."""
+
+ prefix = forms.CharField(
+ widget=forms.TextInput(attrs={
+ 'autofocus': 'true', 'placeholder': 'Prefix',
+ 'class': 'span4'
+ })
+ )
+ max_prefixlen = forms.CharField(
+ required=False,
+ widget=forms.TextInput(attrs={
+ 'placeholder': 'Max len',
+ 'class': 'span1'
+ })
+ )
+ asn = forms.IntegerField(
+ widget=forms.TextInput(attrs={
+ 'placeholder': 'ASN',
+ 'class': 'span1'
+ })
+ )
protect_children = forms.BooleanField(required=False)
- def __init__(self, *args, **kwargs):
- kwargs['auto_id'] = False
- super(Cls, self).__init__(*args, **kwargs)
- self.conf = conf # conf is the arg to ROARequestFormFactory
- self.inline = True
- self.use_table = False
-
- def _as_resource_range(self):
- """Convert the prefix in the form to a
- rpki.resource_set.resource_range_ip object.
-
- If there is no mask provided, assume the closest classful mask.
-
- """
- prefix = self.cleaned_data.get('prefix')
- if '/' not in prefix:
- p = IPAddress(prefix)
-
- # determine the first nonzero bit starting from the lsb and
- # subtract from the address size to find the closest classful
- # mask that contains this single address
- prefixlen = 0
- while (p != 0) and (p & 1) == 0:
- prefixlen = prefixlen + 1
- p = p >> 1
- mask = p.bits - (8 * (prefixlen / 8))
- prefix = prefix + '/' + str(mask)
-
- return resource_range_ip.parse_str(prefix)
-
- def clean_asn(self):
- value = self.cleaned_data.get('asn')
- if value < 0:
- raise forms.ValidationError('AS must be a positive value or 0')
- return value
-
- def clean_prefix(self):
- try:
- r = self._as_resource_range()
- except:
- raise forms.ValidationError('invalid prefix')
-
- manager = models.ResourceRangeAddressV4 if r.version == 4 else models.ResourceRangeAddressV6
- if not manager.objects.filter(cert__conf=self.conf,
- prefix_min__lte=r.min,
- prefix_max__gte=r.max).exists():
- raise forms.ValidationError('prefix is not allocated to you')
- return str(r)
-
- def clean_max_prefixlen(self):
- v = self.cleaned_data.get('max_prefixlen')
- if v:
- if v[0] == '/':
- v = v[1:] # allow user to specify /24
- try:
- if int(v) < 0:
- raise forms.ValidationError('max prefix length must be positive or 0')
- except ValueError:
- raise forms.ValidationError('invalid integer value')
- return v
-
- def clean(self):
- if 'prefix' in self.cleaned_data:
- r = self._as_resource_range()
- max_prefixlen = self.cleaned_data.get('max_prefixlen')
- max_prefixlen = int(max_prefixlen) if max_prefixlen else r.prefixlen()
- if max_prefixlen < r.prefixlen():
- raise forms.ValidationError(
- 'max prefix length must be greater than or equal to the prefix length')
- if max_prefixlen > r.min.bits:
- raise forms.ValidationError(
- 'max prefix length (%d) is out of range for IP version (%d)' % (max_prefixlen, r.min.bits))
- self.cleaned_data['max_prefixlen'] = str(max_prefixlen)
- return self.cleaned_data
+ def __init__(self, *args, **kwargs):
+ kwargs['auto_id'] = False
+ super(Cls, self).__init__(*args, **kwargs)
+ self.conf = conf # conf is the arg to ROARequestFormFactory
+ self.inline = True
+ self.use_table = False
+
+ def _as_resource_range(self):
+ """Convert the prefix in the form to a
+ rpki.resource_set.resource_range_ip object.
+
+ If there is no mask provided, assume the closest classful mask.
+
+ """
+ prefix = self.cleaned_data.get('prefix')
+ if '/' not in prefix:
+ p = IPAddress(prefix)
+
+ # determine the first nonzero bit starting from the lsb and
+ # subtract from the address size to find the closest classful
+ # mask that contains this single address
+ prefixlen = 0
+ while (p != 0) and (p & 1) == 0:
+ prefixlen = prefixlen + 1
+ p = p >> 1
+ mask = p.bits - (8 * (prefixlen / 8))
+ prefix = prefix + '/' + str(mask)
+
+ return resource_range_ip.parse_str(prefix)
+
+ def clean_asn(self):
+ value = self.cleaned_data.get('asn')
+ if value < 0:
+ raise forms.ValidationError('AS must be a positive value or 0')
+ return value
+
+ def clean_prefix(self):
+ try:
+ r = self._as_resource_range()
+ except:
+ raise forms.ValidationError('invalid prefix')
+
+ manager = models.ResourceRangeAddressV4 if r.version == 4 else models.ResourceRangeAddressV6
+ if not manager.objects.filter(cert__conf=self.conf,
+ prefix_min__lte=r.min,
+ prefix_max__gte=r.max).exists():
+ raise forms.ValidationError('prefix is not allocated to you')
+ return str(r)
+
+ def clean_max_prefixlen(self):
+ v = self.cleaned_data.get('max_prefixlen')
+ if v:
+ if v[0] == '/':
+ v = v[1:] # allow user to specify /24
+ try:
+ if int(v) < 0:
+ raise forms.ValidationError('max prefix length must be positive or 0')
+ except ValueError:
+ raise forms.ValidationError('invalid integer value')
+ return v
+
+ def clean(self):
+ if 'prefix' in self.cleaned_data:
+ r = self._as_resource_range()
+ max_prefixlen = self.cleaned_data.get('max_prefixlen')
+ max_prefixlen = int(max_prefixlen) if max_prefixlen else r.prefixlen()
+ if max_prefixlen < r.prefixlen():
+ raise forms.ValidationError(
+ 'max prefix length must be greater than or equal to the prefix length')
+ if max_prefixlen > r.min.bits:
+ raise forms.ValidationError(
+ 'max prefix length (%d) is out of range for IP version (%d)' % (max_prefixlen, r.min.bits))
+ self.cleaned_data['max_prefixlen'] = str(max_prefixlen)
+ return self.cleaned_data
return Cls
diff --git a/rpki/gui/app/glue.py b/rpki/gui/app/glue.py
index a2dddb51..745638c4 100644
--- a/rpki/gui/app/glue.py
+++ b/rpki/gui/app/glue.py
@@ -16,7 +16,6 @@
"""
This file contains code that interfaces between the django views implementing
the portal gui and the rpki.* modules.
-
"""
from __future__ import with_statement
@@ -28,17 +27,22 @@ from datetime import datetime
from rpki.resource_set import (resource_set_as, resource_set_ipv4,
resource_set_ipv6, resource_range_ipv4,
resource_range_ipv6)
-from rpki.left_right import list_received_resources_elt, report_error_elt
from rpki.irdb.zookeeper import Zookeeper
from rpki.gui.app import models
from rpki.exceptions import BadIPResource
+from rpki.left_right import nsmap, version, tag_msg, tag_list_received_resources
+from lxml.etree import Element, SubElement
from django.contrib.auth.models import User
-from django.db.transaction import commit_on_success
+from django.db.transaction import atomic
+
+import logging
+logger = logging.getLogger(__name__)
def ghostbuster_to_vcard(gbr):
"""Convert a GhostbusterRequest object into a vCard object."""
+
import vobject
vcard = vobject.vCard()
@@ -66,19 +70,7 @@ def ghostbuster_to_vcard(gbr):
return vcard.serialize()
-class LeftRightError(Exception):
- """Class for wrapping report_error_elt errors from Zookeeper.call_rpkid().
-
- It expects a single argument, which is the associated report_error_elt instance."""
-
- def __str__(self):
- return 'Error occurred while communicating with rpkid: handle=%s code=%s text=%s' % (
- self.args[0].self_handle,
- self.args[0].error_code,
- self.args[0].error_text)
-
-
-@commit_on_success
+@atomic
def list_received_resources(log, conf):
"""
Query rpkid for this resource handle's received resources.
@@ -86,11 +78,18 @@ def list_received_resources(log, conf):
The semantics are to clear the entire table and populate with the list of
certs received. Other models should not reference the table directly with
foreign keys.
-
"""
z = Zookeeper(handle=conf.handle, disable_signal_handlers=True)
- pdus = z.call_rpkid(list_received_resources_elt.make_pdu(self_handle=conf.handle))
+ req = Element(tag_msg, nsmap=nsmap, type="query", version=version)
+ SubElement(req, tag_list_received_resources, tenant_handle=conf.handle, tag=conf.handle)
+ try:
+ pdus = z.call_rpkid(req)
+ except Exception as err:
+ logger.error('caught exception while attempting to query rpkid')
+ logger.exception(err)
+ return
+
# pdus is sometimes None (see https://trac.rpki.net/ticket/681)
if pdus is None:
print >>log, 'error: call_rpkid() returned None for handle %s when fetching received resources' % conf.handle
@@ -99,34 +98,27 @@ def list_received_resources(log, conf):
models.ResourceCert.objects.filter(conf=conf).delete()
for pdu in pdus:
- if isinstance(pdu, report_error_elt):
- # this will cause the db to be rolled back so the above delete()
- # won't clobber existing resources
- raise LeftRightError(pdu)
- elif isinstance(pdu, list_received_resources_elt):
- if pdu.parent_handle != conf.handle:
- parent = models.Parent.objects.get(issuer=conf,
- handle=pdu.parent_handle)
- else:
- # root cert, self-signed
- parent = None
-
- not_before = datetime.strptime(pdu.notBefore, "%Y-%m-%dT%H:%M:%SZ")
- not_after = datetime.strptime(pdu.notAfter, "%Y-%m-%dT%H:%M:%SZ")
-
- cert = models.ResourceCert.objects.create(
- conf=conf, parent=parent, not_before=not_before,
- not_after=not_after, uri=pdu.uri)
-
- for asn in resource_set_as(pdu.asn):
- cert.asn_ranges.create(min=asn.min, max=asn.max)
-
- for rng in resource_set_ipv4(pdu.ipv4):
- cert.address_ranges.create(prefix_min=rng.min,
- prefix_max=rng.max)
-
- for rng in resource_set_ipv6(pdu.ipv6):
- cert.address_ranges_v6.create(prefix_min=rng.min,
- prefix_max=rng.max)
+ if pdu.get("parent_handle") != conf.handle:
+ parent = models.Parent.objects.get(issuer=conf,
+ handle=pdu.get("parent_handle"))
else:
- print >>log, "error: unexpected pdu from rpkid type=%s" % type(pdu)
+ # root cert, self-signed
+ parent = None
+
+ not_before = datetime.strptime(pdu.get("notBefore"), "%Y-%m-%dT%H:%M:%SZ")
+ not_after = datetime.strptime(pdu.get("notAfter"), "%Y-%m-%dT%H:%M:%SZ")
+
+ cert = models.ResourceCert.objects.create(
+ conf=conf, parent=parent, not_before=not_before,
+ not_after=not_after, uri=pdu.get("uri"))
+
+ for asn in resource_set_as(pdu.get("asn")):
+ cert.asn_ranges.create(min=asn.min, max=asn.max)
+
+ for rng in resource_set_ipv4(pdu.get("ipv4")):
+ cert.address_ranges.create(prefix_min=rng.min,
+ prefix_max=rng.max)
+
+ for rng in resource_set_ipv6(pdu.get("ipv6")):
+ cert.address_ranges_v6.create(prefix_min=rng.min,
+ prefix_max=rng.max)
diff --git a/rpki/gui/app/migrations/0001_initial.py b/rpki/gui/app/migrations/0001_initial.py
index 80877901..79d21324 100644
--- a/rpki/gui/app/migrations/0001_initial.py
+++ b/rpki/gui/app/migrations/0001_initial.py
@@ -1,192 +1,249 @@
# -*- coding: utf-8 -*-
-import datetime
-from south.db import db
-from south.v2 import SchemaMigration
-from django.db import models
-
-
-class Migration(SchemaMigration):
-
- def forwards(self, orm):
- # Adding model 'ResourceCert'
- db.create_table('app_resourcecert', (
- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
- ('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='certs', to=orm['irdb.Parent'])),
- ('not_before', self.gf('django.db.models.fields.DateTimeField')()),
- ('not_after', self.gf('django.db.models.fields.DateTimeField')()),
- ('uri', self.gf('django.db.models.fields.CharField')(max_length=255)),
- ))
- db.send_create_signal('app', ['ResourceCert'])
-
- # Adding model 'ResourceRangeAddressV4'
- db.create_table('app_resourcerangeaddressv4', (
- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
- ('prefix_min', self.gf('rpki.gui.models.IPv4AddressField')(db_index=True)),
- ('prefix_max', self.gf('rpki.gui.models.IPv4AddressField')(db_index=True)),
- ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='address_ranges', to=orm['app.ResourceCert'])),
- ))
- db.send_create_signal('app', ['ResourceRangeAddressV4'])
-
- # Adding model 'ResourceRangeAddressV6'
- db.create_table('app_resourcerangeaddressv6', (
- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
- ('prefix_min', self.gf('rpki.gui.models.IPv6AddressField')(db_index=True)),
- ('prefix_max', self.gf('rpki.gui.models.IPv6AddressField')(db_index=True)),
- ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='address_ranges_v6', to=orm['app.ResourceCert'])),
- ))
- db.send_create_signal('app', ['ResourceRangeAddressV6'])
-
- # Adding model 'ResourceRangeAS'
- db.create_table('app_resourcerangeas', (
- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
- ('min', self.gf('django.db.models.fields.PositiveIntegerField')()),
- ('max', self.gf('django.db.models.fields.PositiveIntegerField')()),
- ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='asn_ranges', to=orm['app.ResourceCert'])),
- ))
- db.send_create_signal('app', ['ResourceRangeAS'])
-
- # Adding model 'GhostbusterRequest'
- db.create_table('app_ghostbusterrequest', (
- ('ghostbusterrequest_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['irdb.GhostbusterRequest'], unique=True, primary_key=True)),
- ('full_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
- ('family_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
- ('given_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
- ('additional_name', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
- ('honorific_prefix', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
- ('honorific_suffix', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
- ('email_address', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
- ('organization', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
- ('telephone', self.gf('rpki.gui.app.models.TelephoneField')(max_length=40, null=True, blank=True)),
- ('box', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ('extended', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
- ('street', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
- ('city', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ('region', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ('code', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ('country', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ))
- db.send_create_signal('app', ['GhostbusterRequest'])
-
- # Adding model 'Timestamp'
- db.create_table('app_timestamp', (
- ('name', self.gf('django.db.models.fields.CharField')(max_length=30, primary_key=True)),
- ('ts', self.gf('django.db.models.fields.DateTimeField')()),
- ))
- db.send_create_signal('app', ['Timestamp'])
-
-
- def backwards(self, orm):
- # Deleting model 'ResourceCert'
- db.delete_table('app_resourcecert')
-
- # Deleting model 'ResourceRangeAddressV4'
- db.delete_table('app_resourcerangeaddressv4')
-
- # Deleting model 'ResourceRangeAddressV6'
- db.delete_table('app_resourcerangeaddressv6')
-
- # Deleting model 'ResourceRangeAS'
- db.delete_table('app_resourcerangeas')
-
- # Deleting model 'GhostbusterRequest'
- db.delete_table('app_ghostbusterrequest')
-
- # Deleting model 'Timestamp'
- db.delete_table('app_timestamp')
-
-
- models = {
- 'app.ghostbusterrequest': {
- 'Meta': {'ordering': "('family_name', 'given_name')", 'object_name': 'GhostbusterRequest', '_ormbases': ['irdb.GhostbusterRequest']},
- 'additional_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
- 'box': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'code': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'country': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
- 'extended': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
- 'family_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
- 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
- 'ghostbusterrequest_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['irdb.GhostbusterRequest']", 'unique': 'True', 'primary_key': 'True'}),
- 'given_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
- 'honorific_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
- 'honorific_suffix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
- 'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
- 'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
- 'telephone': ('rpki.gui.app.models.TelephoneField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
- },
- 'app.resourcecert': {
- 'Meta': {'object_name': 'ResourceCert'},
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'not_after': ('django.db.models.fields.DateTimeField', [], {}),
- 'not_before': ('django.db.models.fields.DateTimeField', [], {}),
- 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'certs'", 'to': "orm['irdb.Parent']"}),
- 'uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
- },
- 'app.resourcerangeaddressv4': {
- 'Meta': {'ordering': "('prefix_min',)", 'object_name': 'ResourceRangeAddressV4'},
- 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'address_ranges'", 'to': "orm['app.ResourceCert']"}),
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'prefix_max': ('rpki.gui.models.IPv4AddressField', [], {'db_index': 'True'}),
- 'prefix_min': ('rpki.gui.models.IPv4AddressField', [], {'db_index': 'True'})
- },
- 'app.resourcerangeaddressv6': {
- 'Meta': {'ordering': "('prefix_min',)", 'object_name': 'ResourceRangeAddressV6'},
- 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'address_ranges_v6'", 'to': "orm['app.ResourceCert']"}),
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'prefix_max': ('rpki.gui.models.IPv6AddressField', [], {'db_index': 'True'}),
- 'prefix_min': ('rpki.gui.models.IPv6AddressField', [], {'db_index': 'True'})
- },
- 'app.resourcerangeas': {
- 'Meta': {'ordering': "('min', 'max')", 'object_name': 'ResourceRangeAS'},
- 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'asn_ranges'", 'to': "orm['app.ResourceCert']"}),
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'max': ('django.db.models.fields.PositiveIntegerField', [], {}),
- 'min': ('django.db.models.fields.PositiveIntegerField', [], {})
- },
- 'app.timestamp': {
- 'Meta': {'object_name': 'Timestamp'},
- 'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'}),
- 'ts': ('django.db.models.fields.DateTimeField', [], {})
- },
- 'irdb.ghostbusterrequest': {
- 'Meta': {'object_name': 'GhostbusterRequest'},
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ghostbuster_requests'", 'to': "orm['irdb.ResourceHolderCA']"}),
- 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ghostbuster_requests'", 'null': 'True', 'to': "orm['irdb.Parent']"}),
- 'vcard': ('django.db.models.fields.TextField', [], {})
- },
- 'irdb.parent': {
- 'Meta': {'unique_together': "(('issuer', 'handle'),)", 'object_name': 'Parent', '_ormbases': ['irdb.Turtle']},
- 'certificate': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
- 'child_handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
- 'handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
- 'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parents'", 'to': "orm['irdb.ResourceHolderCA']"}),
- 'parent_handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
- 'referral_authorization': ('rpki.irdb.models.SignedReferralField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
- 'referrer': ('rpki.irdb.models.HandleField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
- 'repository_type': ('rpki.irdb.models.EnumField', [], {}),
- 'ta': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
- 'turtle_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['irdb.Turtle']", 'unique': 'True', 'primary_key': 'True'})
- },
- 'irdb.resourceholderca': {
- 'Meta': {'object_name': 'ResourceHolderCA'},
- 'certificate': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
- 'handle': ('rpki.irdb.models.HandleField', [], {'unique': 'True', 'max_length': '120'}),
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'last_crl_update': ('rpki.irdb.models.SundialField', [], {}),
- 'latest_crl': ('rpki.irdb.models.CRLField', [], {'default': 'None', 'blank': 'True'}),
- 'next_crl_number': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
- 'next_crl_update': ('rpki.irdb.models.SundialField', [], {}),
- 'next_serial': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
- 'private_key': ('rpki.irdb.models.RSAKeyField', [], {'default': 'None', 'blank': 'True'})
- },
- 'irdb.turtle': {
- 'Meta': {'object_name': 'Turtle'},
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'service_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
- }
- }
-
- complete_apps = ['app'] \ No newline at end of file
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+from django.conf import settings
+import rpki.gui.models
+import rpki.gui.app.models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
+ ('irdb', '0001_initial'),
+ ('routeview', '__first__'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Alert',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('severity', models.SmallIntegerField(default=0, choices=[(0, b'info'), (1, b'warning'), (2, b'error')])),
+ ('when', models.DateTimeField(auto_now_add=True)),
+ ('seen', models.BooleanField(default=False)),
+ ('subject', models.CharField(max_length=66)),
+ ('text', models.TextField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ConfACL',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='GhostbusterRequest',
+ fields=[
+ ('ghostbusterrequest_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='irdb.GhostbusterRequest')),
+ ('full_name', models.CharField(max_length=40)),
+ ('family_name', models.CharField(max_length=20)),
+ ('given_name', models.CharField(max_length=20)),
+ ('additional_name', models.CharField(max_length=20, null=True, blank=True)),
+ ('honorific_prefix', models.CharField(max_length=10, null=True, blank=True)),
+ ('honorific_suffix', models.CharField(max_length=10, null=True, blank=True)),
+ ('email_address', models.EmailField(max_length=254, null=True, blank=True)),
+ ('organization', models.CharField(max_length=255, null=True, blank=True)),
+ ('telephone', rpki.gui.app.models.TelephoneField(max_length=40, null=True, blank=True)),
+ ('box', models.CharField(max_length=40, null=True, verbose_name=b'P.O. Box', blank=True)),
+ ('extended', models.CharField(max_length=255, null=True, blank=True)),
+ ('street', models.CharField(max_length=255, null=True, blank=True)),
+ ('city', models.CharField(max_length=40, null=True, blank=True)),
+ ('region', models.CharField(help_text=b'state or province', max_length=40, null=True, blank=True)),
+ ('code', models.CharField(max_length=40, null=True, verbose_name=b'Postal Code', blank=True)),
+ ('country', models.CharField(max_length=40, null=True, blank=True)),
+ ],
+ options={
+ 'ordering': ('family_name', 'given_name'),
+ },
+ bases=('irdb.ghostbusterrequest',),
+ ),
+ migrations.CreateModel(
+ name='ResourceCert',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('not_before', models.DateTimeField()),
+ ('not_after', models.DateTimeField()),
+ ('uri', models.CharField(max_length=255)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ResourceRangeAddressV4',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ('cert', models.ForeignKey(related_name='address_ranges', to='app.ResourceCert')),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ResourceRangeAddressV6',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ('cert', models.ForeignKey(related_name='address_ranges_v6', to='app.ResourceCert')),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ResourceRangeAS',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('min', models.BigIntegerField(validators=[rpki.gui.models.validate_asn])),
+ ('max', models.BigIntegerField(validators=[rpki.gui.models.validate_asn])),
+ ('cert', models.ForeignKey(related_name='asn_ranges', to='app.ResourceCert')),
+ ],
+ options={
+ 'ordering': ('min', 'max'),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Timestamp',
+ fields=[
+ ('name', models.CharField(max_length=30, serialize=False, primary_key=True)),
+ ('ts', models.DateTimeField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Child',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ 'verbose_name_plural': 'children',
+ },
+ bases=('irdb.child',),
+ ),
+ migrations.CreateModel(
+ name='ChildASN',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.childasn',),
+ ),
+ migrations.CreateModel(
+ name='ChildNet',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.childnet',),
+ ),
+ migrations.CreateModel(
+ name='Client',
+ fields=[
+ ],
+ options={
+ 'verbose_name': 'Client',
+ 'proxy': True,
+ },
+ bases=('irdb.client',),
+ ),
+ migrations.CreateModel(
+ name='Conf',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.resourceholderca',),
+ ),
+ migrations.CreateModel(
+ name='Parent',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.parent',),
+ ),
+ migrations.CreateModel(
+ name='Repository',
+ fields=[
+ ],
+ options={
+ 'verbose_name': 'Repository',
+ 'proxy': True,
+ 'verbose_name_plural': 'Repositories',
+ },
+ bases=('irdb.repository',),
+ ),
+ migrations.CreateModel(
+ name='ROARequest',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.roarequest',),
+ ),
+ migrations.CreateModel(
+ name='ROARequestPrefix',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.roarequestprefix',),
+ ),
+ migrations.CreateModel(
+ name='RouteOrigin',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('routeview.routeorigin',),
+ ),
+ migrations.CreateModel(
+ name='RouteOriginV6',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('routeview.routeoriginv6',),
+ ),
+ migrations.AddField(
+ model_name='resourcecert',
+ name='conf',
+ field=models.ForeignKey(related_name='certs', to='app.Conf'),
+ ),
+ migrations.AddField(
+ model_name='resourcecert',
+ name='parent',
+ field=models.ForeignKey(related_name='certs', to='app.Parent', null=True),
+ ),
+ migrations.AddField(
+ model_name='confacl',
+ name='conf',
+ field=models.ForeignKey(to='app.Conf'),
+ ),
+ migrations.AddField(
+ model_name='confacl',
+ name='user',
+ field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
+ ),
+ migrations.AddField(
+ model_name='alert',
+ name='conf',
+ field=models.ForeignKey(related_name='alerts', to='app.Conf'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='confacl',
+ unique_together=set([('user', 'conf')]),
+ ),
+ ]
diff --git a/rpki/gui/app/models.py b/rpki/gui/app/models.py
index 40bdbe2c..fb1cafff 100644
--- a/rpki/gui/app/models.py
+++ b/rpki/gui/app/models.py
@@ -1,5 +1,5 @@
# Copyright (C) 2010 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -26,7 +26,6 @@ import rpki.irdb.models
import rpki.gui.models
import rpki.gui.routeview.models
import rpki.oids
-from south.modelsinspector import add_introspection_rules
class TelephoneField(models.CharField):
@@ -35,8 +34,6 @@ class TelephoneField(models.CharField):
kwargs['max_length'] = 40
models.CharField.__init__(self, **kwargs)
-add_introspection_rules([], [r'^rpki\.gui\.app\.models\.TelephoneField'])
-
class Parent(rpki.irdb.models.Parent):
"""proxy model for irdb Parent"""
@@ -123,7 +120,7 @@ class Alert(models.Model):
class Conf(rpki.irdb.models.ResourceHolderCA):
"""This is the center of the universe, also known as a place to
- have a handle on a resource-holding entity. It's the <self>
+ have a handle on a resource-holding entity. It's the <tenant/>
in the rpkid schema.
"""
@@ -262,7 +259,7 @@ class ResourceCert(models.Model):
not_after = models.DateTimeField()
# Locator for this object. Used to look up the validation status, expiry
- # of ancestor certs in cacheview
+ # of ancestor certs in gui_rpki_cache
uri = models.CharField(max_length=255)
def __unicode__(self):
diff --git a/rpki/gui/app/range_list.py b/rpki/gui/app/range_list.py
index 21fd1f29..5cb4f5e4 100755
--- a/rpki/gui/app/range_list.py
+++ b/rpki/gui/app/range_list.py
@@ -70,6 +70,7 @@ class RangeList(list):
def difference(self, other):
"""Return a RangeList object which contains ranges in this object which
are not in "other"."""
+
it = iter(other)
try:
@@ -85,6 +86,7 @@ class RangeList(list):
def V(v):
"""convert the integer value to the appropriate type for this
range"""
+
return x.__class__.datum_type(v)
try:
diff --git a/rpki/gui/app/south_migrations/0001_initial.py b/rpki/gui/app/south_migrations/0001_initial.py
new file mode 100644
index 00000000..80877901
--- /dev/null
+++ b/rpki/gui/app/south_migrations/0001_initial.py
@@ -0,0 +1,192 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ # Adding model 'ResourceCert'
+ db.create_table('app_resourcecert', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='certs', to=orm['irdb.Parent'])),
+ ('not_before', self.gf('django.db.models.fields.DateTimeField')()),
+ ('not_after', self.gf('django.db.models.fields.DateTimeField')()),
+ ('uri', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ))
+ db.send_create_signal('app', ['ResourceCert'])
+
+ # Adding model 'ResourceRangeAddressV4'
+ db.create_table('app_resourcerangeaddressv4', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('prefix_min', self.gf('rpki.gui.models.IPv4AddressField')(db_index=True)),
+ ('prefix_max', self.gf('rpki.gui.models.IPv4AddressField')(db_index=True)),
+ ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='address_ranges', to=orm['app.ResourceCert'])),
+ ))
+ db.send_create_signal('app', ['ResourceRangeAddressV4'])
+
+ # Adding model 'ResourceRangeAddressV6'
+ db.create_table('app_resourcerangeaddressv6', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('prefix_min', self.gf('rpki.gui.models.IPv6AddressField')(db_index=True)),
+ ('prefix_max', self.gf('rpki.gui.models.IPv6AddressField')(db_index=True)),
+ ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='address_ranges_v6', to=orm['app.ResourceCert'])),
+ ))
+ db.send_create_signal('app', ['ResourceRangeAddressV6'])
+
+ # Adding model 'ResourceRangeAS'
+ db.create_table('app_resourcerangeas', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('min', self.gf('django.db.models.fields.PositiveIntegerField')()),
+ ('max', self.gf('django.db.models.fields.PositiveIntegerField')()),
+ ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='asn_ranges', to=orm['app.ResourceCert'])),
+ ))
+ db.send_create_signal('app', ['ResourceRangeAS'])
+
+ # Adding model 'GhostbusterRequest'
+ db.create_table('app_ghostbusterrequest', (
+ ('ghostbusterrequest_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['irdb.GhostbusterRequest'], unique=True, primary_key=True)),
+ ('full_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
+ ('family_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
+ ('given_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
+ ('additional_name', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
+ ('honorific_prefix', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
+ ('honorific_suffix', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
+ ('email_address', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
+ ('organization', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
+ ('telephone', self.gf('rpki.gui.app.models.TelephoneField')(max_length=40, null=True, blank=True)),
+ ('box', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ('extended', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
+ ('street', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
+ ('city', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ('region', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ('code', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ('country', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ))
+ db.send_create_signal('app', ['GhostbusterRequest'])
+
+ # Adding model 'Timestamp'
+ db.create_table('app_timestamp', (
+ ('name', self.gf('django.db.models.fields.CharField')(max_length=30, primary_key=True)),
+ ('ts', self.gf('django.db.models.fields.DateTimeField')()),
+ ))
+ db.send_create_signal('app', ['Timestamp'])
+
+
+ def backwards(self, orm):
+ # Deleting model 'ResourceCert'
+ db.delete_table('app_resourcecert')
+
+ # Deleting model 'ResourceRangeAddressV4'
+ db.delete_table('app_resourcerangeaddressv4')
+
+ # Deleting model 'ResourceRangeAddressV6'
+ db.delete_table('app_resourcerangeaddressv6')
+
+ # Deleting model 'ResourceRangeAS'
+ db.delete_table('app_resourcerangeas')
+
+ # Deleting model 'GhostbusterRequest'
+ db.delete_table('app_ghostbusterrequest')
+
+ # Deleting model 'Timestamp'
+ db.delete_table('app_timestamp')
+
+
+ models = {
+ 'app.ghostbusterrequest': {
+ 'Meta': {'ordering': "('family_name', 'given_name')", 'object_name': 'GhostbusterRequest', '_ormbases': ['irdb.GhostbusterRequest']},
+ 'additional_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
+ 'box': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'code': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'country': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
+ 'extended': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
+ 'family_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
+ 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
+ 'ghostbusterrequest_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['irdb.GhostbusterRequest']", 'unique': 'True', 'primary_key': 'True'}),
+ 'given_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
+ 'honorific_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
+ 'honorific_suffix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
+ 'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
+ 'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
+ 'telephone': ('rpki.gui.app.models.TelephoneField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
+ },
+ 'app.resourcecert': {
+ 'Meta': {'object_name': 'ResourceCert'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'not_after': ('django.db.models.fields.DateTimeField', [], {}),
+ 'not_before': ('django.db.models.fields.DateTimeField', [], {}),
+ 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'certs'", 'to': "orm['irdb.Parent']"}),
+ 'uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
+ },
+ 'app.resourcerangeaddressv4': {
+ 'Meta': {'ordering': "('prefix_min',)", 'object_name': 'ResourceRangeAddressV4'},
+ 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'address_ranges'", 'to': "orm['app.ResourceCert']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'prefix_max': ('rpki.gui.models.IPv4AddressField', [], {'db_index': 'True'}),
+ 'prefix_min': ('rpki.gui.models.IPv4AddressField', [], {'db_index': 'True'})
+ },
+ 'app.resourcerangeaddressv6': {
+ 'Meta': {'ordering': "('prefix_min',)", 'object_name': 'ResourceRangeAddressV6'},
+ 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'address_ranges_v6'", 'to': "orm['app.ResourceCert']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'prefix_max': ('rpki.gui.models.IPv6AddressField', [], {'db_index': 'True'}),
+ 'prefix_min': ('rpki.gui.models.IPv6AddressField', [], {'db_index': 'True'})
+ },
+ 'app.resourcerangeas': {
+ 'Meta': {'ordering': "('min', 'max')", 'object_name': 'ResourceRangeAS'},
+ 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'asn_ranges'", 'to': "orm['app.ResourceCert']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'max': ('django.db.models.fields.PositiveIntegerField', [], {}),
+ 'min': ('django.db.models.fields.PositiveIntegerField', [], {})
+ },
+ 'app.timestamp': {
+ 'Meta': {'object_name': 'Timestamp'},
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'}),
+ 'ts': ('django.db.models.fields.DateTimeField', [], {})
+ },
+ 'irdb.ghostbusterrequest': {
+ 'Meta': {'object_name': 'GhostbusterRequest'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ghostbuster_requests'", 'to': "orm['irdb.ResourceHolderCA']"}),
+ 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ghostbuster_requests'", 'null': 'True', 'to': "orm['irdb.Parent']"}),
+ 'vcard': ('django.db.models.fields.TextField', [], {})
+ },
+ 'irdb.parent': {
+ 'Meta': {'unique_together': "(('issuer', 'handle'),)", 'object_name': 'Parent', '_ormbases': ['irdb.Turtle']},
+ 'certificate': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
+ 'child_handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
+ 'handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
+ 'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parents'", 'to': "orm['irdb.ResourceHolderCA']"}),
+ 'parent_handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
+ 'referral_authorization': ('rpki.irdb.models.SignedReferralField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
+ 'referrer': ('rpki.irdb.models.HandleField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
+ 'repository_type': ('rpki.irdb.models.EnumField', [], {}),
+ 'ta': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
+ 'turtle_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['irdb.Turtle']", 'unique': 'True', 'primary_key': 'True'})
+ },
+ 'irdb.resourceholderca': {
+ 'Meta': {'object_name': 'ResourceHolderCA'},
+ 'certificate': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
+ 'handle': ('rpki.irdb.models.HandleField', [], {'unique': 'True', 'max_length': '120'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'last_crl_update': ('rpki.irdb.models.SundialField', [], {}),
+ 'latest_crl': ('rpki.irdb.models.CRLField', [], {'default': 'None', 'blank': 'True'}),
+ 'next_crl_number': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
+ 'next_crl_update': ('rpki.irdb.models.SundialField', [], {}),
+ 'next_serial': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
+ 'private_key': ('rpki.irdb.models.RSAKeyField', [], {'default': 'None', 'blank': 'True'})
+ },
+ 'irdb.turtle': {
+ 'Meta': {'object_name': 'Turtle'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'service_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
+ }
+ }
+
+ complete_apps = ['app'] \ No newline at end of file
diff --git a/rpki/gui/app/migrations/0002_auto__add_field_resourcecert_conf.py b/rpki/gui/app/south_migrations/0002_auto__add_field_resourcecert_conf.py
index d3326f90..d3326f90 100644
--- a/rpki/gui/app/migrations/0002_auto__add_field_resourcecert_conf.py
+++ b/rpki/gui/app/south_migrations/0002_auto__add_field_resourcecert_conf.py
diff --git a/rpki/gui/app/migrations/0003_set_conf_from_parent.py b/rpki/gui/app/south_migrations/0003_set_conf_from_parent.py
index a90a11cc..a90a11cc 100644
--- a/rpki/gui/app/migrations/0003_set_conf_from_parent.py
+++ b/rpki/gui/app/south_migrations/0003_set_conf_from_parent.py
diff --git a/rpki/gui/app/migrations/0004_auto__chg_field_resourcecert_conf.py b/rpki/gui/app/south_migrations/0004_auto__chg_field_resourcecert_conf.py
index a236ad4a..a236ad4a 100644
--- a/rpki/gui/app/migrations/0004_auto__chg_field_resourcecert_conf.py
+++ b/rpki/gui/app/south_migrations/0004_auto__chg_field_resourcecert_conf.py
diff --git a/rpki/gui/app/migrations/0005_auto__chg_field_resourcecert_parent.py b/rpki/gui/app/south_migrations/0005_auto__chg_field_resourcecert_parent.py
index 11e9c814..11e9c814 100644
--- a/rpki/gui/app/migrations/0005_auto__chg_field_resourcecert_parent.py
+++ b/rpki/gui/app/south_migrations/0005_auto__chg_field_resourcecert_parent.py
diff --git a/rpki/gui/app/migrations/0006_add_conf_acl.py b/rpki/gui/app/south_migrations/0006_add_conf_acl.py
index 88fe8171..88fe8171 100644
--- a/rpki/gui/app/migrations/0006_add_conf_acl.py
+++ b/rpki/gui/app/south_migrations/0006_add_conf_acl.py
diff --git a/rpki/gui/app/migrations/0007_default_acls.py b/rpki/gui/app/south_migrations/0007_default_acls.py
index 40656d0f..40656d0f 100644
--- a/rpki/gui/app/migrations/0007_default_acls.py
+++ b/rpki/gui/app/south_migrations/0007_default_acls.py
diff --git a/rpki/gui/app/migrations/0008_add_alerts.py b/rpki/gui/app/south_migrations/0008_add_alerts.py
index 77af68d2..77af68d2 100644
--- a/rpki/gui/app/migrations/0008_add_alerts.py
+++ b/rpki/gui/app/south_migrations/0008_add_alerts.py
diff --git a/rpki/gui/app/south_migrations/__init__.py b/rpki/gui/app/south_migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/gui/app/south_migrations/__init__.py
diff --git a/rpki/gui/app/views.py b/rpki/gui/app/views.py
index bf152f8e..03c7c168 100644
--- a/rpki/gui/app/views.py
+++ b/rpki/gui/app/views.py
@@ -1,5 +1,5 @@
# Copyright (C) 2010, 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012, 2014 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2014, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -23,7 +23,6 @@ __version__ = '$Id$'
import os
import os.path
-from tempfile import NamedTemporaryFile
import cStringIO
import csv
import logging
@@ -42,7 +41,8 @@ from django.forms.formsets import formset_factory, BaseFormSet
from django.contrib import messages
from django.db.models import Q
-from rpki.irdb import Zookeeper, ChildASN, ChildNet, ROARequestPrefix
+from rpki.irdb import Zookeeper
+from rpki.irdb.models import ChildASN, ChildNet, ROARequestPrefix
from rpki.gui.app import models, forms, glue, range_list
from rpki.resource_set import (resource_range_as, resource_range_ip,
roa_prefix_ipv4)
@@ -50,7 +50,6 @@ from rpki import sundial
import rpki.exceptions
import rpki.csv_utils
-from rpki.gui.cacheview.models import ROA
from rpki.gui.routeview.models import RouteOrigin
from rpki.gui.decorators import tls_required
@@ -136,10 +135,6 @@ def generic_import(request, queryset, configure, form_class=None,
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
- tmpf = NamedTemporaryFile(prefix='import', suffix='.xml',
- delete=False)
- tmpf.write(form.cleaned_data['xml'].read())
- tmpf.close()
z = Zookeeper(handle=conf.handle)
handle = form.cleaned_data.get('handle')
# CharField uses an empty string for the empty value, rather than
@@ -148,27 +143,25 @@ def generic_import(request, queryset, configure, form_class=None,
if handle == '':
handle = None
try:
- # configure_repository returns None, so can't use tuple expansion
- # here. Unpack the tuple below if post_import_redirect is None.
- r = configure(z, tmpf.name, handle)
+ # configure_repository returns None, so can't use tuple expansion
+ # here. Unpack the tuple below if post_import_redirect is None.
+ r = configure(z, form.cleaned_data['xml'], handle)
except lxml.etree.XMLSyntaxError as e:
- logger.exception('caught XMLSyntaxError while parsing uploaded file')
+ logger.exception('caught XMLSyntaxError while parsing uploaded file')
messages.error(
request,
'The uploaded file has an invalid XML syntax'
)
else:
- # force rpkid run now
- z.synchronize_ca(poke=True)
- if post_import_redirect:
- url = post_import_redirect
- else:
- _, handle = r
- url = queryset.get(issuer=conf,
- handle=handle).get_absolute_url()
- return http.HttpResponseRedirect(url)
- finally:
- os.remove(tmpf.name)
+ # force rpkid run now
+ z.synchronize_ca(poke=True)
+ if post_import_redirect:
+ url = post_import_redirect
+ else:
+ _, handle = r
+ url = queryset.get(issuer=conf,
+ handle=handle).get_absolute_url()
+ return http.HttpResponseRedirect(url)
else:
form = form_class()
@@ -298,10 +291,10 @@ def serve_xml(content, basename, ext='xml'):
`basename` is the prefix to specify for the XML filename.
- `csv` is the type (default: xml)
+ `ext` is the type (default: xml)
"""
- resp = http.HttpResponse(content, mimetype='application/%s' % ext)
+ resp = http.HttpResponse(content, content_type='application/%s' % ext)
resp['Content-Disposition'] = 'attachment; filename=%s.%s' % (basename, ext)
return resp
@@ -332,13 +325,10 @@ def import_asns(request):
if request.method == 'POST':
form = forms.ImportCSVForm(request.POST, request.FILES)
if form.is_valid():
- f = NamedTemporaryFile(prefix='asns', suffix='.csv', delete=False)
- f.write(request.FILES['csv'].read())
- f.close()
z = Zookeeper(handle=conf.handle, disable_signal_handlers=True)
try:
z.load_asns(
- f.name,
+ request.FILES['csv'],
ignore_missing_children=form.cleaned_data['ignore_missing_children']
)
except rpki.irdb.models.Child.DoesNotExist:
@@ -353,8 +343,6 @@ def import_asns(request):
z.run_rpkid_now()
messages.success(request, 'Successfully imported AS delgations from CSV file.')
return redirect(dashboard)
- finally:
- os.unlink(f.name)
else:
form = forms.ImportCSVForm()
return render(request, 'app/import_resource_form.html', {
@@ -381,13 +369,10 @@ def import_prefixes(request):
if request.method == 'POST':
form = forms.ImportCSVForm(request.POST, request.FILES)
if form.is_valid():
- f = NamedTemporaryFile(prefix='prefixes', suffix='.csv', delete=False)
- f.write(request.FILES['csv'].read())
- f.close()
z = Zookeeper(handle=conf.handle, disable_signal_handlers=True)
try:
z.load_prefixes(
- f.name,
+ request.FILES['csv'],
ignore_missing_children=form.cleaned_data['ignore_missing_children']
)
except rpki.irdb.models.Child.DoesNotExist:
@@ -399,8 +384,6 @@ def import_prefixes(request):
z.run_rpkid_now()
messages.success(request, 'Successfully imported AS delgations from CSV file.')
return redirect(dashboard)
- finally:
- os.unlink(f.name)
else:
form = forms.ImportCSVForm()
return render(request, 'app/import_resource_form.html', {
@@ -474,10 +457,10 @@ def child_add_prefix(request, pk):
child.address_ranges.create(start_ip=str(r.min), end_ip=str(r.max),
version=version)
Zookeeper(
- handle=conf.handle,
- logstream=logstream,
- disable_signal_handlers=True
- ).run_rpkid_now()
+ handle=conf.handle,
+ logstream=logstream,
+ disable_signal_handlers=True
+ ).run_rpkid_now()
return http.HttpResponseRedirect(child.get_absolute_url())
else:
form = forms.AddNetForm(child=child)
@@ -497,10 +480,10 @@ def child_add_asn(request, pk):
r = resource_range_as.parse_str(asns)
child.asns.create(start_as=r.min, end_as=r.max)
Zookeeper(
- handle=conf.handle,
- logstream=logstream,
- disable_signal_handlers=True
- ).run_rpkid_now()
+ handle=conf.handle,
+ logstream=logstream,
+ disable_signal_handlers=True
+ ).run_rpkid_now()
return http.HttpResponseRedirect(child.get_absolute_url())
else:
form = forms.AddASNForm(child=child)
@@ -531,10 +514,10 @@ def child_edit(request, pk):
models.ChildASN.objects.filter(child=child).exclude(pk__in=form.cleaned_data.get('as_ranges')).delete()
models.ChildNet.objects.filter(child=child).exclude(pk__in=form.cleaned_data.get('address_ranges')).delete()
Zookeeper(
- handle=conf.handle,
- logstream=logstream,
- disable_signal_handlers=True
- ).run_rpkid_now()
+ handle=conf.handle,
+ logstream=log,
+ disable_signal_handlers=True
+ ).run_rpkid_now()
return http.HttpResponseRedirect(child.get_absolute_url())
else:
form = form_class(initial={
@@ -713,27 +696,27 @@ def roa_create_multi(request):
v = []
rng.chop_into_prefixes(v)
init.extend([{'asn': asn, 'prefix': str(p)} for p in v])
- extra = 0 if init else 1
+ extra = 0 if init else 1
formset = formset_factory(forms.ROARequestFormFactory(conf), extra=extra)(initial=init)
elif request.method == 'POST':
formset = formset_factory(forms.ROARequestFormFactory(conf), extra=0)(request.POST, request.FILES)
- # We need to check .has_changed() because .is_valid() will return true
- # if the user clicks the Preview button without filling in the blanks
- # in the ROA form, leaving the form invalid from this view's POV.
+ # We need to check .has_changed() because .is_valid() will return true
+ # if the user clicks the Preview button without filling in the blanks
+ # in the ROA form, leaving the form invalid from this view's POV.
if formset.has_changed() and formset.is_valid():
routes = []
v = []
query = Q() # for matching routes
roas = []
for form in formset:
- asn = form.cleaned_data['asn']
- rng = resource_range_ip.parse_str(form.cleaned_data['prefix'])
- max_prefixlen = int(form.cleaned_data['max_prefixlen'])
+ asn = form.cleaned_data['asn']
+ rng = resource_range_ip.parse_str(form.cleaned_data['prefix'])
+ max_prefixlen = int(form.cleaned_data['max_prefixlen'])
protect_children = form.cleaned_data['protect_children']
roas.append((rng, max_prefixlen, asn, protect_children))
- v.append({'prefix': str(rng), 'max_prefixlen': max_prefixlen,
- 'asn': asn})
+ v.append({'prefix': str(rng), 'max_prefixlen': max_prefixlen,
+ 'asn': asn})
query |= Q(prefix_min__gte=rng.min, prefix_max__lte=rng.max)
@@ -903,14 +886,10 @@ def roa_import(request):
if request.method == 'POST':
form = forms.ImportCSVForm(request.POST, request.FILES)
if form.is_valid():
- import tempfile
- tmp = tempfile.NamedTemporaryFile(suffix='.csv', prefix='roas', delete=False)
- tmp.write(request.FILES['csv'].read())
- tmp.close()
z = Zookeeper(handle=request.session['handle'],
disable_signal_handlers=True)
try:
- z.load_roa_requests(tmp.name)
+ z.load_roa_requests(request.FILES['csv'])
except rpki.csv_utils.BadCSVSyntax as e:
messages.error(request,
'CSV has bad syntax: %s' % (e,))
@@ -918,8 +897,6 @@ def roa_import(request):
z.run_rpkid_now()
messages.success(request, 'Successfully imported ROAs.')
return redirect(dashboard)
- finally:
- os.unlink(tmp.name)
else:
form = forms.ImportCSVForm()
return render(request, 'app/import_resource_form.html', {
@@ -939,7 +916,7 @@ def roa_export(request):
# each roa prefix gets a unique group so rpkid will issue separate roas
for group, roapfx in enumerate(ROARequestPrefix.objects.filter(roa_request__issuer=conf)):
csv_writer.writerow([str(roapfx.as_roa_prefix()), roapfx.roa_request.asn, '%s-%d' % (conf.handle, group)])
- resp = http.HttpResponse(f.getvalue(), mimetype='application/csv')
+ resp = http.HttpResponse(f.getvalue(), content_type='application/csv')
resp['Content-Disposition'] = 'attachment; filename=roas.csv'
return resp
@@ -1215,7 +1192,7 @@ def resource_holder_delete(request, pk):
form = forms.Empty(request.POST)
if form.is_valid():
z = Zookeeper(handle=conf.handle, logstream=log)
- z.delete_self()
+ z.delete_tenant()
z.synchronize_deleted_ca()
return redirect(resource_holder_list)
else:
@@ -1239,22 +1216,13 @@ def resource_holder_create(request):
zk_child = Zookeeper(handle=handle, logstream=log)
identity_xml = zk_child.initialize_resource_bpki()
if parent:
- # FIXME etree_wrapper should allow us to deal with file objects
- t = NamedTemporaryFile(delete=False)
- t.close()
-
- identity_xml.save(t.name)
zk_parent = Zookeeper(handle=parent.handle, logstream=log)
- parent_response, _ = zk_parent.configure_child(t.name)
- parent_response.save(t.name)
+ parent_response, _ = zk_parent.configure_child(identity_xml)
zk_parent.synchronize_ca()
- repo_req, _ = zk_child.configure_parent(t.name)
- repo_req.save(t.name)
- repo_resp, _ = zk_parent.configure_publication_client(t.name)
- repo_resp.save(t.name)
+ repo_req, _ = zk_child.configure_parent(parent_response)
+ repo_resp, _ = zk_parent.configure_publication_client(repo_req)
zk_parent.synchronize_pubd()
- zk_child.configure_repository(t.name)
- os.remove(t.name)
+ zk_child.configure_repository(repo_resp)
zk_child.synchronize_ca()
return redirect(resource_holder_list)
else:
@@ -1460,14 +1428,9 @@ class RouterImportView(FormView):
def form_valid(self, form):
conf = get_conf(self.request.user, self.request.session['handle'])
- tmpf = NamedTemporaryFile(prefix='import', suffix='.xml',
- delete=False)
- tmpf.write(form.cleaned_data['xml'].read())
- tmpf.close()
z = Zookeeper(handle=conf.handle, disable_signal_handlers=True)
- z.add_router_certificate_request(tmpf.name)
+ z.add_router_certificate_request(form.cleaned_data['xml'])
z.run_rpkid_now()
- os.remove(tmpf.name)
return super(RouterImportView, self).form_valid(form)
def get_context_data(self, **kwargs):
diff --git a/rpki/gui/cacheview/forms.py b/rpki/gui/cacheview/forms.py
deleted file mode 100644
index 7ae3601f..00000000
--- a/rpki/gui/cacheview/forms.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2013 SPARTA, Inc. a Parsons Company
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = '$Id$'
-
-from django import forms
-
-from rpki.gui.cacheview.misc import parse_ipaddr
-from rpki.exceptions import BadIPResource
-from rpki.resource_set import resource_range_as
-
-
-class SearchForm(forms.Form):
- asn = forms.CharField(required=False, help_text='AS or range', label='AS')
- addr = forms.CharField(required=False, max_length=40, help_text='range/CIDR', label='IP Address')
-
- def clean(self):
- asn = self.cleaned_data.get('asn')
- addr = self.cleaned_data.get('addr')
- if (asn and addr) or ((not asn) and (not addr)):
- raise forms.ValidationError('Please specify either an AS or IP range, not both')
-
- if asn:
- try:
- resource_range_as.parse_str(asn)
- except ValueError:
- raise forms.ValidationError('invalid AS range')
-
- if addr:
- #try:
- parse_ipaddr(addr)
- #except BadIPResource:
- # raise forms.ValidationError('invalid IP address range/prefix')
-
- return self.cleaned_data
-
-
-class SearchForm2(forms.Form):
- resource = forms.CharField(required=True)
diff --git a/rpki/gui/cacheview/misc.py b/rpki/gui/cacheview/misc.py
deleted file mode 100644
index 54431224..00000000
--- a/rpki/gui/cacheview/misc.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-from rpki.resource_set import resource_range_ipv4, resource_range_ipv6
-from rpki.exceptions import BadIPResource
-
-def parse_ipaddr(s):
- # resource_set functions only accept str
- if isinstance(s, unicode):
- s = s.encode()
- s = s.strip()
- r = resource_range_ipv4.parse_str(s)
- try:
- r = resource_range_ipv4.parse_str(s)
- return 4, r
- except BadIPResource:
- r = resource_range_ipv6.parse_str(s)
- return 6, r
-
-# vim:sw=4 ts=8 expandtab
diff --git a/rpki/gui/cacheview/templates/cacheview/addressrange_detail.html b/rpki/gui/cacheview/templates/cacheview/addressrange_detail.html
deleted file mode 100644
index 76edc1ba..00000000
--- a/rpki/gui/cacheview/templates/cacheview/addressrange_detail.html
+++ /dev/null
@@ -1,18 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-<h1>{% block title %}IP Range Detail{% endblock %}</h1>
-
-<p>
-IP Range: {{ object }}
-</p>
-
-<p>Covered by the following resource certs:</p>
-
-<ul>
-{% for cert in object.certs.all %}
-<li><a href="{{ cert.get_absolute_url }}">{{ cert }}</a></li>
-{% endfor %}
-</ul>
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/cacheview_base.html b/rpki/gui/cacheview/templates/cacheview/cacheview_base.html
deleted file mode 100644
index ec71d740..00000000
--- a/rpki/gui/cacheview/templates/cacheview/cacheview_base.html
+++ /dev/null
@@ -1,10 +0,0 @@
-{% extends "base.html" %}
-{% load url from future %}
-
-{% block sidebar %}
-<form method='post' action='{% url 'res-search' %}'>
- {% csrf_token %}
- <input type='text' id='id_resource' name='resource' placeholder='prefix or AS'>
- <button type='submit'>Search</button>
-</form>
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/cert_detail.html b/rpki/gui/cacheview/templates/cacheview/cert_detail.html
deleted file mode 100644
index 256e7780..00000000
--- a/rpki/gui/cacheview/templates/cacheview/cert_detail.html
+++ /dev/null
@@ -1,105 +0,0 @@
-{% extends "cacheview/signedobject_detail.html" %}
-
-{% block title %}
-Resource Certificate Detail
-{% endblock %}
-
-{% block detail %}
-
-<h2>RFC3779 Resources</h2>
-
-<table class='table table-striped'>
- <thead>
- <tr><th>AS Ranges</th><th>IP Ranges</th></tr>
- </thead>
- <tbody>
- <tr>
- <td style='text-align:left;vertical-align:top'>
- <ul class='compact'>
- {% for asn in object.asns.all %}
- <li><a href="{{ asn.get_absolute_url }}">{{ asn }}</a></li>
- {% endfor %}
- </ul>
- </td>
- <td style='text-align:left;vertical-align:top'>
- <ul class='compact'>
- {% for rng in object.addresses.all %}
- <li><a href="{{ rng.get_absolute_url }}">{{ rng }}</a></li>
- {% endfor %}
- </ul>
- </td>
- </tr>
- </tbody>
-</table>
-
-<div class='section'>
-<h2>Issued Objects</h2>
-<ul>
-
-{% if object.ghostbusters.all %}
- <li>
-<h3>Ghostbusters</h3>
-
-<table class='table table-striped'>
- <thead>
- <tr><th>Name</th><th>Expires</th></tr>
- </thead>
- <tbody>
-
-{% for g in object.ghostbusters.all %}
- <tr class='{{ g.status_id }}'>
- <td><a href="{{ g.get_absolute_url }}">{{ g }}</a></td>
- <td>{{ g.not_after }}</td>
- </tr>
- </tbody>
-{% endfor %}
-
-</table>
-{% endif %}
-
-{% if object.roas.all %}
- <li>
-<h3>ROAs</h3>
-<table class='table table-striped'>
- <thead>
- <tr><th>#</th><th>Prefix</th><th>AS</th><th>Expires</th></tr>
- </thead>
- <tbody>
- {% for roa in object.roas.all %}
- {% for pfx in roa.prefixes.all %}
- <tr class='{{ roa.status_id }}'>
- <td><a href="{{ roa.get_absolute_url }}">#</a></td>
- <td>{{ pfx }}</td>
- <td>{{ roa.asid }}</td>
- <td>{{ roa.not_after }}</td>
- </tr>
- {% endfor %}
- {% endfor %}
- </tbody>
-</table>
-{% endif %}
-
-{% if object.children.all %}
-<li>
-<h3>Children</h3>
-<table class='table table-striped'>
- <thead>
- <tr><th>Name</th><th>Expires</th></tr>
- </thead>
- <tbody>
-
- {% for child in object.children.all %}
- <tr class='{{ child.status_id }}'>
- <td><a href="{{ child.get_absolute_url }}">{{ child.name }}</a></td>
- <td>{{ child.not_after }}</td>
- </tr>
- {% endfor %}
- </tbody>
-</table>
-{% endif %}
-
-</ul>
-
-</div><!--issued objects-->
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/ghostbuster_detail.html b/rpki/gui/cacheview/templates/cacheview/ghostbuster_detail.html
deleted file mode 100644
index 4215f757..00000000
--- a/rpki/gui/cacheview/templates/cacheview/ghostbuster_detail.html
+++ /dev/null
@@ -1,13 +0,0 @@
-{% extends "cacheview/signedobject_detail.html" %}
-
-{% block title %}Ghostbuster Detail{% endblock %}
-
-{% block detail %}
-<p>
-<table class='table'>
- <tr><td>Full Name</td><td>{{ object.full_name }}</td></tr>
- <tr><td>Organization</td><td>{{ object.organization }}</td></tr>
- <tr><td>Email</td><td>{{ object.email_address }}</td></tr>
- <tr><td>Telephone</td><td>{{ object.telephone }}</td></tr>
-</table>
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/global_summary.html b/rpki/gui/cacheview/templates/cacheview/global_summary.html
deleted file mode 100644
index 0dbd0ffc..00000000
--- a/rpki/gui/cacheview/templates/cacheview/global_summary.html
+++ /dev/null
@@ -1,26 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-<div class='page-header'>
- <h1>Browse Global RPKI</h1>
-</div>
-
-<table class="table table-striped">
- <thead>
- <tr>
- <th>Name</th>
- <th>Expires</th>
- <th>URI</th>
- </tr>
- </thead>
- <tbody>
- {% for r in roots %}
- <tr>
- <td><a href="{{ r.get_absolute_url }}">{{ r.name }}</a></td>
- <td>{{ r.not_after }}</td>
- <td>{{ r.repo.uri }}</td>
- </tr>
- {% endfor %}
- </tbody>
-</table>
-{% endblock content %}
diff --git a/rpki/gui/cacheview/templates/cacheview/query_result.html b/rpki/gui/cacheview/templates/cacheview/query_result.html
deleted file mode 100644
index 0694c531..00000000
--- a/rpki/gui/cacheview/templates/cacheview/query_result.html
+++ /dev/null
@@ -1,21 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-
-<h1>{% block title %}Query Results{% endblock %}</h1>
-
-<table>
- <tr><th>Prefix</th><th>AS</th><th>Valid</th><th>Until</th></tr>
- {% for object in object_list %}
- <tr class='{{ object.1.status.kind_as_str }}'>
- <td>{{ object.0 }}</td>
- <td>{{ object.1.asid }}</td>
- <td><a href="{{ object.1.get_absolute_url }}">{{ object.1.ok }}</a></td>
- <td>{{ object.1.not_after }}</td>
- </tr>
- {% endfor %}
-</table>
-
-<p><a href="{% url rpki.gui.cacheview.views.query_view %}">new query</a></p>
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/roa_detail.html b/rpki/gui/cacheview/templates/cacheview/roa_detail.html
deleted file mode 100644
index 39cc547b..00000000
--- a/rpki/gui/cacheview/templates/cacheview/roa_detail.html
+++ /dev/null
@@ -1,18 +0,0 @@
-{% extends "cacheview/signedobject_detail.html" %}
-
-{% block title %}ROA Detail{% endblock %}
-
-{% block detail %}
-<p>
-<table>
- <tr><td>AS</td><td>{{ object.asid }}</td></tr>
-</table>
-
-<h2>Prefixes</h2>
-
-<ul>
-{% for pfx in object.prefixes.all %}
-<li>{{ pfx }}
-{% endfor %}
-</ul>
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/search_form.html b/rpki/gui/cacheview/templates/cacheview/search_form.html
deleted file mode 100644
index 1141615d..00000000
--- a/rpki/gui/cacheview/templates/cacheview/search_form.html
+++ /dev/null
@@ -1,17 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block title %}
-{{ search_type }} Search
-{% endblock %}
-
-{% block content %}
-
-<h1>{{search_type}} Search</h1>
-
-<form method='post' action='{{ request.url }}'>
- {% csrf_token %}
- {{ form.as_p }}
- <input type='submit' name='Search'>
-</form>
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/search_result.html b/rpki/gui/cacheview/templates/cacheview/search_result.html
deleted file mode 100644
index 7cbf852e..00000000
--- a/rpki/gui/cacheview/templates/cacheview/search_result.html
+++ /dev/null
@@ -1,42 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-
-<div class='page-header'>
- <h1>Search Results <small>{{ resource }}</small></h1>
-</div>
-
-<h2>Matching Resource Certificates</h2>
-{% if certs %}
-<ul>
-{% for cert in certs %}
-<li><a href="{{ cert.get_absolute_url }}">{{ cert }}</a>
-{% endfor %}
-</ul>
-{% else %}
-<p>none</p>
-{% endif %}
-
-<h2>Matching ROAs</h2>
-{% if roas %}
-<table class='table table-striped'>
- <thead>
- <tr>
- <th>#</th><th>Prefix</th><th>AS</th>
- </tr>
- </thead>
- <tbody>
-{% for roa in roas %}
-<tr>
- <td><a href="{{ roa.get_absolute_url }}">#</a></td>
- <td>{{ roa.prefixes.all.0 }}</td>
- <td>{{ roa.asid }}</td>
-</tr>
-{% endfor %}
-</tbody>
-</table>
-{% else %}
-<p>none</p>
-{% endif %}
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/signedobject_detail.html b/rpki/gui/cacheview/templates/cacheview/signedobject_detail.html
deleted file mode 100644
index 22ae3d27..00000000
--- a/rpki/gui/cacheview/templates/cacheview/signedobject_detail.html
+++ /dev/null
@@ -1,58 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-<div class='page-header'>
-<h1>{% block title %}Signed Object Detail{% endblock %}</h1>
-</div>
-
-<h2>Cert Info</h2>
-<table class='table table-striped'>
- <tr><td>Subject Name</td><td>{{ object.name }}</td></tr>
- <tr><td>SKI</td><td>{{ object.keyid }}</td></tr>
- {% if object.sia %}
- <tr><td>SIA</td><td>{{ object.sia }}</td></tr>
- {% endif %}
- <tr><td>Not Before</td><td>{{ object.not_before }}</td></tr>
- <tr><td>Not After</td><td>{{ object.not_after }}</td></tr>
-</table>
-
-<h2>Metadata</h2>
-
-<table class='table table-striped'>
- <tr><td>URI</td><td>{{ object.repo.uri }}</td></tr>
- <tr><td>Last Modified</td><td>{{ object.mtime_as_datetime|date:"DATETIME_FORMAT" }}</td></tr>
-</table>
-
-<h2>Validation Status</h2>
-<table class='table table-striped'>
- <thead>
- <tr><th>Timestamp</th><th>Generation</th><th>Status</th></tr>
- </thead>
- <tbody>
- {% for status in object.repo.statuses.all %}
- <tr class="{{ status.status.get_kind_display }}"><td>{{ status.timestamp }}</td><td>{{ status.get_generation_display }}</td><td>{{ status.status.status }}</td></tr>
- {% endfor %}
- </tbody>
-</table>
-
-<h2>X.509 Certificate Chain</h2>
-
-<table class='table table-striped'>
- <thead>
- <tr><th>Depth</th><th>Name</th></tr>
- </thead>
- <tbody>
-
-{% for cert in chain %}
-<tr class='{{ cert.1.status_id }}'>
- <td>{{ cert.0 }}</td>
- <td><a href="{{ cert.1.get_absolute_url }}">{{ cert.1.name }}</a></td>
-</tr>
-{% endfor %}
-</tbody>
-
-</table>
-
-{% block detail %}{% endblock %}
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/tests.py b/rpki/gui/cacheview/tests.py
deleted file mode 100644
index 2247054b..00000000
--- a/rpki/gui/cacheview/tests.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""
-This file demonstrates two different styles of tests (one doctest and one
-unittest). These will both pass when you run "manage.py test".
-
-Replace these with more appropriate tests for your application.
-"""
-
-from django.test import TestCase
-
-class SimpleTest(TestCase):
- def test_basic_addition(self):
- """
- Tests that 1 + 1 always equals 2.
- """
- self.failUnlessEqual(1 + 1, 2)
-
-__test__ = {"doctest": """
-Another way to test that 1 + 1 is equal to 2.
-
->>> 1 + 1 == 2
-True
-"""}
-
diff --git a/rpki/gui/cacheview/urls.py b/rpki/gui/cacheview/urls.py
deleted file mode 100644
index cc03a587..00000000
--- a/rpki/gui/cacheview/urls.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2013 SPARTA, Inc. a Parsons Company
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = '$Id$'
-
-from django.conf.urls import patterns, url
-from rpki.gui.cacheview.views import (CertDetailView, RoaDetailView,
- GhostbusterDetailView)
-
-urlpatterns = patterns('',
- url(r'^search$', 'rpki.gui.cacheview.views.search_view',
- name='res-search'),
- url(r'^cert/(?P<pk>[^/]+)$', CertDetailView.as_view(), name='cert-detail'),
- url(r'^gbr/(?P<pk>[^/]+)$', GhostbusterDetailView.as_view(),
- name='ghostbuster-detail'),
- url(r'^roa/(?P<pk>[^/]+)$', RoaDetailView.as_view(), name='roa-detail'),
- (r'^$', 'rpki.gui.cacheview.views.global_summary'),
-)
-
-# vim:sw=4 ts=8 expandtab
diff --git a/rpki/gui/cacheview/util.py b/rpki/gui/cacheview/util.py
deleted file mode 100644
index 47425c8c..00000000
--- a/rpki/gui/cacheview/util.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham
-# Copyright (C) 2012, 2013 SPARTA, Inc. a Parsons Company
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = '$Id$'
-__all__ = ('import_rcynic_xml')
-
-default_logfile = '/var/rcynic/data/rcynic.xml'
-default_root = '/var/rcynic/data'
-object_accepted = None # set by import_rcynic_xml()
-
-import time
-import vobject
-import logging
-import os
-import stat
-from socket import getfqdn
-from cStringIO import StringIO
-
-from django.db import transaction
-import django.db.models
-
-import rpki
-import rpki.gui.app.timestamp
-from rpki.gui.app.models import Conf, Alert
-from rpki.gui.cacheview import models
-from rpki.rcynic import rcynic_xml_iterator, label_iterator
-from rpki.sundial import datetime
-from rpki.irdb.zookeeper import Zookeeper
-
-logger = logging.getLogger(__name__)
-
-
-class SomeoneShowMeAWayToGetOuttaHere(Exception):
- "'Cause I constantly pray I'll get outta here."
-
-
-def rcynic_cert(cert, obj):
- if not cert.sia_directory_uri:
- raise SomeoneShowMeAWayToGetOuttaHere
-
- obj.sia = cert.sia_directory_uri
-
- # object must be saved for the related manager methods below to work
- obj.save()
-
- # for the root cert, we can't set inst.issuer = inst until
- # after inst.save() has been called.
- if obj.issuer is None:
- obj.issuer = obj
- obj.save()
-
- # resources can change when a cert is updated
- obj.asns.clear()
- obj.addresses.clear()
-
- if cert.resources.asn.inherit:
- # FIXME: what happens when the parent's resources change and the child
- # cert is not reissued?
- obj.asns.add(*obj.issuer.asns.all())
- else:
- for asr in cert.resources.asn:
- logger.debug('processing %s', asr)
-
- attrs = {'min': asr.min, 'max': asr.max}
- q = models.ASRange.objects.filter(**attrs)
- if not q:
- obj.asns.create(**attrs)
- else:
- obj.asns.add(q[0])
-
- # obj.issuer is None the first time we process the root cert in the
- # hierarchy, so we need to guard against dereference
- for cls, addr_obj, addrset, parentset in (
- models.AddressRange, obj.addresses, cert.resources.v4,
- obj.issuer.addresses.all() if obj.issuer else []
- ), (
- models.AddressRangeV6, obj.addresses_v6, cert.resources.v6,
- obj.issuer.addresses_v6.all() if obj.issuer else []
- ):
- if addrset.inherit:
- addr_obj.add(*parentset)
- else:
- for rng in addrset:
- logger.debug('processing %s', rng)
-
- attrs = {'prefix_min': rng.min, 'prefix_max': rng.max}
- q = cls.objects.filter(**attrs)
- if not q:
- addr_obj.create(**attrs)
- else:
- addr_obj.add(q[0])
-
-
-def rcynic_roa(roa, obj):
- obj.asid = roa.asID
- # object must be saved for the related manager methods below to work
- obj.save()
- obj.prefixes.clear()
- obj.prefixes_v6.clear()
- for pfxset in roa.prefix_sets:
- if pfxset.__class__.__name__ == 'roa_prefix_set_ipv6':
- roa_cls = models.ROAPrefixV6
- prefix_obj = obj.prefixes_v6
- else:
- roa_cls = models.ROAPrefixV4
- prefix_obj = obj.prefixes
-
- for pfx in pfxset:
- attrs = {'prefix_min': pfx.min(),
- 'prefix_max': pfx.max(),
- 'max_length': pfx.max_prefixlen}
- q = roa_cls.objects.filter(**attrs)
- if not q:
- prefix_obj.create(**attrs)
- else:
- prefix_obj.add(q[0])
-
-
-def rcynic_gbr(gbr, obj):
- vcard = vobject.readOne(gbr.vcard)
- obj.full_name = vcard.fn.value if hasattr(vcard, 'fn') else None
- obj.email_address = vcard.email.value if hasattr(vcard, 'email') else None
- obj.telephone = vcard.tel.value if hasattr(vcard, 'tel') else None
- obj.organization = vcard.org.value[0] if hasattr(vcard, 'org') else None
- obj.save()
-
-LABEL_CACHE = {}
-
-# dict keeping mapping of uri to (handle, old status, new status) for objects
-# published by the local rpkid
-uris = {}
-
-dispatch = {
- 'rcynic_certificate': rcynic_cert,
- 'rcynic_roa': rcynic_roa,
- 'rcynic_ghostbuster': rcynic_gbr
-}
-
-model_class = {
- 'rcynic_certificate': models.Cert,
- 'rcynic_roa': models.ROA,
- 'rcynic_ghostbuster': models.Ghostbuster
-}
-
-
-def save_status(repo, vs):
- timestamp = datetime.fromXMLtime(vs.timestamp).to_sql()
- status = LABEL_CACHE[vs.status]
- g = models.generations_dict[vs.generation] if vs.generation else None
- repo.statuses.create(generation=g, timestamp=timestamp, status=status)
-
- # if this object is in our interest set, update with the current validation
- # status
- if repo.uri in uris:
- x, y, z, q = uris[repo.uri]
- valid = z or (status is object_accepted) # don't clobber previous True value
- uris[repo.uri] = x, y, valid, repo
-
- if status is not object_accepted:
- return
-
- cls = model_class[vs.file_class.__name__]
- # find the instance of the signedobject subclass that is associated with
- # this repo instance (may be empty when not accepted)
- inst_qs = cls.objects.filter(repo=repo)
-
- logger.debug('processing %s', vs.filename)
-
- if not inst_qs:
- inst = cls(repo=repo)
- logger.debug('object not found in db, creating new object cls=%s id=%s',
- cls, id(inst))
- else:
- inst = inst_qs[0]
-
- try:
- # determine if the object is changed/new
- mtime = os.stat(vs.filename)[stat.ST_MTIME]
- except OSError as e:
- logger.error('unable to stat %s: %s %s',
- vs.filename, type(e), e)
- # treat as if missing from rcynic.xml
- # use inst_qs rather than deleting inst so that we don't raise an
- # exception for newly created objects (inst_qs will be empty)
- inst_qs.delete()
- return
-
- if mtime != inst.mtime:
- inst.mtime = mtime
- try:
- obj = vs.obj # causes object to be lazily loaded
- except Exception, e:
- logger.warning('Caught %s while processing %s: %s',
- type(e), vs.filename, e)
- return
-
- inst.not_before = obj.notBefore.to_sql()
- inst.not_after = obj.notAfter.to_sql()
- inst.name = obj.subject
- inst.keyid = obj.ski
-
- # look up signing cert
- if obj.issuer == obj.subject:
- # self-signed cert (TA)
- assert isinstance(inst, models.Cert)
- inst.issuer = None
- else:
- # if an object has moved in the repository, the entry for
- # the old location will still be in the database, but
- # without any object_accepted in its validtion status
- qs = models.Cert.objects.filter(
- keyid=obj.aki,
- name=obj.issuer,
- repo__statuses__status=object_accepted
- )
- ncerts = len(qs)
- if ncerts == 0:
- logger.warning('unable to find signing cert with ski=%s (%s)', obj.aki, obj.issuer)
- return
- else:
- if ncerts > 1:
- # multiple matching certs, all of which are valid
- logger.warning('Found multiple certs matching ski=%s sn=%s', obj.aki, obj.issuer)
- for c in qs:
- logger.warning(c.repo.uri)
- # just use the first match
- inst.issuer = qs[0]
-
- try:
- # do object-specific tasks
- dispatch[vs.file_class.__name__](obj, inst)
- except SomeoneShowMeAWayToGetOuttaHere:
- logger.error("something wrong with %s, skipping", vs.filename)
- inst_qs.delete()
- return
- except:
- logger.error('caught exception while processing rcynic_object:\n'
- 'vs=' + repr(vs) + '\nobj=' + repr(obj))
- # .show() writes to stdout
- obj.show()
- raise
-
- logger.debug('object saved id=%s', id(inst))
- else:
- logger.debug('object is unchanged')
-
-
-@transaction.commit_on_success
-def process_cache(root, xml_file):
-
- last_uri = None
- repo = None
-
- logger.info('clearing validation statuses')
- models.ValidationStatus.objects.all().delete()
-
- logger.info('updating validation status')
- for vs in rcynic_xml_iterator(root, xml_file):
- if vs.uri != last_uri:
- repo, created = models.RepositoryObject.objects.get_or_create(uri=vs.uri)
- last_uri = vs.uri
- save_status(repo, vs)
-
- # garbage collection
- # remove all objects which have no ValidationStatus references, which
- # means they did not appear in the last XML output
- logger.info('performing garbage collection')
-
- # Delete all objects that have zero validation status elements.
- models.RepositoryObject.objects.annotate(num_statuses=django.db.models.Count('statuses')).filter(num_statuses=0).delete()
-
- # Delete all SignedObject instances that were not accepted. There may
- # exist rows for objects that were previously accepted.
- # See https://trac.rpki.net/ticket/588#comment:30
- #
- # We have to do this here rather than in save_status() because the
- # <validation_status/> elements are not guaranteed to be consecutive for a
- # given URI. see https://trac.rpki.net/ticket/625#comment:5
- models.SignedObject.objects.exclude(repo__statuses__status=object_accepted).delete()
-
- # ROAPrefixV* objects are M2M so they are not automatically deleted when
- # their ROA object disappears
- models.ROAPrefixV4.objects.annotate(num_roas=django.db.models.Count('roas')).filter(num_roas=0).delete()
- models.ROAPrefixV6.objects.annotate(num_roas=django.db.models.Count('roas')).filter(num_roas=0).delete()
- logger.info('done with garbage collection')
-
-
-@transaction.commit_on_success
-def process_labels(xml_file):
- logger.info('updating labels...')
-
- for label, kind, desc in label_iterator(xml_file):
- logger.debug('label=%s kind=%s desc=%s', label, kind, desc)
- if kind:
- q = models.ValidationLabel.objects.filter(label=label)
- if not q:
- obj = models.ValidationLabel(label=label)
- else:
- obj = q[0]
-
- obj.kind = models.kinds_dict[kind]
- obj.status = desc
- obj.save()
-
- LABEL_CACHE[label] = obj
-
-
-def fetch_published_objects():
- """Query rpkid for all objects published by local users, and look up the
- current validation status of each object. The validation status is used
- later to send alerts for objects which have transitioned to invalid.
-
- """
- logger.info('querying for published objects')
-
- handles = [conf.handle for conf in Conf.objects.all()]
- req = [rpki.left_right.list_published_objects_elt.make_pdu(action='list', self_handle=h, tag=h) for h in handles]
- z = Zookeeper()
- pdus = z.call_rpkid(*req)
- for pdu in pdus:
- if isinstance(pdu, rpki.left_right.list_published_objects_elt):
- # Look up the object in the rcynic cache
- qs = models.RepositoryObject.objects.filter(uri=pdu.uri)
- if qs:
- # get the current validity state
- valid = qs[0].statuses.filter(status=object_accepted).exists()
- uris[pdu.uri] = (pdu.self_handle, valid, False, None)
- logger.debug('adding ' + pdu.uri)
- else:
- # this object is not in the cache. it was either published
- # recently, or disappared previously. if it disappeared
- # previously, it has already been alerted. in either case, we
- # omit the uri from the list since we are interested only in
- # objects which were valid and are no longer valid
- pass
- elif isinstance(pdu, rpki.left_right.report_error_elt):
- logging.error('rpkid reported an error: %s', pdu.error_code)
-
-
-class Handle(object):
- def __init__(self):
- self.invalid = []
- self.missing = []
-
- def add_invalid(self, v):
- self.invalid.append(v)
-
- def add_missing(self, v):
- self.missing.append(v)
-
-
-def notify_invalid():
- """Send email alerts to the addresses registered in ghostbuster records for
- any invalid objects that were published by users of this system.
-
- """
-
- logger.info('sending notifications for invalid objects')
-
- # group invalid objects by user
- notify = {}
- for uri, v in uris.iteritems():
- handle, old_status, new_status, obj = v
-
- if obj is None:
- # object went missing
- n = notify.get(handle, Handle())
- n.add_missing(uri)
- # only select valid->invalid
- elif old_status and not new_status:
- n = notify.get(handle, Handle())
- n.add_invalid(obj)
-
- for handle, v in notify.iteritems():
- conf = Conf.objects.get(handle)
-
- msg = StringIO()
- msg.write('This is an alert about problems with objects published by '
- 'the resource handle %s.\n\n' % handle)
-
- if v.invalid:
- msg.write('The following objects were previously valid, but are '
- 'now invalid:\n')
-
- for o in v.invalid:
- msg.write('\n')
- msg.write(o.repo.uri)
- msg.write('\n')
- for s in o.statuses.all():
- msg.write('\t')
- msg.write(s.status.label)
- msg.write(': ')
- msg.write(s.status.status)
- msg.write('\n')
-
- if v.missing:
- msg.write('The following objects were previously valid but are no '
- 'longer in the cache:\n')
-
- for o in v.missing:
- msg.write(o)
- msg.write('\n')
-
- msg.write("""--
-You are receiving this email because your address is published in a Ghostbuster
-record, or is the default email address for this resource holder account on
-%s.""" % getfqdn())
-
- from_email = 'root@' + getfqdn()
- subj = 'invalid RPKI object alert for resource handle %s' % conf.handle
- conf.send_alert(subj, msg.getvalue(), from_email, severity=Alert.ERROR)
-
-
-def import_rcynic_xml(root=default_root, logfile=default_logfile):
- """Load the contents of rcynic.xml into the rpki.gui.cacheview database."""
-
- global object_accepted
-
- start = time.time()
- process_labels(logfile)
- object_accepted = LABEL_CACHE['object_accepted']
- fetch_published_objects()
- process_cache(root, logfile)
- notify_invalid()
-
- rpki.gui.app.timestamp.update('rcynic_import')
-
- stop = time.time()
- logger.info('elapsed time %d seconds.', (stop - start))
diff --git a/rpki/gui/cacheview/views.py b/rpki/gui/cacheview/views.py
deleted file mode 100644
index 94870eb2..00000000
--- a/rpki/gui/cacheview/views.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2013 SPARTA, Inc. a Parsons Company
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = '$Id$'
-
-from django.views.generic import DetailView
-from django.shortcuts import render
-from django.db.models import F
-
-from rpki.gui.cacheview import models, forms, misc
-from rpki.resource_set import resource_range_as, resource_range_ip
-from rpki.POW import IPAddress
-from rpki.exceptions import BadIPResource
-
-
-def cert_chain(obj):
- """
- returns an iterator covering all certs from the root cert down to the EE.
- """
- chain = [obj]
- while obj != obj.issuer:
- obj = obj.issuer
- chain.append(obj)
- return zip(range(len(chain)), reversed(chain))
-
-
-class SignedObjectDetailView(DetailView):
- def get_context_data(self, **kwargs):
- context = super(SignedObjectDetailView,
- self).get_context_data(**kwargs)
- context['chain'] = cert_chain(self.object)
- return context
-
-
-class RoaDetailView(SignedObjectDetailView):
- model = models.ROA
-
-
-class CertDetailView(SignedObjectDetailView):
- model = models.Cert
-
-
-class GhostbusterDetailView(SignedObjectDetailView):
- model = models.Ghostbuster
-
-
-def search_view(request):
- certs = None
- roas = None
-
- if request.method == 'POST':
- form = forms.SearchForm2(request.POST, request.FILES)
- if form.is_valid():
- resource = form.cleaned_data.get('resource')
- # try to determine the type of input given
- try:
- r = resource_range_as.parse_str(resource)
- certs = models.Cert.objects.filter(asns__min__gte=r.min,
- asns__max__lte=r.max)
- roas = models.ROA.objects.filter(asid__gte=r.min,
- asid__lte=r.max)
- except:
- try:
- r = resource_range_ip.parse_str(resource)
- if r.version == 4:
- certs = models.Cert.objects.filter(
- addresses__prefix_min__lte=r.min,
- addresses__prefix_max__gte=r.max)
- roas = models.ROA.objects.filter(
- prefixes__prefix_min__lte=r.min,
- prefixes__prefix_max__gte=r.max)
- else:
- certs = models.Cert.objects.filter(
- addresses_v6__prefix_min__lte=r.min,
- addresses_v6__prefix_max__gte=r.max)
- roas = models.ROA.objects.filter(
- prefixes_v6__prefix_min__lte=r.min,
- prefixes_v6__prefix_max__gte=r.max)
- except BadIPResource:
- pass
-
- return render(request, 'cacheview/search_result.html',
- {'resource': resource, 'certs': certs, 'roas': roas})
-
-
-def cmp_prefix(x, y):
- r = cmp(x[0].family, y[0].family)
- if r == 0:
- r = cmp(x[2], y[2]) # integer address
- if r == 0:
- r = cmp(x[0].bits, y[0].bits)
- if r == 0:
- r = cmp(x[0].max_length, y[0].max_length)
- if r == 0:
- r = cmp(x[1].asid, y[1].asid)
- return r
-
-
-#def cmp_prefix(x,y):
-# for attr in ('family', 'prefix', 'bits', 'max_length'):
-# r = cmp(getattr(x[0], attr), getattr(y[0], attr))
-# if r:
-# return r
-# return cmp(x[1].asid, y[1].asid)
-
-
-def query_view(request):
- """
- Allow the user to search for an AS or prefix, and show all published ROA
- information.
- """
-
- if request.method == 'POST':
- form = forms.SearchForm(request.POST, request.FILES)
- if form.is_valid():
- certs = None
- roas = None
-
- addr = form.cleaned_data.get('addr')
- asn = form.cleaned_data.get('asn')
-
- if addr:
- family, r = misc.parse_ipaddr(addr)
- prefixes = models.ROAPrefix.objects.filter(family=family, prefix=str(r.min))
-
- prefix_list = []
- for pfx in prefixes:
- for roa in pfx.roas.all():
- prefix_list.append((pfx, roa))
- elif asn:
- r = resource_range_as.parse_str(asn)
- roas = models.ROA.objects.filter(asid__gte=r.min, asid__lte=r.max)
-
- # display the results sorted by prefix
- prefix_list = []
- for roa in roas:
- for pfx in roa.prefixes.all():
- addr = IPAddress(pfx.prefix.encode())
- prefix_list.append((pfx, roa, addr))
- prefix_list.sort(cmp=cmp_prefix)
-
- return render('cacheview/query_result.html',
- {'object_list': prefix_list}, request)
- else:
- form = forms.SearchForm()
-
- return render('cacheview/search_form.html', {
- 'form': form, 'search_type': 'ROA '}, request)
-
-
-def global_summary(request):
- """Display a table summarizing the state of the global RPKI."""
-
- roots = models.Cert.objects.filter(issuer=F('pk')) # self-signed
-
- return render(request, 'cacheview/global_summary.html', {
- 'roots': roots
- })
-
-# vim:sw=4 ts=8 expandtab
diff --git a/rpki/gui/default_settings.py b/rpki/gui/default_settings.py
deleted file mode 100644
index a30b0362..00000000
--- a/rpki/gui/default_settings.py
+++ /dev/null
@@ -1,188 +0,0 @@
-"""
-This module contains static configuration settings for the web portal.
-"""
-
-__version__ = '$Id$'
-
-import os
-import random
-import string
-import socket
-
-import rpki.config
-import rpki.autoconf
-
-# Where to put static files.
-STATIC_ROOT = rpki.autoconf.datarootdir + '/rpki/media'
-
-# Must end with a slash!
-STATIC_URL = '/media/'
-
-# Where to email server errors.
-ADMINS = (('Administrator', 'root@localhost'),)
-
-LOGGING = {
- 'version': 1,
- 'formatters': {
- 'verbose': {
- # see http://docs.python.org/2.7/library/logging.html#logging.LogRecord
- 'format': '%(levelname)s %(asctime)s %(name)s %(message)s'
- },
- },
- 'handlers': {
- 'stderr': {
- 'class': 'logging.StreamHandler',
- 'level': 'DEBUG',
- 'formatter': 'verbose',
- },
- 'mail_admins': {
- 'level': 'ERROR',
- 'class': 'django.utils.log.AdminEmailHandler',
- },
- },
- 'loggers': {
- 'rpki.async': {
- # enabled for tracking https://trac.rpki.net/ticket/681
- # need to change this to WARNING once ticket is closed
- 'level': 'DEBUG',
- },
- # The Django default LOGGING configuration disables propagate on these
- # two loggers. Re-enable propagate so they will hit our root logger.
- 'django.request': {
- 'propagate': True,
- },
- 'django.security': {
- 'propagate': True,
- },
- },
- 'root': {
- 'level': 'WARNING',
- 'handlers': ['stderr', 'mail_admins'],
- },
-}
-
-# Load the SQL authentication bits from the system rpki.conf.
-rpki_config = rpki.config.parser(section='web_portal')
-
-DATABASES = {
- 'default': {
- 'ENGINE': 'django.db.backends.mysql',
- 'NAME': rpki_config.get('sql-database'),
- 'USER': rpki_config.get('sql-username'),
- 'PASSWORD': rpki_config.get('sql-password'),
-
- # Ensure the default storage engine is InnoDB since we need
- # foreign key support. The Django documentation suggests
- # removing this after the syncdb is performed as an optimization,
- # but there isn't an easy way to do this automatically.
-
- # Setting charset to latin1 is a disgusting kludge, but without
- # this MySQL 5.6 (and, proably, later) gets tetchy about ASN.1
- # DER stored in BLOB columns not being well-formed UTF8 (sic).
- # If you know of a better solution, tell us.
-
- 'OPTIONS': {
- 'init_command': 'SET storage_engine=INNODB',
- 'charset': 'latin1',
- }
- }
-}
-
-
-def select_tz():
- "Find a supported timezone that looks like UTC"
- for tz in ('UTC', 'GMT', 'Etc/UTC', 'Etc/GMT'):
- if os.path.exists('/usr/share/zoneinfo/' + tz):
- return tz
- # Can't determine the proper timezone, fall back to UTC and let Django
- # report the error to the user.
- return 'UTC'
-
-# Local time zone for this installation. Choices can be found here:
-# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
-# although not all choices may be available on all operating systems.
-# If running in a Windows environment this must be set to the same as your
-# system time zone.
-TIME_ZONE = select_tz()
-
-def get_secret_key():
- """Retrieve the secret-key value from rpki.conf or generate a random value
- if it is not present."""
- d = string.letters + string.digits
- val = ''.join([random.choice(d) for _ in range(50)])
- return rpki_config.get('secret-key', val)
-
-# Make this unique, and don't share it with anybody.
-SECRET_KEY = get_secret_key()
-
-# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
-# for details on why you might need this.
-def get_allowed_hosts():
- allowed_hosts = set(rpki_config.multiget("allowed-hosts"))
- allowed_hosts.add(socket.getfqdn())
- try:
- import netifaces
- for interface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(interface)
- for af in (netifaces.AF_INET, netifaces.AF_INET6):
- if af in addresses:
- for address in addresses[af]:
- if "addr" in address:
- allowed_hosts.add(address["addr"])
- except ImportError:
- pass
- return list(allowed_hosts)
-
-ALLOWED_HOSTS = get_allowed_hosts()
-
-DOWNLOAD_DIRECTORY = rpki_config.get('download-directory', '/var/tmp')
-
-# List of callables that know how to import templates from various sources.
-TEMPLATE_LOADERS = (
- 'django.template.loaders.filesystem.Loader',
- 'django.template.loaders.app_directories.Loader',
- 'django.template.loaders.eggs.Loader'
-)
-
-MIDDLEWARE_CLASSES = (
- 'django.middleware.common.CommonMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.middleware.csrf.CsrfViewMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.contrib.messages.middleware.MessageMiddleware'
-)
-
-ROOT_URLCONF = 'rpki.gui.urls'
-
-INSTALLED_APPS = (
- 'django.contrib.auth',
- #'django.contrib.admin',
- #'django.contrib.admindocs',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.staticfiles',
- 'rpki.irdb',
- 'rpki.gui.app',
- 'rpki.gui.cacheview',
- 'rpki.gui.routeview',
- 'south',
-)
-
-TEMPLATE_CONTEXT_PROCESSORS = (
- "django.contrib.auth.context_processors.auth",
- "django.core.context_processors.debug",
- "django.core.context_processors.i18n",
- "django.core.context_processors.media",
- "django.contrib.messages.context_processors.messages",
- "django.core.context_processors.request",
- "django.core.context_processors.static"
-)
-
-# Allow local site to override any setting above -- but if there's
-# anything that local sites routinely need to modify, please consider
-# putting that configuration into rpki.conf and just adding code here
-# to read that configuration.
-try:
- from local_settings import *
-except:
- pass
diff --git a/rpki/gui/gui_rpki_cache/__init__.py b/rpki/gui/gui_rpki_cache/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/__init__.py
diff --git a/rpki/gui/gui_rpki_cache/migrations/0001_initial.py b/rpki/gui/gui_rpki_cache/migrations/0001_initial.py
new file mode 100644
index 00000000..23625f56
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/migrations/0001_initial.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import rpki.gui.gui_rpki_cache.models
+import rpki.gui.models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='AddressRange',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='AddressRangeV6',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ASRange',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('min', models.BigIntegerField(validators=[rpki.gui.models.validate_asn])),
+ ('max', models.BigIntegerField(validators=[rpki.gui.models.validate_asn])),
+ ],
+ options={
+ 'ordering': ('min', 'max'),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Cert',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.TextField()),
+ ('sha256', models.SlugField(unique=True, max_length=64)),
+ ('not_before', models.DateTimeField()),
+ ('not_after', models.DateTimeField()),
+ ('ski', models.SlugField(max_length=40)),
+ ('addresses', models.ManyToManyField(related_name='certs', to='gui_rpki_cache.AddressRange')),
+ ('addresses_v6', models.ManyToManyField(related_name='certs', to='gui_rpki_cache.AddressRangeV6')),
+ ('asns', models.ManyToManyField(related_name='certs', to='gui_rpki_cache.ASRange')),
+ ('issuer', models.ForeignKey(related_name='children', to='gui_rpki_cache.Cert', null=True)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Ghostbuster',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.TextField()),
+ ('sha256', models.SlugField(unique=True, max_length=64)),
+ ('not_before', models.DateTimeField()),
+ ('not_after', models.DateTimeField()),
+ ('full_name', models.CharField(max_length=40)),
+ ('email_address', models.EmailField(max_length=254, null=True, blank=True)),
+ ('organization', models.CharField(max_length=255, null=True, blank=True)),
+ ('telephone', rpki.gui.gui_rpki_cache.models.TelephoneField(max_length=255, null=True, blank=True)),
+ ('issuer', models.ForeignKey(related_name='ghostbusters', to='gui_rpki_cache.Cert')),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ROA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.TextField()),
+ ('sha256', models.SlugField(unique=True, max_length=64)),
+ ('not_before', models.DateTimeField()),
+ ('not_after', models.DateTimeField()),
+ ('asid', models.PositiveIntegerField()),
+ ('issuer', models.ForeignKey(related_name='roas', to='gui_rpki_cache.Cert')),
+ ],
+ options={
+ 'ordering': ('asid',),
+ },
+ ),
+ migrations.CreateModel(
+ name='ROAPrefixV4',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ('max_length', models.PositiveSmallIntegerField()),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ },
+ ),
+ migrations.CreateModel(
+ name='ROAPrefixV6',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ('max_length', models.PositiveSmallIntegerField()),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ },
+ ),
+ migrations.AddField(
+ model_name='roa',
+ name='prefixes',
+ field=models.ManyToManyField(related_name='roas', to='gui_rpki_cache.ROAPrefixV4'),
+ ),
+ migrations.AddField(
+ model_name='roa',
+ name='prefixes_v6',
+ field=models.ManyToManyField(related_name='roas', to='gui_rpki_cache.ROAPrefixV6'),
+ ),
+ ]
diff --git a/rpki/gui/gui_rpki_cache/migrations/0002_auto_20160411_2311.py b/rpki/gui/gui_rpki_cache/migrations/0002_auto_20160411_2311.py
new file mode 100644
index 00000000..e9ceaac0
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/migrations/0002_auto_20160411_2311.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('gui_rpki_cache', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='cert',
+ name='sha256',
+ ),
+ migrations.RemoveField(
+ model_name='ghostbuster',
+ name='sha256',
+ ),
+ migrations.RemoveField(
+ model_name='roa',
+ name='sha256',
+ ),
+ migrations.AlterField(
+ model_name='cert',
+ name='issuer',
+ field=models.ForeignKey(to='gui_rpki_cache.Cert', null=True),
+ ),
+ migrations.AlterField(
+ model_name='ghostbuster',
+ name='issuer',
+ field=models.ForeignKey(to='gui_rpki_cache.Cert', null=True),
+ ),
+ migrations.AlterField(
+ model_name='roa',
+ name='issuer',
+ field=models.ForeignKey(to='gui_rpki_cache.Cert', null=True),
+ ),
+ ]
diff --git a/rpki/gui/gui_rpki_cache/migrations/0003_auto_20160420_2146.py b/rpki/gui/gui_rpki_cache/migrations/0003_auto_20160420_2146.py
new file mode 100644
index 00000000..e43ab1de
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/migrations/0003_auto_20160420_2146.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('gui_rpki_cache', '0002_auto_20160411_2311'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='ghostbuster',
+ name='issuer',
+ field=models.ForeignKey(related_name='ghostbusters', to='gui_rpki_cache.Cert', null=True),
+ ),
+ migrations.AlterField(
+ model_name='roa',
+ name='issuer',
+ field=models.ForeignKey(related_name='roas', to='gui_rpki_cache.Cert', null=True),
+ ),
+ ]
diff --git a/rpki/gui/gui_rpki_cache/migrations/__init__.py b/rpki/gui/gui_rpki_cache/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/migrations/__init__.py
diff --git a/rpki/gui/cacheview/models.py b/rpki/gui/gui_rpki_cache/models.py
index c3ee8421..dd0739c0 100644
--- a/rpki/gui/cacheview/models.py
+++ b/rpki/gui/gui_rpki_cache/models.py
@@ -1,5 +1,5 @@
# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -13,16 +13,13 @@
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
-__version__ = '$Id$'
-
-from datetime import datetime
-import time
+__version__ = '$Id: $'
from django.db import models
-from django.core.urlresolvers import reverse
import rpki.resource_set
import rpki.gui.models
+import rpki.rcynicdb.models
class TelephoneField(models.CharField):
@@ -31,56 +28,13 @@ class TelephoneField(models.CharField):
models.CharField.__init__(self, *args, **kwargs)
-class AddressRange(rpki.gui.models.PrefixV4):
- @models.permalink
- def get_absolute_url(self):
- return ('rpki.gui.cacheview.views.addressrange_detail', [str(self.pk)])
-
-
-class AddressRangeV6(rpki.gui.models.PrefixV6):
- @models.permalink
- def get_absolute_url(self):
- return ('rpki.gui.cacheview.views.addressrange_detail_v6',
- [str(self.pk)])
-
-
-class ASRange(rpki.gui.models.ASN):
- @models.permalink
- def get_absolute_url(self):
- return ('rpki.gui.cacheview.views.asrange_detail', [str(self.pk)])
-
-kinds = list(enumerate(('good', 'warn', 'bad')))
-kinds_dict = dict((v, k) for k, v in kinds)
-
+class AddressRange(rpki.gui.models.PrefixV4): pass
-class ValidationLabel(models.Model):
- """
- Represents a specific error condition defined in the rcynic XML
- output file.
- """
- label = models.CharField(max_length=79, db_index=True, unique=True)
- status = models.CharField(max_length=255)
- kind = models.PositiveSmallIntegerField(choices=kinds)
-
- def __unicode__(self):
- return self.label
-
-
-class RepositoryObject(models.Model):
- """
- Represents a globally unique RPKI repository object, specified by its URI.
- """
- uri = models.URLField(unique=True, db_index=True)
-generations = list(enumerate(('current', 'backup')))
-generations_dict = dict((val, key) for (key, val) in generations)
+class AddressRangeV6(rpki.gui.models.PrefixV6): pass
-class ValidationStatus(models.Model):
- timestamp = models.DateTimeField()
- generation = models.PositiveSmallIntegerField(choices=generations, null=True)
- status = models.ForeignKey(ValidationLabel)
- repo = models.ForeignKey(RepositoryObject, related_name='statuses')
+class ASRange(rpki.gui.models.ASN): pass
class SignedObject(models.Model):
@@ -89,58 +43,47 @@ class SignedObject(models.Model):
The signing certificate is ommitted here in order to give a proper
value for the 'related_name' attribute.
"""
- repo = models.ForeignKey(RepositoryObject, related_name='cert', unique=True)
-
- # on-disk file modification time
- mtime = models.PositiveIntegerField(default=0)
- # SubjectName
- name = models.CharField(max_length=255)
+ class Meta:
+ abstract = True
- # value from the SKI extension
- keyid = models.CharField(max_length=60, db_index=True)
+ # Duplicate of rpki.rcynicdb.models.RPKIObject
+ uri = models.TextField()
# validity period from EE cert which signed object
not_before = models.DateTimeField()
not_after = models.DateTimeField()
- def mtime_as_datetime(self):
- """
- convert the local timestamp to UTC and convert to a datetime object
- """
- return datetime.utcfromtimestamp(self.mtime + time.timezone)
-
- def status_id(self):
- """
- Returns a HTML class selector for the current object based on its validation status.
- The selector is chosen based on the current generation only. If there is any bad status,
- return bad, else if there are any warn status, return warn, else return good.
- """
- for x in reversed(kinds):
- if self.repo.statuses.filter(generation=generations_dict['current'], status__kind=x[0]):
- return x[1]
- return None # should not happen
-
def __unicode__(self):
- return u'%s' % self.name
+ return u'%s' % self.uri
+
+ def __repr__(self):
+ return u'<%s uri=%s>' % (self.__class__.__name__, self.uri)
class Cert(SignedObject):
"""
- Object representing a resource certificate.
+ Object representing a resource CA certificate.
"""
+ # Duplicate of rpki.rcynicdb.models.RPKIObject
+ ski = models.SlugField(max_length=40) # hex SHA-1
+
addresses = models.ManyToManyField(AddressRange, related_name='certs')
addresses_v6 = models.ManyToManyField(AddressRangeV6, related_name='certs')
asns = models.ManyToManyField(ASRange, related_name='certs')
- issuer = models.ForeignKey('self', related_name='children', null=True)
- sia = models.CharField(max_length=255)
- def get_absolute_url(self):
- return reverse('cert-detail', args=[str(self.pk)])
+ issuer = models.ForeignKey('self', on_delete=models.CASCADE, null=True)
+
+ def __repr__(self):
+ return u'<Cert uri=%s ski=%s not_before=%s not_after=%s>' % (self.uri, self.ski, self.not_before, self.not_after)
+
+ def __unicode__(self):
+ return u'RPKI CA Cert %s' % (self.uri,)
def get_cert_chain(self):
"""Return a list containing the complete certificate chain for this
certificate."""
+
cert = self
x = [cert]
while cert != cert.issuer:
@@ -180,6 +123,7 @@ class ROAPrefixV4(ROAPrefix, rpki.gui.models.PrefixV4):
@property
def routes(self):
"""return all routes covered by this roa prefix"""
+
return RouteOrigin.objects.filter(prefix_min__gte=self.prefix_min,
prefix_max__lte=self.prefix_max)
@@ -201,10 +145,7 @@ class ROA(SignedObject):
asid = models.PositiveIntegerField()
prefixes = models.ManyToManyField(ROAPrefixV4, related_name='roas')
prefixes_v6 = models.ManyToManyField(ROAPrefixV6, related_name='roas')
- issuer = models.ForeignKey('Cert', related_name='roas')
-
- def get_absolute_url(self):
- return reverse('roa-detail', args=[str(self.pk)])
+ issuer = models.ForeignKey(Cert, on_delete=models.CASCADE, null=True, related_name='roas')
class Meta:
ordering = ('asid',)
@@ -218,11 +159,7 @@ class Ghostbuster(SignedObject):
email_address = models.EmailField(blank=True, null=True)
organization = models.CharField(blank=True, null=True, max_length=255)
telephone = TelephoneField(blank=True, null=True)
- issuer = models.ForeignKey('Cert', related_name='ghostbusters')
-
- def get_absolute_url(self):
- # note that ghostbuster-detail is different from gbr-detail! sigh
- return reverse('ghostbuster-detail', args=[str(self.pk)])
+ issuer = models.ForeignKey(Cert, on_delete=models.CASCADE, null=True, related_name='ghostbusters')
def __unicode__(self):
if self.full_name:
diff --git a/rpki/gui/gui_rpki_cache/util.py b/rpki/gui/gui_rpki_cache/util.py
new file mode 100644
index 00000000..0bc4fa5d
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/util.py
@@ -0,0 +1,308 @@
+# Copyright (C) 2011 SPARTA, Inc. dba Cobham
+# Copyright (C) 2012, 2013, 2016 SPARTA, Inc. a Parsons Company
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+__version__ = '$Id: util.py 6335 2016-03-29 03:09:13Z sra $'
+
+import logging
+import time
+import vobject
+from socket import getfqdn
+from cStringIO import StringIO
+
+if __name__ == '__main__':
+ import os
+ logging.basicConfig(level=logging.DEBUG)
+ os.environ.update(DJANGO_SETTINGS_MODULE='rpki.django_settings.gui')
+ import django
+ django.setup()
+
+import os.path
+
+logger = logging.getLogger(__name__)
+
+from django.db import transaction
+import django.db.models
+
+import rpki
+import rpki.resource_set
+import rpki.left_right
+import rpki.gui.app.timestamp
+from rpki.gui.app.models import Conf, Alert
+from rpki.gui.gui_rpki_cache import models
+from rpki.irdb.zookeeper import Zookeeper
+
+from lxml.etree import Element, SubElement
+
+
+def process_certificate(auth, obj):
+ cert = models.Cert.objects.filter(ski=obj.ski).first()
+ if cert:
+ logger.debug('cache hit for CA cert uri=%s ski=%s' % (cert.uri, cert.ski))
+ return cert # cache hit
+
+ logger.debug('parsing cert at %s' % (obj.uri,))
+
+ # Process resource CA certificates
+ x509 = rpki.POW.X509.derRead(obj.der)
+
+ # ensure this is a resource CA Certificate (ignore Router certs)
+ bc = x509.getBasicConstraints()
+ is_ca = bc is not None and bc[0]
+ if not is_ca:
+ return
+
+ # locate the parent certificate
+ if obj.aki and obj.aki != obj.ski:
+ try:
+ issuer = models.Cert.objects.get(ski=obj.aki)
+ except models.Cert.DoesNotExist:
+ # process parent cert first
+ issuer = process_certificate(auth, rpki.rcynicdb.models.RPKIObject.objects.get(ski=obj.aki, authenticated=auth))
+ else:
+ issuer = None # root
+
+ asns, v4, v6 = x509.getRFC3779()
+
+ cert = models.Cert.objects.create(
+ uri=obj.uri,
+ ski=obj.ski,
+ not_before=x509.getNotBefore(),
+ not_after=x509.getNotAfter(),
+ issuer=issuer
+ )
+
+ if issuer is None:
+ cert.issuer = cert # self-signed
+ cert.save()
+
+ if asns == 'inherit':
+ cert.asns.add(*issuer.asns.all())
+ elif asns:
+ for asmin, asmax in asns:
+ asr, _ = models.ASRange.objects.get_or_create(min=asmin, max=asmax)
+ cert.asns.add(asr)
+
+ if v4 == 'inherit':
+ cert.addresses.add(*issuer.addresses.all())
+ elif v4:
+ for v4min, v4max in v4:
+ pfx, _ = models.AddressRange.objects.get_or_create(prefix_min=v4min, prefix_max=v4max)
+ cert.addresses.add(pfx)
+
+ if v6 == 'inherit':
+ cert.addresses_v6.add(*issuer.addresses_v6.all())
+ elif v6:
+ for v6min, v6max in v6:
+ pfx, _ = models.AddressRangeV6.objects.get_or_create(prefix_min=v6min, prefix_max=v6max)
+ cert.addresses_v6.add(pfx)
+
+ return cert
+
+def process_roa(auth, obj):
+ logger.debug('parsing roa at %s' % (obj.uri,))
+
+ r = rpki.POW.ROA.derRead(obj.der)
+ r.verify() # required in order to extract asID
+ ee = r.certs()[0] # rpki.POW.X509
+ aki = ee.getAKI().encode('hex')
+
+ logger.debug('looking for ca cert with ski=%s' % (aki,))
+
+ # Locate the Resource CA cert that issued the EE that signed this ROA
+ issuer = models.Cert.objects.get(ski=aki)
+
+ roa = models.ROA.objects.create(
+ uri=obj.uri,
+ asid=r.getASID(),
+ not_before=ee.getNotBefore(),
+ not_after=ee.getNotAfter(),
+ issuer=issuer)
+
+ prefixes = r.getPrefixes()
+ if prefixes[0]: # v4
+ for p in prefixes[0]:
+ v = rpki.resource_set.roa_prefix_ipv4(*p)
+ roapfx, _ = models.ROAPrefixV4.objects.get_or_create(prefix_min=v.min(), prefix_max=v.max(), max_length=v.max_prefixlen)
+ roa.prefixes.add(roapfx)
+ if prefixes[1]: # v6
+ for p in prefixes[1]:
+ v = rpki.resource_set.roa_prefix_ipv6(*p)
+ roapfx, _ = models.ROAPrefixV6.objects.get_or_create(prefix_min=v.min(), prefix_max=v.max(), max_length=v.max_prefixlen)
+ roa.prefixes_v6.add(roapfx)
+
+ return roa
+
+def process_ghostbuster(auth, obj):
+ logger.debug('parsing ghostbuster at %s' % (obj.uri,))
+ g = rpki.POW.CMS.derRead(obj.der)
+ ee = g.certs()[0] # rpki.POW.X509
+ aki = ee.getAKI().encode('hex')
+ vcard = vobject.readOne(g.verify())
+
+ # Locate the Resource CA cert that issued the EE that signed this ghostbuster
+ issuer = models.Cert.objects.get(ski=aki)
+
+ gbr = models.Ghostbuster.objects.create(
+ uri=obj.uri,
+ issuer=issuer,
+ not_before=ee.getNotBefore(),
+ not_after=ee.getNotAfter(),
+ full_name = vcard.fn.value if hasattr(vcard, 'fn') else None,
+ email_address = vcard.email.value if hasattr(vcard, 'email') else None,
+ telephone = vcard.tel.value if hasattr(vcard, 'tel') else None,
+ organization = vcard.org.value[0] if hasattr(vcard, 'org') else None
+ )
+
+ return gbr
+
+@transaction.atomic
+def process_cache():
+ logger.info('processing rpki cache')
+
+ # foreign key constraints should cause all other objects to be removed
+ models.Cert.objects.all().delete()
+
+ # certs must be processed first in order to build proper foreign keys for roa/gbr
+ dispatch = {
+ '.cer': process_certificate,
+ '.gbr': process_ghostbuster,
+ '.roa': process_roa
+ }
+
+ auth = rpki.rcynicdb.models.Authenticated.objects.order_by('started').first()
+
+ # Resource CA Certs are processed first in order to attach ROAs and Ghostbusters
+ for suffix in ('.cer', '.roa', '.gbr'):
+ cb = dispatch[suffix]
+
+ for rpkiobj in auth.rpkiobject_set.filter(uri__endswith=suffix):
+ cb(auth, rpkiobj)
+
+ # Garbage collection - remove M2M relations for certs/ROAs which no longer exist
+ models.ASRange.objects.annotate(num_certs=django.db.models.Count('certs')).filter(num_certs=0).delete()
+ models.AddressRange.objects.annotate(num_certs=django.db.models.Count('certs')).filter(num_certs=0).delete()
+ models.AddressRangeV6.objects.annotate(num_certs=django.db.models.Count('certs')).filter(num_certs=0).delete()
+
+ models.ROAPrefixV4.objects.annotate(num_roas=django.db.models.Count('roas')).filter(num_roas=0).delete()
+ models.ROAPrefixV6.objects.annotate(num_roas=django.db.models.Count('roas')).filter(num_roas=0).delete()
+
+
+# dict mapping resource handle to list of published objects, use for notifying objects which have become invalid
+uris = {}
+model_map = { '.cer': models.Cert, '.roa': models.ROA, '.gbr': models.Ghostbuster }
+
+def fetch_published_objects():
+ """Query rpkid for all objects published by local users, and look up the
+ current validation status of each object. The validation status is used
+ later to send alerts for objects which have transitioned to invalid.
+ """
+ logger.info('querying for published objects')
+
+ handles = [conf.handle for conf in Conf.objects.all()]
+ q_msg = Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "query", version = rpki.left_right.version)
+ for h in handles:
+ SubElement(q_msg, rpki.left_right.tag_list_published_objects, tenant_handle=h, tag=h)
+ try:
+ z = Zookeeper()
+ r_msg = z.call_rpkid(q_msg)
+ except Exception as err:
+ logger.error('Unable to connect to rpkid to fetch list of published objects')
+ logger.exception(err)
+ # Should be safe to continue processing the rcynic cache, we just don't do any notifications
+ return
+
+ for r_pdu in r_msg:
+ if r_pdu.tag == rpki.left_right.tag_list_published_objects:
+ # Look up the object in the rcynic cache
+ uri = r_pdu.get('uri')
+ ext = os.path.splitext(uri)[1]
+ if ext in model_map:
+ model = model_map[ext]
+ handle = r_pdu.get('tenant_handle')
+
+ if model.objects.filter(uri=uri).exists():
+ v = uris.setdefault(handle, [])
+ v.append(uri)
+ logger.debug('adding %s', uri)
+ #else:
+ # this object is not in the cache. it was either published
+ # recently, or disappared previously. if it disappeared
+ # previously, it has already been alerted. in either case, we
+ # omit the uri from the list since we are interested only in
+ # objects which were valid and are no longer valid
+ else:
+ logger.debug('skipping object ext=%s uri=%s' % (ext, uri))
+
+ elif r_pdu.tag == rpki.left_right.tag_report_error:
+ logger.error('rpkid reported an error: %s', r_pdu.get("error_code"))
+
+
+def notify_invalid():
+ """Send email alerts to the addresses registered in ghostbuster records for
+ any invalid objects that were published by users of this system.
+ """
+
+ logger.info('sending notifications for invalid objects')
+
+ for handle, published_objects in uris.iteritems():
+ missing = []
+ for u in published_objects:
+ ext = os.path.splitext(u)[1]
+ model = model_map[ext]
+ if not model.objects.filter(uri=u).exists():
+ missing.append(u)
+
+ if missing:
+ conf = Conf.objects.get(handle=handle)
+
+ msg = StringIO()
+ msg.write('This is an alert about problems with objects published by '
+ 'the resource handle %s.\n\n' % handle)
+
+ msg.write('The following objects were previously valid, but are '
+ 'now invalid:\n')
+
+ for u in missing:
+ msg.write('\n')
+ msg.write(u)
+ msg.write('\n')
+
+ msg.write("""--
+You are receiving this email because your address is published in a Ghostbuster
+record, or is the default email address for this resource holder account on
+%s.""" % getfqdn())
+
+ from_email = 'root@' + getfqdn()
+ subj = 'invalid RPKI object alert for resource handle %s' % conf.handle
+ conf.send_alert(subj, msg.getvalue(), from_email, severity=Alert.ERROR)
+
+
+def update_cache():
+ """Cache information from the current rcynicdb for display by the gui"""
+
+ start = time.time()
+ fetch_published_objects()
+ process_cache()
+ notify_invalid()
+
+ rpki.gui.app.timestamp.update('rcynic_import')
+
+ stop = time.time()
+ logger.info('elapsed time %d seconds.', (stop - start))
+
+
+if __name__ == '__main__':
+ process_cache()
diff --git a/rpki/gui/models.py b/rpki/gui/models.py
index 184383c0..4d56c18e 100644
--- a/rpki/gui/models.py
+++ b/rpki/gui/models.py
@@ -19,57 +19,72 @@ Common classes for reuse in apps.
__version__ = '$Id$'
from django.db import models
+from django.core.exceptions import ValidationError
import rpki.resource_set
import rpki.POW
-from south.modelsinspector import add_introspection_rules
-class IPv6AddressField(models.Field):
- "Field large enough to hold a 128-bit unsigned integer."
-
- __metaclass__ = models.SubfieldBase
-
- def db_type(self, connection):
- return 'binary(16)'
-
- def to_python(self, value):
- if isinstance(value, rpki.POW.IPAddress):
+class IPAddressField(models.CharField):
+ """
+ Field class for rpki.POW.IPAddress, stored as zero-padded
+ hexadecimal so lexicographic order is identical to numeric order.
+ """
+
+ # Django's CharField type doesn't distinguish between the length
+ # of the human readable form and the length of the storage form,
+ # so we have to leave room for IPv6 punctuation even though we
+ # only store hexadecimal digits and thus will never use the full
+ # width of the database field. Price we pay for portability.
+ #
+ # Documentation on the distinction between the various conversion
+ # methods is fairly opaque, to put it politely, and we have to
+ # handle database engines which sometimes return buffers or other
+ # classes instead of strings, so the conversions are a bit
+ # finicky. If this goes haywire, your best bet is probably to
+ # litter the code with logging.debug() calls and debug by printf.
+
+ def __init__(self, *args, **kwargs):
+ kwargs["max_length"] = 40
+ super(IPAddressField, self).__init__(*args, **kwargs)
+
+ def deconstruct(self):
+ name, path, args, kwargs = super(IPAddressField, self).deconstruct()
+ del kwargs["max_length"]
+ return name, path, args, kwargs
+
+ @staticmethod
+ def _value_to_ipaddress(value):
+ if value is None or isinstance(value, rpki.POW.IPAddress):
return value
- return rpki.POW.IPAddress.fromBytes(value)
-
- def get_db_prep_value(self, value, connection, prepared):
- """
- Note that we add a custom conversion to encode long values as hex
- strings in SQL statements. See settings.get_conv() for details.
-
- """
- return value.toBytes()
+ value = str(value)
+ if ":" in value or "." in value:
+ return rpki.POW.IPAddress(value)
+ else:
+ return rpki.POW.IPAddress.fromBytes(value.decode("hex"))
-
-class IPv4AddressField(models.Field):
- "Wrapper around rpki.POW.IPAddress."
-
- __metaclass__ = models.SubfieldBase
-
- def db_type(self, connection):
- return 'int UNSIGNED'
+ def from_db_value(self, value, expression, connection, context):
+ # Can't use super() here, see Django documentation.
+ return self._value_to_ipaddress(value)
def to_python(self, value):
+ return self._value_to_ipaddress(
+ super(IPAddressField, self).to_python(value))
+
+ @staticmethod
+ def _hex_from_ipaddress(value):
if isinstance(value, rpki.POW.IPAddress):
+ return value.toBytes().encode("hex")
+ else:
return value
- return rpki.POW.IPAddress(value, version=4)
- def get_db_prep_value(self, value, connection, prepared):
- return long(value)
+ def get_prep_value(self, value):
+ return super(IPAddressField, self).get_prep_value(
+ self._hex_from_ipaddress(value))
-add_introspection_rules(
- [
- ([IPv4AddressField, IPv6AddressField], [], {})
- ],
- [r'^rpki\.gui\.models\.IPv4AddressField',
- r'^rpki\.gui\.models\.IPv6AddressField']
-)
+ def get_db_prep_value(self, value, connection, prepared = False):
+ return self._hex_from_ipaddress(
+ super(IPAddressField, self).get_db_prep_value(value, connection, prepared))
class Prefix(models.Model):
@@ -82,6 +97,7 @@ class Prefix(models.Model):
"""
Returns the prefix as a rpki.resource_set.resource_range_ip object.
"""
+
return self.range_cls(self.prefix_min, self.prefix_max)
@property
@@ -96,6 +112,7 @@ class Prefix(models.Model):
def __unicode__(self):
"""This method may be overridden by subclasses. The default
implementation calls get_prefix_display(). """
+
return self.get_prefix_display()
class Meta:
@@ -110,8 +127,8 @@ class PrefixV4(Prefix):
range_cls = rpki.resource_set.resource_range_ipv4
- prefix_min = IPv4AddressField(db_index=True, null=False)
- prefix_max = IPv4AddressField(db_index=True, null=False)
+ prefix_min = IPAddressField(db_index=True, null=False)
+ prefix_max = IPAddressField(db_index=True, null=False)
class Meta(Prefix.Meta):
abstract = True
@@ -122,20 +139,25 @@ class PrefixV6(Prefix):
range_cls = rpki.resource_set.resource_range_ipv6
- prefix_min = IPv6AddressField(db_index=True, null=False)
- prefix_max = IPv6AddressField(db_index=True, null=False)
+ prefix_min = IPAddressField(db_index=True, null=False)
+ prefix_max = IPAddressField(db_index=True, null=False)
class Meta(Prefix.Meta):
abstract = True
+def validate_asn(value):
+ if value < 0 or value > 0xFFFFFFFFL:
+ raise ValidationError('%s is not a valid autonomous system number' % value)
+
+
class ASN(models.Model):
"""Represents a range of ASNs.
This model is abstract, and is intended to be reused by applications."""
- min = models.PositiveIntegerField(null=False)
- max = models.PositiveIntegerField(null=False)
+ min = models.BigIntegerField(null=False, validators=[validate_asn])
+ max = models.BigIntegerField(null=False, validators=[validate_asn])
class Meta:
abstract = True
diff --git a/rpki/gui/routeview/api.py b/rpki/gui/routeview/api.py
index cf699c9a..b4ff297a 100644
--- a/rpki/gui/routeview/api.py
+++ b/rpki/gui/routeview/api.py
@@ -29,8 +29,8 @@ def route_list(request):
By default, only returns up to 10 matching routes, but the client may
request a different limit with the 'count=' query string parameter.
-
"""
+
hard_limit = 100
if request.method == 'GET' and 'prefix__in' in request.GET:
diff --git a/rpki/gui/routeview/models.py b/rpki/gui/routeview/models.py
index 052860c4..35039136 100644
--- a/rpki/gui/routeview/models.py
+++ b/rpki/gui/routeview/models.py
@@ -1,5 +1,5 @@
# Copyright (C) 2010, 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -31,7 +31,7 @@ class RouteOrigin(rpki.gui.models.PrefixV4):
@property
def roas(self):
"Return a queryset of ROAs which cover this route."
- return rpki.gui.cacheview.models.ROA.objects.filter(
+ return rpki.gui.gui_rpki_cache.models.ROA.objects.filter(
prefixes__prefix_min__lte=self.prefix_min,
prefixes__prefix_max__gte=self.prefix_max
)
@@ -39,7 +39,7 @@ class RouteOrigin(rpki.gui.models.PrefixV4):
@property
def roa_prefixes(self):
"Return a queryset of ROA prefixes which cover this route."
- return rpki.gui.cacheview.models.ROAPrefixV4.objects.filter(
+ return rpki.gui.gui_rpki_cache.models.ROAPrefixV4.objects.filter(
prefix_min__lte=self.prefix_min,
prefix_max__gte=self.prefix_max
)
@@ -78,4 +78,4 @@ class RouteOriginV6(rpki.gui.models.PrefixV6):
# this goes at the end of the file to avoid problems with circular imports
-import rpki.gui.cacheview.models
+import rpki.gui.gui_rpki_cache.models
diff --git a/rpki/gui/routeview/util.py b/rpki/gui/routeview/util.py
index 1340e9fa..14ac3cf9 100644
--- a/rpki/gui/routeview/util.py
+++ b/rpki/gui/routeview/util.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2012, 2013 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2013, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -16,7 +16,6 @@ __version__ = '$Id$'
__all__ = ('import_routeviews_dump')
import itertools
-import _mysql_exceptions
import os.path
import subprocess
import time
@@ -25,12 +24,13 @@ import urlparse
import bz2
from urllib import urlretrieve, unquote
-from django.db import transaction, connection
+from django.db import transaction
from django.conf import settings
from rpki.resource_set import resource_range_ipv4, resource_range_ipv6
from rpki.exceptions import BadIPResource
import rpki.gui.app.timestamp
+from rpki.gui.routeview.models import RouteOrigin
# globals
logger = logging.getLogger(__name__)
@@ -43,28 +43,17 @@ class ParseError(Exception): pass
class RouteDumpParser(object):
"""Base class for parsing various route dump formats."""
- table = 'routeview_routeorigin'
- sql = "INSERT INTO %s_new SET asn=%%s, prefix_min=%%s, prefix_max=%%s" % table
range_class = resource_range_ipv4
def __init__(self, path, *args, **kwargs):
+ transaction.set_autocommit(False)
+
self.path = path
- self.cursor = connection.cursor()
self.last_prefix = None
self.asns = set()
def parse(self):
- try:
- logger.info('Dropping existing staging table...')
- self.cursor.execute('DROP TABLE IF EXISTS %s_new' % self.table)
- except _mysql_exceptions.Warning:
- pass
-
- logger.info('Creating staging table...')
- self.cursor.execute('CREATE TABLE %(table)s_new LIKE %(table)s' % {'table': self.table})
-
- logger.info('Disabling autocommit...')
- self.cursor.execute('SET autocommit=0')
+ RouteOrigin.objects.all().delete()
logger.info('Adding rows to table...')
for line in self.input:
@@ -88,25 +77,13 @@ class RouteDumpParser(object):
self.ins_routes() # process data from last line
- logger.info('Committing...')
- self.cursor.execute('COMMIT')
-
- try:
- logger.info('Dropping old table...')
- self.cursor.execute('DROP TABLE IF EXISTS %s_old' % self.table)
- except _mysql_exceptions.Warning:
- pass
-
- logger.info('Swapping staging table with live table...')
- self.cursor.execute('RENAME TABLE %(table)s TO %(table)s_old, %(table)s_new TO %(table)s' % {'table': self.table})
-
self.cleanup() # allow cleanup function to throw prior to COMMIT
- transaction.commit_unless_managed()
-
logger.info('Updating timestamp metadata...')
rpki.gui.app.timestamp.update('bgp_v4_import')
+ transaction.commit() # explicit commit required since set_autocommit(False) was called in __init__
+
def parse_line(self, row):
"Parse one line of input. Return a (prefix, origin_as) tuple."
return None
@@ -119,9 +96,8 @@ class RouteDumpParser(object):
if self.last_prefix is not None:
try:
rng = self.range_class.parse_str(self.last_prefix)
- rmin = long(rng.min)
- rmax = long(rng.max)
- self.cursor.executemany(self.sql, [(asn, rmin, rmax) for asn in self.asns])
+ for asn in self.asns:
+ RouteOrigin.objects.create(asn=asn, prefix_min=rng.min, prefix_max=rng.max)
except BadIPResource:
logger.warning('skipping bad prefix: ' + self.last_prefix)
self.asns = set() # reset
@@ -151,6 +127,10 @@ class TextDumpParser(RouteDumpParser):
except ValueError:
raise ParseError('bad AS value')
+ # FIXME Django doesn't have a field for positive integers up to 2^32-1
+ if origin_as < 0 or origin_as > 2147483647:
+ raise ParseError('AS value out of supported database range')
+
prefix = cols[1]
# validate the prefix since the "sh ip bgp" output is sometimes
@@ -215,8 +195,8 @@ def import_routeviews_dump(filename=DEFAULT_URL, filetype='text'):
filename [optional]: the full path to the downloaded file to parse
filetype [optional]: 'text' or 'mrt'
-
"""
+
start_time = time.time()
tmpname = None
@@ -229,10 +209,8 @@ def import_routeviews_dump(filename=DEFAULT_URL, filetype='text'):
logger.info("Downloading %s to %s", filename, tmpname)
if os.path.exists(tmpname):
- os.remove(tmpname)
- # filename is replaced with a local filename containing cached copy of
- # URL
- filename, headers = urlretrieve(filename, tmpname)
+ os.remove(tmpname)
+ filename, headers = urlretrieve(filename, tmpname)
try:
dispatch = {'text': TextDumpParser, 'mrt': MrtDumpParser}
diff --git a/rpki/gui/script_util.py b/rpki/gui/script_util.py
index c8248527..289dbbb7 100644
--- a/rpki/gui/script_util.py
+++ b/rpki/gui/script_util.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2013 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2013, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -16,13 +16,6 @@
This module contains utility functions for use in standalone scripts.
"""
-import django
-
-from django.conf import settings
-
-from rpki import config
-from rpki import autoconf
-
__version__ = '$Id$'
@@ -30,29 +23,11 @@ def setup():
"""
Configure Django enough to use the ORM.
"""
- cfg = config.parser(section='web_portal')
- # INSTALLED_APPS doesn't seem necessary so long as you are only accessing
- # existing tables.
- #
- # Setting charset to latin1 is a disgusting kludge, but without
- # this MySQL 5.6 (and, proably, later) gets tetchy about ASN.1 DER
- # stored in BLOB columns not being well-formed UTF8 (sic). If you
- # know of a better solution, tell us.
- settings.configure(
- DATABASES={
- 'default': {
- 'ENGINE': 'django.db.backends.mysql',
- 'NAME': cfg.get('sql-database'),
- 'USER': cfg.get('sql-username'),
- 'PASSWORD': cfg.get('sql-password'),
- 'OPTIONS': {
- 'charset': 'latin1',
- }
- }
- },
- MIDDLEWARE_CLASSES = (),
- DOWNLOAD_DIRECTORY = cfg.get('download-directory', '/var/tmp'),
- )
- if django.VERSION >= (1, 7):
- from django.apps import apps
- apps.populate(settings.INSTALLED_APPS)
+
+ import os
+
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.gui")
+
+ # Initialize Django.
+ import django
+ django.setup()
diff --git a/rpki/gui/urls.py b/rpki/gui/urls.py
index 955092f5..ac1d2916 100644
--- a/rpki/gui/urls.py
+++ b/rpki/gui/urls.py
@@ -1,5 +1,5 @@
# Copyright (C) 2010, 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012, 2013 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2013, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -28,7 +28,6 @@ urlpatterns = patterns(
#(r'^admin/', include(admin.site.urls)),
(r'^api/', include('rpki.gui.api.urls')),
- (r'^cacheview/', include('rpki.gui.cacheview.urls')),
(r'^rpki/', include('rpki.gui.app.urls')),
(r'^accounts/login/$', 'rpki.gui.views.login'),
diff --git a/rpki/http.py b/rpki/http.py
deleted file mode 100644
index 71239c7f..00000000
--- a/rpki/http.py
+++ /dev/null
@@ -1,1058 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
-# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
-# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notices and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
-# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
-# ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
-# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-HTTP utilities, both client and server.
-"""
-
-import time
-import socket
-import asyncore
-import asynchat
-import urlparse
-import sys
-import random
-import logging
-import rpki.async
-import rpki.sundial
-import rpki.x509
-import rpki.exceptions
-import rpki.log
-import rpki.POW
-
-logger = logging.getLogger(__name__)
-
-## @var default_content_type
-# HTTP content type used for RPKI messages.
-# Can be overriden on a per-client or per-server basis.
-default_content_type = "application/x-rpki"
-
-## @var want_persistent_client
-# Whether we want persistent HTTP client streams, when server also supports them.
-want_persistent_client = False
-
-## @var want_persistent_server
-# Whether we want persistent HTTP server streams, when client also supports them.
-want_persistent_server = False
-
-## @var default_client_timeout
-# Default HTTP client connection timeout.
-default_client_timeout = rpki.sundial.timedelta(minutes = 5)
-
-## @var default_server_timeout
-# Default HTTP server connection timeouts. Given our druthers, we'd
-# prefer that the client close the connection, as this avoids the
-# problem of client starting to reuse connection just as server closes
-# it, so this should be longer than the client timeout.
-default_server_timeout = rpki.sundial.timedelta(minutes = 10)
-
-## @var default_http_version
-# Preferred HTTP version.
-default_http_version = (1, 0)
-
-## @var default_tcp_port
-# Default port for clients and servers that don't specify one.
-default_tcp_port = 80
-
-## @var enable_ipv6_servers
-# Whether to enable IPv6 listeners. Enabled by default, as it should
-# be harmless. Has no effect if kernel doesn't support IPv6.
-enable_ipv6_servers = True
-
-## @var enable_ipv6_clients
-# Whether to consider IPv6 addresses when making connections.
-# Disabled by default, as IPv6 connectivity is still a bad joke in
-# far too much of the world.
-enable_ipv6_clients = False
-
-## @var have_ipv6
-# Whether the current machine claims to support IPv6. Note that just
-# because the kernel supports it doesn't mean that the machine has
-# usable IPv6 connectivity. I don't know of a simple portable way to
-# probe for connectivity at runtime (the old test of "can you ping
-# SRI-NIC.ARPA?" seems a bit dated...). Don't set this, it's set
-# automatically by probing using the socket() system call at runtime.
-try:
- # pylint: disable=W0702,W0104
- socket.socket(socket.AF_INET6).close()
- socket.IPPROTO_IPV6
- socket.IPV6_V6ONLY
-except:
- have_ipv6 = False
-else:
- have_ipv6 = True
-
-## @var use_adns
-
-# Whether to use rpki.adns code. This is still experimental, so it's
-# not (yet) enabled by default.
-use_adns = False
-try:
- import rpki.adns
-except ImportError:
- pass
-
-def supported_address_families(enable_ipv6):
- """
- IP address families on which servers should listen, and to consider
- when selecting addresses for client connections.
- """
- if enable_ipv6 and have_ipv6:
- return (socket.AF_INET, socket.AF_INET6)
- else:
- return (socket.AF_INET,)
-
-def localhost_addrinfo():
- """
- Return pseudo-getaddrinfo results for localhost.
- """
- result = [(socket.AF_INET, "127.0.0.1")]
- if enable_ipv6_clients and have_ipv6:
- result.append((socket.AF_INET6, "::1"))
- return result
-
-class http_message(object):
- """
- Virtual class representing of one HTTP message.
- """
-
- software_name = "ISC RPKI library"
-
- def __init__(self, version = None, body = None, headers = None):
- self.version = version
- self.body = body
- self.headers = headers
- self.normalize_headers()
-
- def normalize_headers(self, headers = None):
- """
- Clean up (some of) the horrible messes that HTTP allows in its
- headers.
- """
- if headers is None:
- headers = () if self.headers is None else self.headers.items()
- translate_underscore = True
- else:
- translate_underscore = False
- result = {}
- for k, v in headers:
- if translate_underscore:
- k = k.replace("_", "-")
- k = "-".join(s.capitalize() for s in k.split("-"))
- v = v.strip()
- if k in result:
- result[k] += ", " + v
- else:
- result[k] = v
- self.headers = result
-
- @classmethod
- def parse_from_wire(cls, headers):
- """
- Parse and normalize an incoming HTTP message.
- """
- self = cls()
- headers = headers.split("\r\n")
- self.parse_first_line(*headers.pop(0).split(None, 2))
- for i in xrange(len(headers) - 2, -1, -1):
- if headers[i + 1][0].isspace():
- headers[i] += headers[i + 1]
- del headers[i + 1]
- self.normalize_headers([h.split(":", 1) for h in headers])
- return self
-
- def format(self):
- """
- Format an outgoing HTTP message.
- """
- s = self.format_first_line()
- if self.body is not None:
- assert isinstance(self.body, str)
- self.headers["Content-Length"] = len(self.body)
- for kv in self.headers.iteritems():
- s += "%s: %s\r\n" % kv
- s += "\r\n"
- if self.body is not None:
- s += self.body
- return s
-
- def __str__(self):
- return self.format()
-
- def parse_version(self, version):
- """
- Parse HTTP version, raise an exception if we can't.
- """
- if version[:5] != "HTTP/":
- raise rpki.exceptions.HTTPBadVersion("Couldn't parse version %s" % version)
- self.version = tuple(int(i) for i in version[5:].split("."))
-
- @property
- def persistent(self):
- """
- Figure out whether this HTTP message encourages a persistent connection.
- """
- c = self.headers.get("Connection")
- if self.version == (1, 1):
- return c is None or "close" not in c.lower()
- elif self.version == (1, 0):
- return c is not None and "keep-alive" in c.lower()
- else:
- return False
-
-class http_request(http_message):
- """
- HTTP request message.
- """
-
- def __init__(self, cmd = None, path = None, version = default_http_version, body = None, callback = None, errback = None, **headers):
- assert cmd == "POST" or body is None
- http_message.__init__(self, version = version, body = body, headers = headers)
- self.cmd = cmd
- self.path = path
- self.callback = callback
- self.errback = errback
- self.retried = False
-
- def parse_first_line(self, cmd, path, version):
- """
- Parse first line of HTTP request message.
- """
- self.parse_version(version)
- self.cmd = cmd
- self.path = path
-
- def format_first_line(self):
- """
- Format first line of HTTP request message, and set up the
- User-Agent header.
- """
- self.headers.setdefault("User-Agent", self.software_name)
- return "%s %s HTTP/%d.%d\r\n" % (self.cmd, self.path, self.version[0], self.version[1])
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.cmd, self.path)
-
-class http_response(http_message):
- """
- HTTP response message.
- """
-
- def __init__(self, code = None, reason = None, version = default_http_version, body = None, **headers):
- http_message.__init__(self, version = version, body = body, headers = headers)
- self.code = code
- self.reason = reason
-
- def parse_first_line(self, version, code, reason):
- """
- Parse first line of HTTP response message.
- """
- self.parse_version(version)
- self.code = int(code)
- self.reason = reason
-
- def format_first_line(self):
- """
- Format first line of HTTP response message, and set up Date and
- Server headers.
- """
- self.headers.setdefault("Date", time.strftime("%a, %d %b %Y %T GMT"))
- self.headers.setdefault("Server", self.software_name)
- return "HTTP/%d.%d %s %s\r\n" % (self.version[0], self.version[1], self.code, self.reason)
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.code, self.reason)
-
-def addr_to_string(addr):
- """
- Convert socket addr tuple to printable string. Assumes 2-element
- tuple is IPv4, 4-element tuple is IPv6, throws TypeError for
- anything else.
- """
-
- if len(addr) == 2:
- return "%s:%d" % (addr[0], addr[1])
- if len(addr) == 4:
- return "%s.%d" % (addr[0], addr[1])
- raise TypeError
-
-@rpki.log.class_logger(logger)
-class http_stream(asynchat.async_chat):
- """
- Virtual class representing an HTTP message stream.
- """
-
- # Keep pylint happy; @class_logger overwrites this.
- logger = None
-
- def __repr__(self):
- status = ["connected"] if self.connected else []
- try:
- status.append(addr_to_string(self.addr))
- except TypeError:
- pass
- return rpki.log.log_repr(self, *status)
-
- def __init__(self, sock = None):
- self.logger = logging.LoggerAdapter(self.logger, dict(context = self))
- asynchat.async_chat.__init__(self, sock)
- self.buffer = []
- self.timer = rpki.async.timer(self.handle_timeout)
- self.restart()
-
- def restart(self):
- """
- (Re)start HTTP message parser, reset timer.
- """
- assert not self.buffer
- self.chunk_handler = None
- self.set_terminator("\r\n\r\n")
- self.update_timeout()
-
- def update_timeout(self):
- """
- Put this stream's timer in known good state: set it to the
- stream's timeout value if we're doing timeouts, otherwise clear
- it.
- """
- if self.timeout is not None:
- self.logger.debug("Setting timeout %s", self.timeout)
- self.timer.set(self.timeout)
- else:
- self.logger.debug("Clearing timeout")
- self.timer.cancel()
-
- def collect_incoming_data(self, data):
- """
- Buffer incoming data from asynchat.
- """
- self.buffer.append(data)
- self.update_timeout()
-
- def get_buffer(self):
- """
- Consume data buffered from asynchat.
- """
- val = "".join(self.buffer)
- self.buffer = []
- return val
-
- def found_terminator(self):
- """
- Asynchat reported that it found whatever terminator we set, so
- figure out what to do next. This can be messy, because we can be
- in any of several different states:
-
- @li We might be handling chunked HTTP, in which case we have to
- initialize the chunk decoder;
-
- @li We might have found the end of the message body, in which case
- we can (finally) process it; or
-
- @li We might have just gotten to the end of the message headers,
- in which case we have to parse them to figure out which of three
- separate mechanisms (chunked, content-length, TCP close) is going
- to tell us how to find the end of the message body.
- """
- self.update_timeout()
- if self.chunk_handler:
- self.chunk_handler()
- elif not isinstance(self.get_terminator(), str):
- self.handle_body()
- else:
- self.msg = self.parse_type.parse_from_wire(self.get_buffer())
- if self.msg.version == (1, 1) and "chunked" in self.msg.headers.get("Transfer-Encoding", "").lower():
- self.msg.body = []
- self.chunk_handler = self.chunk_header
- self.set_terminator("\r\n")
- elif "Content-Length" in self.msg.headers:
- self.set_terminator(int(self.msg.headers["Content-Length"]))
- else:
- self.handle_no_content_length()
-
- def chunk_header(self):
- """
- Asynchat just handed us what should be the header of one chunk of
- a chunked encoding stream. If this chunk has a body, set the
- stream up to read it; otherwise, this is the last chunk, so start
- the process of exiting the chunk decoder.
- """
- n = int(self.get_buffer().partition(";")[0], 16)
- self.logger.debug("Chunk length %s", n)
- if n:
- self.chunk_handler = self.chunk_body
- self.set_terminator(n)
- else:
- self.msg.body = "".join(self.msg.body)
- self.chunk_handler = self.chunk_discard_trailer
-
- def chunk_body(self):
- """
- Asynchat just handed us what should be the body of a chunk of the
- body of a chunked message (sic). Save it, and prepare to move on
- to the next chunk.
- """
- self.logger.debug("Chunk body")
- self.msg.body += self.buffer
- self.buffer = []
- self.chunk_handler = self.chunk_discard_crlf
- self.set_terminator("\r\n")
-
- def chunk_discard_crlf(self):
- """
- Consume the CRLF that terminates a chunk, reinitialize chunk
- decoder to be ready for the next chunk.
- """
- self.logger.debug("Chunk CRLF")
- s = self.get_buffer()
- assert s == "", "%r: Expected chunk CRLF, got '%s'" % (self, s)
- self.chunk_handler = self.chunk_header
-
- def chunk_discard_trailer(self):
- """
- Consume chunk trailer, which should be empty, then (finally!) exit
- the chunk decoder and hand complete message off to the application.
- """
- self.logger.debug("Chunk trailer")
- s = self.get_buffer()
- assert s == "", "%r: Expected end of chunk trailers, got '%s'" % (self, s)
- self.chunk_handler = None
- self.handle_message()
-
- def handle_body(self):
- """
- Hand normal (not chunked) message off to the application.
- """
- self.msg.body = self.get_buffer()
- self.handle_message()
-
- def handle_error(self):
- """
- Asynchat (or asyncore, or somebody) raised an exception. See
- whether it's one we should just pass along, otherwise log a stack
- trace and close the stream.
- """
- self.timer.cancel()
- etype = sys.exc_info()[0]
- if etype in (SystemExit, rpki.async.ExitNow):
- raise
- if etype is not rpki.exceptions.HTTPClientAborted:
- self.logger.exception("Closing due to error")
- self.close()
-
- def handle_timeout(self):
- """
- Inactivity timer expired, close connection with prejudice.
- """
- self.logger.debug("Timeout, closing")
- self.close()
-
- def handle_close(self):
- """
- Wrapper around asynchat connection close handler, so that we can
- log the event, cancel timer, and so forth.
- """
- self.logger.debug("Close event in HTTP stream handler")
- self.timer.cancel()
- asynchat.async_chat.handle_close(self)
-
-@rpki.log.class_logger(logger)
-class http_server(http_stream):
- """
- HTTP server stream.
- """
-
- ## @var parse_type
- # Stream parser should look for incoming HTTP request messages.
- parse_type = http_request
-
- ## @var timeout
- # Use the default server timeout value set in the module header.
- timeout = default_server_timeout
-
- def __init__(self, sock, handlers):
- self.handlers = handlers
- self.received_content_type = None
- http_stream.__init__(self, sock = sock)
- self.expect_close = not want_persistent_server
- self.logger.debug("Starting")
-
- def handle_no_content_length(self):
- """
- Handle an incoming message that used neither chunking nor a
- Content-Length header (that is: this message will be the last one
- in this server stream). No special action required.
- """
- self.handle_message()
-
- def find_handler(self, path):
- """
- Helper method to search self.handlers.
- """
- for h in self.handlers:
- if path.startswith(h[0]):
- return h[1], h[2] if len(h) > 2 else (default_content_type,)
- return None, None
-
- def handle_message(self):
- """
- HTTP layer managed to deliver a complete HTTP request to
- us, figure out what to do with it. Check the command and
- Content-Type, look for a handler, and if everything looks right,
- pass the message body, path, and a reply callback to the handler.
- """
- self.logger.debug("Received request %r", self.msg)
- if not self.msg.persistent:
- self.expect_close = True
- handler, allowed_content_types = self.find_handler(self.msg.path)
- self.received_content_type = self.msg.headers["Content-Type"]
- error = None
- if self.msg.cmd != "POST":
- error = 501, "No handler for method %s" % self.msg.cmd
- elif self.received_content_type not in allowed_content_types:
- error = 415, "No handler for Content-Type %s" % self.received_content_type
- elif handler is None:
- error = 404, "No handler for URL %s" % self.msg.path
- if error is None:
- try:
- handler(self.msg.body, self.msg.path, self.send_reply)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- self.logger.exception("Unhandled exception while handling HTTP request")
- self.send_error(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
- else:
- self.send_error(code = error[0], reason = error[1])
-
- def send_error(self, code, reason):
- """
- Send an error response to this request.
- """
- self.send_message(code = code, reason = reason)
-
- def send_reply(self, code, body = None, reason = "OK"):
- """
- Send a reply to this request.
- """
- self.send_message(code = code, body = body, reason = reason)
-
- def send_message(self, code, reason = "OK", body = None):
- """
- Queue up reply message. If both parties agree that connection is
- persistant, and if no error occurred, restart this stream to
- listen for next message; otherwise, queue up a close event for
- this stream so it will shut down once the reply has been sent.
- """
- self.logger.debug("Sending response %s %s", code, reason)
- if code >= 400:
- self.expect_close = True
- msg = http_response(code = code, reason = reason, body = body,
- Content_Type = self.received_content_type,
- Connection = "Close" if self.expect_close else "Keep-Alive")
- self.push(msg.format())
- if self.expect_close:
- self.logger.debug("Closing")
- self.timer.cancel()
- self.close_when_done()
- else:
- self.logger.debug("Listening for next message")
- self.restart()
-
-@rpki.log.class_logger(logger)
-class http_listener(asyncore.dispatcher):
- """
- Listener for incoming HTTP connections.
- """
-
- def __repr__(self):
- try:
- status = (addr_to_string(self.addr),)
- except TypeError:
- status = ()
- return rpki.log.log_repr(self, *status)
-
- def __init__(self, handlers, addrinfo):
- self.logger = logging.LoggerAdapter(self.logger, dict(context = self))
- asyncore.dispatcher.__init__(self)
- self.handlers = handlers
- try:
- af, socktype, proto, canonname, sockaddr = addrinfo # pylint: disable=W0612
- self.create_socket(af, socktype)
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- try:
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- except AttributeError:
- pass
- if have_ipv6 and af == socket.AF_INET6:
- self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
- self.bind(sockaddr)
- self.listen(5)
- except Exception:
- self.logger.exception("Couldn't set up HTTP listener")
- self.close()
- for h in handlers:
- self.logger.debug("Handling %s", h[0])
-
- def handle_accept(self):
- """
- Asyncore says we have an incoming connection, spawn an http_server
- stream for it and pass along all of our handler data.
- """
- try:
- res = self.accept()
- if res is None:
- raise
- sock, addr = res # pylint: disable=W0633
- self.logger.debug("Accepting connection from %s", addr_to_string(addr))
- http_server(sock = sock, handlers = self.handlers)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- self.logger.exception("Unable to accept connection")
-
- def handle_error(self):
- """
- Asyncore signaled an error, pass it along or log it.
- """
- if sys.exc_info()[0] in (SystemExit, rpki.async.ExitNow):
- raise
- self.logger.exception("Error in HTTP listener")
-
-@rpki.log.class_logger(logger)
-class http_client(http_stream):
- """
- HTTP client stream.
- """
-
- ## @var parse_type
- # Stream parser should look for incoming HTTP response messages.
- parse_type = http_response
-
- ## @var timeout
- # Use the default client timeout value set in the module header.
- timeout = default_client_timeout
-
- ## @var state
- # Application layer connection state.
- state = None
-
- def __init__(self, queue, hostport):
- http_stream.__init__(self)
- self.logger.debug("Creating new connection to %s", addr_to_string(hostport))
- self.queue = queue
- self.host = hostport[0]
- self.port = hostport[1]
- self.set_state("opening")
- self.expect_close = not want_persistent_client
-
- def start(self):
- """
- Create socket and request a connection.
- """
- if not use_adns:
- self.logger.debug("Not using ADNS")
- self.gotaddrinfo([(socket.AF_INET, self.host)])
- elif self.host == "localhost":
- self.logger.debug("Bypassing DNS for localhost")
- self.gotaddrinfo(localhost_addrinfo())
- else:
- families = supported_address_families(enable_ipv6_clients)
- self.logger.debug("Starting ADNS lookup for %s in families %r", self.host, families)
- rpki.adns.getaddrinfo(self.gotaddrinfo, self.dns_error, self.host, families)
-
- def dns_error(self, e):
- """
- Handle DNS lookup errors. For now, just whack the connection.
- Undoubtedly we should do something better with diagnostics here.
- """
- self.handle_error()
-
- def gotaddrinfo(self, addrinfo):
- """
- Got address data from DNS, create socket and request connection.
- """
- try:
- self.af, self.address = random.choice(addrinfo)
- self.logger.debug("Connecting to AF %s host %s port %s addr %s", self.af, self.host, self.port, self.address)
- self.create_socket(self.af, socket.SOCK_STREAM)
- self.connect((self.address, self.port))
- if self.addr is None:
- self.addr = (self.host, self.port)
- self.update_timeout()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- self.handle_error()
-
- def handle_connect(self):
- """
- Asyncore says socket has connected.
- """
- self.logger.debug("Socket connected")
- self.set_state("idle")
- assert self.queue.client is self
- self.queue.send_request()
-
- def set_state(self, state):
- """
- Set HTTP client connection state.
- """
- self.logger.debug("State transition %s => %s", self.state, state)
- self.state = state
-
- def handle_no_content_length(self):
- """
- Handle response message that used neither chunking nor a
- Content-Length header (that is: this message will be the last one
- in this server stream). In this case we want to read until we
- reach the end of the data stream.
- """
- self.set_terminator(None)
-
- def send_request(self, msg):
- """
- Queue up request message and kickstart connection.
- """
- self.logger.debug("Sending request %r", msg)
- assert self.state == "idle", "%r: state should be idle, is %s" % (self, self.state)
- self.set_state("request-sent")
- msg.headers["Connection"] = "Close" if self.expect_close else "Keep-Alive"
- self.push(msg.format())
- self.restart()
-
- def handle_message(self):
- """
- Handle incoming HTTP response message. Make sure we're in a state
- where we expect to see such a message (and allow the mysterious
- empty messages that Apache sends during connection close, no idea
- what that is supposed to be about). If everybody agrees that the
- connection should stay open, put it into an idle state; otherwise,
- arrange for the stream to shut down.
- """
-
- self.logger.debug("Message received, state %s", self.state)
-
- if not self.msg.persistent:
- self.expect_close = True
-
- if self.state != "request-sent":
- if self.state == "closing":
- assert not self.msg.body
- self.logger.debug("Ignoring empty response received while closing")
- return
- raise rpki.exceptions.HTTPUnexpectedState("%r received message while in unexpected state %s" % (self, self.state))
-
- if self.expect_close:
- self.logger.debug("Closing")
- self.set_state("closing")
- self.close_when_done()
- else:
- self.logger.debug("Idling")
- self.set_state("idle")
- self.update_timeout()
-
- if self.msg.code != 200:
- errmsg = "HTTP request failed"
- if self.msg.code is not None:
- errmsg += " with status %s" % self.msg.code
- if self.msg.reason:
- errmsg += ", reason %s" % self.msg.reason
- if self.msg.body:
- errmsg += ", response %s" % self.msg.body
- raise rpki.exceptions.HTTPRequestFailed(errmsg)
- self.queue.return_result(self, self.msg, detach = self.expect_close)
-
- def handle_close(self):
- """
- Asyncore signaled connection close. If we were waiting for that
- to find the end of a response message, process the resulting
- message now; if we were waiting for the response to a request we
- sent, signal the error.
- """
- http_stream.handle_close(self)
- self.logger.debug("State %s", self.state)
- if self.get_terminator() is None:
- self.handle_body()
- elif self.state == "request-sent":
- raise rpki.exceptions.HTTPClientAborted("HTTP request aborted by close event")
- else:
- self.queue.detach(self)
-
- def handle_timeout(self):
- """
- Connection idle timer has expired. Shut down connection in any
- case, noisily if we weren't idle.
- """
- bad = self.state not in ("idle", "closing")
- if bad:
- self.logger.warning("Timeout while in state %s", self.state)
- http_stream.handle_timeout(self)
- if bad:
- try:
- raise rpki.exceptions.HTTPTimeout
- except: # pylint: disable=W0702
- self.handle_error()
- else:
- self.queue.detach(self)
-
- def handle_error(self):
- """
- Asyncore says something threw an exception. Log it, then shut
- down the connection and pass back the exception.
- """
- eclass, edata = sys.exc_info()[0:2]
- self.logger.warning("Error on HTTP client connection %s:%s %s %s", self.host, self.port, eclass, edata)
- http_stream.handle_error(self)
- self.queue.return_result(self, edata, detach = True)
-
-@rpki.log.class_logger(logger)
-class http_queue(object):
- """
- Queue of pending HTTP requests for a single destination. This class
- is very tightly coupled to http_client; http_client handles the HTTP
- stream itself, this class provides a slightly higher-level API.
- """
-
- def __repr__(self):
- return rpki.log.log_repr(self, addr_to_string(self.hostport))
-
- def __init__(self, hostport):
- self.logger = logging.LoggerAdapter(self.logger, dict(context = self))
- self.hostport = hostport
- self.client = None
- self.logger.debug("Created")
- self.queue = []
-
- def request(self, *requests):
- """
- Append http_request object(s) to this queue.
- """
- self.logger.debug("Adding requests %r", requests)
- self.queue.extend(requests)
-
- def restart(self):
- """
- Send next request for this queue, if we can. This may involve
- starting a new http_client stream, reusing an existing idle
- stream, or just ignoring this request if there's an active client
- stream already; in the last case, handling of the response (or
- exception, or timeout) for the query currently in progress will
- call this method when it's time to kick out the next query.
- """
- try:
- if self.client is None:
- self.client = http_client(self, self.hostport)
- self.logger.debug("Attached client %r", self.client)
- self.client.start()
- elif self.client.state == "idle":
- self.logger.debug("Sending request to existing client %r", self.client)
- self.send_request()
- else:
- self.logger.debug("Client %r exists in state %r", self.client, self.client.state)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- self.return_result(self.client, e, detach = True)
-
- def send_request(self):
- """
- Kick out the next query in this queue, if any.
- """
- if self.queue:
- self.client.send_request(self.queue[0])
-
- def detach(self, client_):
- """
- Detatch a client from this queue. Silently ignores attempting to
- detach a client that is not attached to this queue, to simplify
- handling of what otherwise would be a nasty set of race
- conditions.
- """
- if client_ is self.client:
- self.logger.debug("Detaching client %r", client_)
- self.client = None
-
- def return_result(self, client, result, detach = False): # pylint: disable=W0621
- """
- Client stream has returned a result, which we need to pass along
- to the original caller. Result may be either an HTTP response
- message or an exception. In either case, once we're done
- processing this result, kick off next message in the queue, if any.
- """
-
- if client is not self.client:
- self.logger.warning("Wrong client trying to return result. THIS SHOULD NOT HAPPEN. Dropping result %r", result)
- return
-
- if detach:
- self.detach(client)
-
- try:
- req = self.queue.pop(0)
- self.logger.debug("Dequeuing request %r", req)
- except IndexError:
- self.logger.warning("No caller. THIS SHOULD NOT HAPPEN. Dropping result %r", result)
- return
-
- assert isinstance(result, http_response) or isinstance(result, Exception)
-
- if isinstance(result, http_response):
- try:
- self.logger.debug("Returning result %r to caller", result)
- req.callback(result.body)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- result = e
-
- if isinstance(result, Exception):
- try:
- self.logger.warning("Returning exception %r to caller: %s", result, result)
- req.errback(result)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- self.logger.exception("Exception in exception callback, may have lost event chain")
-
- self.logger.debug("Queue: %r", self.queue)
-
- if self.queue:
- self.restart()
-
-## @var client_queues
-# Map of (host, port) tuples to http_queue objects.
-client_queues = {}
-
-def client(msg, url, callback, errback, content_type = default_content_type):
- """
- Open client HTTP connection, send a message, set up callbacks to
- handle response.
- """
-
- u = urlparse.urlparse(url)
-
- if (u.scheme not in ("", "http") or
- u.username is not None or
- u.password is not None or
- u.params != "" or
- u.query != "" or
- u.fragment != ""):
- raise rpki.exceptions.BadClientURL("Unusable URL %s" % url)
-
- logger.debug("Contacting %s", url)
-
- request = http_request(
- cmd = "POST",
- path = u.path,
- body = msg,
- callback = callback,
- errback = errback,
- Host = u.hostname,
- Content_Type = content_type)
-
- hostport = (u.hostname or "localhost", u.port or default_tcp_port)
-
- logger.debug("Created request %r for %s", request, addr_to_string(hostport))
- if hostport not in client_queues:
- client_queues[hostport] = http_queue(hostport)
- client_queues[hostport].request(request)
-
- # Defer connection attempt until after we've had time to process any
- # pending I/O events, in case connections have closed.
-
- logger.debug("Scheduling connection startup for %r", request)
- rpki.async.event_defer(client_queues[hostport].restart)
-
-def server(handlers, port, host = ""):
- """
- Run an HTTP server and wait (forever) for connections.
- """
-
- if not isinstance(handlers, (tuple, list)):
- handlers = (("/", handlers),)
-
- # Yes, this is sick. So is getaddrinfo() returning duplicate
- # records, which RedHat has the gall to claim is a feature.
- ai = []
- for af in supported_address_families(enable_ipv6_servers):
- try:
- if host:
- h = host
- elif have_ipv6 and af == socket.AF_INET6:
- h = "::"
- else:
- h = "0.0.0.0"
- for a in socket.getaddrinfo(h, port, af, socket.SOCK_STREAM):
- if a not in ai:
- ai.append(a)
- except socket.gaierror:
- pass
-
- for a in ai:
- http_listener(addrinfo = a, handlers = handlers)
-
- rpki.async.event_loop()
-
-class caller(object):
- """
- Handle client-side mechanics for protocols based on HTTP, CMS, and
- rpki.xml_utils. Calling sequence is intended to nest within
- rpki.async.sync_wrapper.
- """
-
- debug = False
-
- def __init__(self, proto, client_key, client_cert, server_ta, server_cert, url, debug = None):
- self.proto = proto
- self.client_key = client_key
- self.client_cert = client_cert
- self.server_ta = server_ta
- self.server_cert = server_cert
- self.url = url
- self.cms_timestamp = None
- if debug is not None:
- self.debug = debug
-
- def __call__(self, cb, eb, *pdus):
-
- def done(r_der):
- """
- Handle CMS-wrapped XML response message.
- """
- try:
- r_cms = self.proto.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap((self.server_ta, self.server_cert))
- self.cms_timestamp = r_cms.check_replay(self.cms_timestamp, self.url)
- if self.debug:
- print "<!-- Reply -->"
- print r_cms.pretty_print_content()
- cb(r_msg)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- eb(e)
-
- q_msg = self.proto.msg.query(*pdus)
- q_cms = self.proto.cms_msg()
- q_der = q_cms.wrap(q_msg, self.client_key, self.client_cert)
- if self.debug:
- print "<!-- Query -->"
- print q_cms.pretty_print_content()
-
- client(url = self.url, msg = q_der, callback = done, errback = eb)
diff --git a/rpki/http_simple.py b/rpki/http_simple.py
new file mode 100644
index 00000000..86b2eb5a
--- /dev/null
+++ b/rpki/http_simple.py
@@ -0,0 +1,138 @@
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+HTTP using Python standard libraries, for RPKI programs that don't
+need the full-blown rpki.http asynchronous code.
+"""
+
+import logging
+import httplib
+import urlparse
+import BaseHTTPServer
+
+logger = logging.getLogger(__name__)
+
+
+default_content_type = "application/x-rpki"
+
+
+class HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """
+ HTTP request handler simple RPKI servers.
+ """
+
+ rpki_handlers = ()
+
+ def do_POST(self):
+ try:
+ content_type = self.headers.get("Content-Type")
+ content_length = self.headers.get("Content-Length")
+ for handler_path, handler, handler_content_type in self.rpki_handlers:
+ if self.path.startswith(handler_path) and content_type in handler_content_type:
+ return handler(self,
+ self.rfile.read()
+ if content_length is None else
+ self.rfile.read(int(content_length)))
+ self.send_error(404, "No handler for path %s" % self.path)
+ except Exception, e:
+ logger.exception("Unhandled exception")
+ self.send_error(501, "Unhandled exception %s" % e)
+
+ def send_cms_response(self, der):
+ self.send_response(200)
+ self.send_header("Content-Type", default_content_type)
+ self.send_header("Content-Length", str(len(der)))
+ self.end_headers()
+ self.wfile.write(der)
+
+ def log_message(self, *args):
+ logger.info(*args, extra = dict(context = "%s:%s" % self.client_address))
+
+ def send_error(self, code, message = None):
+ # BaseHTTPRequestHandler.send_error() generates HTML error messages,
+ # which we don't want, so we override the method to suppress this.
+ self.send_response(code, message)
+ self.send_header("Content-Type", default_content_type)
+ self.send_header("Connection", "close")
+ self.end_headers()
+
+
+def server(handlers, port, host = ""):
+ """
+ Run an HTTP server and wait (forever) for connections.
+ """
+
+ if isinstance(handlers, (tuple, list)):
+ handlers = tuple(h[:3] if len(h) > 2 else (h[0], h[1], default_content_type)
+ for h in handlers)
+ else:
+ handlers = (("/", handlers, default_content_type),)
+
+ class RequestHandler(HTTPRequestHandler):
+ rpki_handlers = handlers
+
+ BaseHTTPServer.HTTPServer((host, port), RequestHandler).serve_forever()
+
+
+class BadURL(Exception):
+ "Bad contact URL"
+
+class RequestFailed(Exception):
+ "HTTP returned failure"
+
+class BadContentType(Exception):
+ "Wrong HTTP Content-Type"
+
+
+def client(proto_cms_msg, client_key, client_cert, server_ta, server_cert, url, q_msg,
+ debug = None, replay_track = None, client_crl = None, content_type = default_content_type):
+ """
+ Issue single a query and return the response, handling all the CMS and XML goo.
+ """
+
+ u = urlparse.urlparse(url)
+
+ if u.scheme not in ("", "http") or u.username or u.password or u.params or u.query or u.fragment:
+ raise BadURL("Unusable URL %s", url)
+
+ q_cms = proto_cms_msg()
+ q_der = q_cms.wrap(q_msg, client_key, client_cert, client_crl)
+
+ if debug is not None:
+ debug.write("<!-- Query -->\n" + q_cms.pretty_print_content() + "\n")
+
+ http = httplib.HTTPConnection(u.hostname, u.port or httplib.HTTP_PORT)
+ http.request("POST", u.path, q_der, {"Content-Type" : content_type})
+ r = http.getresponse()
+
+ if r.status != 200:
+ raise RequestFailed("HTTP request failed with status %r reason %r" % (r.status, r.reason))
+
+ if r.getheader("Content-Type") != content_type:
+ raise BadContentType("HTTP Content-Type %r, expected %r" % (r.getheader("Content-Type"), content_type))
+
+ r_der = r.read()
+ r_cms = proto_cms_msg(DER = r_der)
+ r_msg = r_cms.unwrap((server_ta, server_cert))
+
+ if replay_track is not None:
+ replay_track.cms_timestamp = r_cms.check_replay(replay_track.cms_timestamp, url)
+
+ if debug is not None:
+ debug.write("<!-- Reply -->\n" + r_cms.pretty_print_content() + "\n")
+
+ return r_msg
diff --git a/rpki/ipaddrs.py b/rpki/ipaddrs.py
index 68b2d27d..5117585c 100644
--- a/rpki/ipaddrs.py
+++ b/rpki/ipaddrs.py
@@ -48,90 +48,99 @@ once, here, thus avoiding a lot of duplicate code elsewhere.
import socket, struct
class v4addr(long):
- """
- IPv4 address.
+ """
+ IPv4 address.
- Derived from long, but supports IPv4 print syntax.
- """
+ Derived from long, but supports IPv4 print syntax.
+ """
- bits = 32
- ipversion = 4
+ bits = 32
+ ipversion = 4
- def __new__(cls, x):
- """
- Construct a v4addr object.
- """
- if isinstance(x, unicode):
- x = x.encode("ascii")
- if isinstance(x, str):
- return cls.from_bytes(socket.inet_pton(socket.AF_INET, ".".join(str(int(i)) for i in x.split("."))))
- else:
- return long.__new__(cls, x)
-
- def to_bytes(self):
- """
- Convert a v4addr object to a raw byte string.
- """
- return struct.pack("!I", long(self))
+ def __new__(cls, x):
+ """
+ Construct a v4addr object.
+ """
- @classmethod
- def from_bytes(cls, x):
- """
- Convert from a raw byte string to a v4addr object.
- """
- return cls(struct.unpack("!I", x)[0])
+ if isinstance(x, unicode):
+ x = x.encode("ascii")
+ if isinstance(x, str):
+ return cls.from_bytes(socket.inet_pton(socket.AF_INET, ".".join(str(int(i)) for i in x.split("."))))
+ else:
+ return long.__new__(cls, x)
- def __str__(self):
- """
- Convert a v4addr object to string format.
- """
- return socket.inet_ntop(socket.AF_INET, self.to_bytes())
+ def to_bytes(self):
+ """
+ Convert a v4addr object to a raw byte string.
+ """
-class v6addr(long):
- """
- IPv6 address.
+ return struct.pack("!I", long(self))
- Derived from long, but supports IPv6 print syntax.
- """
+ @classmethod
+ def from_bytes(cls, x):
+ """
+ Convert from a raw byte string to a v4addr object.
+ """
- bits = 128
- ipversion = 6
+ return cls(struct.unpack("!I", x)[0])
- def __new__(cls, x):
- """
- Construct a v6addr object.
- """
- if isinstance(x, unicode):
- x = x.encode("ascii")
- if isinstance(x, str):
- return cls.from_bytes(socket.inet_pton(socket.AF_INET6, x))
- else:
- return long.__new__(cls, x)
-
- def to_bytes(self):
- """
- Convert a v6addr object to a raw byte string.
- """
- return struct.pack("!QQ", long(self) >> 64, long(self) & 0xFFFFFFFFFFFFFFFF)
+ def __str__(self):
+ """
+ Convert a v4addr object to string format.
+ """
- @classmethod
- def from_bytes(cls, x):
+ return socket.inet_ntop(socket.AF_INET, self.to_bytes())
+
+class v6addr(long):
"""
- Convert from a raw byte string to a v6addr object.
+ IPv6 address.
+
+ Derived from long, but supports IPv6 print syntax.
"""
- x = struct.unpack("!QQ", x)
- return cls((x[0] << 64) | x[1])
- def __str__(self):
+ bits = 128
+ ipversion = 6
+
+ def __new__(cls, x):
+ """
+ Construct a v6addr object.
+ """
+
+ if isinstance(x, unicode):
+ x = x.encode("ascii")
+ if isinstance(x, str):
+ return cls.from_bytes(socket.inet_pton(socket.AF_INET6, x))
+ else:
+ return long.__new__(cls, x)
+
+ def to_bytes(self):
+ """
+ Convert a v6addr object to a raw byte string.
+ """
+
+ return struct.pack("!QQ", long(self) >> 64, long(self) & 0xFFFFFFFFFFFFFFFF)
+
+ @classmethod
+ def from_bytes(cls, x):
+ """
+ Convert from a raw byte string to a v6addr object.
+ """
+
+ x = struct.unpack("!QQ", x)
+ return cls((x[0] << 64) | x[1])
+
+ def __str__(self):
+ """
+ Convert a v6addr object to string format.
+ """
+
+ return socket.inet_ntop(socket.AF_INET6, self.to_bytes())
+
+def parse(s):
"""
- Convert a v6addr object to string format.
+ Parse a string as either an IPv4 or IPv6 address, and return object of appropriate class.
"""
- return socket.inet_ntop(socket.AF_INET6, self.to_bytes())
-def parse(s):
- """
- Parse a string as either an IPv4 or IPv6 address, and return object of appropriate class.
- """
- if isinstance(s, unicode):
- s = s.encode("ascii")
- return v6addr(s) if ":" in s else v4addr(s)
+ if isinstance(s, unicode):
+ s = s.encode("ascii")
+ return v6addr(s) if ":" in s else v4addr(s)
diff --git a/rpki/irdb/__init__.py b/rpki/irdb/__init__.py
index 7f3b880e..64b0ea28 100644
--- a/rpki/irdb/__init__.py
+++ b/rpki/irdb/__init__.py
@@ -19,8 +19,5 @@ Django really wants its models packaged up in a "models" module within a
Python package, so humor it.
"""
-# pylint: disable=W0401
-
-from rpki.irdb.models import *
from rpki.irdb.zookeeper import Zookeeper
from rpki.irdb.router import DBContextRouter, database
diff --git a/rpki/irdb/migrations/0001_initial.py b/rpki/irdb/migrations/0001_initial.py
new file mode 100644
index 00000000..f2d34d8b
--- /dev/null
+++ b/rpki/irdb/migrations/0001_initial.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import rpki.irdb.models
+import rpki.fields
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='BSC',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('pkcs10', rpki.fields.PKCS10Field()),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Child',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('ta', rpki.fields.CertificateField()),
+ ('valid_until', rpki.fields.SundialField()),
+ ('name', models.TextField(null=True, blank=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ChildASN',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('start_as', models.BigIntegerField()),
+ ('end_as', models.BigIntegerField()),
+ ('child', models.ForeignKey(related_name='asns', to='irdb.Child')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ChildNet',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('start_ip', models.CharField(max_length=40)),
+ ('end_ip', models.CharField(max_length=40)),
+ ('version', rpki.fields.EnumField(choices=[(4, b'IPv4'), (6, b'IPv6')])),
+ ('child', models.ForeignKey(related_name='address_ranges', to='irdb.Child')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Client',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('ta', rpki.fields.CertificateField()),
+ ('sia_base', models.TextField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EECertificateRequest',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('valid_until', rpki.fields.SundialField()),
+ ('pkcs10', rpki.fields.PKCS10Field()),
+ ('gski', models.CharField(max_length=27)),
+ ('cn', models.CharField(max_length=64)),
+ ('sn', models.CharField(max_length=64)),
+ ('eku', models.TextField(null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EECertificateRequestASN',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('start_as', models.BigIntegerField()),
+ ('end_as', models.BigIntegerField()),
+ ('ee_certificate_request', models.ForeignKey(related_name='asns', to='irdb.EECertificateRequest')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EECertificateRequestNet',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('start_ip', models.CharField(max_length=40)),
+ ('end_ip', models.CharField(max_length=40)),
+ ('version', rpki.fields.EnumField(choices=[(4, b'IPv4'), (6, b'IPv6')])),
+ ('ee_certificate_request', models.ForeignKey(related_name='address_ranges', to='irdb.EECertificateRequest')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='GhostbusterRequest',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('vcard', models.TextField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='HostedCA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Parent',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('ta', rpki.fields.CertificateField()),
+ ('service_uri', models.CharField(max_length=255)),
+ ('parent_handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('child_handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('repository_type', rpki.fields.EnumField(choices=[(1, b'none'), (2, b'offer'), (3, b'referral')])),
+ ('referrer', rpki.irdb.models.HandleField(max_length=120, null=True, blank=True)),
+ ('referral_authorization', rpki.irdb.models.SignedReferralField(null=True)),
+ ('asn_resources', models.TextField(blank=True)),
+ ('ipv4_resources', models.TextField(blank=True)),
+ ('ipv6_resources', models.TextField(blank=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Referral',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('private_key', rpki.fields.RSAPrivateKeyField()),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Repository',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('ta', rpki.fields.CertificateField()),
+ ('client_handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('service_uri', models.CharField(max_length=255)),
+ ('sia_base', models.TextField()),
+ ('rrdp_notification_uri', models.TextField(null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ResourceHolderCA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('private_key', rpki.fields.RSAPrivateKeyField()),
+ ('latest_crl', rpki.fields.CRLField()),
+ ('next_serial', models.BigIntegerField(default=1)),
+ ('next_crl_number', models.BigIntegerField(default=1)),
+ ('last_crl_update', rpki.fields.SundialField()),
+ ('next_crl_update', rpki.fields.SundialField()),
+ ('handle', rpki.irdb.models.HandleField(unique=True, max_length=120)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ResourceHolderRevocation',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('serial', models.BigIntegerField()),
+ ('revoked', rpki.fields.SundialField()),
+ ('expires', rpki.fields.SundialField()),
+ ('issuer', models.ForeignKey(related_name='revocations', to='irdb.ResourceHolderCA')),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ROARequest',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('asn', models.BigIntegerField()),
+ ('issuer', models.ForeignKey(related_name='roa_requests', to='irdb.ResourceHolderCA')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ROARequestPrefix',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('version', rpki.fields.EnumField(choices=[(4, b'IPv4'), (6, b'IPv6')])),
+ ('prefix', models.CharField(max_length=40)),
+ ('prefixlen', models.PositiveSmallIntegerField()),
+ ('max_prefixlen', models.PositiveSmallIntegerField()),
+ ('roa_request', models.ForeignKey(related_name='prefixes', to='irdb.ROARequest')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ServerCA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('private_key', rpki.fields.RSAPrivateKeyField()),
+ ('latest_crl', rpki.fields.CRLField()),
+ ('next_serial', models.BigIntegerField(default=1)),
+ ('next_crl_number', models.BigIntegerField(default=1)),
+ ('last_crl_update', rpki.fields.SundialField()),
+ ('next_crl_update', rpki.fields.SundialField()),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ServerEE',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('private_key', rpki.fields.RSAPrivateKeyField()),
+ ('purpose', rpki.fields.EnumField(choices=[(1, b'rpkid'), (2, b'pubd'), (3, b'irdbd'), (4, b'irbe')])),
+ ('issuer', models.ForeignKey(related_name='ee_certificates', to='irdb.ServerCA')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ServerRevocation',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('serial', models.BigIntegerField()),
+ ('revoked', rpki.fields.SundialField()),
+ ('expires', rpki.fields.SundialField()),
+ ('issuer', models.ForeignKey(related_name='revocations', to='irdb.ServerCA')),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.AddField(
+ model_name='repository',
+ name='issuer',
+ field=models.ForeignKey(related_name='repositories', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='repository',
+ name='parent',
+ field=models.OneToOneField(related_name='repository', to='irdb.Parent'),
+ ),
+ migrations.AddField(
+ model_name='referral',
+ name='issuer',
+ field=models.OneToOneField(related_name='referral_certificate', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='parent',
+ name='issuer',
+ field=models.ForeignKey(related_name='parents', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='hostedca',
+ name='hosted',
+ field=models.OneToOneField(related_name='hosted_by', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='hostedca',
+ name='issuer',
+ field=models.ForeignKey(to='irdb.ServerCA'),
+ ),
+ migrations.AddField(
+ model_name='ghostbusterrequest',
+ name='issuer',
+ field=models.ForeignKey(related_name='ghostbuster_requests', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='ghostbusterrequest',
+ name='parent',
+ field=models.ForeignKey(related_name='ghostbuster_requests', to='irdb.Parent', null=True),
+ ),
+ migrations.AddField(
+ model_name='eecertificaterequest',
+ name='issuer',
+ field=models.ForeignKey(related_name='ee_certificate_requests', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='client',
+ name='issuer',
+ field=models.ForeignKey(related_name='clients', to='irdb.ServerCA'),
+ ),
+ migrations.AddField(
+ model_name='child',
+ name='issuer',
+ field=models.ForeignKey(related_name='children', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='bsc',
+ name='issuer',
+ field=models.ForeignKey(related_name='bscs', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='serverrevocation',
+ unique_together=set([('issuer', 'serial')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='serveree',
+ unique_together=set([('issuer', 'purpose')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='roarequestprefix',
+ unique_together=set([('roa_request', 'version', 'prefix', 'prefixlen', 'max_prefixlen')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='resourceholderrevocation',
+ unique_together=set([('issuer', 'serial')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='repository',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='parent',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='hostedca',
+ unique_together=set([('issuer', 'hosted')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='eecertificaterequestnet',
+ unique_together=set([('ee_certificate_request', 'start_ip', 'end_ip', 'version')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='eecertificaterequestasn',
+ unique_together=set([('ee_certificate_request', 'start_as', 'end_as')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='eecertificaterequest',
+ unique_together=set([('issuer', 'gski')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='client',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='childnet',
+ unique_together=set([('child', 'start_ip', 'end_ip', 'version')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='childasn',
+ unique_together=set([('child', 'start_as', 'end_as')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='child',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='bsc',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ ]
diff --git a/rpki/irdb/migrations/__init__.py b/rpki/irdb/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/irdb/migrations/__init__.py
diff --git a/rpki/irdb/models.py b/rpki/irdb/models.py
index 6fa48c59..55e3012f 100644
--- a/rpki/irdb/models.py
+++ b/rpki/irdb/models.py
@@ -24,7 +24,7 @@ to be usable by command line programs and other scripts, not just
Django GUI code, so be careful.
"""
-# pylint: disable=W0232,C1001
+# pylint: disable=W5101,W5103
import django.db.models
import rpki.x509
@@ -32,7 +32,8 @@ import rpki.sundial
import rpki.resource_set
import socket
import rpki.POW
-from south.modelsinspector import add_introspection_rules
+
+from rpki.fields import EnumField, SundialField, CertificateField, DERField, RSAPrivateKeyField, CRLField, PKCS10Field
## @var ip_version_choices
# Choice argument for fields implementing IP version numbers.
@@ -61,586 +62,491 @@ ee_certificate_lifetime = rpki.sundial.timedelta(days = 60)
###
-# Field types
+# Field classes
class HandleField(django.db.models.CharField):
- """
- A handle field type.
- """
-
- description = 'A "handle" in one of the RPKI protocols'
-
- def __init__(self, *args, **kwargs):
- kwargs["max_length"] = 120
- django.db.models.CharField.__init__(self, *args, **kwargs)
-
-class EnumField(django.db.models.PositiveSmallIntegerField):
- """
- An enumeration type that uses strings in Python and small integers
- in SQL.
- """
-
- description = "An enumeration type"
-
- __metaclass__ = django.db.models.SubfieldBase
-
- def __init__(self, *args, **kwargs):
- if isinstance(kwargs.get("choices"), (tuple, list)) and isinstance(kwargs["choices"][0], str):
- kwargs["choices"] = tuple(enumerate(kwargs["choices"], 1))
- django.db.models.PositiveSmallIntegerField.__init__(self, *args, **kwargs)
- self.enum_i2s = dict(self.flatchoices)
- self.enum_s2i = dict((v, k) for k, v in self.flatchoices)
-
- def to_python(self, value):
- return self.enum_i2s.get(value, value)
-
- def get_prep_value(self, value):
- return self.enum_s2i.get(value, value)
-
-class SundialField(django.db.models.DateTimeField):
- """
- A field type for our customized datetime objects.
- """
- __metaclass__ = django.db.models.SubfieldBase
-
- description = "A datetime type using our customized datetime objects"
-
- def to_python(self, value):
- if isinstance(value, rpki.sundial.pydatetime.datetime):
- return rpki.sundial.datetime.from_datetime(
- django.db.models.DateTimeField.to_python(self, value))
- else:
- return value
-
- def get_prep_value(self, value):
- if isinstance(value, rpki.sundial.datetime):
- return value.to_datetime()
- else:
- return value
-
-
-class DERField(django.db.models.Field):
- """
- Field types for DER objects.
- """
-
- __metaclass__ = django.db.models.SubfieldBase
-
- def __init__(self, *args, **kwargs):
- kwargs["serialize"] = False
- kwargs["blank"] = True
- kwargs["default"] = None
- django.db.models.Field.__init__(self, *args, **kwargs)
-
- def db_type(self, connection):
- if connection.settings_dict['ENGINE'] == "django.db.backends.posgresql":
- return "bytea"
- else:
- return "BLOB"
-
- def to_python(self, value):
- assert value is None or isinstance(value, (self.rpki_type, str))
- if isinstance(value, str):
- return self.rpki_type(DER = value)
- else:
- return value
-
- def get_prep_value(self, value):
- assert value is None or isinstance(value, (self.rpki_type, str))
- if isinstance(value, self.rpki_type):
- return value.get_DER()
- else:
- return value
-
-class CertificateField(DERField):
- description = "X.509 certificate"
- rpki_type = rpki.x509.X509
-
-class RSAKeyField(DERField):
- description = "RSA keypair"
- rpki_type = rpki.x509.RSA
-
-class CRLField(DERField):
- description = "Certificate Revocation List"
- rpki_type = rpki.x509.CRL
-
-class PKCS10Field(DERField):
- description = "PKCS #10 certificate request"
- rpki_type = rpki.x509.PKCS10
+ """
+ A handle field class. Replace this with SlugField?
+ """
+
+ description = 'A "handle" in one of the RPKI protocols'
+
+ def __init__(self, *args, **kwargs):
+ kwargs["max_length"] = 120
+ django.db.models.CharField.__init__(self, *args, **kwargs)
+
class SignedReferralField(DERField):
- description = "CMS signed object containing XML"
- rpki_type = rpki.x509.SignedReferral
+ description = "CMS signed object containing XML"
+ rpki_type = rpki.x509.SignedReferral
# Custom managers
class CertificateManager(django.db.models.Manager):
- def get_or_certify(self, **kwargs):
- """
- Sort of like .get_or_create(), but for models containing
- certificates which need to be generated based on other fields.
-
- Takes keyword arguments like .get(), checks for existing object.
- If none, creates a new one; if found an existing object but some
- of the non-key fields don't match, updates the existing object.
- Runs certification method for new or updated objects. Returns a
- tuple consisting of the object and a boolean indicating whether
- anything has changed.
- """
+ def get_or_certify(self, **kwargs):
+ """
+ Sort of like .get_or_create(), but for models containing
+ certificates which need to be generated based on other fields.
+
+ Takes keyword arguments like .get(), checks for existing object.
+ If none, creates a new one; if found an existing object but some
+ of the non-key fields don't match, updates the existing object.
+ Runs certification method for new or updated objects. Returns a
+ tuple consisting of the object and a boolean indicating whether
+ anything has changed.
+ """
- changed = False
+ # pylint: disable=E1101
- try:
- obj = self.get(**self._get_or_certify_keys(kwargs))
+ changed = False
- except self.model.DoesNotExist:
- obj = self.model(**kwargs)
- changed = True
+ try:
+ obj = self.get(**self._get_or_certify_keys(kwargs))
- else:
- for k in kwargs:
- if getattr(obj, k) != kwargs[k]:
- setattr(obj, k, kwargs[k])
- changed = True
+ except self.model.DoesNotExist:
+ obj = self.model(**kwargs)
+ changed = True
- if changed:
- obj.avow()
- obj.save()
+ else:
+ for k in kwargs:
+ if getattr(obj, k) != kwargs[k]:
+ setattr(obj, k, kwargs[k])
+ changed = True
- return obj, changed
+ if changed:
+ obj.avow()
+ obj.save()
- def _get_or_certify_keys(self, kwargs):
- assert len(self.model._meta.unique_together) == 1
- return dict((k, kwargs[k]) for k in self.model._meta.unique_together[0])
+ return obj, changed
+
+ def _get_or_certify_keys(self, kwargs):
+ # pylint: disable=E1101,W0212
+ assert len(self.model._meta.unique_together) == 1
+ return dict((k, kwargs[k]) for k in self.model._meta.unique_together[0])
class ResourceHolderCAManager(CertificateManager):
- def _get_or_certify_keys(self, kwargs):
- return { "handle" : kwargs["handle"] }
+ def _get_or_certify_keys(self, kwargs):
+ return { "handle" : kwargs["handle"] }
class ServerCAManager(CertificateManager):
- def _get_or_certify_keys(self, kwargs):
- return { "pk" : 1 }
+ def _get_or_certify_keys(self, kwargs):
+ return { "pk" : 1 }
class ResourceHolderEEManager(CertificateManager):
- def _get_or_certify_keys(self, kwargs):
- return { "issuer" : kwargs["issuer"] }
+ def _get_or_certify_keys(self, kwargs):
+ return { "issuer" : kwargs["issuer"] }
###
class CA(django.db.models.Model):
- certificate = CertificateField()
- private_key = RSAKeyField()
- latest_crl = CRLField()
-
- # Might want to bring these into line with what rpkid does. Current
- # variables here were chosen to map easily to what OpenSSL command
- # line tool was keeping on disk.
-
- next_serial = django.db.models.BigIntegerField(default = 1)
- next_crl_number = django.db.models.BigIntegerField(default = 1)
- last_crl_update = SundialField()
- next_crl_update = SundialField()
-
- class Meta:
- abstract = True
-
- def avow(self):
- if self.private_key is None:
- self.private_key = rpki.x509.RSA.generate(quiet = True)
- now = rpki.sundial.now()
- notAfter = now + ca_certificate_lifetime
- self.certificate = rpki.x509.X509.bpki_self_certify(
- keypair = self.private_key,
- subject_name = self.subject_name,
- serial = self.next_serial,
- now = now,
- notAfter = notAfter)
- self.next_serial += 1
- self.generate_crl()
- return self.certificate
-
- def certify(self, subject_name, subject_key, validity_interval, is_ca, pathLenConstraint = None):
- now = rpki.sundial.now()
- notAfter = now + validity_interval
- result = self.certificate.bpki_certify(
- keypair = self.private_key,
- subject_name = subject_name,
- subject_key = subject_key,
- serial = self.next_serial,
- now = now,
- notAfter = notAfter,
- is_ca = is_ca,
- pathLenConstraint = pathLenConstraint)
- self.next_serial += 1
- return result
-
- def revoke(self, cert):
- Revocation.objects.create(
- issuer = self,
- revoked = rpki.sundial.now(),
- serial = cert.certificate.getSerial(),
- expires = cert.certificate.getNotAfter() + crl_interval)
- cert.delete()
- self.generate_crl()
-
- def generate_crl(self):
- now = rpki.sundial.now()
- self.revocations.filter(expires__lt = now).delete()
- revoked = [(r.serial, r.revoked) for r in self.revocations.all()]
- self.latest_crl = rpki.x509.CRL.generate(
- keypair = self.private_key,
- issuer = self.certificate,
- serial = self.next_crl_number,
- thisUpdate = now,
- nextUpdate = now + crl_interval,
- revokedCertificates = revoked)
- self.last_crl_update = now
- self.next_crl_update = now + crl_interval
- self.next_crl_number += 1
+ certificate = CertificateField()
+ private_key = RSAPrivateKeyField()
+ latest_crl = CRLField()
+
+ # Might want to bring these into line with what rpkid does. Current
+ # variables here were chosen to map easily to what OpenSSL command
+ # line tool was keeping on disk.
+
+ next_serial = django.db.models.BigIntegerField(default = 1)
+ next_crl_number = django.db.models.BigIntegerField(default = 1)
+ last_crl_update = SundialField()
+ next_crl_update = SundialField()
+
+ class Meta:
+ abstract = True
+
+ @property
+ def subject_name(self):
+ raise NotImplementedError
+
+ def avow(self):
+ if self.private_key is None:
+ self.private_key = rpki.x509.RSA.generate(quiet = True)
+ now = rpki.sundial.now()
+ notAfter = now + ca_certificate_lifetime
+ self.certificate = rpki.x509.X509.bpki_self_certify(
+ keypair = self.private_key,
+ subject_name = self.subject_name,
+ serial = self.next_serial,
+ now = now,
+ notAfter = notAfter)
+ self.next_serial += 1
+ self.generate_crl()
+ return self.certificate
+
+ def certify(self, subject_name, subject_key, validity_interval, is_ca, pathLenConstraint = None):
+ now = rpki.sundial.now()
+ notAfter = now + validity_interval
+ result = self.certificate.bpki_certify(
+ keypair = self.private_key,
+ subject_name = subject_name,
+ subject_key = subject_key,
+ serial = self.next_serial,
+ now = now,
+ notAfter = notAfter,
+ is_ca = is_ca,
+ pathLenConstraint = pathLenConstraint)
+ self.next_serial += 1
+ return result
+
+ def revoke(self, cert):
+ Revocation.objects.create(
+ issuer = self,
+ revoked = rpki.sundial.now(),
+ serial = cert.certificate.getSerial(),
+ expires = cert.certificate.getNotAfter() + crl_interval)
+ cert.delete()
+ self.generate_crl()
+
+ def generate_crl(self):
+ now = rpki.sundial.now()
+ self.revocations.filter(expires__lt = now).delete()
+ revoked = [(r.serial, r.revoked) for r in self.revocations.all()]
+ self.latest_crl = rpki.x509.CRL.generate(
+ keypair = self.private_key,
+ issuer = self.certificate,
+ serial = self.next_crl_number,
+ thisUpdate = now,
+ nextUpdate = now + crl_interval,
+ revokedCertificates = revoked)
+ self.last_crl_update = now
+ self.next_crl_update = now + crl_interval
+ self.next_crl_number += 1
class ServerCA(CA):
- objects = ServerCAManager()
+ objects = ServerCAManager()
- def __unicode__(self):
- return ""
+ def __unicode__(self):
+ return ""
- @property
- def subject_name(self):
- if self.certificate is not None:
- return self.certificate.getSubject()
- else:
- return rpki.x509.X501DN.from_cn("%s BPKI server CA" % socket.gethostname())
+ @property
+ def subject_name(self):
+ if self.certificate is not None:
+ return self.certificate.getSubject()
+ else:
+ return rpki.x509.X501DN.from_cn("%s BPKI server CA" % socket.gethostname())
class ResourceHolderCA(CA):
- handle = HandleField(unique = True)
- objects = ResourceHolderCAManager()
+ handle = HandleField(unique = True)
+ objects = ResourceHolderCAManager()
- def __unicode__(self):
- return self.handle
+ def __unicode__(self):
+ return self.handle
- @property
- def subject_name(self):
- if self.certificate is not None:
- return self.certificate.getSubject()
- else:
- return rpki.x509.X501DN.from_cn("%s BPKI resource CA" % self.handle)
+ @property
+ def subject_name(self):
+ if self.certificate is not None:
+ return self.certificate.getSubject()
+ else:
+ return rpki.x509.X501DN.from_cn("%s BPKI resource CA" % self.handle)
class Certificate(django.db.models.Model):
- certificate = CertificateField()
- objects = CertificateManager()
+ certificate = CertificateField()
+ objects = CertificateManager()
- class Meta:
- abstract = True
- unique_together = ("issuer", "handle")
+ class Meta:
+ abstract = True
+ unique_together = ("issuer", "handle")
- def revoke(self):
- self.issuer.revoke(self)
+ def revoke(self):
+ self.issuer.revoke(self) # pylint: disable=E1101
class CrossCertification(Certificate):
- handle = HandleField()
- ta = CertificateField()
+ handle = HandleField()
+ ta = CertificateField() # pylint: disable=C0103
- class Meta:
- abstract = True
+ class Meta:
+ abstract = True
- def avow(self):
- self.certificate = self.issuer.certify(
- subject_name = self.ta.getSubject(),
- subject_key = self.ta.getPublicKey(),
- validity_interval = ee_certificate_lifetime,
- is_ca = True,
- pathLenConstraint = 0)
+ def avow(self):
+ # pylint: disable=E1101
+ self.certificate = self.issuer.certify(
+ subject_name = self.ta.getSubject(),
+ subject_key = self.ta.getPublicKey(),
+ validity_interval = ee_certificate_lifetime,
+ is_ca = True,
+ pathLenConstraint = 0)
- def __unicode__(self):
- return self.handle
+ def __unicode__(self):
+ return self.handle
class HostedCA(Certificate):
- issuer = django.db.models.ForeignKey(ServerCA)
- hosted = django.db.models.OneToOneField(ResourceHolderCA, related_name = "hosted_by")
+ issuer = django.db.models.ForeignKey(ServerCA)
+ hosted = django.db.models.OneToOneField(ResourceHolderCA, related_name = "hosted_by")
- def avow(self):
- self.certificate = self.issuer.certify(
- subject_name = self.hosted.certificate.getSubject(),
- subject_key = self.hosted.certificate.getPublicKey(),
- validity_interval = ee_certificate_lifetime,
- is_ca = True,
- pathLenConstraint = 1)
+ def avow(self):
+ self.certificate = self.issuer.certify(
+ subject_name = self.hosted.certificate.getSubject(),
+ subject_key = self.hosted.certificate.getPublicKey(),
+ validity_interval = ee_certificate_lifetime,
+ is_ca = True,
+ pathLenConstraint = 1)
- class Meta:
- unique_together = ("issuer", "hosted")
+ class Meta:
+ unique_together = ("issuer", "hosted")
- def __unicode__(self):
- return self.hosted.handle
+ def __unicode__(self):
+ return self.hosted.handle
class Revocation(django.db.models.Model):
- serial = django.db.models.BigIntegerField()
- revoked = SundialField()
- expires = SundialField()
+ serial = django.db.models.BigIntegerField()
+ revoked = SundialField()
+ expires = SundialField()
- class Meta:
- abstract = True
- unique_together = ("issuer", "serial")
+ class Meta:
+ abstract = True
+ unique_together = ("issuer", "serial")
class ServerRevocation(Revocation):
- issuer = django.db.models.ForeignKey(ServerCA, related_name = "revocations")
+ issuer = django.db.models.ForeignKey(ServerCA, related_name = "revocations")
class ResourceHolderRevocation(Revocation):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "revocations")
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "revocations")
class EECertificate(Certificate):
- private_key = RSAKeyField()
+ private_key = RSAPrivateKeyField()
- class Meta:
- abstract = True
+ class Meta:
+ abstract = True
- def avow(self):
- if self.private_key is None:
- self.private_key = rpki.x509.RSA.generate(quiet = True)
- self.certificate = self.issuer.certify(
- subject_name = self.subject_name,
- subject_key = self.private_key.get_public(),
- validity_interval = ee_certificate_lifetime,
- is_ca = False)
+ def avow(self):
+ # pylint: disable=E1101
+ if self.private_key is None:
+ self.private_key = rpki.x509.RSA.generate(quiet = True)
+ self.certificate = self.issuer.certify(
+ subject_name = self.subject_name,
+ subject_key = self.private_key.get_public(),
+ validity_interval = ee_certificate_lifetime,
+ is_ca = False)
class ServerEE(EECertificate):
- issuer = django.db.models.ForeignKey(ServerCA, related_name = "ee_certificates")
- purpose = EnumField(choices = ("rpkid", "pubd", "irdbd", "irbe"))
+ issuer = django.db.models.ForeignKey(ServerCA, related_name = "ee_certificates")
+ purpose = EnumField(choices = ("rpkid", "pubd", "irdbd", "irbe"))
- class Meta:
- unique_together = ("issuer", "purpose")
+ class Meta:
+ unique_together = ("issuer", "purpose")
- @property
- def subject_name(self):
- return rpki.x509.X501DN.from_cn("%s BPKI %s EE" % (socket.gethostname(),
- self.get_purpose_display()))
+ @property
+ def subject_name(self):
+ return rpki.x509.X501DN.from_cn("%s BPKI %s EE" % (socket.gethostname(),
+ self.get_purpose_display()))
class Referral(EECertificate):
- issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "referral_certificate")
- objects = ResourceHolderEEManager()
-
- @property
- def subject_name(self):
- return rpki.x509.X501DN.from_cn("%s BPKI Referral EE" % self.issuer.handle)
+ issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "referral_certificate")
+ objects = ResourceHolderEEManager()
-class Turtle(django.db.models.Model):
- service_uri = django.db.models.CharField(max_length = 255)
-
-class Rootd(EECertificate, Turtle):
- issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "rootd")
- objects = ResourceHolderEEManager()
-
- @property
- def subject_name(self):
- return rpki.x509.X501DN.from_cn("%s BPKI rootd EE" % self.issuer.handle)
+ @property
+ def subject_name(self):
+ return rpki.x509.X501DN.from_cn("%s BPKI Referral EE" % self.issuer.handle)
class BSC(Certificate):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "bscs")
- handle = HandleField()
- pkcs10 = PKCS10Field()
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "bscs")
+ handle = HandleField()
+ pkcs10 = PKCS10Field()
- def avow(self):
- self.certificate = self.issuer.certify(
- subject_name = self.pkcs10.getSubject(),
- subject_key = self.pkcs10.getPublicKey(),
- validity_interval = ee_certificate_lifetime,
- is_ca = False)
+ def avow(self):
+ # pylint: disable=E1101
+ self.certificate = self.issuer.certify(
+ subject_name = self.pkcs10.getSubject(),
+ subject_key = self.pkcs10.getPublicKey(),
+ validity_interval = ee_certificate_lifetime,
+ is_ca = False)
- def __unicode__(self):
- return self.handle
+ def __unicode__(self):
+ return self.handle
class ResourceSet(django.db.models.Model):
- valid_until = SundialField()
+ valid_until = SundialField()
+
+ class Meta:
+ abstract = True
- class Meta:
- abstract = True
+ def _select_resource_bag(self):
+ return (), ()
- @property
- def resource_bag(self):
- raw_asn, raw_net = self._select_resource_bag()
- asns = rpki.resource_set.resource_set_as.from_django(
- (a.start_as, a.end_as) for a in raw_asn)
- ipv4 = rpki.resource_set.resource_set_ipv4.from_django(
- (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv4")
- ipv6 = rpki.resource_set.resource_set_ipv6.from_django(
- (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv6")
- return rpki.resource_set.resource_bag(
- valid_until = self.valid_until, asn = asns, v4 = ipv4, v6 = ipv6)
+ @property
+ def resource_bag(self):
+ raw_asn, raw_net = self._select_resource_bag()
+ asns = rpki.resource_set.resource_set_as.from_django(
+ (a.start_as, a.end_as) for a in raw_asn)
+ ipv4 = rpki.resource_set.resource_set_ipv4.from_django(
+ (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv4")
+ ipv6 = rpki.resource_set.resource_set_ipv6.from_django(
+ (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv6")
+ return rpki.resource_set.resource_bag(
+ valid_until = self.valid_until, asn = asns, v4 = ipv4, v6 = ipv6)
- # Writing of .setter method deferred until something needs it.
+ # Writing of .setter method deferred until something needs it.
class ResourceSetASN(django.db.models.Model):
- start_as = django.db.models.BigIntegerField()
- end_as = django.db.models.BigIntegerField()
+ start_as = django.db.models.BigIntegerField()
+ end_as = django.db.models.BigIntegerField()
- class Meta:
- abstract = True
+ class Meta:
+ abstract = True
- def as_resource_range(self):
- return rpki.resource_set.resource_range_as(self.start_as, self.end_as)
+ def as_resource_range(self):
+ return rpki.resource_set.resource_range_as(self.start_as, self.end_as)
class ResourceSetNet(django.db.models.Model):
- start_ip = django.db.models.CharField(max_length = 40)
- end_ip = django.db.models.CharField(max_length = 40)
- version = EnumField(choices = ip_version_choices)
+ start_ip = django.db.models.CharField(max_length = 40)
+ end_ip = django.db.models.CharField(max_length = 40)
+ version = EnumField(choices = ip_version_choices)
- class Meta:
- abstract = True
+ class Meta:
+ abstract = True
- def as_resource_range(self):
- return rpki.resource_set.resource_range_ip.from_strings(self.start_ip, self.end_ip)
+ def as_resource_range(self):
+ return rpki.resource_set.resource_range_ip.from_strings(self.start_ip, self.end_ip)
class Child(CrossCertification, ResourceSet):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "children")
- name = django.db.models.TextField(null = True, blank = True)
-
- def _select_resource_bag(self):
- child_asn = rpki.irdb.ChildASN.objects.raw("""
- SELECT *
- FROM irdb_childasn
- WHERE child_id = %s
- """, [self.id])
- child_net = list(rpki.irdb.ChildNet.objects.raw("""
- SELECT *
- FROM irdb_childnet
- WHERE child_id = %s
- """, [self.id]))
- return child_asn, child_net
-
- class Meta:
- unique_together = ("issuer", "handle")
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "children")
+ name = django.db.models.TextField(null = True, blank = True)
+
+ def _select_resource_bag(self):
+ child_asn = rpki.irdb.models.ChildASN.objects.raw("""
+ SELECT *
+ FROM irdb_childasn
+ WHERE child_id = %s
+ """, [self.id])
+ child_net = list(rpki.irdb.models.ChildNet.objects.raw("""
+ SELECT *
+ FROM irdb_childnet
+ WHERE child_id = %s
+ """, [self.id]))
+ return child_asn, child_net
+
+ class Meta:
+ unique_together = ("issuer", "handle")
class ChildASN(ResourceSetASN):
- child = django.db.models.ForeignKey(Child, related_name = "asns")
+ child = django.db.models.ForeignKey(Child, related_name = "asns")
- class Meta:
- unique_together = ("child", "start_as", "end_as")
+ class Meta:
+ unique_together = ("child", "start_as", "end_as")
class ChildNet(ResourceSetNet):
- child = django.db.models.ForeignKey(Child, related_name = "address_ranges")
-
- class Meta:
- unique_together = ("child", "start_ip", "end_ip", "version")
-
-class Parent(CrossCertification, Turtle):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "parents")
- parent_handle = HandleField()
- child_handle = HandleField()
- repository_type = EnumField(choices = ("none", "offer", "referral"))
- referrer = HandleField(null = True, blank = True)
- referral_authorization = SignedReferralField(null = True, blank = True)
-
- # This shouldn't be necessary
- class Meta:
- unique_together = ("issuer", "handle")
+ child = django.db.models.ForeignKey(Child, related_name = "address_ranges")
+
+ class Meta:
+ unique_together = ("child", "start_ip", "end_ip", "version")
+
+class Parent(CrossCertification):
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "parents")
+ service_uri = django.db.models.CharField(max_length = 255)
+ parent_handle = HandleField()
+ child_handle = HandleField()
+ repository_type = EnumField(choices = ("none", "offer", "referral"))
+ referrer = HandleField(null = True, blank = True)
+ referral_authorization = SignedReferralField(null = True, blank = True)
+ asn_resources = django.db.models.TextField(blank = True) # root only
+ ipv4_resources = django.db.models.TextField(blank = True) # root only
+ ipv6_resources = django.db.models.TextField(blank = True) # root only
+
+ # This shouldn't be necessary
+ class Meta:
+ unique_together = ("issuer", "handle")
class ROARequest(django.db.models.Model):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "roa_requests")
- asn = django.db.models.BigIntegerField()
-
- @property
- def roa_prefix_bag(self):
- prefixes = list(rpki.irdb.ROARequestPrefix.objects.raw("""
- SELECT *
- FROM irdb_roarequestprefix
- WHERE roa_request_id = %s
- """, [self.id]))
- v4 = rpki.resource_set.roa_prefix_set_ipv4.from_django(
- (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv4")
- v6 = rpki.resource_set.roa_prefix_set_ipv6.from_django(
- (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv6")
- return rpki.resource_set.roa_prefix_bag(v4 = v4, v6 = v6)
-
- # Writing of .setter method deferred until something needs it.
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "roa_requests")
+ asn = django.db.models.BigIntegerField()
+
+ @property
+ def roa_prefix_bag(self):
+ prefixes = list(rpki.irdb.models.ROARequestPrefix.objects.raw("""
+ SELECT *
+ FROM irdb_roarequestprefix
+ WHERE roa_request_id = %s
+ """, [self.id]))
+ v4 = rpki.resource_set.roa_prefix_set_ipv4.from_django(
+ (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv4")
+ v6 = rpki.resource_set.roa_prefix_set_ipv6.from_django(
+ (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv6")
+ return rpki.resource_set.roa_prefix_bag(v4 = v4, v6 = v6)
+
+ # Writing of .setter method deferred until something needs it.
class ROARequestPrefix(django.db.models.Model):
- roa_request = django.db.models.ForeignKey(ROARequest, related_name = "prefixes")
- version = EnumField(choices = ip_version_choices)
- prefix = django.db.models.CharField(max_length = 40)
- prefixlen = django.db.models.PositiveSmallIntegerField()
- max_prefixlen = django.db.models.PositiveSmallIntegerField()
-
- def as_roa_prefix(self):
- if self.version == 'IPv4':
- return rpki.resource_set.roa_prefix_ipv4(rpki.POW.IPAddress(self.prefix), self.prefixlen, self.max_prefixlen)
- else:
- return rpki.resource_set.roa_prefix_ipv6(rpki.POW.IPAddress(self.prefix), self.prefixlen, self.max_prefixlen)
-
- def as_resource_range(self):
- return self.as_roa_prefix().to_resource_range()
-
- class Meta:
- unique_together = ("roa_request", "version", "prefix", "prefixlen", "max_prefixlen")
+ roa_request = django.db.models.ForeignKey(ROARequest, related_name = "prefixes")
+ version = EnumField(choices = ip_version_choices)
+ prefix = django.db.models.CharField(max_length = 40)
+ prefixlen = django.db.models.PositiveSmallIntegerField()
+ max_prefixlen = django.db.models.PositiveSmallIntegerField()
+
+ def as_roa_prefix(self):
+ if self.version == 'IPv4':
+ return rpki.resource_set.roa_prefix_ipv4(rpki.POW.IPAddress(self.prefix),
+ self.prefixlen, self.max_prefixlen)
+ else:
+ return rpki.resource_set.roa_prefix_ipv6(rpki.POW.IPAddress(self.prefix),
+ self.prefixlen, self.max_prefixlen)
+
+ def as_resource_range(self):
+ return self.as_roa_prefix().to_resource_range()
+
+ class Meta:
+ unique_together = ("roa_request", "version", "prefix", "prefixlen", "max_prefixlen")
class GhostbusterRequest(django.db.models.Model):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ghostbuster_requests")
- parent = django.db.models.ForeignKey(Parent, related_name = "ghostbuster_requests", null = True)
- vcard = django.db.models.TextField()
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ghostbuster_requests")
+ parent = django.db.models.ForeignKey(Parent, related_name = "ghostbuster_requests", null = True)
+ vcard = django.db.models.TextField()
class EECertificateRequest(ResourceSet):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ee_certificate_requests")
- pkcs10 = PKCS10Field()
- gski = django.db.models.CharField(max_length = 27)
- cn = django.db.models.CharField(max_length = 64)
- sn = django.db.models.CharField(max_length = 64)
- eku = django.db.models.TextField(null = True)
-
- def _select_resource_bag(self):
- ee_asn = rpki.irdb.EECertificateRequestASN.objects.raw("""
- SELECT *
- FROM irdb_eecertificaterequestasn
- WHERE ee_certificate_request_id = %s
- """, [self.id])
- ee_net = rpki.irdb.EECertificateRequestNet.objects.raw("""
- SELECT *
- FROM irdb_eecertificaterequestnet
- WHERE ee_certificate_request_id = %s
- """, [self.id])
- return ee_asn, ee_net
-
- class Meta:
- unique_together = ("issuer", "gski")
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ee_certificate_requests")
+ pkcs10 = PKCS10Field()
+ gski = django.db.models.CharField(max_length = 27)
+ cn = django.db.models.CharField(max_length = 64) # pylint: disable=C0103
+ sn = django.db.models.CharField(max_length = 64) # pylint: disable=C0103
+ eku = django.db.models.TextField(null = True)
+
+ def _select_resource_bag(self):
+ ee_asn = rpki.irdb.models.EECertificateRequestASN.objects.raw("""
+ SELECT *
+ FROM irdb_eecertificaterequestasn
+ WHERE ee_certificate_request_id = %s
+ """, [self.id])
+ ee_net = rpki.irdb.models.EECertificateRequestNet.objects.raw("""
+ SELECT *
+ FROM irdb_eecertificaterequestnet
+ WHERE ee_certificate_request_id = %s
+ """, [self.id])
+ return ee_asn, ee_net
+
+ class Meta:
+ unique_together = ("issuer", "gski")
class EECertificateRequestASN(ResourceSetASN):
- ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "asns")
+ ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "asns")
- class Meta:
- unique_together = ("ee_certificate_request", "start_as", "end_as")
+ class Meta:
+ unique_together = ("ee_certificate_request", "start_as", "end_as")
class EECertificateRequestNet(ResourceSetNet):
- ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "address_ranges")
+ ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "address_ranges")
- class Meta:
- unique_together = ("ee_certificate_request", "start_ip", "end_ip", "version")
+ class Meta:
+ unique_together = ("ee_certificate_request", "start_ip", "end_ip", "version")
class Repository(CrossCertification):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "repositories")
- client_handle = HandleField()
- service_uri = django.db.models.CharField(max_length = 255)
- sia_base = django.db.models.TextField()
- turtle = django.db.models.OneToOneField(Turtle, related_name = "repository")
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "repositories")
+ client_handle = HandleField()
+ service_uri = django.db.models.CharField(max_length = 255)
+ sia_base = django.db.models.TextField()
+ rrdp_notification_uri = django.db.models.TextField(null = True)
+ parent = django.db.models.OneToOneField(Parent, related_name = "repository")
- # This shouldn't be necessary
- class Meta:
- unique_together = ("issuer", "handle")
+ # This shouldn't be necessary
+ class Meta:
+ unique_together = ("issuer", "handle")
class Client(CrossCertification):
- issuer = django.db.models.ForeignKey(ServerCA, related_name = "clients")
- sia_base = django.db.models.TextField()
- parent_handle = HandleField()
-
- # This shouldn't be necessary
- class Meta:
- unique_together = ("issuer", "handle")
-
-# for Django South -- these are just simple subclasses
-add_introspection_rules([],
- (r'^rpki\.irdb\.models\.CertificateField',
- r'^rpki\.irdb\.models\.CRLField',
- r'^rpki\.irdb\.models\.EnumField',
- r'^rpki\.irdb\.models\.HandleField',
- r'^rpki\.irdb\.models\.RSAKeyField',
- r'^rpki\.irdb\.models\.SignedReferralField',
- r'^rpki\.irdb\.models\.SundialField'))
+ issuer = django.db.models.ForeignKey(ServerCA, related_name = "clients")
+ sia_base = django.db.models.TextField()
+
+ # This shouldn't be necessary
+ class Meta:
+ unique_together = ("issuer", "handle")
diff --git a/rpki/irdb/router.py b/rpki/irdb/router.py
index 97e3d0b7..a2ba81c7 100644
--- a/rpki/irdb/router.py
+++ b/rpki/irdb/router.py
@@ -26,70 +26,73 @@ passing database names everywhere. Using a database router
accomplishes this.
"""
+# pylint: disable=W0212
+
class DBContextRouter(object):
- """
- A Django database router for use with multiple IRDBs.
-
- This router is designed to work in conjunction with the
- rpki.irdb.database context handler (q.v.).
- """
-
- _app = "irdb"
-
- _database = None
-
- def db_for_read(self, model, **hints):
- if model._meta.app_label == self._app:
- return self._database
- else:
- return None
-
- def db_for_write(self, model, **hints):
- if model._meta.app_label == self._app:
- return self._database
- else:
- return None
-
- def allow_relation(self, obj1, obj2, **hints):
- if self._database is None:
- return None
- elif obj1._meta.app_label == self._app and obj2._meta.app_label == self._app:
- return True
- else:
- return None
-
- def allow_syncdb(self, db, model):
- if db == self._database and model._meta.app_label == self._app:
- return True
- else:
- return None
+ """
+ A Django database router for use with multiple IRDBs.
+
+ This router is designed to work in conjunction with the
+ rpki.irdb.database context handler (q.v.).
+ """
+
+ _app = "irdb"
+
+ _database = None
+
+ def db_for_read(self, model, **hints):
+ if model._meta.app_label == self._app:
+ return self._database
+ else:
+ return None
+
+ def db_for_write(self, model, **hints):
+ if model._meta.app_label == self._app:
+ return self._database
+ else:
+ return None
+
+ def allow_relation(self, obj1, obj2, **hints):
+ if self._database is None:
+ return None
+ elif obj1._meta.app_label == self._app and obj2._meta.app_label == self._app:
+ return True
+ else:
+ return None
+
+ def allow_migrate(self, db, model):
+ if db == self._database and model._meta.app_label == self._app:
+ return True
+ else:
+ return None
class database(object):
- """
- Context manager for use with DBContextRouter. Use thusly:
-
- with rpki.irdb.database("blarg"):
- do_stuff()
-
- This binds IRDB operations to database blarg for the duration of
- the call to do_stuff(), then restores the prior state.
- """
-
- def __init__(self, name, on_entry = None, on_exit = None):
- if not isinstance(name, str):
- raise ValueError("database name must be a string, not %r" % name)
- self.name = name
- self.on_entry = on_entry
- self.on_exit = on_exit
-
- def __enter__(self):
- if self.on_entry is not None:
- self.on_entry()
- self.former = DBContextRouter._database
- DBContextRouter._database = self.name
-
- def __exit__(self, _type, value, traceback):
- assert DBContextRouter._database is self.name
- DBContextRouter._database = self.former
- if self.on_exit is not None:
- self.on_exit()
+ """
+ Context manager for use with DBContextRouter. Use thusly:
+
+ with rpki.irdb.database("blarg"):
+ do_stuff()
+
+ This binds IRDB operations to database blarg for the duration of
+ the call to do_stuff(), then restores the prior state.
+ """
+
+ def __init__(self, name, on_entry = None, on_exit = None):
+ if not isinstance(name, str):
+ raise ValueError("database name must be a string, not %r" % name)
+ self.name = name
+ self.on_entry = on_entry
+ self.on_exit = on_exit
+ self.former = None
+
+ def __enter__(self):
+ if self.on_entry is not None:
+ self.on_entry()
+ self.former = DBContextRouter._database
+ DBContextRouter._database = self.name
+
+ def __exit__(self, _type, value, traceback):
+ assert DBContextRouter._database is self.name
+ DBContextRouter._database = self.former
+ if self.on_exit is not None:
+ self.on_exit()
diff --git a/rpki/irdb/zookeeper.py b/rpki/irdb/zookeeper.py
index c7038889..a30ef7a8 100644
--- a/rpki/irdb/zookeeper.py
+++ b/rpki/irdb/zookeeper.py
@@ -20,21 +20,20 @@
Management code for the IRDB.
"""
-# pylint: disable=W0612,C0325
-
import os
import copy
-import types
+
import rpki.config
import rpki.sundial
import rpki.oids
-import rpki.http
+import rpki.http_simple
import rpki.resource_set
import rpki.relaxng
import rpki.left_right
import rpki.x509
-import rpki.async
import rpki.irdb
+import rpki.publication_control
+
import django.db.transaction
from lxml.etree import (Element, SubElement, ElementTree,
@@ -42,12 +41,11 @@ from lxml.etree import (Element, SubElement, ElementTree,
from rpki.csv_utils import csv_reader
-# XML namespace and protocol version for OOB setup protocol. The name
-# is historical and may change before we propose this as the basis for
-# a standard.
+# XML namespace and protocol version for OOB setup protocol.
-myrpki_xmlns = rpki.relaxng.myrpki.xmlns
-myrpki_version = rpki.relaxng.myrpki.version
+oob_xmlns = rpki.relaxng.oob_setup.xmlns
+oob_nsmap = rpki.relaxng.oob_setup.nsmap
+oob_version = rpki.relaxng.oob_setup.version
# XML namespace and protocol version for router certificate requests.
# We probably ought to be pulling this sort of thing from the schema,
@@ -56,13 +54,32 @@ myrpki_version = rpki.relaxng.myrpki.version
# I'm ready to rewrite the rpki.relaxng code.
routercert_xmlns = rpki.relaxng.router_certificate.xmlns
+routercert_nsmap = rpki.relaxng.router_certificate.nsmap
routercert_version = rpki.relaxng.router_certificate.version
+# XML tags for elements in the above
+
+tag_oob_authorization = oob_xmlns + "authorization"
+tag_oob_child_bpki_ta = oob_xmlns + "child_bpki_ta"
+tag_oob_child_request = oob_xmlns + "child_request"
+tag_oob_error = oob_xmlns + "error"
+tag_oob_offer = oob_xmlns + "offer"
+tag_oob_parent_bpki_ta = oob_xmlns + "parent_bpki_ta"
+tag_oob_parent_response = oob_xmlns + "parent_response"
+tag_oob_publisher_bpki_ta = oob_xmlns + "publisher_bpki_ta"
+tag_oob_publisher_request = oob_xmlns + "publisher_request"
+tag_oob_referral = oob_xmlns + "referral"
+tag_oob_repository_bpki_ta = oob_xmlns + "repository_bpki_ta"
+tag_oob_repository_response = oob_xmlns + "repository_response"
+
+tag_router_certificate_request = routercert_xmlns + "router_certificate_request"
+
+# Configuration file section names
+
myrpki_section = "myrpki"
irdbd_section = "irdbd"
rpkid_section = "rpkid"
pubd_section = "pubd"
-rootd_section = "rootd"
# A whole lot of exceptions
@@ -71,1606 +88,1603 @@ class MissingHandle(Exception): "Missing handle."
class CouldntTalkToDaemon(Exception): "Couldn't talk to daemon."
class BadXMLMessage(Exception): "Bad XML message."
class PastExpiration(Exception): "Expiration date has already passed."
-class CantRunRootd(Exception): "Can't run rootd."
+class CouldntFindRepoParent(Exception): "Couldn't find repository's parent."
def B64Element(e, tag, obj, **kwargs):
- """
- Create an XML element containing Base64 encoded data taken from a
- DER object.
- """
-
- if e is None:
- se = Element(tag, **kwargs)
- else:
- se = SubElement(e, tag, **kwargs)
- if e is not None and e.text is None:
- e.text = "\n"
- se.text = "\n" + obj.get_Base64()
- se.tail = "\n"
- return se
-
-class PEM_writer(object):
- """
- Write PEM files to disk, keeping track of which ones we've already
- written and setting the file mode appropriately.
-
- Comparing the old file with what we're about to write serves no real
- purpose except to calm users who find repeated messages about
- writing the same file confusing.
- """
-
- def __init__(self, logstream = None):
- self.wrote = set()
- self.logstream = logstream
-
- def __call__(self, filename, obj, compare = True):
- filename = os.path.realpath(filename)
- if filename in self.wrote:
- return
- tempname = filename
- pem = obj.get_PEM()
- if not filename.startswith("/dev/"):
- try:
- if compare and pem == open(filename, "r").read():
- return
- except: # pylint: disable=W0702
- pass
- tempname += ".%s.tmp" % os.getpid()
- mode = 0400 if filename.endswith(".key") else 0444
- if self.logstream is not None:
- self.logstream.write("Writing %s\n" % filename)
- f = os.fdopen(os.open(tempname, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode), "w")
- f.write(pem)
- f.close()
- if tempname != filename:
- os.rename(tempname, filename)
- self.wrote.add(filename)
-
-
-def etree_read(filename):
- """
- Read an etree from a file, verifying then stripping XML namespace
- cruft.
- """
-
- e = ElementTree(file = filename).getroot()
- rpki.relaxng.myrpki.assertValid(e)
- for i in e.getiterator():
- if i.tag.startswith(myrpki_xmlns):
- i.tag = i.tag[len(myrpki_xmlns):]
- else:
- raise BadXMLMessage("XML tag %r is not in namespace %r" % (i.tag, myrpki_xmlns[1:-1]))
- return e
-
-
-class etree_wrapper(object):
- """
- Wrapper for ETree objects so we can return them as function results
- without requiring the caller to understand much about them.
-
- """
-
- def __init__(self, e, msg = None, debug = False):
- self.msg = msg
- e = copy.deepcopy(e)
- e.set("version", myrpki_version)
- for i in e.getiterator():
- if i.tag[0] != "{":
- i.tag = myrpki_xmlns + i.tag
- assert i.tag.startswith(myrpki_xmlns)
- if debug:
- print ElementToString(e)
- rpki.relaxng.myrpki.assertValid(e)
- self.etree = e
-
- def __str__(self):
- return ElementToString(self.etree)
-
- def save(self, filename, logstream = None):
- filename = os.path.realpath(filename)
- tempname = filename
- if not filename.startswith("/dev/"):
- tempname += ".%s.tmp" % os.getpid()
- ElementTree(self.etree).write(tempname)
- if tempname != filename:
- os.rename(tempname, filename)
- if logstream is not None:
- logstream.write("Wrote %s\n" % filename)
- if self.msg is not None:
- logstream.write(self.msg + "\n")
-
- @property
- def file(self):
- from cStringIO import StringIO
- return StringIO(ElementToString(self.etree))
-
-
-class Zookeeper(object):
-
- ## @var show_xml
- # Whether to show XML for debugging
-
- show_xml = False
-
- def __init__(self, cfg = None, handle = None, logstream = None, disable_signal_handlers = False):
-
- if cfg is None:
- cfg = rpki.config.parser()
-
- if handle is None:
- handle = cfg.get("handle", section = myrpki_section)
-
- self.cfg = cfg
-
- self.logstream = logstream
- self.disable_signal_handlers = disable_signal_handlers
-
- self.run_rpkid = cfg.getboolean("run_rpkid", section = myrpki_section)
- self.run_pubd = cfg.getboolean("run_pubd", section = myrpki_section)
- self.run_rootd = cfg.getboolean("run_rootd", section = myrpki_section)
-
- if self.run_rootd and (not self.run_pubd or not self.run_rpkid):
- raise CantRunRootd("Can't run rootd unless also running rpkid and pubd")
-
- self.default_repository = cfg.get("default_repository", "", section = myrpki_section)
- self.pubd_contact_info = cfg.get("pubd_contact_info", "", section = myrpki_section)
-
- self.rsync_module = cfg.get("publication_rsync_module", section = myrpki_section)
- self.rsync_server = cfg.get("publication_rsync_server", section = myrpki_section)
-
- self.reset_identity(handle)
-
-
- def reset_identity(self, handle):
"""
- Select handle of current resource holding entity.
+ Create an XML element containing Base64 encoded data taken from a
+ DER object.
"""
- if handle is None:
- raise MissingHandle
- self.handle = handle
-
-
- def set_logstream(self, logstream):
- """
- Set log stream for this Zookeeper. The log stream is a file-like
- object, or None to suppress all logging.
- """
-
- self.logstream = logstream
-
-
- def log(self, msg):
- """
- Send some text to this Zookeeper's log stream, if one is set.
- """
-
- if self.logstream is not None:
- self.logstream.write(msg)
- self.logstream.write("\n")
-
-
- @property
- def resource_ca(self):
- """
- Get ResourceHolderCA object associated with current handle.
- """
-
- if self.handle is None:
- raise HandleNotSet
- return rpki.irdb.ResourceHolderCA.objects.get(handle = self.handle)
-
-
- @property
- def server_ca(self):
- """
- Get ServerCA object.
- """
-
- return rpki.irdb.ServerCA.objects.get()
-
-
- @django.db.transaction.commit_on_success
- def initialize_server_bpki(self):
- """
- Initialize server BPKI portion of an RPKI installation. Reads the
- configuration file and generates the initial BPKI server
- certificates needed to start daemons.
- """
-
- if self.run_rpkid or self.run_pubd:
- server_ca, created = rpki.irdb.ServerCA.objects.get_or_certify()
- rpki.irdb.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irbe")
-
- if self.run_rpkid:
- rpki.irdb.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "rpkid")
- rpki.irdb.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irdbd")
-
- if self.run_pubd:
- rpki.irdb.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "pubd")
-
-
- @django.db.transaction.commit_on_success
- def initialize_resource_bpki(self):
- """
- Initialize the resource-holding BPKI for an RPKI installation.
- Returns XML describing the resource holder.
-
- This method is present primarily for backwards compatibility with
- the old combined initialize() method which initialized both the
- server BPKI and the default resource-holding BPKI in a single
- method call. In the long run we want to replace this with
- something that takes a handle as argument and creates the
- resource-holding BPKI idenity if needed.
- """
-
- resource_ca, created = rpki.irdb.ResourceHolderCA.objects.get_or_certify(handle = self.handle)
- return self.generate_identity()
-
-
- def initialize(self):
- """
- Backwards compatibility wrapper: calls initialize_server_bpki()
- and initialize_resource_bpki(), returns latter's result.
- """
-
- self.initialize_server_bpki()
- return self.initialize_resource_bpki()
-
-
- def generate_identity(self):
- """
- Generate identity XML. Broken out of .initialize() because it's
- easier for the GUI this way.
- """
-
- e = Element("identity", handle = self.handle)
- B64Element(e, "bpki_ta", self.resource_ca.certificate)
- return etree_wrapper(e, msg = 'This is the "identity" file you will need to send to your parent')
-
-
- @django.db.transaction.commit_on_success
- def delete_self(self):
- """
- Delete the ResourceHolderCA object corresponding to the current handle.
- This corresponds to deleting an rpkid <self/> object.
+ if e is None:
+ se = Element(tag, **kwargs)
+ else:
+ se = SubElement(e, tag, **kwargs)
+ if e is not None and e.text is None:
+ e.text = "\n"
+ se.text = "\n" + obj.get_Base64()
+ se.tail = "\n"
+ return se
- This code assumes the normal Django cascade-on-delete behavior,
- that is, we assume that deleting the ResourceHolderCA object
- deletes all the subordinate objects that refer to it via foreign
- key relationships.
+class PEM_writer(object):
"""
-
- resource_ca = self.resource_ca
- if resource_ca is not None:
- resource_ca.delete()
+ Write PEM files to disk, keeping track of which ones we've already
+ written and setting the file mode appropriately.
+
+ Comparing the old file with what we're about to write serves no real
+ purpose except to calm users who find repeated messages about
+ writing the same file confusing.
+ """
+
+ def __init__(self, logstream = None):
+ self.wrote = set()
+ self.logstream = logstream
+
+ def __call__(self, filename, obj, compare = True):
+ filename = os.path.realpath(filename)
+ if filename in self.wrote:
+ return
+ tempname = filename
+ pem = obj.get_PEM()
+ if not filename.startswith("/dev/"):
+ try:
+ if compare and pem == open(filename, "r").read():
+ return
+ except:
+ pass
+ tempname += ".%s.tmp" % os.getpid()
+ mode = 0400 if filename.endswith(".key") else 0444
+ if self.logstream is not None:
+ self.logstream.write("Writing %s\n" % filename)
+ f = os.fdopen(os.open(tempname, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode), "w")
+ f.write(pem)
+ f.close()
+ if tempname != filename:
+ os.rename(tempname, filename)
+ self.wrote.add(filename)
+
+
+def etree_read(xml_file, schema = rpki.relaxng.oob_setup):
+ """
+ Read an etree from a file-like object, verifying it against a schema.
+
+ As a convenience, we also accept an etree_wrapper object in place
+ of a filename, in which case we deepcopy the etree directly from
+ the etree_wrapper and there's no need for a file.
+ """
+
+ if isinstance(xml_file, etree_wrapper):
+ e = copy.deepcopy(xml_file.etree)
else:
- self.log("No such ResourceHolderCA \"%s\"" % self.handle)
-
-
- @django.db.transaction.commit_on_success
- def configure_rootd(self):
+ e = ElementTree(file = xml_file).getroot()
+ schema.assertValid(e)
+ return e
- assert self.run_rpkid and self.run_pubd and self.run_rootd
- rpki.irdb.Rootd.objects.get_or_certify(
- issuer = self.resource_ca,
- service_uri = "http://localhost:%s/" % self.cfg.get("rootd_server_port", section = myrpki_section))
-
- return self.generate_rootd_repository_offer()
-
-
- def generate_rootd_repository_offer(self):
+class etree_wrapper(object):
"""
- Generate repository offer for rootd. Split out of
- configure_rootd() because that's easier for the GUI.
+ Wrapper for ETree objects so we can return them as function results
+ without requiring the caller to understand much about them.
"""
- # The following assumes we'll set up the respository manually.
- # Not sure this is a reasonable assumption, particularly if we
- # ever fix rootd to use the publication protocol.
+ def __init__(self, e, msg = None, debug = False, schema = rpki.relaxng.oob_setup):
+ self.msg = msg
+ e = copy.deepcopy(e)
+ if debug:
+ print ElementToString(e)
+ schema.assertValid(e)
+ self.etree = e
- try:
- self.resource_ca.repositories.get(handle = self.handle)
- return None
+ def __str__(self):
+ return ElementToString(self.etree)
- except rpki.irdb.Repository.DoesNotExist:
- e = Element("repository", type = "offer", handle = self.handle, parent_handle = self.handle)
- B64Element(e, "bpki_client_ta", self.resource_ca.certificate)
- return etree_wrapper(e, msg = 'This is the "repository offer" file for you to use if you want to publish in your own repository')
+ def save(self, filename, logstream = None):
+ filename = os.path.realpath(filename)
+ tempname = filename
+ if not filename.startswith("/dev/"):
+ tempname += ".%s.tmp" % os.getpid()
+ ElementTree(self.etree).write(tempname)
+ if tempname != filename:
+ os.rename(tempname, filename)
+ if logstream is not None:
+ logstream.write("Wrote %s\n" % filename)
+ if self.msg is not None:
+ logstream.write(self.msg + "\n")
+ @property
+ def file(self):
+ from cStringIO import StringIO
+ return StringIO(ElementToString(self.etree))
- def write_bpki_files(self):
- """
- Write out BPKI certificate, key, and CRL files for daemons that
- need them.
- """
-
- writer = PEM_writer(self.logstream)
-
- if self.run_rpkid:
- rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid")
- writer(self.cfg.get("bpki-ta", section = rpkid_section), self.server_ca.certificate)
- writer(self.cfg.get("rpkid-key", section = rpkid_section), rpkid.private_key)
- writer(self.cfg.get("rpkid-cert", section = rpkid_section), rpkid.certificate)
- writer(self.cfg.get("irdb-cert", section = rpkid_section),
- self.server_ca.ee_certificates.get(purpose = "irdbd").certificate)
- writer(self.cfg.get("irbe-cert", section = rpkid_section),
- self.server_ca.ee_certificates.get(purpose = "irbe").certificate)
-
- if self.run_pubd:
- pubd = self.server_ca.ee_certificates.get(purpose = "pubd")
- writer(self.cfg.get("bpki-ta", section = pubd_section), self.server_ca.certificate)
- writer(self.cfg.get("pubd-key", section = pubd_section), pubd.private_key)
- writer(self.cfg.get("pubd-cert", section = pubd_section), pubd.certificate)
- writer(self.cfg.get("irbe-cert", section = pubd_section),
- self.server_ca.ee_certificates.get(purpose = "irbe").certificate)
-
- if self.run_rootd:
- try:
- rootd = rpki.irdb.ResourceHolderCA.objects.get(handle = self.handle).rootd
- writer(self.cfg.get("bpki-ta", section = rootd_section), self.server_ca.certificate)
- writer(self.cfg.get("rootd-bpki-crl", section = rootd_section), self.server_ca.latest_crl)
- writer(self.cfg.get("rootd-bpki-key", section = rootd_section), rootd.private_key)
- writer(self.cfg.get("rootd-bpki-cert", section = rootd_section), rootd.certificate)
- writer(self.cfg.get("child-bpki-cert", section = rootd_section), rootd.issuer.certificate)
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- self.log("rootd enabled but resource holding entity not yet configured, skipping rootd setup")
- except rpki.irdb.Rootd.DoesNotExist:
- self.log("rootd enabled but not yet configured, skipping rootd setup")
-
-
- @django.db.transaction.commit_on_success
- def update_bpki(self):
- """
- Update BPKI certificates. Assumes an existing RPKI installation.
- Basic plan here is to reissue all BPKI certificates we can, right
- now. In the long run we might want to be more clever about only
- touching ones that need maintenance, but this will do for a start.
-
- We also reissue CRLs for all CAs.
+class Zookeeper(object):
- Most likely this should be run under cron.
- """
+ ## @var show_xml
+ # If not None, a file-like object to which to prettyprint XML, for debugging.
- for model in (rpki.irdb.ServerCA,
- rpki.irdb.ResourceHolderCA,
- rpki.irdb.ServerEE,
- rpki.irdb.Referral,
- rpki.irdb.Rootd,
- rpki.irdb.HostedCA,
- rpki.irdb.BSC,
- rpki.irdb.Child,
- rpki.irdb.Parent,
- rpki.irdb.Client,
- rpki.irdb.Repository):
- for obj in model.objects.all():
- self.log("Regenerating BPKI certificate %s" % obj.certificate.getSubject())
- obj.avow()
- obj.save()
-
- self.log("Regenerating Server BPKI CRL")
- self.server_ca.generate_crl()
- self.server_ca.save()
-
- for ca in rpki.irdb.ResourceHolderCA.objects.all():
- self.log("Regenerating BPKI CRL for Resource Holder %s" % ca.handle)
- ca.generate_crl()
- ca.save()
-
-
- @django.db.transaction.commit_on_success
- def synchronize_bpki(self):
- """
- Synchronize BPKI updates. This is separate from .update_bpki()
- because this requires rpkid to be running and none of the other
- BPKI update stuff does; there may be circumstances under which it
- makes sense to do the rest of the BPKI update and allow this to
- fail with a warning.
- """
+ show_xml = None
- if self.run_rpkid:
- updates = []
-
- updates.extend(
- rpki.left_right.self_elt.make_pdu(
- action = "set",
- tag = "%s__self" % ca.handle,
- self_handle = ca.handle,
- bpki_cert = ca.certificate)
- for ca in rpki.irdb.ResourceHolderCA.objects.all())
-
- updates.extend(
- rpki.left_right.bsc_elt.make_pdu(
- action = "set",
- tag = "%s__bsc__%s" % (bsc.issuer.handle, bsc.handle),
- self_handle = bsc.issuer.handle,
- bsc_handle = bsc.handle,
- signing_cert = bsc.certificate,
- signing_cert_crl = bsc.issuer.latest_crl)
- for bsc in rpki.irdb.BSC.objects.all())
-
- updates.extend(
- rpki.left_right.repository_elt.make_pdu(
- action = "set",
- tag = "%s__repository__%s" % (repository.issuer.handle, repository.handle),
- self_handle = repository.issuer.handle,
- repository_handle = repository.handle,
- bpki_cert = repository.certificate)
- for repository in rpki.irdb.Repository.objects.all())
-
- updates.extend(
- rpki.left_right.parent_elt.make_pdu(
- action = "set",
- tag = "%s__parent__%s" % (parent.issuer.handle, parent.handle),
- self_handle = parent.issuer.handle,
- parent_handle = parent.handle,
- bpki_cms_cert = parent.certificate)
- for parent in rpki.irdb.Parent.objects.all())
-
- updates.extend(
- rpki.left_right.parent_elt.make_pdu(
- action = "set",
- tag = "%s__rootd" % rootd.issuer.handle,
- self_handle = rootd.issuer.handle,
- parent_handle = rootd.issuer.handle,
- bpki_cms_cert = rootd.certificate)
- for rootd in rpki.irdb.Rootd.objects.all())
-
- updates.extend(
- rpki.left_right.child_elt.make_pdu(
- action = "set",
- tag = "%s__child__%s" % (child.issuer.handle, child.handle),
- self_handle = child.issuer.handle,
- child_handle = child.handle,
- bpki_cert = child.certificate)
- for child in rpki.irdb.Child.objects.all())
-
- if updates:
- self.check_error_report(self.call_rpkid(updates))
-
- if self.run_pubd:
- updates = []
-
- updates.append(
- rpki.publication.config_elt.make_pdu(
- action = "set",
- bpki_crl = self.server_ca.latest_crl))
-
- updates.extend(
- rpki.publication.client_elt.make_pdu(
- action = "set",
- client_handle = client.handle,
- bpki_cert = client.certificate)
- for client in self.server_ca.clients.all())
-
- if updates:
- self.check_error_report(self.call_pubd(updates))
-
-
- @django.db.transaction.commit_on_success
- def configure_child(self, filename, child_handle = None, valid_until = None):
- """
- Configure a new child of this RPKI entity, given the child's XML
- identity file as an input. Extracts the child's data from the
- XML, cross-certifies the child's resource-holding BPKI
- certificate, and generates an XML file describing the relationship
- between the child and this parent, including this parent's BPKI
- data and up-down protocol service URI.
- """
+ def __init__(self, cfg = None, handle = None, logstream = None, disable_signal_handlers = False):
- c = etree_read(filename)
+ if cfg is None:
+ cfg = rpki.config.parser()
- if child_handle is None:
- child_handle = c.get("handle")
+ if handle is None:
+ handle = cfg.get("handle", section = myrpki_section)
- if valid_until is None:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
- else:
- valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
- if valid_until < rpki.sundial.now():
- raise PastExpiration("Specified new expiration time %s has passed" % valid_until)
+ self.cfg = cfg
- self.log("Child calls itself %r, we call it %r" % (c.get("handle"), child_handle))
+ self.logstream = logstream
+ self.disable_signal_handlers = disable_signal_handlers
- child, created = rpki.irdb.Child.objects.get_or_certify(
- issuer = self.resource_ca,
- handle = child_handle,
- ta = rpki.x509.X509(Base64 = c.findtext("bpki_ta")),
- valid_until = valid_until)
+ self.run_rpkid = cfg.getboolean("run_rpkid", section = myrpki_section)
+ self.run_pubd = cfg.getboolean("run_pubd", section = myrpki_section)
- return self.generate_parental_response(child), child_handle
+ self.default_repository = cfg.get("default_repository", "", section = myrpki_section)
+ self.pubd_contact_info = cfg.get("pubd_contact_info", "", section = myrpki_section)
+ self.rsync_module = cfg.get("publication_rsync_module", section = myrpki_section)
+ self.rsync_server = cfg.get("publication_rsync_server", section = myrpki_section)
- @django.db.transaction.commit_on_success
- def generate_parental_response(self, child):
- """
- Generate parental response XML. Broken out of .configure_child()
- for GUI.
- """
+ self.reset_identity(handle)
- service_uri = "http://%s:%s/up-down/%s/%s" % (
- self.cfg.get("rpkid_server_host", section = myrpki_section),
- self.cfg.get("rpkid_server_port", section = myrpki_section),
- self.handle, child.handle)
- e = Element("parent", parent_handle = self.handle, child_handle = child.handle,
- service_uri = service_uri, valid_until = str(child.valid_until))
- B64Element(e, "bpki_resource_ta", self.resource_ca.certificate)
- B64Element(e, "bpki_child_ta", child.ta)
+ def reset_identity(self, handle):
+ """
+ Select handle of current resource holding entity.
+ """
- try:
- if self.default_repository:
- repo = self.resource_ca.repositories.get(handle = self.default_repository)
- else:
- repo = self.resource_ca.repositories.get()
- except rpki.irdb.Repository.DoesNotExist:
- repo = None
+ if handle is None:
+ raise MissingHandle
+ self.handle = handle
- if repo is None:
- self.log("Couldn't find any usable repositories, not giving referral")
- elif repo.handle == self.handle:
- SubElement(e, "repository", type = "offer")
+ def set_logstream(self, logstream):
+ """
+ Set log stream for this Zookeeper. The log stream is a file-like
+ object, or None to suppress all logging.
+ """
- else:
- proposed_sia_base = repo.sia_base + child.handle + "/"
- referral_cert, created = rpki.irdb.Referral.objects.get_or_certify(issuer = self.resource_ca)
- auth = rpki.x509.SignedReferral()
- auth.set_content(B64Element(None, myrpki_xmlns + "referral", child.ta,
- version = myrpki_version,
- authorized_sia_base = proposed_sia_base))
- auth.schema_check()
- auth.sign(referral_cert.private_key, referral_cert.certificate, self.resource_ca.latest_crl)
+ self.logstream = logstream
- r = SubElement(e, "repository", type = "referral")
- B64Element(r, "authorization", auth, referrer = repo.client_handle)
- SubElement(r, "contact_info")
- return etree_wrapper(e, msg = "Send this file back to the child you just configured")
+ def log(self, msg):
+ """
+ Send some text to this Zookeeper's log stream, if one is set.
+ """
+ if self.logstream is not None:
+ self.logstream.write(msg)
+ self.logstream.write("\n")
- @django.db.transaction.commit_on_success
- def delete_child(self, child_handle):
- """
- Delete a child of this RPKI entity.
- """
- self.resource_ca.children.get(handle = child_handle).delete()
+ @property
+ def resource_ca(self):
+ """
+ Get ResourceHolderCA object associated with current handle.
+ """
+ if self.handle is None:
+ raise HandleNotSet
+ return rpki.irdb.models.ResourceHolderCA.objects.get(handle = self.handle)
- @django.db.transaction.commit_on_success
- def configure_parent(self, filename, parent_handle = None):
- """
- Configure a new parent of this RPKI entity, given the output of
- the parent's configure_child command as input. Reads the parent's
- response XML, extracts the parent's BPKI and service URI
- information, cross-certifies the parent's BPKI data into this
- entity's BPKI, and checks for offers or referrals of publication
- service. If a publication offer or referral is present, we
- generate a request-for-service message to that repository, in case
- the user wants to avail herself of the referral or offer.
- """
- p = etree_read(filename)
+ @property
+ def server_ca(self):
+ """
+ Get ServerCA object.
+ """
- if parent_handle is None:
- parent_handle = p.get("parent_handle")
+ return rpki.irdb.models.ServerCA.objects.get()
- r = p.find("repository")
- repository_type = "none"
- referrer = None
- referral_authorization = None
+ @django.db.transaction.atomic
+ def initialize_server_bpki(self):
+ """
+ Initialize server BPKI portion of an RPKI installation. Reads the
+ configuration file and generates the initial BPKI server
+ certificates needed to start daemons.
+ """
- if r is not None:
- repository_type = r.get("type")
+ if self.run_rpkid or self.run_pubd:
+ server_ca = rpki.irdb.models.ServerCA.objects.get_or_certify()[0]
+ rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irbe")
- if repository_type == "referral":
- a = r.find("authorization")
- referrer = a.get("referrer")
- referral_authorization = rpki.x509.SignedReferral(Base64 = a.text)
+ if self.run_rpkid:
+ rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "rpkid")
+ rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irdbd")
- self.log("Parent calls itself %r, we call it %r" % (p.get("parent_handle"), parent_handle))
- self.log("Parent calls us %r" % p.get("child_handle"))
+ if self.run_pubd:
+ rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "pubd")
- parent, created = rpki.irdb.Parent.objects.get_or_certify(
- issuer = self.resource_ca,
- handle = parent_handle,
- child_handle = p.get("child_handle"),
- parent_handle = p.get("parent_handle"),
- service_uri = p.get("service_uri"),
- ta = rpki.x509.X509(Base64 = p.findtext("bpki_resource_ta")),
- repository_type = repository_type,
- referrer = referrer,
- referral_authorization = referral_authorization)
- return self.generate_repository_request(parent), parent_handle
+ @django.db.transaction.atomic
+ def initialize_resource_bpki(self):
+ """
+ Initialize the resource-holding BPKI for an RPKI installation.
+ Returns XML describing the resource holder.
+ This method is present primarily for backwards compatibility with
+ the old combined initialize() method which initialized both the
+ server BPKI and the default resource-holding BPKI in a single
+ method call. In the long run we want to replace this with
+ something that takes a handle as argument and creates the
+  resource-holding BPKI identity if needed.
+ """
- def generate_repository_request(self, parent):
- """
- Generate repository request for a given parent.
- """
+ rpki.irdb.models.ResourceHolderCA.objects.get_or_certify(handle = self.handle)
+ return self.generate_identity()
- e = Element("repository", handle = self.handle,
- parent_handle = parent.handle, type = parent.repository_type)
- if parent.repository_type == "referral":
- B64Element(e, "authorization", parent.referral_authorization, referrer = parent.referrer)
- SubElement(e, "contact_info")
- B64Element(e, "bpki_client_ta", self.resource_ca.certificate)
- return etree_wrapper(e, msg = "This is the file to send to the repository operator")
+ def initialize(self):
+ """
+ Backwards compatibility wrapper: calls initialize_server_bpki()
+ and initialize_resource_bpki(), returns latter's result.
+ """
- @django.db.transaction.commit_on_success
- def delete_parent(self, parent_handle):
- """
- Delete a parent of this RPKI entity.
- """
+ self.initialize_server_bpki()
+ return self.initialize_resource_bpki()
- self.resource_ca.parents.get(handle = parent_handle).delete()
+ def generate_identity(self):
+ """
+ Generate identity XML. Broken out of .initialize() because it's
+ easier for the GUI this way.
+ """
- @django.db.transaction.commit_on_success
- def delete_rootd(self):
- """
- Delete rootd associated with this RPKI entity.
- """
+ e = Element(tag_oob_child_request, nsmap = oob_nsmap, version = oob_version,
+ child_handle = self.handle)
+ B64Element(e, tag_oob_child_bpki_ta, self.resource_ca.certificate)
+ return etree_wrapper(e, msg = 'This is the "identity" file you will need to send to your parent')
- self.resource_ca.rootd.delete()
+ @django.db.transaction.atomic
+ def delete_tenant(self):
+ """
+ Delete the ResourceHolderCA object corresponding to the current handle.
+ This corresponds to deleting an rpkid <tenant/> object.
- @django.db.transaction.commit_on_success
- def configure_publication_client(self, filename, sia_base = None, flat = False):
- """
- Configure publication server to know about a new client, given the
- client's request-for-service message as input. Reads the client's
- request for service, cross-certifies the client's BPKI data, and
- generates a response message containing the repository's BPKI data
- and service URI.
- """
+ This code assumes the normal Django cascade-on-delete behavior,
+ that is, we assume that deleting the ResourceHolderCA object
+ deletes all the subordinate objects that refer to it via foreign
+ key relationships.
+ """
- client = etree_read(filename)
-
- client_ta = rpki.x509.X509(Base64 = client.findtext("bpki_client_ta"))
-
- if sia_base is None and flat:
- self.log("Flat publication structure forced, homing client at top-level")
- sia_base = "rsync://%s/%s/%s/" % (self.rsync_server, self.rsync_module, client.get("handle"))
-
- if sia_base is None and client.get("type") == "referral":
- self.log("This looks like a referral, checking")
- try:
- auth = client.find("authorization")
- referrer = self.server_ca.clients.get(handle = auth.get("referrer"))
- referral_cms = rpki.x509.SignedReferral(Base64 = auth.text)
- referral_xml = referral_cms.unwrap(ta = (referrer.certificate, self.server_ca.certificate))
- if rpki.x509.X509(Base64 = referral_xml.text) != client_ta:
- raise BadXMLMessage("Referral trust anchor does not match")
- sia_base = referral_xml.get("authorized_sia_base")
- except rpki.irdb.Client.DoesNotExist:
- self.log("We have no record of the client (%s) alleged to have made this referral" % auth.get("referrer"))
-
- if sia_base is None and client.get("type") == "offer":
- self.log("This looks like an offer, checking")
- try:
- parent = rpki.irdb.ResourceHolderCA.objects.get(children__ta__exact = client_ta)
- if "/" in parent.repositories.get(ta = self.server_ca.certificate).client_handle:
- self.log("Client's parent is not top-level, this is not a valid offer")
- else:
- self.log("Found client and its parent, nesting")
- sia_base = "rsync://%s/%s/%s/%s/" % (self.rsync_server, self.rsync_module,
- parent.handle, client.get("handle"))
- except rpki.irdb.Repository.DoesNotExist:
- self.log("Found client's parent, but repository isn't set, this shouldn't happen!")
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- try:
- rpki.irdb.Rootd.objects.get(issuer__certificate__exact = client_ta)
- except rpki.irdb.Rootd.DoesNotExist:
- self.log("We don't host this client's parent, so we didn't make this offer")
+ resource_ca = self.resource_ca
+ if resource_ca is not None:
+ resource_ca.delete()
else:
- self.log("This client's parent is rootd")
-
- if sia_base is None:
- self.log("Don't know where to nest this client, defaulting to top-level")
- sia_base = "rsync://%s/%s/%s/" % (self.rsync_server, self.rsync_module, client.get("handle"))
-
- if not sia_base.startswith("rsync://"):
- raise BadXMLMessage("Malformed sia_base parameter %r, should start with 'rsync://'" % sia_base)
-
- client_handle = "/".join(sia_base.rstrip("/").split("/")[4:])
-
- parent_handle = client.get("parent_handle")
-
- self.log("Client calls itself %r, we call it %r" % (client.get("handle"), client_handle))
- self.log("Client says its parent handle is %r" % parent_handle)
-
- client, created = rpki.irdb.Client.objects.get_or_certify(
- issuer = self.server_ca,
- handle = client_handle,
- parent_handle = parent_handle,
- ta = client_ta,
- sia_base = sia_base)
-
- return self.generate_repository_response(client), client_handle
-
+ self.log("No such ResourceHolderCA \"%s\"" % self.handle)
+
+
+ @django.db.transaction.atomic
+ def configure_root(self, handle, resources):
+
+ if not handle:
+ handle = self.handle
+
+ parent = rpki.irdb.models.Parent.objects.get_or_certify(
+ issuer = self.resource_ca,
+ handle = handle,
+ parent_handle = handle,
+ child_handle = handle,
+ ta = self.resource_ca.certificate,
+ repository_type = "none",
+ asn_resources = str(resources.asn),
+ ipv4_resources = str(resources.v4),
+ ipv6_resources = str(resources.v6))[0]
+
+ return self.generate_repository_request(parent)
+
+
+ def extract_root_certificate_and_uris(self, handle):
+
+ if not handle:
+ handle = self.handle
+
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_parent, action = "get",
+ tenant_handle = self.handle, parent_handle = handle)
+ r_msg = self.call_rpkid(q_msg)
+ assert len(r_msg) == 1 and r_msg[0].tag == rpki.left_right.tag_parent
+
+ b64 = r_msg[0].findtext(rpki.left_right.tag_rpki_root_cert)
+ if not b64:
+ return None, ()
- def generate_repository_response(self, client):
- """
- Generate repository response XML to a given client.
- """
-
- service_uri = "http://%s:%s/client/%s" % (
- self.cfg.get("pubd_server_host", section = myrpki_section),
- self.cfg.get("pubd_server_port", section = myrpki_section),
- client.handle)
-
- e = Element("repository", type = "confirmed",
- client_handle = client.handle,
- parent_handle = client.parent_handle,
- sia_base = client.sia_base,
- service_uri = service_uri)
-
- B64Element(e, "bpki_server_ta", self.server_ca.certificate)
- B64Element(e, "bpki_client_ta", client.ta)
- SubElement(e, "contact_info").text = self.pubd_contact_info
- return etree_wrapper(e, msg = "Send this file back to the publication client you just configured")
-
-
- @django.db.transaction.commit_on_success
- def delete_publication_client(self, client_handle):
- """
- Delete a publication client of this RPKI entity.
- """
+ cert = rpki.x509.X509(Base64 = b64)
+ caDirectory, rpkiManifest, signedObjectRepository, rpkiNotify = cert.get_SIA()
+ sia_base = r_msg[0].get("sia_base")
+ fn = cert.gSKI() + ".cer"
+
+ https_uri = os.path.join(os.path.dirname(rpkiNotify[0]), fn)
+ rsync_uri = sia_base + fn
+
+ return cert, (https_uri, rsync_uri)
+
+
+ def write_bpki_files(self):
+ """
+ Write out BPKI certificate, key, and CRL files for daemons that
+ need them.
+ """
+
+ writer = PEM_writer(self.logstream)
+
+ if self.run_rpkid:
+ rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid")
+ writer(self.cfg.get("bpki-ta", section = rpkid_section), self.server_ca.certificate)
+ writer(self.cfg.get("rpkid-key", section = rpkid_section), rpkid.private_key)
+ writer(self.cfg.get("rpkid-cert", section = rpkid_section), rpkid.certificate)
+ writer(self.cfg.get("irdb-cert", section = rpkid_section),
+ self.server_ca.ee_certificates.get(purpose = "irdbd").certificate)
+ writer(self.cfg.get("irbe-cert", section = rpkid_section),
+ self.server_ca.ee_certificates.get(purpose = "irbe").certificate)
+
+ if self.run_pubd:
+ pubd = self.server_ca.ee_certificates.get(purpose = "pubd")
+ writer(self.cfg.get("bpki-ta", section = pubd_section), self.server_ca.certificate)
+ writer(self.cfg.get("pubd-crl", section = pubd_section), self.server_ca.latest_crl)
+ writer(self.cfg.get("pubd-key", section = pubd_section), pubd.private_key)
+ writer(self.cfg.get("pubd-cert", section = pubd_section), pubd.certificate)
+ writer(self.cfg.get("irbe-cert", section = pubd_section),
+ self.server_ca.ee_certificates.get(purpose = "irbe").certificate)
+
+
+ @django.db.transaction.atomic
+ def update_bpki(self):
+ """
+ Update BPKI certificates. Assumes an existing RPKI installation.
+
+ Basic plan here is to reissue all BPKI certificates we can, right
+ now. In the long run we might want to be more clever about only
+ touching ones that need maintenance, but this will do for a start.
+
+ We also reissue CRLs for all CAs.
+
+ Most likely this should be run under cron.
+ """
+
+ for model in (rpki.irdb.models.ServerCA,
+ rpki.irdb.models.ResourceHolderCA,
+ rpki.irdb.models.ServerEE,
+ rpki.irdb.models.Referral,
+ rpki.irdb.models.HostedCA,
+ rpki.irdb.models.BSC,
+ rpki.irdb.models.Child,
+ rpki.irdb.models.Parent,
+ rpki.irdb.models.Client,
+ rpki.irdb.models.Repository):
+ for obj in model.objects.all():
+ self.log("Regenerating BPKI certificate %s" % obj.certificate.getSubject())
+ obj.avow()
+ obj.save()
+
+ self.log("Regenerating Server BPKI CRL")
+ self.server_ca.generate_crl()
+ self.server_ca.save()
+
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ self.log("Regenerating BPKI CRL for Resource Holder %s" % ca.handle)
+ ca.generate_crl()
+ ca.save()
+
+
+ @staticmethod
+ def compose_left_right_query():
+ """
+ Compose top level element of a left-right query.
+ """
+
+ return Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "query", version = rpki.left_right.version)
+
+
+ @staticmethod
+ def _compose_publication_control_query():
+ """
+ Compose top level element of a publication-control query.
+ """
+
+ return Element(rpki.publication_control.tag_msg, nsmap = rpki.publication_control.nsmap,
+ type = "query", version = rpki.publication_control.version)
+
+
+ @django.db.transaction.atomic
+ def synchronize_bpki(self):
+ """
+ Synchronize BPKI updates. This is separate from .update_bpki()
+ because this requires rpkid to be running and none of the other
+ BPKI update stuff does; there may be circumstances under which it
+ makes sense to do the rest of the BPKI update and allow this to
+ fail with a warning.
+ """
+
+ if self.run_rpkid:
+ q_msg = self.compose_left_right_query()
+
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant,
+ action = "set",
+ tag = "%s__tenant" % ca.handle,
+ tenant_handle = ca.handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.certificate.get_Base64()
+
+ for bsc in rpki.irdb.models.BSC.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "set",
+ tag = "%s__bsc__%s" % (bsc.issuer.handle, bsc.handle),
+ tenant_handle = bsc.issuer.handle,
+ bsc_handle = bsc.handle)
+ SubElement(q_pdu, rpki.left_right.tag_signing_cert).text = bsc.certificate.get_Base64()
+ SubElement(q_pdu, rpki.left_right.tag_signing_cert_crl).text = bsc.issuer.latest_crl.get_Base64()
+
+ for repository in rpki.irdb.models.Repository.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_repository,
+ action = "set",
+ tag = "%s__repository__%s" % (repository.issuer.handle, repository.handle),
+ tenant_handle = repository.issuer.handle,
+ repository_handle = repository.handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = repository.certificate.get_Base64()
+
+ for parent in rpki.irdb.models.Parent.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_parent,
+ action = "set",
+ tag = "%s__parent__%s" % (parent.issuer.handle, parent.handle),
+ tenant_handle = parent.issuer.handle,
+ parent_handle = parent.handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = parent.certificate.get_Base64()
+
+ for child in rpki.irdb.models.Child.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_child,
+ action = "set",
+ tag = "%s__child__%s" % (child.issuer.handle, child.handle),
+ tenant_handle = child.issuer.handle,
+ child_handle = child.handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = child.certificate.get_Base64()
+
+ if len(q_msg) > 0:
+ self.call_rpkid(q_msg)
+
+ if self.run_pubd:
+ q_msg = self._compose_publication_control_query()
+
+ for client in self.server_ca.clients.all():
+ q_pdu = SubElement(q_msg, rpki.publication_control.tag_client, action = "set", client_handle = client.handle)
+ SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = client.certificate.get_Base64()
+
+ if len(q_msg) > 0:
+ self.call_pubd(q_msg)
+
+
+ @django.db.transaction.atomic
+ def configure_child(self, xml_file, child_handle = None, valid_until = None):
+ """
+ Configure a new child of this RPKI entity, given the child's XML
+ identity file as an input. Extracts the child's data from the
+ XML, cross-certifies the child's resource-holding BPKI
+ certificate, and generates an XML file describing the relationship
+ between the child and this parent, including this parent's BPKI
+ data and up-down protocol service URI.
+ """
+
+ x = etree_read(xml_file)
+
+ if x.tag != tag_oob_child_request:
+ raise BadXMLMessage("Expected %s, got %s", tag_oob_child_request, x.tag)
+
+ if child_handle is None:
+ child_handle = x.get("child_handle")
+
+ if valid_until is None:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
+ else:
+ valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
+ if valid_until < rpki.sundial.now():
+ raise PastExpiration("Specified new expiration time %s has passed" % valid_until)
- self.server_ca.clients.get(handle = client_handle).delete()
+ self.log("Child calls itself %r, we call it %r" % (x.get("child_handle"), child_handle))
+ child = rpki.irdb.models.Child.objects.get_or_certify(
+ issuer = self.resource_ca,
+ handle = child_handle,
+ ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_child_bpki_ta)),
+ valid_until = valid_until)[0]
- @django.db.transaction.commit_on_success
- def configure_repository(self, filename, parent_handle = None):
- """
- Configure a publication repository for this RPKI entity, given the
- repository's response to our request-for-service message as input.
- Reads the repository's response, extracts and cross-certifies the
- BPKI data and service URI, and links the repository data with the
- corresponding parent data in our local database.
- """
+ return self.generate_parental_response(child), child_handle
- r = etree_read(filename)
- if parent_handle is None:
- parent_handle = r.get("parent_handle")
+ @django.db.transaction.atomic
+ def generate_parental_response(self, child):
+ """
+ Generate parental response XML. Broken out of .configure_child()
+ for GUI.
+ """
- self.log("Repository calls us %r" % (r.get("client_handle")))
- self.log("Repository response associated with parent_handle %r" % parent_handle)
+ service_uri = "http://%s:%s/up-down/%s/%s" % (
+ self.cfg.get("rpkid_server_host", section = myrpki_section),
+ self.cfg.get("rpkid_server_port", section = myrpki_section),
+ self.handle, child.handle)
- try:
- if parent_handle == self.handle:
- turtle = self.resource_ca.rootd
- else:
- turtle = self.resource_ca.parents.get(handle = parent_handle)
+ e = Element(tag_oob_parent_response, nsmap = oob_nsmap, version = oob_version,
+ service_uri = service_uri,
+ child_handle = child.handle,
+ parent_handle = self.handle)
+ B64Element(e, tag_oob_parent_bpki_ta, self.resource_ca.certificate)
- except (rpki.irdb.Parent.DoesNotExist, rpki.irdb.Rootd.DoesNotExist):
- self.log("Could not find parent %r in our database" % parent_handle)
+ try:
+ if self.default_repository:
+ repo = self.resource_ca.repositories.get(handle = self.default_repository)
+ else:
+ repo = self.resource_ca.repositories.get()
+ except rpki.irdb.models.Repository.DoesNotExist:
+ repo = None
- else:
- rpki.irdb.Repository.objects.get_or_certify(
- issuer = self.resource_ca,
- handle = parent_handle,
- client_handle = r.get("client_handle"),
- service_uri = r.get("service_uri"),
- sia_base = r.get("sia_base"),
- ta = rpki.x509.X509(Base64 = r.findtext("bpki_server_ta")),
- turtle = turtle)
-
-
- @django.db.transaction.commit_on_success
- def delete_repository(self, repository_handle):
- """
- Delete a repository of this RPKI entity.
- """
+ if repo is None:
+ self.log("Couldn't find any usable repositories, not giving referral")
- self.resource_ca.repositories.get(handle = repository_handle).delete()
+ elif repo.handle == self.handle:
+ SubElement(e, tag_oob_offer)
+ else:
+ proposed_sia_base = repo.sia_base + child.handle + "/"
+ referral_cert = rpki.irdb.models.Referral.objects.get_or_certify(issuer = self.resource_ca)[0]
+ auth = rpki.x509.SignedReferral()
+ auth.set_content(B64Element(None, tag_oob_authorization, child.ta,
+ nsmap = oob_nsmap, version = oob_version,
+ authorized_sia_base = proposed_sia_base))
+ auth.schema_check()
+ auth.sign(referral_cert.private_key, referral_cert.certificate, self.resource_ca.latest_crl)
+ B64Element(e, tag_oob_referral, auth, referrer = repo.client_handle)
+
+ return etree_wrapper(e, msg = "Send this file back to the child you just configured")
+
+
+ @django.db.transaction.atomic
+ def delete_child(self, child_handle):
+ """
+ Delete a child of this RPKI entity.
+ """
+
+ self.resource_ca.children.get(handle = child_handle).delete()
+
+
+ @django.db.transaction.atomic
+ def configure_parent(self, xml_file, parent_handle = None):
+ """
+ Configure a new parent of this RPKI entity, given the output of
+ the parent's configure_child command as input. Reads the parent's
+ response XML, extracts the parent's BPKI and service URI
+ information, cross-certifies the parent's BPKI data into this
+ entity's BPKI, and checks for offers or referrals of publication
+ service. If a publication offer or referral is present, we
+ generate a request-for-service message to that repository, in case
+ the user wants to avail herself of the referral or offer.
+ """
+
+ x = etree_read(xml_file)
+
+ if x.tag != tag_oob_parent_response:
+ raise BadXMLMessage("Expected %s, got %s", tag_oob_parent_response, x.tag)
+
+ if parent_handle is None:
+ parent_handle = x.get("parent_handle")
+
+ offer = x.find(tag_oob_offer)
+ referral = x.find(tag_oob_referral)
+
+ if offer is not None:
+ repository_type = "offer"
+ referrer = None
+ referral_authorization = None
+
+ elif referral is not None:
+ repository_type = "referral"
+ referrer = referral.get("referrer")
+ referral_authorization = rpki.x509.SignedReferral(Base64 = referral.text)
- @django.db.transaction.commit_on_success
- def renew_children(self, child_handle, valid_until = None):
- """
- Update validity period for one child entity or, if child_handle is
- None, for all child entities.
- """
+ else:
+ repository_type = "none"
+ referrer = None
+ referral_authorization = None
+
+ self.log("Parent calls itself %r, we call it %r" % (x.get("parent_handle"), parent_handle))
+ self.log("Parent calls us %r" % x.get("child_handle"))
+
+ parent = rpki.irdb.models.Parent.objects.get_or_certify(
+ issuer = self.resource_ca,
+ handle = parent_handle,
+ child_handle = x.get("child_handle"),
+ parent_handle = x.get("parent_handle"),
+ service_uri = x.get("service_uri"),
+ ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_parent_bpki_ta)),
+ repository_type = repository_type,
+ referrer = referrer,
+ referral_authorization = referral_authorization)[0]
+
+ return self.generate_repository_request(parent), parent_handle
+
+
+ def generate_repository_request(self, parent):
+ """
+ Generate repository request for a given parent.
+ """
+
+ e = Element(tag_oob_publisher_request, nsmap = oob_nsmap, version = oob_version,
+ publisher_handle = self.handle)
+ B64Element(e, tag_oob_publisher_bpki_ta, self.resource_ca.certificate)
+ if parent.repository_type == "referral":
+ B64Element(e, tag_oob_referral, parent.referral_authorization,
+ referrer = parent.referrer)
+
+ return etree_wrapper(e, msg = "This is the file to send to the repository operator")
+
+
+ @django.db.transaction.atomic
+ def delete_parent(self, parent_handle):
+ """
+ Delete a parent of this RPKI entity.
+ """
+
+ self.resource_ca.parents.get(handle = parent_handle).delete()
+
+
+ @django.db.transaction.atomic
+ def configure_publication_client(self, xml_file, sia_base = None, flat = False):
+ """
+ Configure publication server to know about a new client, given the
+ client's request-for-service message as input. Reads the client's
+ request for service, cross-certifies the client's BPKI data, and
+ generates a response message containing the repository's BPKI data
+ and service URI.
+ """
+
+ # pylint: disable=E1124
+
+ x = etree_read(xml_file)
- if child_handle is None:
- children = self.resource_ca.children.all()
- else:
- children = self.resource_ca.children.filter(handle = child_handle)
+ if x.tag != tag_oob_publisher_request:
+ raise BadXMLMessage("Expected %s, got %s", tag_oob_publisher_request, x.tag)
+
+ client_ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_publisher_bpki_ta))
+
+ referral = x.find(tag_oob_referral)
+
+ default_sia_base = "rsync://{self.rsync_server}/{self.rsync_module}/{handle}/".format(
+ self = self,
+ handle = x.get("publisher_handle"))
+
+ if sia_base is None and flat:
+ self.log("Flat publication structure forced, homing client at top-level")
+ sia_base = default_sia_base
+
+ if sia_base is None and referral is not None:
+ self.log("This looks like a referral, checking")
+ try:
+ referrer = referral.get("referrer")
+ referrer = self.server_ca.clients.get(handle = referrer)
+ referral = rpki.x509.SignedReferral(Base64 = referral.text)
+ referral = referral.unwrap(ta = (referrer.certificate, self.server_ca.certificate))
+ if rpki.x509.X509(Base64 = referral.text) != client_ta:
+ raise BadXMLMessage("Referral trust anchor does not match")
+ sia_base = referral.get("authorized_sia_base")
+ except rpki.irdb.models.Client.DoesNotExist:
+ self.log("We have no record of the client ({}) alleged to have made this referral".format(referrer))
+
+ if sia_base is None and referral is None:
+ self.log("This might be an offer, checking")
+ try:
+ parent = rpki.irdb.models.ResourceHolderCA.objects.get(children__ta = client_ta)
+ if "/" in parent.repositories.get(ta = self.server_ca.certificate).client_handle:
+ self.log("Client's parent is not top-level, this is not a valid offer")
+ else:
+ self.log("Found client and its parent, nesting")
+ sia_base = "rsync://{self.rsync_server}/{self.rsync_module}/{parent_handle}/{client_handle}/".format(
+ self = self,
+ parent_handle = parent.handle,
+ client_handle = x.get("publisher_handle"))
+ except rpki.irdb.models.Repository.DoesNotExist:
+ self.log("Found client's parent, but repository isn't set, this shouldn't happen!")
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ self.log("We don't host this client's parent, so we didn't make an offer")
+
+ if sia_base is None:
+ self.log("Don't know where else to nest this client, so defaulting to top-level")
+ sia_base = default_sia_base
+
+ if not sia_base.startswith("rsync://"):
+ raise BadXMLMessage("Malformed sia_base parameter %r, should start with 'rsync://'" % sia_base)
+
+ client_handle = "/".join(sia_base.rstrip("/").split("/")[4:])
- if valid_until is None:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
- else:
- valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
- if valid_until < rpki.sundial.now():
- raise PastExpiration("Specified new expiration time %s has passed" % valid_until)
+ self.log("Client calls itself %r, we call it %r" % (
+ x.get("publisher_handle"), client_handle))
+
+ client = rpki.irdb.models.Client.objects.get_or_certify(
+ issuer = self.server_ca,
+ handle = client_handle,
+ ta = client_ta,
+ sia_base = sia_base)[0]
+
+ return self.generate_repository_response(client), client_handle
- self.log("New validity date %s" % valid_until)
- for child in children:
- child.valid_until = valid_until
- child.save()
+ def generate_repository_response(self, client):
+ """
+ Generate repository response XML to a given client.
+ """
+ service_uri = "http://{host}:{port}/client/{handle}".format(
+ host = self.cfg.get("pubd_server_host", section = myrpki_section),
+ port = self.cfg.get("pubd_server_port", section = myrpki_section),
+ handle = client.handle)
- @django.db.transaction.commit_on_success
- def load_prefixes(self, filename, ignore_missing_children = False):
- """
- Whack IRDB to match prefixes.csv.
- """
+ rrdp_uri = self.cfg.get("publication_rrdp_notification_uri", section = myrpki_section, default = "")
+
+ e = Element(tag_oob_repository_response, nsmap = oob_nsmap, version = oob_version,
+ service_uri = service_uri,
+ publisher_handle = client.handle,
+ sia_base = client.sia_base)
- grouped4 = {}
- grouped6 = {}
+ if rrdp_uri:
+ e.set("rrdp_notification_uri", rrdp_uri)
+
+ B64Element(e, tag_oob_repository_bpki_ta, self.server_ca.certificate)
+ return etree_wrapper(e, msg = "Send this file back to the publication client you just configured")
+
+
+ @django.db.transaction.atomic
+ def delete_publication_client(self, client_handle):
+ """
+ Delete a publication client of this RPKI entity.
+ """
- for handle, prefix in csv_reader(filename, columns = 2):
- grouped = grouped6 if ":" in prefix else grouped4
- if handle not in grouped:
- grouped[handle] = []
- grouped[handle].append(prefix)
+ self.server_ca.clients.get(handle = client_handle).delete()
- primary_keys = []
- for version, grouped, rset in ((4, grouped4, rpki.resource_set.resource_set_ipv4),
- (6, grouped6, rpki.resource_set.resource_set_ipv6)):
- for handle, prefixes in grouped.iteritems():
- try:
- child = self.resource_ca.children.get(handle = handle)
- except rpki.irdb.Child.DoesNotExist:
- if not ignore_missing_children:
- raise
+ @django.db.transaction.atomic
+ def configure_repository(self, xml_file, parent_handle = None):
+ """
+ Configure a publication repository for this RPKI entity, given the
+ repository's response to our request-for-service message as input.
+ Reads the repository's response, extracts and cross-certifies the
+ BPKI data and service URI, and links the repository data with the
+ corresponding parent data in our local database.
+ """
+
+ x = etree_read(xml_file)
+
+ if x.tag != tag_oob_repository_response:
+ raise BadXMLMessage("Expected %s, got %s", tag_oob_repository_response, x.tag)
+
+ self.log("Repository calls us %r" % (x.get("publisher_handle")))
+
+ if parent_handle is not None:
+ self.log("Explicit parent_handle given")
+ try:
+ parent = self.resource_ca.parents.get(handle = parent_handle)
+ except rpki.irdb.models.Parent.DoesNotExist:
+ self.log("Could not find parent %r in our database" % parent_handle)
+ raise CouldntFindRepoParent
+
else:
- for prefix in rset(",".join(prefixes)):
- obj, created = rpki.irdb.ChildNet.objects.get_or_create(
- child = child,
- start_ip = str(prefix.min),
- end_ip = str(prefix.max),
- version = version)
- primary_keys.append(obj.pk)
-
- q = rpki.irdb.ChildNet.objects
- q = q.filter(child__issuer__exact = self.resource_ca)
- q = q.exclude(pk__in = primary_keys)
- q.delete()
-
-
- @django.db.transaction.commit_on_success
- def load_asns(self, filename, ignore_missing_children = False):
- """
- Whack IRDB to match asns.csv.
- """
-
- grouped = {}
-
- for handle, asn in csv_reader(filename, columns = 2):
- if handle not in grouped:
- grouped[handle] = []
- grouped[handle].append(asn)
-
- primary_keys = []
-
- for handle, asns in grouped.iteritems():
- try:
- child = self.resource_ca.children.get(handle = handle)
- except rpki.irdb.Child.DoesNotExist:
- if not ignore_missing_children:
- raise
- else:
- for asn in rpki.resource_set.resource_set_as(",".join(asns)):
- obj, created = rpki.irdb.ChildASN.objects.get_or_create(
- child = child,
- start_as = str(asn.min),
- end_as = str(asn.max))
- primary_keys.append(obj.pk)
-
- q = rpki.irdb.ChildASN.objects
- q = q.filter(child__issuer__exact = self.resource_ca)
- q = q.exclude(pk__in = primary_keys)
- q.delete()
-
-
- @django.db.transaction.commit_on_success
- def load_roa_requests(self, filename):
- """
- Whack IRDB to match roa.csv.
- """
-
- grouped = {}
-
- # format: p/n-m asn group
- for pnm, asn, group in csv_reader(filename, columns = 3, min_columns = 2):
- key = (asn, group or pnm)
- if key not in grouped:
- grouped[key] = []
- grouped[key].append(pnm)
-
- # Deleting and recreating all the ROA requests is inefficient,
- # but rpkid's current representation of ROA requests is wrong
- # (see #32), so it's not worth a lot of effort here as we're
- # just going to have to rewrite this soon anyway.
-
- self.resource_ca.roa_requests.all().delete()
-
- for key, pnms in grouped.iteritems():
- asn, group = key
-
- roa_request = self.resource_ca.roa_requests.create(asn = asn)
-
- for pnm in pnms:
- if ":" in pnm:
- p = rpki.resource_set.roa_prefix_ipv6.parse_str(pnm)
- v = 6
+ # In theory this could be rewritten using an .exists() filter.
+ parents = []
+ for parent in self.resource_ca.parents.all():
+ try:
+ _ = parent.repository # pylint: disable=W0612
+ except rpki.irdb.models.Repository.DoesNotExist:
+ parents.append(parent)
+ if len(parents) != 1:
+ self.log("No explicit parent_handle given and unable to guess")
+ raise CouldntFindRepoParent
+ parent = parents[0]
+ parent_handle = parent.handle
+ self.log("No explicit parent_handle given, guessing parent {}".format(parent_handle))
+
+ rpki.irdb.models.Repository.objects.get_or_certify(
+ issuer = self.resource_ca,
+ handle = parent_handle,
+ client_handle = x.get("publisher_handle"),
+ service_uri = x.get("service_uri"),
+ sia_base = x.get("sia_base"),
+ rrdp_notification_uri = x.get("rrdp_notification_uri"),
+ ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_repository_bpki_ta)),
+ parent = parent)
+
+
+ @django.db.transaction.atomic
+ def delete_repository(self, repository_handle):
+ """
+ Delete a repository of this RPKI entity.
+ """
+
+ self.resource_ca.repositories.get(handle = repository_handle).delete()
+
+
+ @django.db.transaction.atomic
+ def renew_children(self, child_handle, valid_until = None):
+ """
+ Update validity period for one child entity or, if child_handle is
+ None, for all child entities.
+ """
+
+ if child_handle is None:
+ children = self.resource_ca.children.all()
else:
- p = rpki.resource_set.roa_prefix_ipv4.parse_str(pnm)
- v = 4
- roa_request.prefixes.create(
- version = v,
- prefix = str(p.prefix),
- prefixlen = int(p.prefixlen),
- max_prefixlen = int(p.max_prefixlen))
-
+ children = self.resource_ca.children.filter(handle = child_handle)
- @django.db.transaction.commit_on_success
- def load_ghostbuster_requests(self, filename, parent = None):
- """
- Whack IRDB to match ghostbusters.vcard.
-
- This accepts one or more vCards from a file.
- """
-
- self.resource_ca.ghostbuster_requests.filter(parent = parent).delete()
-
- vcard = []
+ if valid_until is None:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
+ else:
+ valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
+ if valid_until < rpki.sundial.now():
+ raise PastExpiration("Specified new expiration time %s has passed" % valid_until)
+
+ self.log("New validity date %s" % valid_until)
+
+ for child in children:
+ child.valid_until = valid_until
+ child.save()
+
+
+ @django.db.transaction.atomic
+ def load_prefixes(self, csv_file, ignore_missing_children = False):
+ """
+ Whack IRDB to match prefixes.csv.
+ """
+
+ grouped4 = {}
+ grouped6 = {}
+
+ for handle, prefix in csv_reader(csv_file, columns = 2):
+ grouped = grouped6 if ":" in prefix else grouped4
+ if handle not in grouped:
+ grouped[handle] = []
+ grouped[handle].append(prefix)
+
+ primary_keys = []
+
+ for version, grouped, rset in ((4, grouped4, rpki.resource_set.resource_set_ipv4),
+ (6, grouped6, rpki.resource_set.resource_set_ipv6)):
+ for handle, prefixes in grouped.iteritems():
+ try:
+ child = self.resource_ca.children.get(handle = handle)
+ except rpki.irdb.models.Child.DoesNotExist:
+ if not ignore_missing_children:
+ raise
+ else:
+ for prefix in rset(",".join(prefixes)):
+ obj = rpki.irdb.models.ChildNet.objects.get_or_create(
+ child = child,
+ start_ip = str(prefix.min),
+ end_ip = str(prefix.max),
+ version = version)[0]
+ primary_keys.append(obj.pk)
+
+ q = rpki.irdb.models.ChildNet.objects
+ q = q.filter(child__issuer = self.resource_ca)
+ q = q.exclude(pk__in = primary_keys)
+ q.delete()
+
+
+ @django.db.transaction.atomic
+ def load_asns(self, csv_file, ignore_missing_children = False):
+ """
+ Whack IRDB to match asns.csv.
+ """
+
+ grouped = {}
+
+ for handle, asn in csv_reader(csv_file, columns = 2):
+ if handle not in grouped:
+ grouped[handle] = []
+ grouped[handle].append(asn)
+
+ primary_keys = []
+
+ for handle, asns in grouped.iteritems():
+ try:
+ child = self.resource_ca.children.get(handle = handle)
+ except rpki.irdb.models.Child.DoesNotExist:
+ if not ignore_missing_children:
+ raise
+ else:
+ for asn in rpki.resource_set.resource_set_as(",".join(asns)):
+ obj = rpki.irdb.models.ChildASN.objects.get_or_create(
+ child = child,
+ start_as = str(asn.min),
+ end_as = str(asn.max))[0]
+ primary_keys.append(obj.pk)
+
+ q = rpki.irdb.models.ChildASN.objects
+ q = q.filter(child__issuer = self.resource_ca)
+ q = q.exclude(pk__in = primary_keys)
+ q.delete()
+
+
+ @django.db.transaction.atomic
+ def load_roa_requests(self, csv_file):
+ """
+ Whack IRDB to match roa.csv.
+ """
+
+ grouped = {}
+
+ # format: p/n-m asn group
+ for pnm, asn, group in csv_reader(csv_file, columns = 3, min_columns = 2):
+ key = (asn, group or pnm)
+ if key not in grouped:
+ grouped[key] = []
+ grouped[key].append(pnm)
+
+ # Deleting and recreating all the ROA requests is inefficient,
+ # but rpkid's current representation of ROA requests is wrong
+ # (see #32), so it's not worth a lot of effort here as we're
+ # just going to have to rewrite this soon anyway.
+
+ self.resource_ca.roa_requests.all().delete()
+
+ for key, pnms in grouped.iteritems():
+ asn, group = key
+
+ roa_request = self.resource_ca.roa_requests.create(asn = asn)
+
+ for pnm in pnms:
+ if ":" in pnm:
+ p = rpki.resource_set.roa_prefix_ipv6.parse_str(pnm)
+ v = 6
+ else:
+ p = rpki.resource_set.roa_prefix_ipv4.parse_str(pnm)
+ v = 4
+ roa_request.prefixes.create(
+ version = v,
+ prefix = str(p.prefix),
+ prefixlen = int(p.prefixlen),
+ max_prefixlen = int(p.max_prefixlen))
+
+
+ @django.db.transaction.atomic
+ def load_ghostbuster_requests(self, vcard_file, parent = None):
+ """
+ Whack IRDB to match ghostbusters.vcard.
+
+ This accepts one or more vCards from a file.
+ """
+
+ self.resource_ca.ghostbuster_requests.filter(parent = parent).delete()
- for line in open(filename, "r"):
- if not vcard and not line.upper().startswith("BEGIN:VCARD"):
- continue
- vcard.append(line)
- if line.upper().startswith("END:VCARD"):
- self.resource_ca.ghostbuster_requests.create(vcard = "".join(vcard), parent = parent)
vcard = []
+ for line in vcard_file.read().splitlines(True):
+ if not vcard and not line.upper().startswith("BEGIN:VCARD"):
+ continue
+ vcard.append(line)
+ if line.upper().startswith("END:VCARD"):
+ self.resource_ca.ghostbuster_requests.create(vcard = "".join(vcard), parent = parent)
+ vcard = []
- def call_rpkid(self, *pdus):
- """
- Issue a call to rpkid, return result.
-
- Implementation is a little silly, constructs a wrapper object,
- invokes it once, then throws it away. Hard to do better without
- rewriting a bit of the HTTP code, as we want to be sure we're
- using the current BPKI certificate and key objects.
- """
-
- url = "http://%s:%s/left-right" % (
- self.cfg.get("rpkid_server_host", section = myrpki_section),
- self.cfg.get("rpkid_server_port", section = myrpki_section))
-
- rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid")
- irbe = self.server_ca.ee_certificates.get(purpose = "irbe")
-
- if len(pdus) == 1 and isinstance(pdus[0], types.GeneratorType):
- pdus = tuple(pdus[0])
- elif len(pdus) == 1 and isinstance(pdus[0], (tuple, list)):
- pdus = pdus[0]
-
- call_rpkid = rpki.async.sync_wrapper(
- disable_signal_handlers = self.disable_signal_handlers,
- func = rpki.http.caller(
- proto = rpki.left_right,
- client_key = irbe.private_key,
- client_cert = irbe.certificate,
- server_ta = self.server_ca.certificate,
- server_cert = rpkid.certificate,
- url = url,
- debug = self.show_xml))
-
- return call_rpkid(*pdus)
-
-
- def run_rpkid_now(self):
- """
- Poke rpkid to immediately run the cron job for the current handle.
-
- This method is used by the GUI when a user has changed something in the
- IRDB (ghostbuster, roa) which does not require a full synchronize() call,
- to force the object to be immediately issued.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, run_now = "yes"))
-
-
- def publish_world_now(self):
- """
- Poke rpkid to (re)publish everything for the current handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, publish_world_now = "yes"))
-
-
- def reissue(self):
- """
- Poke rpkid to reissue everything for the current handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, reissue = "yes"))
-
- def rekey(self):
- """
- Poke rpkid to rekey all RPKI certificates received for the current
- handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, rekey = "yes"))
-
-
- def revoke(self):
- """
- Poke rpkid to revoke old RPKI keys for the current handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, revoke = "yes"))
-
-
- def revoke_forgotten(self):
- """
- Poke rpkid to revoke old forgotten RPKI keys for the current handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, revoke_forgotten = "yes"))
-
-
- def clear_all_sql_cms_replay_protection(self):
- """
- Tell rpkid and pubd to clear replay protection for all SQL-based
- entities. This is a fairly blunt instrument, but as we don't
- expect this to be necessary except in the case of gross
- misconfiguration, it should suffice
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(action = "set", self_handle = ca.handle,
- clear_replay_protection = "yes")
- for ca in rpki.irdb.ResourceHolderCA.objects.all())
- if self.run_pubd:
- self.call_pubd(rpki.publication.client_elt.make_pdu(action = "set",
- client_handle = client.handle,
- clear_replay_protection = "yes")
- for client in self.server_ca.clients.all())
-
-
- def call_pubd(self, *pdus):
- """
- Issue a call to pubd, return result.
- Implementation is a little silly, constructs a wrapper object,
- invokes it once, then throws it away. Hard to do better without
- rewriting a bit of the HTTP code, as we want to be sure we're
- using the current BPKI certificate and key objects.
- """
-
- url = "http://%s:%s/control" % (
- self.cfg.get("pubd_server_host", section = myrpki_section),
- self.cfg.get("pubd_server_port", section = myrpki_section))
+ def call_rpkid(self, q_msg, suppress_error_check = False):
+ """
+ Issue a call to rpkid, return result.
+ """
- pubd = self.server_ca.ee_certificates.get(purpose = "pubd")
- irbe = self.server_ca.ee_certificates.get(purpose = "irbe")
+ url = "http://%s:%s/left-right" % (
+ self.cfg.get("rpkid_server_host", section = myrpki_section),
+ self.cfg.get("rpkid_server_port", section = myrpki_section))
- if len(pdus) == 1 and isinstance(pdus[0], types.GeneratorType):
- pdus = tuple(pdus[0])
- elif len(pdus) == 1 and isinstance(pdus[0], (tuple, list)):
- pdus = pdus[0]
+ rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid")
+ irbe = self.server_ca.ee_certificates.get(purpose = "irbe")
- call_pubd = rpki.async.sync_wrapper(
- disable_signal_handlers = self.disable_signal_handlers,
- func = rpki.http.caller(
- proto = rpki.publication,
- client_key = irbe.private_key,
- client_cert = irbe.certificate,
- server_ta = self.server_ca.certificate,
- server_cert = pubd.certificate,
- url = url,
- debug = self.show_xml))
+ r_msg = rpki.http_simple.client(
+ proto_cms_msg = rpki.left_right.cms_msg,
+ client_key = irbe.private_key,
+ client_cert = irbe.certificate,
+ server_ta = self.server_ca.certificate,
+ server_cert = rpkid.certificate,
+ url = url,
+ q_msg = q_msg,
+ debug = self.show_xml)
- return call_pubd(*pdus)
+ if not suppress_error_check:
+ self.check_error_report(r_msg)
+ return r_msg
- def check_error_report(self, pdus):
- """
- Check a response from rpkid or pubd for error_report PDUs, log and
- throw exceptions as needed.
- """
-
- if any(isinstance(pdu, (rpki.left_right.report_error_elt, rpki.publication.report_error_elt)) for pdu in pdus):
- for pdu in pdus:
- if isinstance(pdu, rpki.left_right.report_error_elt):
- self.log("rpkid reported failure: %s" % pdu.error_code)
- elif isinstance(pdu, rpki.publication.report_error_elt):
- self.log("pubd reported failure: %s" % pdu.error_code)
- else:
- continue
- if pdu.error_text:
- self.log(pdu.error_text)
- raise CouldntTalkToDaemon
+ def _rpkid_tenant_control(self, *bools):
+ assert all(isinstance(b, str) for b in bools)
+ q_msg = self.compose_left_right_query()
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", tenant_handle = self.handle)
+ for b in bools:
+ q_pdu.set(b, "yes")
+ return self.call_rpkid(q_msg)
- @django.db.transaction.commit_on_success
- def synchronize(self, *handles_to_poke):
- """
- Configure RPKI daemons with the data built up by the other
- commands in this program. Commands which modify the IRDB and want
- to whack everything into sync should call this when they're done,
- but be warned that this can be slow with a lot of CAs.
+ def run_rpkid_now(self):
+ """
+ Poke rpkid to immediately run the cron job for the current handle.
- Any arguments given are handles of CAs which should be poked with a
- <self run_now="yes"/> operation.
- """
+ This method is used by the GUI when a user has changed something in the
+ IRDB (ghostbuster, roa) which does not require a full synchronize() call,
+ to force the object to be immediately issued.
+ """
- for ca in rpki.irdb.ResourceHolderCA.objects.all():
- self.synchronize_rpkid_one_ca_core(ca, ca.handle in handles_to_poke)
- self.synchronize_pubd_core()
- self.synchronize_rpkid_deleted_core()
+ return self._rpkid_tenant_control("run_now")
- @django.db.transaction.commit_on_success
- def synchronize_ca(self, ca = None, poke = False):
- """
- Synchronize one CA. Most commands which modify a CA should call
- this. CA to synchronize defaults to the current resource CA.
- """
-
- if ca is None:
- ca = self.resource_ca
- self.synchronize_rpkid_one_ca_core(ca, poke)
-
-
- @django.db.transaction.commit_on_success
- def synchronize_deleted_ca(self):
- """
- Delete CAs which are present in rpkid's database but not in the
- IRDB.
- """
-
- self.synchronize_rpkid_deleted_core()
+ def publish_world_now(self):
+ """
+ Poke rpkid to (re)publish everything for the current handle.
+ """
+ return self._rpkid_tenant_control("publish_world_now")
- @django.db.transaction.commit_on_success
- def synchronize_pubd(self):
- """
- Synchronize pubd. Most commands which modify pubd should call this.
- """
- self.synchronize_pubd_core()
+ def reissue(self):
+ """
+ Poke rpkid to reissue everything for the current handle.
+ """
+ return self._rpkid_tenant_control("reissue")
- def synchronize_rpkid_one_ca_core(self, ca, poke = False):
- """
- Synchronize one CA. This is the core synchronization code. Don't
- call this directly, instead call one of the methods that calls
- this inside a Django commit wrapper.
- This method configures rpkid with data built up by the other
- commands in this program. Most commands which modify IRDB values
- related to rpkid should call this when they're done.
+ def rekey(self):
+ """
+ Poke rpkid to rekey all RPKI certificates received for the current
+ handle.
+ """
+
+ return self._rpkid_tenant_control("rekey")
- If poke is True, we append a left-right run_now operation for this
- CA to the end of whatever other commands this method generates.
- """
- # We can use a single BSC for everything -- except BSC key
- # rollovers. Drive off that bridge when we get to it.
-
- bsc_handle = "bsc"
-
- # A default RPKI CRL cycle time of six hours seems sane. One
- # might make a case for a day instead, but we've been running with
- # six hours for a while now and haven't seen a lot of whining.
-
- self_crl_interval = self.cfg.getint("self_crl_interval", 6 * 60 * 60, section = myrpki_section)
-
- # regen_margin now just controls how long before RPKI certificate
- # expiration we should regenerate; it used to control the interval
- # before RPKI CRL staleness at which to regenerate the CRL, but
- # using the same timer value for both of these is hopeless.
- #
- # A default regeneration margin of two weeks gives enough time for
- # humans to react. We add a two hour fudge factor in the hope
- # that this will regenerate certificates just *before* the
- # companion cron job warns of impending doom.
-
- self_regen_margin = self.cfg.getint("self_regen_margin", 14 * 24 * 60 * 60 + 2 * 60, section = myrpki_section)
-
- # See what rpkid already has on file for this entity.
-
- rpkid_reply = self.call_rpkid(
- rpki.left_right.self_elt.make_pdu( action = "get", tag = "self", self_handle = ca.handle),
- rpki.left_right.bsc_elt.make_pdu( action = "list", tag = "bsc", self_handle = ca.handle),
- rpki.left_right.repository_elt.make_pdu(action = "list", tag = "repository", self_handle = ca.handle),
- rpki.left_right.parent_elt.make_pdu( action = "list", tag = "parent", self_handle = ca.handle),
- rpki.left_right.child_elt.make_pdu( action = "list", tag = "child", self_handle = ca.handle))
-
- self_pdu = rpkid_reply[0]
- bsc_pdus = dict((x.bsc_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.bsc_elt))
- repository_pdus = dict((x.repository_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.repository_elt))
- parent_pdus = dict((x.parent_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.parent_elt))
- child_pdus = dict((x.child_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.child_elt))
-
- rpkid_query = []
-
- self_cert, created = rpki.irdb.HostedCA.objects.get_or_certify(
- issuer = self.server_ca,
- hosted = ca)
-
- # There should be exactly one <self/> object per hosted entity, by definition
-
- if (isinstance(self_pdu, rpki.left_right.report_error_elt) or
- self_pdu.crl_interval != self_crl_interval or
- self_pdu.regen_margin != self_regen_margin or
- self_pdu.bpki_cert != self_cert.certificate):
- rpkid_query.append(rpki.left_right.self_elt.make_pdu(
- action = "create" if isinstance(self_pdu, rpki.left_right.report_error_elt) else "set",
- tag = "self",
- self_handle = ca.handle,
- bpki_cert = ca.certificate,
- crl_interval = self_crl_interval,
- regen_margin = self_regen_margin))
-
- # In general we only need one <bsc/> per <self/>. BSC objects
- # are a little unusual in that the keypair and PKCS #10
- # subelement is generated by rpkid, so complete setup requires
- # two round trips.
-
- bsc_pdu = bsc_pdus.pop(bsc_handle, None)
-
- if bsc_pdu is None:
- rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(
- action = "create",
- tag = "bsc",
- self_handle = ca.handle,
- bsc_handle = bsc_handle,
- generate_keypair = "yes"))
-
- elif bsc_pdu.pkcs10_request is None:
- rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(
- action = "set",
- tag = "bsc",
- self_handle = ca.handle,
- bsc_handle = bsc_handle,
- generate_keypair = "yes"))
-
- rpkid_query.extend(rpki.left_right.bsc_elt.make_pdu(
- action = "destroy", self_handle = ca.handle, bsc_handle = b) for b in bsc_pdus)
-
- # If we've already got actions queued up, run them now, so we
- # can finish setting up the BSC before anything tries to use it.
-
- if rpkid_query:
- rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(action = "list", tag = "bsc", self_handle = ca.handle))
- rpkid_reply = self.call_rpkid(rpkid_query)
- bsc_pdus = dict((x.bsc_handle, x)
- for x in rpkid_reply
- if isinstance(x, rpki.left_right.bsc_elt) and x.action == "list")
- bsc_pdu = bsc_pdus.pop(bsc_handle, None)
- self.check_error_report(rpkid_reply)
-
- rpkid_query = []
-
- assert bsc_pdu.pkcs10_request is not None
-
- bsc, created = rpki.irdb.BSC.objects.get_or_certify(
- issuer = ca,
- handle = bsc_handle,
- pkcs10 = bsc_pdu.pkcs10_request)
-
- if bsc_pdu.signing_cert != bsc.certificate or bsc_pdu.signing_cert_crl != ca.latest_crl:
- rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(
- action = "set",
- tag = "bsc",
- self_handle = ca.handle,
- bsc_handle = bsc_handle,
- signing_cert = bsc.certificate,
- signing_cert_crl = ca.latest_crl))
-
- # At present we need one <repository/> per <parent/>, not because
- # rpkid requires that, but because pubd does. pubd probably should
- # be fixed to support a single client allowed to update multiple
- # trees, but for the moment the easiest way forward is just to
- # enforce a 1:1 mapping between <parent/> and <repository/> objects
-
- for repository in ca.repositories.all():
-
- repository_pdu = repository_pdus.pop(repository.handle, None)
-
- if (repository_pdu is None or
- repository_pdu.bsc_handle != bsc_handle or
- repository_pdu.peer_contact_uri != repository.service_uri or
- repository_pdu.bpki_cert != repository.certificate):
- rpkid_query.append(rpki.left_right.repository_elt.make_pdu(
- action = "create" if repository_pdu is None else "set",
- tag = repository.handle,
- self_handle = ca.handle,
- repository_handle = repository.handle,
- bsc_handle = bsc_handle,
- peer_contact_uri = repository.service_uri,
- bpki_cert = repository.certificate))
-
- rpkid_query.extend(rpki.left_right.repository_elt.make_pdu(
- action = "destroy", self_handle = ca.handle, repository_handle = r) for r in repository_pdus)
-
- # <parent/> setup code currently assumes 1:1 mapping between
- # <repository/> and <parent/>, and further assumes that the handles
- # for an associated pair are the identical (that is:
- # parent.repository_handle == parent.parent_handle).
- #
- # If no such repository exists, our choices are to ignore the
- # parent entry or throw an error. For now, we ignore the parent.
-
- for parent in ca.parents.all():
-
- try:
-
- parent_pdu = parent_pdus.pop(parent.handle, None)
-
- if (parent_pdu is None or
- parent_pdu.bsc_handle != bsc_handle or
- parent_pdu.repository_handle != parent.handle or
- parent_pdu.peer_contact_uri != parent.service_uri or
- parent_pdu.sia_base != parent.repository.sia_base or
- parent_pdu.sender_name != parent.child_handle or
- parent_pdu.recipient_name != parent.parent_handle or
- parent_pdu.bpki_cms_cert != parent.certificate):
- rpkid_query.append(rpki.left_right.parent_elt.make_pdu(
- action = "create" if parent_pdu is None else "set",
- tag = parent.handle,
- self_handle = ca.handle,
- parent_handle = parent.handle,
- bsc_handle = bsc_handle,
- repository_handle = parent.handle,
- peer_contact_uri = parent.service_uri,
- sia_base = parent.repository.sia_base,
- sender_name = parent.child_handle,
- recipient_name = parent.parent_handle,
- bpki_cms_cert = parent.certificate))
-
- except rpki.irdb.Repository.DoesNotExist:
- pass
-
- try:
-
- parent_pdu = parent_pdus.pop(ca.handle, None)
-
- if (parent_pdu is None or
- parent_pdu.bsc_handle != bsc_handle or
- parent_pdu.repository_handle != ca.handle or
- parent_pdu.peer_contact_uri != ca.rootd.service_uri or
- parent_pdu.sia_base != ca.rootd.repository.sia_base or
- parent_pdu.sender_name != ca.handle or
- parent_pdu.recipient_name != ca.handle or
- parent_pdu.bpki_cms_cert != ca.rootd.certificate):
- rpkid_query.append(rpki.left_right.parent_elt.make_pdu(
- action = "create" if parent_pdu is None else "set",
- tag = ca.handle,
- self_handle = ca.handle,
- parent_handle = ca.handle,
- bsc_handle = bsc_handle,
- repository_handle = ca.handle,
- peer_contact_uri = ca.rootd.service_uri,
- sia_base = ca.rootd.repository.sia_base,
- sender_name = ca.handle,
- recipient_name = ca.handle,
- bpki_cms_cert = ca.rootd.certificate))
-
- except rpki.irdb.Rootd.DoesNotExist:
- pass
-
- rpkid_query.extend(rpki.left_right.parent_elt.make_pdu(
- action = "destroy", self_handle = ca.handle, parent_handle = p) for p in parent_pdus)
-
- # Children are simpler than parents, because they call us, so no URL
- # to construct and figuring out what certificate to use is their
- # problem, not ours.
-
- for child in ca.children.all():
-
- child_pdu = child_pdus.pop(child.handle, None)
-
- if (child_pdu is None or
- child_pdu.bsc_handle != bsc_handle or
- child_pdu.bpki_cert != child.certificate):
- rpkid_query.append(rpki.left_right.child_elt.make_pdu(
- action = "create" if child_pdu is None else "set",
- tag = child.handle,
- self_handle = ca.handle,
- child_handle = child.handle,
- bsc_handle = bsc_handle,
- bpki_cert = child.certificate))
-
- rpkid_query.extend(rpki.left_right.child_elt.make_pdu(
- action = "destroy", self_handle = ca.handle, child_handle = c) for c in child_pdus)
-
- # If caller wants us to poke rpkid, add that to the very end of the message
-
- if poke:
- rpkid_query.append(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = ca.handle, run_now = "yes"))
-
- # If we changed anything, ship updates off to rpkid
-
- if rpkid_query:
- rpkid_reply = self.call_rpkid(rpkid_query)
- bsc_pdus = dict((x.bsc_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.bsc_elt))
- if bsc_handle in bsc_pdus and bsc_pdus[bsc_handle].pkcs10_request:
- bsc_req = bsc_pdus[bsc_handle].pkcs10_request
- self.check_error_report(rpkid_reply)
-
-
- def synchronize_pubd_core(self):
- """
- Configure pubd with data built up by the other commands in this
- program. This is the core synchronization code. Don't call this
- directly, instead call a methods that calls this inside a Django
- commit wrapper.
-
- This method configures pubd with data built up by the other
- commands in this program. Commands which modify IRDB fields
- related to pubd should call this when they're done.
- """
+ def revoke(self):
+ """
+ Poke rpkid to revoke old RPKI keys for the current handle.
+ """
- # If we're not running pubd, the rest of this is a waste of time
+ return self._rpkid_tenant_control("revoke")
- if not self.run_pubd:
- return
- # Make sure that pubd's BPKI CRL is up to date.
+ def revoke_forgotten(self):
+ """
+ Poke rpkid to revoke old forgotten RPKI keys for the current handle.
+ """
+
+ return self._rpkid_tenant_control("revoke_forgotten")
- self.call_pubd(rpki.publication.config_elt.make_pdu(
- action = "set",
- bpki_crl = self.server_ca.latest_crl))
- # See what pubd already has on file
+ def clear_all_sql_cms_replay_protection(self):
+ """
+ Tell rpkid and pubd to clear replay protection for all SQL-based
+ entities. This is a fairly blunt instrument, but as we don't
+ expect this to be necessary except in the case of gross
+ misconfiguration, it should suffice.
+ """
- pubd_reply = self.call_pubd(rpki.publication.client_elt.make_pdu(action = "list"))
- client_pdus = dict((x.client_handle, x) for x in pubd_reply if isinstance(x, rpki.publication.client_elt))
- pubd_query = []
+ if self.run_rpkid:
+ q_msg = self.compose_left_right_query()
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "set",
+ tenant_handle = ca.handle, clear_replay_protection = "yes")
+ self.call_rpkid(q_msg)
- # Check all clients
+ if self.run_pubd:
+ q_msg = self._compose_publication_control_query()
+ for client in self.server_ca.clients.all():
+ SubElement(q_msg, rpki.publication_control.tag_client, action = "set",
+ client_handle = client.handle, clear_replay_protection = "yes")
+ self.call_pubd(q_msg)
- for client in self.server_ca.clients.all():
- client_pdu = client_pdus.pop(client.handle, None)
+ def call_pubd(self, q_msg):
+ """
+ Issue a call to pubd, return result.
+ """
- if (client_pdu is None or
- client_pdu.base_uri != client.sia_base or
- client_pdu.bpki_cert != client.certificate):
- pubd_query.append(rpki.publication.client_elt.make_pdu(
- action = "create" if client_pdu is None else "set",
- client_handle = client.handle,
- bpki_cert = client.certificate,
- base_uri = client.sia_base))
+ url = "http://%s:%s/control" % (
+ self.cfg.get("pubd_server_host", section = myrpki_section),
+ self.cfg.get("pubd_server_port", section = myrpki_section))
- # Delete any unknown clients
+ pubd = self.server_ca.ee_certificates.get(purpose = "pubd")
+ irbe = self.server_ca.ee_certificates.get(purpose = "irbe")
- pubd_query.extend(rpki.publication.client_elt.make_pdu(
- action = "destroy", client_handle = p) for p in client_pdus)
+ r_msg = rpki.http_simple.client(
+ proto_cms_msg = rpki.publication_control.cms_msg,
+ client_key = irbe.private_key,
+ client_cert = irbe.certificate,
+ server_ta = self.server_ca.certificate,
+ server_cert = pubd.certificate,
+ url = url,
+ q_msg = q_msg,
+ debug = self.show_xml)
- # If we changed anything, ship updates off to pubd
+ self.check_error_report(r_msg)
+ return r_msg
- if pubd_query:
- pubd_reply = self.call_pubd(pubd_query)
- self.check_error_report(pubd_reply)
+ def check_error_report(self, r_msg):
+ """
+ Check a response from rpkid or pubd for error_report PDUs, log and
+ throw exceptions as needed.
+ """
- def synchronize_rpkid_deleted_core(self):
- """
- Remove any <self/> objects present in rpkid's database but not
- present in the IRDB. This is the core synchronization code.
- Don't call this directly, instead call a methods that calls this
- inside a Django commit wrapper.
- """
+ failed = False
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_report_error):
+ failed = True
+ self.log("rpkid reported failure: %s" % r_pdu.get("error_code"))
+ if r_pdu.text:
+ self.log(r_pdu.text)
+ for r_pdu in r_msg.getiterator(rpki.publication_control.tag_report_error):
+ failed = True
+ self.log("pubd reported failure: %s" % r_pdu.get("error_code"))
+ if r_pdu.text:
+ self.log(r_pdu.text)
+ if failed:
+ raise CouldntTalkToDaemon
+
+
+ @django.db.transaction.atomic
+ def synchronize(self, *handles_to_poke):
+ """
+ Configure RPKI daemons with the data built up by the other
+ commands in this program. Commands which modify the IRDB and want
+ to whack everything into sync should call this when they're done,
+ but be warned that this can be slow with a lot of CAs.
- rpkid_reply = self.call_rpkid(rpki.left_right.self_elt.make_pdu(action = "list"))
- self.check_error_report(rpkid_reply)
+ Any arguments given are handles of CAs which should be poked with a
+ <tenant run_now="yes"/> operation.
+ """
+
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ self.synchronize_rpkid_one_ca_core(ca, ca.handle in handles_to_poke)
+ self.synchronize_pubd_core()
+ self.synchronize_rpkid_deleted_core()
+
+
+ @django.db.transaction.atomic
+ def synchronize_ca(self, ca = None, poke = False):
+ """
+ Synchronize one CA. Most commands which modify a CA should call
+ this. CA to synchronize defaults to the current resource CA.
+ """
+
+ if ca is None:
+ ca = self.resource_ca
+ self.synchronize_rpkid_one_ca_core(ca, poke)
+
+
+ @django.db.transaction.atomic
+ def synchronize_deleted_ca(self):
+ """
+ Delete CAs which are present in rpkid's database but not in the
+ IRDB.
+ """
+
+ self.synchronize_rpkid_deleted_core()
+
+
+ @django.db.transaction.atomic
+ def synchronize_pubd(self):
+ """
+ Synchronize pubd. Most commands which modify pubd should call this.
+ """
+
+ self.synchronize_pubd_core()
+
+
+ def synchronize_rpkid_one_ca_core(self, ca, poke = False):
+ """
+ Synchronize one CA. This is the core synchronization code. Don't
+ call this directly, instead call one of the methods that calls
+ this inside a Django commit wrapper.
+
+ This method configures rpkid with data built up by the other
+ commands in this program. Most commands which modify IRDB values
+ related to rpkid should call this when they're done.
+
+ If poke is True, we append a left-right run_now operation for this
+ CA to the end of whatever other commands this method generates.
+ """
+
+ # pylint: disable=C0330
+
+ # We can use a single BSC for everything -- except BSC key
+ # rollovers. Drive off that bridge when we get to it.
+
+ bsc_handle = "bsc"
+
+ # A default RPKI CRL cycle time of six hours seems sane. One
+ # might make a case for a day instead, but we've been running with
+ # six hours for a while now and haven't seen a lot of whining.
+
+ tenant_crl_interval = self.cfg.getint("tenant_crl_interval",
+ 6 * 60 * 60,
+ section = myrpki_section)
+
+ # regen_margin now just controls how long before RPKI certificate
+ # expiration we should regenerate; it used to control the interval
+ # before RPKI CRL staleness at which to regenerate the CRL, but
+ # using the same timer value for both of these is hopeless.
+ #
+ # A default regeneration margin of two weeks gives enough time for
+ # humans to react. We add a two hour fudge factor in the hope
+ # that this will regenerate certificates just *before* the
+ # companion cron job warns of impending doom.
+
+ tenant_regen_margin = self.cfg.getint("tenant_regen_margin",
+ 14 * 24 * 60 * 60 + 2 * 60,
+ section = myrpki_section)
+
+ # See what rpkid already has on file for this entity.
+
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "get", tenant_handle = ca.handle)
+ SubElement(q_msg, rpki.left_right.tag_bsc, action = "list", tenant_handle = ca.handle)
+ SubElement(q_msg, rpki.left_right.tag_repository, action = "list", tenant_handle = ca.handle)
+ SubElement(q_msg, rpki.left_right.tag_parent, action = "list", tenant_handle = ca.handle)
+ SubElement(q_msg, rpki.left_right.tag_child, action = "list", tenant_handle = ca.handle)
+
+ r_msg = self.call_rpkid(q_msg, suppress_error_check = True)
+
+ self.check_error_report(r_msg)
+
+ tenant_pdu = r_msg.find(rpki.left_right.tag_tenant)
+
+ bsc_pdus = dict((r_pdu.get("bsc_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_bsc))
+ repository_pdus = dict((r_pdu.get("repository_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_repository))
+ parent_pdus = dict((r_pdu.get("parent_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_parent))
+ child_pdus = dict((r_pdu.get("child_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_child))
+
+ q_msg = self.compose_left_right_query()
+
+ tenant_cert = rpki.irdb.models.HostedCA.objects.get_or_certify(
+ issuer = self.server_ca,
+ hosted = ca)[0]
+
+ # There should be exactly one <tenant/> object per hosted entity, by definition
+
+ if (tenant_pdu is None or
+ tenant_pdu.get("crl_interval") != str(tenant_crl_interval) or
+ tenant_pdu.get("regen_margin") != str(tenant_regen_margin) or
+ tenant_pdu.findtext(rpki.left_right.tag_bpki_cert,
+ "").decode("base64") != tenant_cert.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant,
+ action = "create" if tenant_pdu is None else "set",
+ tag = "tenant",
+ tenant_handle = ca.handle,
+ crl_interval = str(tenant_crl_interval),
+ regen_margin = str(tenant_regen_margin))
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.certificate.get_Base64()
+
+ # In general we only need one <bsc/> per <tenant/>. BSC objects
+ # are a little unusual in that the keypair and PKCS #10
+ # subelement are generated by rpkid, so complete setup requires
+ # two round trips.
+
+ bsc_pdu = bsc_pdus.pop(bsc_handle, None)
+
+ if bsc_pdu is None or bsc_pdu.find(rpki.left_right.tag_pkcs10_request) is None:
+ SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "create" if bsc_pdu is None else "set",
+ tag = "bsc",
+ tenant_handle = ca.handle,
+ bsc_handle = bsc_handle,
+ generate_keypair = "yes")
+
+ for bsc_handle in bsc_pdus:
+ SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "destroy", tenant_handle = ca.handle, bsc_handle = bsc_handle)
+
+ # If we've already got actions queued up, run them now, so we
+ # can finish setting up the BSC before anything tries to use it.
+
+ if len(q_msg) > 0:
+ SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "list", tag = "bsc", tenant_handle = ca.handle)
+ r_msg = self.call_rpkid(q_msg)
+ bsc_pdus = dict((r_pdu.get("bsc_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_bsc)
+ if r_pdu.get("action") == "list")
+ bsc_pdu = bsc_pdus.pop(bsc_handle, None)
+
+ q_msg = self.compose_left_right_query()
+
+ bsc_pkcs10 = bsc_pdu.find(rpki.left_right.tag_pkcs10_request)
+ assert bsc_pkcs10 is not None
+
+ bsc = rpki.irdb.models.BSC.objects.get_or_certify(
+ issuer = ca,
+ handle = bsc_handle,
+ pkcs10 = rpki.x509.PKCS10(Base64 = bsc_pkcs10.text))[0]
+
+ if (bsc_pdu.findtext(rpki.left_right.tag_signing_cert,
+ "").decode("base64") != bsc.certificate.get_DER() or
+ bsc_pdu.findtext(rpki.left_right.tag_signing_cert_crl,
+ "").decode("base64") != ca.latest_crl.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "set",
+ tag = "bsc",
+ tenant_handle = ca.handle,
+ bsc_handle = bsc_handle)
+ SubElement(q_pdu, rpki.left_right.tag_signing_cert).text = bsc.certificate.get_Base64()
+ SubElement(q_pdu, rpki.left_right.tag_signing_cert_crl).text = ca.latest_crl.get_Base64()
+
+ # At present we need one <repository/> per <parent/>, not because
+ # rpkid requires that, but because pubd does. pubd probably should
+ # be fixed to support a single client allowed to update multiple
+ # trees, but for the moment the easiest way forward is just to
+ # enforce a 1:1 mapping between <parent/> and <repository/> objects
+
+ for repository in ca.repositories.all():
+
+ repository_pdu = repository_pdus.pop(repository.handle, None)
+
+ if (repository_pdu is None or
+ repository_pdu.get("bsc_handle") != bsc_handle or
+ repository_pdu.get("peer_contact_uri") != repository.service_uri or
+ repository_pdu.get("rrdp_notification_uri") != repository.rrdp_notification_uri or
+ repository_pdu.findtext(rpki.left_right.tag_bpki_cert,
+ "").decode("base64") != repository.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_repository,
+ action = "create" if repository_pdu is None else "set",
+ tag = repository.handle,
+ tenant_handle = ca.handle,
+ repository_handle = repository.handle,
+ bsc_handle = bsc_handle,
+ peer_contact_uri = repository.service_uri)
+ if repository.rrdp_notification_uri:
+ q_pdu.set("rrdp_notification_uri", repository.rrdp_notification_uri)
+ SubElement(q_pdu,
+ rpki.left_right.tag_bpki_cert).text = repository.certificate.get_Base64()
+
+ for repository_handle in repository_pdus:
+ SubElement(q_msg, rpki.left_right.tag_repository, action = "destroy",
+ tenant_handle = ca.handle, repository_handle = repository_handle)
+
+ # <parent/> setup code currently assumes 1:1 mapping between
+ # <repository/> and <parent/>, and further assumes that the handles
+ # for an associated pair are identical (that is:
+ # parent.repository_handle == parent.parent_handle).
+ #
+ # If no such repository exists, our choices are to ignore the
+ # parent entry or throw an error. For now, we ignore the parent.
+
+ for parent in ca.parents.all():
+
+ try:
+ parent_pdu = parent_pdus.pop(parent.handle, None)
+
+ if (parent_pdu is None or
+ parent_pdu.get("bsc_handle") != bsc_handle or
+ parent_pdu.get("repository_handle") != parent.handle or
+ parent_pdu.get("peer_contact_uri") != parent.service_uri or
+ parent_pdu.get("sia_base") != parent.repository.sia_base or
+ parent_pdu.get("sender_name") != parent.child_handle or
+ parent_pdu.get("recipient_name") != parent.parent_handle or
+ parent_pdu.get("root_asn_resources", "") != parent.asn_resources or
+ parent_pdu.get("root_ipv4_resources", "") != parent.ipv4_resources or
+ parent_pdu.get("root_ipv6_resources", "") != parent.ipv6_resources or
+ parent_pdu.findtext(rpki.left_right.tag_bpki_cert,
+ "").decode("base64") != parent.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_parent,
+ action = "create" if parent_pdu is None else "set",
+ tag = parent.handle,
+ tenant_handle = ca.handle,
+ parent_handle = parent.handle,
+ bsc_handle = bsc_handle,
+ repository_handle = parent.handle,
+ peer_contact_uri = parent.service_uri,
+ sia_base = parent.repository.sia_base,
+ sender_name = parent.child_handle,
+ recipient_name = parent.parent_handle,
+ root_asn_resources = parent.asn_resources,
+ root_ipv4_resources = parent.ipv4_resources,
+ root_ipv6_resources = parent.ipv6_resources)
+ SubElement(q_pdu,
+ rpki.left_right.tag_bpki_cert).text = parent.certificate.get_Base64()
+
+ except rpki.irdb.models.Repository.DoesNotExist:
+ pass
+
+ for parent_handle in parent_pdus:
+ SubElement(q_msg, rpki.left_right.tag_parent, action = "destroy",
+ tenant_handle = ca.handle, parent_handle = parent_handle)
+
+ # Children are simpler than parents, because they call us, so no URL
+ # to construct and figuring out what certificate to use is their
+ # problem, not ours.
+
+ for child in ca.children.all():
+
+ child_pdu = child_pdus.pop(child.handle, None)
+
+ if (child_pdu is None or
+ child_pdu.get("bsc_handle") != bsc_handle or
+ child_pdu.findtext(rpki.left_right.tag_bpki_cert).decode("base64") != child.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_child,
+ action = "create" if child_pdu is None else "set",
+ tag = child.handle,
+ tenant_handle = ca.handle,
+ child_handle = child.handle,
+ bsc_handle = bsc_handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = child.certificate.get_Base64()
+
+ for child_handle in child_pdus:
+ SubElement(q_msg, rpki.left_right.tag_child, action = "destroy",
+ tenant_handle = ca.handle, child_handle = child_handle)
+
+ # If caller wants us to poke rpkid, add that to the very end of the message
+
+ if poke:
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", tenant_handle = ca.handle, run_now = "yes")
+
+ # If we changed anything, ship updates off to rpkid.
+
+ if len(q_msg) > 0:
+ self.call_rpkid(q_msg)
+
+
+ def synchronize_pubd_core(self):
+ """
+ Configure pubd with data built up by the other commands in this
+ program. This is the core synchronization code. Don't call this
+        directly, instead call a method that calls this inside a Django
+ commit wrapper.
+
+ This method configures pubd with data built up by the other
+ commands in this program. Commands which modify IRDB fields
+ related to pubd should call this when they're done.
+ """
+
+ # pylint: disable=C0330
+
+ # If we're not running pubd, the rest of this is a waste of time
+
+ if not self.run_pubd:
+ return
+
+ # See what pubd already has on file
+
+ q_msg = self._compose_publication_control_query()
+ SubElement(q_msg, rpki.publication_control.tag_client, action = "list")
+ r_msg = self.call_pubd(q_msg)
+ client_pdus = dict((r_pdu.get("client_handle"), r_pdu)
+ for r_pdu in r_msg)
+
+ # Check all clients
+
+ q_msg = self._compose_publication_control_query()
+
+ for client in self.server_ca.clients.all():
+
+ client_pdu = client_pdus.pop(client.handle, None)
+
+ if (client_pdu is None or
+ client_pdu.get("base_uri") != client.sia_base or
+ client_pdu.findtext(rpki.publication_control.tag_bpki_cert, "").decode("base64") != client.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.publication_control.tag_client,
+ action = "create" if client_pdu is None else "set",
+ client_handle = client.handle,
+ base_uri = client.sia_base)
+ SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = client.certificate.get_Base64()
+
+ # Delete any unknown clients
+
+ for client_handle in client_pdus:
+ SubElement(q_msg, rpki.publication_control.tag_client, action = "destroy", client_handle = client_handle)
- self_handles = set(s.self_handle for s in rpkid_reply)
- ca_handles = set(ca.handle for ca in rpki.irdb.ResourceHolderCA.objects.all())
- assert ca_handles <= self_handles
+ # If we changed anything, ship updates off to pubd
- rpkid_query = [rpki.left_right.self_elt.make_pdu(action = "destroy", self_handle = handle)
- for handle in (self_handles - ca_handles)]
+ if len(q_msg) > 0:
+ self.call_pubd(q_msg)
- if rpkid_query:
- rpkid_reply = self.call_rpkid(rpkid_query)
- self.check_error_report(rpkid_reply)
+ def synchronize_rpkid_deleted_core(self):
+ """
+ Remove any <tenant/> objects present in rpkid's database but not
+ present in the IRDB. This is the core synchronization code.
+        Don't call this directly, instead call a method that calls this
+ inside a Django commit wrapper.
+ """
- @django.db.transaction.commit_on_success
- def add_ee_certificate_request(self, pkcs10, resources):
- """
- Check a PKCS #10 request to see if it complies with the
- specification for a RPKI EE certificate; if it does, add an
- EECertificateRequest for it to the IRDB.
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "list")
+ r_msg = self.call_rpkid(q_msg)
- Not yet sure what we want for update and delete semantics here, so
- for the moment this is straight addition. See methods like
- .load_asns() and .load_prefixes() for other strategies.
- """
+ tenant_handles = set(s.get("tenant_handle") for s in r_msg)
+ ca_handles = set(ca.handle for ca in rpki.irdb.models.ResourceHolderCA.objects.all())
+ assert ca_handles <= tenant_handles
- pkcs10.check_valid_request_ee()
- ee_request = self.resource_ca.ee_certificate_requests.create(
- pkcs10 = pkcs10,
- gski = pkcs10.gSKI(),
- valid_until = resources.valid_until)
- for r in resources.asn:
- ee_request.asns.create(start_as = str(r.min), end_as = str(r.max))
- for r in resources.v4:
- ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 4)
- for r in resources.v6:
- ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 6)
-
-
- @django.db.transaction.commit_on_success
- def add_router_certificate_request(self, router_certificate_request_xml, valid_until = None):
- """
- Read XML file containing one or more router certificate requests,
- attempt to add request(s) to IRDB.
+ q_msg = self.compose_left_right_query()
+ for handle in (tenant_handles - ca_handles):
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "destroy", tenant_handle = handle)
- Check each PKCS #10 request to see if it complies with the
- specification for a router certificate; if it does, create an EE
- certificate request for it along with the ASN resources and
- router-ID supplied in the XML.
- """
+ if len(q_msg) > 0:
+ self.call_rpkid(q_msg)
- xml = ElementTree(file = router_certificate_request_xml).getroot()
- rpki.relaxng.router_certificate.assertValid(xml)
- for req in xml.getiterator(routercert_xmlns + "router_certificate_request"):
+ @django.db.transaction.atomic
+ def add_ee_certificate_request(self, pkcs10, resources):
+ """
+ Check a PKCS #10 request to see if it complies with the
+ specification for a RPKI EE certificate; if it does, add an
+ EECertificateRequest for it to the IRDB.
- pkcs10 = rpki.x509.PKCS10(Base64 = req.text)
- router_id = long(req.get("router_id"))
- asns = rpki.resource_set.resource_set_as(req.get("asn"))
- if not valid_until:
- valid_until = req.get("valid_until")
+ Not yet sure what we want for update and delete semantics here, so
+ for the moment this is straight addition. See methods like
+ .load_asns() and .load_prefixes() for other strategies.
+ """
- if valid_until and isinstance(valid_until, (str, unicode)):
- valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
+ pkcs10.check_valid_request_ee()
+ ee_request = self.resource_ca.ee_certificate_requests.create(
+ pkcs10 = pkcs10,
+ gski = pkcs10.gSKI(),
+ valid_until = resources.valid_until)
+ for r in resources.asn:
+ ee_request.asns.create(start_as = str(r.min), end_as = str(r.max))
+ for r in resources.v4:
+ ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 4)
+ for r in resources.v6:
+ ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 6)
- if not valid_until:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
- elif valid_until < rpki.sundial.now():
- raise PastExpiration("Specified expiration date %s has already passed" % valid_until)
- pkcs10.check_valid_request_router()
+ @django.db.transaction.atomic
+ def add_router_certificate_request(self, xml_file, valid_until = None):
+ """
+ Read XML file containing one or more router certificate requests,
+ attempt to add request(s) to IRDB.
- cn = "ROUTER-%08x" % asns[0].min
- sn = "%08x" % router_id
+ Check each PKCS #10 request to see if it complies with the
+ specification for a router certificate; if it does, create an EE
+ certificate request for it along with the ASN resources and
+ router-ID supplied in the XML.
+ """
- ee_request = self.resource_ca.ee_certificate_requests.create(
- pkcs10 = pkcs10,
- gski = pkcs10.gSKI(),
- valid_until = valid_until,
- cn = cn,
- sn = sn,
- eku = rpki.oids.id_kp_bgpsec_router)
+ x = etree_read(xml_file, schema = rpki.relaxng.router_certificate)
- for r in asns:
- ee_request.asns.create(start_as = str(r.min), end_as = str(r.max))
+ for x in x.getiterator(tag_router_certificate_request):
+ pkcs10 = rpki.x509.PKCS10(Base64 = x.text)
+ router_id = long(x.get("router_id"))
+ asns = rpki.resource_set.resource_set_as(x.get("asn"))
+ if not valid_until:
+ valid_until = x.get("valid_until")
- @django.db.transaction.commit_on_success
- def delete_router_certificate_request(self, gski):
- """
- Delete a router certificate request from this RPKI entity.
- """
+ if valid_until and isinstance(valid_until, (str, unicode)):
+ valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
- self.resource_ca.ee_certificate_requests.get(gski = gski).delete()
+ if not valid_until:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
+ elif valid_until < rpki.sundial.now():
+ raise PastExpiration("Specified expiration date %s has already passed" % valid_until)
+
+ pkcs10.check_valid_request_router()
+
+ cn = "ROUTER-%08x" % asns[0].min
+ sn = "%08x" % router_id
+
+ ee_request = self.resource_ca.ee_certificate_requests.create(
+ pkcs10 = pkcs10,
+ gski = pkcs10.gSKI(),
+ valid_until = valid_until,
+ cn = cn,
+ sn = sn,
+ eku = rpki.oids.id_kp_bgpsec_router)
+
+ for r in asns:
+ ee_request.asns.create(start_as = str(r.min), end_as = str(r.max))
+
+
+ @django.db.transaction.atomic
+ def delete_router_certificate_request(self, gski):
+ """
+ Delete a router certificate request from this RPKI entity.
+ """
+
+ self.resource_ca.ee_certificate_requests.get(gski = gski).delete()
diff --git a/rpki/irdbd.py b/rpki/irdbd.py
index ae08b6fb..98fe83ea 100644
--- a/rpki/irdbd.py
+++ b/rpki/irdbd.py
@@ -25,8 +25,7 @@ import os
import time
import logging
import argparse
-import urlparse
-import rpki.http
+import rpki.http_simple
import rpki.config
import rpki.resource_set
import rpki.relaxng
@@ -36,241 +35,214 @@ import rpki.log
import rpki.x509
import rpki.daemonize
+from lxml.etree import Element, SubElement, tostring as ElementToString
+
logger = logging.getLogger(__name__)
class main(object):
- def handle_list_resources(self, q_pdu, r_msg):
- child = rpki.irdb.Child.objects.get(
- issuer__handle__exact = q_pdu.self_handle,
- handle = q_pdu.child_handle)
- resources = child.resource_bag
- r_pdu = rpki.left_right.list_resources_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.child_handle = q_pdu.child_handle
- r_pdu.valid_until = child.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
- r_pdu.asn = resources.asn
- r_pdu.ipv4 = resources.v4
- r_pdu.ipv6 = resources.v6
- r_msg.append(r_pdu)
-
- def handle_list_roa_requests(self, q_pdu, r_msg):
- for request in rpki.irdb.ROARequest.objects.raw("""
- SELECT irdb_roarequest.*
- FROM irdb_roarequest, irdb_resourceholderca
- WHERE irdb_roarequest.issuer_id = irdb_resourceholderca.id
- AND irdb_resourceholderca.handle = %s
- """, [q_pdu.self_handle]):
- prefix_bag = request.roa_prefix_bag
- r_pdu = rpki.left_right.list_roa_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.asn = request.asn
- r_pdu.ipv4 = prefix_bag.v4
- r_pdu.ipv6 = prefix_bag.v6
- r_msg.append(r_pdu)
-
- def handle_list_ghostbuster_requests(self, q_pdu, r_msg):
- ghostbusters = rpki.irdb.GhostbusterRequest.objects.filter(
- issuer__handle__exact = q_pdu.self_handle,
- parent__handle__exact = q_pdu.parent_handle)
- if ghostbusters.count() == 0:
- ghostbusters = rpki.irdb.GhostbusterRequest.objects.filter(
- issuer__handle__exact = q_pdu.self_handle,
- parent = None)
- for ghostbuster in ghostbusters:
- r_pdu = rpki.left_right.list_ghostbuster_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.parent_handle = q_pdu.parent_handle
- r_pdu.vcard = ghostbuster.vcard
- r_msg.append(r_pdu)
-
- def handle_list_ee_certificate_requests(self, q_pdu, r_msg):
- for ee_req in rpki.irdb.EECertificateRequest.objects.filter(issuer__handle__exact = q_pdu.self_handle):
- resources = ee_req.resource_bag
- r_pdu = rpki.left_right.list_ee_certificate_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.gski = ee_req.gski
- r_pdu.valid_until = ee_req.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
- r_pdu.asn = resources.asn
- r_pdu.ipv4 = resources.v4
- r_pdu.ipv6 = resources.v6
- r_pdu.cn = ee_req.cn
- r_pdu.sn = ee_req.sn
- r_pdu.eku = ee_req.eku
- r_pdu.pkcs10 = ee_req.pkcs10
- r_msg.append(r_pdu)
-
- def handler(self, query, path, cb):
- try:
- q_pdu = None
- r_msg = rpki.left_right.msg.reply()
- from django.db import connection
- connection.cursor() # Reconnect to mysqld if necessary
- self.start_new_transaction()
- serverCA = rpki.irdb.ServerCA.objects.get()
- rpkid = serverCA.ee_certificates.get(purpose = "rpkid")
- try:
- q_cms = rpki.left_right.cms_msg(DER = query)
- q_msg = q_cms.unwrap((serverCA.certificate, rpkid.certificate))
- self.cms_timestamp = q_cms.check_replay(self.cms_timestamp, path)
- if not isinstance(q_msg, rpki.left_right.msg) or not q_msg.is_query():
- raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_msg)
- for q_pdu in q_msg:
- self.dispatch(q_pdu, r_msg)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Exception while handling HTTP request")
- if q_pdu is None:
- r_msg.append(rpki.left_right.report_error_elt.from_exception(e))
- else:
- r_msg.append(rpki.left_right.report_error_elt.from_exception(e, q_pdu.self_handle, q_pdu.tag))
- irdbd = serverCA.ee_certificates.get(purpose = "irdbd")
- cb(200, body = rpki.left_right.cms_msg().wrap(r_msg, irdbd.private_key, irdbd.certificate))
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Unhandled exception while processing HTTP request")
- cb(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
-
- def dispatch(self, q_pdu, r_msg):
- try:
- handler = self.dispatch_vector[type(q_pdu)]
- except KeyError:
- raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_pdu)
- else:
- handler(q_pdu, r_msg)
-
- def __init__(self, **kwargs):
-
- global rpki # pylint: disable=W0602
-
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize")
- parser.add_argument("--pidfile",
- help = "override default location of pid file")
- parser.add_argument("--profile",
- help = "enable profiling, saving data to PROFILE")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
-
- rpki.log.init("irdbd", args)
-
- self.cfg = rpki.config.parser(args.config, "irdbd")
- self.cfg.set_global_flags()
-
- if not args.foreground:
- rpki.daemonize.daemon(pidfile = args.pidfile)
-
- if args.profile:
- import cProfile
- prof = cProfile.Profile()
- try:
- prof.runcall(self.main)
- finally:
- prof.dump_stats(args.profile)
- logger.info("Dumped profile data to %s", args.profile)
- else:
- self.main()
-
- def main(self):
-
- global rpki # pylint: disable=W0602
-
- import django
-
- from django.conf import settings
-
- startup_msg = self.cfg.get("startup-message", "")
- if startup_msg:
- logger.info(startup_msg)
-
- # Do -not- turn on DEBUG here except for short-lived tests,
- # otherwise irdbd will eventually run out of memory and crash.
- #
- # If you must enable debugging, use django.db.reset_queries() to
- # clear the query list manually, but it's probably better just to
- # run with debugging disabled, since that's the expectation for
- # production code.
- #
- # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
-
- settings.configure(
- DATABASES = {
- "default" : {
- "ENGINE" : "django.db.backends.mysql",
- "NAME" : self.cfg.get("sql-database"),
- "USER" : self.cfg.get("sql-username"),
- "PASSWORD" : self.cfg.get("sql-password"),
- "HOST" : "",
- "PORT" : "" }},
- INSTALLED_APPS = ("rpki.irdb",),
- MIDDLEWARE_CLASSES = (), # API change, feh
- )
-
- if django.VERSION >= (1, 7): # API change, feh
- from django.apps import apps
- apps.populate(settings.INSTALLED_APPS)
-
- import rpki.irdb # pylint: disable=W0621
-
- # Entirely too much fun with read-only access to transactional databases.
- #
- # http://stackoverflow.com/questions/3346124/how-do-i-force-django-to-ignore-any-caches-and-reload-data
- # http://devblog.resolversystems.com/?p=439
- # http://groups.google.com/group/django-users/browse_thread/thread/e25cec400598c06d
- # http://stackoverflow.com/questions/1028671/python-mysqldb-update-query-fails
- # http://dev.mysql.com/doc/refman/5.0/en/set-transaction.html
- #
- # It turns out that MySQL is doing us a favor with this weird
- # transactional behavior on read, because without it there's a
- # race condition if multiple updates are committed to the IRDB
- # while we're in the middle of processing a query. Note that
- # proper transaction management by the committers doesn't protect
- # us, this is a transactional problem on read. So we need to use
- # explicit transaction management. Since irdbd is a read-only
- # consumer of IRDB data, this means we need to commit an empty
- # transaction at the beginning of processing each query, to reset
- # the transaction isolation snapshot.
-
- import django.db.transaction
- self.start_new_transaction = django.db.transaction.commit_manually(django.db.transaction.commit)
-
- self.dispatch_vector = {
- rpki.left_right.list_resources_elt : self.handle_list_resources,
- rpki.left_right.list_roa_requests_elt : self.handle_list_roa_requests,
- rpki.left_right.list_ghostbuster_requests_elt : self.handle_list_ghostbuster_requests,
- rpki.left_right.list_ee_certificate_requests_elt : self.handle_list_ee_certificate_requests}
-
- try:
- self.http_server_host = self.cfg.get("server-host", "")
- self.http_server_port = self.cfg.getint("server-port")
- except: # pylint: disable=W0702
- #
- # Backwards compatibility, remove this eventually.
- #
- u = urlparse.urlparse(self.cfg.get("http-url"))
- if (u.scheme not in ("", "http") or
- u.username is not None or
- u.password is not None or
- u.params or u.query or u.fragment):
- raise
- self.http_server_host = u.hostname
- self.http_server_port = int(u.port)
-
- self.cms_timestamp = None
-
- rpki.http.server(
- host = self.http_server_host,
- port = self.http_server_port,
- handlers = self.handler)
+    # Whether to drop XML into the log
+
+ debug = False
+
+ def handle_list_resources(self, q_pdu, r_msg):
+ tenant_handle = q_pdu.get("tenant_handle")
+ child_handle = q_pdu.get("child_handle")
+ child = rpki.irdb.models.Child.objects.get(issuer__handle = tenant_handle,
+ handle = child_handle)
+ resources = child.resource_bag
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_list_resources,
+ tenant_handle = tenant_handle, child_handle = child_handle,
+ valid_until = child.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ"))
+ for k, v in (("asn", resources.asn),
+ ("ipv4", resources.v4),
+ ("ipv6", resources.v6),
+ ("tag", q_pdu.get("tag"))):
+ if v:
+ r_pdu.set(k, str(v))
+
+ def handle_list_roa_requests(self, q_pdu, r_msg):
+ tenant_handle = q_pdu.get("tenant_handle")
+ for request in rpki.irdb.models.ROARequest.objects.raw("""
+ SELECT irdb_roarequest.*
+ FROM irdb_roarequest, irdb_resourceholderca
+ WHERE irdb_roarequest.issuer_id = irdb_resourceholderca.id
+ AND irdb_resourceholderca.handle = %s
+ """, [tenant_handle]):
+ prefix_bag = request.roa_prefix_bag
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_list_roa_requests,
+ tenant_handle = tenant_handle, asn = str(request.asn))
+ for k, v in (("ipv4", prefix_bag.v4),
+ ("ipv6", prefix_bag.v6),
+ ("tag", q_pdu.get("tag"))):
+ if v:
+ r_pdu.set(k, str(v))
+
+ def handle_list_ghostbuster_requests(self, q_pdu, r_msg):
+ tenant_handle = q_pdu.get("tenant_handle")
+ parent_handle = q_pdu.get("parent_handle")
+ ghostbusters = rpki.irdb.models.GhostbusterRequest.objects.filter(
+ issuer__handle = tenant_handle, parent__handle = parent_handle)
+ if ghostbusters.count() == 0:
+ ghostbusters = rpki.irdb.models.GhostbusterRequest.objects.filter(
+ issuer__handle = tenant_handle, parent = None)
+ for ghostbuster in ghostbusters:
+ r_pdu = SubElement(r_msg, q_pdu.tag,
+ tenant_handle = tenant_handle, parent_handle = parent_handle)
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+ r_pdu.text = ghostbuster.vcard
+
+ def handle_list_ee_certificate_requests(self, q_pdu, r_msg):
+ tenant_handle = q_pdu.get("tenant_handle")
+ for ee_req in rpki.irdb.models.EECertificateRequest.objects.filter(
+ issuer__handle = tenant_handle):
+ resources = ee_req.resource_bag
+ r_pdu = SubElement(r_msg, q_pdu.tag, tenant_handle = tenant_handle, gski = ee_req.gski,
+ valid_until = ee_req.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ"),
+ cn = ee_req.cn, sn = ee_req.sn)
+ for k, v in (("asn", resources.asn),
+ ("ipv4", resources.v4),
+ ("ipv6", resources.v6),
+ ("eku", ee_req.eku),
+ ("tag", q_pdu.get("tag"))):
+ if v:
+ r_pdu.set(k, str(v))
+ SubElement(r_pdu, rpki.left_right.tag_pkcs10).text = ee_req.pkcs10.get_Base64()
+
+ def handler(self, request, q_der):
+ try:
+ from django.db import connection
+ connection.cursor() # Reconnect to mysqld if necessary
+ self.start_new_transaction()
+ serverCA = rpki.irdb.models.ServerCA.objects.get()
+ rpkid = serverCA.ee_certificates.get(purpose = "rpkid")
+ irdbd = serverCA.ee_certificates.get(purpose = "irdbd")
+ q_cms = rpki.left_right.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((serverCA.certificate, rpkid.certificate))
+ self.cms_timestamp = q_cms.check_replay(self.cms_timestamp, request.path)
+ if self.debug:
+ logger.debug("Received: %s", ElementToString(q_msg))
+ if q_msg.get("type") != "query":
+ raise rpki.exceptions.BadQuery("Message type is {}, expected query".format(
+ q_msg.get("type")))
+ r_msg = Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "reply", version = rpki.left_right.version)
+ try:
+ for q_pdu in q_msg:
+ getattr(self, "handle_" + q_pdu.tag[len(rpki.left_right.xmlns):])(q_pdu, r_msg)
+
+ except Exception, e:
+ logger.exception("Exception processing PDU %r", q_pdu)
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_report_error,
+ error_code = e.__class__.__name__)
+ r_pdu.text = str(e)
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ if self.debug:
+ logger.debug("Sending: %s", ElementToString(r_msg))
+ request.send_cms_response(rpki.left_right.cms_msg().wrap(
+ r_msg, irdbd.private_key, irdbd.certificate))
+
+ except Exception, e:
+ logger.exception("Unhandled exception while processing HTTP request")
+ request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+
+ def __init__(self, **kwargs):
+
+ global rpki # pylint: disable=W0602
+
+ os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb")
+ time.tzset()
+
+ self.cfg = rpki.config.argparser(section = "irdbd", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground",
+ default = False,
+ help = "whether to daemonize")
+ self.cfg.add_argument("--pidfile",
+ default = os.path.join(rpki.daemonize.default_pid_directory,
+ "irdbd.pid"),
+ help = "override default location of pid file")
+ self.cfg.add_argument("--profile",
+ default = "",
+ help = "enable profiling, saving data to PROFILE")
+ self.cfg.add_logging_arguments()
+ args = self.cfg.argparser.parse_args()
+
+ self.cfg.configure_logging(args = args, ident = "irdbd")
+
+ try:
+ self.cfg.set_global_flags()
+
+ self.cms_timestamp = None
+
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
+
+ if args.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(self.main)
+ finally:
+ prof.dump_stats(args.profile)
+ logger.info("Dumped profile data to %s", args.profile)
+ else:
+ self.main()
+
+ except:
+ logger.exception("Unandled exception in rpki.irdbd.main()")
+ sys.exit(1)
+
+
+ def main(self):
+
+ startup_msg = self.cfg.get("startup-message", "")
+ if startup_msg:
+ logger.info(startup_msg)
+
+ # Now that we know which configuration file to use, it's OK to
+ # load modules that require Django's settings module.
+
+ import django
+ django.setup()
+
+ global rpki # pylint: disable=W0602
+ import rpki.irdb # pylint: disable=W0621
+
+ self.http_server_host = self.cfg.get("server-host", "")
+ self.http_server_port = self.cfg.getint("server-port")
+
+ rpki.http_simple.server(
+ host = self.http_server_host,
+ port = self.http_server_port,
+ handlers = self.handler)
+
+ def start_new_transaction(self):
+
+ # Entirely too much fun with read-only access to transactional databases.
+ #
+ # http://stackoverflow.com/questions/3346124/how-do-i-force-django-to-ignore-any-caches-and-reload-data
+ # http://devblog.resolversystems.com/?p=439
+ # http://groups.google.com/group/django-users/browse_thread/thread/e25cec400598c06d
+ # http://stackoverflow.com/questions/1028671/python-mysqldb-update-query-fails
+ # http://dev.mysql.com/doc/refman/5.0/en/set-transaction.html
+ #
+ # It turns out that MySQL is doing us a favor with this weird
+ # transactional behavior on read, because without it there's a
+ # race condition if multiple updates are committed to the IRDB
+ # while we're in the middle of processing a query. Note that
+ # proper transaction management by the committers doesn't protect
+ # us, this is a transactional problem on read. So we need to use
+ # explicit transaction management. Since irdbd is a read-only
+ # consumer of IRDB data, this means we need to commit an empty
+ # transaction at the beginning of processing each query, to reset
+ # the transaction isolation snapshot.
+
+ import django.db.transaction
+
+ with django.db.transaction.atomic():
+ #django.db.transaction.commit()
+ pass
diff --git a/rpki/left_right.py b/rpki/left_right.py
index c8b6d19b..02b118c0 100644
--- a/rpki/left_right.py
+++ b/rpki/left_right.py
@@ -22,1270 +22,59 @@ RPKI "left-right" protocol.
"""
import logging
-import rpki.resource_set
+
import rpki.x509
-import rpki.sql
import rpki.exceptions
-import rpki.xml_utils
-import rpki.http
import rpki.up_down
import rpki.relaxng
import rpki.sundial
import rpki.log
import rpki.publication
-import rpki.async
import rpki.rpkid_tasks
-logger = logging.getLogger(__name__)
-
-## @var enforce_strict_up_down_xml_sender
-# Enforce strict checking of XML "sender" field in up-down protocol
-
-enforce_strict_up_down_xml_sender = False
-
-class left_right_namespace(object):
- """
- XML namespace parameters for left-right protocol.
- """
-
- xmlns = rpki.relaxng.left_right.xmlns
- nsmap = rpki.relaxng.left_right.nsmap
-
-class data_elt(rpki.xml_utils.data_elt, rpki.sql.sql_persistent, left_right_namespace):
- """
- Virtual class for top-level left-right protocol data elements.
- """
-
- handles = ()
-
- self_id = None
- self_handle = None
-
- @property
- @rpki.sql.cache_reference
- def self(self):
- """
- Fetch self object to which this object links.
- """
- return self_elt.sql_fetch(self.gctx, self.self_id)
-
- @property
- @rpki.sql.cache_reference
- def bsc(self):
- """
- Return BSC object to which this object links.
- """
- return bsc_elt.sql_fetch(self.gctx, self.bsc_id)
-
- def make_reply_clone_hook(self, r_pdu):
- """
- Set handles when cloning, including _id -> _handle translation.
- """
- if r_pdu.self_handle is None:
- r_pdu.self_handle = self.self_handle
- for tag, elt in self.handles:
- id_name = tag + "_id"
- handle_name = tag + "_handle"
- if getattr(r_pdu, handle_name, None) is None:
- try:
- setattr(r_pdu, handle_name, getattr(elt.sql_fetch(self.gctx, getattr(r_pdu, id_name)), handle_name))
- except AttributeError:
- continue
-
- @classmethod
- def serve_fetch_handle(cls, gctx, self_id, handle):
- """
- Find an object based on its handle.
- """
- return cls.sql_fetch_where1(gctx, cls.element_name + "_handle = %s AND self_id = %s", (handle, self_id))
-
- def serve_fetch_one_maybe(self):
- """
- Find the object on which a get, set, or destroy method should
- operate, or which would conflict with a create method.
- """
- where = "%s.%s_handle = %%s AND %s.self_id = self.self_id AND self.self_handle = %%s" % ((self.element_name,) * 3)
- args = (getattr(self, self.element_name + "_handle"), self.self_handle)
- return self.sql_fetch_where1(self.gctx, where, args, "self")
-
- def serve_fetch_all(self):
- """
- Find the objects on which a list method should operate.
- """
- where = "%s.self_id = self.self_id and self.self_handle = %%s" % self.element_name
- return self.sql_fetch_where(self.gctx, where, (self.self_handle,), "self")
-
- def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Hook to do _handle => _id translation before saving.
-
- self is always the object to be saved to SQL. For create
- operations, self and q_pdu are be the same object; for set
- operations, self is the pre-existing object from SQL and q_pdu is
- the set request received from the the IRBE.
- """
- for tag, elt in self.handles:
- id_name = tag + "_id"
- if getattr(self, id_name, None) is None:
- x = elt.serve_fetch_handle(self.gctx, self.self_id, getattr(q_pdu, tag + "_handle"))
- if x is None:
- raise rpki.exceptions.HandleTranslationError("Could not translate %r %s_handle" % (self, tag))
- setattr(self, id_name, getattr(x, id_name))
- cb()
-
-class self_elt(data_elt):
- """
- <self/> element.
- """
-
- element_name = "self"
- attributes = ("action", "tag", "self_handle", "crl_interval", "regen_margin")
- elements = ("bpki_cert", "bpki_glue")
- booleans = ("rekey", "reissue", "revoke", "run_now", "publish_world_now", "revoke_forgotten",
- "clear_replay_protection")
-
- sql_template = rpki.sql.template(
- "self",
- "self_id",
- "self_handle",
- "use_hsm",
- "crl_interval",
- "regen_margin",
- ("bpki_cert", rpki.x509.X509),
- ("bpki_glue", rpki.x509.X509))
-
- handles = ()
-
- use_hsm = False
- crl_interval = None
- regen_margin = None
- bpki_cert = None
- bpki_glue = None
- cron_tasks = None
-
- def __repr__(self):
- return rpki.log.log_repr(self)
-
- @property
- def bscs(self):
- """
- Fetch all BSC objects that link to this self object.
- """
- return bsc_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def repositories(self):
- """
- Fetch all repository objects that link to this self object.
- """
- return repository_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def parents(self):
- """
- Fetch all parent objects that link to this self object.
- """
- return parent_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def children(self):
- """
- Fetch all child objects that link to this self object.
- """
- return child_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def roas(self):
- """
- Fetch all ROA objects that link to this self object.
- """
- return rpki.rpkid.roa_obj.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def ghostbusters(self):
- """
- Fetch all Ghostbuster record objects that link to this self object.
- """
- return rpki.rpkid.ghostbuster_obj.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def ee_certificates(self):
- """
- Fetch all EE certificate objects that link to this self object.
- """
- return rpki.rpkid.ee_cert_obj.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for self_elt.
- """
- actions = []
- if q_pdu.rekey:
- actions.append(self.serve_rekey)
- if q_pdu.revoke:
- actions.append(self.serve_revoke)
- if q_pdu.reissue:
- actions.append(self.serve_reissue)
- if q_pdu.revoke_forgotten:
- actions.append(self.serve_revoke_forgotten)
- if q_pdu.publish_world_now:
- actions.append(self.serve_publish_world_now)
- if q_pdu.run_now:
- actions.append(self.serve_run_now)
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_rekey(self, cb, eb):
- """
- Handle a left-right rekey action for this self.
- """
- def loop(iterator, parent):
- parent.serve_rekey(iterator, eb)
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_revoke(self, cb, eb):
- """
- Handle a left-right revoke action for this self.
- """
- def loop(iterator, parent):
- parent.serve_revoke(iterator, eb)
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_reissue(self, cb, eb):
- """
- Handle a left-right reissue action for this self.
- """
- def loop(iterator, parent):
- parent.serve_reissue(iterator, eb)
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_revoke_forgotten(self, cb, eb):
- """
- Handle a left-right revoke_forgotten action for this self.
- """
- def loop(iterator, parent):
- parent.serve_revoke_forgotten(iterator, eb)
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a left-right clear_replay_protection action for this self.
- """
- def loop(iterator, obj):
- obj.serve_clear_replay_protection(iterator, eb)
- rpki.async.iterator(self.parents + self.children + self.repositories, loop, cb)
-
- def serve_destroy_hook(self, cb, eb):
- """
- Extra cleanup actions when destroying a self_elt.
- """
- def loop(iterator, parent):
- parent.delete(iterator)
- rpki.async.iterator(self.parents, loop, cb)
-
-
- def serve_publish_world_now(self, cb, eb):
- """
- Handle a left-right publish_world_now action for this self.
-
- The publication stuff needs refactoring, right now publication is
- interleaved with local operations in a way that forces far too
- many bounces through the task system for any complex update. The
- whole thing ought to be rewritten to queue up outgoing publication
- PDUs and only send them when we're all done or when we need to
- force publication at a particular point in a multi-phase operation.
-
- Once that reorganization has been done, this method should be
- rewritten to reuse the low-level publish() methods that each
- object will have...but we're not there yet. So, for now, we just
- do this via brute force. Think of it as a trial version to see
- whether we've identified everything that needs to be republished
- for this operation.
- """
-
- def loop(iterator, parent):
- q_msg = rpki.publication.msg.query()
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None:
- q_msg.append(rpki.publication.crl_elt.make_publish(
- ca_detail.crl_uri, ca_detail.latest_crl))
- q_msg.append(rpki.publication.manifest_elt.make_publish(
- ca_detail.manifest_uri, ca_detail.latest_manifest))
- q_msg.extend(rpki.publication.certificate_elt.make_publish(
- c.uri, c.cert) for c in ca_detail.child_certs)
- q_msg.extend(rpki.publication.roa_elt.make_publish(
- r.uri, r.roa) for r in ca_detail.roas if r.roa is not None)
- q_msg.extend(rpki.publication.ghostbuster_elt.make_publish(
- g.uri, g.ghostbuster) for g in ca_detail.ghostbusters)
- parent.repository.call_pubd(iterator, eb, q_msg)
-
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_run_now(self, cb, eb):
- """
- Handle a left-right run_now action for this self.
- """
- logger.debug("Forced immediate run of periodic actions for self %s[%d]",
- self.self_handle, self.self_id)
- completion = rpki.rpkid_tasks.CompletionHandler(cb)
- self.schedule_cron_tasks(completion)
- assert completion.count > 0
- self.gctx.task_run()
-
- def serve_fetch_one_maybe(self):
- """
- Find the self object upon which a get, set, or destroy action
- should operate, or which would conflict with a create method.
- """
- return self.serve_fetch_handle(self.gctx, None, self.self_handle)
-
- @classmethod
- def serve_fetch_handle(cls, gctx, self_id, self_handle):
- """
- Find a self object based on its self_handle.
- """
- return cls.sql_fetch_where1(gctx, "self_handle = %s", (self_handle,))
-
- def serve_fetch_all(self):
- """
- Find the self objects upon which a list action should operate.
- This is different from the list action for all other objects,
- where list only works within a given self_id context.
- """
- return self.sql_fetch_all(self.gctx)
-
- def schedule_cron_tasks(self, completion):
- """
- Schedule periodic tasks.
- """
-
- if self.cron_tasks is None:
- self.cron_tasks = tuple(task(self) for task in rpki.rpkid_tasks.task_classes)
-
- for task in self.cron_tasks:
- self.gctx.task_add(task)
- completion.register(task)
-
- def find_covering_ca_details(self, resources):
- """
- Return all active ca_detail_objs for this <self/> which cover a
- particular set of resources.
- If we expected there to be a large number of ca_detail_objs, we
- could add index tables and write fancy SQL query to do this, but
- for the expected common case where there are only one or two
- active ca_detail_objs per <self/>, it's probably not worth it. In
- any case, this is an optimization we can leave for later.
- """
-
- results = set()
- for parent in self.parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None and ca_detail.covers(resources):
- results.add(ca_detail)
- return results
-
-
-class bsc_elt(data_elt):
- """
- <bsc/> (Business Signing Context) element.
- """
-
- element_name = "bsc"
- attributes = ("action", "tag", "self_handle", "bsc_handle", "key_type", "hash_alg", "key_length")
- elements = ("signing_cert", "signing_cert_crl", "pkcs10_request")
- booleans = ("generate_keypair",)
-
- sql_template = rpki.sql.template(
- "bsc",
- "bsc_id",
- "bsc_handle",
- "self_id",
- "hash_alg",
- ("private_key_id", rpki.x509.RSA),
- ("pkcs10_request", rpki.x509.PKCS10),
- ("signing_cert", rpki.x509.X509),
- ("signing_cert_crl", rpki.x509.CRL))
-
- handles = (("self", self_elt),)
-
- private_key_id = None
- pkcs10_request = None
- signing_cert = None
- signing_cert_crl = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.bsc_handle)
-
- @property
- def repositories(self):
- """
- Fetch all repository objects that link to this BSC object.
- """
- return repository_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
-
- @property
- def parents(self):
- """
- Fetch all parent objects that link to this BSC object.
- """
- return parent_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
-
- @property
- def children(self):
- """
- Fetch all child objects that link to this BSC object.
- """
- return child_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
-
- def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for bsc_elt -- handle key generation. For
- now this only allows RSA with SHA-256.
- """
- if q_pdu.generate_keypair:
- assert q_pdu.key_type in (None, "rsa") and q_pdu.hash_alg in (None, "sha256")
- self.private_key_id = rpki.x509.RSA.generate(keylength = q_pdu.key_length or 2048)
- self.pkcs10_request = rpki.x509.PKCS10.create(keypair = self.private_key_id)
- r_pdu.pkcs10_request = self.pkcs10_request
- data_elt.serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb)
-
-class repository_elt(data_elt):
- """
- <repository/> element.
- """
-
- element_name = "repository"
- attributes = ("action", "tag", "self_handle", "repository_handle", "bsc_handle", "peer_contact_uri")
- elements = ("bpki_cert", "bpki_glue")
- booleans = ("clear_replay_protection",)
-
- sql_template = rpki.sql.template(
- "repository",
- "repository_id",
- "repository_handle",
- "self_id",
- "bsc_id",
- "peer_contact_uri",
- ("bpki_cert", rpki.x509.X509),
- ("bpki_glue", rpki.x509.X509),
- ("last_cms_timestamp", rpki.sundial.datetime))
-
- handles = (("self", self_elt),
- ("bsc", bsc_elt))
-
- bpki_cert = None
- bpki_glue = None
- last_cms_timestamp = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.repository_handle)
-
- @property
- def parents(self):
- """
- Fetch all parent objects that link to this repository object.
- """
- return parent_elt.sql_fetch_where(self.gctx, "repository_id = %s", (self.repository_id,))
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for repository_elt.
- """
- actions = []
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a left-right clear_replay_protection action for this repository.
- """
- self.last_cms_timestamp = None
- self.sql_mark_dirty()
- cb()
-
- @staticmethod
- def default_pubd_handler(pdu):
- """
- Default handler for publication response PDUs.
- """
- pdu.raise_if_error()
-
- def call_pubd(self, callback, errback, q_msg, handlers = None):
- """
- Send a message to publication daemon and return the response.
-
- As a convenience, attempting to send an empty message returns
- immediate success without sending anything.
-
- Handlers is a dict of handler functions to process the response
- PDUs. If the tag value in the response PDU appears in the dict,
- the associated handler is called to process the PDU. If no tag
- matches, default_pubd_handler() is called. A handler value of
- False suppresses calling of the default handler.
- """
-
- try:
- self.gctx.sql.sweep()
-
- if not q_msg:
- return callback()
-
- if handlers is None:
- handlers = {}
-
- for q_pdu in q_msg:
- logger.info("Sending %s %s to pubd", q_pdu.action, q_pdu.uri)
-
- bsc = self.bsc
- q_der = rpki.publication.cms_msg().wrap(q_msg, bsc.private_key_id, bsc.signing_cert, bsc.signing_cert_crl)
- bpki_ta_path = (self.gctx.bpki_ta, self.self.bpki_cert, self.self.bpki_glue, self.bpki_cert, self.bpki_glue)
-
- def done(r_der):
- try:
- logger.debug("Received response from pubd")
- r_cms = rpki.publication.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap(bpki_ta_path)
- r_cms.check_replay_sql(self, self.peer_contact_uri)
- for r_pdu in r_msg:
- handler = handlers.get(r_pdu.tag, self.default_pubd_handler)
- if handler:
- logger.debug("Calling pubd handler %r", handler)
- handler(r_pdu)
- if len(q_msg) != len(r_msg):
- raise rpki.exceptions.BadPublicationReply("Wrong number of response PDUs from pubd: sent %r, got %r" % (q_msg, r_msg))
- callback()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- errback(e)
-
- logger.debug("Sending request to pubd")
- rpki.http.client(
- url = self.peer_contact_uri,
- msg = q_der,
- callback = done,
- errback = errback)
-
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- errback(e)
-
-class parent_elt(data_elt):
- """
- <parent/> element.
- """
-
- element_name = "parent"
- attributes = ("action", "tag", "self_handle", "parent_handle", "bsc_handle", "repository_handle",
- "peer_contact_uri", "sia_base", "sender_name", "recipient_name")
- elements = ("bpki_cms_cert", "bpki_cms_glue")
- booleans = ("rekey", "reissue", "revoke", "revoke_forgotten", "clear_replay_protection")
-
- sql_template = rpki.sql.template(
- "parent",
- "parent_id",
- "parent_handle",
- "self_id",
- "bsc_id",
- "repository_id",
- "peer_contact_uri",
- "sia_base",
- "sender_name",
- "recipient_name",
- ("bpki_cms_cert", rpki.x509.X509),
- ("bpki_cms_glue", rpki.x509.X509),
- ("last_cms_timestamp", rpki.sundial.datetime))
-
- handles = (("self", self_elt),
- ("bsc", bsc_elt),
- ("repository", repository_elt))
-
- bpki_cms_cert = None
- bpki_cms_glue = None
- last_cms_timestamp = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.parent_handle)
-
- @property
- @rpki.sql.cache_reference
- def repository(self):
- """
- Fetch repository object to which this parent object links.
- """
- return repository_elt.sql_fetch(self.gctx, self.repository_id)
-
- @property
- def cas(self):
- """
- Fetch all CA objects that link to this parent object.
- """
- return rpki.rpkid.ca_obj.sql_fetch_where(self.gctx, "parent_id = %s", (self.parent_id,))
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for parent_elt.
- """
- actions = []
- if q_pdu.rekey:
- actions.append(self.serve_rekey)
- if q_pdu.revoke:
- actions.append(self.serve_revoke)
- if q_pdu.reissue:
- actions.append(self.serve_reissue)
- if q_pdu.revoke_forgotten:
- actions.append(self.serve_revoke_forgotten)
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_rekey(self, cb, eb):
- """
- Handle a left-right rekey action for this parent.
- """
- def loop(iterator, ca):
- ca.rekey(iterator, eb)
- rpki.async.iterator(self.cas, loop, cb)
-
- def serve_revoke(self, cb, eb):
- """
- Handle a left-right revoke action for this parent.
- """
- def loop(iterator, ca):
- ca.revoke(cb = iterator, eb = eb)
- rpki.async.iterator(self.cas, loop, cb)
-
- def serve_reissue(self, cb, eb):
- """
- Handle a left-right reissue action for this parent.
- """
- def loop(iterator, ca):
- ca.reissue(cb = iterator, eb = eb)
- rpki.async.iterator(self.cas, loop, cb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a left-right clear_replay_protection action for this parent.
- """
- self.last_cms_timestamp = None
- self.sql_mark_dirty()
- cb()
-
-
- def get_skis(self, cb, eb):
- """
- Fetch SKIs that this parent thinks we have. In theory this should
- agree with our own database, but in practice stuff can happen, so
- sometimes we need to know what our parent thinks.
-
- Result is a dictionary with the resource class name as key and a
- set of SKIs as value.
- """
-
- def done(r_msg):
- cb(dict((rc.class_name, set(c.cert.gSKI() for c in rc.certs))
- for rc in r_msg.payload.classes))
-
- rpki.up_down.list_pdu.query(self, done, eb)
-
-
- def revoke_skis(self, rc_name, skis_to_revoke, cb, eb):
- """
- Revoke a set of SKIs within a particular resource class.
- """
-
- def loop(iterator, ski):
- logger.debug("Asking parent %r to revoke class %r, SKI %s", self, rc_name, ski)
- q_pdu = rpki.up_down.revoke_pdu()
- q_pdu.class_name = rc_name
- q_pdu.ski = ski
- self.query_up_down(q_pdu, lambda r_pdu: iterator(), eb)
-
- rpki.async.iterator(skis_to_revoke, loop, cb)
-
-
- def serve_revoke_forgotten(self, cb, eb):
- """
- Handle a left-right revoke_forgotten action for this parent.
-
- This is a bit fiddly: we have to compare the result of an up-down
- list query with what we have locally and identify the SKIs of any
- certificates that have gone missing. This should never happen in
- ordinary operation, but can arise if we have somehow lost a
- private key, in which case there is nothing more we can do with
- the issued cert, so we have to clear it. As this really is not
- supposed to happen, we don't clear it automatically, instead we
- require an explicit trigger.
- """
-
- def got_skis(skis_from_parent):
-
- def loop(iterator, item):
- rc_name, skis_to_revoke = item
- if rc_name in ca_map:
- for ca_detail in ca_map[rc_name].issue_response_candidate_ca_details:
- skis_to_revoke.discard(ca_detail.latest_ca_cert.gSKI())
- self.revoke_skis(rc_name, skis_to_revoke, iterator, eb)
-
- ca_map = dict((ca.parent_resource_class, ca) for ca in self.cas)
- rpki.async.iterator(skis_from_parent.items(), loop, cb)
-
- self.get_skis(got_skis, eb)
-
-
- def delete(self, cb, delete_parent = True):
- """
- Delete all the CA stuff under this parent, and perhaps the parent
- itself.
- """
-
- def loop(iterator, ca):
- self.gctx.checkpoint()
- ca.delete(self, iterator)
-
- def revoke():
- self.gctx.checkpoint()
- self.serve_revoke_forgotten(done, fail)
-
- def fail(e):
- logger.warning("Trouble getting parent to revoke certificates, blundering onwards: %s", e)
- done()
-
- def done():
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- if delete_parent:
- self.sql_delete()
- cb()
-
- rpki.async.iterator(self.cas, loop, revoke)
-
-
- def serve_destroy_hook(self, cb, eb):
- """
- Extra server actions when destroying a parent_elt.
- """
-
- self.delete(cb, delete_parent = False)
-
-
- def query_up_down(self, q_pdu, cb, eb):
- """
- Client code for sending one up-down query PDU to this parent.
- """
-
- bsc = self.bsc
- if bsc is None:
- raise rpki.exceptions.BSCNotFound("Could not find BSC %s" % self.bsc_id)
-
- if bsc.signing_cert is None:
- raise rpki.exceptions.BSCNotReady("BSC %r[%s] is not yet usable" % (bsc.bsc_handle, bsc.bsc_id))
-
- q_msg = rpki.up_down.message_pdu.make_query(
- payload = q_pdu,
- sender = self.sender_name,
- recipient = self.recipient_name)
-
- q_der = rpki.up_down.cms_msg().wrap(q_msg, bsc.private_key_id,
- bsc.signing_cert,
- bsc.signing_cert_crl)
-
- def unwrap(r_der):
- try:
- r_cms = rpki.up_down.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap((self.gctx.bpki_ta,
- self.self.bpki_cert,
- self.self.bpki_glue,
- self.bpki_cms_cert,
- self.bpki_cms_glue))
- r_cms.check_replay_sql(self, self.peer_contact_uri)
- r_msg.payload.check_response()
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception, e:
- eb(e)
- else:
- cb(r_msg)
-
- rpki.http.client(
- msg = q_der,
- url = self.peer_contact_uri,
- callback = unwrap,
- errback = eb,
- content_type = rpki.up_down.content_type)
-
-class child_elt(data_elt):
- """
- <child/> element.
- """
-
- element_name = "child"
- attributes = ("action", "tag", "self_handle", "child_handle", "bsc_handle")
- elements = ("bpki_cert", "bpki_glue")
- booleans = ("reissue", "clear_replay_protection")
-
- sql_template = rpki.sql.template(
- "child",
- "child_id",
- "child_handle",
- "self_id",
- "bsc_id",
- ("bpki_cert", rpki.x509.X509),
- ("bpki_glue", rpki.x509.X509),
- ("last_cms_timestamp", rpki.sundial.datetime))
-
- handles = (("self", self_elt),
- ("bsc", bsc_elt))
-
- bpki_cert = None
- bpki_glue = None
- last_cms_timestamp = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.child_handle)
-
- def fetch_child_certs(self, ca_detail = None, ski = None, unique = False):
- """
- Fetch all child_cert objects that link to this child object.
- """
- return rpki.rpkid.child_cert_obj.fetch(self.gctx, self, ca_detail, ski, unique)
-
- @property
- def child_certs(self):
- """
- Fetch all child_cert objects that link to this child object.
- """
- return self.fetch_child_certs()
-
- @property
- def parents(self):
- """
- Fetch all parent objects that link to self object to which this child object links.
- """
- return parent_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for child_elt.
- """
- actions = []
- if q_pdu.reissue:
- actions.append(self.serve_reissue)
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_reissue(self, cb, eb):
- """
- Handle a left-right reissue action for this child.
- """
- publisher = rpki.rpkid.publication_queue()
- for child_cert in self.child_certs:
- child_cert.reissue(child_cert.ca_detail, publisher, force = True)
- publisher.call_pubd(cb, eb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a left-right clear_replay_protection action for this child.
- """
- self.last_cms_timestamp = None
- self.sql_mark_dirty()
- cb()
-
- def ca_from_class_name(self, class_name):
- """
- Fetch the CA corresponding to an up-down class_name.
- """
- if not class_name.isdigit():
- raise rpki.exceptions.BadClassNameSyntax("Bad class name %s" % class_name)
- ca = rpki.rpkid.ca_obj.sql_fetch(self.gctx, long(class_name))
- if ca is None:
- raise rpki.exceptions.ClassNameUnknown("Unknown class name %s" % class_name)
- parent = ca.parent
- if self.self_id != parent.self_id:
- raise rpki.exceptions.ClassNameMismatch(
- "Class name mismatch: child.self_id = %d, parent.self_id = %d" % (
- self.self_id, parent.self_id))
- return ca
-
- def serve_destroy_hook(self, cb, eb):
- """
- Extra server actions when destroying a child_elt.
- """
- publisher = rpki.rpkid.publication_queue()
- for child_cert in self.child_certs:
- child_cert.revoke(publisher = publisher,
- generate_crl_and_manifest = True)
- publisher.call_pubd(cb, eb)
-
- def serve_up_down(self, query, callback):
- """
- Outer layer of server handling for one up-down PDU from this child.
- """
-
- bsc = self.bsc
- if bsc is None:
- raise rpki.exceptions.BSCNotFound("Could not find BSC %s" % self.bsc_id)
- q_cms = rpki.up_down.cms_msg(DER = query)
- q_msg = q_cms.unwrap((self.gctx.bpki_ta,
- self.self.bpki_cert,
- self.self.bpki_glue,
- self.bpki_cert,
- self.bpki_glue))
- q_cms.check_replay_sql(self, "child", self.child_handle)
- q_msg.payload.gctx = self.gctx
- if enforce_strict_up_down_xml_sender and q_msg.sender != self.child_handle:
- raise rpki.exceptions.BadSender("Unexpected XML sender %s" % q_msg.sender)
- self.gctx.sql.sweep()
-
- def done(r_msg):
- #
- # Exceptions from this point on are problematic, as we have no
- # sane way of reporting errors in the error reporting mechanism.
- # May require refactoring, ignore the issue for now.
- #
- reply = rpki.up_down.cms_msg().wrap(r_msg, bsc.private_key_id,
- bsc.signing_cert, bsc.signing_cert_crl)
- callback(reply)
-
- try:
- q_msg.serve_top_level(self, done)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except rpki.exceptions.NoActiveCA, data:
- done(q_msg.serve_error(data))
- except Exception, e:
- logger.exception("Unhandled exception serving up-down request from %r", self)
- done(q_msg.serve_error(e))
-
-class list_resources_elt(rpki.xml_utils.base_elt, left_right_namespace):
- """
- <list_resources/> element.
- """
-
- element_name = "list_resources"
- attributes = ("self_handle", "tag", "child_handle", "valid_until", "asn", "ipv4", "ipv6")
- valid_until = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.child_handle, self.asn, self.ipv4, self.ipv6)
-
- def startElement(self, stack, name, attrs):
- """
- Handle <list_resources/> element. This requires special handling
- due to the data types of some of the attributes.
- """
- assert name == "list_resources", "Unexpected name %s, stack %s" % (name, stack)
- self.read_attrs(attrs)
- if isinstance(self.valid_until, str):
- self.valid_until = rpki.sundial.datetime.fromXMLtime(self.valid_until)
- if self.asn is not None:
- self.asn = rpki.resource_set.resource_set_as(self.asn)
- if self.ipv4 is not None:
- self.ipv4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
- if self.ipv6 is not None:
- self.ipv6 = rpki.resource_set.resource_set_ipv6(self.ipv6)
-
- def toXML(self):
- """
- Generate <list_resources/> element. This requires special
- handling due to the data types of some of the attributes.
- """
- elt = self.make_elt()
- if isinstance(self.valid_until, int):
- elt.set("valid_until", self.valid_until.toXMLtime())
- return elt
-
-class list_roa_requests_elt(rpki.xml_utils.base_elt, left_right_namespace):
- """
- <list_roa_requests/> element.
- """
-
- element_name = "list_roa_requests"
- attributes = ("self_handle", "tag", "asn", "ipv4", "ipv6")
-
- def startElement(self, stack, name, attrs):
- """
- Handle <list_roa_requests/> element. This requires special handling
- due to the data types of some of the attributes.
- """
- assert name == "list_roa_requests", "Unexpected name %s, stack %s" % (name, stack)
- self.read_attrs(attrs)
- if self.ipv4 is not None:
- self.ipv4 = rpki.resource_set.roa_prefix_set_ipv4(self.ipv4)
- if self.ipv6 is not None:
- self.ipv6 = rpki.resource_set.roa_prefix_set_ipv6(self.ipv6)
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.asn, self.ipv4, self.ipv6)
-
-class list_ghostbuster_requests_elt(rpki.xml_utils.text_elt, left_right_namespace):
- """
- <list_ghostbuster_requests/> element.
- """
-
- element_name = "list_ghostbuster_requests"
- attributes = ("self_handle", "tag", "parent_handle")
- text_attribute = "vcard"
-
- vcard = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.parent_handle)
-
-class list_ee_certificate_requests_elt(rpki.xml_utils.base_elt, left_right_namespace):
- """
- <list_ee_certificate_requests/> element.
- """
-
- element_name = "list_ee_certificate_requests"
- attributes = ("self_handle", "tag", "gski", "valid_until", "asn", "ipv4", "ipv6", "cn", "sn", "eku")
- elements = ("pkcs10",)
-
- pkcs10 = None
- valid_until = None
- eku = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.gski, self.cn, self.sn, self.asn, self.ipv4, self.ipv6)
-
- def startElement(self, stack, name, attrs):
- """
- Handle <list_ee_certificate_requests/> element. This requires special
- handling due to the data types of some of the attributes.
- """
- if name not in self.elements:
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- self.read_attrs(attrs)
- if isinstance(self.valid_until, str):
- self.valid_until = rpki.sundial.datetime.fromXMLtime(self.valid_until)
- if self.asn is not None:
- self.asn = rpki.resource_set.resource_set_as(self.asn)
- if self.ipv4 is not None:
- self.ipv4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
- if self.ipv6 is not None:
- self.ipv6 = rpki.resource_set.resource_set_ipv6(self.ipv6)
- if self.eku is not None:
- self.eku = self.eku.split(",")
-
- def endElement(self, stack, name, text):
- """
- Handle <pkcs10/> sub-element.
- """
- assert len(self.elements) == 1
- if name == self.elements[0]:
- self.pkcs10 = rpki.x509.PKCS10(Base64 = text)
- else:
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
-
- def toXML(self):
- """
- Generate <list_ee_certificate_requests/> element. This requires special
- handling due to the data types of some of the attributes.
- """
- if isinstance(self.eku, (tuple, list)):
- self.eku = ",".join(self.eku)
- elt = self.make_elt()
- for i in self.elements:
- self.make_b64elt(elt, i, getattr(self, i, None))
- if isinstance(self.valid_until, int):
- elt.set("valid_until", self.valid_until.toXMLtime())
- return elt
-
-class list_published_objects_elt(rpki.xml_utils.text_elt, left_right_namespace):
- """
- <list_published_objects/> element.
- """
-
- element_name = "list_published_objects"
- attributes = ("self_handle", "tag", "uri", "child_handle")
- text_attribute = "obj"
-
- obj = None
- child_handle = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.child_handle, self.uri)
-
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Handle a <list_published_objects/> query. The method name is a
- misnomer here, there's no action attribute and no dispatch, we
- just dump every published object for the specified <self/> and return.
- """
- for parent in self_elt.serve_fetch_handle(self.gctx, None, self.self_handle).parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None:
- r_msg.append(self.make_reply(ca_detail.crl_uri, ca_detail.latest_crl))
- r_msg.append(self.make_reply(ca_detail.manifest_uri, ca_detail.latest_manifest))
- r_msg.extend(self.make_reply(c.uri, c.cert, c.child.child_handle)
- for c in ca_detail.child_certs)
- r_msg.extend(self.make_reply(r.uri, r.roa)
- for r in ca_detail.roas if r.roa is not None)
- r_msg.extend(self.make_reply(g.uri, g.ghostbuster)
- for g in ca_detail.ghostbusters)
- r_msg.extend(self.make_reply(c.uri, c.cert)
- for c in ca_detail.ee_certificates)
- cb()
-
- def make_reply(self, uri, obj, child_handle = None):
- """
- Generate one reply PDU.
- """
- r_pdu = self.make_pdu(tag = self.tag, self_handle = self.self_handle,
- uri = uri, child_handle = child_handle)
- r_pdu.obj = obj.get_Base64()
- return r_pdu
-
-class list_received_resources_elt(rpki.xml_utils.base_elt, left_right_namespace):
- """
- <list_received_resources/> element.
- """
-
- element_name = "list_received_resources"
- attributes = ("self_handle", "tag", "parent_handle",
- "notBefore", "notAfter", "uri", "sia_uri", "aia_uri", "asn", "ipv4", "ipv6")
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.parent_handle, self.uri, self.notAfter)
-
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Handle a <list_received_resources/> query. The method name is a
- misnomer here, there's no action attribute and no dispatch, we
- just dump a bunch of data about every certificate issued to us by
- one of our parents, then return.
- """
- for parent in self_elt.serve_fetch_handle(self.gctx, None, self.self_handle).parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None and ca_detail.latest_ca_cert is not None:
- r_msg.append(self.make_reply(parent.parent_handle, ca_detail.ca_cert_uri, ca_detail.latest_ca_cert))
- cb()
-
- def make_reply(self, parent_handle, uri, cert):
- """
- Generate one reply PDU.
- """
- resources = cert.get_3779resources()
- return self.make_pdu(
- tag = self.tag,
- self_handle = self.self_handle,
- parent_handle = parent_handle,
- notBefore = str(cert.getNotBefore()),
- notAfter = str(cert.getNotAfter()),
- uri = uri,
- sia_uri = cert.get_sia_directory_uri(),
- aia_uri = cert.get_aia_uri(),
- asn = resources.asn,
- ipv4 = resources.v4,
- ipv6 = resources.v6)
-
-class report_error_elt(rpki.xml_utils.text_elt, left_right_namespace):
- """
- <report_error/> element.
- """
-
- element_name = "report_error"
- attributes = ("tag", "self_handle", "error_code")
- text_attribute = "error_text"
-
- error_text = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.error_code)
-
- @classmethod
- def from_exception(cls, e, self_handle = None, tag = None):
- """
- Generate a <report_error/> element from an exception.
- """
- self = cls()
- self.self_handle = self_handle
- self.tag = tag
- self.error_code = e.__class__.__name__
- self.error_text = str(e)
- return self
-
-class msg(rpki.xml_utils.msg, left_right_namespace):
- """
- Left-right PDU.
- """
+logger = logging.getLogger(__name__)
- ## @var version
- # Protocol version
- version = int(rpki.relaxng.left_right.version)
+xmlns = rpki.relaxng.left_right.xmlns
+nsmap = rpki.relaxng.left_right.nsmap
+version = rpki.relaxng.left_right.version
+
+tag_bpki_cert = xmlns + "bpki_cert"
+tag_bpki_glue = xmlns + "bpki_glue"
+tag_bsc = xmlns + "bsc"
+tag_child = xmlns + "child"
+tag_list_ee_certificate_requests = xmlns + "list_ee_certificate_requests"
+tag_list_ghostbuster_requests = xmlns + "list_ghostbuster_requests"
+tag_list_published_objects = xmlns + "list_published_objects"
+tag_list_received_resources = xmlns + "list_received_resources"
+tag_list_resources = xmlns + "list_resources"
+tag_list_roa_requests = xmlns + "list_roa_requests"
+tag_msg = xmlns + "msg"
+tag_parent = xmlns + "parent"
+tag_pkcs10 = xmlns + "pkcs10"
+tag_pkcs10_request = xmlns + "pkcs10_request"
+tag_report_error = xmlns + "report_error"
+tag_repository = xmlns + "repository"
+tag_rpki_root_cert = xmlns + "rpki_root_cert"
+tag_tenant = xmlns + "tenant"
+tag_signing_cert = xmlns + "signing_cert"
+tag_signing_cert_crl = xmlns + "signing_cert_crl"
+
+## @var content_type
+# Content type to use when sending left-right queries
+content_type = "application/x-rpki"
+
+## @var allowed_content_types
+# Content types we consider acceptable for incoming left-right
+# queries.
+
+allowed_content_types = (content_type,)
- ## @var pdus
- # Dispatch table of PDUs for this protocol.
- pdus = dict((x.element_name, x)
- for x in (self_elt, child_elt, parent_elt, bsc_elt,
- repository_elt, list_resources_elt,
- list_roa_requests_elt, list_ghostbuster_requests_elt,
- list_ee_certificate_requests_elt,
- list_published_objects_elt,
- list_received_resources_elt, report_error_elt))
- def serve_top_level(self, gctx, cb):
+class cms_msg(rpki.x509.XML_CMS_object):
"""
- Serve one msg PDU.
+ CMS-signed left-right PDU.
"""
- r_msg = self.__class__.reply()
-
- def loop(iterator, q_pdu):
-
- def fail(e):
- if not isinstance(e, rpki.exceptions.NotFound):
- logger.exception("Unhandled exception serving left-right PDU %r", q_pdu)
- r_msg.append(report_error_elt.from_exception(
- e, self_handle = q_pdu.self_handle, tag = q_pdu.tag))
- cb(r_msg)
-
- try:
- q_pdu.gctx = gctx
- q_pdu.serve_dispatch(r_msg, iterator, fail)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- fail(e)
-
- def done():
- cb(r_msg)
-
- rpki.async.iterator(self, loop, done)
-
-class sax_handler(rpki.xml_utils.sax_handler):
- """
- SAX handler for Left-Right protocol.
- """
-
- pdu = msg
- name = "msg"
- version = rpki.relaxng.left_right.version
-
-class cms_msg(rpki.x509.XML_CMS_object):
- """
- Class to hold a CMS-signed left-right PDU.
- """
-
- encoding = "us-ascii"
- schema = rpki.relaxng.left_right
- saxify = sax_handler.saxify
+ encoding = "us-ascii"
+ schema = rpki.relaxng.left_right
diff --git a/rpki/log.py b/rpki/log.py
index 2abb3b2c..14805fee 100644
--- a/rpki/log.py
+++ b/rpki/log.py
@@ -29,14 +29,6 @@ import logging.handlers
import argparse
import traceback as tb
-try:
- have_setproctitle = False
- if os.getenv("DISABLE_SETPROCTITLE") is None:
- import setproctitle
- have_setproctitle = True
-except ImportError:
- pass
-
logger = logging.getLogger(__name__)
## @var show_python_ids
@@ -44,221 +36,59 @@ logger = logging.getLogger(__name__)
show_python_ids = False
-## @var enable_tracebacks
-# Whether tracebacks are enabled globally. Individual classes and
-# modules may choose to override this.
-
-enable_tracebacks = False
-
-## @var use_setproctitle
-# Whether to use setproctitle (if available) to change name shown for
-# this process in ps listings (etc).
-
-use_setproctitle = True
-
-## @var proctitle_extra
-
-# Extra text to include in proctitle display. By default this is the
-# tail of the current directory name, as this is often useful, but you
-# can set it to something else if you like. If None or the empty
-# string, the extra information field will be omitted from the proctitle.
-
-proctitle_extra = os.path.basename(os.getcwd())
-
-
-class Formatter(object):
- """
- Reimplementation (easier than subclassing in this case) of
- logging.Formatter.
-
- It turns out that the logging code only cares about this class's
- .format(record) method, everything else is internal; so long as
- .format() converts a record into a properly formatted string, the
- logging code is happy.
-
- So, rather than mess around with dynamically constructing and
- deconstructing and tweaking format strings and ten zillion options
- we don't use, we just provide our own implementation that supports
- what we do need.
- """
-
- converter = time.gmtime
-
- def __init__(self, ident, handler):
- self.ident = ident
- self.is_syslog = isinstance(handler, logging.handlers.SysLogHandler)
-
- def format(self, record):
- return "".join(self.coformat(record)).rstrip("\n")
-
- def coformat(self, record):
- if not self.is_syslog:
- yield time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime(record.created))
- yield "%s[%d]: " % (self.ident, record.process)
- try:
- yield repr(record.context) + " "
- except AttributeError:
- pass
- yield record.getMessage()
- if record.exc_info:
- if self.is_syslog or not enable_tracebacks:
- lines = tb.format_exception_only(record.exc_info[0], record.exc_info[1])
- lines.insert(0, ": ")
- else:
- lines = tb.format_exception(record.exc_info[0], record.exc_info[1], record.exc_info[2])
- lines.insert(0, "\n")
- for line in lines:
- yield line
-
-
-def argparse_setup(parser, default_thunk = None):
- """
- Set up argparse stuff for functionality in this module.
-
- Default logging destination is syslog, but you can change this
- by setting default_thunk to a callable which takes no arguments
- and which returns a instance of a logging.Handler subclass.
-
- Also see rpki.log.init().
- """
-
- class LogLevelAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- setattr(namespace, self.dest, getattr(logging, values.upper()))
-
- parser.add_argument("--log-level", default = logging.WARNING, action = LogLevelAction,
- choices = ("debug", "info", "warning", "error", "critical"),
- help = "how verbosely to log")
-
- group = parser.add_mutually_exclusive_group()
-
- syslog_address = "/dev/log" if os.path.exists("/dev/log") else ("localhost", logging.handlers.SYSLOG_UDP_PORT)
-
- class SyslogAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.handlers.SysLogHandler(address = syslog_address, facility = values)
-
- group.add_argument("--log-syslog", nargs = "?", const = "daemon", action = SyslogAction,
- choices = sorted(logging.handlers.SysLogHandler.facility_names.keys()),
- help = "send logging to syslog")
-
- class StreamAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.StreamHandler(stream = self.const)
-
- group.add_argument("--log-stderr", nargs = 0, action = StreamAction, const = sys.stderr,
- help = "send logging to standard error")
-
- group.add_argument("--log-stdout", nargs = 0, action = StreamAction, const = sys.stdout,
- help = "send logging to standard output")
-
- class WatchedFileAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.handlers.WatchedFileHandler(filename = values)
-
- group.add_argument("--log-file", action = WatchedFileAction,
- help = "send logging to a file, reopening if rotated away")
-
- class RotatingFileAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.handlers.RotatingFileHandler(
- filename = values[0],
- maxBytes = int(values[1]) * 1024,
- backupCount = int(values[2]))
-
- group.add_argument("--log-rotating-file", action = RotatingFileAction,
- nargs = 3, metavar = ("FILENAME", "KBYTES", "COUNT"),
- help = "send logging to rotating file")
-
- class TimedRotatingFileAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.handlers.TimedRotatingFileHandler(
- filename = values[0],
- interval = int(values[1]),
- backupCount = int(values[2]),
- when = "H",
- utc = True)
-
- group.add_argument("--log-timed-rotating-file", action = TimedRotatingFileAction,
- nargs = 3, metavar = ("FILENAME", "HOURS", "COUNT"),
- help = "send logging to timed rotating file")
-
- if default_thunk is None:
- default_thunk = lambda: logging.handlers.SysLogHandler(address = syslog_address, facility = "daemon")
-
- parser.set_defaults(log_handler = default_thunk)
-
-
-def init(ident = None, args = None):
- """
- Initialize logging system.
-
- Default logging destination is stderr if "args" is not specified.
- """
-
- # pylint: disable=E1103
-
- if ident is None:
- ident = os.path.basename(sys.argv[0])
-
- if args is None:
- args = argparse.Namespace(log_level = logging.WARNING,
- log_handler = logging.StreamHandler)
-
- handler = args.log_handler()
- handler.setFormatter(Formatter(ident, handler))
-
- root_logger = logging.getLogger()
- root_logger.addHandler(handler)
- root_logger.setLevel(args.log_level)
-
- if ident and have_setproctitle and use_setproctitle:
- if proctitle_extra:
- setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra))
- else:
- setproctitle.setproctitle(ident)
-
def class_logger(module_logger, attribute = "logger"):
- """
- Class decorator to add a class-level Logger object as a class
- attribute. This allows control of debugging messages at the class
- level rather than just the module level.
+ """
+ Class decorator to add a class-level Logger object as a class
+ attribute. This allows control of debugging messages at the class
+ level rather than just the module level.
- This decorator takes the module logger as an argument.
- """
+ This decorator takes the module logger as an argument.
+ """
- def decorator(cls):
- setattr(cls, attribute, module_logger.getChild(cls.__name__))
- return cls
- return decorator
+ def decorator(cls):
+ setattr(cls, attribute, module_logger.getChild(cls.__name__))
+ return cls
+ return decorator
def log_repr(obj, *tokens):
- """
- Constructor for __repr__() strings, handles suppression of Python
- IDs as needed, includes self_handle when available.
- """
-
- # pylint: disable=W0702
-
- words = ["%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)]
- try:
- words.append("{%s}" % obj.self.self_handle)
- except:
- pass
+ """
+ Constructor for __repr__() strings, handles suppression of Python
+ IDs as needed, includes tenant_handle when available.
+ """
- for token in tokens:
- if token is not None:
- try:
- s = str(token)
- except:
- s = "???"
- logger.exception("Failed to generate repr() string for object of type %r", type(token))
- if s:
- words.append(s)
-
- if show_python_ids:
- words.append(" at %#x" % id(obj))
-
- return "<" + " ".join(words) + ">"
+ words = ["%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)]
+ try:
+ words.append("{%s}" % obj.tenant.tenant_handle)
+ except:
+ pass
+
+ for token in tokens:
+ if token is not None:
+ try:
+ s = str(token)
+ except:
+ s = "???"
+ logger.exception("Failed to generate repr() string for object of type %r", type(token))
+ if s:
+ words.append(s)
+
+ if show_python_ids:
+ words.append(" at %#x" % id(obj))
+
+ return "<" + " ".join(words) + ">"
+
+
+def show_stack(stack_logger = None):
+ """
+ Log a stack trace.
+ """
+
+ if stack_logger is None:
+ stack_logger = logger
+
+ for frame in tb.format_stack():
+ for line in frame.split("\n"):
+ if line:
+ stack_logger.debug("%s", line.rstrip())
diff --git a/rpki/myrpki.py b/rpki/myrpki.py
index 2ae912f0..929c2a70 100644
--- a/rpki/myrpki.py
+++ b/rpki/myrpki.py
@@ -19,5 +19,5 @@ This is a tombstone for a program that no longer exists.
"""
if __name__ != "__main__": # sic -- don't break regression tests
- import sys
- sys.exit('"myrpki" is obsolete. Please use "rpkic" instead.')
+ import sys
+ sys.exit('"myrpki" is obsolete. Please use "rpkic" instead.')
diff --git a/rpki/mysql_import.py b/rpki/mysql_import.py
index 538e1916..bbb7ac22 100644
--- a/rpki/mysql_import.py
+++ b/rpki/mysql_import.py
@@ -52,11 +52,11 @@ from __future__ import with_statement
import warnings
if hasattr(warnings, "catch_warnings"):
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", DeprecationWarning)
- import MySQLdb
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ import MySQLdb
else:
- import MySQLdb
+ import MySQLdb
import _mysql_exceptions
diff --git a/rpki/oids.py b/rpki/oids.py
index 9fa30a04..b27be6f3 100644
--- a/rpki/oids.py
+++ b/rpki/oids.py
@@ -57,6 +57,7 @@ id_ad_caRepository = "1.3.6.1.5.5.7.48.5"
id_ad_signedObjectRepository = "1.3.6.1.5.5.7.48.9"
id_ad_rpkiManifest = "1.3.6.1.5.5.7.48.10"
id_ad_signedObject = "1.3.6.1.5.5.7.48.11"
+id_ad_rpkiNotify = "1.3.6.1.5.5.7.48.13"
commonName = "2.5.4.3"
serialNumber = "2.5.4.5"
countryName = "2.5.4.6"
@@ -81,22 +82,21 @@ id_sha256 = "2.16.840.1.101.3.4.2.1"
_oid2name = {}
for _sym in dir():
- if not _sym.startswith("_"):
- _val = globals()[_sym]
- if not isinstance(_val, str) or not all(_v.isdigit() for _v in _val.split(".")):
- raise ValueError("Bad OID definition: %s = %r" % (_sym, _val))
- _oid2name[_val] = _sym.replace("_", "-")
+ if not _sym.startswith("_"):
+ _val = globals()[_sym]
+ if not isinstance(_val, str) or not all(_v.isdigit() for _v in _val.split(".")):
+ raise ValueError("Bad OID definition: %s = %r" % (_sym, _val))
+ _oid2name[_val] = _sym.replace("_", "-")
-# pylint: disable=W0631
-del _sym
+del _sym # pylint: disable=W0631
del _val
def oid2name(oid):
- """
- Translate an OID into a string suitable for printing.
- """
+ """
+ Translate an OID into a string suitable for printing.
+ """
- if not isinstance(oid, (str, unicode)) or not all(o.isdigit() for o in oid.split(".")):
- raise ValueError("Parameter does not look like an OID string: " + repr(oid))
+ if not isinstance(oid, (str, unicode)) or not all(o.isdigit() for o in oid.split(".")):
+ raise ValueError("Parameter does not look like an OID string: " + repr(oid))
- return _oid2name.get(oid, oid)
+ return _oid2name.get(oid, oid)
diff --git a/rpki/old_irdbd.py b/rpki/old_irdbd.py
index 6c026a31..c08ce362 100644
--- a/rpki/old_irdbd.py
+++ b/rpki/old_irdbd.py
@@ -25,12 +25,14 @@ and perhaps still useful as a minimal example. This does NOT work with
the GUI, rpkic, or any of the other more recent tools.
"""
+# pylint: skip-file
+
import os
import time
import logging
import argparse
import urlparse
-import rpki.http
+import rpki.http_simple
import rpki.config
import rpki.resource_set
import rpki.relaxng
@@ -46,279 +48,266 @@ logger = logging.getLogger(__name__)
class main(object):
- def handle_list_resources(self, q_pdu, r_msg):
-
- r_pdu = rpki.left_right.list_resources_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.child_handle = q_pdu.child_handle
-
- self.cur.execute(
- """
- SELECT registrant_id, valid_until
- FROM registrant
- WHERE registry_handle = %s AND registrant_handle = %s
- """,
- (q_pdu.self_handle, q_pdu.child_handle))
-
- if self.cur.rowcount != 1:
- raise rpki.exceptions.NotInDatabase(
- "This query should have produced a single exact match, something's messed up"
- " (rowcount = %d, self_handle = %s, child_handle = %s)"
- % (self.cur.rowcount, q_pdu.self_handle, q_pdu.child_handle))
-
- registrant_id, valid_until = self.cur.fetchone()
-
- r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
-
- r_pdu.asn = rpki.resource_set.resource_set_as.from_sql(
- self.cur,
- """
- SELECT start_as, end_as
- FROM registrant_asn
- WHERE registrant_id = %s
- """,
- (registrant_id,))
-
- r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql(
- self.cur,
- """
- SELECT start_ip, end_ip
- FROM registrant_net
- WHERE registrant_id = %s AND version = 4
- """,
- (registrant_id,))
-
- r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql(
- self.cur,
- """
- SELECT start_ip, end_ip
- FROM registrant_net
- WHERE registrant_id = %s AND version = 6
- """,
- (registrant_id,))
-
- r_msg.append(r_pdu)
-
-
- def handle_list_roa_requests(self, q_pdu, r_msg):
-
- self.cur.execute(
- "SELECT roa_request_id, asn FROM roa_request WHERE self_handle = %s",
- (q_pdu.self_handle,))
-
- for roa_request_id, asn in self.cur.fetchall():
-
- r_pdu = rpki.left_right.list_roa_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.asn = asn
-
- r_pdu.ipv4 = rpki.resource_set.roa_prefix_set_ipv4.from_sql(
- self.cur,
- """
- SELECT prefix, prefixlen, max_prefixlen
- FROM roa_request_prefix
- WHERE roa_request_id = %s AND version = 4
- """,
- (roa_request_id,))
-
- r_pdu.ipv6 = rpki.resource_set.roa_prefix_set_ipv6.from_sql(
- self.cur,
- """
- SELECT prefix, prefixlen, max_prefixlen
- FROM roa_request_prefix
- WHERE roa_request_id = %s AND version = 6
- """,
- (roa_request_id,))
-
- r_msg.append(r_pdu)
-
-
- def handle_list_ghostbuster_requests(self, q_pdu, r_msg):
-
- self.cur.execute(
- """
- SELECT vcard
- FROM ghostbuster_request
- WHERE self_handle = %s AND parent_handle = %s
- """,
- (q_pdu.self_handle, q_pdu.parent_handle))
-
- vcards = [result[0] for result in self.cur.fetchall()]
-
- if not vcards:
-
- self.cur.execute(
- """
- SELECT vcard
- FROM ghostbuster_request
- WHERE self_handle = %s AND parent_handle IS NULL
- """,
- (q_pdu.self_handle,))
-
- vcards = [result[0] for result in self.cur.fetchall()]
-
- for vcard in vcards:
- r_pdu = rpki.left_right.list_ghostbuster_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.parent_handle = q_pdu.parent_handle
- r_pdu.vcard = vcard
- r_msg.append(r_pdu)
-
-
- def handle_list_ee_certificate_requests(self, q_pdu, r_msg):
-
- self.cur.execute(
- """
- SELECT ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until
- FROM ee_certificate
- WHERE self_handle = %s
- """,
- (q_pdu.self_handle,))
-
- for ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until in self.cur.fetchall():
-
- r_pdu = rpki.left_right.list_ee_certificate_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
- r_pdu.pkcs10 = rpki.x509.PKCS10(DER = pkcs10)
- r_pdu.gski = gski
- r_pdu.cn = cn
- r_pdu.sn = sn
- r_pdu.eku = eku
-
- r_pdu.asn = rpki.resource_set.resource_set_as.from_sql(
- self.cur,
- """
- SELECT start_as, end_as
- FROM ee_certificate_asn
- WHERE ee_certificate_id = %s
- """,
- (ee_certificate_id,))
-
- r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql(
- self.cur,
- """
- SELECT start_ip, end_ip
- FROM ee_certificate_net
- WHERE ee_certificate_id = %s AND version = 4
- """,
- (ee_certificate_id,))
-
- r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql(
- self.cur,
- """
- SELECT start_ip, end_ip
- FROM ee_certificate_net
- WHERE ee_certificate_id = %s AND version = 6
- """,
- (ee_certificate_id,))
-
- r_msg.append(r_pdu)
-
-
- handle_dispatch = {
- rpki.left_right.list_resources_elt : handle_list_resources,
- rpki.left_right.list_roa_requests_elt : handle_list_roa_requests,
- rpki.left_right.list_ghostbuster_requests_elt : handle_list_ghostbuster_requests,
- rpki.left_right.list_ee_certificate_requests_elt : handle_list_ee_certificate_requests }
-
- def handler(self, query, path, cb):
- try:
-
- self.db.ping(True)
-
- r_msg = rpki.left_right.msg.reply()
-
- try:
-
- q_msg = rpki.left_right.cms_msg(DER = query).unwrap((self.bpki_ta, self.rpkid_cert))
-
- if not isinstance(q_msg, rpki.left_right.msg) or not q_msg.is_query():
- raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_msg)
-
- for q_pdu in q_msg:
-
- try:
+ def handle_list_resources(self, q_pdu, r_msg):
+
+ r_pdu = rpki.left_right.list_resources_elt()
+ r_pdu.tag = q_pdu.tag
+ r_pdu.self_handle = q_pdu.self_handle
+ r_pdu.child_handle = q_pdu.child_handle
+
+ self.cur.execute(
+ """
+ SELECT registrant_id, valid_until
+ FROM registrant
+ WHERE registry_handle = %s AND registrant_handle = %s
+ """,
+ (q_pdu.self_handle, q_pdu.child_handle))
+
+ if self.cur.rowcount != 1:
+ raise rpki.exceptions.NotInDatabase(
+ "This query should have produced a single exact match, something's messed up"
+ " (rowcount = %d, self_handle = %s, child_handle = %s)"
+ % (self.cur.rowcount, q_pdu.self_handle, q_pdu.child_handle))
+
+ registrant_id, valid_until = self.cur.fetchone()
+
+ r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ r_pdu.asn = rpki.resource_set.resource_set_as.from_sql(
+ self.cur,
+ """
+ SELECT start_as, end_as
+ FROM registrant_asn
+ WHERE registrant_id = %s
+ """,
+ (registrant_id,))
+
+ r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql(
+ self.cur,
+ """
+ SELECT start_ip, end_ip
+ FROM registrant_net
+ WHERE registrant_id = %s AND version = 4
+ """,
+ (registrant_id,))
+
+ r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql(
+ self.cur,
+ """
+ SELECT start_ip, end_ip
+ FROM registrant_net
+ WHERE registrant_id = %s AND version = 6
+ """,
+ (registrant_id,))
+
+ r_msg.append(r_pdu)
+
+
+ def handle_list_roa_requests(self, q_pdu, r_msg):
+
+ self.cur.execute(
+ "SELECT roa_request_id, asn FROM roa_request WHERE self_handle = %s",
+ (q_pdu.self_handle,))
+
+ for roa_request_id, asn in self.cur.fetchall():
+
+ r_pdu = rpki.left_right.list_roa_requests_elt()
+ r_pdu.tag = q_pdu.tag
+ r_pdu.self_handle = q_pdu.self_handle
+ r_pdu.asn = asn
+
+ r_pdu.ipv4 = rpki.resource_set.roa_prefix_set_ipv4.from_sql(
+ self.cur,
+ """
+ SELECT prefix, prefixlen, max_prefixlen
+ FROM roa_request_prefix
+ WHERE roa_request_id = %s AND version = 4
+ """,
+ (roa_request_id,))
+
+ r_pdu.ipv6 = rpki.resource_set.roa_prefix_set_ipv6.from_sql(
+ self.cur,
+ """
+ SELECT prefix, prefixlen, max_prefixlen
+ FROM roa_request_prefix
+ WHERE roa_request_id = %s AND version = 6
+ """,
+ (roa_request_id,))
+
+ r_msg.append(r_pdu)
+
+
+ def handle_list_ghostbuster_requests(self, q_pdu, r_msg):
+
+ self.cur.execute(
+ """
+ SELECT vcard
+ FROM ghostbuster_request
+ WHERE self_handle = %s AND parent_handle = %s
+ """,
+ (q_pdu.self_handle, q_pdu.parent_handle))
+
+ vcards = [result[0] for result in self.cur.fetchall()]
+
+ if not vcards:
+
+ self.cur.execute(
+ """
+ SELECT vcard
+ FROM ghostbuster_request
+ WHERE self_handle = %s AND parent_handle IS NULL
+ """,
+ (q_pdu.self_handle,))
+
+ vcards = [result[0] for result in self.cur.fetchall()]
+
+ for vcard in vcards:
+ r_pdu = rpki.left_right.list_ghostbuster_requests_elt()
+ r_pdu.tag = q_pdu.tag
+ r_pdu.self_handle = q_pdu.self_handle
+ r_pdu.parent_handle = q_pdu.parent_handle
+ r_pdu.vcard = vcard
+ r_msg.append(r_pdu)
+
+
+ def handle_list_ee_certificate_requests(self, q_pdu, r_msg):
+
+ self.cur.execute(
+ """
+ SELECT ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until
+ FROM ee_certificate
+ WHERE self_handle = %s
+ """,
+ (q_pdu.self_handle,))
+
+ for ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until in self.cur.fetchall():
+
+ r_pdu = rpki.left_right.list_ee_certificate_requests_elt()
+ r_pdu.tag = q_pdu.tag
+ r_pdu.self_handle = q_pdu.self_handle
+ r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
+ r_pdu.pkcs10 = rpki.x509.PKCS10(DER = pkcs10)
+ r_pdu.gski = gski
+ r_pdu.cn = cn
+ r_pdu.sn = sn
+ r_pdu.eku = eku
+
+ r_pdu.asn = rpki.resource_set.resource_set_as.from_sql(
+ self.cur,
+ """
+ SELECT start_as, end_as
+ FROM ee_certificate_asn
+ WHERE ee_certificate_id = %s
+ """,
+ (ee_certificate_id,))
+
+ r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql(
+ self.cur,
+ """
+ SELECT start_ip, end_ip
+ FROM ee_certificate_net
+ WHERE ee_certificate_id = %s AND version = 4
+ """,
+ (ee_certificate_id,))
+
+ r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql(
+ self.cur,
+ """
+ SELECT start_ip, end_ip
+ FROM ee_certificate_net
+ WHERE ee_certificate_id = %s AND version = 6
+ """,
+ (ee_certificate_id,))
+
+ r_msg.append(r_pdu)
+
+
+ handle_dispatch = {
+ rpki.left_right.list_resources_elt : handle_list_resources,
+ rpki.left_right.list_roa_requests_elt : handle_list_roa_requests,
+ rpki.left_right.list_ghostbuster_requests_elt : handle_list_ghostbuster_requests,
+ rpki.left_right.list_ee_certificate_requests_elt : handle_list_ee_certificate_requests }
+
+ def handler(self, request, q_der):
+ try:
+
+ self.db.ping(True)
+
+ r_msg = rpki.left_right.msg.reply()
try:
- h = self.handle_dispatch[type(q_pdu)]
- except KeyError:
- raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_pdu)
- else:
- h(self, q_pdu, r_msg)
- except (rpki.async.ExitNow, SystemExit):
- raise
+ q_msg = rpki.left_right.cms_msg_saxify(DER = q_der).unwrap((self.bpki_ta, self.rpkid_cert))
+
+ if not isinstance(q_msg, rpki.left_right.msg) or not q_msg.is_query():
+ raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_msg)
- except Exception, e:
- logger.exception("Exception serving PDU %r", q_pdu)
- r_msg.append(rpki.left_right.report_error_elt.from_exception(e, q_pdu.self_handle, q_pdu.tag))
+ for q_pdu in q_msg:
- except (rpki.async.ExitNow, SystemExit):
- raise
+ try:
- except Exception, e:
- logger.exception("Exception decoding query")
- r_msg.append(rpki.left_right.report_error_elt.from_exception(e))
+ try:
+ h = self.handle_dispatch[type(q_pdu)]
+ except KeyError:
+ raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_pdu)
+ else:
+ h(self, q_pdu, r_msg)
- cb(200, body = rpki.left_right.cms_msg().wrap(r_msg, self.irdbd_key, self.irdbd_cert))
+ except Exception, e:
+ logger.exception("Exception serving PDU %r", q_pdu)
+ r_msg.append(rpki.left_right.report_error_elt.from_exception(e, q_pdu.self_handle, q_pdu.tag))
- except (rpki.async.ExitNow, SystemExit):
- raise
+ except Exception, e:
+ logger.exception("Exception decoding query")
+ r_msg.append(rpki.left_right.report_error_elt.from_exception(e))
- except Exception, e:
- logger.exception("Unhandled exception, returning HTTP failure")
- cb(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+ request.send_cms_response(rpki.left_right.cms_msg_saxify().wrap(r_msg, self.irdbd_key, self.irdbd_cert))
+ except Exception, e:
+ logger.exception("Unhandled exception, returning HTTP failure")
+ request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
- def __init__(self):
- os.environ["TZ"] = "UTC"
- time.tzset()
+ def __init__(self):
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize (ignored, old_irdbd never daemonizes)")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
+ os.environ["TZ"] = "UTC"
+ time.tzset()
- rpki.log.init("irdbd", args)
+ self.cfg = rpki.config.argparser(section = "irdbd", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground", default = False,
+ help = "do not daemonize (ignored, old_irdbd never daemonizes)")
+ self.cfg.add_logging_arguments()
+ args = parser.parse_args()
- self.cfg = rpki.config.parser(args.config, "irdbd")
+ cfg.configure_logging(args = args, ident = "irdbd")
- startup_msg = self.cfg.get("startup-message", "")
- if startup_msg:
- logger.info(startup_msg)
+ startup_msg = self.cfg.get("startup-message", "")
+ if startup_msg:
+ logger.info(startup_msg)
- self.cfg.set_global_flags()
+ self.cfg.set_global_flags()
- self.db = MySQLdb.connect(user = self.cfg.get("sql-username"),
- db = self.cfg.get("sql-database"),
- passwd = self.cfg.get("sql-password"))
+ self.db = MySQLdb.connect(user = self.cfg.get("sql-username"),
+ db = self.cfg.get("sql-database"),
+ passwd = self.cfg.get("sql-password"))
- self.cur = self.db.cursor()
- self.db.autocommit(True)
+ self.cur = self.db.cursor()
+ self.db.autocommit(True)
- self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
- self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert"))
- self.irdbd_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdbd-cert"))
- self.irdbd_key = rpki.x509.RSA( Auto_update = self.cfg.get("irdbd-key"))
+ self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
+ self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert"))
+ self.irdbd_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdbd-cert"))
+ self.irdbd_key = rpki.x509.RSA( Auto_update = self.cfg.get("irdbd-key"))
- u = urlparse.urlparse(self.cfg.get("http-url"))
+ u = urlparse.urlparse(self.cfg.get("http-url"))
- assert u.scheme in ("", "http") and \
- u.username is None and \
- u.password is None and \
- u.params == "" and \
- u.query == "" and \
- u.fragment == ""
+ assert u.scheme in ("", "http") and \
+ u.username is None and \
+ u.password is None and \
+ u.params == "" and \
+ u.query == "" and \
+ u.fragment == ""
- rpki.http.server(host = u.hostname or "localhost",
- port = u.port or 443,
- handlers = ((u.path, self.handler),))
+ rpki.http_simple.server(host = u.hostname or "localhost",
+ port = u.port or 443,
+ handlers = ((u.path, self.handler),))
diff --git a/rpki/pubd.py b/rpki/pubd.py
index 79315a78..389936bb 100644
--- a/rpki/pubd.py
+++ b/rpki/pubd.py
@@ -23,151 +23,285 @@ RPKI publication engine.
import os
import re
+import uuid
import time
+import socket
import logging
import argparse
+
import rpki.resource_set
-import rpki.up_down
import rpki.x509
-import rpki.sql
-import rpki.http
import rpki.config
import rpki.exceptions
-import rpki.relaxng
import rpki.log
import rpki.publication
+import rpki.publication_control
import rpki.daemonize
+import rpki.http_simple
+
+from lxml.etree import Element, SubElement
logger = logging.getLogger(__name__)
+
class main(object):
- """
- Main program for pubd.
- """
+ """
+ Main program for pubd.
+ """
- def __init__(self):
+ def __init__(self):
- os.environ["TZ"] = "UTC"
- time.tzset()
+ os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.pubd")
+ time.tzset()
- self.irbe_cms_timestamp = None
+ self.irbe_cms_timestamp = None
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize")
- parser.add_argument("--pidfile",
- help = "override default location of pid file")
- parser.add_argument("--profile",
- help = "enable profiling, saving data to PROFILE")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
+ self.cfg = rpki.config.argparser(section = "pubd", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground",
+ default = False,
+ help = "whether to daemonize")
+ self.cfg.add_argument("--pidfile",
+ default = os.path.join(rpki.daemonize.default_pid_directory,
+ "pubd.pid"),
+ help = "override default location of pid file")
+ self.cfg.add_argument("--profile",
+ default = "",
+ help = "enable profiling, saving data to PROFILE")
+ self.cfg.add_logging_arguments()
+ args = self.cfg.argparser.parse_args()
- self.profile = args.profile
+ self.profile = args.profile
- rpki.log.init("pubd", args)
+ self.cfg.configure_logging(args = args, ident = "pubd")
- self.cfg = rpki.config.parser(args.config, "pubd")
- self.cfg.set_global_flags()
+ try:
+ self.cfg.set_global_flags()
- if not args.foreground:
- rpki.daemonize.daemon(pidfile = args.pidfile)
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
- if self.profile:
- import cProfile
- prof = cProfile.Profile()
- try:
- prof.runcall(self.main)
- finally:
- prof.dump_stats(self.profile)
- logger.info("Dumped profile data to %s", self.profile)
- else:
- self.main()
+ if self.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(self.main)
+ finally:
+ prof.dump_stats(self.profile)
+ logger.info("Dumped profile data to %s", self.profile)
+ else:
+ self.main()
- def main(self):
+ except:
+      logger.exception("Unhandled exception in rpki.pubd.main()")
+ sys.exit(1)
- if self.profile:
- logger.info("Running in profile mode with output to %s", self.profile)
- self.sql = rpki.sql.session(self.cfg)
+ def main(self):
- self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
- self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert"))
- self.pubd_cert = rpki.x509.X509(Auto_update = self.cfg.get("pubd-cert"))
- self.pubd_key = rpki.x509.RSA( Auto_update = self.cfg.get("pubd-key"))
+ if self.profile:
+ logger.info("Running in profile mode with output to %s", self.profile)
- self.http_server_host = self.cfg.get("server-host", "")
- self.http_server_port = self.cfg.getint("server-port")
+ import django
+ django.setup()
- self.publication_base = self.cfg.get("publication-base", "publication/")
+ global rpki # pylint: disable=W0602
+ import rpki.pubdb # pylint: disable=W0621
- self.publication_multimodule = self.cfg.getboolean("publication-multimodule", False)
+ self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
+ self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert"))
+ self.pubd_cert = rpki.x509.X509(Auto_update = self.cfg.get("pubd-cert"))
+ self.pubd_key = rpki.x509.RSA( Auto_update = self.cfg.get("pubd-key"))
+ self.pubd_crl = rpki.x509.CRL( Auto_update = self.cfg.get("pubd-crl"))
- rpki.http.server(
- host = self.http_server_host,
- port = self.http_server_port,
- handlers = (("/control", self.control_handler),
- ("/client/", self.client_handler)))
+ self.http_server_host = self.cfg.get("server-host", "")
+ self.http_server_port = self.cfg.getint("server-port")
- def handler_common(self, query, client, cb, certs, crl = None):
- """
- Common PDU handler code.
- """
+ self.publication_base = self.cfg.get("publication-base", "publication/")
- def done(r_msg):
- reply = rpki.publication.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert, crl)
- self.sql.sweep()
- cb(reply)
+ self.rrdp_base_uri = self.cfg.get("rrdp-base-uri", "https://%s/rrdp/" % socket.getfqdn())
+ self.rrdp_expiration_interval = rpki.sundial.timedelta.parse(self.cfg.get("rrdp-expiration-interval", "6h"))
+ self.rrdp_publication_base = self.cfg.get("rrdp-publication-base", "rrdp-publication/")
- q_cms = rpki.publication.cms_msg(DER = query)
- q_msg = q_cms.unwrap(certs)
- if client is None:
- self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, "control")
- else:
- q_cms.check_replay_sql(client, client.client_handle)
- q_msg.serve_top_level(self, client, done)
+ try:
+ self.session = rpki.pubdb.models.Session.objects.get()
+ except rpki.pubdb.models.Session.DoesNotExist:
+ self.session = rpki.pubdb.models.Session.objects.create(uuid = str(uuid.uuid4()), serial = 0)
- def control_handler(self, query, path, cb):
- """
- Process one PDU from the IRBE.
- """
+ rpki.http_simple.server(
+ host = self.http_server_host,
+ port = self.http_server_port,
+ handlers = (("/control", self.control_handler),
+ ("/client/", self.client_handler)))
- def done(body):
- cb(200, body = body)
- try:
- self.handler_common(query, None, done, (self.bpki_ta, self.irbe_cert))
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Unhandled exception processing control query, path %r", path)
- cb(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+ def control_handler(self, request, q_der):
+ """
+ Process one PDU from the IRBE.
+ """
- client_url_regexp = re.compile("/client/([-A-Z0-9_/]+)$", re.I)
+ from django.db import transaction, connection
- def client_handler(self, query, path, cb):
- """
- Process one PDU from a client.
- """
+ try:
+ connection.cursor() # Reconnect to mysqld if necessary
+ q_cms = rpki.publication_control.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((self.bpki_ta, self.irbe_cert))
+ self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, "control")
+ if q_msg.get("type") != "query":
+ raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type"))
+ r_msg = Element(rpki.publication_control.tag_msg, nsmap = rpki.publication_control.nsmap,
+ type = "reply", version = rpki.publication_control.version)
+
+ try:
+ q_pdu = None
+ with transaction.atomic():
+
+ for q_pdu in q_msg:
+ if q_pdu.tag != rpki.publication_control.tag_client:
+ raise rpki.exceptions.BadQuery("PDU is %s, expected client" % q_pdu.tag)
+ client_handle = q_pdu.get("client_handle")
+ action = q_pdu.get("action")
+ if client_handle is None:
+ logger.info("Control %s request", action)
+ else:
+ logger.info("Control %s request for %s", action, client_handle)
+
+ if action in ("get", "list"):
+ if action == "get":
+ clients = rpki.pubdb.models.Client.objects.get(client_handle = client_handle),
+ else:
+ clients = rpki.pubdb.models.Client.objects.all()
+ for client in clients:
+ r_pdu = SubElement(r_msg, q_pdu.tag, action = action,
+ client_handle = client.client_handle, base_uri = client.base_uri)
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+ SubElement(r_pdu, rpki.publication_control.tag_bpki_cert).text = client.bpki_cert.get_Base64()
+ if client.bpki_glue is not None:
+ SubElement(r_pdu, rpki.publication_control.tag_bpki_glue).text = client.bpki_glue.get_Base64()
+
+ if action in ("create", "set"):
+ if action == "create":
+ client = rpki.pubdb.models.Client(client_handle = client_handle)
+ else:
+ client = rpki.pubdb.models.Client.objects.get(client_handle = client_handle)
+ if q_pdu.get("base_uri"):
+ client.base_uri = q_pdu.get("base_uri")
+ bpki_cert = q_pdu.find(rpki.publication_control.tag_bpki_cert)
+ if bpki_cert is not None:
+ client.bpki_cert = rpki.x509.X509(Base64 = bpki_cert.text)
+ bpki_glue = q_pdu.find(rpki.publication_control.tag_bpki_glue)
+ if bpki_glue is not None:
+ client.bpki_glue = rpki.x509.X509(Base64 = bpki_glue.text)
+ if q_pdu.get("clear_replay_protection") == "yes":
+ client.last_cms_timestamp = None
+ client.save()
+ logger.debug("Stored client_handle %s, base_uri %s, bpki_cert %r, bpki_glue %r, last_cms_timestamp %s",
+ client.client_handle, client.base_uri, client.bpki_cert, client.bpki_glue,
+ client.last_cms_timestamp)
+ r_pdu = SubElement(r_msg, q_pdu.tag, action = action, client_handle = client_handle)
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ if action == "destroy":
+ rpki.pubdb.models.Client.objects.filter(client_handle = client_handle).delete()
+ r_pdu = SubElement(r_msg, q_pdu.tag, action = action, client_handle = client_handle)
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ except Exception as e:
+ logger.exception("Exception processing PDU %r action = %s client_handle = %s", q_pdu, q_pdu.get("action"), q_pdu.get("client_handle"))
+ r_pdu = SubElement(r_msg, rpki.publication_control.tag_report_error, error_code = e.__class__.__name__)
+ r_pdu.text = str(e)
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ request.send_cms_response(rpki.publication_control.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert))
+
+ except Exception as e:
+ logger.exception("Unhandled exception processing control query, path %r", request.path)
+ request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+
+
+ client_url_regexp = re.compile("/client/([-A-Z0-9_/]+)$", re.I)
+
+ def client_handler(self, request, q_der):
+ """
+ Process one PDU from a client.
+ """
+
+ from django.db import transaction, connection
+
+ try:
+ connection.cursor() # Reconnect to mysqld if necessary
+ match = self.client_url_regexp.search(request.path)
+ if match is None:
+ raise rpki.exceptions.BadContactURL("Bad path: %s" % request.path)
+ client = rpki.pubdb.models.Client.objects.get(client_handle = match.group(1))
+ q_cms = rpki.publication.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((self.bpki_ta, client.bpki_cert, client.bpki_glue))
+ client.last_cms_timestamp = q_cms.check_replay(client.last_cms_timestamp, client.client_handle)
+ client.save()
+ if q_msg.get("type") != "query":
+ raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type"))
+ r_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "reply", version = rpki.publication.version)
+ delta = None
+ try:
+ with transaction.atomic():
+ for q_pdu in q_msg:
+ if q_pdu.get("uri"):
+ logger.info("Client %s request for %s", q_pdu.tag, q_pdu.get("uri"))
+ else:
+ logger.info("Client %s request", q_pdu.tag)
+
+ if q_pdu.tag == rpki.publication.tag_list:
+ for obj in client.publishedobject_set.all():
+ r_pdu = SubElement(r_msg, q_pdu.tag, uri = obj.uri, hash = obj.hash)
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ else:
+ assert q_pdu.tag in (rpki.publication.tag_publish, rpki.publication.tag_withdraw)
+ if delta is None:
+ delta = self.session.new_delta(rpki.sundial.now() + self.rrdp_expiration_interval)
+ client.check_allowed_uri(q_pdu.get("uri"))
+ if q_pdu.tag == rpki.publication.tag_publish:
+ der = q_pdu.text.decode("base64")
+ logger.info("Publishing %s", rpki.x509.uri_dispatch(q_pdu.get("uri"))(DER = der).tracking_data(q_pdu.get("uri")))
+ delta.publish(client, der, q_pdu.get("uri"), q_pdu.get("hash"))
+ else:
+ logger.info("Withdrawing %s", q_pdu.get("uri"))
+ delta.withdraw(client, q_pdu.get("uri"), q_pdu.get("hash"))
+ r_pdu = SubElement(r_msg, q_pdu.tag, uri = q_pdu.get("uri"))
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ if delta is not None:
+ delta.activate(self.rrdp_publication_base)
+ self.session.expire_deltas()
+
+ except Exception as e:
+ if isinstance(e, (rpki.exceptions.ExistingObjectAtURI,
+ rpki.exceptions.DifferentObjectAtURI,
+ rpki.exceptions.NoObjectAtURI)):
+ logger.warn("Database synchronization error processing PDU %r hash %s uri %s: %s",
+ q_pdu, q_pdu.get("hash"), q_pdu.get("uri"), e)
+ else:
+ logger.exception("Exception processing PDU %r hash = %s uri = %s",
+ q_pdu, q_pdu.get("hash"), q_pdu.get("uri"))
+ r_pdu = SubElement(r_msg, rpki.publication.tag_report_error, error_code = e.__class__.__name__)
+ r_pdu.text = str(e)
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ else:
+ if delta is not None:
+ self.session.synchronize_rrdp_files(self.rrdp_publication_base, self.rrdp_base_uri)
+ delta.update_rsync_files(self.publication_base)
+
+ request.send_cms_response(rpki.publication.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert, self.pubd_crl))
- def done(body):
- cb(200, body = body)
-
- try:
- match = self.client_url_regexp.search(path)
- if match is None:
- raise rpki.exceptions.BadContactURL("Bad path: %s" % path)
- client_handle = match.group(1)
- client = rpki.publication.client_elt.sql_fetch_where1(self, "client_handle = %s", (client_handle,))
- if client is None:
- raise rpki.exceptions.ClientNotFound("Could not find client %s" % client_handle)
- config = rpki.publication.config_elt.fetch(self)
- if config is None or config.bpki_crl is None:
- raise rpki.exceptions.CMSCRLNotSet
- self.handler_common(query, client, done, (self.bpki_ta, client.bpki_cert, client.bpki_glue), config.bpki_crl)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Unhandled exception processing client query, path %r", path)
- cb(500, reason = "Could not process PDU: %s" % e)
+ except Exception as e:
+ logger.exception("Unhandled exception processing client query, path %r", request.path)
+ request.send_error(500, "Could not process PDU: %s" % e)
diff --git a/rpki/pubdb/__init__.py b/rpki/pubdb/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/pubdb/__init__.py
diff --git a/rpki/pubdb/migrations/0001_initial.py b/rpki/pubdb/migrations/0001_initial.py
new file mode 100644
index 00000000..e278d7dd
--- /dev/null
+++ b/rpki/pubdb/migrations/0001_initial.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import rpki.fields
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Client',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('client_handle', models.CharField(unique=True, max_length=255)),
+ ('base_uri', models.TextField()),
+ ('bpki_cert', rpki.fields.CertificateField()),
+ ('bpki_glue', rpki.fields.CertificateField(null=True)),
+ ('last_cms_timestamp', rpki.fields.SundialField(null=True, blank=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Delta',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('serial', models.BigIntegerField()),
+ ('xml', models.TextField()),
+ ('hash', models.CharField(max_length=64)),
+ ('expires', rpki.fields.SundialField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='PublishedObject',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.CharField(max_length=255)),
+ ('der', models.BinaryField()),
+ ('hash', models.CharField(max_length=64)),
+ ('client', models.ForeignKey(to='pubdb.Client')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Session',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uuid', models.CharField(unique=True, max_length=36)),
+ ('serial', models.BigIntegerField()),
+ ('snapshot', models.TextField(blank=True)),
+ ('hash', models.CharField(max_length=64, blank=True)),
+ ],
+ ),
+ migrations.AddField(
+ model_name='publishedobject',
+ name='session',
+ field=models.ForeignKey(to='pubdb.Session'),
+ ),
+ migrations.AddField(
+ model_name='delta',
+ name='session',
+ field=models.ForeignKey(to='pubdb.Session'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='publishedobject',
+ unique_together=set([('session', 'hash'), ('session', 'uri')]),
+ ),
+ ]
diff --git a/rpki/pubdb/migrations/0002_auto_20160221_0617.py b/rpki/pubdb/migrations/0002_auto_20160221_0617.py
new file mode 100644
index 00000000..a83ad3d3
--- /dev/null
+++ b/rpki/pubdb/migrations/0002_auto_20160221_0617.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('pubdb', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='session',
+ name='hash',
+ ),
+ migrations.RemoveField(
+ model_name='session',
+ name='snapshot',
+ ),
+ ]
diff --git a/rpki/pubdb/migrations/0003_remove_delta_xml.py b/rpki/pubdb/migrations/0003_remove_delta_xml.py
new file mode 100644
index 00000000..e2c0ce16
--- /dev/null
+++ b/rpki/pubdb/migrations/0003_remove_delta_xml.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('pubdb', '0002_auto_20160221_0617'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='delta',
+ name='xml',
+ ),
+ ]
diff --git a/rpki/pubdb/migrations/__init__.py b/rpki/pubdb/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/pubdb/migrations/__init__.py
diff --git a/rpki/pubdb/models.py b/rpki/pubdb/models.py
new file mode 100644
index 00000000..21508bed
--- /dev/null
+++ b/rpki/pubdb/models.py
@@ -0,0 +1,329 @@
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Django ORM models for pubd.
+"""
+
+from __future__ import unicode_literals
+from django.db import models
+from rpki.fields import CertificateField, SundialField
+from lxml.etree import Element, SubElement, ElementTree, xmlfile as XMLFile
+
+import os
+import logging
+import rpki.exceptions
+import rpki.relaxng
+import rpki.x509
+import rpki.POW
+
+logger = logging.getLogger(__name__)
+
+
+# pylint: disable=W5101
+
+# Some of this probably ought to move into a rpki.rrdp module.
+
+rrdp_xmlns = rpki.relaxng.rrdp.xmlns
+rrdp_nsmap = rpki.relaxng.rrdp.nsmap
+rrdp_version = "1"
+
+rrdp_tag_delta = rrdp_xmlns + "delta"
+rrdp_tag_notification = rrdp_xmlns + "notification"
+rrdp_tag_publish = rrdp_xmlns + "publish"
+rrdp_tag_snapshot = rrdp_xmlns + "snapshot"
+rrdp_tag_withdraw = rrdp_xmlns + "withdraw"
+
+
+# This would probably be useful to more than just this module, not
+# sure quite where to put it at the moment.
+
+def DERSubElement(elt, name, der, attrib = None, **kwargs):
+ """
+ Convenience wrapper around SubElement for use with Base64 text.
+ """
+
+ se = SubElement(elt, name, attrib, **kwargs)
+ se.text = rpki.x509.base64_with_linebreaks(der)
+ se.tail = "\n"
+ return se
+
+
+def sha256_file(f):
+ """
+ Read data from a file-like object, return hex-encoded sha256 hash.
+ """
+
+ h = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
+ while True:
+ x = f.read(8192)
+ if len(x) == 0:
+ return h.digest().encode("hex")
+ h.update(x)
+
+
+class Client(models.Model):
+ client_handle = models.CharField(unique = True, max_length = 255)
+ base_uri = models.TextField()
+ bpki_cert = CertificateField()
+ bpki_glue = CertificateField(null = True)
+ last_cms_timestamp = SundialField(blank = True, null = True)
+
+
+ def check_allowed_uri(self, uri):
+ """
+ Make sure that a target URI is within this client's allowed URI space.
+ """
+
+ if not uri.startswith(self.base_uri):
+ raise rpki.exceptions.ForbiddenURI
+
+
+class Session(models.Model):
+ uuid = models.CharField(unique = True, max_length=36)
+ serial = models.BigIntegerField()
+
+
+ def new_delta(self, expires):
+ """
+ Construct a new delta associated with this session.
+ """
+
+ # pylint: disable=W0201
+
+ delta = Delta(session = self,
+ serial = self.serial + 1,
+ expires = expires)
+ delta.xml = Element(rrdp_tag_delta,
+ nsmap = rrdp_nsmap,
+ version = rrdp_version,
+ session_id = self.uuid,
+ serial = str(delta.serial))
+ return delta
+
+
+ def expire_deltas(self):
+ """
+ Delete deltas whose expiration date has passed.
+ """
+
+ self.delta_set.filter(expires__lt = rpki.sundial.now()).delete()
+
+
+ @property
+ def snapshot_fn(self):
+ return "%s/snapshot/%s.xml" % (self.uuid, self.serial)
+
+
+ @property
+ def notification_fn(self):
+ return "notify.xml"
+
+
+ @staticmethod
+ def _rrdp_filename_to_uri(fn, rrdp_base_uri):
+ return "%s/%s" % (rrdp_base_uri.rstrip("/"), fn)
+
+
+ def write_snapshot_file(self, rrdp_publication_base):
+ fn = os.path.join(rrdp_publication_base, self.snapshot_fn)
+ tn = fn + ".%s.tmp" % os.getpid()
+ dn = os.path.dirname(fn)
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ with open(tn, "wb+") as f:
+ with XMLFile(f) as xf:
+ with xf.element(rrdp_tag_snapshot, nsmap = rrdp_nsmap,
+ version = rrdp_version, session_id = self.uuid, serial = str(self.serial)):
+ xf.write("\n")
+ for obj in self.publishedobject_set.all():
+ e = Element(rrdp_tag_publish, nsmap = rrdp_nsmap, uri = obj.uri)
+ e.text = rpki.x509.base64_with_linebreaks(obj.der)
+ xf.write(e, pretty_print = True)
+ f.seek(0)
+ h = sha256_file(f)
+ os.rename(tn, fn)
+ return h
+
+
+ def write_notification_xml(self, rrdp_base_uri, snapshot_hash, rrdp_publication_base):
+ xml = Element(rrdp_tag_notification, nsmap = rrdp_nsmap,
+ version = rrdp_version,
+ session_id = self.uuid,
+ serial = str(self.serial))
+ SubElement(xml, rrdp_tag_snapshot,
+ uri = self._rrdp_filename_to_uri(self.snapshot_fn, rrdp_base_uri),
+ hash = snapshot_hash)
+ for delta in self.delta_set.all():
+ SubElement(xml, rrdp_tag_delta,
+ uri = self._rrdp_filename_to_uri(delta.fn, rrdp_base_uri),
+ hash = delta.hash,
+ serial = str(delta.serial))
+ rpki.relaxng.rrdp.assertValid(xml)
+ fn = os.path.join(rrdp_publication_base, self.notification_fn)
+ tn = fn + ".%s.tmp" % os.getpid()
+ ElementTree(xml).write(file = tn, pretty_print = True)
+ os.rename(tn, fn)
+
+
+ def synchronize_rrdp_files(self, rrdp_publication_base, rrdp_base_uri):
+ """
+ Write current RRDP files to disk, clean up old files and directories.
+ """
+
+ if os.path.isdir(rrdp_publication_base):
+ current_filenames = set(fn for fn in os.listdir(rrdp_publication_base)
+ if fn.endswith(".cer") or fn.endswith(".tal"))
+ else:
+ current_filenames = set()
+
+ snapshot_hash = self.write_snapshot_file(rrdp_publication_base)
+ current_filenames.add(self.snapshot_fn)
+
+ for delta in self.delta_set.all():
+ current_filenames.add(delta.fn)
+
+ self.write_notification_xml(rrdp_base_uri, snapshot_hash, rrdp_publication_base),
+ current_filenames.add(self.notification_fn)
+
+ for root, dirs, files in os.walk(rrdp_publication_base, topdown = False):
+ for fn in files:
+ fn = os.path.join(root, fn)
+ if fn[len(rrdp_publication_base):].lstrip("/") not in current_filenames:
+ os.remove(fn)
+ for dn in dirs:
+ try:
+ os.rmdir(os.path.join(root, dn))
+ except OSError:
+ pass
+
+
+class Delta(models.Model):
+ serial = models.BigIntegerField()
+ hash = models.CharField(max_length = 64)
+ expires = SundialField()
+ session = models.ForeignKey(Session)
+
+
+ @staticmethod
+ def _uri_to_filename(uri, publication_base):
+ if not uri.startswith("rsync://"):
+ raise rpki.exceptions.BadURISyntax(uri)
+ path = uri.split("/")[4:]
+ path.insert(0, publication_base.rstrip("/"))
+ filename = "/".join(path)
+ if "/../" in filename or filename.endswith("/.."):
+ raise rpki.exceptions.BadURISyntax(filename)
+ return filename
+
+
+ @property
+ def fn(self):
+ return "%s/deltas/%s.xml" % (self.session.uuid, self.serial)
+
+
+ def activate(self, rrdp_publication_base):
+ rpki.relaxng.rrdp.assertValid(self.xml)
+ fn = os.path.join(rrdp_publication_base, self.fn)
+ tn = fn + ".%s.tmp" % os.getpid()
+ dn = os.path.dirname(fn)
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ with open(tn, "wb+") as f:
+ ElementTree(self.xml).write(file = f, pretty_print = True)
+ f.flush()
+ f.seek(0)
+ self.hash = sha256_file(f)
+ os.rename(tn, fn)
+ self.save()
+ self.session.serial += 1
+ self.session.save()
+
+
+ def publish(self, client, der, uri, obj_hash):
+ try:
+ obj = client.publishedobject_set.get(session = self.session, uri = uri)
+ if obj.hash == obj_hash:
+ obj.delete()
+ elif obj_hash is None:
+ raise rpki.exceptions.ExistingObjectAtURI("Object already published at %s" % uri)
+ else:
+ raise rpki.exceptions.DifferentObjectAtURI("Found different object at %s (old %s, new %s)" % (uri, obj.hash, obj_hash))
+ except rpki.pubdb.models.PublishedObject.DoesNotExist:
+ pass
+ logger.debug("Publishing %s", uri)
+ PublishedObject.objects.create(session = self.session, client = client, der = der, uri = uri,
+ hash = rpki.x509.sha256(der).encode("hex"))
+ se = DERSubElement(self.xml, rrdp_tag_publish, der = der, uri = uri)
+ if obj_hash is not None:
+ se.set("hash", obj_hash)
+ rpki.relaxng.rrdp.assertValid(self.xml)
+
+
+ def withdraw(self, client, uri, obj_hash):
+ try:
+ obj = client.publishedobject_set.get(session = self.session, uri = uri)
+ except rpki.pubdb.models.PublishedObject.DoesNotExist:
+ raise rpki.exceptions.NoObjectAtURI("No published object found at %s" % uri)
+ if obj.hash != obj_hash:
+ raise rpki.exceptions.DifferentObjectAtURI("Found different object at %s (old %s, new %s)" % (uri, obj.hash, obj_hash))
+ logger.debug("Withdrawing %s", uri)
+ obj.delete()
+ SubElement(self.xml, rrdp_tag_withdraw, uri = uri, hash = obj_hash).tail = "\n"
+ rpki.relaxng.rrdp.assertValid(self.xml)
+
+
+ def update_rsync_files(self, publication_base):
+ from errno import ENOENT
+ min_path_len = len(publication_base.rstrip("/"))
+ for pdu in self.xml:
+ assert pdu.tag in (rrdp_tag_publish, rrdp_tag_withdraw)
+ fn = self._uri_to_filename(pdu.get("uri"), publication_base)
+ if pdu.tag == rrdp_tag_publish:
+ tn = fn + ".tmp"
+ dn = os.path.dirname(fn)
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ with open(tn, "wb") as f:
+ f.write(pdu.text.decode("base64"))
+ os.rename(tn, fn)
+ else:
+ try:
+ os.remove(fn)
+ except OSError, e:
+ if e.errno != ENOENT:
+ raise
+ dn = os.path.dirname(fn)
+ while len(dn) > min_path_len:
+ try:
+ os.rmdir(dn)
+ except OSError:
+ break
+ else:
+ dn = os.path.dirname(dn)
+ del self.xml
+
+
+class PublishedObject(models.Model):
+ uri = models.CharField(max_length = 255)
+ der = models.BinaryField()
+ hash = models.CharField(max_length = 64)
+ client = models.ForeignKey(Client)
+ session = models.ForeignKey(Session)
+
+ class Meta:
+ unique_together = (("session", "hash"),
+ ("session", "uri"))
diff --git a/rpki/publication.py b/rpki/publication.py
index 5fc7f3dd..393e078e 100644
--- a/rpki/publication.py
+++ b/rpki/publication.py
@@ -1,470 +1,84 @@
# $Id$
#
-# Copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
+# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
+# copyright notices and this permission notice appear in all copies.
#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
+# ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
-RPKI "publication" protocol.
+RPKI publication protocol.
"""
-import os
-import errno
import logging
-import rpki.resource_set
+
import rpki.x509
-import rpki.sql
import rpki.exceptions
-import rpki.xml_utils
-import rpki.http
-import rpki.up_down
import rpki.relaxng
-import rpki.sundial
-import rpki.log
logger = logging.getLogger(__name__)
-class publication_namespace(object):
- """
- XML namespace parameters for publication protocol.
- """
-
- xmlns = rpki.relaxng.publication.xmlns
- nsmap = rpki.relaxng.publication.nsmap
-
-class control_elt(rpki.xml_utils.data_elt, rpki.sql.sql_persistent, publication_namespace):
- """
- Virtual class for control channel objects.
- """
-
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Action dispatch handler. This needs special handling because we
- need to make sure that this PDU arrived via the control channel.
- """
- if self.client is not None:
- raise rpki.exceptions.BadQuery("Control query received on client channel")
- rpki.xml_utils.data_elt.serve_dispatch(self, r_msg, cb, eb)
-
-class config_elt(control_elt):
- """
- <config/> element. This is a little weird because there should
- never be more than one row in the SQL config table, but we have to
- put the BPKI CRL somewhere and SQL is the least bad place available.
-
- So we reuse a lot of the SQL machinery, but we nail config_id at 1,
- we don't expose it in the XML protocol, and we only support the get
- and set actions.
- """
-
- attributes = ("action", "tag")
- element_name = "config"
- elements = ("bpki_crl",)
-
- sql_template = rpki.sql.template(
- "config",
- "config_id",
- ("bpki_crl", rpki.x509.CRL))
-
- wired_in_config_id = 1
-
- def startElement(self, stack, name, attrs):
- """
- StartElement() handler for config object. This requires special
- handling because of the weird way we treat config_id.
- """
- control_elt.startElement(self, stack, name, attrs)
- self.config_id = self.wired_in_config_id
-
- @classmethod
- def fetch(cls, gctx):
- """
- Fetch the config object from SQL. This requires special handling
- because of the weird way we treat config_id.
- """
- return cls.sql_fetch(gctx, cls.wired_in_config_id)
-
- def serve_set(self, r_msg, cb, eb):
- """
- Handle a set action. This requires special handling because
- config doesn't support the create method.
- """
- if self.sql_fetch(self.gctx, self.config_id) is None:
- control_elt.serve_create(self, r_msg, cb, eb)
- else:
- control_elt.serve_set(self, r_msg, cb, eb)
-
- def serve_fetch_one_maybe(self):
- """
- Find the config object on which a get or set method should
- operate.
- """
- return self.sql_fetch(self.gctx, self.config_id)
-
-class client_elt(control_elt):
- """
- <client/> element.
- """
-
- element_name = "client"
- attributes = ("action", "tag", "client_handle", "base_uri")
- elements = ("bpki_cert", "bpki_glue")
- booleans = ("clear_replay_protection",)
-
- sql_template = rpki.sql.template(
- "client",
- "client_id",
- "client_handle",
- "base_uri",
- ("bpki_cert", rpki.x509.X509),
- ("bpki_glue", rpki.x509.X509),
- ("last_cms_timestamp", rpki.sundial.datetime))
-
- base_uri = None
- bpki_cert = None
- bpki_glue = None
- last_cms_timestamp = None
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for client_elt.
- """
- actions = []
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a clear_replay_protection action for this client.
- """
- self.last_cms_timestamp = None
- self.sql_mark_dirty()
- cb()
-
- def serve_fetch_one_maybe(self):
- """
- Find the client object on which a get, set, or destroy method
- should operate, or which would conflict with a create method.
- """
- return self.sql_fetch_where1(self.gctx, "client_handle = %s", (self.client_handle,))
-
- def serve_fetch_all(self):
- """
- Find client objects on which a list method should operate.
- """
- return self.sql_fetch_all(self.gctx)
-
- def check_allowed_uri(self, uri):
- """
- Make sure that a target URI is within this client's allowed URI space.
- """
- if not uri.startswith(self.base_uri):
- raise rpki.exceptions.ForbiddenURI
-
-class publication_object_elt(rpki.xml_utils.base_elt, publication_namespace):
- """
- Virtual class for publishable objects. These have very similar
- syntax, differences lie in underlying datatype and methods. XML
- methods are a little different from the pattern used for objects
- that support the create/set/get/list/destroy actions, but
- publishable objects don't go in SQL either so these classes would be
- different in any case.
- """
-
- attributes = ("action", "tag", "client_handle", "uri")
- payload_type = None
- payload = None
+nsmap = rpki.relaxng.publication.nsmap
+version = rpki.relaxng.publication.version
- def endElement(self, stack, name, text):
- """
- Handle a publishable element element.
- """
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- if text:
- self.payload = self.payload_type(Base64 = text) # pylint: disable=E1102
- stack.pop()
+tag_msg = rpki.relaxng.publication.xmlns + "msg"
+tag_list = rpki.relaxng.publication.xmlns + "list"
+tag_publish = rpki.relaxng.publication.xmlns + "publish"
+tag_withdraw = rpki.relaxng.publication.xmlns + "withdraw"
+tag_report_error = rpki.relaxng.publication.xmlns + "report_error"
- def toXML(self):
- """
- Generate XML element for publishable object.
- """
- elt = self.make_elt()
- if self.payload:
- elt.text = self.payload.get_Base64()
- return elt
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Action dispatch handler.
- """
- # pylint: disable=E0203
- try:
- if self.client is None:
- raise rpki.exceptions.BadQuery("Client query received on control channel")
- dispatch = { "publish" : self.serve_publish,
- "withdraw" : self.serve_withdraw }
- if self.action not in dispatch:
- raise rpki.exceptions.BadQuery("Unexpected query: action %s" % self.action)
- self.client.check_allowed_uri(self.uri)
- dispatch[self.action]()
- r_pdu = self.__class__()
- r_pdu.action = self.action
- r_pdu.tag = self.tag
- r_pdu.uri = self.uri
- r_msg.append(r_pdu)
- cb()
- except rpki.exceptions.NoObjectAtURI, e:
- # This can happen when we're cleaning up from a prior mess, so
- # we generate a <report_error/> PDU then carry on.
- r_msg.append(report_error_elt.from_exception(e, self.tag))
- cb()
+## @var content_type
+# Content type to use when sending publication queries
+content_type = "application/x-rpki"
- def serve_publish(self):
- """
- Publish an object.
- """
- logger.info("Publishing %s", self.payload.tracking_data(self.uri))
- filename = self.uri_to_filename()
- filename_tmp = filename + ".tmp"
- dirname = os.path.dirname(filename)
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
- f = open(filename_tmp, "wb")
- f.write(self.payload.get_DER())
- f.close()
- os.rename(filename_tmp, filename)
+## @var allowed_content_types
+# Content types we consider acceptable for incoming publication
+# queries.
- def serve_withdraw(self):
- """
- Withdraw an object, then recursively delete empty directories.
- """
- logger.info("Withdrawing %s", self.uri)
- filename = self.uri_to_filename()
- try:
- os.remove(filename)
- except OSError, e:
- if e.errno == errno.ENOENT:
- raise rpki.exceptions.NoObjectAtURI("No object published at %s" % self.uri)
- else:
- raise
- min_path_len = len(self.gctx.publication_base.rstrip("/"))
- dirname = os.path.dirname(filename)
- while len(dirname) > min_path_len:
- try:
- os.rmdir(dirname)
- except OSError:
- break
- else:
- dirname = os.path.dirname(dirname)
+allowed_content_types = (content_type,)
- def uri_to_filename(self):
- """
- Convert a URI to a local filename.
- """
- if not self.uri.startswith("rsync://"):
- raise rpki.exceptions.BadURISyntax(self.uri)
- path = self.uri.split("/")[3:]
- if not self.gctx.publication_multimodule:
- del path[0]
- path.insert(0, self.gctx.publication_base.rstrip("/"))
- filename = "/".join(path)
- if "/../" in filename or filename.endswith("/.."):
- raise rpki.exceptions.BadURISyntax(filename)
- return filename
- @classmethod
- def make_publish(cls, uri, obj, tag = None):
- """
- Construct a publication PDU.
+def raise_if_error(pdu):
"""
- assert cls.payload_type is not None and type(obj) is cls.payload_type
- return cls.make_pdu(action = "publish", uri = uri, payload = obj, tag = tag)
+ Raise an appropriate error if this is a <report_error/> PDU.
- @classmethod
- def make_withdraw(cls, uri, obj, tag = None):
+ As a convenience, this will also accept a <msg/> PDU and raise an
+ appropriate error if it contains any <report_error/> PDUs or if
+ the <msg/> is not a reply.
"""
- Construct a withdrawal PDU.
- """
- assert cls.payload_type is not None and type(obj) is cls.payload_type
- return cls.make_pdu(action = "withdraw", uri = uri, tag = tag)
-
- def raise_if_error(self):
- """
- No-op, since this is not a <report_error/> PDU.
- """
- pass
-
-class certificate_elt(publication_object_elt):
- """
- <certificate/> element.
- """
-
- element_name = "certificate"
- payload_type = rpki.x509.X509
-
-class crl_elt(publication_object_elt):
- """
- <crl/> element.
- """
-
- element_name = "crl"
- payload_type = rpki.x509.CRL
-
-class manifest_elt(publication_object_elt):
- """
- <manifest/> element.
- """
-
- element_name = "manifest"
- payload_type = rpki.x509.SignedManifest
-
-class roa_elt(publication_object_elt):
- """
- <roa/> element.
- """
-
- element_name = "roa"
- payload_type = rpki.x509.ROA
-class ghostbuster_elt(publication_object_elt):
- """
- <ghostbuster/> element.
- """
+ if pdu.tag == tag_report_error:
+ code = pdu.get("error_code")
+ logger.debug("<report_error/> code %r", code)
+ e = getattr(rpki.exceptions, code, None)
+ if e is not None and issubclass(e, rpki.exceptions.RPKI_Exception):
+ raise e(pdu.text)
+ else:
+ raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %r, %r" % (code, pdu))
- element_name = "ghostbuster"
- payload_type = rpki.x509.Ghostbuster
+ if pdu.tag == tag_msg:
+ if pdu.get("type") != "reply":
+ raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: expected reply, got %r" % pdu.get("type"))
+ for p in pdu:
+ raise_if_error(p)
-publication_object_elt.obj2elt = dict(
- (e.payload_type, e) for e in
- (certificate_elt, crl_elt, manifest_elt, roa_elt, ghostbuster_elt))
-class report_error_elt(rpki.xml_utils.text_elt, publication_namespace):
- """
- <report_error/> element.
- """
-
- element_name = "report_error"
- attributes = ("tag", "error_code")
- text_attribute = "error_text"
-
- error_text = None
-
- @classmethod
- def from_exception(cls, e, tag = None):
- """
- Generate a <report_error/> element from an exception.
- """
- self = cls()
- self.tag = tag
- self.error_code = e.__class__.__name__
- self.error_text = str(e)
- return self
-
- def __str__(self):
- s = ""
- if getattr(self, "tag", None) is not None:
- s += "[%s] " % self.tag
- s += self.error_code
- if getattr(self, "error_text", None) is not None:
- s += ": " + self.error_text
- return s
-
- def raise_if_error(self):
- """
- Raise exception associated with this <report_error/> PDU.
- """
- t = rpki.exceptions.__dict__.get(self.error_code)
- if isinstance(t, type) and issubclass(t, rpki.exceptions.RPKI_Exception):
- raise t(getattr(self, "text", None))
- else:
- raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %s" % self)
-
-class msg(rpki.xml_utils.msg, publication_namespace):
- """
- Publication PDU.
- """
-
- ## @var version
- # Protocol version
- version = int(rpki.relaxng.publication.version)
-
- ## @var pdus
- # Dispatch table of PDUs for this protocol.
- pdus = dict((x.element_name, x) for x in
- (config_elt, client_elt, certificate_elt, crl_elt, manifest_elt, roa_elt, ghostbuster_elt, report_error_elt))
-
- def serve_top_level(self, gctx, client, cb):
+class cms_msg(rpki.x509.XML_CMS_object):
"""
- Serve one msg PDU.
+ CMS-signed publication PDU.
"""
- if not self.is_query():
- raise rpki.exceptions.BadQuery("Message type is not query")
- r_msg = self.__class__.reply()
-
- def loop(iterator, q_pdu):
-
- def fail(e):
- if not isinstance(e, rpki.exceptions.NotFound):
- logger.exception("Exception processing PDU %r", q_pdu)
- r_msg.append(report_error_elt.from_exception(e, q_pdu.tag))
- cb(r_msg)
-
- try:
- q_pdu.gctx = gctx
- q_pdu.client = client
- q_pdu.serve_dispatch(r_msg, iterator, fail)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- fail(e)
-
- def done():
- cb(r_msg)
-
- rpki.async.iterator(self, loop, done)
-
-class sax_handler(rpki.xml_utils.sax_handler):
- """
- SAX handler for publication protocol.
- """
-
- pdu = msg
- name = "msg"
- version = rpki.relaxng.publication.version
-
-
-class cms_msg(rpki.x509.XML_CMS_object):
- """
- Class to hold a CMS-signed publication PDU.
- """
- encoding = "us-ascii"
- schema = rpki.relaxng.publication
- saxify = sax_handler.saxify
+ encoding = "us-ascii"
+ schema = rpki.relaxng.publication
diff --git a/rpki/publication_control.py b/rpki/publication_control.py
new file mode 100644
index 00000000..b0668eef
--- /dev/null
+++ b/rpki/publication_control.py
@@ -0,0 +1,74 @@
+# $Id$
+#
+# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
+# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
+# ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+RPKI publication control protocol.
+
+Per IETF SIDR WG discussion, this is now separate from the publication
+protocol itself.
+"""
+
+import logging
+
+import rpki.x509
+import rpki.exceptions
+import rpki.relaxng
+
+logger = logging.getLogger(__name__)
+
+
+nsmap = rpki.relaxng.publication_control.nsmap
+version = rpki.relaxng.publication_control.version
+
+tag_msg = rpki.relaxng.publication_control.xmlns + "msg"
+tag_client = rpki.relaxng.publication_control.xmlns + "client"
+tag_bpki_cert = rpki.relaxng.publication_control.xmlns + "bpki_cert"
+tag_bpki_glue = rpki.relaxng.publication_control.xmlns + "bpki_glue"
+tag_report_error = rpki.relaxng.publication_control.xmlns + "report_error"
+
+
+def raise_if_error(pdu):
+ """
+ Raise an appropriate error if this is a <report_error/> PDU.
+
+    As a convience, this will also accept a &lt;msg/&gt; PDU and raise an
+ appropriate error if it contains any <report_error/> PDUs.
+ """
+
+ if pdu.tag == tag_report_error:
+ code = pdu.get("error_code")
+ logger.debug("<report_error/> code %r", code)
+ e = getattr(rpki.exceptions, code, None)
+ if e is not None and issubclass(e, rpki.exceptions.RPKI_Exception):
+ raise e(pdu.text)
+ else:
+ raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %r, %r" % (code, pdu))
+
+ if pdu.tag == tag_msg:
+ for p in pdu:
+ raise_if_error(p)
+
+
+class cms_msg(rpki.x509.XML_CMS_object):
+ """
+ CMS-signed publication control PDU.
+ """
+
+ encoding = "us-ascii"
+ schema = rpki.relaxng.publication_control
diff --git a/rpki/rcynic.py b/rpki/rcynic.py
index 10ad7516..c6ad60d5 100644
--- a/rpki/rcynic.py
+++ b/rpki/rcynic.py
@@ -25,251 +25,258 @@ import rpki.resource_set
from xml.etree.ElementTree import ElementTree
class UnknownObject(rpki.exceptions.RPKI_Exception):
- """
- Unrecognized object in rcynic result cache.
- """
+ """
+ Unrecognized object in rcynic result cache.
+ """
class NotRsyncURI(rpki.exceptions.RPKI_Exception):
- """
- URI is not an rsync URI.
- """
+ """
+ URI is not an rsync URI.
+ """
class rcynic_object(object):
- """
- An object read from rcynic cache.
- """
+ """
+ An object read from rcynic cache.
+ """
- def __init__(self, filename, **kwargs):
- self.filename = filename
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
- self.obj = self.obj_class(DER_file = filename)
+ def __init__(self, filename, **kwargs):
+ self.filename = filename
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+ self.obj = self.obj_class(DER_file = filename) # pylint: disable=E1101
- def __repr__(self):
- return "<%s %s %s at 0x%x>" % (self.__class__.__name__, self.uri, self.resources, id(self))
+ def __repr__(self):
+ # pylint: disable=E1101
+ return "<%s %s %s at 0x%x>" % (self.__class__.__name__, self.uri, self.resources, id(self))
- def show_attrs(self, *attrs):
- """
- Print a bunch of object attributes, quietly ignoring any that
- might be missing.
- """
- for a in attrs:
- try:
- print "%s: %s" % (a.capitalize(), getattr(self, a))
- except AttributeError:
- pass
+ def show_attrs(self, *attrs):
+ """
+ Print a bunch of object attributes, quietly ignoring any that
+ might be missing.
+ """
- def show(self):
- """
- Print common object attributes.
- """
- self.show_attrs("filename", "uri", "status", "timestamp")
+ for a in attrs:
+ try:
+ print "%s: %s" % (a.capitalize(), getattr(self, a))
+ except AttributeError:
+ pass
+
+ def show(self):
+ """
+ Print common object attributes.
+ """
+
+ self.show_attrs("filename", "uri", "status", "timestamp")
class rcynic_certificate(rcynic_object):
- """
- A certificate from rcynic cache.
- """
-
- obj_class = rpki.x509.X509
-
- def __init__(self, filename, **kwargs):
- rcynic_object.__init__(self, filename, **kwargs)
- self.notBefore = self.obj.getNotBefore()
- self.notAfter = self.obj.getNotAfter()
- self.aia_uri = self.obj.get_aia_uri()
- self.sia_directory_uri = self.obj.get_sia_directory_uri()
- self.manifest_uri = self.obj.get_sia_manifest_uri()
- self.resources = self.obj.get_3779resources()
- self.is_ca = self.obj.is_CA()
- self.serial = self.obj.getSerial()
- self.issuer = self.obj.getIssuer()
- self.subject = self.obj.getSubject()
- self.ski = self.obj.hSKI()
- self.aki = self.obj.hAKI()
-
- def show(self):
"""
- Print certificate attributes.
+ A certificate from rcynic cache.
"""
- rcynic_object.show(self)
- self.show_attrs("notBefore", "notAfter", "aia_uri", "sia_directory_uri", "resources")
+
+ obj_class = rpki.x509.X509
+
+ def __init__(self, filename, **kwargs):
+ rcynic_object.__init__(self, filename, **kwargs)
+ self.notBefore = self.obj.getNotBefore()
+ self.notAfter = self.obj.getNotAfter()
+ self.aia_uri = self.obj.get_aia_uri()
+ self.sia_directory_uri = self.obj.get_sia_directory_uri()
+ self.manifest_uri = self.obj.get_sia_manifest_uri()
+ self.resources = self.obj.get_3779resources()
+ self.is_ca = self.obj.is_CA()
+ self.serial = self.obj.getSerial()
+ self.issuer = self.obj.getIssuer()
+ self.subject = self.obj.getSubject()
+ self.ski = self.obj.hSKI()
+ self.aki = self.obj.hAKI()
+
+ def show(self):
+ """
+ Print certificate attributes.
+ """
+
+ rcynic_object.show(self)
+ self.show_attrs("notBefore", "notAfter", "aia_uri", "sia_directory_uri", "resources")
class rcynic_roa(rcynic_object):
- """
- A ROA from rcynic cache.
- """
-
- obj_class = rpki.x509.ROA
-
- def __init__(self, filename, **kwargs):
- rcynic_object.__init__(self, filename, **kwargs)
- self.obj.extract()
- self.asID = self.obj.get_POW().getASID()
- self.prefix_sets = []
- v4, v6 = self.obj.get_POW().getPrefixes()
- if v4:
- self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv4([
- rpki.resource_set.roa_prefix_ipv4(p[0], p[1], p[2]) for p in v4]))
- if v6:
- self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv6([
- rpki.resource_set.roa_prefix_ipv6(p[0], p[1], p[2]) for p in v6]))
- self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0])
- self.notBefore = self.ee.getNotBefore()
- self.notAfter = self.ee.getNotAfter()
- self.aia_uri = self.ee.get_aia_uri()
- self.resources = self.ee.get_3779resources()
- self.issuer = self.ee.getIssuer()
- self.serial = self.ee.getSerial()
- self.subject = self.ee.getSubject()
- self.aki = self.ee.hAKI()
- self.ski = self.ee.hSKI()
-
- def show(self):
"""
- Print ROA attributes.
+ A ROA from rcynic cache.
"""
- rcynic_object.show(self)
- self.show_attrs("notBefore", "notAfter", "aia_uri", "resources", "asID")
- if self.prefix_sets:
- print "Prefixes:", ",".join(str(i) for i in self.prefix_sets)
+
+ obj_class = rpki.x509.ROA
+
+ def __init__(self, filename, **kwargs):
+ rcynic_object.__init__(self, filename, **kwargs)
+ self.obj.extract()
+ self.asID = self.obj.get_POW().getASID()
+ self.prefix_sets = []
+ v4, v6 = self.obj.get_POW().getPrefixes()
+ if v4:
+ self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv4([
+ rpki.resource_set.roa_prefix_ipv4(p[0], p[1], p[2]) for p in v4]))
+ if v6:
+ self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv6([
+ rpki.resource_set.roa_prefix_ipv6(p[0], p[1], p[2]) for p in v6]))
+ self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0])
+ self.notBefore = self.ee.getNotBefore()
+ self.notAfter = self.ee.getNotAfter()
+ self.aia_uri = self.ee.get_aia_uri()
+ self.resources = self.ee.get_3779resources()
+ self.issuer = self.ee.getIssuer()
+ self.serial = self.ee.getSerial()
+ self.subject = self.ee.getSubject()
+ self.aki = self.ee.hAKI()
+ self.ski = self.ee.hSKI()
+
+ def show(self):
+ """
+ Print ROA attributes.
+ """
+
+ rcynic_object.show(self)
+ self.show_attrs("notBefore", "notAfter", "aia_uri", "resources", "asID")
+ if self.prefix_sets:
+ print "Prefixes:", ",".join(str(i) for i in self.prefix_sets)
class rcynic_ghostbuster(rcynic_object):
- """
- Ghostbuster record from the rcynic cache.
- """
-
- obj_class = rpki.x509.Ghostbuster
-
- def __init__(self, *args, **kwargs):
- rcynic_object.__init__(self, *args, **kwargs)
- self.obj.extract()
- self.vcard = self.obj.get_content()
- self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0])
- self.notBefore = self.ee.getNotBefore()
- self.notAfter = self.ee.getNotAfter()
- self.aia_uri = self.ee.get_aia_uri()
- self.issuer = self.ee.getIssuer()
- self.serial = self.ee.getSerial()
- self.subject = self.ee.getSubject()
- self.aki = self.ee.hAKI()
- self.ski = self.ee.hSKI()
-
- def show(self):
- rcynic_object.show(self)
- self.show_attrs("notBefore", "notAfter", "vcard")
+ """
+ Ghostbuster record from the rcynic cache.
+ """
+
+ obj_class = rpki.x509.Ghostbuster
+
+ def __init__(self, *args, **kwargs):
+ rcynic_object.__init__(self, *args, **kwargs)
+ self.obj.extract()
+ self.vcard = self.obj.get_content()
+ self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0])
+ self.notBefore = self.ee.getNotBefore()
+ self.notAfter = self.ee.getNotAfter()
+ self.aia_uri = self.ee.get_aia_uri()
+ self.issuer = self.ee.getIssuer()
+ self.serial = self.ee.getSerial()
+ self.subject = self.ee.getSubject()
+ self.aki = self.ee.hAKI()
+ self.ski = self.ee.hSKI()
+
+ def show(self):
+ rcynic_object.show(self)
+ self.show_attrs("notBefore", "notAfter", "vcard")
file_name_classes = {
- ".cer" : rcynic_certificate,
- ".gbr" : rcynic_ghostbuster,
- ".roa" : rcynic_roa }
+ ".cer" : rcynic_certificate,
+ ".gbr" : rcynic_ghostbuster,
+ ".roa" : rcynic_roa }
class rcynic_file_iterator(object):
- """
- Iterate over files in an rcynic output tree, yielding a Python
- representation of each object found.
- """
-
- def __init__(self, rcynic_root,
- authenticated_subdir = "authenticated"):
- self.rcynic_dir = os.path.join(rcynic_root, authenticated_subdir)
-
- def __iter__(self):
- for root, dirs, files in os.walk(self.rcynic_dir): # pylint: disable=W0612
- for filename in files:
- filename = os.path.join(root, filename)
- ext = os.path.splitext(filename)[1]
- if ext in file_name_classes:
- yield file_name_classes[ext](filename)
+ """
+ Iterate over files in an rcynic output tree, yielding a Python
+ representation of each object found.
+ """
+
+ def __init__(self, rcynic_root,
+ authenticated_subdir = "authenticated"):
+ self.rcynic_dir = os.path.join(rcynic_root, authenticated_subdir)
+
+ def __iter__(self):
+ # pylint: disable=W0612
+ for root, dirs, files in os.walk(self.rcynic_dir):
+ for filename in files:
+ filename = os.path.join(root, filename)
+ ext = os.path.splitext(filename)[1]
+ if ext in file_name_classes:
+ yield file_name_classes[ext](filename)
class validation_status_element(object):
- def __init__(self, *args, **kwargs):
- self.attrs = []
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
- # attribute names are saved so that the __repr__ method can
- # display the subset of attributes the user specified
- self.attrs.append(k)
- self._obj = None
-
- def get_obj(self):
- if not self._obj:
- self._obj = self.file_class(filename=self.filename, uri=self.uri)
- return self._obj
-
- def __repr__(self):
- v = [self.__class__.__name__, 'id=%s' % str(id(self))]
- v.extend(['%s=%s' % (x, getattr(self, x)) for x in self.attrs])
- return '<%s>' % (' '.join(v),)
-
- obj = property(get_obj)
+ def __init__(self, *args, **kwargs):
+ self.attrs = []
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+ # attribute names are saved so that the __repr__ method can
+ # display the subset of attributes the user specified
+ self.attrs.append(k)
+ self._obj = None
+
+ def get_obj(self):
+ # pylint: disable=E1101
+ if not self._obj:
+ self._obj = self.file_class(filename=self.filename, uri=self.uri)
+ return self._obj
+
+ def __repr__(self):
+ v = [self.__class__.__name__, 'id=%s' % str(id(self))]
+ v.extend(['%s=%s' % (x, getattr(self, x)) for x in self.attrs])
+ return '<%s>' % (' '.join(v),)
+
+ obj = property(get_obj)
class rcynic_xml_iterator(object):
- """
- Iterate over validation_status entries in the XML output from an
- rcynic run. Yields a tuple for each entry:
-
- timestamp, generation, status, object
-
- where URI, status, and timestamp are the corresponding values from
- the XML element, OK is a boolean indicating whether validation was
- considered succesful, and object is a Python representation of the
- object in question. If OK is True, object will be from rcynic's
- authenticated output tree; otherwise, object will be from rcynic's
- unauthenticated output tree.
-
- Note that it is possible for the same URI to appear in more than one
- validation_status element; in such cases, the succesful case (OK
- True) should be the last entry (as rcynic will stop trying once it
- gets a good copy), but there may be multiple failures, which might
- or might not have different status codes.
- """
-
- def __init__(self, rcynic_root, xml_file,
- authenticated_old_subdir = "authenticated.old",
- unauthenticated_subdir = "unauthenticated"):
- self.rcynic_root = rcynic_root
- self.xml_file = xml_file
- self.authenticated_subdir = os.path.join(rcynic_root, 'authenticated')
- self.authenticated_old_subdir = os.path.join(rcynic_root, authenticated_old_subdir)
- self.unauthenticated_subdir = os.path.join(rcynic_root, unauthenticated_subdir)
-
- base_uri = "rsync://"
-
- def uri_to_filename(self, uri):
- if uri.startswith(self.base_uri):
- return uri[len(self.base_uri):]
- else:
- raise NotRsyncURI("Not an rsync URI %r" % uri)
-
- def __iter__(self):
- for validation_status in ElementTree(file=self.xml_file).getroot().getiterator("validation_status"):
- timestamp = validation_status.get("timestamp")
- status = validation_status.get("status")
- uri = validation_status.text.strip()
- generation = validation_status.get("generation")
-
- # determine the path to this object
- if status == 'object_accepted':
- d = self.authenticated_subdir
- elif generation == 'backup':
- d = self.authenticated_old_subdir
- else:
- d = self.unauthenticated_subdir
-
- filename = os.path.join(d, self.uri_to_filename(uri))
-
- ext = os.path.splitext(filename)[1]
- if ext in file_name_classes:
- yield validation_status_element(timestamp = timestamp, generation = generation,
- uri=uri, status = status, filename = filename,
- file_class = file_name_classes[ext])
+ """
+ Iterate over validation_status entries in the XML output from an
+ rcynic run. Yields a tuple for each entry:
+
+ timestamp, generation, status, object
+
+ where URI, status, and timestamp are the corresponding values from
+ the XML element, OK is a boolean indicating whether validation was
+    considered succesful, and object is a Python representation of the
+ object in question. If OK is True, object will be from rcynic's
+ authenticated output tree; otherwise, object will be from rcynic's
+ unauthenticated output tree.
+
+ Note that it is possible for the same URI to appear in more than one
+    validation_status element; in such cases, the succesful case (OK
+ True) should be the last entry (as rcynic will stop trying once it
+ gets a good copy), but there may be multiple failures, which might
+ or might not have different status codes.
+ """
+
+ def __init__(self, rcynic_root, xml_file,
+ authenticated_old_subdir = "authenticated.old",
+ unauthenticated_subdir = "unauthenticated"):
+ self.rcynic_root = rcynic_root
+ self.xml_file = xml_file
+ self.authenticated_subdir = os.path.join(rcynic_root, 'authenticated')
+ self.authenticated_old_subdir = os.path.join(rcynic_root, authenticated_old_subdir)
+ self.unauthenticated_subdir = os.path.join(rcynic_root, unauthenticated_subdir)
+
+ base_uri = "rsync://"
+
+ def uri_to_filename(self, uri):
+ if uri.startswith(self.base_uri):
+ return uri[len(self.base_uri):]
+ else:
+ raise NotRsyncURI("Not an rsync URI %r" % uri)
+
+ def __iter__(self):
+ for validation_status in ElementTree(file=self.xml_file).getroot().getiterator("validation_status"):
+ timestamp = validation_status.get("timestamp")
+ status = validation_status.get("status")
+ uri = validation_status.text.strip()
+ generation = validation_status.get("generation")
+
+ # determine the path to this object
+ if status == 'object_accepted':
+ d = self.authenticated_subdir
+ elif generation == 'backup':
+ d = self.authenticated_old_subdir
+ else:
+ d = self.unauthenticated_subdir
+
+ filename = os.path.join(d, self.uri_to_filename(uri))
+
+ ext = os.path.splitext(filename)[1]
+ if ext in file_name_classes:
+ yield validation_status_element(timestamp = timestamp, generation = generation,
+ uri=uri, status = status, filename = filename,
+ file_class = file_name_classes[ext])
def label_iterator(xml_file):
- """
- Returns an iterator which contains all defined labels from an rcynic XML
- output file. Each item is a tuple of the form
- (label, kind, description).
- """
-
- for label in ElementTree(file=xml_file).find("labels"):
- yield label.tag, label.get("kind"), label.text.strip()
+ """
+ Returns an iterator which contains all defined labels from an rcynic XML
+ output file. Each item is a tuple of the form
+ (label, kind, description).
+ """
+
+ for label in ElementTree(file=xml_file).find("labels"):
+ yield label.tag, label.get("kind"), label.text.strip()
diff --git a/rpki/rcynicdb/__init__.py b/rpki/rcynicdb/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/rcynicdb/__init__.py
diff --git a/rpki/rcynicdb/iterator.py b/rpki/rcynicdb/iterator.py
new file mode 100644
index 00000000..a754ed72
--- /dev/null
+++ b/rpki/rcynicdb/iterator.py
@@ -0,0 +1,49 @@
+"""
+rcynic database iterator.
+
+At least for the moment, we attempt to provide an iterator that works
+with both old-style (directory tree of file objects with names similar
+to what wget would use) and new style (Django ORM) databases.
+"""
+
+import os
+
+initialized_django = False
+
+def _uri_to_class(uri, class_map):
+ return class_map[uri[uri.rindex(".")+1:]]
+
+def authenticated_objects(directory_tree = None, uri_suffix = None, class_map = None):
+
+ if class_map is None:
+ import rpki.POW
+ class_map = dict(cer = rpki.POW.X509,
+ crl = rpki.POW.CRL,
+ gbr = rpki.POW.CMS,
+ mft = rpki.POW.Manifest,
+ roa = rpki.POW.ROA)
+
+ if directory_tree:
+ for head, dirs, files in os.walk(directory_tree):
+ for fn in files:
+ if uri_suffix is None or fn.endswith(uri_suffix):
+ fn = os.path.join(head, fn)
+ uri = "rsync://" + fn[len(directory_tree):].lstrip("/")
+ yield uri, _uri_to_class(uri, class_map).derReadFile(fn)
+ return
+
+ global initialized_django
+ if not initialized_django:
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.rcynic")
+ import django
+ django.setup()
+ initialized_django = True
+
+ import rpki.rcynicdb
+ auth = rpki.rcynicdb.models.Authenticated.objects.order_by("-started").first()
+ if auth is None:
+ return
+
+ q = auth.rpkiobject_set
+ for obj in q.filter(uri__endswith = uri_suffix) if uri_suffix else q.all():
+ yield obj.uri, _uri_to_class(obj.uri, class_map).derRead(obj.der)
diff --git a/rpki/rcynicdb/migrations/0001_initial.py b/rpki/rcynicdb/migrations/0001_initial.py
new file mode 100644
index 00000000..5f60253b
--- /dev/null
+++ b/rpki/rcynicdb/migrations/0001_initial.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Authenticated',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('started', models.DateTimeField()),
+ ('finished', models.DateTimeField(null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Retrieval',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.TextField()),
+ ('started', models.DateTimeField()),
+ ('finished', models.DateTimeField()),
+ ('successful', models.BooleanField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='RPKIObject',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('der', models.BinaryField()),
+ ('uri', models.TextField()),
+ ('aki', models.SlugField(max_length=40)),
+ ('ski', models.SlugField(max_length=40)),
+ ('sha256', models.SlugField(unique=True, max_length=64)),
+ ('authenticated', models.ManyToManyField(to='rcynicdb.Authenticated')),
+ ('retrieved', models.ForeignKey(to='rcynicdb.Retrieval')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='RRDPSnapshot',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('session_id', models.UUIDField()),
+ ('serial', models.BigIntegerField()),
+ ('retrieved', models.OneToOneField(to='rcynicdb.Retrieval')),
+ ],
+ ),
+ migrations.AddField(
+ model_name='rpkiobject',
+ name='snapshot',
+ field=models.ManyToManyField(to='rcynicdb.RRDPSnapshot'),
+ ),
+ ]
diff --git a/rpki/rcynicdb/migrations/0002_auto_20160227_2003.py b/rpki/rcynicdb/migrations/0002_auto_20160227_2003.py
new file mode 100644
index 00000000..9c3acecb
--- /dev/null
+++ b/rpki/rcynicdb/migrations/0002_auto_20160227_2003.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('rcynicdb', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='retrieval',
+ name='finished',
+ field=models.DateTimeField(null=True),
+ ),
+ migrations.AlterField(
+ model_name='retrieval',
+ name='successful',
+ field=models.BooleanField(default=False),
+ ),
+ migrations.AlterField(
+ model_name='rrdpsnapshot',
+ name='retrieved',
+ field=models.OneToOneField(null=True, to='rcynicdb.Retrieval'),
+ ),
+ ]
diff --git a/rpki/rcynicdb/migrations/0003_auto_20160301_0333.py b/rpki/rcynicdb/migrations/0003_auto_20160301_0333.py
new file mode 100644
index 00000000..ea6e5499
--- /dev/null
+++ b/rpki/rcynicdb/migrations/0003_auto_20160301_0333.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('rcynicdb', '0002_auto_20160227_2003'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='retrieval',
+ name='finished',
+ field=models.DateTimeField(),
+ ),
+ migrations.AlterField(
+ model_name='retrieval',
+ name='successful',
+ field=models.BooleanField(),
+ ),
+ ]
diff --git a/rpki/rcynicdb/migrations/__init__.py b/rpki/rcynicdb/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/rcynicdb/migrations/__init__.py
diff --git a/rpki/rcynicdb/models.py b/rpki/rcynicdb/models.py
new file mode 100644
index 00000000..9a790230
--- /dev/null
+++ b/rpki/rcynicdb/models.py
@@ -0,0 +1,81 @@
+# First cut at ORM models for rcynicng.
+
+from django.db import models
+
+# HTTP/HTTPS/RSYNC fetch event.
+
+class Retrieval(models.Model):
+ uri = models.TextField()
+ started = models.DateTimeField()
+ finished = models.DateTimeField()
+ successful = models.BooleanField()
+
+ def __repr__(self):
+ try:
+ return "<Retrieval: {0.uri} started {0.started} finished {0.finished} successful {0.successful}>".format(self)
+ except:
+ return "<Retrieval: {}>".format(id(self))
+
+# Collection of validated objects.
+
+class Authenticated(models.Model):
+ started = models.DateTimeField()
+ finished = models.DateTimeField(null = True)
+
+ def __repr__(self):
+ try:
+ return "<Authenticated: started {0.started} finished {0.finished}>".format(self)
+ except:
+ return "<Authenticated: {}>".format(id(self))
+
+# One instance of an RRDP snapshot.
+
+class RRDPSnapshot(models.Model):
+ session_id = models.UUIDField()
+ serial = models.BigIntegerField()
+ retrieved = models.OneToOneField(Retrieval, null = True)
+
+ def __repr__(self):
+ try:
+ return "<RRDPSnapshot: serial {0.serial} session_id {0.session_id} retrieved {0.retrieved!r}>".format(self)
+ except:
+ return "<RRDPSnapshot: {}>".format(id(self))
+
+
+# RPKI objects.
+#
+# Might need to add an on_delete argument to the ForeignKey for the
+# retrieved field: the default behavior is CASCADE, which may not be
+# what we want in this case.
+#
+# https://docs.djangoproject.com/en/1.9/ref/models/fields/#django.db.models.ForeignKey.on_delete
+#
+# Might also want to provide names for the reverse relationships; the
+# code uses blah_set for now.
+
+# Setting unique = True on the der field breaks with PostgreSQL, see
+# https://code.djangoproject.com/ticket/14904
+#
+# In theory collisions on sha256 are possible, but in practice they're
+# not going to occur by accident. Setting unique = True on the sha256
+# field risks deliberate collisions, defending against that would
+# require detecting the collision and figuring out which is the
+# attacking object (easy in theory, as it probably won't validate),
+# then figuring out what to do about it (possibly harder -- do we drop
+# an entire RRDP zone because of one evil object?).
+
+class RPKIObject(models.Model):
+ der = models.BinaryField() # unique = True
+ uri = models.TextField()
+ aki = models.SlugField(max_length = 40) # hex SHA-1
+ ski = models.SlugField(max_length = 40) # hex SHA-1
+ sha256 = models.SlugField(max_length = 64, unique = True) # hex SHA-256
+ retrieved = models.ForeignKey(Retrieval)
+ authenticated = models.ManyToManyField(Authenticated)
+ snapshot = models.ManyToManyField(RRDPSnapshot)
+
+ def __repr__(self):
+ try:
+ return "<RPKIObject: uri {0.uri} sha256 {0.sha256} ski {0.ski} aki {0.aki} retrieved {0.retrieved!r}>".format(self)
+ except:
+ return "<RPKIObject: {}>".format(id(self))
diff --git a/rpki/relaxng.py b/rpki/relaxng.py
index e43384e7..9a01306c 100644
--- a/rpki/relaxng.py
+++ b/rpki/relaxng.py
@@ -1,12 +1,14 @@
# Automatically generated, do not edit.
+# pylint: skip-file
+
from rpki.relaxng_parser import RelaxNGParser
## @var left_right
## Parsed RelaxNG left_right schema
left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: left-right-schema.rnc 5902 2014-07-18 16:37:04Z sra $
+ $Id$
RelaxNG schema for RPKI left-right protocol.
@@ -61,7 +63,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</start>
<!-- PDUs allowed in a query -->
<define name="query_elt" combine="choice">
- <ref name="self_query"/>
+ <ref name="tenant_query"/>
</define>
<define name="query_elt" combine="choice">
<ref name="bsc_query"/>
@@ -95,7 +97,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</define>
<!-- PDUs allowed in a reply -->
<define name="reply_elt" combine="choice">
- <ref name="self_reply"/>
+ <ref name="tenant_reply"/>
</define>
<define name="reply_elt" combine="choice">
<ref name="bsc_reply"/>
@@ -227,8 +229,8 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<param name="pattern">[\-,0-9/:a-fA-F]*</param>
</data>
</define>
- <!-- <self/> element -->
- <define name="self_bool">
+ <!-- <tenant/> element -->
+ <define name="tenant_bool">
<optional>
<attribute name="rekey">
<value>yes</value>
@@ -265,7 +267,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</attribute>
</optional>
</define>
- <define name="self_payload">
+ <define name="tenant_payload">
<optional>
<attribute name="use_hsm">
<choice>
@@ -295,74 +297,74 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</element>
</optional>
</define>
- <define name="self_handle">
- <attribute name="self_handle">
+ <define name="tenant_handle">
+ <attribute name="tenant_handle">
<ref name="object_handle"/>
</attribute>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
- <ref name="self_bool"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_bool"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
- <ref name="self_bool"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_bool"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_list"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<!-- <bsc/> element. Key parameters hardwired for now. -->
@@ -415,7 +417,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_bool"/>
<ref name="bsc_payload"/>
@@ -424,7 +426,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_readonly"/>
</element>
@@ -432,7 +434,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_bool"/>
<ref name="bsc_payload"/>
@@ -441,7 +443,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_readonly"/>
</element>
@@ -449,14 +451,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_payload"/>
<ref name="bsc_readonly"/>
@@ -465,13 +467,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_payload"/>
<ref name="bsc_readonly"/>
@@ -480,14 +482,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
@@ -552,12 +554,34 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</attribute>
</optional>
<optional>
- <element name="bpki_cms_cert">
+ <attribute name="root_asn_resources">
+ <ref name="asn_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="root_ipv4_resources">
+ <ref name="ipv4_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="root_ipv6_resources">
+ <ref name="ipv6_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <element name="bpki_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="bpki_glue">
<ref name="base64"/>
</element>
</optional>
+ </define>
+ <define name="parent_readonly">
<optional>
- <element name="bpki_cms_glue">
+ <element name="rpki_root_cert">
<ref name="base64"/>
</element>
</optional>
@@ -565,7 +589,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_bool"/>
<ref name="parent_payload"/>
@@ -574,14 +598,15 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_bool"/>
<ref name="parent_payload"/>
@@ -590,50 +615,53 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_payload"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_payload"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
@@ -673,7 +701,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_bool"/>
<ref name="child_payload"/>
@@ -682,14 +710,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_bool"/>
<ref name="child_payload"/>
@@ -698,21 +726,21 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_payload"/>
</element>
@@ -720,13 +748,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_payload"/>
</element>
@@ -734,14 +762,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
@@ -768,6 +796,11 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<ref name="bsc_handle"/>
</optional>
<optional>
+ <attribute name="rrdp_notification_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <optional>
<element name="bpki_cert">
<ref name="base64"/>
</element>
@@ -781,7 +814,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_bool"/>
<ref name="repository_payload"/>
@@ -790,14 +823,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_bool"/>
<ref name="repository_payload"/>
@@ -806,21 +839,21 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_payload"/>
</element>
@@ -828,13 +861,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_payload"/>
</element>
@@ -842,14 +875,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
@@ -857,14 +890,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_resources_query">
<element name="list_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="list_resources_reply">
<element name="list_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<attribute name="valid_until">
<data type="dateTime">
@@ -892,13 +925,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_roa_requests_query">
<element name="list_roa_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_roa_requests_reply">
<element name="list_roa_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="asn">
<data type="nonNegativeInteger"/>
</attribute>
@@ -918,14 +951,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_ghostbuster_requests_query">
<element name="list_ghostbuster_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="list_ghostbuster_requests_reply">
<element name="list_ghostbuster_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<data type="string"/>
</element>
@@ -934,13 +967,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_ee_certificate_requests_query">
<element name="list_ee_certificate_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_ee_certificate_requests_reply">
<element name="list_ee_certificate_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="gski">
<data type="token">
<param name="minLength">27</param>
@@ -967,14 +1000,12 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<ref name="ipv6_list"/>
</attribute>
</optional>
- <optional>
- <attribute name="cn">
- <data type="string">
- <param name="maxLength">64</param>
- <param name="pattern">[\-0-9A-Za-z_ ]+</param>
- </data>
- </attribute>
- </optional>
+ <attribute name="cn">
+ <data type="string">
+ <param name="maxLength">64</param>
+ <param name="pattern">[\-0-9A-Za-z_ ]+</param>
+ </data>
+ </attribute>
<optional>
<attribute name="sn">
<data type="string">
@@ -1000,13 +1031,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_published_objects_query">
<element name="list_published_objects">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_published_objects_reply">
<element name="list_published_objects">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="uri">
<ref name="uri"/>
</attribute>
@@ -1022,13 +1053,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_received_resources_query">
<element name="list_received_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_received_resources_reply">
<element name="list_received_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<attribute name="notBefore">
<data type="dateTime">
@@ -1076,7 +1107,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<element name="report_error">
<ref name="tag"/>
<optional>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</optional>
<attribute name="error_code">
<ref name="error"/>
@@ -1102,7 +1133,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
## Parsed RelaxNG myrpki schema
myrpki = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: myrpki.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: myrpki.rnc 5876 2014-06-26 19:00:12Z sra $
RelaxNG schema for MyRPKI XML messages.
@@ -1481,11 +1512,183 @@ myrpki = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
-->
''')
-## @var publication
-## Parsed RelaxNG publication schema
-publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
+## @var oob_setup
+## Parsed RelaxNG oob_setup schema
+oob_setup = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
+<!-- $Id: rpki-setup.rnc 3429 2015-10-14 23:46:50Z sra $ -->
+<grammar ns="http://www.hactrn.net/uris/rpki/rpki-setup/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <define name="version">
+ <value>1</value>
+ </define>
+ <define name="base64">
+ <data type="base64Binary">
+ <param name="maxLength">512000</param>
+ </data>
+ </define>
+ <define name="handle">
+ <data type="string">
+ <param name="maxLength">255</param>
+ <param name="pattern">[\-_A-Za-z0-9/]*</param>
+ </data>
+ </define>
+ <define name="uri">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </define>
+ <define name="any">
+ <element>
+ <anyName/>
+ <zeroOrMore>
+ <attribute>
+ <anyName/>
+ </attribute>
+ </zeroOrMore>
+ <zeroOrMore>
+ <choice>
+ <ref name="any"/>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+ <define name="authorization_token">
+ <ref name="base64"/>
+ </define>
+ <define name="bpki_ta">
+ <ref name="base64"/>
+ </define>
+ <start combine="choice">
+ <element name="child_request">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="child_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="child_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="parent_response">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="service_uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="child_handle">
+ <ref name="handle"/>
+ </attribute>
+ <attribute name="parent_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="parent_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ <optional>
+ <element name="offer">
+ <empty/>
+ </element>
+ </optional>
+ <zeroOrMore>
+ <element name="referral">
+ <attribute name="referrer">
+ <ref name="handle"/>
+ </attribute>
+ <optional>
+ <attribute name="contact_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <ref name="authorization_token"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="publisher_request">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="publisher_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="publisher_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ <zeroOrMore>
+ <element name="referral">
+ <attribute name="referrer">
+ <ref name="handle"/>
+ </attribute>
+ <ref name="authorization_token"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="repository_response">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="service_uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="publisher_handle">
+ <ref name="handle"/>
+ </attribute>
+ <attribute name="sia_base">
+ <ref name="uri"/>
+ </attribute>
+ <optional>
+ <attribute name="rrdp_notification_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <element name="repository_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="authorization">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="authorized_sia_base">
+ <ref name="uri"/>
+ </attribute>
+ <ref name="bpki_ta"/>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="error">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="reason">
+ <choice>
+ <value>syntax-error</value>
+ <value>authentication-failure</value>
+ <value>refused</value>
+ </choice>
+ </attribute>
+ <optional>
+ <ref name="any"/>
+ </optional>
+ </element>
+ </start>
+</grammar>
+''')
+
+## @var publication_control
+## Parsed RelaxNG publication_control schema
+publication_control = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: publication-schema.rnc 5902 2014-07-18 16:37:04Z sra $
+ $Id: publication-control.rnc 5903 2014-07-18 17:08:13Z sra $
RelaxNG schema for RPKI publication protocol.
@@ -1506,7 +1709,7 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-->
-<grammar ns="http://www.hactrn.net/uris/rpki/publication-spec/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+<grammar ns="http://www.hactrn.net/uris/rpki/publication-control/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<define name="version">
<value>1</value>
</define>
@@ -1540,26 +1743,12 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</start>
<!-- PDUs allowed in a query -->
<define name="query_elt">
- <choice>
- <ref name="config_query"/>
- <ref name="client_query"/>
- <ref name="certificate_query"/>
- <ref name="crl_query"/>
- <ref name="manifest_query"/>
- <ref name="roa_query"/>
- <ref name="ghostbuster_query"/>
- </choice>
+ <ref name="client_query"/>
</define>
<!-- PDUs allowed in a reply -->
<define name="reply_elt">
<choice>
- <ref name="config_reply"/>
<ref name="client_reply"/>
- <ref name="certificate_reply"/>
- <ref name="crl_reply"/>
- <ref name="manifest_reply"/>
- <ref name="roa_reply"/>
- <ref name="ghostbuster_reply"/>
<ref name="report_error_reply"/>
</choice>
</define>
@@ -1603,60 +1792,7 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<param name="pattern">[\-_A-Za-z0-9/]+</param>
</data>
</define>
- <!--
- <config/> element (use restricted to repository operator)
- config_handle attribute, create, list, and destroy commands omitted deliberately, see code for details
- -->
- <define name="config_payload">
- <optional>
- <element name="bpki_crl">
- <ref name="base64"/>
- </element>
- </optional>
- </define>
- <define name="config_query" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>set</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="config_payload"/>
- </element>
- </define>
- <define name="config_reply" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>set</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- </element>
- </define>
- <define name="config_query" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>get</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- </element>
- </define>
- <define name="config_reply" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>get</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="config_payload"/>
- </element>
- </define>
- <!-- <client/> element (use restricted to repository operator) -->
+ <!-- <client/> element -->
<define name="client_handle">
<attribute name="client_handle">
<ref name="object_handle"/>
@@ -1801,242 +1937,217 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<ref name="client_handle"/>
</element>
</define>
- <!-- <certificate/> element -->
- <define name="certificate_query" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
+ <!-- <report_error/> element -->
+ <define name="error">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
</define>
- <define name="certificate_reply" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>publish</value>
- </attribute>
+ <define name="report_error_reply">
+ <element name="report_error">
<optional>
<ref name="tag"/>
</optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="certificate_query" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>withdraw</value>
+ <attribute name="error_code">
+ <ref name="error"/>
</attribute>
<optional>
- <ref name="tag"/>
+ <data type="string">
+ <param name="maxLength">512000</param>
+ </data>
</optional>
- <ref name="uri"/>
</element>
</define>
- <define name="certificate_reply" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ comment-start: "# "
+ comment-start-skip: "#[ \t]*"
+ End:
+-->
+''')
+
+## @var publication
+## Parsed RelaxNG publication schema
+publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: publication.rnc 5896 2014-07-15 19:34:32Z sra $
+
+ RelaxNG schema for RPKI publication protocol, from current I-D.
+
+ Copyright (c) 2014 IETF Trust and the persons identified as authors
+ of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of Internet Society, IETF or IETF Trust, nor the
+ names of specific contributors, may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+-->
+<grammar ns="http://www.hactrn.net/uris/rpki/publication-spec/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <!-- This is version 3 of the protocol. -->
+ <define name="version">
+ <value>3</value>
</define>
- <!-- <crl/> element -->
- <define name="crl_query" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>publish</value>
+ <!-- Top level PDU is either a query or a reply. -->
+ <start combine="choice">
+ <element name="msg">
+ <attribute name="version">
+ <ref name="version"/>
</attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
- </define>
- <define name="crl_reply" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>publish</value>
+ <attribute name="type">
+ <value>query</value>
</attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
+ <zeroOrMore>
+ <ref name="query_elt"/>
+ </zeroOrMore>
</element>
- </define>
- <define name="crl_query" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>withdraw</value>
+ </start>
+ <start combine="choice">
+ <element name="msg">
+ <attribute name="version">
+ <ref name="version"/>
</attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="crl_reply" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>withdraw</value>
+ <attribute name="type">
+ <value>reply</value>
</attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
+ <zeroOrMore>
+ <ref name="reply_elt"/>
+ </zeroOrMore>
</element>
+ </start>
+ <!-- PDUs allowed in queries and replies. -->
+ <define name="query_elt">
+ <choice>
+ <ref name="publish_query"/>
+ <ref name="withdraw_query"/>
+ <ref name="list_query"/>
+ </choice>
</define>
- <!-- <manifest/> element -->
- <define name="manifest_query" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
+ <define name="reply_elt">
+ <choice>
+ <ref name="publish_reply"/>
+ <ref name="withdraw_reply"/>
+ <ref name="list_reply"/>
+ <ref name="report_error_reply"/>
+ </choice>
</define>
- <define name="manifest_reply" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
+ <!-- Tag attributes for bulk operations. -->
+ <define name="tag">
+ <attribute name="tag">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
</define>
- <define name="manifest_query" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
+ <!-- Base64 encoded DER stuff. -->
+ <define name="base64">
+ <data type="base64Binary"/>
</define>
- <define name="manifest_reply" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
+ <!-- Publication URIs. -->
+ <define name="uri">
+ <attribute name="uri">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </attribute>
</define>
- <!-- <roa/> element -->
- <define name="roa_query" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
+ <!-- Digest of objects being withdrawn -->
+ <define name="hash">
+ <attribute name="hash">
+ <data type="string">
+ <param name="pattern">[0-9a-fA-F]+</param>
+ </data>
+ </attribute>
</define>
- <define name="roa_reply" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>publish</value>
- </attribute>
+ <!-- Error codes. -->
+ <define name="error">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </define>
+ <!-- <publish/> element -->
+ <define name="publish_query">
+ <element name="publish">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
- </element>
- </define>
- <define name="roa_query" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
<optional>
- <ref name="tag"/>
+ <ref name="hash"/>
</optional>
- <ref name="uri"/>
+ <ref name="base64"/>
</element>
</define>
- <define name="roa_reply" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
+ <define name="publish_reply">
+ <element name="publish">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
</element>
</define>
- <!-- <ghostbuster/> element -->
- <define name="ghostbuster_query" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>publish</value>
- </attribute>
+ <!-- <withdraw/> element -->
+ <define name="withdraw_query">
+ <element name="withdraw">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
- <ref name="base64"/>
+ <ref name="hash"/>
</element>
</define>
- <define name="ghostbuster_reply" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>publish</value>
- </attribute>
+ <define name="withdraw_reply">
+ <element name="withdraw">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
</element>
</define>
- <define name="ghostbuster_query" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
+ <!-- <list/> element -->
+ <define name="list_query">
+ <element name="list">
<optional>
<ref name="tag"/>
</optional>
- <ref name="uri"/>
</element>
</define>
- <define name="ghostbuster_reply" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
+ <define name="list_reply">
+ <element name="list">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
+ <ref name="hash"/>
</element>
</define>
<!-- <report_error/> element -->
- <define name="error">
- <data type="token">
- <param name="maxLength">1024</param>
- </data>
- </define>
<define name="report_error_reply">
<element name="report_error">
<optional>
@@ -2066,7 +2177,7 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
## Parsed RelaxNG router_certificate schema
router_certificate = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: router-certificate-schema.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: router-certificate.rnc 5881 2014-07-03 16:55:02Z sra $
RelaxNG schema for BGPSEC router certificate interchange format.
@@ -2164,11 +2275,165 @@ router_certificate = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
-->
''')
+## @var rrdp
+## Parsed RelaxNG rrdp schema
+rrdp = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: rrdp.rnc 6010 2014-11-08 18:01:58Z sra $
+
+ RelaxNG schema for RPKI Repository Delta Protocol (RRDP).
+
+ Copyright (C) 2014 Dragon Research Labs ("DRL")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+-->
+<grammar ns="http://www.ripe.net/rpki/rrdp" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <define name="version">
+ <data type="positiveInteger">
+ <param name="maxInclusive">1</param>
+ </data>
+ </define>
+ <define name="serial">
+ <data type="nonNegativeInteger"/>
+ </define>
+ <define name="uri">
+ <data type="anyURI"/>
+ </define>
+ <define name="uuid">
+ <data type="string">
+ <param name="pattern">[\-0-9a-fA-F]+</param>
+ </data>
+ </define>
+ <define name="hash">
+ <data type="string">
+ <param name="pattern">[0-9a-fA-F]+</param>
+ </data>
+ </define>
+ <define name="base64">
+ <data type="base64Binary"/>
+ </define>
+ <!-- Notification file: lists current snapshots and deltas -->
+ <start combine="choice">
+ <element name="notification">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <element name="snapshot">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ <zeroOrMore>
+ <element name="delta">
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <!-- Snapshot segment: think DNS AXFR. -->
+ <start combine="choice">
+ <element name="snapshot">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <zeroOrMore>
+ <element name="publish">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <ref name="base64"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <!-- Delta segment: think DNS IXFR. -->
+ <start combine="choice">
+ <element name="delta">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <oneOrMore>
+ <ref name="delta_element"/>
+ </oneOrMore>
+ </element>
+ </start>
+ <define name="delta_element" combine="choice">
+ <element name="publish">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <optional>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </optional>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <define name="delta_element" combine="choice">
+ <element name="withdraw">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ </define>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ comment-start: "# "
+ comment-start-skip: "#[ \t]*"
+ End:
+-->
+''')
+
## @var up_down
## Parsed RelaxNG up_down schema
up_down = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: up-down-schema.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: up-down.rnc 5881 2014-07-03 16:55:02Z sra $
RelaxNG schema for the up-down protocol, extracted from RFC 6492.
diff --git a/rpki/relaxng_parser.py b/rpki/relaxng_parser.py
index 466b1a79..53ec8f0d 100644
--- a/rpki/relaxng_parser.py
+++ b/rpki/relaxng_parser.py
@@ -22,21 +22,21 @@ from an XML-format RelaxNG schema.
import lxml.etree
class RelaxNGParser(object):
- """
- Parse schema, extract XML namespace and protocol version (if any).
- Method calls are just passed along to the parsed RelaxNG schema.
- """
+ """
+ Parse schema, extract XML namespace and protocol version (if any).
+ Method calls are just passed along to the parsed RelaxNG schema.
+ """
- def __init__(self, text):
- xml = lxml.etree.fromstring(text)
- self.schema = lxml.etree.RelaxNG(xml)
- ns = xml.get("ns")
- self.xmlns = "{" + ns + "}"
- self.nsmap = { None : ns }
- x = xml.xpath("ns0:define[@name = 'version']/ns0:value",
- namespaces = dict(ns0 = "http://relaxng.org/ns/structure/1.0"))
- if len(x) == 1:
- self.version = x[0].text
+ def __init__(self, text):
+ xml = lxml.etree.fromstring(text)
+ self.schema = lxml.etree.RelaxNG(xml)
+ ns = xml.get("ns")
+ self.xmlns = "{" + ns + "}"
+ self.nsmap = { None : ns }
+ x = xml.xpath("ns0:define[@name = 'version']/ns0:value",
+ namespaces = dict(ns0 = "http://relaxng.org/ns/structure/1.0"))
+ if len(x) == 1:
+ self.version = x[0].text
- def __getattr__(self, name):
- return getattr(self.schema, name)
+ def __getattr__(self, name):
+ return getattr(self.schema, name)
diff --git a/rpki/resource_set.py b/rpki/resource_set.py
index fea6ad2d..055076dd 100644
--- a/rpki/resource_set.py
+++ b/rpki/resource_set.py
@@ -44,745 +44,784 @@ re_prefix_with_maxlen = re.compile("^([0-9:.a-fA-F]+)/([0-9]+)-([0-9]+)$")
re_prefix = re.compile("^([0-9:.a-fA-F]+)/([0-9]+)$")
class resource_range(object):
- """
- Generic resource range type. Assumes underlying type is some kind
- of integer.
-
- This is a virtual class. You probably don't want to use this type
- directly.
- """
-
- def __init__(self, range_min, range_max):
- assert range_min.__class__ is range_max.__class__, \
- "Type mismatch, %r doesn't match %r" % (range_min.__class__, range_max.__class__)
- assert range_min <= range_max, "Mis-ordered range: %s before %s" % (range_min, range_max)
- self.min = range_min
- self.max = range_max
-
- def __cmp__(self, other):
- assert self.__class__ is other.__class__, \
- "Type mismatch, comparing %r with %r" % (self.__class__, other.__class__)
- return cmp(self.min, other.min) or cmp(self.max, other.max)
-
-class resource_range_as(resource_range):
- """
- Range of Autonomous System Numbers.
-
- Denotes a single ASN by a range whose min and max values are
- identical.
- """
-
- ## @var datum_type
- # Type of underlying data (min and max).
-
- datum_type = long
-
- def __init__(self, range_min, range_max):
- resource_range.__init__(self,
- long(range_min) if isinstance(range_min, int) else range_min,
- long(range_max) if isinstance(range_max, int) else range_max)
-
- def __str__(self):
- """
- Convert a resource_range_as to string format.
- """
- if self.min == self.max:
- return str(self.min)
- else:
- return str(self.min) + "-" + str(self.max)
-
- @classmethod
- def parse_str(cls, x):
"""
- Parse ASN resource range from text (eg, XML attributes).
- """
- r = re_asn_range.match(x)
- if r:
- return cls(long(r.group(1)), long(r.group(2)))
- else:
- return cls(long(x), long(x))
+ Generic resource range type. Assumes underlying type is some kind
+ of integer.
- @classmethod
- def from_strings(cls, a, b = None):
- """
- Construct ASN range from strings.
+ This is a virtual class. You probably don't want to use this type
+ directly.
"""
- if b is None:
- b = a
- return cls(long(a), long(b))
-
-class resource_range_ip(resource_range):
- """
- Range of (generic) IP addresses.
-
- Prefixes are converted to ranges on input, and ranges that can be
- represented as prefixes are written as prefixes on output.
- This is a virtual class. You probably don't want to use it
- directly.
- """
+ # Give pylint a little help here
- ## @var datum_type
- # Type of underlying data (min and max).
-
- datum_type = rpki.POW.IPAddress
-
- def prefixlen(self):
- """
- Determine whether a resource_range_ip can be expressed as a
- prefix. Returns prefix length if it can, otherwise raises
- MustBePrefix exception.
- """
- mask = self.min ^ self.max
- if self.min & mask != 0:
- raise rpki.exceptions.MustBePrefix
- prefixlen = self.min.bits
- while mask & 1:
- prefixlen -= 1
- mask >>= 1
- if mask:
- raise rpki.exceptions.MustBePrefix
- return prefixlen
-
- @property
- def can_be_prefix(self):
- """
- Boolean property indicating whether this range can be expressed as
- a prefix.
+ datum_type = int
+ parse_str = int
- This just calls .prefixlen() to do the work, so that we can keep
- the logic in one place. This property is useful primarily in
- context where catching an exception isn't practical.
- """
- try:
- self.prefixlen()
- return True
- except rpki.exceptions.MustBePrefix:
- return False
+ def __init__(self, range_min, range_max):
+ assert range_min.__class__ is range_max.__class__, \
+ "Type mismatch, %r doesn't match %r" % (range_min.__class__, range_max.__class__)
+ assert range_min <= range_max, "Mis-ordered range: %s before %s" % (range_min, range_max)
+ self.min = range_min
+ self.max = range_max
- def __str__(self):
- """
- Convert a resource_range_ip to string format.
- """
- try:
- return str(self.min) + "/" + str(self.prefixlen())
- except rpki.exceptions.MustBePrefix:
- return str(self.min) + "-" + str(self.max)
+ def __cmp__(self, other):
+ assert self.__class__ is other.__class__, \
+ "Type mismatch, comparing %r with %r" % (self.__class__, other.__class__)
+ return cmp(self.min, other.min) or cmp(self.max, other.max)
- @classmethod
- def parse_str(cls, x):
- """
- Parse IP address range or prefix from text (eg, XML attributes).
- """
- r = re_address_range.match(x)
- if r:
- return cls.from_strings(r.group(1), r.group(2))
- r = re_prefix.match(x)
- if r:
- a = rpki.POW.IPAddress(r.group(1))
- if cls is resource_range_ip and a.version == 4:
- cls = resource_range_ipv4
- if cls is resource_range_ip and a.version == 6:
- cls = resource_range_ipv6
- return cls.make_prefix(a, int(r.group(2)))
- raise rpki.exceptions.BadIPResource('Bad IP resource "%s"' % x)
-
- @classmethod
- def make_prefix(cls, prefix, prefixlen):
- """
- Construct a resource range corresponding to a prefix.
+class resource_range_as(resource_range):
"""
- assert isinstance(prefix, rpki.POW.IPAddress) and isinstance(prefixlen, (int, long))
- assert prefixlen >= 0 and prefixlen <= prefix.bits, "Nonsensical prefix length: %s" % prefixlen
- mask = (1 << (prefix.bits - prefixlen)) - 1
- assert (prefix & mask) == 0, "Resource not in canonical form: %s/%s" % (prefix, prefixlen)
- return cls(prefix, rpki.POW.IPAddress(prefix | mask))
+ Range of Autonomous System Numbers.
- def chop_into_prefixes(self, result):
- """
- Chop up a resource_range_ip into ranges that can be represented as
- prefixes.
- """
- try:
- self.prefixlen()
- result.append(self)
- except rpki.exceptions.MustBePrefix:
- range_min = self.min
- range_max = self.max
- while range_max >= range_min:
- bits = int(math.log(long(range_max - range_min + 1), 2))
- while True:
- mask = ~(~0 << bits)
- assert range_min + mask <= range_max
- if range_min & mask == 0:
- break
- assert bits > 0
- bits -= 1
- result.append(self.make_prefix(range_min, range_min.bits - bits))
- range_min = range_min + mask + 1
-
- @classmethod
- def from_strings(cls, a, b = None):
+ Denotes a single ASN by a range whose min and max values are
+ identical.
"""
- Construct IP address range from strings.
- """
- if b is None:
- b = a
- a = rpki.POW.IPAddress(a)
- b = rpki.POW.IPAddress(b)
- if a.version != b.version:
- raise TypeError
- if cls is resource_range_ip:
- if a.version == 4:
- return resource_range_ipv4(a, b)
- if a.version == 6:
- return resource_range_ipv6(a, b)
- elif a.version == cls.version:
- return cls(a, b)
- else:
- raise TypeError
-class resource_range_ipv4(resource_range_ip):
- """
- Range of IPv4 addresses.
- """
-
- version = 4
-
-class resource_range_ipv6(resource_range_ip):
- """
- Range of IPv6 addresses.
- """
-
- version = 6
+ ## @var datum_type
+ # Type of underlying data (min and max).
-def _rsplit(rset, that):
- """
- Utility function to split a resource range into two resource ranges.
- """
+ datum_type = long
- this = rset.pop(0)
+ def __init__(self, range_min, range_max):
+ resource_range.__init__(self,
+ long(range_min) if isinstance(range_min, int) else range_min,
+ long(range_max) if isinstance(range_max, int) else range_max)
- assert type(this) is type(that), "type(this) [%r] is not type(that) [%r]" % (type(this), type(that))
-
- assert type(this.min) is type(that.min), "type(this.min) [%r] is not type(that.min) [%r]" % (type(this.min), type(that.min))
- assert type(this.min) is type(this.max), "type(this.min) [%r] is not type(this.max) [%r]" % (type(this.min), type(this.max))
- assert type(that.min) is type(that.max), "type(that.min) [%r] is not type(that.max) [%r]" % (type(that.min), type(that.max))
-
- if this.min < that.min:
- rset.insert(0, type(this)(this.min, type(that.min)(that.min - 1)))
- rset.insert(1, type(this)(that.min, this.max))
-
- else:
- assert this.max > that.max
- rset.insert(0, type(this)(this.min, that.max))
- rset.insert(1, type(this)(type(that.max)(that.max + 1), this.max))
-
-class resource_set(list):
- """
- Generic resource set, a list subclass containing resource ranges.
+ def __str__(self):
+ """
+ Convert a resource_range_as to string format.
+ """
- This is a virtual class. You probably don't want to use it
- directly.
- """
+ if self.min == self.max:
+ return str(self.min)
+ else:
+ return str(self.min) + "-" + str(self.max)
- ## @var inherit
- # Boolean indicating whether this resource_set uses RFC 3779 inheritance.
+ @classmethod
+ def parse_str(cls, x):
+ """
+ Parse ASN resource range from text (eg, XML attributes).
+ """
- inherit = False
+ r = re_asn_range.match(x)
+ if r:
+ return cls(long(r.group(1)), long(r.group(2)))
+ else:
+ return cls(long(x), long(x))
- ## @var canonical
- # Whether this resource_set is currently in canonical form.
+ @classmethod
+ def from_strings(cls, a, b = None):
+ """
+ Construct ASN range from strings.
+ """
- canonical = False
+ if b is None:
+ b = a
+ return cls(long(a), long(b))
- def __init__(self, ini = None, allow_overlap = False):
- """
- Initialize a resource_set.
- """
- list.__init__(self)
- if isinstance(ini, (int, long)):
- ini = str(ini)
- if ini is inherit_token:
- self.inherit = True
- elif isinstance(ini, str) and len(ini):
- self.extend(self.parse_str(s) for s in ini.split(","))
- elif isinstance(ini, list):
- self.extend(ini)
- elif ini is not None and ini != "":
- raise ValueError("Unexpected initializer: %s" % str(ini))
- self.canonize(allow_overlap)
-
- def canonize(self, allow_overlap = False):
- """
- Whack this resource_set into canonical form.
+class resource_range_ip(resource_range):
"""
- assert not self.inherit or len(self) == 0
- if not self.canonical:
- self.sort()
- i = 0
- while i + 1 < len(self):
- if allow_overlap and self[i].max + 1 >= self[i+1].min:
- self[i] = type(self[i])(self[i].min, max(self[i].max, self[i+1].max))
- del self[i+1]
- elif self[i].max + 1 == self[i+1].min:
- self[i] = type(self[i])(self[i].min, self[i+1].max)
- del self[i+1]
+ Range of (generic) IP addresses.
+
+ Prefixes are converted to ranges on input, and ranges that can be
+ represented as prefixes are written as prefixes on output.
+
+ This is a virtual class. You probably don't want to use it
+ directly.
+ """
+
+ ## @var datum_type
+ # Type of underlying data (min and max).
+
+ datum_type = rpki.POW.IPAddress
+
+ # Give pylint a little help here
+ version = None
+
+ def prefixlen(self):
+ """
+ Determine whether a resource_range_ip can be expressed as a
+ prefix. Returns prefix length if it can, otherwise raises
+ MustBePrefix exception.
+ """
+
+ mask = self.min ^ self.max
+ if self.min & mask != 0:
+ raise rpki.exceptions.MustBePrefix
+ prefixlen = self.min.bits
+ while mask & 1:
+ prefixlen -= 1
+ mask >>= 1
+ if mask:
+ raise rpki.exceptions.MustBePrefix
+ return prefixlen
+
+ @property
+ def can_be_prefix(self):
+ """
+ Boolean property indicating whether this range can be expressed as
+ a prefix.
+
+ This just calls .prefixlen() to do the work, so that we can keep
+ the logic in one place. This property is useful primarily in
+ context where catching an exception isn't practical.
+ """
+
+ try:
+ self.prefixlen()
+ return True
+ except rpki.exceptions.MustBePrefix:
+ return False
+
+ def __str__(self):
+ """
+ Convert a resource_range_ip to string format.
+ """
+
+ try:
+ return str(self.min) + "/" + str(self.prefixlen())
+ except rpki.exceptions.MustBePrefix:
+ return str(self.min) + "-" + str(self.max)
+
+ @classmethod
+ def parse_str(cls, x):
+ """
+ Parse IP address range or prefix from text (eg, XML attributes).
+ """
+
+ r = re_address_range.match(x)
+ if r:
+ return cls.from_strings(r.group(1), r.group(2))
+ r = re_prefix.match(x)
+ if r:
+ a = rpki.POW.IPAddress(r.group(1))
+ if cls is resource_range_ip and a.version == 4:
+ cls = resource_range_ipv4
+ if cls is resource_range_ip and a.version == 6:
+ cls = resource_range_ipv6
+ return cls.make_prefix(a, int(r.group(2)))
+ raise rpki.exceptions.BadIPResource('Bad IP resource "%s"' % x)
+
+ @classmethod
+ def make_prefix(cls, prefix, prefixlen):
+ """
+ Construct a resource range corresponding to a prefix.
+ """
+
+ assert isinstance(prefix, rpki.POW.IPAddress) and isinstance(prefixlen, (int, long))
+ assert prefixlen >= 0 and prefixlen <= prefix.bits, "Nonsensical prefix length: %s" % prefixlen
+ mask = (1 << (prefix.bits - prefixlen)) - 1
+ assert (prefix & mask) == 0, "Resource not in canonical form: %s/%s" % (prefix, prefixlen)
+ return cls(prefix, rpki.POW.IPAddress(prefix | mask))
+
+ def chop_into_prefixes(self, result):
+ """
+ Chop up a resource_range_ip into ranges that can be represented as
+ prefixes.
+ """
+
+ try:
+ self.prefixlen()
+ result.append(self)
+ except rpki.exceptions.MustBePrefix:
+ range_min = self.min
+ range_max = self.max
+ while range_max >= range_min:
+ bits = int(math.log(long(range_max - range_min + 1), 2))
+ while True:
+ mask = ~(~0 << bits)
+ assert range_min + mask <= range_max
+ if range_min & mask == 0:
+ break
+ assert bits > 0
+ bits -= 1
+ result.append(self.make_prefix(range_min, range_min.bits - bits))
+ range_min = range_min + mask + 1
+
+ @classmethod
+ def from_strings(cls, a, b = None):
+ """
+ Construct IP address range from strings.
+ """
+
+ if b is None:
+ b = a
+ a = rpki.POW.IPAddress(a)
+ b = rpki.POW.IPAddress(b)
+ if a.version != b.version:
+ raise TypeError
+ if cls is resource_range_ip:
+ if a.version == 4:
+ return resource_range_ipv4(a, b)
+ if a.version == 6:
+ return resource_range_ipv6(a, b)
+ elif a.version == cls.version:
+ return cls(a, b)
else:
- i += 1
- for i in xrange(0, len(self) - 1):
- if self[i].max >= self[i+1].min:
- raise rpki.exceptions.ResourceOverlap("Resource overlap: %s %s" % (self[i], self[i+1]))
- self.canonical = True
+ raise TypeError
- def append(self, item):
- """
- Wrapper around list.append() (q.v.) to reset canonical flag.
- """
- list.append(self, item)
- self.canonical = False
-
- def extend(self, item):
+class resource_range_ipv4(resource_range_ip):
"""
- Wrapper around list.extend() (q.v.) to reset canonical flag.
+ Range of IPv4 addresses.
"""
- list.extend(self, item)
- self.canonical = False
- def __str__(self):
- """
- Convert a resource_set to string format.
- """
- if self.inherit:
- return inherit_token
- else:
- return ",".join(str(x) for x in self)
+ version = 4
- def _comm(self, other):
+class resource_range_ipv6(resource_range_ip):
"""
- Like comm(1), sort of.
-
- Returns a tuple of three resource sets: resources only in self,
- resources only in other, and resources in both. Used (not very
- efficiently) as the basis for most set operations on resource
- sets.
+ Range of IPv6 addresses.
"""
- assert not self.inherit
- assert type(self) is type(other), "Type mismatch %r %r" % (type(self), type(other))
- set1 = type(self)(self) # clone and whack into canonical form
- set2 = type(other)(other) # ditto
- only1, only2, both = [], [], []
- while set1 or set2:
- if set1 and (not set2 or set1[0].max < set2[0].min):
- only1.append(set1.pop(0))
- elif set2 and (not set1 or set2[0].max < set1[0].min):
- only2.append(set2.pop(0))
- elif set1[0].min < set2[0].min:
- _rsplit(set1, set2[0])
- elif set2[0].min < set1[0].min:
- _rsplit(set2, set1[0])
- elif set1[0].max < set2[0].max:
- _rsplit(set2, set1[0])
- elif set2[0].max < set1[0].max:
- _rsplit(set1, set2[0])
- else:
- assert set1[0].min == set2[0].min and set1[0].max == set2[0].max
- both.append(set1.pop(0))
- set2.pop(0)
- return type(self)(only1), type(self)(only2), type(self)(both)
-
- def union(self, other):
- """
- Set union for resource sets.
- """
+ version = 6
- assert not self.inherit
- assert type(self) is type(other), "Type mismatch: %r %r" % (type(self), type(other))
- set1 = type(self)(self) # clone and whack into canonical form
- set2 = type(other)(other) # ditto
- result = []
- while set1 or set2:
- if set1 and (not set2 or set1[0].max < set2[0].min):
- result.append(set1.pop(0))
- elif set2 and (not set1 or set2[0].max < set1[0].min):
- result.append(set2.pop(0))
- else:
- this = set1.pop(0)
- that = set2.pop(0)
- assert type(this) is type(that)
- range_min = min(this.min, that.min)
- range_max = max(this.max, that.max)
- result.append(type(this)(range_min, range_max))
- while set1 and set1[0].max <= range_max:
- assert set1[0].min >= range_min
- del set1[0]
- while set2 and set2[0].max <= range_max:
- assert set2[0].min >= range_min
- del set2[0]
- return type(self)(result)
-
- __or__ = union
-
- def intersection(self, other):
+def _rsplit(rset, that):
"""
- Set intersection for resource sets.
+ Utility function to split a resource range into two resource ranges.
"""
- return self._comm(other)[2]
- __and__ = intersection
+ this = rset.pop(0)
- def difference(self, other):
- """
- Set difference for resource sets.
- """
- return self._comm(other)[0]
+ assert type(this) is type(that), "type(this) [%r] is not type(that) [%r]" % (type(this), type(that))
- __sub__ = difference
-
- def symmetric_difference(self, other):
- """
- Set symmetric difference (XOR) for resource sets.
- """
- com = self._comm(other)
- return com[0] | com[1]
+ assert type(this.min) is type(that.min), "type(this.min) [%r] is not type(that.min) [%r]" % (type(this.min), type(that.min))
+ assert type(this.min) is type(this.max), "type(this.min) [%r] is not type(this.max) [%r]" % (type(this.min), type(this.max))
+ assert type(that.min) is type(that.max), "type(that.min) [%r] is not type(that.max) [%r]" % (type(that.min), type(that.max))
- __xor__ = symmetric_difference
+ if this.min < that.min:
+ rset.insert(0, type(this)(this.min, type(that.min)(that.min - 1)))
+ rset.insert(1, type(this)(that.min, this.max))
- def contains(self, item):
- """
- Set membership test for resource sets.
- """
- assert not self.inherit
- self.canonize()
- if not self:
- return False
- if type(item) is type(self[0]):
- range_min = item.min
- range_max = item.max
else:
- range_min = item
- range_max = item
- lo = 0
- hi = len(self)
- while lo < hi:
- mid = (lo + hi) / 2
- if self[mid].max < range_max:
- lo = mid + 1
- else:
- hi = mid
- return lo < len(self) and self[lo].min <= range_min and self[lo].max >= range_max
-
- __contains__ = contains
-
- def issubset(self, other):
- """
- Test whether self is a subset (possibly improper) of other.
- """
- for i in self:
- if not other.contains(i):
- return False
- return True
-
- __le__ = issubset
+ assert this.max > that.max
+ rset.insert(0, type(this)(this.min, that.max))
+ rset.insert(1, type(this)(type(that.max)(that.max + 1), this.max))
- def issuperset(self, other):
- """
- Test whether self is a superset (possibly improper) of other.
+class resource_set(list):
"""
- return other.issubset(self)
+ Generic resource set, a list subclass containing resource ranges.
+
+ This is a virtual class. You probably don't want to use it
+ directly.
+ """
+
+ ## @var inherit
+ # Boolean indicating whether this resource_set uses RFC 3779 inheritance.
+
+ inherit = False
+
+ ## @var canonical
+ # Whether this resource_set is currently in canonical form.
+
+ canonical = False
+
+ # Give pylint a little help here
+ range_type = resource_range
+
+ def __init__(self, ini = None, allow_overlap = False):
+ """
+ Initialize a resource_set.
+ """
+
+ list.__init__(self)
+ if isinstance(ini, (int, long)):
+ ini = str(ini)
+ if ini is inherit_token:
+ self.inherit = True
+ elif isinstance(ini, (str, unicode)) and len(ini):
+ self.extend(self.parse_str(s) for s in ini.split(","))
+ elif isinstance(ini, list):
+ self.extend(ini)
+ elif ini is not None and ini != "":
+ raise ValueError("Unexpected initializer: %s" % str(ini))
+ self.canonize(allow_overlap)
+
+ def canonize(self, allow_overlap = False):
+ """
+ Whack this resource_set into canonical form.
+ """
+
+ assert not self.inherit or len(self) == 0
+ if not self.canonical:
+ self.sort()
+ i = 0
+ while i + 1 < len(self):
+ if allow_overlap and self[i].max + 1 >= self[i+1].min:
+ self[i] = type(self[i])(self[i].min, max(self[i].max, self[i+1].max))
+ del self[i+1]
+ elif self[i].max + 1 == self[i+1].min:
+ self[i] = type(self[i])(self[i].min, self[i+1].max)
+ del self[i+1]
+ else:
+ i += 1
+ for i in xrange(0, len(self) - 1):
+ if self[i].max >= self[i+1].min:
+ raise rpki.exceptions.ResourceOverlap("Resource overlap: %s %s" % (self[i], self[i+1]))
+ self.canonical = True
+
+ def append(self, item):
+ """
+ Wrapper around list.append() (q.v.) to reset canonical flag.
+ """
+
+ list.append(self, item)
+ self.canonical = False
+
+ def extend(self, item):
+ """
+ Wrapper around list.extend() (q.v.) to reset canonical flag.
+ """
+
+ list.extend(self, item)
+ self.canonical = False
+
+ def __str__(self):
+ """
+ Convert a resource_set to string format.
+ """
+
+ if self.inherit:
+ return inherit_token
+ else:
+ return ",".join(str(x) for x in self)
+
+ def _comm(self, other):
+ """
+ Like comm(1), sort of.
+
+ Returns a tuple of three resource sets: resources only in self,
+ resources only in other, and resources in both. Used (not very
+ efficiently) as the basis for most set operations on resource
+ sets.
+ """
+
+ assert not self.inherit
+ assert type(self) is type(other), "Type mismatch %r %r" % (type(self), type(other))
+ set1 = type(self)(self) # clone and whack into canonical form
+ set2 = type(other)(other) # ditto
+ only1, only2, both = [], [], []
+ while set1 or set2:
+ if set1 and (not set2 or set1[0].max < set2[0].min):
+ only1.append(set1.pop(0))
+ elif set2 and (not set1 or set2[0].max < set1[0].min):
+ only2.append(set2.pop(0))
+ elif set1[0].min < set2[0].min:
+ _rsplit(set1, set2[0])
+ elif set2[0].min < set1[0].min:
+ _rsplit(set2, set1[0])
+ elif set1[0].max < set2[0].max:
+ _rsplit(set2, set1[0])
+ elif set2[0].max < set1[0].max:
+ _rsplit(set1, set2[0])
+ else:
+ assert set1[0].min == set2[0].min and set1[0].max == set2[0].max
+ both.append(set1.pop(0))
+ set2.pop(0)
+ return type(self)(only1), type(self)(only2), type(self)(both)
+
+ def union(self, other):
+ """
+ Set union for resource sets.
+ """
+
+ assert not self.inherit
+ assert type(self) is type(other), "Type mismatch: %r %r" % (type(self), type(other))
+ set1 = type(self)(self) # clone and whack into canonical form
+ set2 = type(other)(other) # ditto
+ result = []
+ while set1 or set2:
+ if set1 and (not set2 or set1[0].max < set2[0].min):
+ result.append(set1.pop(0))
+ elif set2 and (not set1 or set2[0].max < set1[0].min):
+ result.append(set2.pop(0))
+ else:
+ this = set1.pop(0)
+ that = set2.pop(0)
+ assert type(this) is type(that)
+ range_min = min(this.min, that.min)
+ range_max = max(this.max, that.max)
+ result.append(type(this)(range_min, range_max))
+ while set1 and set1[0].max <= range_max:
+ assert set1[0].min >= range_min
+ del set1[0]
+ while set2 and set2[0].max <= range_max:
+ assert set2[0].min >= range_min
+ del set2[0]
+ return type(self)(result)
+
+ __or__ = union
+
+ def intersection(self, other):
+ """
+ Set intersection for resource sets.
+ """
+
+ return self._comm(other)[2]
+
+ __and__ = intersection
+
+ def difference(self, other):
+ """
+ Set difference for resource sets.
+ """
+
+ return self._comm(other)[0]
+
+ __sub__ = difference
+
+ def symmetric_difference(self, other):
+ """
+ Set symmetric difference (XOR) for resource sets.
+ """
+
+ com = self._comm(other)
+ return com[0] | com[1]
+
+ __xor__ = symmetric_difference
+
+ def contains(self, item):
+ """
+ Set membership test for resource sets.
+ """
+
+ assert not self.inherit
+ self.canonize()
+ if not self:
+ return False
+ if type(item) is type(self[0]):
+ range_min = item.min
+ range_max = item.max
+ else:
+ range_min = item
+ range_max = item
+ lo = 0
+ hi = len(self)
+ while lo < hi:
+ mid = (lo + hi) / 2
+ if self[mid].max < range_max:
+ lo = mid + 1
+ else:
+ hi = mid
+ return lo < len(self) and self[lo].min <= range_min and self[lo].max >= range_max
- __ge__ = issuperset
+ __contains__ = contains
- def __lt__(self, other):
- return not self.issuperset(other)
+ def issubset(self, other):
+ """
+ Test whether self is a subset (possibly improper) of other.
+ """
- def __gt__(self, other):
- return not self.issubset(other)
+ for i in self:
+ if not other.contains(i):
+ return False
+ return True
- def __ne__(self, other):
- """
- A set with the inherit bit set is always unequal to any other set, because
- we can't know the answer here. This is also consistent with __nonzero__
- which returns True for inherit sets, and False for empty sets.
- """
- return self.inherit or other.inherit or list.__ne__(self, other)
+ __le__ = issubset
- def __eq__(self, other):
- return not self.__ne__(other)
+ def issuperset(self, other):
+ """
+ Test whether self is a superset (possibly improper) of other.
+ """
- def __nonzero__(self):
- """
- Tests whether or not this set is empty. Note that sets with the inherit
- bit set are considered non-empty, despite having zero length.
- """
- return self.inherit or len(self)
+ return other.issubset(self)
- @classmethod
- def from_sql(cls, sql, query, args = None):
- """
- Create resource set from an SQL query.
+ __ge__ = issuperset
- sql is an object that supports execute() and fetchall() methods
- like a DB API 2.0 cursor object.
+ def __lt__(self, other):
+ return not self.issuperset(other)
- query is an SQL query that returns a sequence of (min, max) pairs.
- """
+ def __gt__(self, other):
+ return not self.issubset(other)
- sql.execute(query, args)
- return cls(ini = [cls.range_type(cls.range_type.datum_type(b),
- cls.range_type.datum_type(e))
- for (b, e) in sql.fetchall()])
+ def __ne__(self, other):
+ """
+ A set with the inherit bit set is always unequal to any other set, because
+ we can't know the answer here. This is also consistent with __nonzero__
+ which returns True for inherit sets, and False for empty sets.
+ """
- @classmethod
- def from_django(cls, iterable):
- """
- Create resource set from a Django query.
+ return self.inherit or other.inherit or list.__ne__(self, other)
- iterable is something which returns (min, max) pairs.
- """
+ def __eq__(self, other):
+ return not self.__ne__(other)
- return cls(ini = [cls.range_type(cls.range_type.datum_type(b),
- cls.range_type.datum_type(e))
- for (b, e) in iterable])
+ def __nonzero__(self):
+ """
+ Tests whether or not this set is empty. Note that sets with the inherit
+ bit set are considered non-empty, despite having zero length.
+ """
- @classmethod
- def parse_str(cls, s):
- """
- Parse resource set from text string (eg, XML attributes). This is
- a backwards compatability wrapper, real functionality is now part
- of the range classes.
- """
- return cls.range_type.parse_str(s)
+ return self.inherit or len(self)
-class resource_set_as(resource_set):
- """
- Autonomous System Number resource set.
- """
+ @classmethod
+ def from_django(cls, iterable):
+ """
+ Create resource set from a Django query.
- ## @var range_type
- # Type of range underlying this type of resource_set.
+ iterable is something which returns (min, max) pairs.
+ """
- range_type = resource_range_as
+ return cls(ini = [cls.range_type(cls.range_type.datum_type(b),
+ cls.range_type.datum_type(e))
+ for (b, e) in iterable])
-class resource_set_ip(resource_set):
- """
- (Generic) IP address resource set.
+ @classmethod
+ def parse_str(cls, s):
+ """
+ Parse resource set from text string (eg, XML attributes). This is
+ a backwards compatibility wrapper, real functionality is now part
+ of the range classes.
+ """
- This is a virtual class. You probably don't want to use it
- directly.
- """
+ return cls.range_type.parse_str(s)
- def to_roa_prefix_set(self):
+class resource_set_as(resource_set):
"""
- Convert from a resource set to a ROA prefix set.
+ Autonomous System Number resource set.
"""
- prefix_ranges = []
- for r in self:
- r.chop_into_prefixes(prefix_ranges)
- return self.roa_prefix_set_type([
- self.roa_prefix_set_type.prefix_type(r.min, r.prefixlen())
- for r in prefix_ranges])
-
-class resource_set_ipv4(resource_set_ip):
- """
- IPv4 address resource set.
- """
-
- ## @var range_type
- # Type of range underlying this type of resource_set.
-
- range_type = resource_range_ipv4
-
-class resource_set_ipv6(resource_set_ip):
- """
- IPv6 address resource set.
- """
-
- ## @var range_type
- # Type of range underlying this type of resource_set.
-
- range_type = resource_range_ipv6
-
-class resource_bag(object):
- """
- Container to simplify passing around the usual triple of ASN, IPv4,
- and IPv6 resource sets.
- """
- ## @var asn
- # Set of Autonomous System Number resources.
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
- ## @var v4
- # Set of IPv4 resources.
+ range_type = resource_range_as
- ## @var v6
- # Set of IPv6 resources.
-
- ## @var valid_until
- # Expiration date of resources, for setting certificate notAfter field.
-
- def __init__(self, asn = None, v4 = None, v6 = None, valid_until = None):
- self.asn = asn or resource_set_as()
- self.v4 = v4 or resource_set_ipv4()
- self.v6 = v6 or resource_set_ipv6()
- self.valid_until = valid_until
-
- def oversized(self, other):
- """
- True iff self is oversized with respect to other.
- """
- return not self.asn.issubset(other.asn) or \
- not self.v4.issubset(other.v4) or \
- not self.v6.issubset(other.v6)
-
- def undersized(self, other):
- """
- True iff self is undersized with respect to other.
- """
- return not other.asn.issubset(self.asn) or \
- not other.v4.issubset(self.v4) or \
- not other.v6.issubset(self.v6)
-
- @classmethod
- def from_inheritance(cls):
- """
- Build a resource bag that just inherits everything from its
- parent.
- """
- self = cls()
- self.asn = resource_set_as()
- self.v4 = resource_set_ipv4()
- self.v6 = resource_set_ipv6()
- self.asn.inherit = True
- self.v4.inherit = True
- self.v6.inherit = True
- return self
-
- @classmethod
- def from_str(cls, text, allow_overlap = False):
- """
- Parse a comma-separated text string into a resource_bag. Not
- particularly efficient, fix that if and when it becomes an issue.
- """
- asns = []
- v4s = []
- v6s = []
- for word in text.split(","):
- if "." in word:
- v4s.append(word)
- elif ":" in word:
- v6s.append(word)
- else:
- asns.append(word)
- return cls(asn = resource_set_as(",".join(asns), allow_overlap) if asns else None,
- v4 = resource_set_ipv4(",".join(v4s), allow_overlap) if v4s else None,
- v6 = resource_set_ipv6(",".join(v6s), allow_overlap) if v6s else None)
-
- @classmethod
- def from_POW_rfc3779(cls, resources):
+class resource_set_ip(resource_set):
"""
- Build a resource_bag from data returned by
- rpki.POW.X509.getRFC3779().
+ (Generic) IP address resource set.
- The conversion to long for v4 and v6 is (intended to be)
- temporary: in the long run, we should be using rpki.POW.IPAddress
- rather than long here.
+ This is a virtual class. You probably don't want to use it
+ directly.
"""
- asn = inherit_token if resources[0] == "inherit" else [resource_range_as( r[0], r[1]) for r in resources[0] or ()]
- v4 = inherit_token if resources[1] == "inherit" else [resource_range_ipv4(r[0], r[1]) for r in resources[1] or ()]
- v6 = inherit_token if resources[2] == "inherit" else [resource_range_ipv6(r[0], r[1]) for r in resources[2] or ()]
- return cls(resource_set_as(asn) if asn else None,
- resource_set_ipv4(v4) if v4 else None,
- resource_set_ipv6(v6) if v6 else None)
-
- def empty(self):
- """
- True iff all resource sets in this bag are empty.
- """
- return not self.asn and not self.v4 and not self.v6
-
- def __nonzero__(self):
- return not self.empty()
- def __eq__(self, other):
- return self.asn == other.asn and \
- self.v4 == other.v4 and \
- self.v6 == other.v6 and \
- self.valid_until == other.valid_until
+ def to_roa_prefix_set(self):
+ """
+ Convert from a resource set to a ROA prefix set.
+ """
- def __ne__(self, other):
- return not (self == other) # pylint: disable=C0325
+ # pylint: disable=E1101
+ prefix_ranges = []
+ for r in self:
+ r.chop_into_prefixes(prefix_ranges)
+ return self.roa_prefix_set_type([
+ self.roa_prefix_set_type.prefix_type(r.min, r.prefixlen())
+ for r in prefix_ranges])
- def intersection(self, other):
+class resource_set_ipv4(resource_set_ip):
"""
- Compute intersection with another resource_bag. valid_until
- attribute (if any) inherits from self.
+ IPv4 address resource set.
"""
- return self.__class__(self.asn & other.asn,
- self.v4 & other.v4,
- self.v6 & other.v6,
- self.valid_until)
- __and__ = intersection
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
- def union(self, other):
- """
- Compute union with another resource_bag. valid_until attribute
- (if any) inherits from self.
- """
- return self.__class__(self.asn | other.asn,
- self.v4 | other.v4,
- self.v6 | other.v6,
- self.valid_until)
+ range_type = resource_range_ipv4
- __or__ = union
-
- def difference(self, other):
+class resource_set_ipv6(resource_set_ip):
"""
- Compute difference against another resource_bag. valid_until
- attribute (if any) inherits from self
+ IPv6 address resource set.
"""
- return self.__class__(self.asn - other.asn,
- self.v4 - other.v4,
- self.v6 - other.v6,
- self.valid_until)
- __sub__ = difference
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
- def symmetric_difference(self, other):
- """
- Compute symmetric difference against another resource_bag.
- valid_until attribute (if any) inherits from self
+ range_type = resource_range_ipv6
+
+class resource_bag(object):
"""
- return self.__class__(self.asn ^ other.asn,
- self.v4 ^ other.v4,
- self.v6 ^ other.v6,
- self.valid_until)
-
- __xor__ = symmetric_difference
-
- def __str__(self):
- s = ""
- if self.asn:
- s += "ASN: %s" % self.asn
- if self.v4:
- if s:
- s += ", "
- s += "V4: %s" % self.v4
- if self.v6:
- if s:
- s += ", "
- s += "V6: %s" % self.v6
- return s
-
- def __iter__(self):
- for r in self.asn:
- yield r
- for r in self.v4:
- yield r
- for r in self.v6:
- yield r
+ Container to simplify passing around the usual triple of ASN, IPv4,
+ and IPv6 resource sets.
+ """
+
+ ## @var asn
+ # Set of Autonomous System Number resources.
+
+ ## @var v4
+ # Set of IPv4 resources.
+
+ ## @var v6
+ # Set of IPv6 resources.
+
+ ## @var valid_until
+ # Expiration date of resources, for setting certificate notAfter field.
+
+ def __init__(self, asn = None, v4 = None, v6 = None, valid_until = None):
+ if isinstance(asn, (str, unicode)):
+ asn = resource_set_as(asn)
+ if isinstance(v4, (str, unicode)):
+ v4 = resource_set_ipv4(v4)
+ if isinstance(v6, (str, unicode)):
+ v6 = resource_set_ipv6(v6)
+ if isinstance(valid_until, (str, unicode)):
+ valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
+ self.asn = asn or resource_set_as()
+ self.v4 = v4 or resource_set_ipv4()
+ self.v6 = v6 or resource_set_ipv6()
+ self.valid_until = valid_until
+
+ def oversized(self, other):
+ """
+ True iff self is oversized with respect to other.
+ """
+
+ return not self.asn.issubset(other.asn) or \
+ not self.v4.issubset(other.v4) or \
+ not self.v6.issubset(other.v6)
+
+ def undersized(self, other):
+ """
+ True iff self is undersized with respect to other.
+ """
+
+ return not other.asn.issubset(self.asn) or \
+ not other.v4.issubset(self.v4) or \
+ not other.v6.issubset(self.v6)
+
+ @classmethod
+ def from_inheritance(cls):
+ """
+ Build a resource bag that just inherits everything from its
+ parent.
+ """
+
+ self = cls()
+ self.asn = resource_set_as()
+ self.v4 = resource_set_ipv4()
+ self.v6 = resource_set_ipv6()
+ self.asn.inherit = True
+ self.v4.inherit = True
+ self.v6.inherit = True
+ return self
+
+ @classmethod
+ def from_str(cls, text, allow_overlap = False):
+ """
+ Parse a comma-separated text string into a resource_bag. Not
+ particularly efficient, fix that if and when it becomes an issue.
+ """
+
+ asns = []
+ v4s = []
+ v6s = []
+ for word in text.split(","):
+ if "." in word:
+ v4s.append(word)
+ elif ":" in word:
+ v6s.append(word)
+ else:
+ asns.append(word)
+ return cls(asn = resource_set_as(",".join(asns), allow_overlap) if asns else None,
+ v4 = resource_set_ipv4(",".join(v4s), allow_overlap) if v4s else None,
+ v6 = resource_set_ipv6(",".join(v6s), allow_overlap) if v6s else None)
+
+ @classmethod
+ def from_POW_rfc3779(cls, resources):
+ """
+ Build a resource_bag from data returned by
+ rpki.POW.X509.getRFC3779().
+
+ The conversion to long for v4 and v6 is (intended to be)
+ temporary: in the long run, we should be using rpki.POW.IPAddress
+ rather than long here.
+ """
+
+ asn = inherit_token if resources[0] == "inherit" else [resource_range_as( r[0], r[1]) for r in resources[0] or ()]
+ v4 = inherit_token if resources[1] == "inherit" else [resource_range_ipv4(r[0], r[1]) for r in resources[1] or ()]
+ v6 = inherit_token if resources[2] == "inherit" else [resource_range_ipv6(r[0], r[1]) for r in resources[2] or ()]
+ return cls(resource_set_as(asn) if asn else None,
+ resource_set_ipv4(v4) if v4 else None,
+ resource_set_ipv6(v6) if v6 else None)
+
+ def empty(self):
+ """
+ True iff all resource sets in this bag are empty.
+ """
+
+ return not self.asn and not self.v4 and not self.v6
+
+ def __nonzero__(self):
+ return not self.empty()
+
+ def __eq__(self, other):
+ return self.asn == other.asn and \
+ self.v4 == other.v4 and \
+ self.v6 == other.v6 and \
+ self.valid_until == other.valid_until
+
+ def __ne__(self, other):
+ return not (self == other) # pylint: disable=C0325
+
+ def intersection(self, other):
+ """
+ Compute intersection with another resource_bag. valid_until
+ attribute (if any) inherits from self.
+ """
+
+ return self.__class__(self.asn & other.asn,
+ self.v4 & other.v4,
+ self.v6 & other.v6,
+ self.valid_until)
+
+ __and__ = intersection
+
+ def union(self, other):
+ """
+ Compute union with another resource_bag. valid_until attribute
+ (if any) inherits from self.
+ """
+
+ return self.__class__(self.asn | other.asn,
+ self.v4 | other.v4,
+ self.v6 | other.v6,
+ self.valid_until)
+
+ __or__ = union
+
+ def difference(self, other):
+ """
+ Compute difference against another resource_bag. valid_until
+ attribute (if any) inherits from self
+ """
+
+ return self.__class__(self.asn - other.asn,
+ self.v4 - other.v4,
+ self.v6 - other.v6,
+ self.valid_until)
+
+ __sub__ = difference
+
+ def symmetric_difference(self, other):
+ """
+ Compute symmetric difference against another resource_bag.
+ valid_until attribute (if any) inherits from self
+ """
+
+ return self.__class__(self.asn ^ other.asn,
+ self.v4 ^ other.v4,
+ self.v6 ^ other.v6,
+ self.valid_until)
+
+ __xor__ = symmetric_difference
+
+ def __str__(self):
+ s = ""
+ if self.asn:
+ s += "ASN: %s" % self.asn
+ if self.v4:
+ if s:
+ s += ", "
+ s += "V4: %s" % self.v4
+ if self.v6:
+ if s:
+ s += ", "
+ s += "V6: %s" % self.v6
+ return s
+
+ def __iter__(self):
+ for r in self.asn:
+ yield r
+ for r in self.v4:
+ yield r
+ for r in self.v6:
+ yield r
# Sadly, there are enough differences between RFC 3779 and the data
# structures in the latest proposed ROA format that we can't just use
@@ -793,356 +832,377 @@ class resource_bag(object):
# worth.
class roa_prefix(object):
- """
- ROA prefix. This is similar to the resource_range_ip class, but
- differs in that it only represents prefixes, never ranges, and
- includes the maximum prefix length as an additional value.
+ """
+ ROA prefix. This is similar to the resource_range_ip class, but
+ differs in that it only represents prefixes, never ranges, and
+ includes the maximum prefix length as an additional value.
- This is a virtual class, you probably don't want to use it directly.
- """
+ This is a virtual class, you probably don't want to use it directly.
+ """
- ## @var prefix
- # The prefix itself, an IP address with bits beyond the prefix
- # length zeroed.
+ ## @var prefix
+ # The prefix itself, an IP address with bits beyond the prefix
+ # length zeroed.
- ## @var prefixlen
- # (Minimum) prefix length.
+ ## @var prefixlen
+ # (Minimum) prefix length.
- ## @var max_prefixlen
- # Maxmimum prefix length.
+ ## @var max_prefixlen
+ # Maximum prefix length.
- def __init__(self, prefix, prefixlen, max_prefixlen = None):
- """
- Initialize a ROA prefix. max_prefixlen is optional and defaults
- to prefixlen. max_prefixlen must not be smaller than prefixlen.
- """
- if max_prefixlen is None:
- max_prefixlen = prefixlen
- assert max_prefixlen >= prefixlen, "Bad max_prefixlen: %d must not be shorter than %d" % (max_prefixlen, prefixlen)
- self.prefix = prefix
- self.prefixlen = prefixlen
- self.max_prefixlen = max_prefixlen
-
- def __cmp__(self, other):
- """
- Compare two ROA prefix objects. Comparision is based on prefix,
- prefixlen, and max_prefixlen, in that order.
- """
- assert self.__class__ is other.__class__
- return (cmp(self.prefix, other.prefix) or
- cmp(self.prefixlen, other.prefixlen) or
- cmp(self.max_prefixlen, other.max_prefixlen))
+ # Give pylint a little help
+ range_type = resource_range_ip
- def __str__(self):
- """
- Convert a ROA prefix to string format.
- """
- if self.prefixlen == self.max_prefixlen:
- return str(self.prefix) + "/" + str(self.prefixlen)
- else:
- return str(self.prefix) + "/" + str(self.prefixlen) + "-" + str(self.max_prefixlen)
+ def __init__(self, prefix, prefixlen, max_prefixlen = None):
+ """
+ Initialize a ROA prefix. max_prefixlen is optional and defaults
+ to prefixlen. max_prefixlen must not be smaller than prefixlen.
+ """
- def to_resource_range(self):
- """
- Convert this ROA prefix to the equivilent resource_range_ip
- object. This is an irreversable transformation because it loses
- the max_prefixlen attribute, nothing we can do about that.
- """
- return self.range_type.make_prefix(self.prefix, self.prefixlen)
+ if max_prefixlen is None:
+ max_prefixlen = prefixlen
+ assert max_prefixlen >= prefixlen, "Bad max_prefixlen: %d must not be shorter than %d" % (max_prefixlen, prefixlen)
+ self.prefix = prefix
+ self.prefixlen = prefixlen
+ self.max_prefixlen = max_prefixlen
- def min(self):
- """
- Return lowest address covered by prefix.
- """
- return self.prefix
+ def __cmp__(self, other):
+ """
+ Compare two ROA prefix objects. Comparison is based on prefix,
+ prefixlen, and max_prefixlen, in that order.
+ """
- def max(self):
- """
- Return highest address covered by prefix.
- """
- return self.prefix | ((1 << (self.prefix.bits - self.prefixlen)) - 1)
+ assert self.__class__ is other.__class__
+ return (cmp(self.prefix, other.prefix) or
+ cmp(self.prefixlen, other.prefixlen) or
+ cmp(self.max_prefixlen, other.max_prefixlen))
- def to_POW_roa_tuple(self):
- """
- Convert a resource_range_ip to rpki.POW.ROA.setPrefixes() format.
- """
- return self.prefix, self.prefixlen, self.max_prefixlen
+ def __str__(self):
+ """
+ Convert a ROA prefix to string format.
+ """
- @classmethod
- def parse_str(cls, x):
- """
- Parse ROA prefix from text (eg, an XML attribute).
- """
- r = re_prefix_with_maxlen.match(x)
- if r:
- return cls(rpki.POW.IPAddress(r.group(1)), int(r.group(2)), int(r.group(3)))
- r = re_prefix.match(x)
- if r:
- return cls(rpki.POW.IPAddress(r.group(1)), int(r.group(2)))
- raise rpki.exceptions.BadROAPrefix('Bad ROA prefix "%s"' % x)
+ if self.prefixlen == self.max_prefixlen:
+ return str(self.prefix) + "/" + str(self.prefixlen)
+ else:
+ return str(self.prefix) + "/" + str(self.prefixlen) + "-" + str(self.max_prefixlen)
-class roa_prefix_ipv4(roa_prefix):
- """
- IPv4 ROA prefix.
- """
+ def to_resource_range(self):
+ """
+ Convert this ROA prefix to the equivalent resource_range_ip
+ object. This is an irreversible transformation because it loses
+ the max_prefixlen attribute, nothing we can do about that.
+ """
- ## @var range_type
- # Type of corresponding resource_range_ip.
+ return self.range_type.make_prefix(self.prefix, self.prefixlen)
- range_type = resource_range_ipv4
+ def min(self):
+ """
+ Return lowest address covered by prefix.
+ """
-class roa_prefix_ipv6(roa_prefix):
- """
- IPv6 ROA prefix.
- """
+ return self.prefix
- ## @var range_type
- # Type of corresponding resource_range_ip.
+ def max(self):
+ """
+ Return highest address covered by prefix.
+ """
- range_type = resource_range_ipv6
+ return self.prefix | ((1 << (self.prefix.bits - self.prefixlen)) - 1)
-class roa_prefix_set(list):
- """
- Set of ROA prefixes, analogous to the resource_set_ip class.
- """
+ def to_POW_roa_tuple(self):
+ """
+ Convert a resource_range_ip to rpki.POW.ROA.setPrefixes() format.
+ """
- def __init__(self, ini = None):
- """
- Initialize a ROA prefix set.
- """
- list.__init__(self)
- if isinstance(ini, str) and len(ini):
- self.extend(self.parse_str(s) for s in ini.split(","))
- elif isinstance(ini, (list, tuple)):
- self.extend(ini)
- else:
- assert ini is None or ini == "", "Unexpected initializer: %s" % str(ini)
- self.sort()
+ return self.prefix, self.prefixlen, self.max_prefixlen
- def __str__(self):
- """
- Convert a ROA prefix set to string format.
- """
- return ",".join(str(x) for x in self)
+ @classmethod
+ def parse_str(cls, x):
+ """
+ Parse ROA prefix from text (eg, an XML attribute).
+ """
- @classmethod
- def parse_str(cls, s):
- """
- Parse ROA prefix from text (eg, an XML attribute).
- This method is a backwards compatability shim.
- """
- return cls.prefix_type.parse_str(s)
+ r = re_prefix_with_maxlen.match(x)
+ if r:
+ return cls(rpki.POW.IPAddress(r.group(1)), int(r.group(2)), int(r.group(3)))
+ r = re_prefix.match(x)
+ if r:
+ return cls(rpki.POW.IPAddress(r.group(1)), int(r.group(2)))
+ raise rpki.exceptions.BadROAPrefix('Bad ROA prefix "%s"' % x)
- def to_resource_set(self):
- """
- Convert a ROA prefix set to a resource set. This is an
- irreversable transformation. We have to compute a union here
- because ROA prefix sets can include overlaps, while RFC 3779
- resource sets cannot. This is ugly, and there is almost certainly
- a more efficient way to do this, but start by getting the output
- right before worrying about making it fast or pretty.
+class roa_prefix_ipv4(roa_prefix):
"""
- r = self.resource_set_type()
- s = self.resource_set_type()
- s.append(None)
- for p in self:
- s[0] = p.to_resource_range()
- r |= s
- return r
-
- @classmethod
- def from_sql(cls, sql, query, args = None):
+ IPv4 ROA prefix.
"""
- Create ROA prefix set from an SQL query.
-
- sql is an object that supports execute() and fetchall() methods
- like a DB API 2.0 cursor object.
- query is an SQL query that returns a sequence of (prefix,
- prefixlen, max_prefixlen) triples.
- """
+ ## @var range_type
+ # Type of corresponding resource_range_ip.
- sql.execute(query, args)
- return cls([cls.prefix_type(rpki.POW.IPAddress(x), int(y), int(z))
- for (x, y, z) in sql.fetchall()])
+ range_type = resource_range_ipv4
- @classmethod
- def from_django(cls, iterable):
+class roa_prefix_ipv6(roa_prefix):
"""
- Create ROA prefix set from a Django query.
-
- iterable is something which returns (prefix, prefixlen,
- max_prefixlen) triples.
+ IPv6 ROA prefix.
"""
- return cls([cls.prefix_type(rpki.POW.IPAddress(x), int(y), int(z))
- for (x, y, z) in iterable])
+ ## @var range_type
+ # Type of corresponding resource_range_ip.
- def to_POW_roa_tuple(self):
+ range_type = resource_range_ipv6
+
+class roa_prefix_set(list):
"""
- Convert ROA prefix set to form used by rpki.POW.ROA.setPrefixes().
+ Set of ROA prefixes, analogous to the resource_set_ip class.
"""
- if self:
- return tuple(a.to_POW_roa_tuple() for a in self)
- else:
- return None
+
+ # Give pylint a little help
+
+ prefix_type = roa_prefix
+ resource_set_type = resource_set_ip
+
+ def __init__(self, ini = None):
+ """
+ Initialize a ROA prefix set.
+ """
+
+ list.__init__(self)
+ if isinstance(ini, (str, unicode)) and len(ini):
+ self.extend(self.parse_str(s) for s in ini.split(","))
+ elif isinstance(ini, (list, tuple)):
+ self.extend(ini)
+ else:
+ assert ini is None or ini == "", "Unexpected initializer: %s" % str(ini)
+ self.sort()
+
+ def __str__(self):
+ """
+ Convert a ROA prefix set to string format.
+ """
+
+ return ",".join(str(x) for x in self)
+
+ @classmethod
+ def parse_str(cls, s):
+ """
+ Parse ROA prefix from text (eg, an XML attribute).
+ This method is a backwards compatibility shim.
+ """
+
+ return cls.prefix_type.parse_str(s)
+
+ def to_resource_set(self):
+ """
+ Convert a ROA prefix set to a resource set. This is an
+ irreversible transformation. We have to compute a union here
+ because ROA prefix sets can include overlaps, while RFC 3779
+ resource sets cannot. This is ugly, and there is almost certainly
+ a more efficient way to do this, but start by getting the output
+ right before worrying about making it fast or pretty.
+ """
+
+ r = self.resource_set_type()
+ s = self.resource_set_type()
+ s.append(None)
+ for p in self:
+ s[0] = p.to_resource_range()
+ r |= s
+ return r
+
+ @classmethod
+ def from_sql(cls, sql, query, args = None):
+ """
+ Create ROA prefix set from an SQL query.
+
+ sql is an object that supports execute() and fetchall() methods
+ like a DB API 2.0 cursor object.
+
+ query is an SQL query that returns a sequence of (prefix,
+ prefixlen, max_prefixlen) triples.
+ """
+
+ sql.execute(query, args)
+ return cls([cls.prefix_type(rpki.POW.IPAddress(x), int(y), int(z))
+ for (x, y, z) in sql.fetchall()])
+
+ @classmethod
+ def from_django(cls, iterable):
+ """
+ Create ROA prefix set from a Django query.
+
+ iterable is something which returns (prefix, prefixlen,
+ max_prefixlen) triples.
+ """
+
+ return cls([cls.prefix_type(rpki.POW.IPAddress(x), int(y), int(z))
+ for (x, y, z) in iterable])
+
+ def to_POW_roa_tuple(self):
+ """
+ Convert ROA prefix set to form used by rpki.POW.ROA.setPrefixes().
+ """
+
+ if self:
+ return tuple(a.to_POW_roa_tuple() for a in self)
+ else:
+ return None
class roa_prefix_set_ipv4(roa_prefix_set):
- """
- Set of IPv4 ROA prefixes.
- """
+ """
+ Set of IPv4 ROA prefixes.
+ """
- ## @var prefix_type
- # Type of underlying roa_prefix.
+ ## @var prefix_type
+ # Type of underlying roa_prefix.
- prefix_type = roa_prefix_ipv4
+ prefix_type = roa_prefix_ipv4
- ## @var resource_set_type
- # Type of corresponding resource_set_ip class.
+ ## @var resource_set_type
+ # Type of corresponding resource_set_ip class.
- resource_set_type = resource_set_ipv4
+ resource_set_type = resource_set_ipv4
# Fix back link from resource_set to roa_prefix
resource_set_ipv4.roa_prefix_set_type = roa_prefix_set_ipv4
class roa_prefix_set_ipv6(roa_prefix_set):
- """
- Set of IPv6 ROA prefixes.
- """
+ """
+ Set of IPv6 ROA prefixes.
+ """
- ## @var prefix_type
- # Type of underlying roa_prefix.
+ ## @var prefix_type
+ # Type of underlying roa_prefix.
- prefix_type = roa_prefix_ipv6
+ prefix_type = roa_prefix_ipv6
- ## @var resource_set_type
- # Type of corresponding resource_set_ip class.
+ ## @var resource_set_type
+ # Type of corresponding resource_set_ip class.
- resource_set_type = resource_set_ipv6
+ resource_set_type = resource_set_ipv6
# Fix back link from resource_set to roa_prefix
resource_set_ipv6.roa_prefix_set_type = roa_prefix_set_ipv6
class roa_prefix_bag(object):
- """
- Container to simplify passing around the combination of an IPv4 ROA
- prefix set and an IPv6 ROA prefix set.
- """
+ """
+ Container to simplify passing around the combination of an IPv4 ROA
+ prefix set and an IPv6 ROA prefix set.
+ """
- ## @var v4
- # Set of IPv4 prefixes.
+ ## @var v4
+ # Set of IPv4 prefixes.
- ## @var v6
- # Set of IPv6 prefixes.
+ ## @var v6
+ # Set of IPv6 prefixes.
- def __init__(self, v4 = None, v6 = None):
- self.v4 = v4 or roa_prefix_set_ipv4()
- self.v6 = v6 or roa_prefix_set_ipv6()
+ def __init__(self, v4 = None, v6 = None):
+ self.v4 = v4 or roa_prefix_set_ipv4()
+ self.v6 = v6 or roa_prefix_set_ipv6()
- def __eq__(self, other):
- return self.v4 == other.v4 and self.v6 == other.v6
+ def __eq__(self, other):
+ return self.v4 == other.v4 and self.v6 == other.v6
- def __ne__(self, other):
- return not (self == other) # pylint: disable=C0325
+ def __ne__(self, other):
+ return not (self == other) # pylint: disable=C0325
# Test suite for set operations.
if __name__ == "__main__":
- def testprefix(v):
- return " (%s)" % v.to_roa_prefix_set() if isinstance(v, resource_set_ip) else ""
-
- def test1(t, s1, s2):
- if isinstance(s1, str) and isinstance(s2, str):
- print "x: ", s1
- print "y: ", s2
- r1 = t(s1)
- r2 = t(s2)
- print "x: ", r1, testprefix(r1)
- print "y: ", r2, testprefix(r2)
- v1 = r1._comm(r2)
- v2 = r2._comm(r1)
- assert v1[0] == v2[1] and v1[1] == v2[0] and v1[2] == v2[2]
- for i in r1: assert i in r1 and i.min in r1 and i.max in r1
- for i in r2: assert i in r2 and i.min in r2 and i.max in r2
- for i in v1[0]: assert i in r1 and i not in r2
- for i in v1[1]: assert i not in r1 and i in r2
- for i in v1[2]: assert i in r1 and i in r2
- v1 = r1 | r2
- v2 = r2 | r1
- assert v1 == v2
- print "x|y:", v1, testprefix(v1)
- v1 = r1 - r2
- v2 = r2 - r1
- print "x-y:", v1, testprefix(v1)
- print "y-x:", v2, testprefix(v2)
- v1 = r1 ^ r2
- v2 = r2 ^ r1
- assert v1 == v2
- print "x^y:", v1, testprefix(v1)
- v1 = r1 & r2
- v2 = r2 & r1
- assert v1 == v2
- print "x&y:", v1, testprefix(v1)
-
- def test2(t, s1, s2):
- print "x: ", s1
- print "y: ", s2
- r1 = t(s1)
- r2 = t(s2)
- print "x: ", r1
- print "y: ", r2
- print "x>y:", (r1 > r2)
- print "x<y:", (r1 < r2)
- test1(t.resource_set_type,
- r1.to_resource_set(),
- r2.to_resource_set())
-
- def test3(t, s1, s2):
- test1(t, s1, s2)
- r1 = t(s1).to_roa_prefix_set()
- r2 = t(s2).to_roa_prefix_set()
- print "x: ", r1
- print "y: ", r2
- print "x>y:", (r1 > r2)
- print "x<y:", (r1 < r2)
- test1(t.roa_prefix_set_type.resource_set_type,
- r1.to_resource_set(),
- r2.to_resource_set())
-
- print
- print "Testing set operations on resource sets"
- print
- test1(resource_set_as, "1,2,3,4,5,6,11,12,13,14,15", "1,2,3,4,5,6,111,121,131,141,151")
- print
- test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
- print
- test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.0.0.0/24")
- print
- test1(resource_set_ipv4, "10.0.0.0/24", "10.3.0.0/24,10.0.0.77/32")
- print
- test1(resource_set_ipv4, "10.0.0.0/24", "10.0.0.0/32,10.0.0.2/32,10.0.0.4/32")
- print
- print "Testing set operations on ROA prefixes"
- print
- test2(roa_prefix_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
- print
- test2(roa_prefix_set_ipv4, "10.0.0.0/24-32,10.6.0.0/24-32", "10.3.0.0/24,10.0.0.0/16-32")
- print
- test2(roa_prefix_set_ipv4, "10.3.0.0/24-24,10.0.0.0/16-32", "10.3.0.0/24,10.0.0.0/16-32")
- print
- test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
- print
- test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::7/128")
- print
- test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
- print
- test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120-128")
- print
- test3(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
- print
- test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
- print
- test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
+ def testprefix(v):
+ return " (%s)" % v.to_roa_prefix_set() if isinstance(v, resource_set_ip) else ""
+
+ def test1(t, s1, s2):
+ if isinstance(s1, (str, unicode)) and isinstance(s2, (str, unicode)):
+ print "x: ", s1
+ print "y: ", s2
+ r1 = t(s1)
+ r2 = t(s2)
+ print "x: ", r1, testprefix(r1)
+ print "y: ", r2, testprefix(r2)
+ v1 = r1._comm(r2) # pylint: disable=W0212
+ v2 = r2._comm(r1) # pylint: disable=W0212
+ assert v1[0] == v2[1] and v1[1] == v2[0] and v1[2] == v2[2]
+ assert all(i in r1 and i.min in r1 and i.max in r1 for i in r1)
+ assert all(i in r2 and i.min in r2 and i.max in r2 for i in r2)
+ assert all(i in r1 and i not in r2 for i in v1[0])
+ assert all(i not in r1 and i in r2 for i in v1[1])
+ assert all(i in r1 and i in r2 for i in v1[2])
+ v1 = r1 | r2
+ v2 = r2 | r1
+ assert v1 == v2
+ print "x|y:", v1, testprefix(v1)
+ v1 = r1 - r2
+ v2 = r2 - r1
+ print "x-y:", v1, testprefix(v1)
+ print "y-x:", v2, testprefix(v2)
+ v1 = r1 ^ r2
+ v2 = r2 ^ r1
+ assert v1 == v2
+ print "x^y:", v1, testprefix(v1)
+ v1 = r1 & r2
+ v2 = r2 & r1
+ assert v1 == v2
+ print "x&y:", v1, testprefix(v1)
+
+ def test2(t, s1, s2):
+ print "x: ", s1
+ print "y: ", s2
+ r1 = t(s1)
+ r2 = t(s2)
+ print "x: ", r1
+ print "y: ", r2
+ print "x>y:", (r1 > r2)
+ print "x<y:", (r1 < r2)
+ test1(t.resource_set_type,
+ r1.to_resource_set(),
+ r2.to_resource_set())
+
+ def test3(t, s1, s2):
+ test1(t, s1, s2)
+ r1 = t(s1).to_roa_prefix_set()
+ r2 = t(s2).to_roa_prefix_set()
+ print "x: ", r1
+ print "y: ", r2
+ print "x>y:", (r1 > r2)
+ print "x<y:", (r1 < r2)
+ test1(t.roa_prefix_set_type.resource_set_type,
+ r1.to_resource_set(),
+ r2.to_resource_set())
+
+ print
+ print "Testing set operations on resource sets"
+ print
+ test1(resource_set_as, "1,2,3,4,5,6,11,12,13,14,15", "1,2,3,4,5,6,111,121,131,141,151")
+ print
+ test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.0.0.0/24")
+ print
+ test1(resource_set_ipv4, "10.0.0.0/24", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test1(resource_set_ipv4, "10.0.0.0/24", "10.0.0.0/32,10.0.0.2/32,10.0.0.4/32")
+ print
+ print "Testing set operations on ROA prefixes"
+ print
+ test2(roa_prefix_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test2(roa_prefix_set_ipv4, "10.0.0.0/24-32,10.6.0.0/24-32", "10.3.0.0/24,10.0.0.0/16-32")
+ print
+ test2(roa_prefix_set_ipv4, "10.3.0.0/24-24,10.0.0.0/16-32", "10.3.0.0/24,10.0.0.0/16-32")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::7/128")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120-128")
+ print
+ test3(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
+ print
+ test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
diff --git a/rpki/rootd.py b/rpki/rootd.py
index 78a71bba..dca60956 100644
--- a/rpki/rootd.py
+++ b/rpki/rootd.py
@@ -18,371 +18,444 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
-Trivial RPKI up-down protocol root server. Not recommended for
-production use. Overrides a bunch of method definitions from the
-rpki.* classes in order to reuse as much code as possible.
+Trivial RPKI up-down protocol root server.
"""
import os
import time
import logging
import argparse
+
import rpki.resource_set
import rpki.up_down
-import rpki.left_right
import rpki.x509
-import rpki.http
+import rpki.http_simple
import rpki.config
import rpki.exceptions
import rpki.relaxng
import rpki.sundial
import rpki.log
import rpki.daemonize
+import rpki.publication
+
+from lxml.etree import Element, SubElement
logger = logging.getLogger(__name__)
-rootd = None
-
-class list_pdu(rpki.up_down.list_pdu):
- def serve_pdu(self, q_msg, r_msg, ignored, callback, errback):
- r_msg.payload = rpki.up_down.list_response_pdu()
- rootd.compose_response(r_msg)
- callback()
-
-class issue_pdu(rpki.up_down.issue_pdu):
- def serve_pdu(self, q_msg, r_msg, ignored, callback, errback):
- self.pkcs10.check_valid_request_ca()
- r_msg.payload = rpki.up_down.issue_response_pdu()
- rootd.compose_response(r_msg, self.pkcs10)
- callback()
-
-class revoke_pdu(rpki.up_down.revoke_pdu):
- def serve_pdu(self, q_msg, r_msg, ignored, callback, errback):
- logger.debug("Revocation requested for SKI %s", self.ski)
- subject_cert = rootd.get_subject_cert()
- if subject_cert is None:
- logger.debug("No subject certificate, nothing to revoke")
- raise rpki.exceptions.NotInDatabase
- if subject_cert.gSKI() != self.ski:
- logger.debug("Subject certificate has different SKI %s, not revoking", subject_cert.gSKI())
- raise rpki.exceptions.NotInDatabase
- logger.debug("Revoking certificate %s", self.ski)
- now = rpki.sundial.now()
- rootd.revoke_subject_cert(now)
- rootd.del_subject_cert()
- rootd.del_subject_pkcs10()
- rootd.generate_crl_and_manifest(now)
- r_msg.payload = rpki.up_down.revoke_response_pdu()
- r_msg.payload.class_name = self.class_name
- r_msg.payload.ski = self.ski
- callback()
-
-class error_response_pdu(rpki.up_down.error_response_pdu):
- exceptions = rpki.up_down.error_response_pdu.exceptions.copy()
- exceptions[rpki.exceptions.ClassNameUnknown, revoke_pdu] = 1301
- exceptions[rpki.exceptions.NotInDatabase, revoke_pdu] = 1302
-
-class message_pdu(rpki.up_down.message_pdu):
-
- name2type = {
- "list" : list_pdu,
- "list_response" : rpki.up_down.list_response_pdu,
- "issue" : issue_pdu,
- "issue_response" : rpki.up_down.issue_response_pdu,
- "revoke" : revoke_pdu,
- "revoke_response" : rpki.up_down.revoke_response_pdu,
- "error_response" : error_response_pdu }
-
- type2name = dict((v, k) for k, v in name2type.items())
-
- error_pdu_type = error_response_pdu
-
- def log_query(self, child):
+
+class ReplayTracker(object):
"""
- Log query we're handling.
+ Stash for replay protection timestamps.
"""
- logger.info("Serving %s query", self.type)
-class sax_handler(rpki.up_down.sax_handler):
- pdu = message_pdu
+ def __init__(self):
+ self.cms_timestamp = None
+
-class cms_msg(rpki.up_down.cms_msg):
- saxify = sax_handler.saxify
class main(object):
- def get_root_cert(self):
- logger.debug("Read root cert %s", self.rpki_root_cert_file)
- self.rpki_root_cert = rpki.x509.X509(Auto_file = self.rpki_root_cert_file)
-
- def root_newer_than_subject(self):
- return os.stat(self.rpki_root_cert_file).st_mtime > \
- os.stat(os.path.join(self.rpki_root_dir, self.rpki_subject_cert)).st_mtime
-
- def get_subject_cert(self):
- filename = os.path.join(self.rpki_root_dir, self.rpki_subject_cert)
- try:
- x = rpki.x509.X509(Auto_file = filename)
- logger.debug("Read subject cert %s", filename)
- return x
- except IOError:
- return None
-
- def set_subject_cert(self, cert):
- filename = os.path.join(self.rpki_root_dir, self.rpki_subject_cert)
- logger.debug("Writing subject cert %s, SKI %s", filename, cert.hSKI())
- f = open(filename, "wb")
- f.write(cert.get_DER())
- f.close()
-
- def del_subject_cert(self):
- filename = os.path.join(self.rpki_root_dir, self.rpki_subject_cert)
- logger.debug("Deleting subject cert %s", filename)
- os.remove(filename)
-
- def get_subject_pkcs10(self):
- try:
- x = rpki.x509.PKCS10(Auto_file = self.rpki_subject_pkcs10)
- logger.debug("Read subject PKCS #10 %s", self.rpki_subject_pkcs10)
- return x
- except IOError:
- return None
-
- def set_subject_pkcs10(self, pkcs10):
- logger.debug("Writing subject PKCS #10 %s", self.rpki_subject_pkcs10)
- f = open(self.rpki_subject_pkcs10, "wb")
- f.write(pkcs10.get_DER())
- f.close()
-
- def del_subject_pkcs10(self):
- logger.debug("Deleting subject PKCS #10 %s", self.rpki_subject_pkcs10)
- try:
- os.remove(self.rpki_subject_pkcs10)
- except OSError:
- pass
-
- def issue_subject_cert_maybe(self, new_pkcs10):
- now = rpki.sundial.now()
- subject_cert = self.get_subject_cert()
- old_pkcs10 = self.get_subject_pkcs10()
- if new_pkcs10 is not None and new_pkcs10 != old_pkcs10:
- self.set_subject_pkcs10(new_pkcs10)
- if subject_cert is not None:
- logger.debug("PKCS #10 changed, regenerating subject certificate")
+
+ def root_newer_than_subject(self):
+ return self.rpki_root_cert.mtime > os.stat(self.rpki_subject_cert_file).st_mtime
+
+
+ def get_subject_cert(self):
+ try:
+ x = rpki.x509.X509(Auto_file = self.rpki_subject_cert_file)
+ logger.debug("Read subject cert %s", self.rpki_subject_cert_file)
+ return x
+ except IOError:
+ return None
+
+
+ def set_subject_cert(self, cert):
+ logger.debug("Writing subject cert %s, SKI %s", self.rpki_subject_cert_file, cert.hSKI())
+ with open(self.rpki_subject_cert_file, "wb") as f:
+ f.write(cert.get_DER())
+
+
+ def del_subject_cert(self):
+ logger.debug("Deleting subject cert %s", self.rpki_subject_cert_file)
+ os.remove(self.rpki_subject_cert_file)
+
+
+ def get_subject_pkcs10(self):
+ try:
+ x = rpki.x509.PKCS10(Auto_file = self.rpki_subject_pkcs10)
+ logger.debug("Read subject PKCS #10 %s", self.rpki_subject_pkcs10)
+ return x
+ except IOError:
+ return None
+
+
+ def set_subject_pkcs10(self, pkcs10):
+ logger.debug("Writing subject PKCS #10 %s", self.rpki_subject_pkcs10)
+ with open(self.rpki_subject_pkcs10, "wb") as f:
+ f.write(pkcs10.get_DER())
+
+
+ def del_subject_pkcs10(self):
+ logger.debug("Deleting subject PKCS #10 %s", self.rpki_subject_pkcs10)
+ try:
+ os.remove(self.rpki_subject_pkcs10)
+ except OSError:
+ pass
+
+
+ def issue_subject_cert_maybe(self, new_pkcs10):
+ now = rpki.sundial.now()
+ subject_cert = self.get_subject_cert()
+ if subject_cert is None:
+ subject_cert_hash = None
+ else:
+ subject_cert_hash = rpki.x509.sha256(subject_cert.get_DER()).encode("hex")
+ old_pkcs10 = self.get_subject_pkcs10()
+ if new_pkcs10 is not None and new_pkcs10 != old_pkcs10:
+ self.set_subject_pkcs10(new_pkcs10)
+ if subject_cert is not None:
+ logger.debug("PKCS #10 changed, regenerating subject certificate")
+ self.revoke_subject_cert(now)
+ subject_cert = None
+ if subject_cert is not None and subject_cert.getNotAfter() <= now + self.rpki_subject_regen:
+ logger.debug("Subject certificate has reached expiration threshold, regenerating")
+ self.revoke_subject_cert(now)
+ subject_cert = None
+ if subject_cert is not None and self.root_newer_than_subject():
+ logger.debug("Root certificate has changed, regenerating subject")
+ self.revoke_subject_cert(now)
+ subject_cert = None
+ if subject_cert is not None:
+ return subject_cert, None
+ pkcs10 = old_pkcs10 if new_pkcs10 is None else new_pkcs10
+ if pkcs10 is None:
+ logger.debug("No PKCS #10 request, can't generate subject certificate yet")
+ return None, None
+ resources = self.rpki_root_cert.get_3779resources()
+ notAfter = now + self.rpki_subject_lifetime
+ logger.info("Generating subject cert %s with resources %s, expires %s",
+ self.rpki_subject_cert_uri, resources, notAfter)
+ req_key = pkcs10.getPublicKey()
+ req_sia = pkcs10.get_SIA()
+ self.next_serial_number()
+ subject_cert = self.rpki_root_cert.issue(
+ keypair = self.rpki_root_key,
+ subject_key = req_key,
+ serial = self.serial_number,
+ sia = req_sia,
+ aia = self.rpki_root_cert_uri,
+ crldp = self.rpki_root_crl_uri,
+ resources = resources,
+ notBefore = now,
+ notAfter = notAfter)
+ self.set_subject_cert(subject_cert)
+ pubd_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+ pdu = SubElement(pubd_msg, rpki.publication.tag_publish, uri = self.rpki_subject_cert_uri)
+ pdu.text = subject_cert.get_Base64()
+ if subject_cert_hash is not None:
+ pdu.set("hash", subject_cert_hash)
+ self.generate_crl_and_manifest(now, pubd_msg)
+ return subject_cert, pubd_msg
+
+
+ def generate_crl_and_manifest(self, now, pubd_msg):
+ subject_cert = self.get_subject_cert()
+ self.next_serial_number()
+ self.next_crl_number()
+ while self.revoked and self.revoked[0][1] + 2 * self.rpki_subject_regen < now:
+ del self.revoked[0]
+ crl = rpki.x509.CRL.generate(
+ keypair = self.rpki_root_key,
+ issuer = self.rpki_root_cert,
+ serial = self.crl_number,
+ thisUpdate = now,
+ nextUpdate = now + self.rpki_subject_regen,
+ revokedCertificates = self.revoked)
+ crl_hash = self.read_hash_maybe(self.rpki_root_crl_file)
+ logger.debug("Writing CRL %s", self.rpki_root_crl_file)
+ with open(self.rpki_root_crl_file, "wb") as f:
+ f.write(crl.get_DER())
+ pdu = SubElement(pubd_msg, rpki.publication.tag_publish, uri = self.rpki_root_crl_uri)
+ pdu.text = crl.get_Base64()
+ if crl_hash is not None:
+ pdu.set("hash", crl_hash)
+ manifest_content = [(os.path.basename(self.rpki_root_crl_uri), crl)]
+ if subject_cert is not None:
+ manifest_content.append((os.path.basename(self.rpki_subject_cert_uri), subject_cert))
+ manifest_resources = rpki.resource_set.resource_bag.from_inheritance()
+ manifest_keypair = rpki.x509.RSA.generate()
+ manifest_cert = self.rpki_root_cert.issue(
+ keypair = self.rpki_root_key,
+ subject_key = manifest_keypair.get_public(),
+ serial = self.serial_number,
+ sia = (None, None, self.rpki_root_manifest_uri, self.rrdp_notification_uri),
+ aia = self.rpki_root_cert_uri,
+ crldp = self.rpki_root_crl_uri,
+ resources = manifest_resources,
+ notBefore = now,
+ notAfter = now + self.rpki_subject_lifetime,
+ is_ca = False)
+ manifest = rpki.x509.SignedManifest.build(
+ serial = self.crl_number,
+ thisUpdate = now,
+ nextUpdate = now + self.rpki_subject_regen,
+ names_and_objs = manifest_content,
+ keypair = manifest_keypair,
+ certs = manifest_cert)
+ mft_hash = self.read_hash_maybe(self.rpki_root_manifest_file)
+ logger.debug("Writing manifest %s", self.rpki_root_manifest_file)
+ with open(self.rpki_root_manifest_file, "wb") as f:
+ f.write(manifest.get_DER())
+ pdu = SubElement(pubd_msg, rpki.publication.tag_publish, uri = self.rpki_root_manifest_uri)
+ pdu.text = manifest.get_Base64()
+ if mft_hash is not None:
+ pdu.set("hash", mft_hash)
+ cer_hash = rpki.x509.sha256(self.rpki_root_cert.get_DER()).encode("hex")
+ if cer_hash != self.rpki_root_cert_hash:
+ pdu = SubElement(pubd_msg, rpki.publication.tag_publish, uri = self.rpki_root_cert_uri)
+ pdu.text = self.rpki_root_cert.get_Base64()
+ if self.rpki_root_cert_hash is not None:
+ pdu.set("hash", self.rpki_root_cert_hash)
+ self.rpki_root_cert_hash = cer_hash
+
+
+ @staticmethod
+ def read_hash_maybe(fn):
+ try:
+ with open(fn, "rb") as f:
+ return rpki.x509.sha256(f.read()).encode("hex")
+ except IOError:
+ return None
+
+
+ def revoke_subject_cert(self, now):
+ self.revoked.append((self.get_subject_cert().getSerial(), now))
+
+
+ def publish(self, q_msg):
+ if q_msg is None:
+ return
+ assert len(q_msg) > 0
+
+ if not all(q_pdu.get("hash") is not None for q_pdu in q_msg):
+ logger.debug("Some publication PDUs are missing hashes, checking published data...")
+ q = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+ SubElement(q, rpki.publication.tag_list)
+ published_hash = dict((r.get("uri"), r.get("hash")) for r in self.call_pubd(q))
+ for q_pdu in q_msg:
+ q_uri = q_pdu.get("uri")
+ if q_pdu.get("hash") is None and published_hash.get(q_uri) is not None:
+ logger.debug("Updating hash of %s to %s from previously published data", q_uri, published_hash[q_uri])
+ q_pdu.set("hash", published_hash[q_uri])
+
+ r_msg = self.call_pubd(q_msg)
+ if len(q_msg) != len(r_msg):
+ raise rpki.exceptions.BadPublicationReply("Wrong number of response PDUs from pubd: sent %s, got %s" % (len(q_msg), len(r_msg)))
+
+
+ def call_pubd(self, q_msg):
+ for q_pdu in q_msg:
+ logger.info("Sending %s to pubd", q_pdu.get("uri"))
+ r_msg = rpki.http_simple.client(
+ proto_cms_msg = rpki.publication.cms_msg,
+ client_key = self.rootd_bpki_key,
+ client_cert = self.rootd_bpki_cert,
+ client_crl = self.rootd_bpki_crl,
+ server_ta = self.bpki_ta,
+ server_cert = self.pubd_bpki_cert,
+ url = self.pubd_url,
+ q_msg = q_msg,
+ replay_track = self.pubd_replay_tracker)
+ rpki.publication.raise_if_error(r_msg)
+ return r_msg
+
+
+ def compose_response(self, r_msg, pkcs10 = None):
+ subject_cert, pubd_msg = self.issue_subject_cert_maybe(pkcs10)
+ bag = self.rpki_root_cert.get_3779resources()
+ rc = SubElement(r_msg, rpki.up_down.tag_class,
+ class_name = self.rpki_class_name,
+ cert_url = str(rpki.up_down.multi_uri(self.rpki_root_cert_uri)),
+ resource_set_as = str(bag.asn),
+ resource_set_ipv4 = str(bag.v4),
+ resource_set_ipv6 = str(bag.v6),
+ resource_set_notafter = str(bag.valid_until))
+ if subject_cert is not None:
+ c = SubElement(rc, rpki.up_down.tag_certificate,
+ cert_url = str(rpki.up_down.multi_uri(self.rpki_subject_cert_uri)))
+ c.text = subject_cert.get_Base64()
+ SubElement(rc, rpki.up_down.tag_issuer).text = self.rpki_root_cert.get_Base64()
+ self.publish(pubd_msg)
+
+
+ def handle_list(self, q_msg, r_msg):
+ self.compose_response(r_msg)
+
+
+ def handle_issue(self, q_msg, r_msg):
+ # This is where we'd check q_msg[0].get("class_name") if this weren't rootd.
+ self.compose_response(r_msg, rpki.x509.PKCS10(Base64 = q_msg[0].text))
+
+
+ def handle_revoke(self, q_msg, r_msg):
+ class_name = q_msg[0].get("class_name")
+ ski = q_msg[0].get("ski")
+ logger.debug("Revocation requested for class %s SKI %s", class_name, ski)
+ subject_cert = self.get_subject_cert()
+ if subject_cert is None:
+ logger.debug("No subject certificate, nothing to revoke")
+ raise rpki.exceptions.NotInDatabase
+ if subject_cert.gSKI() != ski:
+ logger.debug("Subject certificate has different SKI %s, not revoking", subject_cert.gSKI())
+ raise rpki.exceptions.NotInDatabase
+ logger.debug("Revoking certificate %s", ski)
+ now = rpki.sundial.now()
+ pubd_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
self.revoke_subject_cert(now)
- subject_cert = None
- if subject_cert is not None and subject_cert.getNotAfter() <= now + self.rpki_subject_regen:
- logger.debug("Subject certificate has reached expiration threshold, regenerating")
- self.revoke_subject_cert(now)
- subject_cert = None
- if subject_cert is not None and self.root_newer_than_subject():
- logger.debug("Root certificate has changed, regenerating subject")
- self.revoke_subject_cert(now)
- subject_cert = None
- self.get_root_cert()
- if subject_cert is not None:
- return subject_cert
- pkcs10 = old_pkcs10 if new_pkcs10 is None else new_pkcs10
- if pkcs10 is None:
- logger.debug("No PKCS #10 request, can't generate subject certificate yet")
- return None
- resources = self.rpki_root_cert.get_3779resources()
- notAfter = now + self.rpki_subject_lifetime
- logger.info("Generating subject cert %s with resources %s, expires %s",
- self.rpki_base_uri + self.rpki_subject_cert, resources, notAfter)
- req_key = pkcs10.getPublicKey()
- req_sia = pkcs10.get_SIA()
- self.next_serial_number()
- subject_cert = self.rpki_root_cert.issue(
- keypair = self.rpki_root_key,
- subject_key = req_key,
- serial = self.serial_number,
- sia = req_sia,
- aia = self.rpki_root_cert_uri,
- crldp = self.rpki_base_uri + self.rpki_root_crl,
- resources = resources,
- notBefore = now,
- notAfter = notAfter)
- self.set_subject_cert(subject_cert)
- self.generate_crl_and_manifest(now)
- return subject_cert
-
- def generate_crl_and_manifest(self, now):
- subject_cert = self.get_subject_cert()
- self.next_serial_number()
- self.next_crl_number()
- while self.revoked and self.revoked[0][1] + 2 * self.rpki_subject_regen < now:
- del self.revoked[0]
- crl = rpki.x509.CRL.generate(
- keypair = self.rpki_root_key,
- issuer = self.rpki_root_cert,
- serial = self.crl_number,
- thisUpdate = now,
- nextUpdate = now + self.rpki_subject_regen,
- revokedCertificates = self.revoked)
- fn = os.path.join(self.rpki_root_dir, self.rpki_root_crl)
- logger.debug("Writing CRL %s", fn)
- f = open(fn, "wb")
- f.write(crl.get_DER())
- f.close()
- manifest_content = [(self.rpki_root_crl, crl)]
- if subject_cert is not None:
- manifest_content.append((self.rpki_subject_cert, subject_cert))
- manifest_resources = rpki.resource_set.resource_bag.from_inheritance()
- manifest_keypair = rpki.x509.RSA.generate()
- manifest_cert = self.rpki_root_cert.issue(
- keypair = self.rpki_root_key,
- subject_key = manifest_keypair.get_public(),
- serial = self.serial_number,
- sia = (None, None, self.rpki_base_uri + self.rpki_root_manifest),
- aia = self.rpki_root_cert_uri,
- crldp = self.rpki_base_uri + self.rpki_root_crl,
- resources = manifest_resources,
- notBefore = now,
- notAfter = now + self.rpki_subject_lifetime,
- is_ca = False)
- manifest = rpki.x509.SignedManifest.build(
- serial = self.crl_number,
- thisUpdate = now,
- nextUpdate = now + self.rpki_subject_regen,
- names_and_objs = manifest_content,
- keypair = manifest_keypair,
- certs = manifest_cert)
- fn = os.path.join(self.rpki_root_dir, self.rpki_root_manifest)
- logger.debug("Writing manifest %s", fn)
- f = open(fn, "wb")
- f.write(manifest.get_DER())
- f.close()
-
- def revoke_subject_cert(self, now):
- self.revoked.append((self.get_subject_cert().getSerial(), now))
-
- def compose_response(self, r_msg, pkcs10 = None):
- subject_cert = self.issue_subject_cert_maybe(pkcs10)
- rc = rpki.up_down.class_elt()
- rc.class_name = self.rpki_class_name
- rc.cert_url = rpki.up_down.multi_uri(self.rpki_root_cert_uri)
- rc.from_resource_bag(self.rpki_root_cert.get_3779resources())
- rc.issuer = self.rpki_root_cert
- r_msg.payload.classes.append(rc)
- if subject_cert is not None:
- rc.certs.append(rpki.up_down.certificate_elt())
- rc.certs[0].cert_url = rpki.up_down.multi_uri(self.rpki_base_uri + self.rpki_subject_cert)
- rc.certs[0].cert = subject_cert
-
- def up_down_handler(self, query, path, cb):
- try:
- q_cms = cms_msg(DER = query)
- q_msg = q_cms.unwrap((self.bpki_ta, self.child_bpki_cert))
- self.cms_timestamp = q_cms.check_replay(self.cms_timestamp, path)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Problem decoding PDU")
- return cb(400, reason = "Could not decode PDU: %s" % e)
-
- def done(r_msg):
- cb(200, body = cms_msg().wrap(
- r_msg, self.rootd_bpki_key, self.rootd_bpki_cert,
- self.rootd_bpki_crl if self.include_bpki_crl else None))
-
- try:
- q_msg.serve_top_level(None, done)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- try:
- logger.exception("Exception serving up-down request %r", q_msg)
- done(q_msg.serve_error(e))
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Exception while generating error report")
- cb(500, reason = "Could not process PDU: %s" % e)
-
-
- def next_crl_number(self):
- if self.crl_number is None:
- try:
- crl = rpki.x509.CRL(DER_file = os.path.join(self.rpki_root_dir, self.rpki_root_crl))
- self.crl_number = crl.getCRLNumber()
- except: # pylint: disable=W0702
- self.crl_number = 0
- self.crl_number += 1
- return self.crl_number
-
-
- def next_serial_number(self):
- if self.serial_number is None:
- subject_cert = self.get_subject_cert()
- if subject_cert is not None:
- self.serial_number = subject_cert.getSerial() + 1
- else:
- self.serial_number = 0
- self.serial_number += 1
- return self.serial_number
-
-
- def __init__(self):
-
- global rootd
- rootd = self # Gross, but simpler than what we'd have to do otherwise
-
- self.rpki_root_cert = None
- self.serial_number = None
- self.crl_number = None
- self.revoked = []
- self.cms_timestamp = None
-
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize")
- parser.add_argument("--pidfile",
- help = "override default location of pid file")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
-
- rpki.log.init("rootd", args)
-
- self.cfg = rpki.config.parser(args.config, "rootd")
- self.cfg.set_global_flags()
-
- if not args.foreground:
- rpki.daemonize.daemon(pidfile = args.pidfile)
-
- self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
- self.rootd_bpki_key = rpki.x509.RSA( Auto_update = self.cfg.get("rootd-bpki-key"))
- self.rootd_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("rootd-bpki-cert"))
- self.rootd_bpki_crl = rpki.x509.CRL( Auto_update = self.cfg.get("rootd-bpki-crl"))
- self.child_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("child-bpki-cert"))
-
- self.http_server_host = self.cfg.get("server-host", "")
- self.http_server_port = self.cfg.getint("server-port")
-
- self.rpki_class_name = self.cfg.get("rpki-class-name", "wombat")
-
- self.rpki_root_dir = self.cfg.get("rpki-root-dir")
- self.rpki_base_uri = self.cfg.get("rpki-base-uri", "rsync://" + self.rpki_class_name + ".invalid/")
-
- self.rpki_root_key = rpki.x509.RSA(Auto_update = self.cfg.get("rpki-root-key"))
- self.rpki_root_cert_file = self.cfg.get("rpki-root-cert")
- self.rpki_root_cert_uri = self.cfg.get("rpki-root-cert-uri", self.rpki_base_uri + "root.cer")
-
- self.rpki_root_manifest = self.cfg.get("rpki-root-manifest", "root.mft")
- self.rpki_root_crl = self.cfg.get("rpki-root-crl", "root.crl")
- self.rpki_subject_cert = self.cfg.get("rpki-subject-cert", "child.cer")
- self.rpki_subject_pkcs10 = self.cfg.get("rpki-subject-pkcs10", "child.pkcs10")
-
- self.rpki_subject_lifetime = rpki.sundial.timedelta.parse(self.cfg.get("rpki-subject-lifetime", "8w"))
- self.rpki_subject_regen = rpki.sundial.timedelta.parse(self.cfg.get("rpki-subject-regen", self.rpki_subject_lifetime.convert_to_seconds() / 2))
-
- self.include_bpki_crl = self.cfg.getboolean("include-bpki-crl", False)
-
- rpki.http.server(host = self.http_server_host,
- port = self.http_server_port,
- handlers = (("/", self.up_down_handler, rpki.up_down.allowed_content_types),))
+ self.del_subject_cert()
+ self.del_subject_pkcs10()
+ SubElement(r_msg, q_msg[0].tag, class_name = class_name, ski = ski)
+ self.generate_crl_and_manifest(now, pubd_msg)
+ self.publish(pubd_msg)
+
+
+ # Need to do something about mapping exceptions to up-down error
+ # codes, right now everything shows up as "internal error".
+ #
+ #exceptions = {
+ # rpki.exceptions.ClassNameUnknown : 1201,
+ # rpki.exceptions.NoActiveCA : 1202,
+ # (rpki.exceptions.ClassNameUnknown, revoke_pdu) : 1301,
+ # (rpki.exceptions.NotInDatabase, revoke_pdu) : 1302 }
+ #
+ # Might be that what we want here is a subclass of
+ # rpki.exceptions.RPKI_Exception which carries an extra data field
+ # for the up-down error code, so that we can add the correct code
+ # when we instantiate it.
+ #
+ # There are also a few that are also schema violations, which means
+ # we'd have to catch them before validating or pick them out of a
+ # message that failed validation or otherwise break current
+ # modularity. Maybe an optional pre-validation check method hook in
+ # rpki.x509.XML_CMS_object which we can use to intercept such things?
+
+
+ def handler(self, request, q_der):
+ try:
+ q_cms = rpki.up_down.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((self.bpki_ta, self.child_bpki_cert))
+ q_type = q_msg.get("type")
+ logger.info("Serving %s query", q_type)
+ r_msg = Element(rpki.up_down.tag_message, nsmap = rpki.up_down.nsmap,
+ version = rpki.up_down.version,
+ sender = q_msg.get("recipient"), recipient = q_msg.get("sender"),
+ type = q_type + "_response")
+ try:
+ self.rpkid_cms_timestamp = q_cms.check_replay(self.rpkid_cms_timestamp, request.path)
+ getattr(self, "handle_" + q_type)(q_msg, r_msg)
+ except Exception, e:
+ logger.exception("Exception processing up-down %s message", q_type)
+ rpki.up_down.generate_error_response_from_exception(r_msg, e, q_type)
+ request.send_cms_response(rpki.up_down.cms_msg().wrap(
+ r_msg, self.rootd_bpki_key, self.rootd_bpki_cert,
+ self.rootd_bpki_crl if self.include_bpki_crl else None))
+ except Exception, e:
+ logger.exception("Unhandled exception processing up-down message")
+ request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+
+
+ def next_crl_number(self):
+ if self.crl_number is None:
+ try:
+ crl = rpki.x509.CRL(DER_file = self.rpki_root_crl_file)
+ self.crl_number = crl.getCRLNumber()
+            except:                 # pylint: disable=W0702
+ self.crl_number = 0
+ self.crl_number += 1
+ return self.crl_number
+
+
+ def next_serial_number(self):
+ if self.serial_number is None:
+ subject_cert = self.get_subject_cert()
+ if subject_cert is not None:
+ self.serial_number = subject_cert.getSerial() + 1
+ else:
+ self.serial_number = 0
+ self.serial_number += 1
+ return self.serial_number
+
+
+ def __init__(self):
+ self.serial_number = None
+ self.crl_number = None
+ self.revoked = []
+ self.rpkid_cms_timestamp = None
+ self.pubd_replay_tracker = ReplayTracker()
+
+ os.environ["TZ"] = "UTC"
+ time.tzset()
+
+ self.cfg = rpki.config.argparser(section = "rootd", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground", default = False,
+ help = "do not daemonize")
+ self.cfg.add_argument("--pidfile",
+                              default = os.path.join(rpki.daemonize.default_pid_directory,
+ "rootd.pid"),
+ help = "override default location of pid file")
+ self.cfg.add_logging_arguments()
+        args = self.cfg.argparser.parse_args()
+
+ self.cfg.configure_logging(args = args, ident = "rootd")
+
+ self.cfg.set_global_flags()
+
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
+
+ self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
+ self.rootd_bpki_key = rpki.x509.RSA( Auto_update = self.cfg.get("rootd-bpki-key"))
+ self.rootd_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("rootd-bpki-cert"))
+ self.rootd_bpki_crl = rpki.x509.CRL( Auto_update = self.cfg.get("rootd-bpki-crl"))
+ self.child_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("child-bpki-cert"))
+
+ if self.cfg.has_option("pubd-bpki-cert"):
+ self.pubd_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("pubd-bpki-cert"))
+ else:
+ self.pubd_bpki_cert = None
+
+ self.http_server_host = self.cfg.get("server-host", "")
+ self.http_server_port = self.cfg.getint("server-port")
+
+ self.rpki_class_name = self.cfg.get("rpki-class-name")
+
+ self.rpki_root_key = rpki.x509.RSA( Auto_update = self.cfg.get("rpki-root-key-file"))
+ self.rpki_root_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpki-root-cert-file"))
+ self.rpki_root_cert_uri = self.cfg.get("rpki-root-cert-uri")
+ self.rpki_root_cert_hash = None
+
+ self.rpki_root_manifest_file = self.cfg.get("rpki-root-manifest-file")
+ self.rpki_root_manifest_uri = self.cfg.get("rpki-root-manifest-uri")
+
+ self.rpki_root_crl_file = self.cfg.get("rpki-root-crl-file")
+ self.rpki_root_crl_uri = self.cfg.get("rpki-root-crl-uri")
+
+ self.rpki_subject_cert_file = self.cfg.get("rpki-subject-cert-file")
+ self.rpki_subject_cert_uri = self.cfg.get("rpki-subject-cert-uri")
+ self.rpki_subject_pkcs10 = self.cfg.get("rpki-subject-pkcs10-file")
+ self.rpki_subject_lifetime = rpki.sundial.timedelta.parse(self.cfg.get("rpki-subject-lifetime", "8w"))
+ self.rpki_subject_regen = rpki.sundial.timedelta.parse(self.cfg.get("rpki-subject-regen",
+ self.rpki_subject_lifetime.convert_to_seconds() / 2))
+
+ self.include_bpki_crl = self.cfg.getboolean("include-bpki-crl", False)
+
+ self.pubd_url = self.cfg.get("pubd-contact-uri")
+
+ self.rrdp_notification_uri = self.cfg.get("rrdp-notification-uri")
+
+ rpki.http_simple.server(host = self.http_server_host,
+ port = self.http_server_port,
+ handlers = (("/", self.handler, rpki.up_down.allowed_content_types),))
diff --git a/rpki/rpkic.py b/rpki/rpkic.py
index 126ce828..5e0efe0f 100644
--- a/rpki/rpkic.py
+++ b/rpki/rpkic.py
@@ -24,864 +24,941 @@ an overview of the available commands; type "help foo" for (more) detailed help
on the "foo" command.
"""
-# NB: As of this writing, I'm trying really hard to avoid having this
-# program depend on a Django settings.py file. This may prove to be a
-# waste of time in the long run, but for for now, this means that one
-# has to be careful about exactly how and when one imports Django
-# modules, or anything that imports Django modules. Bottom line is
-# that we don't import such modules until we need them.
-
import os
-import argparse
import sys
+import pwd
import time
+import argparse
import rpki.config
import rpki.sundial
import rpki.log
-import rpki.http
import rpki.resource_set
import rpki.relaxng
import rpki.exceptions
import rpki.left_right
import rpki.x509
-import rpki.async
import rpki.version
-from rpki.cli import Cmd, parsecmd, cmdarg
+from lxml.etree import SubElement
-class BadPrefixSyntax(Exception): "Bad prefix syntax."
-class CouldntTalkToDaemon(Exception): "Couldn't talk to daemon."
-class BadXMLMessage(Exception): "Bad XML message."
-class PastExpiration(Exception): "Expiration date has already passed."
-class CantRunRootd(Exception): "Can't run rootd."
+from rpki.cli import Cmd, parsecmd, cmdarg
module_doc = __doc__
-class main(Cmd):
- prompt = "rpkic> "
-
- completedefault = Cmd.filename_complete
-
- # Top-level argparser, for stuff that one might want when starting
- # up the interactive command loop. Not sure -i belongs here, but
- # it's harmless so leave it here for the moment.
-
- top_argparser = argparse.ArgumentParser(add_help = False)
- top_argparser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- top_argparser.add_argument("-i", "--identity", "--handle",
- help = "set initial entity handdle")
- top_argparser.add_argument("--profile",
- help = "enable profiling, saving data to PROFILE")
-
- # Argparser for non-interactive commands (no command loop).
-
- full_argparser = argparse.ArgumentParser(parents = [top_argparser],
- description = module_doc)
- argsubparsers = full_argparser.add_subparsers(title = "Commands", metavar = "")
-
- def __init__(self):
-
- Cmd.__init__(self)
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- # Try parsing just the arguments that make sense if we're
- # going to be running an interactive command loop. If that
- # parses everything, we're interactive, otherwise, it's either
- # a non-interactive command or a parse error, so we let the full
- # parser sort that out for us.
-
- args, argv = self.top_argparser.parse_known_args()
- self.interactive = not argv
- if not self.interactive:
- args = self.full_argparser.parse_args()
-
- self.cfg_file = args.config
- self.handle = args.identity
-
- if args.profile:
- import cProfile
- prof = cProfile.Profile()
- try:
- prof.runcall(self.main, args)
- finally:
- prof.dump_stats(args.profile)
- print "Dumped profile data to %s" % args.profile
- else:
- self.main(args)
-
- def main(self, args):
- rpki.log.init("rpkic")
- self.read_config()
- if self.interactive:
- self.cmdloop_with_history()
- else:
- args.func(self, args)
-
- def read_config(self):
- global rpki # pylint: disable=W0602
-
- try:
- cfg = rpki.config.parser(self.cfg_file, "myrpki")
- cfg.set_global_flags()
- except IOError, e:
- sys.exit("%s: %s" % (e.strerror, e.filename))
-
- self.histfile = cfg.get("history_file", os.path.expanduser("~/.rpkic_history"))
- self.autosync = cfg.getboolean("autosync", True, section = "rpkic")
-
- import django
-
- from django.conf import settings
-
- settings.configure(
- DATABASES = { "default" : {
- "ENGINE" : "django.db.backends.mysql",
- "NAME" : cfg.get("sql-database", section = "irdbd"),
- "USER" : cfg.get("sql-username", section = "irdbd"),
- "PASSWORD" : cfg.get("sql-password", section = "irdbd"),
- "HOST" : "",
- "PORT" : "",
- "OPTIONS" : { "init_command": "SET storage_engine=INNODB",
- "charset" : "latin1" }}},
- INSTALLED_APPS = ("rpki.irdb",),
- MIDDLEWARE_CLASSES = (), # API change, feh
- )
-
- if django.VERSION >= (1, 7): # API change, feh
- from django.apps import apps
- apps.populate(settings.INSTALLED_APPS)
-
- import rpki.irdb # pylint: disable=W0621
-
- try:
- rpki.irdb.models.ca_certificate_lifetime = rpki.sundial.timedelta.parse(
- cfg.get("bpki_ca_certificate_lifetime", section = "rpkic"))
- except rpki.config.ConfigParser.Error:
- pass
-
- try:
- rpki.irdb.models.ee_certificate_lifetime = rpki.sundial.timedelta.parse(
- cfg.get("bpki_ee_certificate_lifetime", section = "rpkic"))
- except rpki.config.ConfigParser.Error:
- pass
-
- try:
- rpki.irdb.models.crl_interval = rpki.sundial.timedelta.parse(
- cfg.get("bpki_crl_interval", section = "rpkic"))
- except rpki.config.ConfigParser.Error:
- pass
-
- import django.core.management
- django.core.management.call_command("syncdb", verbosity = 0, load_initial_data = False)
-
- self.zoo = rpki.irdb.Zookeeper(cfg = cfg, handle = self.handle, logstream = sys.stdout)
-
-
- def do_help(self, arg):
+class swap_uids(object):
"""
- List available commands with "help" or detailed help with "help cmd".
+ Context manager to wrap os.setreuid() calls safely.
"""
- argv = arg.split()
-
- if not argv:
- #return self.full_argparser.print_help()
- return self.print_topics(
- self.doc_header,
- sorted(set(name[3:] for name in self.get_names()
- if name.startswith("do_")
- and getattr(self, name).__doc__)),
- 15, 80)
-
- try:
- return getattr(self, "help_" + argv[0])()
- except AttributeError:
- pass
-
- func = getattr(self, "do_" + argv[0], None)
-
- try:
- return func.argparser.print_help()
- except AttributeError:
- pass
+ def __init__(self):
+ self.uid = os.getuid()
+ self.euid = os.geteuid()
- try:
- return self.stdout.write(func.__doc__ + "\n")
- except AttributeError:
- pass
+ def __enter__(self):
+ os.setreuid(self.euid, self.uid)
+ return self
- self.stdout.write((self.nohelp + "\n") % arg)
+ def __exit__(self, _type, value, traceback):
+ os.setreuid(self.uid, self.euid)
+ return False
- def irdb_handle_complete(self, manager, text, line, begidx, endidx):
- return [obj.handle for obj in manager.all() if obj.handle and obj.handle.startswith(text)]
-
-
- @parsecmd(argsubparsers,
- cmdarg("handle", help = "new handle"))
- def do_select_identity(self, args):
- """
- Select an identity handle for use with later commands.
+def open_swapped_uids(*open_args):
"""
-
- self.zoo.reset_identity(args.handle)
-
- def complete_select_identity(self, *args):
- return self.irdb_handle_complete(rpki.irdb.ResourceHolderCA.objects, *args)
-
-
- @parsecmd(argsubparsers)
- def do_initialize(self, args):
- """
- Initialize an RPKI installation. DEPRECATED.
-
- This command reads the configuration file, creates the BPKI and
- EntityDB directories, generates the initial BPKI certificates, and
- creates an XML file describing the resource-holding aspect of this
- RPKI installation.
+ Open a file with UIDs swapped for the duration of the open() call.
"""
- rootd_case = self.zoo.run_rootd and self.zoo.handle == self.zoo.cfg.get("handle")
+ with swap_uids():
+ return open(*open_args)
- r = self.zoo.initialize()
- r.save("%s.identity.xml" % self.zoo.handle,
- None if rootd_case else sys.stdout)
- if rootd_case:
- r = self.zoo.configure_rootd()
- if r is not None:
- r.save("%s.%s.repository-request.xml" % (self.zoo.handle, self.zoo.handle), sys.stdout)
-
- self.zoo.write_bpki_files()
-
-
- @parsecmd(argsubparsers,
- cmdarg("handle", help = "handle of entity to create"))
- def do_create_identity(self, args):
- """
- Create a new resource-holding entity.
-
- Returns XML file describing the new resource holder.
-
- This command is idempotent: calling it for a resource holder which
- already exists returns the existing identity.
- """
-
- self.zoo.reset_identity(args.handle)
-
- r = self.zoo.initialize_resource_bpki()
- r.save("%s.identity.xml" % self.zoo.handle, sys.stdout)
-
-
- @parsecmd(argsubparsers)
- def do_initialize_server_bpki(self, args):
- """
- Initialize server BPKI portion of an RPKI installation.
-
- Reads server configuration from configuration file and creates the
- server BPKI objects needed to start daemons.
- """
-
- self.zoo.initialize_server_bpki()
- self.zoo.write_bpki_files()
-
-
- @parsecmd(argsubparsers)
- def do_update_bpki(self, args):
- """
- Update BPKI certificates. Assumes an existing RPKI installation.
-
- Basic plan here is to reissue all BPKI certificates we can, right
- now. In the long run we might want to be more clever about only
- touching ones that need maintenance, but this will do for a start.
-
- We also reissue CRLs for all CAs.
-
- Most likely this should be run under cron.
- """
-
- self.zoo.update_bpki()
- self.zoo.write_bpki_files()
- try:
- self.zoo.synchronize_bpki()
- except Exception, e:
- print "Couldn't push updated BPKI material into daemons: %s" % e
-
-
- @parsecmd(argsubparsers,
- cmdarg("--child_handle", help = "override default handle for new child"),
- cmdarg("--valid_until", help = "override default validity interval"),
- cmdarg("child_xml", help = "XML file containing child's identity"))
- def do_configure_child(self, args):
- """
- Configure a new child of this RPKI entity.
-
- This command extracts the child's data from an XML input file,
- cross-certifies the child's resource-holding BPKI certificate, and
- generates an XML output file describing the relationship between
- the child and this parent, including this parent's BPKI data and
- up-down protocol service URI.
- """
-
- r, child_handle = self.zoo.configure_child(args.child_xml, args.child_handle, args.valid_until)
- r.save("%s.%s.parent-response.xml" % (self.zoo.handle, child_handle), sys.stdout)
- self.zoo.synchronize_ca()
-
-
- @parsecmd(argsubparsers,
- cmdarg("child_handle", help = "handle of child to delete"))
- def do_delete_child(self, args):
- """
- Delete a child of this RPKI entity.
- """
-
- try:
- self.zoo.delete_child(args.child_handle)
- self.zoo.synchronize_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Child.DoesNotExist:
- print "No such child \"%s\"" % args.child_handle
-
- def complete_delete_child(self, *args):
- return self.irdb_handle_complete(self.zoo.resource_ca.children, *args)
-
-
- @parsecmd(argsubparsers,
- cmdarg("--parent_handle", help = "override default handle for new parent"),
- cmdarg("parent_xml", help = "XML file containing parent's response"))
- def do_configure_parent(self, args):
- """
- Configure a new parent of this RPKI entity.
-
- This command reads the parent's response XML, extracts the
- parent's BPKI and service URI information, cross-certifies the
- parent's BPKI data into this entity's BPKI, and checks for offers
- or referrals of publication service. If a publication offer or
- referral is present, we generate a request-for-service message to
- that repository, in case the user wants to avail herself of the
- referral or offer.
-
- We do NOT attempt automatic synchronization with rpkid at the
- completion of this command, because synchronization at this point
- will usually fail due to the repository not being set up yet. If
- you know what you are doing and for some reason really want to
- synchronize here, run the synchronize command yourself.
- """
-
- r, parent_handle = self.zoo.configure_parent(args.parent_xml, args.parent_handle)
- r.save("%s.%s.repository-request.xml" % (self.zoo.handle, parent_handle), sys.stdout)
-
-
- @parsecmd(argsubparsers,
- cmdarg("parent_handle", help = "handle of parent to delete"))
- def do_delete_parent(self, args):
- """
- Delete a parent of this RPKI entity.
- """
-
- try:
- self.zoo.delete_parent(args.parent_handle)
- self.zoo.synchronize_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Parent.DoesNotExist:
- print "No such parent \"%s\"" % args.parent_handle
+class main(Cmd):
- def complete_delete_parent(self, *args):
- return self.irdb_handle_complete(self.zoo.resource_ca.parents, *args)
+ prompt = "rpkic> "
+ completedefault = Cmd.filename_complete
+
+ # Top-level argparser, for stuff that one might want when starting
+ # up the interactive command loop. Not sure -i belongs here, but
+ # it's harmless so leave it here for the moment.
+
+ top_argparser = argparse.ArgumentParser(add_help = False)
+ top_argparser.add_argument("-c", "--config",
+ help = "override default location of configuration file")
+ top_argparser.add_argument("-i", "--identity", "--handle",
+                               help = "set initial entity handle")
+ top_argparser.add_argument("--profile",
+ help = "enable profiling, saving data to PROFILE")
+
+ # Argparser for non-interactive commands (no command loop).
+
+ full_argparser = argparse.ArgumentParser(parents = [top_argparser],
+ description = module_doc)
+ argsubparsers = full_argparser.add_subparsers(title = "Commands", metavar = "")
+
+ def __init__(self):
+ Cmd.__init__(self)
+ os.environ["TZ"] = "UTC"
+ time.tzset()
+
+ # Try parsing just the arguments that make sense if we're
+ # going to be running an interactive command loop. If that
+ # parses everything, we're interactive, otherwise, it's either
+ # a non-interactive command or a parse error, so we let the full
+ # parser sort that out for us.
+
+ args, argv = self.top_argparser.parse_known_args()
+ self.interactive = not argv
+ if not self.interactive:
+ args = self.full_argparser.parse_args()
+
+ self.cfg_file = args.config
+ self.handle = args.identity
+
+ if args.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(self.main, args)
+ finally:
+ prof.dump_stats(args.profile)
+ print "Dumped profile data to %s" % args.profile
+ else:
+ self.main(args)
+
+ def main(self, args):
+ self.read_config()
+ if self.interactive:
+ self.cmdloop_with_history()
+ else:
+ args.func(self, args)
+
+ def read_history(self):
+ """
+ UID-swapping wrapper for parent .read_history() method.
+ """
- @parsecmd(argsubparsers)
- def do_configure_root(self, args):
- """
- Configure the current resource holding identity as a root.
+ with swap_uids():
+ Cmd.read_history(self)
+
+ def save_history(self):
+ """
+ UID-swapping wrapper for parent .save_history() method.
+ """
- This configures rpkid to talk to rootd as (one of) its parent(s).
- Returns repository request XML file like configure_parent does.
- """
+ with swap_uids():
+ Cmd.save_history(self)
- r = self.zoo.configure_rootd()
- if r is not None:
- r.save("%s.%s.repository-request.xml" % (self.zoo.handle, self.zoo.handle), sys.stdout)
- self.zoo.write_bpki_files()
+ def read_config(self):
+ # pylint: disable=W0201,W0602,W0621
- @parsecmd(argsubparsers)
- def do_delete_root(self, args):
- """
- Delete local RPKI root as parent of the current entity.
+ global rpki
- This tells the current rpkid identity (<self/>) to stop talking to
- rootd.
- """
+ try:
+ cfg = rpki.config.parser(set_filename = self.cfg_file, section = "myrpki")
+ cfg.configure_logging(
+ args = argparse.Namespace(
+ log_destination = "stderr",
+ log_level = "warning"),
+ ident = "rpkic")
+ cfg.set_global_flags()
+ except IOError, e:
+ sys.exit("%s: %s" % (e.strerror, e.filename))
- try:
- self.zoo.delete_rootd()
- self.zoo.synchronize_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Rootd.DoesNotExist:
- print "No associated rootd"
+ self.histfile = cfg.get("history_file", os.path.expanduser("~/.rpkic_history"))
+ self.autosync = cfg.getboolean("autosync", True, section = "rpkic")
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb")
- @parsecmd(argsubparsers,
- cmdarg("--flat", help = "use flat publication scheme", action = "store_true"),
- cmdarg("--sia_base", help = "override SIA base value"),
- cmdarg("client_xml", help = "XML file containing client request"))
- def do_configure_publication_client(self, args):
- """
- Configure publication server to know about a new client.
-
- This command reads the client's request for service,
- cross-certifies the client's BPKI data, and generates a response
- message containing the repository's BPKI data and service URI.
- """
-
- r, client_handle = self.zoo.configure_publication_client(args.client_xml, args.sia_base, args.flat)
- r.save("%s.repository-response.xml" % client_handle.replace("/", "."), sys.stdout)
- try:
- self.zoo.synchronize_pubd()
- except rpki.irdb.Repository.DoesNotExist:
- pass
+ import django
+ django.setup()
+ import rpki.irdb
- @parsecmd(argsubparsers,
- cmdarg("client_handle", help = "handle of client to delete"))
- def do_delete_publication_client(self, args):
- """
- Delete a publication client of this RPKI entity.
- """
+ try:
+ rpki.irdb.models.ca_certificate_lifetime = rpki.sundial.timedelta.parse(
+ cfg.get("bpki_ca_certificate_lifetime", section = "rpkic"))
+ except rpki.config.ConfigParser.Error:
+ pass
- try:
- self.zoo.delete_publication_client(args.client_handle)
- self.zoo.synchronize_pubd()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Client.DoesNotExist:
- print "No such client \"%s\"" % args.client_handle
+ try:
+ rpki.irdb.models.ee_certificate_lifetime = rpki.sundial.timedelta.parse(
+ cfg.get("bpki_ee_certificate_lifetime", section = "rpkic"))
+ except rpki.config.ConfigParser.Error:
+ pass
- def complete_delete_publication_client(self, *args):
- return self.irdb_handle_complete(self.zoo.server_ca.clients, *args)
+ try:
+ rpki.irdb.models.crl_interval = rpki.sundial.timedelta.parse(
+ cfg.get("bpki_crl_interval", section = "rpkic"))
+ except rpki.config.ConfigParser.Error:
+ pass
+ self.zoo = rpki.irdb.Zookeeper(cfg = cfg, handle = self.handle, logstream = sys.stdout)
- @parsecmd(argsubparsers,
- cmdarg("--parent_handle", help = "override default parent handle"),
- cmdarg("repository_xml", help = "XML file containing repository response"))
- def do_configure_repository(self, args):
- """
- Configure a publication repository for this RPKI entity.
- This command reads the repository's response to this entity's
- request for publication service, extracts and cross-certifies the
- BPKI data and service URI, and links the repository data with the
- corresponding parent data in our local database.
- """
+ def do_help(self, arg):
+ """
+ List available commands with "help" or detailed help with "help cmd".
+ """
- self.zoo.configure_repository(args.repository_xml, args.parent_handle)
- self.zoo.synchronize_ca()
+ argv = arg.split()
+ if not argv:
+ #return self.full_argparser.print_help()
+ return self.print_topics(
+ self.doc_header,
+ sorted(set(name[3:] for name in self.get_names()
+ if name.startswith("do_")
+ and getattr(self, name).__doc__)),
+ 15, 80)
- @parsecmd(argsubparsers,
- cmdarg("repository_handle", help = "handle of repository to delete"))
- def do_delete_repository(self, args):
- """
- Delete a repository of this RPKI entity.
- """
+ try:
+ return getattr(self, "help_" + argv[0])()
+ except AttributeError:
+ pass
- try:
- self.zoo.delete_repository(args.repository_handle)
- self.zoo.synchronize_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Repository.DoesNotExist:
- print "No such repository \"%s\"" % args.repository_handle
+ func = getattr(self, "do_" + argv[0], None)
- def complete_delete_repository(self, *args):
- return self.irdb_handle_complete(self.zoo.resource_ca.repositories, *args)
+ try:
+ return func.argparser.print_help()
+ except AttributeError:
+ pass
+ try:
+ return self.stdout.write(func.__doc__ + "\n")
+ except AttributeError:
+ pass
- @parsecmd(argsubparsers)
- def do_delete_identity(self, args):
- """
- Delete the current RPKI identity (rpkid <self/> object).
- """
+ self.stdout.write((self.nohelp + "\n") % arg)
- try:
- self.zoo.delete_self()
- self.zoo.synchronize_deleted_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
+ def irdb_handle_complete(self, manager, text, line, begidx, endidx):
+ return [obj.handle for obj in manager.all() if obj.handle and obj.handle.startswith(text)]
- @parsecmd(argsubparsers,
- cmdarg("--valid_until", help = "override default new validity interval"),
- cmdarg("child_handle", help = "handle of child to renew"))
- def do_renew_child(self, args):
- """
- Update validity period for one child entity.
- """
- self.zoo.renew_children(args.child_handle, args.valid_until)
- self.zoo.synchronize_ca()
- if self.autosync:
- self.zoo.run_rpkid_now()
+ @parsecmd(argsubparsers,
+ cmdarg("handle", help = "new handle"))
+ def do_select_identity(self, args):
+ """
+ Select an identity handle for use with later commands.
+ """
- def complete_renew_child(self, *args):
- return self.irdb_handle_complete(self.zoo.resource_ca.children, *args)
+ self.zoo.reset_identity(args.handle)
+ def complete_select_identity(self, *args):
+ return self.irdb_handle_complete(rpki.irdb.models.ResourceHolderCA.objects, *args)
- @parsecmd(argsubparsers,
- cmdarg("--valid_until", help = "override default new validity interval"))
- def do_renew_all_children(self, args):
- """
- Update validity period for all child entities.
- """
- self.zoo.renew_children(None, args.valid_until)
- self.zoo.synchronize_ca()
- if self.autosync:
- self.zoo.run_rpkid_now()
+ @parsecmd(argsubparsers)
+ def do_initialize(self, args):
+ """
+ Initialize an RPKI installation. DEPRECATED.
+ This command reads the configuration file, creates the BPKI and
+ EntityDB directories, generates the initial BPKI certificates, and
+ creates an XML file describing the resource-holding aspect of this
+ RPKI installation.
+ """
- @parsecmd(argsubparsers,
- cmdarg("prefixes_csv", help = "CSV file listing prefixes"))
- def do_load_prefixes(self, args):
- """
- Load prefixes into IRDB from CSV file.
- """
+ r = self.zoo.initialize()
+ with swap_uids():
+ r.save("%s.identity.xml" % self.zoo.handle, sys.stdout)
- self.zoo.load_prefixes(args.prefixes_csv, True)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ self.zoo.write_bpki_files()
- @parsecmd(argsubparsers)
- def do_show_child_resources(self, args):
- """
- Show resources assigned to children.
- """
+ @parsecmd(argsubparsers,
+ cmdarg("handle", help = "handle of entity to create"))
+ def do_create_identity(self, args):
+ """
+ Create a new resource-holding entity.
- for child in self.zoo.resource_ca.children.all():
- resources = child.resource_bag
- print "Child:", child.handle
- if resources.asn:
- print " ASN:", resources.asn
- if resources.v4:
- print " IPv4:", resources.v4
- if resources.v6:
- print " IPv6:", resources.v6
+ Returns XML file describing the new resource holder.
+ This command is idempotent: calling it for a resource holder which
+ already exists returns the existing identity.
+ """
- @parsecmd(argsubparsers)
- def do_show_roa_requests(self, args):
- """
- Show ROA requests.
- """
+ self.zoo.reset_identity(args.handle)
- for roa_request in self.zoo.resource_ca.roa_requests.all():
- prefixes = roa_request.roa_prefix_bag
- print "ASN: ", roa_request.asn
- if prefixes.v4:
- print " IPv4:", prefixes.v4
- if prefixes.v6:
- print " IPv6:", prefixes.v6
+ r = self.zoo.initialize_resource_bpki()
+ with swap_uids():
+ r.save("%s.identity.xml" % self.zoo.handle, sys.stdout)
- @parsecmd(argsubparsers)
- def do_show_ghostbuster_requests(self, args):
- """
- Show Ghostbuster requests.
- """
+ @parsecmd(argsubparsers)
+ def do_initialize_server_bpki(self, args):
+ """
+ Initialize server BPKI portion of an RPKI installation.
- for ghostbuster_request in self.zoo.resource_ca.ghostbuster_requests.all():
- print "Parent:", ghostbuster_request.parent or "*"
- print ghostbuster_request.vcard
+ Reads server configuration from configuration file and creates the
+ server BPKI objects needed to start daemons.
+ """
+ self.zoo.initialize_server_bpki()
+ self.zoo.write_bpki_files()
- @parsecmd(argsubparsers)
- def do_show_received_resources(self, args):
- """
- Show resources received by this entity from its parent(s).
- """
- for pdu in self.zoo.call_rpkid(
- rpki.left_right.list_received_resources_elt.make_pdu(self_handle = self.zoo.handle)):
+ @parsecmd(argsubparsers)
+ def do_update_bpki(self, args):
+ """
+ Update BPKI certificates. Assumes an existing RPKI installation.
+
+ Basic plan here is to reissue all BPKI certificates we can, right
+ now. In the long run we might want to be more clever about only
+ touching ones that need maintenance, but this will do for a start.
- print "Parent: ", pdu.parent_handle
- print " notBefore:", pdu.notBefore
- print " notAfter: ", pdu.notAfter
- print " URI: ", pdu.uri
- print " SIA URI: ", pdu.sia_uri
- print " AIA URI: ", pdu.aia_uri
- print " ASN: ", pdu.asn
- print " IPv4: ", pdu.ipv4
- print " IPv6: ", pdu.ipv6
+ We also reissue CRLs for all CAs.
+ Most likely this should be run under cron.
+ """
+
+ self.zoo.update_bpki()
+ self.zoo.write_bpki_files()
+ try:
+ self.zoo.synchronize_bpki()
+ except Exception, e:
+ print "Couldn't push updated BPKI material into daemons: %s" % e
- @parsecmd(argsubparsers)
- def do_show_published_objects(self, args):
- """
- Show published objects.
- """
- for pdu in self.zoo.call_rpkid(
- rpki.left_right.list_published_objects_elt.make_pdu(self_handle = self.zoo.handle)):
+ @parsecmd(argsubparsers,
+ cmdarg("--child_handle", help = "override default handle for new child"),
+ cmdarg("--valid_until", help = "override default validity interval"),
+ cmdarg("child_xml", help = "XML file containing child's identity"))
+ def do_configure_child(self, args):
+ """
+ Configure a new child of this RPKI entity.
- track = rpki.x509.uri_dispatch(pdu.uri)(Base64 = pdu.obj).tracking_data(pdu.uri)
- child = pdu.child_handle
+ This command extracts the child's data from an XML input file,
+ cross-certifies the child's resource-holding BPKI certificate, and
+ generates an XML output file describing the relationship between
+ the child and this parent, including this parent's BPKI data and
+ up-down protocol service URI.
+ """
+
+ with open_swapped_uids(args.child_xml) as f:
+ r, child_handle = self.zoo.configure_child(f, args.child_handle, args.valid_until)
+ with swap_uids():
+ r.save("%s.%s.parent-response.xml" % (self.zoo.handle, child_handle), sys.stdout)
+ self.zoo.synchronize_ca()
- if child is None:
- print track
- else:
- print track, child
+ @parsecmd(argsubparsers,
+ cmdarg("child_handle", help = "handle of child to delete"))
+ def do_delete_child(self, args):
+ """
+ Delete a child of this RPKI entity.
+ """
+
+ try:
+ self.zoo.delete_child(args.child_handle)
+ self.zoo.synchronize_ca()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.Child.DoesNotExist:
+ print "No such child \"%s\"" % args.child_handle
+
+ def complete_delete_child(self, *args):
+ return self.irdb_handle_complete(self.zoo.resource_ca.children, *args)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--parent_handle", help = "override default handle for new parent"),
+ cmdarg("parent_xml", help = "XML file containing parent's response"))
+ def do_configure_parent(self, args):
+ """
+ Configure a new parent of this RPKI entity.
+
+ This command reads the parent's response XML, extracts the
+ parent's BPKI and service URI information, cross-certifies the
+ parent's BPKI data into this entity's BPKI, and checks for offers
+ or referrals of publication service. If a publication offer or
+ referral is present, we generate a request-for-service message to
+ that repository, in case the user wants to avail herself of the
+ referral or offer.
+
+ We do NOT attempt automatic synchronization with rpkid at the
+ completion of this command, because synchronization at this point
+ will usually fail due to the repository not being set up yet. If
+ you know what you are doing and for some reason really want to
+ synchronize here, run the synchronize command yourself.
+ """
+
+ with open_swapped_uids(args.parent_xml) as f:
+ r, parent_handle = self.zoo.configure_parent(f, args.parent_handle)
+ with swap_uids():
+ r.save("%s.%s.repository-request.xml" % (self.zoo.handle, parent_handle), sys.stdout)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("parent_handle", help = "handle of parent to delete"))
+ def do_delete_parent(self, args):
+ """
+ Delete a parent of this RPKI entity.
+ """
+
+ try:
+ self.zoo.delete_parent(args.parent_handle)
+ self.zoo.synchronize_ca()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.Parent.DoesNotExist:
+ print "No such parent \"%s\"" % args.parent_handle
+
+ def complete_delete_parent(self, *args):
+ return self.irdb_handle_complete(self.zoo.resource_ca.parents, *args)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--resources", help = "restrict root to specified resources",
+ type = rpki.resource_set.resource_bag.from_str,
+ default = "0.0.0.0/0,::/0,0-4294967295"),
+ cmdarg("--root_handle", help = "override default handle for new root"))
+ def do_configure_root(self, args):
+ """
+ Configure the current resource holding identity as a root.
+
+ Returns repository request XML file like configure_parent does.
+ """
+
+ print "Generating root for resources {!s}".format(args.resources) # XXX
+
+ r = self.zoo.configure_root(args.root_handle, args.resources)
+ if r is not None:
+ with swap_uids():
+ r.save("%s.%s.repository-request.xml" % (self.zoo.handle, self.zoo.handle), sys.stdout)
+ self.zoo.write_bpki_files()
+
+
+ @parsecmd(argsubparsers)
+ def do_delete_root(self, args):
+ """
+ Delete local RPKI root as parent of the current entity.
+ """
+
+ raise NotImplementedError
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--root_handle", help = "override default handle"),
+ cmdarg("--output_file", help = "override default output filename"))
+ def do_extract_root_certificate(self, args):
+ """
+ Extract self-signed RPKI certificate from a root object.
+ """
+
+ cert, uris = self.zoo.extract_root_certificate_and_uris(args.root_handle)
+ if cert is None:
+ print "No certificate currently available"
+ else:
+ fn = args.output_file or (cert.gSKI() + ".cer")
+ with open_swapped_uids(fn, "wb") as f:
+ print "Writing", f.name
+ f.write(cert.get_DER())
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--root_handle", help = "override default handle"),
+ cmdarg("--output_file", help = "override default output filename"))
+ def do_extract_root_tal(self, args):
+ """
+ Extract the Trust Anchor Locator (TAL) for a root object.
+ """
+
+ cert, uris = self.zoo.extract_root_certificate_and_uris(args.root_handle)
+ if cert is None:
+ print "No certificate currently available"
+ else:
+ fn = args.output_file or (cert.gSKI() + ".tal")
+ with open_swapped_uids(fn, "w") as f:
+ print "Writing", f.name
+ for uri in uris:
+ f.write(uri + "\n")
+ f.write("\n")
+ f.write(cert.getPublicKey().get_Base64())
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--flat", help = "use flat publication scheme", action = "store_true"),
+ cmdarg("--sia_base", help = "override SIA base value"),
+ cmdarg("client_xml", help = "XML file containing client request"))
+ def do_configure_publication_client(self, args):
+ """
+ Configure publication server to know about a new client.
+
+ This command reads the client's request for service,
+ cross-certifies the client's BPKI data, and generates a response
+ message containing the repository's BPKI data and service URI.
+ """
+
+ with open_swapped_uids(args.client_xml) as f:
+ r, client_handle = self.zoo.configure_publication_client(f, args.sia_base, args.flat)
+ with swap_uids():
+ r.save("%s.repository-response.xml" % client_handle.replace("/", "."), sys.stdout)
+ try:
+ self.zoo.synchronize_pubd()
+ except rpki.irdb.models.Repository.DoesNotExist:
+ pass
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("client_handle", help = "handle of client to delete"))
+ def do_delete_publication_client(self, args):
+ """
+ Delete a publication client of this RPKI entity.
+ """
+
+ try:
+ self.zoo.delete_publication_client(args.client_handle)
+ self.zoo.synchronize_pubd()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.Client.DoesNotExist:
+ print "No such client \"%s\"" % args.client_handle
+
+ def complete_delete_publication_client(self, *args):
+ return self.irdb_handle_complete(self.zoo.server_ca.clients, *args)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--parent_handle", help = "override default parent handle"),
+ cmdarg("repository_xml", help = "XML file containing repository response"))
+ def do_configure_repository(self, args):
+ """
+ Configure a publication repository for this RPKI entity.
+
+ This command reads the repository's response to this entity's
+ request for publication service, extracts and cross-certifies the
+ BPKI data and service URI, and links the repository data with the
+ corresponding parent data in our local database.
+ """
+
+ with open_swapped_uids(args.repository_xml) as f:
+ self.zoo.configure_repository(f, args.parent_handle)
+ self.zoo.synchronize_ca()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("repository_handle", help = "handle of repository to delete"))
+ def do_delete_repository(self, args):
+ """
+ Delete a repository of this RPKI entity.
+ """
+
+ try:
+ self.zoo.delete_repository(args.repository_handle)
+ self.zoo.synchronize_ca()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.Repository.DoesNotExist:
+ print "No such repository \"%s\"" % args.repository_handle
+
+ def complete_delete_repository(self, *args):
+ return self.irdb_handle_complete(self.zoo.resource_ca.repositories, *args)
+
+
+ @parsecmd(argsubparsers)
+ def do_delete_identity(self, args):
+ """
+ Delete the current RPKI identity (rpkid <tenant/> object).
+ """
+
+ try:
+ self.zoo.delete_tenant()
+ self.zoo.synchronize_deleted_ca()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--valid_until", help = "override default new validity interval"),
+ cmdarg("child_handle", help = "handle of child to renew"))
+ def do_renew_child(self, args):
+ """
+ Update validity period for one child entity.
+ """
+
+ self.zoo.renew_children(args.child_handle, args.valid_until)
+ self.zoo.synchronize_ca()
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+ def complete_renew_child(self, *args):
+ return self.irdb_handle_complete(self.zoo.resource_ca.children, *args)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--valid_until", help = "override default new validity interval"))
+ def do_renew_all_children(self, args):
+ """
+ Update validity period for all child entities.
+ """
+
+ self.zoo.renew_children(None, args.valid_until)
+ self.zoo.synchronize_ca()
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("prefixes_csv", help = "CSV file listing prefixes"))
+ def do_load_prefixes(self, args):
+ """
+ Load prefixes into IRDB from CSV file.
+ """
+
+ with open_swapped_uids(args.prefixes_csv) as f:
+ self.zoo.load_prefixes(f, True)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers)
+ def do_show_child_resources(self, args):
+ """
+ Show resources assigned to children.
+ """
+
+ for child in self.zoo.resource_ca.children.all():
+ resources = child.resource_bag
+ print "Child:", child.handle
+ if resources.asn:
+ print " ASN:", resources.asn
+ if resources.v4:
+ print " IPv4:", resources.v4
+ if resources.v6:
+ print " IPv6:", resources.v6
+
+
+ @parsecmd(argsubparsers)
+ def do_show_roa_requests(self, args):
+ """
+ Show ROA requests.
+ """
+
+ for roa_request in self.zoo.resource_ca.roa_requests.all():
+ prefixes = roa_request.roa_prefix_bag
+ print "ASN: ", roa_request.asn
+ if prefixes.v4:
+ print " IPv4:", prefixes.v4
+ if prefixes.v6:
+ print " IPv6:", prefixes.v6
+
+
+ @parsecmd(argsubparsers)
+ def do_show_ghostbuster_requests(self, args):
+ """
+ Show Ghostbuster requests.
+ """
+
+ for ghostbuster_request in self.zoo.resource_ca.ghostbuster_requests.all():
+ print "Parent:", ghostbuster_request.parent or "*"
+ print ghostbuster_request.vcard
+
+
+ @parsecmd(argsubparsers)
+ def do_show_received_resources(self, args):
+ """
+ Show resources received by this entity from its parent(s).
+ """
+
+ q_msg = self.zoo.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_list_received_resources, tenant_handle = self.zoo.handle)
+
+ for r_pdu in self.zoo.call_rpkid(q_msg):
+
+ print "Parent: ", r_pdu.get("parent_handle")
+ print " notBefore:", r_pdu.get("notBefore")
+ print " notAfter: ", r_pdu.get("notAfter")
+ print " URI: ", r_pdu.get("uri")
+ print " SIA URI: ", r_pdu.get("sia_uri")
+ print " AIA URI: ", r_pdu.get("aia_uri")
+ print " ASN: ", r_pdu.get("asn")
+ print " IPv4: ", r_pdu.get("ipv4")
+ print " IPv6: ", r_pdu.get("ipv6")
+
+
+ @parsecmd(argsubparsers)
+ def do_show_published_objects(self, args):
+ """
+ Show published objects.
+ """
+
+ q_msg = self.zoo.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_list_published_objects, tenant_handle = self.zoo.handle)
+
+ for r_pdu in self.zoo.call_rpkid(q_msg):
+ uri = r_pdu.get("uri")
+ track = rpki.x509.uri_dispatch(uri)(Base64 = r_pdu.text).tracking_data(uri)
+ child_handle = r_pdu.get("child_handle")
+
+ if child_handle is None:
+ print track
+ else:
+ print track, child_handle
+
+
+ @parsecmd(argsubparsers)
+ def do_show_bpki(self, args):
+ """
+ Show this entity's BPKI objects.
+ """
+
+ print "Self: ", self.zoo.resource_ca.handle
+ print " notBefore:", self.zoo.resource_ca.certificate.getNotBefore()
+ print " notAfter: ", self.zoo.resource_ca.certificate.getNotAfter()
+ print " Subject: ", self.zoo.resource_ca.certificate.getSubject()
+ print " SKI: ", self.zoo.resource_ca.certificate.hSKI()
+ for bsc in self.zoo.resource_ca.bscs.all():
+ print "BSC: ", bsc.handle
+ print " notBefore:", bsc.certificate.getNotBefore()
+ print " notAfter: ", bsc.certificate.getNotAfter()
+ print " Subject: ", bsc.certificate.getSubject()
+ print " SKI: ", bsc.certificate.hSKI()
+ for parent in self.zoo.resource_ca.parents.all():
+ print "Parent: ", parent.handle
+ print " notBefore:", parent.certificate.getNotBefore()
+ print " notAfter: ", parent.certificate.getNotAfter()
+ print " Subject: ", parent.certificate.getSubject()
+ print " SKI: ", parent.certificate.hSKI()
+ print " URL: ", parent.service_uri
+ for child in self.zoo.resource_ca.children.all():
+ print "Child: ", child.handle
+ print " notBefore:", child.certificate.getNotBefore()
+ print " notAfter: ", child.certificate.getNotAfter()
+ print " Subject: ", child.certificate.getSubject()
+ print " SKI: ", child.certificate.hSKI()
+ for repository in self.zoo.resource_ca.repositories.all():
+ print "Repository: ", repository.handle
+ print " notBefore:", repository.certificate.getNotBefore()
+ print " notAfter: ", repository.certificate.getNotAfter()
+ print " Subject: ", repository.certificate.getSubject()
+ print " SKI: ", repository.certificate.hSKI()
+ print " URL: ", repository.service_uri
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("asns_csv", help = "CSV file listing ASNs"))
+ def do_load_asns(self, args):
+ """
+ Load ASNs into IRDB from CSV file.
+ """
+
+ with open_swapped_uids(args.asns_csv) as f:
+ self.zoo.load_asns(f, True)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("roa_requests_csv", help = "CSV file listing ROA requests"))
+ def do_load_roa_requests(self, args):
+ """
+ Load ROA requests into IRDB from CSV file.
+ """
+
+ with open_swapped_uids(args.roa_requests_csv) as f:
+ self.zoo.load_roa_requests(f)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("ghostbuster_requests", help = "file listing Ghostbuster requests as a sequence of VCards"))
+ def do_load_ghostbuster_requests(self, args):
+ """
+ Load Ghostbuster requests into IRDB from file.
+ """
+
+ with open_swapped_uids(args.ghostbuster_requests) as f:
+ self.zoo.load_ghostbuster_requests(f)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--valid_until", help = "override default validity interval"),
+ cmdarg("router_certificate_request_xml", help = "file containing XML router certificate request"))
+ def do_add_router_certificate_request(self, args):
+ """
+ Load router certificate request(s) into IRDB from XML file.
+ """
+
+ with open_swapped_uids(args.router_certificate_request_xml) as f:
+ self.zoo.add_router_certificate_request(f, args.valid_until)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+ @parsecmd(argsubparsers,
+ cmdarg("gski", help = "g(SKI) of router certificate request to delete"))
+ def do_delete_router_certificate_request(self, args):
+ """
+ Delete a router certificate request from the IRDB.
+ """
+
+ try:
+ self.zoo.delete_router_certificate_request(args.gski)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.EECertificateRequest.DoesNotExist:
+ print "No certificate request matching g(SKI) \"%s\"" % args.gski
+
+ def complete_delete_router_certificate_request(self, text, line, begidx, endidx):
+ return [obj.gski for obj in self.zoo.resource_ca.ee_certificate_requests.all()
+ if obj.gski and obj.gski.startswith(text)]
+
+
+ @parsecmd(argsubparsers)
+ def do_show_router_certificate_requests(self, args):
+ """
+ Show this entity's router certificate requests.
+ """
+
+ for req in self.zoo.resource_ca.ee_certificate_requests.all():
+ print "%s %s %s %s" % (req.gski, req.valid_until, req.cn, req.sn)
+
+
+ # What about updates? Validity interval, change router-id, change
+ # ASNs. Not sure what this looks like yet, blunder ahead with the
+ # core code while mulling over the UI.
+
+
+ @parsecmd(argsubparsers)
+ def do_synchronize(self, args):
+ """
+ Whack daemons to match IRDB.
- @parsecmd(argsubparsers)
- def do_show_bpki(self, args):
- """
- Show this entity's BPKI objects.
- """
+ This command may be replaced by implicit synchronization embedded
+ in other commands, haven't decided yet.
+ """
- print "Self: ", self.zoo.resource_ca.handle
- print " notBefore:", self.zoo.resource_ca.certificate.getNotBefore()
- print " notAfter: ", self.zoo.resource_ca.certificate.getNotAfter()
- print " Subject: ", self.zoo.resource_ca.certificate.getSubject()
- print " SKI: ", self.zoo.resource_ca.certificate.hSKI()
- for bsc in self.zoo.resource_ca.bscs.all():
- print "BSC: ", bsc.handle
- print " notBefore:", bsc.certificate.getNotBefore()
- print " notAfter: ", bsc.certificate.getNotAfter()
- print " Subject: ", bsc.certificate.getSubject()
- print " SKI: ", bsc.certificate.hSKI()
- for parent in self.zoo.resource_ca.parents.all():
- print "Parent: ", parent.handle
- print " notBefore:", parent.certificate.getNotBefore()
- print " notAfter: ", parent.certificate.getNotAfter()
- print " Subject: ", parent.certificate.getSubject()
- print " SKI: ", parent.certificate.hSKI()
- print " URL: ", parent.service_uri
- for child in self.zoo.resource_ca.children.all():
- print "Child: ", child.handle
- print " notBefore:", child.certificate.getNotBefore()
- print " notAfter: ", child.certificate.getNotAfter()
- print " Subject: ", child.certificate.getSubject()
- print " SKI: ", child.certificate.hSKI()
- for repository in self.zoo.resource_ca.repositories.all():
- print "Repository: ", repository.handle
- print " notBefore:", repository.certificate.getNotBefore()
- print " notAfter: ", repository.certificate.getNotAfter()
- print " Subject: ", repository.certificate.getSubject()
- print " SKI: ", repository.certificate.hSKI()
- print " URL: ", repository.service_uri
-
-
- @parsecmd(argsubparsers,
- cmdarg("asns_csv", help = "CSV file listing ASNs"))
- def do_load_asns(self, args):
- """
- Load ASNs into IRDB from CSV file.
- """
+ self.zoo.synchronize()
- self.zoo.load_asns(args.asns_csv, True)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ @parsecmd(argsubparsers)
+ def do_force_publication(self, args):
+ """
+ Whack rpkid to force (re)publication of everything.
- @parsecmd(argsubparsers,
- cmdarg("roa_requests_csv", help = "CSV file listing ROA requests"))
- def do_load_roa_requests(self, args):
- """
- Load ROA requests into IRDB from CSV file.
- """
+ This is not usually necessary, as rpkid automatically publishes
+ changes it makes, but this command can be useful occasionally when
+ a fault or configuration error has left rpkid holding data which
+ it has not been able to publish.
+ """
- self.zoo.load_roa_requests(args.roa_requests_csv)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ self.zoo.publish_world_now()
- @parsecmd(argsubparsers,
- cmdarg("ghostbuster_requests", help = "file listing Ghostbuster requests as a sequence of VCards"))
- def do_load_ghostbuster_requests(self, args):
- """
- Load Ghostbuster requests into IRDB from file.
- """
+ @parsecmd(argsubparsers)
+ def do_force_reissue(self, args):
+ """
+ Whack rpkid to force reissuance of everything.
- self.zoo.load_ghostbuster_requests(args.ghostbuster_requests)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ This is not usually necessary, as rpkid automatically reissues
+ objects as needed, but this command can be useful
+ occasionally when a fault or configuration error has prevented
+ rpkid from reissuing when it should have.
+ """
+ self.zoo.reissue()
- @parsecmd(argsubparsers,
- cmdarg("--valid_until", help = "override default validity interval"),
- cmdarg("router_certificate_request_xml", help = "file containing XML router certificate request"))
- def do_add_router_certificate_request(self, args):
- """
- Load router certificate request(s) into IRDB from XML file.
- """
- self.zoo.add_router_certificate_request(args.router_certificate_request_xml, args.valid_until)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ @parsecmd(argsubparsers)
+ def do_force_run_now(self, args):
+ """
+ Force rpkid to run periodic tasks for this Tenant immediately.
- @parsecmd(argsubparsers,
- cmdarg("gski", help = "g(SKI) of router certificate request to delete"))
- def do_delete_router_certificate_request(self, args):
- """
- Delete a router certificate request from the IRDB.
- """
+ This is not usually necessary, as rpkid runs all of these
+ tasks on a regular schedule, but this command can be useful
+ occasionally when configuration change is taking a long time
+ to percolate through a series of parent/child exchanges.
+ """
- try:
- self.zoo.delete_router_certificate_request(args.gski)
- if self.autosync:
self.zoo.run_rpkid_now()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.EECertificateRequest.DoesNotExist:
- print "No certificate request matching g(SKI) \"%s\"" % args.gski
- def complete_delete_router_certificate_request(self, text, line, begidx, endidx):
- return [obj.gski for obj in self.zoo.resource_ca.ee_certificate_requests.all()
- if obj.gski and obj.gski.startswith(text)]
+ @parsecmd(argsubparsers)
+ def do_up_down_rekey(self, args):
+ """
+ Initiate a "rekey" operation.
- @parsecmd(argsubparsers)
- def do_show_router_certificate_requests(self, args):
- """
- Show this entity's router certificate requests.
- """
-
- for req in self.zoo.resource_ca.ee_certificate_requests.all():
- print "%s %s %s %s" % (req.gski, req.valid_until, req.cn, req.sn)
-
-
- # What about updates? Validity interval, change router-id, change
- # ASNs. Not sure what this looks like yet, blunder ahead with the
- # core code while mulling over the UI.
-
-
- @parsecmd(argsubparsers)
- def do_synchronize(self, args):
- """
- Whack daemons to match IRDB.
-
- This command may be replaced by implicit synchronization embedded
- in of other commands, haven't decided yet.
- """
-
- self.zoo.synchronize()
-
-
- @parsecmd(argsubparsers)
- def do_force_publication(self, args):
- """
- Whack rpkid to force (re)publication of everything.
-
- This is not usually necessary, as rpkid automatically publishes
- changes it makes, but this command can be useful occasionally when
- a fault or configuration error has left rpkid holding data which
- it has not been able to publish.
- """
-
- self.zoo.publish_world_now()
-
-
- @parsecmd(argsubparsers)
- def do_force_reissue(self, args):
- """
- Whack rpkid to force reissuance of everything.
+ This tells rpkid to generate new keys for each certificate issued
+ to it via the up-down protocol.
- This is not usually necessary, as rpkid reissues automatically
- objects automatically as needed, but this command can be useful
- occasionally when a fault or configuration error has prevented
- rpkid from reissuing when it should have.
- """
+ Rekeying is the first stage of a key rollover operation. You will
+ need to follow it up later with a "revoke" operation to clean up
+ the old keys.
+ """
- self.zoo.reissue()
+ self.zoo.rekey()
- @parsecmd(argsubparsers)
- def do_up_down_rekey(self, args):
- """
- Initiate a "rekey" operation.
+ @parsecmd(argsubparsers)
+ def do_up_down_revoke(self, args):
+ """
+ Initiate a "revoke" operation.
- This tells rpkid to generate new keys for each certificate issued
- to it via the up-down protocol.
-
- Rekeying is the first stage of a key rollover operation. You will
- need to follow it up later with a "revoke" operation to clean up
- the old keys
- """
-
- self.zoo.rekey()
-
-
- @parsecmd(argsubparsers)
- def do_up_down_revoke(self, args):
- """
- Initiate a "revoke" operation.
+ This tells rpkid to clean up old keys formerly used by
+ certificates issued to it via the up-down protocol.
- This tells rpkid to clean up old keys formerly used by
- certificates issued to it via the up-down protocol.
+ This is the cleanup stage of a key rollover operation.
+ """
- This is the cleanup stage of a key rollover operation.
- """
+ self.zoo.revoke()
- self.zoo.revoke()
+ @parsecmd(argsubparsers)
+ def do_revoke_forgotten(self, args):
+ """
+ Initiate a "revoke_forgotten" operation.
- @parsecmd(argsubparsers)
- def do_revoke_forgotten(self, args):
- """
- Initiate a "revoke_forgotten" operation.
-
- This tells rpkid to ask its parent to revoke certificates for
- which rpkid does not know the private keys.
-
- This should never happen during ordinary operation, but can happen
- if rpkid is misconfigured or its database has been damaged, so we
- need a way to resynchronize rpkid with its parent in such cases.
- We could do this automatically, but as we don't know the precise
- cause of the failure we don't know if it's recoverable locally
- (eg, from an SQL backup), so we require a manual trigger before
- discarding possibly-useful certificates.
- """
+ This tells rpkid to ask its parent to revoke certificates for
+ which rpkid does not know the private keys.
- self.zoo.revoke_forgotten()
+ This should never happen during ordinary operation, but can happen
+ if rpkid is misconfigured or its database has been damaged, so we
+ need a way to resynchronize rpkid with its parent in such cases.
+ We could do this automatically, but as we don't know the precise
+ cause of the failure we don't know if it's recoverable locally
+ (eg, from an SQL backup), so we require a manual trigger before
+ discarding possibly-useful certificates.
+ """
+ self.zoo.revoke_forgotten()
- @parsecmd(argsubparsers)
- def do_clear_all_sql_cms_replay_protection(self, args):
- """
- Tell rpkid and pubd to clear replay protection.
- This clears the replay protection timestamps stored in SQL for all
- entities known to rpkid and pubd. This is a fairly blunt
- instrument, but as we don't expect this to be necessary except in
- the case of gross misconfiguration, it should suffice
- """
+ @parsecmd(argsubparsers)
+ def do_clear_all_sql_cms_replay_protection(self, args):
+ """
+ Tell rpkid and pubd to clear replay protection.
- self.zoo.clear_all_sql_cms_replay_protection()
+ This clears the replay protection timestamps stored in SQL for all
+ entities known to rpkid and pubd. This is a fairly blunt
+ instrument, but as we don't expect this to be necessary except in
+ the case of gross misconfiguration, it should suffice.
+ """
+ self.zoo.clear_all_sql_cms_replay_protection()
- @parsecmd(argsubparsers)
- def do_version(self, args):
- """
- Show current software version number.
- """
- print rpki.version.VERSION
+ @parsecmd(argsubparsers)
+ def do_version(self, args):
+ """
+ Show current software version number.
+ """
+ print rpki.version.VERSION
- @parsecmd(argsubparsers)
- def do_list_self_handles(self, args):
- """
- List all <self/> handles in this rpkid instance.
- """
- for ca in rpki.irdb.ResourceHolderCA.objects.all():
- print ca.handle
+ @parsecmd(argsubparsers)
+ def do_list_tenant_handles(self, args):
+ """
+ List all <tenant/> handles in this rpkid instance.
+ """
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ print ca.handle
diff --git a/rpki/rpkid.py b/rpki/rpkid.py
index 628209af..4b2333d2 100644
--- a/rpki/rpkid.py
+++ b/rpki/rpkid.py
@@ -22,2470 +22,768 @@ RPKI CA engine.
"""
import os
-import re
import time
import random
-import base64
import logging
+import weakref
import argparse
+import urlparse
+
+import tornado.gen
+import tornado.web
+import tornado.locks
+import tornado.ioloop
+import tornado.queues
+import tornado.httputil
+import tornado.httpclient
+import tornado.httpserver
+
+from lxml.etree import Element, SubElement, tostring as ElementToString
+
import rpki.resource_set
import rpki.up_down
import rpki.left_right
import rpki.x509
-import rpki.sql
-import rpki.http
import rpki.config
import rpki.exceptions
import rpki.relaxng
import rpki.log
-import rpki.async
import rpki.daemonize
-import rpki.rpkid_tasks
-
-logger = logging.getLogger(__name__)
-
-class main(object):
- """
- Main program for rpkid.
- """
-
- def __init__(self):
-
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- self.irdbd_cms_timestamp = None
- self.irbe_cms_timestamp = None
- self.task_current = None
- self.task_queue = []
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize")
- parser.add_argument("--pidfile",
- help = "override default location of pid file")
- parser.add_argument("--profile",
- help = "enable profiling, saving data to PROFILE")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
-
- self.profile = args.profile
-
- rpki.log.init("rpkid", args)
-
- self.cfg = rpki.config.parser(args.config, "rpkid")
- self.cfg.set_global_flags()
-
- if not args.foreground:
- rpki.daemonize.daemon(pidfile = args.pidfile)
-
- if self.profile:
- import cProfile
- prof = cProfile.Profile()
- try:
- prof.runcall(self.main)
- finally:
- prof.dump_stats(self.profile)
- logger.info("Dumped profile data to %s", self.profile)
- else:
- self.main()
-
- def main(self):
-
- startup_msg = self.cfg.get("startup-message", "")
- if startup_msg:
- logger.info(startup_msg)
-
- if self.profile:
- logger.info("Running in profile mode with output to %s", self.profile)
-
- self.sql = rpki.sql.session(self.cfg)
-
- self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
- self.irdb_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdb-cert"))
- self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert"))
- self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert"))
- self.rpkid_key = rpki.x509.RSA( Auto_update = self.cfg.get("rpkid-key"))
-
- self.irdb_url = self.cfg.get("irdb-url")
-
- self.http_server_host = self.cfg.get("server-host", "")
- self.http_server_port = self.cfg.getint("server-port")
-
- self.publication_kludge_base = self.cfg.get("publication-kludge-base", "publication/")
-
- # Icky hack to let Iain do some testing quickly, should go away
- # once we sort out whether we can make this change permanent.
- #
- # OK, the stuff to add router certificate support makes enough
- # other changes that we're going to need a migration program in
- # any case, so might as well throw the switch here too, or at
- # least find out if it (still) works as expected.
-
- self.merge_publication_directories = self.cfg.getboolean("merge_publication_directories",
- True)
-
- self.use_internal_cron = self.cfg.getboolean("use-internal-cron", True)
-
- self.initial_delay = random.randint(self.cfg.getint("initial-delay-min", 10),
- self.cfg.getint("initial-delay-max", 120))
-
- # Should be much longer in production
- self.cron_period = rpki.sundial.timedelta(seconds = self.cfg.getint("cron-period", 120))
- self.cron_keepalive = rpki.sundial.timedelta(seconds = self.cfg.getint("cron-keepalive", 0))
- if not self.cron_keepalive:
- self.cron_keepalive = self.cron_period * 4
- self.cron_timeout = None
-
- self.start_cron()
-
- rpki.http.server(
- host = self.http_server_host,
- port = self.http_server_port,
- handlers = (("/left-right", self.left_right_handler),
- ("/up-down/", self.up_down_handler, rpki.up_down.allowed_content_types),
- ("/cronjob", self.cronjob_handler)))
-
- def start_cron(self):
- """
- Start clock for rpkid's internal cron process.
- """
-
- if self.use_internal_cron:
- self.cron_timer = rpki.async.timer(handler = self.cron)
- when = rpki.sundial.now() + rpki.sundial.timedelta(seconds = self.initial_delay)
- logger.debug("Scheduling initial cron pass at %s", when)
- self.cron_timer.set(when)
- else:
- logger.debug("Not using internal clock, start_cron() call ignored")
-
- def irdb_query(self, callback, errback, *q_pdus, **kwargs):
- """
- Perform an IRDB callback query.
- """
-
- try:
- q_types = tuple(type(q_pdu) for q_pdu in q_pdus)
-
- expected_pdu_count = kwargs.pop("expected_pdu_count", None)
- assert len(kwargs) == 0
-
- q_msg = rpki.left_right.msg.query()
- q_msg.extend(q_pdus)
- q_der = rpki.left_right.cms_msg().wrap(q_msg, self.rpkid_key, self.rpkid_cert)
-
- def unwrap(r_der):
- try:
- r_cms = rpki.left_right.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap((self.bpki_ta, self.irdb_cert))
- self.irdbd_cms_timestamp = r_cms.check_replay(self.irdbd_cms_timestamp, self.irdb_url)
- if not r_msg.is_reply() or not all(type(r_pdu) in q_types for r_pdu in r_msg):
- raise rpki.exceptions.BadIRDBReply(
- "Unexpected response to IRDB query: %s" % r_cms.pretty_print_content())
- if expected_pdu_count is not None and len(r_msg) != expected_pdu_count:
- assert isinstance(expected_pdu_count, (int, long))
- raise rpki.exceptions.BadIRDBReply(
- "Expected exactly %d PDU%s from IRDB: %s" % (
- expected_pdu_count, "" if expected_pdu_count == 1 else "s",
- r_cms.pretty_print_content()))
- callback(r_msg)
- except Exception, e:
- errback(e)
-
- rpki.http.client(
- url = self.irdb_url,
- msg = q_der,
- callback = unwrap,
- errback = errback)
-
- except Exception, e:
- errback(e)
-
-
- def irdb_query_child_resources(self, self_handle, child_handle, callback, errback):
- """
- Ask IRDB about a child's resources.
- """
-
- q_pdu = rpki.left_right.list_resources_elt()
- q_pdu.self_handle = self_handle
- q_pdu.child_handle = child_handle
-
- def done(r_msg):
- callback(rpki.resource_set.resource_bag(
- asn = r_msg[0].asn,
- v4 = r_msg[0].ipv4,
- v6 = r_msg[0].ipv6,
- valid_until = r_msg[0].valid_until))
-
- self.irdb_query(done, errback, q_pdu, expected_pdu_count = 1)
-
- def irdb_query_roa_requests(self, self_handle, callback, errback):
- """
- Ask IRDB about self's ROA requests.
- """
-
- q_pdu = rpki.left_right.list_roa_requests_elt()
- q_pdu.self_handle = self_handle
-
- self.irdb_query(callback, errback, q_pdu)
-
- def irdb_query_ghostbuster_requests(self, self_handle, parent_handles, callback, errback):
- """
- Ask IRDB about self's ghostbuster record requests.
- """
-
- q_pdus = []
-
- for parent_handle in parent_handles:
- q_pdu = rpki.left_right.list_ghostbuster_requests_elt()
- q_pdu.self_handle = self_handle
- q_pdu.parent_handle = parent_handle
- q_pdus.append(q_pdu)
-
- self.irdb_query(callback, errback, *q_pdus)
-
- def irdb_query_ee_certificate_requests(self, self_handle, callback, errback):
- """
- Ask IRDB about self's EE certificate requests.
- """
-
- q_pdu = rpki.left_right.list_ee_certificate_requests_elt()
- q_pdu.self_handle = self_handle
-
- self.irdb_query(callback, errback, q_pdu)
-
- def left_right_handler(self, query, path, cb):
- """
- Process one left-right PDU.
- """
-
- def done(r_msg):
- reply = rpki.left_right.cms_msg().wrap(r_msg, self.rpkid_key, self.rpkid_cert)
- self.sql.sweep()
- cb(200, body = reply)
-
- try:
- q_cms = rpki.left_right.cms_msg(DER = query)
- q_msg = q_cms.unwrap((self.bpki_ta, self.irbe_cert))
- self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, path)
- if not q_msg.is_query():
- raise rpki.exceptions.BadQuery("Message type is not query")
- q_msg.serve_top_level(self, done)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Unhandled exception serving left-right request")
- cb(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
-
- up_down_url_regexp = re.compile("/up-down/([-A-Z0-9_]+)/([-A-Z0-9_]+)$", re.I)
-
- def up_down_handler(self, query, path, cb):
- """
- Process one up-down PDU.
- """
-
- def done(reply):
- self.sql.sweep()
- cb(200, body = reply)
-
- try:
- match = self.up_down_url_regexp.search(path)
- if match is None:
- raise rpki.exceptions.BadContactURL("Bad URL path received in up_down_handler(): %s" % path)
- self_handle, child_handle = match.groups()
- child = rpki.left_right.child_elt.sql_fetch_where1(self,
- "self.self_handle = %s AND child.child_handle = %s AND child.self_id = self.self_id",
- (self_handle, child_handle),
- "self")
- if child is None:
- raise rpki.exceptions.ChildNotFound("Could not find child %s of self %s in up_down_handler()" % (child_handle, self_handle))
- child.serve_up_down(query, done)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except (rpki.exceptions.ChildNotFound, rpki.exceptions.BadContactURL), e:
- logger.warning(str(e))
- cb(400, reason = str(e))
- except Exception, e:
- logger.exception("Unhandled exception processing up-down request")
- cb(400, reason = "Could not process PDU: %s" % e)
-
- def checkpoint(self, force = False):
- """
- Record that we were still alive when we got here, by resetting
- keepalive timer.
- """
- if force or self.cron_timeout is not None:
- self.cron_timeout = rpki.sundial.now() + self.cron_keepalive
-
- def task_add(self, task):
- """
- Add a task to the scheduler task queue, unless it's already queued.
- """
- if task not in self.task_queue:
- logger.debug("Adding %r to task queue", task)
- self.task_queue.append(task)
- return True
- else:
- logger.debug("Task %r was already in the task queue", task)
- return False
-
- def task_next(self):
- """
- Pull next task from the task queue and put it the deferred event
- queue (we don't want to run it directly, as that could eventually
- blow out our call stack).
- """
- try:
- self.task_current = self.task_queue.pop(0)
- except IndexError:
- self.task_current = None
- else:
- rpki.async.event_defer(self.task_current)
-
- def task_run(self):
- """
- Run first task on the task queue, unless one is running already.
- """
- if self.task_current is None:
- self.task_next()
-
- def cron(self, cb = None):
- """
- Periodic tasks.
- """
-
- now = rpki.sundial.now()
-
- logger.debug("Starting cron run")
-
- def done():
- self.sql.sweep()
- self.cron_timeout = None
- logger.info("Finished cron run started at %s", now)
- if cb is not None:
- cb()
-
- completion = rpki.rpkid_tasks.CompletionHandler(done)
- try:
- selves = rpki.left_right.self_elt.sql_fetch_all(self)
- except Exception:
- logger.exception("Error pulling self_elts from SQL, maybe SQL server is down?")
- else:
- for s in selves:
- s.schedule_cron_tasks(completion)
- nothing_queued = completion.count == 0
-
- assert self.use_internal_cron or self.cron_timeout is None
-
- if self.cron_timeout is not None and self.cron_timeout < now:
- logger.warning("cron keepalive threshold %s has expired, breaking lock", self.cron_timeout)
- self.cron_timeout = None
-
- if self.use_internal_cron:
- when = now + self.cron_period
- logger.debug("Scheduling next cron run at %s", when)
- self.cron_timer.set(when)
-
- if self.cron_timeout is None:
- self.checkpoint(self.use_internal_cron)
- self.task_run()
-
- elif self.use_internal_cron:
- logger.warning("cron already running, keepalive will expire at %s", self.cron_timeout)
-
- if nothing_queued:
- done()
-
- def cronjob_handler(self, query, path, cb):
- """
- External trigger for periodic tasks. This is somewhat obsolete
- now that we have internal timers, but the test framework still
- uses it.
- """
-
- def done():
- cb(200, body = "OK")
-
- if self.use_internal_cron:
- cb(500, reason = "Running cron internally")
- else:
- logger.debug("Starting externally triggered cron")
- self.cron(done)
-
-class ca_obj(rpki.sql.sql_persistent):
- """
- Internal CA object.
- """
-
- sql_template = rpki.sql.template(
- "ca",
- "ca_id",
- "last_crl_sn",
- ("next_crl_update", rpki.sundial.datetime),
- "last_issued_sn",
- "last_manifest_sn",
- ("next_manifest_update", rpki.sundial.datetime),
- "sia_uri",
- "parent_id",
- "parent_resource_class")
-
- last_crl_sn = 0
- last_issued_sn = 0
- last_manifest_sn = 0
-
- def __repr__(self):
- return rpki.log.log_repr(self, repr(self.parent), self.parent_resource_class)
-
- @property
- @rpki.sql.cache_reference
- def parent(self):
- """
- Fetch parent object to which this CA object links.
- """
- return rpki.left_right.parent_elt.sql_fetch(self.gctx, self.parent_id)
-
- @property
- def ca_details(self):
- """
- Fetch all ca_detail objects that link to this CA object.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s", (self.ca_id,))
-
- @property
- def pending_ca_details(self):
- """
- Fetch the pending ca_details for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'pending'", (self.ca_id,))
-
- @property
- def active_ca_detail(self):
- """
- Fetch the active ca_detail for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where1(self.gctx, "ca_id = %s AND state = 'active'", (self.ca_id,))
-
- @property
- def deprecated_ca_details(self):
- """
- Fetch deprecated ca_details for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'deprecated'", (self.ca_id,))
-
- @property
- def active_or_deprecated_ca_details(self):
- """
- Fetch active and deprecated ca_details for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND (state = 'active' OR state = 'deprecated')", (self.ca_id,))
-
- @property
- def revoked_ca_details(self):
- """
- Fetch revoked ca_details for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'revoked'", (self.ca_id,))
-
- @property
- def issue_response_candidate_ca_details(self):
- """
- Fetch ca_details which are candidates for consideration when
- processing an up-down issue_response PDU.
- """
- #return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND latest_ca_cert IS NOT NULL AND state != 'revoked'", (self.ca_id,))
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state != 'revoked'", (self.ca_id,))
-
- def construct_sia_uri(self, parent, rc):
- """
- Construct the sia_uri value for this CA given configured
- information and the parent's up-down protocol list_response PDU.
- """
-
- sia_uri = rc.suggested_sia_head and rc.suggested_sia_head.rsync()
- if not sia_uri or not sia_uri.startswith(parent.sia_base):
- sia_uri = parent.sia_base
- if not sia_uri.endswith("/"):
- raise rpki.exceptions.BadURISyntax("SIA URI must end with a slash: %s" % sia_uri)
- # With luck this can go away sometime soon.
- if self.gctx.merge_publication_directories:
- return sia_uri
- else:
- return sia_uri + str(self.ca_id) + "/"
-
- def check_for_updates(self, parent, rc, cb, eb):
- """
- Parent has signaled continued existance of a resource class we
- already knew about, so we need to check for an updated
- certificate, changes in resource coverage, revocation and reissue
- with the same key, etc.
- """
-
- sia_uri = self.construct_sia_uri(parent, rc)
- sia_uri_changed = self.sia_uri != sia_uri
- if sia_uri_changed:
- logger.debug("SIA changed: was %s now %s", self.sia_uri, sia_uri)
- self.sia_uri = sia_uri
- self.sql_mark_dirty()
-
- rc_resources = rc.to_resource_bag()
- cert_map = dict((c.cert.get_SKI(), c) for c in rc.certs)
-
- def loop(iterator, ca_detail):
-
- self.gctx.checkpoint()
-
- rc_cert = cert_map.pop(ca_detail.public_key.get_SKI(), None)
-
- if rc_cert is None:
-
- logger.warning("SKI %s in resource class %s is in database but missing from list_response to %s from %s, maybe parent certificate went away?",
- ca_detail.public_key.gSKI(), rc.class_name, parent.self.self_handle, parent.parent_handle)
- publisher = publication_queue()
- ca_detail.delete(ca = ca_detail.ca, publisher = publisher)
- return publisher.call_pubd(iterator, eb)
-
- else:
-
- if ca_detail.state == "active" and ca_detail.ca_cert_uri != rc_cert.cert_url.rsync():
- logger.debug("AIA changed: was %s now %s", ca_detail.ca_cert_uri, rc_cert.cert_url.rsync())
- ca_detail.ca_cert_uri = rc_cert.cert_url.rsync()
- ca_detail.sql_mark_dirty()
-
- if ca_detail.state in ("pending", "active"):
-
- if ca_detail.state == "pending":
- current_resources = rpki.resource_set.resource_bag()
- else:
- current_resources = ca_detail.latest_ca_cert.get_3779resources()
-
- if (ca_detail.state == "pending" or
- sia_uri_changed or
- ca_detail.latest_ca_cert != rc_cert.cert or
- ca_detail.latest_ca_cert.getNotAfter() != rc_resources.valid_until or
- current_resources.undersized(rc_resources) or
- current_resources.oversized(rc_resources)):
- return ca_detail.update(
- parent = parent,
- ca = self,
- rc = rc,
- sia_uri_changed = sia_uri_changed,
- old_resources = current_resources,
- callback = iterator,
- errback = eb)
-
- iterator()
-
- def done():
- if cert_map:
- logger.warning("Unknown certificate SKI%s %s in resource class %s in list_response to %s from %s, maybe you want to \"revoke_forgotten\"?",
- "" if len(cert_map) == 1 else "s",
- ", ".join(c.cert.gSKI() for c in cert_map.values()),
- rc.class_name, parent.self.self_handle, parent.parent_handle)
- self.gctx.sql.sweep()
- self.gctx.checkpoint()
- cb()
-
- ca_details = self.issue_response_candidate_ca_details
-
- if True:
- skis_parent = set(x.cert.gSKI()
- for x in cert_map.itervalues())
- skis_me = set(x.latest_ca_cert.gSKI()
- for x in ca_details
- if x.latest_ca_cert is not None)
- for ski in skis_parent & skis_me:
- logger.debug("Parent %s agrees that %s has SKI %s in resource class %s",
- parent.parent_handle, parent.self.self_handle, ski, rc.class_name)
- for ski in skis_parent - skis_me:
- logger.debug("Parent %s thinks %s has SKI %s in resource class %s but I don't think so",
- parent.parent_handle, parent.self.self_handle, ski, rc.class_name)
- for ski in skis_me - skis_parent:
- logger.debug("I think %s has SKI %s in resource class %s but parent %s doesn't think so",
- parent.self.self_handle, ski, rc.class_name, parent.parent_handle)
-
- if ca_details:
- rpki.async.iterator(ca_details, loop, done)
- else:
- logger.warning("Existing resource class %s to %s from %s with no certificates, rekeying",
- rc.class_name, parent.self.self_handle, parent.parent_handle)
- self.gctx.checkpoint()
- self.rekey(cb, eb)
-
- @classmethod
- def create(cls, parent, rc, cb, eb):
- """
- Parent has signaled existance of a new resource class, so we need
- to create and set up a corresponding CA object.
- """
-
- self = cls()
- self.gctx = parent.gctx
- self.parent_id = parent.parent_id
- self.parent_resource_class = rc.class_name
- self.sql_store()
- try:
- self.sia_uri = self.construct_sia_uri(parent, rc)
- except rpki.exceptions.BadURISyntax:
- self.sql_delete()
- raise
- ca_detail = ca_detail_obj.create(self)
-
- def done(issue_response):
- c = issue_response.payload.classes[0].certs[0]
- logger.debug("CA %r received certificate %s", self, c.cert_url)
- ca_detail.activate(
- ca = self,
- cert = c.cert,
- uri = c.cert_url,
- callback = cb,
- errback = eb)
-
- logger.debug("Sending issue request to %r from %r", parent, self.create)
- rpki.up_down.issue_pdu.query(parent, self, ca_detail, done, eb)
-
- def delete(self, parent, callback):
- """
- The list of current resource classes received from parent does not
- include the class corresponding to this CA, so we need to delete
- it (and its little dog too...).
-
- All certs published by this CA are now invalid, so need to
- withdraw them, the CRL, and the manifest from the repository,
- delete all child_cert and ca_detail records associated with this
- CA, then finally delete this CA itself.
- """
-
- def lose(e):
- logger.exception("Could not delete CA %r, skipping", self)
- callback()
-
- def done():
- logger.debug("Deleting %r", self)
- self.sql_delete()
- callback()
-
- publisher = publication_queue()
- for ca_detail in self.ca_details:
- ca_detail.delete(ca = self, publisher = publisher, allow_failure = True)
- publisher.call_pubd(done, lose)
-
- def next_serial_number(self):
- """
- Allocate a certificate serial number.
- """
- self.last_issued_sn += 1
- self.sql_mark_dirty()
- return self.last_issued_sn
-
- def next_manifest_number(self):
- """
- Allocate a manifest serial number.
- """
- self.last_manifest_sn += 1
- self.sql_mark_dirty()
- return self.last_manifest_sn
-
- def next_crl_number(self):
- """
- Allocate a CRL serial number.
- """
- self.last_crl_sn += 1
- self.sql_mark_dirty()
- return self.last_crl_sn
-
- def rekey(self, cb, eb):
- """
- Initiate a rekey operation for this ca. Generate a new keypair.
- Request cert from parent using new keypair. Mark result as our
- active ca_detail. Reissue all child certs issued by this ca using
- the new ca_detail.
- """
-
- parent = self.parent
- old_detail = self.active_ca_detail
- new_detail = ca_detail_obj.create(self)
-
- def done(issue_response):
- c = issue_response.payload.classes[0].certs[0]
- logger.debug("CA %r received certificate %s", self, c.cert_url)
- new_detail.activate(
- ca = self,
- cert = c.cert,
- uri = c.cert_url,
- predecessor = old_detail,
- callback = cb,
- errback = eb)
-
- logger.debug("Sending issue request to %r from %r", parent, self.rekey)
- rpki.up_down.issue_pdu.query(parent, self, new_detail, done, eb)
-
- def revoke(self, cb, eb, revoke_all = False):
- """
- Revoke deprecated ca_detail objects associated with this CA, or
- all ca_details associated with this CA if revoke_all is set.
- """
-
- def loop(iterator, ca_detail):
- ca_detail.revoke(cb = iterator, eb = eb)
-
- ca_details = self.ca_details if revoke_all else self.deprecated_ca_details
-
- rpki.async.iterator(ca_details, loop, cb)
-
- def reissue(self, cb, eb):
- """
- Reissue all current certificates issued by this CA.
- """
-
- ca_detail = self.active_ca_detail
- if ca_detail:
- ca_detail.reissue(cb, eb)
- else:
- cb()
-
-class ca_detail_obj(rpki.sql.sql_persistent):
- """
- Internal CA detail object.
- """
-
- sql_template = rpki.sql.template(
- "ca_detail",
- "ca_detail_id",
- ("private_key_id", rpki.x509.RSA),
- ("public_key", rpki.x509.PublicKey),
- ("latest_ca_cert", rpki.x509.X509),
- ("manifest_private_key_id", rpki.x509.RSA),
- ("manifest_public_key", rpki.x509.PublicKey),
- ("latest_manifest_cert", rpki.x509.X509),
- ("latest_manifest", rpki.x509.SignedManifest),
- ("latest_crl", rpki.x509.CRL),
- ("crl_published", rpki.sundial.datetime),
- ("manifest_published", rpki.sundial.datetime),
- "state",
- "ca_cert_uri",
- "ca_id")
-
- crl_published = None
- manifest_published = None
- latest_ca_cert = None
- latest_crl = None
- latest_manifest = None
- ca_cert_uri = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, repr(self.ca), self.state, self.ca_cert_uri)
-
- def sql_decode(self, vals):
- """
- Extra assertions for SQL decode of a ca_detail_obj.
- """
- rpki.sql.sql_persistent.sql_decode(self, vals)
- assert self.public_key is None or self.private_key_id is None or self.public_key.get_DER() == self.private_key_id.get_public_DER()
- assert self.manifest_public_key is None or self.manifest_private_key_id is None or self.manifest_public_key.get_DER() == self.manifest_private_key_id.get_public_DER()
-
- @property
- @rpki.sql.cache_reference
- def ca(self):
- """
- Fetch CA object to which this ca_detail links.
- """
- return ca_obj.sql_fetch(self.gctx, self.ca_id)
-
- def fetch_child_certs(self, child = None, ski = None, unique = False, unpublished = None):
- """
- Fetch all child_cert objects that link to this ca_detail.
- """
- return rpki.rpkid.child_cert_obj.fetch(self.gctx, child, self, ski, unique, unpublished)
-
- @property
- def child_certs(self):
- """
- Fetch all child_cert objects that link to this ca_detail.
- """
- return self.fetch_child_certs()
-
- def unpublished_child_certs(self, when):
- """
- Fetch all unpublished child_cert objects linked to this ca_detail
- with attempted publication dates older than when.
- """
- return self.fetch_child_certs(unpublished = when)
-
- @property
- def revoked_certs(self):
- """
- Fetch all revoked_cert objects that link to this ca_detail.
- """
- return revoked_cert_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
-
- @property
- def roas(self):
- """
- Fetch all ROA objects that link to this ca_detail.
- """
- return rpki.rpkid.roa_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
-
- def unpublished_roas(self, when):
- """
- Fetch all unpublished ROA objects linked to this ca_detail with
- attempted publication dates older than when.
- """
- return rpki.rpkid.roa_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s AND published IS NOT NULL and published < %s", (self.ca_detail_id, when))
-
- @property
- def ghostbusters(self):
- """
- Fetch all Ghostbuster objects that link to this ca_detail.
- """
- return rpki.rpkid.ghostbuster_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
-
- @property
- def ee_certificates(self):
- """
- Fetch all EE certificate objects that link to this ca_detail.
- """
- return rpki.rpkid.ee_cert_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
-
- def unpublished_ghostbusters(self, when):
- """
- Fetch all unpublished Ghostbusters objects linked to this
- ca_detail with attempted publication dates older than when.
- """
- return rpki.rpkid.ghostbuster_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s AND published IS NOT NULL and published < %s", (self.ca_detail_id, when))
-
- @property
- def crl_uri(self):
- """
- Return publication URI for this ca_detail's CRL.
- """
- return self.ca.sia_uri + self.crl_uri_tail
-
- @property
- def crl_uri_tail(self):
- """
- Return tail (filename portion) of publication URI for this ca_detail's CRL.
- """
- return self.public_key.gSKI() + ".crl"
-
- @property
- def manifest_uri(self):
- """
- Return publication URI for this ca_detail's manifest.
- """
- return self.ca.sia_uri + self.public_key.gSKI() + ".mft"
-
- def has_expired(self):
- """
- Return whether this ca_detail's certificate has expired.
- """
- return self.latest_ca_cert.getNotAfter() <= rpki.sundial.now()
-
- def covers(self, target):
- """
- Test whether this ca-detail covers a given set of resources.
- """
-
- assert not target.asn.inherit and not target.v4.inherit and not target.v6.inherit
- me = self.latest_ca_cert.get_3779resources()
- return target.asn <= me.asn and target.v4 <= me.v4 and target.v6 <= me.v6
-
- def activate(self, ca, cert, uri, callback, errback, predecessor = None):
- """
- Activate this ca_detail.
- """
-
- publisher = publication_queue()
-
- self.latest_ca_cert = cert
- self.ca_cert_uri = uri.rsync()
- self.generate_manifest_cert()
- self.state = "active"
- self.generate_crl(publisher = publisher)
- self.generate_manifest(publisher = publisher)
- self.sql_store()
-
- if predecessor is not None:
- predecessor.state = "deprecated"
- predecessor.sql_store()
- for child_cert in predecessor.child_certs:
- child_cert.reissue(ca_detail = self, publisher = publisher)
- for roa in predecessor.roas:
- roa.regenerate(publisher = publisher)
- for ghostbuster in predecessor.ghostbusters:
- ghostbuster.regenerate(publisher = publisher)
- predecessor.generate_crl(publisher = publisher)
- predecessor.generate_manifest(publisher = publisher)
-
- publisher.call_pubd(callback, errback)
-
- def delete(self, ca, publisher, allow_failure = False):
- """
- Delete this ca_detail and all of the certs it issued.
-
- If allow_failure is true, we clean up as much as we can but don't
- raise an exception.
- """
-
- repository = ca.parent.repository
- handler = False if allow_failure else None
- for child_cert in self.child_certs:
- publisher.withdraw(cls = rpki.publication.certificate_elt,
- uri = child_cert.uri,
- obj = child_cert.cert,
- repository = repository,
- handler = handler)
- child_cert.sql_mark_deleted()
- for roa in self.roas:
- roa.revoke(publisher = publisher, allow_failure = allow_failure, fast = True)
- for ghostbuster in self.ghostbusters:
- ghostbuster.revoke(publisher = publisher, allow_failure = allow_failure, fast = True)
- try:
- latest_manifest = self.latest_manifest
- except AttributeError:
- latest_manifest = None
- if latest_manifest is not None:
- publisher.withdraw(cls = rpki.publication.manifest_elt,
- uri = self.manifest_uri,
- obj = self.latest_manifest,
- repository = repository,
- handler = handler)
- try:
- latest_crl = self.latest_crl
- except AttributeError:
- latest_crl = None
- if latest_crl is not None:
- publisher.withdraw(cls = rpki.publication.crl_elt,
- uri = self.crl_uri,
- obj = self.latest_crl,
- repository = repository,
- handler = handler)
- self.gctx.sql.sweep()
- for cert in self.revoked_certs: # + self.child_certs
- logger.debug("Deleting %r", cert)
- cert.sql_delete()
- logger.debug("Deleting %r", self)
- self.sql_delete()
-
- def revoke(self, cb, eb):
- """
- Request revocation of all certificates whose SKI matches the key
- for this ca_detail.
-
- Tasks:
-
- - Request revocation of old keypair by parent.
-
- - Revoke all child certs issued by the old keypair.
-
- - Generate a final CRL, signed with the old keypair, listing all
- the revoked certs, with a next CRL time after the last cert or
- CRL signed by the old keypair will have expired.
-
- - Generate a corresponding final manifest.
-
- - Destroy old keypairs.
-
- - Leave final CRL and manifest in place until their nextupdate
- time has passed.
- """
-
- ca = self.ca
- parent = ca.parent
-
- def parent_revoked(r_msg):
-
- if r_msg.payload.ski != self.latest_ca_cert.gSKI():
- raise rpki.exceptions.SKIMismatch
-
- logger.debug("Parent revoked %s, starting cleanup", self.latest_ca_cert.gSKI())
-
- crl_interval = rpki.sundial.timedelta(seconds = parent.self.crl_interval)
-
- nextUpdate = rpki.sundial.now()
-
- if self.latest_manifest is not None:
- self.latest_manifest.extract_if_needed()
- nextUpdate = nextUpdate.later(self.latest_manifest.getNextUpdate())
-
- if self.latest_crl is not None:
- nextUpdate = nextUpdate.later(self.latest_crl.getNextUpdate())
-
- publisher = publication_queue()
-
- for child_cert in self.child_certs:
- nextUpdate = nextUpdate.later(child_cert.cert.getNotAfter())
- child_cert.revoke(publisher = publisher)
-
- for roa in self.roas:
- nextUpdate = nextUpdate.later(roa.cert.getNotAfter())
- roa.revoke(publisher = publisher)
-
- for ghostbuster in self.ghostbusters:
- nextUpdate = nextUpdate.later(ghostbuster.cert.getNotAfter())
- ghostbuster.revoke(publisher = publisher)
-
- nextUpdate += crl_interval
- self.generate_crl(publisher = publisher, nextUpdate = nextUpdate)
- self.generate_manifest(publisher = publisher, nextUpdate = nextUpdate)
- self.private_key_id = None
- self.manifest_private_key_id = None
- self.manifest_public_key = None
- self.latest_manifest_cert = None
- self.state = "revoked"
- self.sql_mark_dirty()
- publisher.call_pubd(cb, eb)
-
- logger.debug("Asking parent to revoke CA certificate %s", self.latest_ca_cert.gSKI())
- rpki.up_down.revoke_pdu.query(ca, self.latest_ca_cert.gSKI(), parent_revoked, eb)
- def update(self, parent, ca, rc, sia_uri_changed, old_resources, callback, errback):
- """
- Need to get a new certificate for this ca_detail and perhaps frob
- children of this ca_detail.
- """
-
- def issued(issue_response):
- c = issue_response.payload.classes[0].certs[0]
- logger.debug("CA %r received certificate %s", self, c.cert_url)
-
- if self.state == "pending":
- return self.activate(
- ca = ca,
- cert = c.cert,
- uri = c.cert_url,
- callback = callback,
- errback = errback)
-
- validity_changed = self.latest_ca_cert is None or self.latest_ca_cert.getNotAfter() != c.cert.getNotAfter()
-
- publisher = publication_queue()
-
- if self.latest_ca_cert != c.cert:
- self.latest_ca_cert = c.cert
- self.sql_mark_dirty()
- self.generate_manifest_cert()
- self.generate_crl(publisher = publisher)
- self.generate_manifest(publisher = publisher)
-
- new_resources = self.latest_ca_cert.get_3779resources()
-
- if sia_uri_changed or old_resources.oversized(new_resources):
- for child_cert in self.child_certs:
- child_resources = child_cert.cert.get_3779resources()
- if sia_uri_changed or child_resources.oversized(new_resources):
- child_cert.reissue(
- ca_detail = self,
- resources = child_resources & new_resources,
- publisher = publisher)
-
- if sia_uri_changed or validity_changed or old_resources.oversized(new_resources):
- for roa in self.roas:
- roa.update(publisher = publisher, fast = True)
-
- if sia_uri_changed or validity_changed:
- for ghostbuster in self.ghostbusters:
- ghostbuster.update(publisher = publisher, fast = True)
-
- publisher.call_pubd(callback, errback)
-
- logger.debug("Sending issue request to %r from %r", parent, self.update)
- rpki.up_down.issue_pdu.query(parent, ca, self, issued, errback)
-
- @classmethod
- def create(cls, ca):
- """
- Create a new ca_detail object for a specified CA.
- """
- self = cls()
- self.gctx = ca.gctx
- self.ca_id = ca.ca_id
- self.state = "pending"
-
- self.private_key_id = rpki.x509.RSA.generate()
- self.public_key = self.private_key_id.get_public()
-
- self.manifest_private_key_id = rpki.x509.RSA.generate()
- self.manifest_public_key = self.manifest_private_key_id.get_public()
-
- self.sql_store()
- return self
-
- def issue_ee(self, ca, resources, subject_key, sia,
- cn = None, sn = None, notAfter = None, eku = None):
- """
- Issue a new EE certificate.
- """
-
- if notAfter is None:
- notAfter = self.latest_ca_cert.getNotAfter()
-
- return self.latest_ca_cert.issue(
- keypair = self.private_key_id,
- subject_key = subject_key,
- serial = ca.next_serial_number(),
- sia = sia,
- aia = self.ca_cert_uri,
- crldp = self.crl_uri,
- resources = resources,
- notAfter = notAfter,
- is_ca = False,
- cn = cn,
- sn = sn,
- eku = eku)
-
- def generate_manifest_cert(self):
- """
- Generate a new manifest certificate for this ca_detail.
- """
-
- resources = rpki.resource_set.resource_bag.from_inheritance()
- self.latest_manifest_cert = self.issue_ee(
- ca = self.ca,
- resources = resources,
- subject_key = self.manifest_public_key,
- sia = (None, None, self.manifest_uri))
-
- def issue(self, ca, child, subject_key, sia, resources, publisher, child_cert = None):
- """
- Issue a new certificate to a child. Optional child_cert argument
- specifies an existing child_cert object to update in place; if not
- specified, we create a new one. Returns the child_cert object
- containing the newly issued cert.
- """
-
- self.check_failed_publication(publisher)
-
- assert child_cert is None or child_cert.child_id == child.child_id
-
- cert = self.latest_ca_cert.issue(
- keypair = self.private_key_id,
- subject_key = subject_key,
- serial = ca.next_serial_number(),
- aia = self.ca_cert_uri,
- crldp = self.crl_uri,
- sia = sia,
- resources = resources,
- notAfter = resources.valid_until)
-
- if child_cert is None:
- child_cert = rpki.rpkid.child_cert_obj(
- gctx = child.gctx,
- child_id = child.child_id,
- ca_detail_id = self.ca_detail_id,
- cert = cert)
- logger.debug("Created new child_cert %r", child_cert)
- else:
- child_cert.cert = cert
- del child_cert.ca_detail
- child_cert.ca_detail_id = self.ca_detail_id
- logger.debug("Reusing existing child_cert %r", child_cert)
-
- child_cert.ski = cert.get_SKI()
- child_cert.published = rpki.sundial.now()
- child_cert.sql_store()
- publisher.publish(
- cls = rpki.publication.certificate_elt,
- uri = child_cert.uri,
- obj = child_cert.cert,
- repository = ca.parent.repository,
- handler = child_cert.published_callback)
- self.generate_manifest(publisher = publisher)
- return child_cert
-
- def generate_crl(self, publisher, nextUpdate = None):
- """
- Generate a new CRL for this ca_detail. At the moment this is
- unconditional, that is, it is up to the caller to decide whether a
- new CRL is needed.
- """
-
- self.check_failed_publication(publisher)
-
- ca = self.ca
- parent = ca.parent
- crl_interval = rpki.sundial.timedelta(seconds = parent.self.crl_interval)
- now = rpki.sundial.now()
-
- if nextUpdate is None:
- nextUpdate = now + crl_interval
-
- certlist = []
- for revoked_cert in self.revoked_certs:
- if now > revoked_cert.expires + crl_interval:
- revoked_cert.sql_delete()
- else:
- certlist.append((revoked_cert.serial, revoked_cert.revoked))
- certlist.sort()
-
- self.latest_crl = rpki.x509.CRL.generate(
- keypair = self.private_key_id,
- issuer = self.latest_ca_cert,
- serial = ca.next_crl_number(),
- thisUpdate = now,
- nextUpdate = nextUpdate,
- revokedCertificates = certlist)
-
- self.crl_published = rpki.sundial.now()
- self.sql_mark_dirty()
- publisher.publish(
- cls = rpki.publication.crl_elt,
- uri = self.crl_uri,
- obj = self.latest_crl,
- repository = parent.repository,
- handler = self.crl_published_callback)
-
- def crl_published_callback(self, pdu):
- """
- Check result of CRL publication.
- """
- pdu.raise_if_error()
- self.crl_published = None
- self.sql_mark_dirty()
-
- def generate_manifest(self, publisher, nextUpdate = None):
- """
- Generate a new manifest for this ca_detail.
- """
-
- self.check_failed_publication(publisher)
-
- ca = self.ca
- parent = ca.parent
- crl_interval = rpki.sundial.timedelta(seconds = parent.self.crl_interval)
- now = rpki.sundial.now()
- uri = self.manifest_uri
-
- if nextUpdate is None:
- nextUpdate = now + crl_interval
-
- if (self.latest_manifest_cert is None or
- (self.latest_manifest_cert.getNotAfter() < nextUpdate and
- self.latest_manifest_cert.getNotAfter() < self.latest_ca_cert.getNotAfter())):
- logger.debug("Generating EE certificate for %s", uri)
- self.generate_manifest_cert()
- logger.debug("Latest CA cert notAfter %s, new %s EE notAfter %s",
- self.latest_ca_cert.getNotAfter(), uri, self.latest_manifest_cert.getNotAfter())
-
- logger.debug("Constructing manifest object list for %s", uri)
- objs = [(self.crl_uri_tail, self.latest_crl)]
- objs.extend((c.uri_tail, c.cert) for c in self.child_certs)
- objs.extend((r.uri_tail, r.roa) for r in self.roas if r.roa is not None)
- objs.extend((g.uri_tail, g.ghostbuster) for g in self.ghostbusters)
- objs.extend((e.uri_tail, e.cert) for e in self.ee_certificates)
-
- logger.debug("Building manifest object %s", uri)
- self.latest_manifest = rpki.x509.SignedManifest.build(
- serial = ca.next_manifest_number(),
- thisUpdate = now,
- nextUpdate = nextUpdate,
- names_and_objs = objs,
- keypair = self.manifest_private_key_id,
- certs = self.latest_manifest_cert)
-
- logger.debug("Manifest generation took %s", rpki.sundial.now() - now)
-
- self.manifest_published = rpki.sundial.now()
- self.sql_mark_dirty()
- publisher.publish(cls = rpki.publication.manifest_elt,
- uri = uri,
- obj = self.latest_manifest,
- repository = parent.repository,
- handler = self.manifest_published_callback)
-
- def manifest_published_callback(self, pdu):
- """
- Check result of manifest publication.
- """
- pdu.raise_if_error()
- self.manifest_published = None
- self.sql_mark_dirty()
-
- def reissue(self, cb, eb):
- """
- Reissue all current certificates issued by this ca_detail.
- """
+import rpki.rpkid_tasks
- publisher = publication_queue()
- self.check_failed_publication(publisher)
- for roa in self.roas:
- roa.regenerate(publisher, fast = True)
- for ghostbuster in self.ghostbusters:
- ghostbuster.regenerate(publisher, fast = True)
- for ee_certificate in self.ee_certificates:
- ee_certificate.reissue(publisher, force = True)
- for child_cert in self.child_certs:
- child_cert.reissue(self, publisher, force = True)
- self.gctx.sql.sweep()
- self.generate_manifest_cert()
- self.sql_mark_dirty()
- self.generate_crl(publisher = publisher)
- self.generate_manifest(publisher = publisher)
- self.gctx.sql.sweep()
- publisher.call_pubd(cb, eb)
-
- def check_failed_publication(self, publisher, check_all = True):
- """
- Check for failed publication of objects issued by this ca_detail.
-
- All publishable objects have timestamp fields recording time of
- last attempted publication, and callback methods which clear these
- timestamps once publication has succeeded. Our task here is to
- look for objects issued by this ca_detail which have timestamps
- set (indicating that they have not been published) and for which
- the timestamps are not very recent (for some definition of very
- recent -- intent is to allow a bit of slack in case pubd is just
- being slow). In such cases, we want to retry publication.
-
- As an optimization, we can probably skip checking other products
- if manifest and CRL have been published, thus saving ourselves
- several complex SQL queries. Not sure yet whether this
- optimization is worthwhile.
-
- For the moment we check everything without optimization, because
- it simplifies testing.
-
- For the moment our definition of staleness is hardwired; this
- should become configurable.
- """
- logger.debug("Checking for failed publication for %r", self)
-
- stale = rpki.sundial.now() - rpki.sundial.timedelta(seconds = 60)
- repository = self.ca.parent.repository
-
- if self.latest_crl is not None and \
- self.crl_published is not None and \
- self.crl_published < stale:
- logger.debug("Retrying publication for %s", self.crl_uri)
- publisher.publish(cls = rpki.publication.crl_elt,
- uri = self.crl_uri,
- obj = self.latest_crl,
- repository = repository,
- handler = self.crl_published_callback)
-
- if self.latest_manifest is not None and \
- self.manifest_published is not None and \
- self.manifest_published < stale:
- logger.debug("Retrying publication for %s", self.manifest_uri)
- publisher.publish(cls = rpki.publication.manifest_elt,
- uri = self.manifest_uri,
- obj = self.latest_manifest,
- repository = repository,
- handler = self.manifest_published_callback)
-
- if not check_all:
- return
-
- # Might also be able to return here if manifest and CRL are up to
- # date, but let's avoid premature optimization
-
- for child_cert in self.unpublished_child_certs(stale):
- logger.debug("Retrying publication for %s", child_cert)
- publisher.publish(
- cls = rpki.publication.certificate_elt,
- uri = child_cert.uri,
- obj = child_cert.cert,
- repository = repository,
- handler = child_cert.published_callback)
-
- for roa in self.unpublished_roas(stale):
- logger.debug("Retrying publication for %s", roa)
- publisher.publish(
- cls = rpki.publication.roa_elt,
- uri = roa.uri,
- obj = roa.roa,
- repository = repository,
- handler = roa.published_callback)
-
- for ghostbuster in self.unpublished_ghostbusters(stale):
- logger.debug("Retrying publication for %s", ghostbuster)
- publisher.publish(
- cls = rpki.publication.ghostbuster_elt,
- uri = ghostbuster.uri,
- obj = ghostbuster.ghostbuster,
- repository = repository,
- handler = ghostbuster.published_callback)
-
-class child_cert_obj(rpki.sql.sql_persistent):
- """
- Certificate that has been issued to a child.
- """
-
- sql_template = rpki.sql.template(
- "child_cert",
- "child_cert_id",
- ("cert", rpki.x509.X509),
- "child_id",
- "ca_detail_id",
- "ski",
- ("published", rpki.sundial.datetime))
-
- def __repr__(self):
- args = [self]
- try:
- args.append(self.uri)
- except: # pylint: disable=W0702
- pass
- return rpki.log.log_repr(*args)
-
- def __init__(self, gctx = None, child_id = None, ca_detail_id = None, cert = None):
- """
- Initialize a child_cert_obj.
- """
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.child_id = child_id
- self.ca_detail_id = ca_detail_id
- self.cert = cert
- self.published = None
- if child_id or ca_detail_id or cert:
- self.sql_mark_dirty()
-
- @property
- @rpki.sql.cache_reference
- def child(self):
- """
- Fetch child object to which this child_cert object links.
- """
- return rpki.left_right.child_elt.sql_fetch(self.gctx, self.child_id)
-
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this child_cert object links.
- """
- return ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+logger = logging.getLogger(__name__)
- @ca_detail.deleter
- def ca_detail(self):
- try:
- del self._ca_detail
- except AttributeError:
- pass
- @property
- def uri_tail(self):
+class main(object):
"""
- Return the tail (filename) portion of the URI for this child_cert.
+ Main program for rpkid.
"""
- return self.cert.gSKI() + ".cer"
- @property
- def uri(self):
- """
- Return the publication URI for this child_cert.
- """
- return self.ca_detail.ca.sia_uri + self.uri_tail
+ def __init__(self):
- def revoke(self, publisher, generate_crl_and_manifest = True):
- """
- Revoke a child cert.
- """
+ os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.rpkid")
+ time.tzset()
- ca_detail = self.ca_detail
- ca = ca_detail.ca
- logger.debug("Revoking %r %r", self, self.uri)
- revoked_cert_obj.revoke(cert = self.cert, ca_detail = ca_detail)
- publisher.withdraw(
- cls = rpki.publication.certificate_elt,
- uri = self.uri,
- obj = self.cert,
- repository = ca.parent.repository)
- self.gctx.sql.sweep()
- self.sql_delete()
- if generate_crl_and_manifest:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
-
- def reissue(self, ca_detail, publisher, resources = None, sia = None, force = False):
- """
- Reissue an existing child cert, reusing the public key. If the
- child cert we would generate is identical to the one we already
- have, we just return the one we already have. If we have to
- revoke the old child cert when generating the new one, we have to
- generate a new child_cert_obj, so calling code that needs the
- updated child_cert_obj must use the return value from this method.
- """
+ self.irdbd_cms_timestamp = None
+ self.irbe_cms_timestamp = None
- ca = ca_detail.ca
- child = self.child
+ self.task_queue = tornado.queues.Queue()
+ self.task_ready = set()
- old_resources = self.cert.get_3779resources()
- old_sia = self.cert.get_SIA()
- old_aia = self.cert.get_AIA()[0]
- old_ca_detail = self.ca_detail
+ self.http_client_serialize = weakref.WeakValueDictionary()
- needed = False
+ self.cfg = rpki.config.argparser(section = "rpkid", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground",
+ default = False,
+ help = "whether to daemonize")
+ self.cfg.add_argument("--pidfile",
+ default = os.path.join(rpki.daemonize.default_pid_directory,
+ "rpkid.pid"),
+ help = "override default location of pid file")
+ self.cfg.add_argument("--profile",
+ default = "",
+ help = "enable profiling, saving data to PROFILE")
+ self.cfg.add_logging_arguments()
+ args = self.cfg.argparser.parse_args()
- if resources is None:
- resources = old_resources
+ self.cfg.configure_logging(args = args, ident = "rpkid")
- if sia is None:
- sia = old_sia
+ self.profile = args.profile
- assert resources.valid_until is not None and old_resources.valid_until is not None
+ try:
+ self.cfg.set_global_flags()
- if resources.asn != old_resources.asn or resources.v4 != old_resources.v4 or resources.v6 != old_resources.v6:
- logger.debug("Resources changed for %r: old %s new %s", self, old_resources, resources)
- needed = True
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
- if resources.valid_until != old_resources.valid_until:
- logger.debug("Validity changed for %r: old %s new %s",
- self, old_resources.valid_until, resources.valid_until)
- needed = True
+ if self.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(self.main)
+ finally:
+ prof.dump_stats(self.profile)
+ logger.info("Dumped profile data to %s", self.profile)
+ else:
+ self.main()
+ except:
+            logger.exception("Unhandled exception in rpki.rpkid.main()")
+ sys.exit(1)
- if sia != old_sia:
- logger.debug("SIA changed for %r: old %r new %r", self, old_sia, sia)
- needed = True
- if ca_detail != old_ca_detail:
- logger.debug("Issuer changed for %r: old %r new %r", self, old_ca_detail, ca_detail)
- needed = True
+ def main(self):
- if ca_detail.ca_cert_uri != old_aia:
- logger.debug("AIA changed for %r: old %r new %r", self, old_aia, ca_detail.ca_cert_uri)
- needed = True
+ startup_msg = self.cfg.get("startup-message", "")
+ if startup_msg:
+ logger.info(startup_msg)
- must_revoke = old_resources.oversized(resources) or old_resources.valid_until > resources.valid_until
- if must_revoke:
- logger.debug("Must revoke any existing cert(s) for %r", self)
- needed = True
+ if self.profile:
+ logger.info("Running in profile mode with output to %s", self.profile)
- if not needed and force:
- logger.debug("No change needed for %r, forcing reissuance anyway", self)
- needed = True
+ logger.debug("Initializing Django")
+ import django
+ django.setup()
- if not needed:
- logger.debug("No change to %r", self)
- return self
+ logger.debug("Initializing rpkidb...")
+ global rpki # pylint: disable=W0602
+ import rpki.rpkidb # pylint: disable=W0621
- if must_revoke:
- for x in child.fetch_child_certs(ca_detail = ca_detail, ski = self.ski):
- logger.debug("Revoking child_cert %r", x)
- x.revoke(publisher = publisher)
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
+ logger.debug("Initializing rpkidb...done")
- child_cert = ca_detail.issue(
- ca = ca,
- child = child,
- subject_key = self.cert.getPublicKey(),
- sia = sia,
- resources = resources,
- child_cert = None if must_revoke else self,
- publisher = publisher)
+ self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
+ self.irdb_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdb-cert"))
+ self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert"))
+ self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert"))
+ self.rpkid_key = rpki.x509.RSA( Auto_update = self.cfg.get("rpkid-key"))
- logger.debug("New child_cert %r uri %s", child_cert, child_cert.uri)
+ self.irdb_url = self.cfg.get("irdb-url")
- return child_cert
+ self.http_server_host = self.cfg.get("server-host", "")
+ self.http_server_port = self.cfg.getint("server-port")
- @classmethod
- def fetch(cls, gctx = None, child = None, ca_detail = None, ski = None, unique = False, unpublished = None):
- """
- Fetch all child_cert objects matching a particular set of
- parameters. This is a wrapper to consolidate various queries that
- would otherwise be inline SQL WHERE expressions. In most cases
- code calls this indirectly, through methods in other classes.
- """
+ self.http_client_timeout = self.cfg.getint("http-client-timeout", 900)
- args = []
- where = []
+ self.use_internal_cron = self.cfg.getboolean("use-internal-cron", True)
- if child:
- where.append("child_id = %s")
- args.append(child.child_id)
+ self.initial_delay = random.randint(self.cfg.getint("initial-delay-min", 10),
+ self.cfg.getint("initial-delay-max", 120))
- if ca_detail:
- where.append("ca_detail_id = %s")
- args.append(ca_detail.ca_detail_id)
+ self.cron_period = self.cfg.getint("cron-period", 1800)
- if ski:
- where.append("ski = %s")
- args.append(ski)
+ if self.use_internal_cron:
+ logger.debug("Scheduling initial cron pass in %s seconds", self.initial_delay)
+ tornado.ioloop.IOLoop.current().spawn_callback(self.cron_loop)
- if unpublished is not None:
- where.append("published IS NOT NULL AND published < %s")
- args.append(unpublished)
+ logger.debug("Scheduling task loop")
+ tornado.ioloop.IOLoop.current().spawn_callback(self.task_loop)
- where = " AND ".join(where)
+ rpkid = self
- gctx = gctx or (child and child.gctx) or (ca_detail and ca_detail.gctx) or None
+ class LeftRightHandler(tornado.web.RequestHandler): # pylint: disable=W0223
+ @tornado.gen.coroutine
+ def post(self):
+ yield rpkid.left_right_handler(self)
- if unique:
- return cls.sql_fetch_where1(gctx, where, args)
- else:
- return cls.sql_fetch_where(gctx, where, args)
+ class UpDownHandler(tornado.web.RequestHandler): # pylint: disable=W0223
+ @tornado.gen.coroutine
+ def post(self, tenant_handle, child_handle): # pylint: disable=W0221
+ yield rpkid.up_down_handler(self, tenant_handle, child_handle)
- def published_callback(self, pdu):
- """
- Publication callback: check result and mark published.
- """
- pdu.raise_if_error()
- self.published = None
- self.sql_mark_dirty()
-
-class revoked_cert_obj(rpki.sql.sql_persistent):
- """
- Tombstone for a revoked certificate.
- """
-
- sql_template = rpki.sql.template(
- "revoked_cert",
- "revoked_cert_id",
- "serial",
- "ca_detail_id",
- ("revoked", rpki.sundial.datetime),
- ("expires", rpki.sundial.datetime))
-
- def __repr__(self):
- return rpki.log.log_repr(self, repr(self.ca_detail), self.serial, self.revoked)
-
- def __init__(self, gctx = None, serial = None, revoked = None, expires = None, ca_detail_id = None):
- """
- Initialize a revoked_cert_obj.
- """
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.serial = serial
- self.revoked = revoked
- self.expires = expires
- self.ca_detail_id = ca_detail_id
- if serial or revoked or expires or ca_detail_id:
- self.sql_mark_dirty()
-
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this revoked_cert_obj links.
- """
- return ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+ class CronjobHandler(tornado.web.RequestHandler): # pylint: disable=W0223
+ @tornado.gen.coroutine
+ def post(self):
+ yield rpkid.cronjob_handler(self)
- @classmethod
- def revoke(cls, cert, ca_detail):
- """
- Revoke a certificate.
- """
- return cls(
- serial = cert.getSerial(),
- expires = cert.getNotAfter(),
- revoked = rpki.sundial.now(),
- gctx = ca_detail.gctx,
- ca_detail_id = ca_detail.ca_detail_id)
-
-class roa_obj(rpki.sql.sql_persistent):
- """
- Route Origin Authorization.
- """
-
- sql_template = rpki.sql.template(
- "roa",
- "roa_id",
- "ca_detail_id",
- "self_id",
- "asn",
- ("roa", rpki.x509.ROA),
- ("cert", rpki.x509.X509),
- ("published", rpki.sundial.datetime))
-
- ca_detail_id = None
- cert = None
- roa = None
- published = None
-
- @property
- @rpki.sql.cache_reference
- def self(self):
- """
- Fetch self object to which this roa_obj links.
- """
- return rpki.left_right.self_elt.sql_fetch(self.gctx, self.self_id)
+ application = tornado.web.Application((
+ (r"/left-right", LeftRightHandler),
+ (r"/up-down/([-a-zA-Z0-9_]+)/([-a-zA-Z0-9_]+)", UpDownHandler),
+ (r"/cronjob", CronjobHandler)))
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this roa_obj links.
- """
- return rpki.rpkid.ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+ application.listen(
+ address = self.http_server_host,
+ port = self.http_server_port)
- @ca_detail.deleter
- def ca_detail(self):
- try:
- del self._ca_detail
- except AttributeError:
- pass
+ tornado.ioloop.IOLoop.current().start()
- def sql_fetch_hook(self):
- """
- Extra SQL fetch actions for roa_obj -- handle prefix lists.
- """
- for version, datatype, attribute in ((4, rpki.resource_set.roa_prefix_set_ipv4, "ipv4"),
- (6, rpki.resource_set.roa_prefix_set_ipv6, "ipv6")):
- setattr(self, attribute, datatype.from_sql(
- self.gctx.sql,
+ def task_add(self, *tasks):
+ """
+ Add tasks to the task queue.
"""
- SELECT prefix, prefixlen, max_prefixlen FROM roa_prefix
- WHERE roa_id = %s AND version = %s
- """,
- (self.roa_id, version)))
-
- def sql_insert_hook(self):
- """
- Extra SQL insert actions for roa_obj -- handle prefix lists.
- """
- for version, prefix_set in ((4, self.ipv4), (6, self.ipv6)):
- if prefix_set:
- self.gctx.sql.executemany(
- """
- INSERT roa_prefix (roa_id, prefix, prefixlen, max_prefixlen, version)
- VALUES (%s, %s, %s, %s, %s)
- """,
- ((self.roa_id, x.prefix, x.prefixlen, x.max_prefixlen, version)
- for x in prefix_set))
-
- def sql_delete_hook(self):
- """
- Extra SQL delete actions for roa_obj -- handle prefix lists.
- """
- self.gctx.sql.execute("DELETE FROM roa_prefix WHERE roa_id = %s", (self.roa_id,))
-
- def __repr__(self):
- args = [self, self.asn, self.ipv4, self.ipv6]
- try:
- args.append(self.uri)
- except: # pylint: disable=W0702
- pass
- return rpki.log.log_repr(*args)
-
- def __init__(self, gctx = None, self_id = None, asn = None, ipv4 = None, ipv6 = None):
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.self_id = self_id
- self.asn = asn
- self.ipv4 = ipv4
- self.ipv6 = ipv6
-
- # Defer marking new ROA as dirty until .generate() has a chance to
- # finish setup, otherwise we get SQL consistency errors.
- #
- #if self_id or asn or ipv4 or ipv6: self.sql_mark_dirty()
-
- def update(self, publisher, fast = False):
- """
- Bring this roa_obj's ROA up to date if necesssary.
- """
-
- v4 = self.ipv4.to_resource_set() if self.ipv4 is not None else rpki.resource_set.resource_set_ipv4()
- v6 = self.ipv6.to_resource_set() if self.ipv6 is not None else rpki.resource_set.resource_set_ipv6()
-
- if self.roa is None:
- logger.debug("%r doesn't exist, generating", self)
- return self.generate(publisher = publisher, fast = fast)
-
- ca_detail = self.ca_detail
-
- if ca_detail is None:
- logger.debug("%r has no associated ca_detail, generating", self)
- return self.generate(publisher = publisher, fast = fast)
-
- if ca_detail.state != "active":
- logger.debug("ca_detail associated with %r not active (state %s), regenerating", self, ca_detail.state)
- return self.regenerate(publisher = publisher, fast = fast)
-
- now = rpki.sundial.now()
- regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.self.regen_margin)
-
- if now > regen_time and self.cert.getNotAfter() < ca_detail.latest_ca_cert.getNotAfter():
- logger.debug("%r past threshold %s, regenerating", self, regen_time)
- return self.regenerate(publisher = publisher, fast = fast)
-
- if now > regen_time:
- logger.warning("%r is past threshold %s but so is issuer %r, can't regenerate", self, regen_time, ca_detail)
-
- ca_resources = ca_detail.latest_ca_cert.get_3779resources()
- ee_resources = self.cert.get_3779resources()
-
- if ee_resources.oversized(ca_resources):
- logger.debug("%r oversized with respect to CA, regenerating", self)
- return self.regenerate(publisher = publisher, fast = fast)
-
- if ee_resources.v4 != v4 or ee_resources.v6 != v6:
- logger.debug("%r resources do not match EE, regenerating", self)
- return self.regenerate(publisher = publisher, fast = fast)
-
- if self.cert.get_AIA()[0] != ca_detail.ca_cert_uri:
- logger.debug("%r AIA changed, regenerating", self)
- return self.regenerate(publisher = publisher, fast = fast)
-
- def generate(self, publisher, fast = False):
- """
- Generate a ROA.
-
- At present we have no way of performing a direct lookup from a
- desired set of resources to a covering certificate, so we have to
- search. This could be quite slow if we have a lot of active
- ca_detail objects. Punt on the issue for now, revisit if
- profiling shows this as a hotspot.
-
- Once we have the right covering certificate, we generate the ROA
- payload, generate a new EE certificate, use the EE certificate to
- sign the ROA payload, publish the result, then throw away the
- private key for the EE cert, all per the ROA specification. This
- implies that generating a lot of ROAs will tend to thrash
- /dev/random, but there is not much we can do about that.
-
- If fast is set, we leave generating the new manifest for our
- caller to handle, presumably at the end of a bulk operation.
- """
-
- if self.ipv4 is None and self.ipv6 is None:
- raise rpki.exceptions.EmptyROAPrefixList
-
- # Ugly and expensive search for covering ca_detail, there has to
- # be a better way, but it would require the ability to test for
- # resource subsets in SQL.
-
- v4 = self.ipv4.to_resource_set() if self.ipv4 is not None else rpki.resource_set.resource_set_ipv4()
- v6 = self.ipv6.to_resource_set() if self.ipv6 is not None else rpki.resource_set.resource_set_ipv6()
-
- ca_detail = self.ca_detail
- if ca_detail is None or ca_detail.state != "active" or ca_detail.has_expired():
- logger.debug("Searching for new ca_detail for ROA %r", self)
- ca_detail = None
- for parent in self.self.parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- assert ca_detail is None or ca_detail.state == "active"
- if ca_detail is not None and not ca_detail.has_expired():
- resources = ca_detail.latest_ca_cert.get_3779resources()
- if v4.issubset(resources.v4) and v6.issubset(resources.v6):
- break
- ca_detail = None
- if ca_detail is not None:
- break
- else:
- logger.debug("Keeping old ca_detail for ROA %r", self)
-
- if ca_detail is None:
- raise rpki.exceptions.NoCoveringCertForROA("Could not find a certificate covering %r" % self)
-
- logger.debug("Using new ca_detail %r for ROA %r, ca_detail_state %s",
- ca_detail, self, ca_detail.state)
-
- ca = ca_detail.ca
- resources = rpki.resource_set.resource_bag(v4 = v4, v6 = v6)
- keypair = rpki.x509.RSA.generate()
-
- del self.ca_detail
- self.ca_detail_id = ca_detail.ca_detail_id
- self.cert = ca_detail.issue_ee(
- ca = ca,
- resources = resources,
- subject_key = keypair.get_public(),
- sia = (None, None, self.uri_from_key(keypair)))
- self.roa = rpki.x509.ROA.build(self.asn, self.ipv4, self.ipv6, keypair, (self.cert,))
- self.published = rpki.sundial.now()
- self.sql_store()
-
- logger.debug("Generating %r URI %s", self, self.uri)
- publisher.publish(
- cls = rpki.publication.roa_elt,
- uri = self.uri,
- obj = self.roa,
- repository = ca.parent.repository,
- handler = self.published_callback)
- if not fast:
- ca_detail.generate_manifest(publisher = publisher)
-
-
- def published_callback(self, pdu):
- """
- Check publication result.
- """
- pdu.raise_if_error()
- self.published = None
- self.sql_mark_dirty()
-
- def revoke(self, publisher, regenerate = False, allow_failure = False, fast = False):
- """
- Withdraw ROA associated with this roa_obj.
-
- In order to preserve make-before-break properties without
- duplicating code, this method also handles generating a
- replacement ROA when requested.
-
- If allow_failure is set, failing to withdraw the ROA will not be
- considered an error.
-
- If fast is set, SQL actions will be deferred, on the assumption
- that our caller will handle regenerating CRL and manifest and
- flushing the SQL cache.
- """
-
- ca_detail = self.ca_detail
- cert = self.cert
- roa = self.roa
- uri = self.uri
-
- logger.debug("%s %r, ca_detail %r state is %s",
- "Regenerating" if regenerate else "Not regenerating",
- self, ca_detail, ca_detail.state)
-
- if regenerate:
- self.generate(publisher = publisher, fast = fast)
-
- logger.debug("Withdrawing %r %s and revoking its EE cert", self, uri)
- rpki.rpkid.revoked_cert_obj.revoke(cert = cert, ca_detail = ca_detail)
- publisher.withdraw(cls = rpki.publication.roa_elt, uri = uri, obj = roa,
- repository = ca_detail.ca.parent.repository,
- handler = False if allow_failure else None)
-
- if not regenerate:
- self.sql_mark_deleted()
-
- if not fast:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
- self.gctx.sql.sweep()
-
- def regenerate(self, publisher, fast = False):
- """
- Reissue ROA associated with this roa_obj.
- """
- if self.ca_detail is None:
- self.generate(publisher = publisher, fast = fast)
- else:
- self.revoke(publisher = publisher, regenerate = True, fast = fast)
-
- def uri_from_key(self, key):
- """
- Return publication URI for a public key.
- """
- return self.ca_detail.ca.sia_uri + key.gSKI() + ".roa"
-
- @property
- def uri(self):
- """
- Return the publication URI for this roa_obj's ROA.
- """
- return self.ca_detail.ca.sia_uri + self.uri_tail
-
- @property
- def uri_tail(self):
- """
- Return the tail (filename portion) of the publication URI for this
- roa_obj's ROA.
- """
- return self.cert.gSKI() + ".roa"
-
-
-class ghostbuster_obj(rpki.sql.sql_persistent):
- """
- Ghostbusters record.
- """
-
- sql_template = rpki.sql.template(
- "ghostbuster",
- "ghostbuster_id",
- "ca_detail_id",
- "self_id",
- "vcard",
- ("ghostbuster", rpki.x509.Ghostbuster),
- ("cert", rpki.x509.X509),
- ("published", rpki.sundial.datetime))
-
- ca_detail_id = None
- cert = None
- ghostbuster = None
- published = None
- vcard = None
-
- def __repr__(self):
- args = [self]
- try:
- args.extend(self.vcard.splitlines()[2:-1])
- except: # pylint: disable=W0702
- pass
- try:
- args.append(self.uri)
- except: # pylint: disable=W0702
- pass
- return rpki.log.log_repr(*args)
-
- @property
- @rpki.sql.cache_reference
- def self(self):
- """
- Fetch self object to which this ghostbuster_obj links.
- """
- return rpki.left_right.self_elt.sql_fetch(self.gctx, self.self_id)
-
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this ghostbuster_obj links.
- """
- return rpki.rpkid.ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
-
- def __init__(self, gctx = None, self_id = None, ca_detail_id = None, vcard = None):
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.self_id = self_id
- self.ca_detail_id = ca_detail_id
- self.vcard = vcard
-
- # Defer marking new ghostbuster as dirty until .generate() has a chance to
- # finish setup, otherwise we get SQL consistency errors.
-
- def update(self, publisher, fast = False):
- """
- Bring this ghostbuster_obj up to date if necesssary.
- """
-
- if self.ghostbuster is None:
- logger.debug("Ghostbuster record doesn't exist, generating")
- return self.generate(publisher = publisher, fast = fast)
- now = rpki.sundial.now()
- regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.self.regen_margin)
+ for task in tasks:
+ if task in self.task_ready:
+ logger.debug("Task %r already queued", task)
+ else:
+ logger.debug("Adding %r to task queue", task)
+ self.task_queue.put(task)
+ self.task_ready.add(task)
- if now > regen_time and self.cert.getNotAfter() < self.ca_detail.latest_ca_cert.getNotAfter():
- logger.debug("%r past threshold %s, regenerating", self, regen_time)
- return self.regenerate(publisher = publisher, fast = fast)
+ @tornado.gen.coroutine
+ def task_loop(self):
+ """
+ Asynchronous infinite loop to run background tasks.
+ """
- if now > regen_time:
- logger.warning("%r is past threshold %s but so is issuer %r, can't regenerate", self, regen_time, self.ca_detail)
+ logger.debug("Starting task loop")
- if self.cert.get_AIA()[0] != self.ca_detail.ca_cert_uri:
- logger.debug("%r AIA changed, regenerating", self)
- return self.regenerate(publisher = publisher, fast = fast)
+ while True:
+ task = None
+ try:
+ task = yield self.task_queue.get()
+ self.task_ready.discard(task)
+ yield task.start()
+ except:
+ logger.exception("Unhandled exception from %r", task)
- def generate(self, publisher, fast = False):
- """
- Generate a Ghostbuster record
+ @tornado.gen.coroutine
+ def cron_loop(self):
+ """
+ Asynchronous infinite loop to drive internal cron cycle.
+ """
- Once we have the right covering certificate, we generate the
- ghostbuster payload, generate a new EE certificate, use the EE
- certificate to sign the ghostbuster payload, publish the result,
- then throw away the private key for the EE cert. This is modeled
- after the way we handle ROAs.
+ logger.debug("cron_loop(): Starting")
+ assert self.use_internal_cron
+ logger.debug("cron_loop(): Startup delay %d seconds", self.initial_delay)
+ yield tornado.gen.sleep(self.initial_delay)
+ while True:
+ logger.debug("cron_loop(): Running")
+ try:
+ self.cron_run()
+ except:
+ logger.exception("Error queuing cron tasks")
+ logger.debug("cron_loop(): Sleeping %d seconds", self.cron_period)
+ yield tornado.gen.sleep(self.cron_period)
+
+ def cron_run(self):
+ """
+ Schedule periodic tasks.
+ """
- If fast is set, we leave generating the new manifest for our
- caller to handle, presumably at the end of a bulk operation.
- """
+ for tenant in rpki.rpkidb.models.Tenant.objects.all():
+ self.task_add(*tenant.cron_tasks(self))
- ca_detail = self.ca_detail
- ca = ca_detail.ca
-
- resources = rpki.resource_set.resource_bag.from_inheritance()
- keypair = rpki.x509.RSA.generate()
-
- self.cert = ca_detail.issue_ee(
- ca = ca,
- resources = resources,
- subject_key = keypair.get_public(),
- sia = (None, None, self.uri_from_key(keypair)))
- self.ghostbuster = rpki.x509.Ghostbuster.build(self.vcard, keypair, (self.cert,))
- self.published = rpki.sundial.now()
- self.sql_store()
-
- logger.debug("Generating Ghostbuster record %r", self.uri)
- publisher.publish(
- cls = rpki.publication.ghostbuster_elt,
- uri = self.uri,
- obj = self.ghostbuster,
- repository = ca.parent.repository,
- handler = self.published_callback)
- if not fast:
- ca_detail.generate_manifest(publisher = publisher)
-
- def published_callback(self, pdu):
- """
- Check publication result.
- """
- pdu.raise_if_error()
- self.published = None
- self.sql_mark_dirty()
-
- def revoke(self, publisher, regenerate = False, allow_failure = False, fast = False):
- """
- Withdraw Ghostbuster associated with this ghostbuster_obj.
+ @tornado.gen.coroutine
+ def cronjob_handler(self, handler):
+ """
+ External trigger to schedule periodic tasks. Obsolete for
+ production use, but portions of the test framework still use this.
+ """
- In order to preserve make-before-break properties without
- duplicating code, this method also handles generating a
- replacement ghostbuster when requested.
+ if self.use_internal_cron:
+ handler.set_status(500, "Running cron internally")
+ else:
+ logger.debug("Starting externally triggered cron")
+ self.cron_run()
+ handler.set_status(200)
+ handler.finish()
- If allow_failure is set, failing to withdraw the ghostbuster will not be
- considered an error.
+ @tornado.gen.coroutine
+ def http_fetch(self, request, serialize_on_full_url = False):
+ """
+ Wrapper around tornado.httpclient.AsyncHTTPClient() which
+ serializes requests to any particular HTTP server, to avoid
+ spurious CMS replay errors.
+ """
- If fast is set, SQL actions will be deferred, on the assumption
- that our caller will handle regenerating CRL and manifest and
- flushing the SQL cache.
- """
+ # The current definition of "particular HTTP server" is based only
+ # on the "netloc" portion of the URL, which could in theory
+ # cause deadlocks in a loopback scenario; no such deadlocks have
+ # shown up in testing, but if such a thing were to occur, it would
+ # look like an otherwise inexplicable HTTP timeout. The solution,
+ # should this occur, would be to use the entire URL as the lookup
+ # key, perhaps only for certain protocols.
+ #
+ # The reason for the current scheme is that at least one protocol
+ # (publication) uses RESTful URLs but has a single service-wide
+ # CMS replay detection database, which translates to meaning that
+ # we need to serialize all requests for that service, not just
+ # requests to a particular URL.
+
+ if serialize_on_full_url:
+ netlock = request.url
+ else:
+ netlock = urlparse.urlparse(request.url).netloc
- ca_detail = self.ca_detail
- cert = self.cert
- ghostbuster = self.ghostbuster
- uri = self.uri
+ try:
+ lock = self.http_client_serialize[netlock]
+ except KeyError:
+ lock = self.http_client_serialize[netlock] = tornado.locks.Lock()
- logger.debug("%s %r, ca_detail %r state is %s",
- "Regenerating" if regenerate else "Not regenerating",
- self, ca_detail, ca_detail.state)
+ http_client = tornado.httpclient.AsyncHTTPClient()
- if regenerate:
- self.generate(publisher = publisher, fast = fast)
+ with (yield lock.acquire()):
+ try:
+ started = time.time()
+ response = yield http_client.fetch(request)
+ except tornado.httpclient.HTTPError as e:
- logger.debug("Withdrawing %r %s and revoking its EE cert", self, uri)
- rpki.rpkid.revoked_cert_obj.revoke(cert = cert, ca_detail = ca_detail)
- publisher.withdraw(cls = rpki.publication.ghostbuster_elt, uri = uri, obj = ghostbuster,
- repository = ca_detail.ca.parent.repository,
- handler = False if allow_failure else None)
+ # XXX This is not a solution, just an attempt to
+ # gather data on whether the timeout arguments are
+ # working as expected.
- if not regenerate:
- self.sql_mark_deleted()
+ logger.warning("%r: HTTP error contacting %r: %s", self, request, e)
+ if e.code == 599:
+ logger.warning("%r: HTTP timeout after time %s seconds", self, time.time() - started)
+ raise
- if not fast:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
- self.gctx.sql.sweep()
+ raise tornado.gen.Return(response)
- def regenerate(self, publisher, fast = False):
- """
- Reissue Ghostbuster associated with this ghostbuster_obj.
- """
- if self.ghostbuster is None:
- self.generate(publisher = publisher, fast = fast)
- else:
- self.revoke(publisher = publisher, regenerate = True, fast = fast)
+ @staticmethod
+ def compose_left_right_query():
+ """
+ Compose top level element of a left-right query to irdbd.
+ """
- def uri_from_key(self, key):
- """
- Return publication URI for a public key.
- """
- return self.ca_detail.ca.sia_uri + key.gSKI() + ".gbr"
+ return Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "query", version = rpki.left_right.version)
- @property
- def uri(self):
- """
- Return the publication URI for this ghostbuster_obj's ghostbuster.
- """
- return self.ca_detail.ca.sia_uri + self.uri_tail
+ @tornado.gen.coroutine
+ def irdb_query(self, q_msg):
+ """
+ Perform an IRDB callback query.
+ """
- @property
- def uri_tail(self):
- """
- Return the tail (filename portion) of the publication URI for this
- ghostbuster_obj's ghostbuster.
- """
- return self.cert.gSKI() + ".gbr"
-
-
-class ee_cert_obj(rpki.sql.sql_persistent):
- """
- EE certificate (router certificate or generic).
- """
-
- sql_template = rpki.sql.template(
- "ee_cert",
- "ee_cert_id",
- "self_id",
- "ca_detail_id",
- "ski",
- ("cert", rpki.x509.X509),
- ("published", rpki.sundial.datetime))
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.cert.getSubject(), self.uri)
-
- def __init__(self, gctx = None, self_id = None, ca_detail_id = None, cert = None):
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.self_id = self_id
- self.ca_detail_id = ca_detail_id
- self.cert = cert
- self.ski = None if cert is None else cert.get_SKI()
- self.published = None
- if self_id or ca_detail_id or cert:
- self.sql_mark_dirty()
-
- @property
- @rpki.sql.cache_reference
- def self(self):
- """
- Fetch self object to which this ee_cert_obj links.
- """
- return rpki.left_right.self_elt.sql_fetch(self.gctx, self.self_id)
+ q_tags = set(q_pdu.tag for q_pdu in q_msg)
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this ee_cert_obj links.
- """
- return rpki.rpkid.ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+ q_der = rpki.left_right.cms_msg().wrap(q_msg, self.rpkid_key, self.rpkid_cert)
- @ca_detail.deleter
- def ca_detail(self):
- try:
- del self._ca_detail
- except AttributeError:
- pass
+ http_request = tornado.httpclient.HTTPRequest(
+ url = self.irdb_url,
+ method = "POST",
+ body = q_der,
+ headers = { "Content-Type" : rpki.left_right.content_type },
+ connect_timeout = self.http_client_timeout,
+ request_timeout = self.http_client_timeout)
- @property
- def gski(self):
- """
- Calculate g(SKI), for ease of comparison with XML.
+ http_response = yield self.http_fetch(http_request)
- Although, really, one has to ask why we don't just store g(SKI)
- in rpkid.sql instead of ski....
- """
- return base64.urlsafe_b64encode(self.ski).rstrip("=")
+ # Tornado already checked http_response.code for us
- @gski.setter
- def gski(self, val):
- self.ski = base64.urlsafe_b64decode(val + ("=" * ((4 - len(val)) % 4)))
+ content_type = http_response.headers.get("Content-Type")
- @property
- def uri(self):
- """
- Return the publication URI for this ee_cert_obj.
- """
- return self.ca_detail.ca.sia_uri + self.uri_tail
+ if content_type not in rpki.left_right.allowed_content_types:
+ raise rpki.exceptions.BadContentType("HTTP Content-Type %r, expected %r" % (rpki.left_right.content_type, content_type))
- @property
- def uri_tail(self):
- """
- Return the tail (filename portion) of the publication URI for this
- ee_cert_obj.
- """
- return self.cert.gSKI() + ".cer"
+ r_der = http_response.body
- @classmethod
- def create(cls, ca_detail, subject_name, subject_key, resources, publisher, eku = None):
- """
- Generate a new certificate and stuff it in a new ee_cert_obj.
- """
+ r_cms = rpki.left_right.cms_msg(DER = r_der)
+ r_msg = r_cms.unwrap((self.bpki_ta, self.irdb_cert))
- cn, sn = subject_name.extract_cn_and_sn()
- ca = ca_detail.ca
+ self.irdbd_cms_timestamp = r_cms.check_replay(self.irdbd_cms_timestamp, self.irdb_url)
- cert = ca_detail.issue_ee(
- ca = ca,
- subject_key = subject_key,
- sia = None,
- resources = resources,
- notAfter = resources.valid_until,
- cn = cn,
- sn = sn,
- eku = eku)
+ #rpki.left_right.check_response(r_msg)
- self = cls(
- gctx = ca_detail.gctx,
- self_id = ca.parent.self.self_id,
- ca_detail_id = ca_detail.ca_detail_id,
- cert = cert)
+ if r_msg.get("type") != "reply" or not all(r_pdu.tag in q_tags for r_pdu in r_msg):
+ raise rpki.exceptions.BadIRDBReply("Unexpected response to IRDB query: %s" % r_cms.pretty_print_content())
- publisher.publish(
- cls = rpki.publication.certificate_elt,
- uri = self.uri,
- obj = self.cert,
- repository = ca.parent.repository,
- handler = self.published_callback)
+ raise tornado.gen.Return(r_msg)
- self.sql_store()
+ @tornado.gen.coroutine
+ def irdb_query_children_resources(self, tenant_handle, child_handles):
+ """
+ Ask IRDB about resources for one or more children.
+ """
- ca_detail.generate_manifest(publisher = publisher)
+ q_msg = self.compose_left_right_query()
+ for child_handle in child_handles:
+ SubElement(q_msg, rpki.left_right.tag_list_resources, tenant_handle = tenant_handle, child_handle = child_handle)
- logger.debug("New ee_cert %r", self)
+ r_msg = yield self.irdb_query(q_msg)
- return self
+ if len(r_msg) != len(q_msg):
+ raise rpki.exceptions.BadIRDBReply("Expected IRDB response to be same length as query: %s" % r_msg.pretty_print_content())
- def revoke(self, publisher, generate_crl_and_manifest = True):
- """
- Revoke and withdraw an EE certificate.
- """
+ bags = [rpki.resource_set.resource_bag(asn = r_pdu.get("asn"),
+ v4 = r_pdu.get("ipv4"),
+ v6 = r_pdu.get("ipv6"),
+ valid_until = r_pdu.get("valid_until"))
+ for r_pdu in r_msg]
- ca_detail = self.ca_detail
- ca = ca_detail.ca
- logger.debug("Revoking %r %r", self, self.uri)
- revoked_cert_obj.revoke(cert = self.cert, ca_detail = ca_detail)
- publisher.withdraw(cls = rpki.publication.certificate_elt,
- uri = self.uri,
- obj = self.cert,
- repository = ca.parent.repository)
- self.gctx.sql.sweep()
- self.sql_delete()
- if generate_crl_and_manifest:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
-
- def reissue(self, publisher, ca_detail = None, resources = None, force = False):
- """
- Reissue an existing EE cert, reusing the public key. If the EE
- cert we would generate is identical to the one we already have, we
- just return; if we need to reissue, we reuse this ee_cert_obj and
- just update its contents, as the publication URI will not have
- changed.
- """
+ raise tornado.gen.Return(bags)
- needed = False
+ @tornado.gen.coroutine
+ def irdb_query_child_resources(self, tenant_handle, child_handle):
+ """
+ Ask IRDB about a single child's resources.
+ """
- old_cert = self.cert
+ bags = yield self.irdb_query_children_resources(tenant_handle, (child_handle,))
+ raise tornado.gen.Return(bags[0])
- old_ca_detail = self.ca_detail
- if ca_detail is None:
- ca_detail = old_ca_detail
+ @tornado.gen.coroutine
+ def irdb_query_roa_requests(self, tenant_handle):
+ """
+ Ask IRDB about self's ROA requests.
+ """
- assert ca_detail.ca is old_ca_detail.ca
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_list_roa_requests, tenant_handle = tenant_handle)
+ r_msg = yield self.irdb_query(q_msg)
+ raise tornado.gen.Return(r_msg)
- old_resources = old_cert.get_3779resources()
- if resources is None:
- resources = old_resources
+ @tornado.gen.coroutine
+ def irdb_query_ghostbuster_requests(self, tenant_handle, parent_handles):
+ """
+ Ask IRDB about self's ghostbuster record requests.
+ """
- assert resources.valid_until is not None and old_resources.valid_until is not None
+ q_msg = self.compose_left_right_query()
+ for parent_handle in parent_handles:
+ SubElement(q_msg, rpki.left_right.tag_list_ghostbuster_requests,
+ tenant_handle = tenant_handle, parent_handle = parent_handle)
+ r_msg = yield self.irdb_query(q_msg)
+ raise tornado.gen.Return(r_msg)
- assert ca_detail.covers(resources)
+ @tornado.gen.coroutine
+ def irdb_query_ee_certificate_requests(self, tenant_handle):
+ """
+ Ask IRDB about self's EE certificate requests.
+ """
- if ca_detail != self.ca_detail:
- logger.debug("ca_detail changed for %r: old %r new %r",
- self, self.ca_detail, ca_detail)
- needed = True
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_list_ee_certificate_requests, tenant_handle = tenant_handle)
+ r_msg = yield self.irdb_query(q_msg)
+ raise tornado.gen.Return(r_msg)
- if ca_detail.ca_cert_uri != old_cert.get_AIA()[0]:
- logger.debug("AIA changed for %r: old %s new %s",
- self, old_cert.get_AIA()[0], ca_detail.ca_cert_uri)
- needed = True
+ @property
+ def left_right_models(self):
+ """
+ Map element tag to rpkidb model.
+ """
- if resources.valid_until != old_resources.valid_until:
- logger.debug("Validity changed for %r: old %s new %s",
- self, old_resources.valid_until, resources.valid_until)
- needed = True
+ # pylint: disable=W0621,W0201
- if resources.asn != old_resources.asn or resources.v4 != old_resources.v4 or resources.v6 != old_resources.v6:
- logger.debug("Resources changed for %r: old %s new %s",
- self, old_resources, resources)
- needed = True
+ try:
+ return self._left_right_models
+ except AttributeError:
+ import rpki.rpkidb.models
+ self._left_right_models = {
+ rpki.left_right.tag_tenant : rpki.rpkidb.models.Tenant,
+ rpki.left_right.tag_bsc : rpki.rpkidb.models.BSC,
+ rpki.left_right.tag_parent : rpki.rpkidb.models.Parent,
+ rpki.left_right.tag_child : rpki.rpkidb.models.Child,
+ rpki.left_right.tag_repository : rpki.rpkidb.models.Repository }
+ return self._left_right_models
+
+ @property
+ def left_right_trivial_handlers(self):
+ """
+ Map element tag to bound handler methods for trivial PDU types.
+ """
- must_revoke = (old_resources.oversized(resources) or
- old_resources.valid_until > resources.valid_until)
- if must_revoke:
- logger.debug("Must revoke existing cert(s) for %r", self)
- needed = True
+ # pylint: disable=W0201
- if not needed and force:
- logger.debug("No change needed for %r, forcing reissuance anyway", self)
- needed = True
+ try:
+ return self._left_right_trivial_handlers
+ except AttributeError:
+ self._left_right_trivial_handlers = {
+ rpki.left_right.tag_list_published_objects : self.handle_list_published_objects,
+ rpki.left_right.tag_list_received_resources : self.handle_list_received_resources }
+ return self._left_right_trivial_handlers
+
+ def handle_list_published_objects(self, q_pdu, r_msg):
+ """
+ <list_published_objects/> server.
+ """
- if not needed:
- logger.debug("No change to %r", self)
- return
+ tenant_handle = q_pdu.get("tenant_handle")
+ msg_tag = q_pdu.get("tag")
+
+ kw = dict(tenant_handle = tenant_handle)
+ if msg_tag is not None:
+ kw.update(tag = msg_tag)
+
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(ca__parent__tenant__tenant_handle = tenant_handle, state = "active"):
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = ca_detail.crl_uri, **kw).text = ca_detail.latest_crl.get_Base64()
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = ca_detail.manifest_uri, **kw).text = ca_detail.latest_manifest.get_Base64()
+ for c in ca_detail.child_certs.all():
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = c.uri, child_handle = c.child.child_handle, **kw).text = c.cert.get_Base64()
+ for r in ca_detail.roas.filter(roa__isnull = False):
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = r.uri, **kw).text = r.roa.get_Base64()
+ for g in ca_detail.ghostbusters.all():
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = g.uri, **kw).text = g.ghostbuster.get_Base64()
+ for c in ca_detail.ee_certificates.all():
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = c.uri, **kw).text = c.cert.get_Base64()
+
+ def handle_list_received_resources(self, q_pdu, r_msg):
+ """
+ <list_received_resources/> server.
+ """
- cn, sn = self.cert.getSubject().extract_cn_and_sn()
+ logger.debug(".handle_list_received_resources() %s", ElementToString(q_pdu))
+ tenant_handle = q_pdu.get("tenant_handle")
+ msg_tag = q_pdu.get("tag")
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(ca__parent__tenant__tenant_handle = tenant_handle,
+ state = "active", latest_ca_cert__isnull = False):
+ cert = ca_detail.latest_ca_cert
+ resources = cert.get_3779resources()
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_list_received_resources,
+ tenant_handle = tenant_handle,
+ parent_handle = ca_detail.ca.parent.parent_handle,
+ uri = ca_detail.ca_cert_uri,
+ notBefore = str(cert.getNotBefore()),
+ notAfter = str(cert.getNotAfter()),
+ sia_uri = cert.get_sia_directory_uri(),
+ aia_uri = cert.get_aia_uri() or "",
+ asn = str(resources.asn),
+ ipv4 = str(resources.v4),
+ ipv6 = str(resources.v6))
+ if msg_tag is not None:
+ r_pdu.set("tag", msg_tag)
+
+ @tornado.gen.coroutine
+ def left_right_handler(self, handler):
+ """
+ Process one left-right message.
+ """
- self.cert = ca_detail.issue_ee(
- ca = ca_detail.ca,
- subject_key = self.cert.getPublicKey(),
- eku = self.cert.get_EKU(),
- sia = None,
- resources = resources,
- notAfter = resources.valid_until,
- cn = cn,
- sn = sn)
+ content_type = handler.request.headers["Content-Type"]
+ if content_type not in rpki.left_right.allowed_content_types:
+ handler.set_status(415, "No handler for Content-Type %s" % content_type)
+ handler.finish()
+ return
- self.sql_mark_dirty()
+ handler.set_header("Content-Type", rpki.left_right.content_type)
- publisher.publish(
- cls = rpki.publication.certificate_elt,
- uri = self.uri,
- obj = self.cert,
- repository = ca_detail.ca.parent.repository,
- handler = self.published_callback)
+ try:
+ q_cms = rpki.left_right.cms_msg(DER = handler.request.body)
+ q_msg = q_cms.unwrap((self.bpki_ta, self.irbe_cert))
+ r_msg = Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "reply", version = rpki.left_right.version)
+ self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, handler.request.path)
+
+ assert q_msg.tag.startswith(rpki.left_right.xmlns)
+ assert all(q_pdu.tag.startswith(rpki.left_right.xmlns) for q_pdu in q_msg)
+
+ if q_msg.get("version") != rpki.left_right.version:
+ raise rpki.exceptions.BadQuery("Unrecognized protocol version")
+
+ if q_msg.get("type") != "query":
+ raise rpki.exceptions.BadQuery("Message type is not query")
+
+ for q_pdu in q_msg:
+
+ try:
+ action = q_pdu.get("action")
+ model = self.left_right_models.get(q_pdu.tag)
+
+ if q_pdu.tag in self.left_right_trivial_handlers:
+ self.left_right_trivial_handlers[q_pdu.tag](q_pdu, r_msg)
+
+ elif action in ("get", "list"):
+ for obj in model.objects.xml_list(q_pdu):
+ obj.xml_template.encode(obj, q_pdu, r_msg)
+
+ elif action == "destroy":
+ obj = model.objects.xml_get_for_delete(q_pdu)
+ yield obj.xml_pre_delete_hook(self)
+ obj.delete()
+ obj.xml_template.acknowledge(obj, q_pdu, r_msg)
+
+ elif action in ("create", "set"):
+ obj = model.objects.xml_get_or_create(q_pdu)
+ obj.xml_template.decode(obj, q_pdu)
+ obj.xml_pre_save_hook(q_pdu)
+ obj.save()
+ yield obj.xml_post_save_hook(self, q_pdu)
+ obj.xml_template.acknowledge(obj, q_pdu, r_msg)
+
+ else:
+ raise rpki.exceptions.BadQuery("Unrecognized action %r" % action)
+
+ except Exception, e:
+ if not isinstance(e, rpki.exceptions.NotFound):
+ logger.exception("Unhandled exception serving left-right PDU %r", q_pdu)
+ error_tenant_handle = q_pdu.get("tenant_handle")
+ error_tag = q_pdu.get("tag")
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_report_error, error_code = e.__class__.__name__)
+ r_pdu.text = str(e)
+ if error_tag is not None:
+ r_pdu.set("tag", error_tag)
+ if error_tenant_handle is not None:
+ r_pdu.set("tenant_handle", error_tenant_handle)
+ break
+
+ handler.set_status(200)
+ handler.finish(rpki.left_right.cms_msg().wrap(r_msg, self.rpkid_key, self.rpkid_cert))
- if must_revoke:
- revoked_cert_obj.revoke(cert = old_cert.cert, ca_detail = old_ca_detail)
+ except Exception, e:
+ logger.exception("Unhandled exception serving left-right request")
+ handler.set_status(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+ handler.finish()
- self.gctx.sql.sweep()
+ @tornado.gen.coroutine
+ def up_down_handler(self, handler, tenant_handle, child_handle):
+ """
+ Process one up-down PDU.
+ """
- if must_revoke:
- ca_detail.generate_crl(publisher = publisher)
- self.gctx.sql.sweep()
+ content_type = handler.request.headers["Content-Type"]
+ if content_type not in rpki.up_down.allowed_content_types:
+ handler.set_status(415, "No handler for Content-Type %s" % content_type)
+ handler.finish()
+ return
- ca_detail.generate_manifest(publisher = publisher)
+ try:
+ child = rpki.rpkidb.models.Child.objects.get(tenant__tenant_handle = tenant_handle, child_handle = child_handle)
+ q_der = handler.request.body
+ r_der = yield child.serve_up_down(self, q_der)
+ handler.set_header("Content-Type", rpki.up_down.content_type)
+ handler.set_status(200)
+ handler.finish(r_der)
+
+ except rpki.rpkidb.models.Child.DoesNotExist:
+ logger.info("Child %r of tenant %r not found", child_handle, tenant_handle)
+ handler.set_status(400, "Child %r not found" % child_handle)
+ handler.finish()
- def published_callback(self, pdu):
- """
- Publication callback: check result and mark published.
- """
- pdu.raise_if_error()
- self.published = None
- self.sql_mark_dirty()
+ except Exception, e:
+ logger.exception("Unhandled exception processing up-down request")
+ handler.set_status(400, "Could not process PDU: %s" % e)
+ handler.finish()
class publication_queue(object):
- """
- Utility to simplify publication from within rpkid.
-
- General idea here is to accumulate a collection of objects to be
- published, in one or more repositories, each potentially with its
- own completion callback. Eventually we want to publish everything
- we've accumulated, at which point we need to iterate over the
- collection and do repository.call_pubd() for each repository.
- """
-
- replace = True
-
- def __init__(self):
- self.clear()
-
- def clear(self):
- self.repositories = {}
- self.msgs = {}
- self.handlers = {}
- if self.replace:
- self.uris = {}
-
- def _add(self, uri, obj, repository, handler, make_pdu):
- rid = id(repository)
- if rid not in self.repositories:
- self.repositories[rid] = repository
- self.msgs[rid] = rpki.publication.msg.query()
- if self.replace and uri in self.uris:
- logger.debug("Removing publication duplicate <%s %r %r>",
- self.uris[uri].action, self.uris[uri].uri, self.uris[uri].payload)
- self.msgs[rid].remove(self.uris.pop(uri))
- pdu = make_pdu(uri = uri, obj = obj)
- if handler is not None:
- self.handlers[id(pdu)] = handler
- pdu.tag = id(pdu)
- self.msgs[rid].append(pdu)
- if self.replace:
- self.uris[uri] = pdu
-
- def publish(self, cls, uri, obj, repository, handler = None):
- return self._add( uri, obj, repository, handler, cls.make_publish)
-
- def withdraw(self, cls, uri, obj, repository, handler = None):
- return self._add( uri, obj, repository, handler, cls.make_withdraw)
-
- def call_pubd(self, cb, eb):
- def loop(iterator, rid):
- logger.debug("Calling pubd[%r]", self.repositories[rid])
- self.repositories[rid].call_pubd(iterator, eb, self.msgs[rid], self.handlers)
- def done():
- self.clear()
- cb()
- rpki.async.iterator(self.repositories, loop, done)
-
- @property
- def size(self):
- return sum(len(self.msgs[rid]) for rid in self.repositories)
-
- def empty(self):
- assert (not self.msgs) == (self.size == 0)
- return not self.msgs
+ """
+ Utility to simplify publication from within rpkid.
+
+ General idea here is to accumulate a collection of objects to be
+ published, in one or more repositories, each potentially with its
+ own completion callback. Eventually we want to publish everything
+ we've accumulated, at which point we need to iterate over the
+ collection and do repository.call_pubd() for each repository.
+ """
+
+ # At present, ._inplay and .inplay() are debugging tools only. If
+ # there turns out to be a real race condition here, this might
+ # evolve into the hook for some kind of Condition()-based
+ # mechanism.
+
+ _inplay = weakref.WeakValueDictionary()
+
+ def __init__(self, rpkid):
+ self.rpkid = rpkid
+ self.clear()
+
+ def clear(self):
+ self.repositories = {}
+ self.msgs = {}
+ self.handlers = {}
+ self.uris = {}
+
+ def inplay(self, uri):
+ who = self._inplay.get(uri, self)
+ return who is not self and uri in who.uris
+
+ def queue(self, uri, repository, handler = None,
+ old_obj = None, new_obj = None, old_hash = None):
+
+ assert old_obj is not None or new_obj is not None or old_hash is not None
+ assert old_obj is None or old_hash is None
+ assert old_obj is None or isinstance(old_obj, rpki.x509.uri_dispatch(uri))
+ assert new_obj is None or isinstance(new_obj, rpki.x509.uri_dispatch(uri))
+
+ logger.debug("Queuing publication action: uri %s, old %r, new %r, hash %s",
+ uri, old_obj, new_obj, old_hash)
+
+ if self.inplay(uri):
+ logger.warning("%s is already in play", uri)
+
+ rid = repository.peer_contact_uri
+ if rid not in self.repositories:
+ self.repositories[rid] = repository
+ self.msgs[rid] = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+
+ if uri in self.uris:
+ logger.debug("Removing publication duplicate %r %s hash %s",
+ self.uris[uri], uri, self.uris[uri].get("hash"))
+ old_pdu = self.uris.pop(uri)
+ self.msgs[rid].remove(old_pdu)
+ pdu_hash = old_pdu.get("hash")
+ if pdu_hash is None and new_obj is None:
+ logger.debug("Withdrawing object %r which was never published simplifies to no-op",
+ old_pdu)
+ return
+ elif old_hash is not None:
+ logger.debug("Old hash supplied") # XXX Debug log
+ pdu_hash = old_hash
+ elif old_obj is None:
+ logger.debug("No old object present") # XXX Debug log
+ pdu_hash = None
+ else:
+ logger.debug("Calculating hash of old object") # XXX Debug log
+ pdu_hash = rpki.x509.sha256(old_obj.get_DER()).encode("hex")
+
+ logger.debug("uri %s old hash %s new hash %s", uri, pdu_hash, # XXX Debug log
+ None if new_obj is None else rpki.x509.sha256(new_obj.get_DER()).encode("hex"))
+
+ if new_obj is None:
+ pdu = SubElement(self.msgs[rid], rpki.publication.tag_withdraw, uri = uri, hash = pdu_hash)
+ else:
+ pdu = SubElement(self.msgs[rid], rpki.publication.tag_publish, uri = uri)
+ pdu.text = new_obj.get_Base64()
+ if pdu_hash is not None:
+ pdu.set("hash", pdu_hash)
+
+ if handler is not None:
+ self.handlers[uri] = handler
+
+ self.uris[uri] = pdu
+ self._inplay[uri] = self
+
+ @tornado.gen.coroutine
+ def call_pubd(self):
+ for rid in self.repositories:
+ logger.debug("Calling pubd[%r]", self.repositories[rid])
+ try:
+ yield self.repositories[rid].call_pubd(self.rpkid, self.msgs[rid], self.handlers)
+ except (rpki.exceptions.ExistingObjectAtURI,
+ rpki.exceptions.DifferentObjectAtURI,
+ rpki.exceptions.NoObjectAtURI) as e:
+ logger.warn("Lost synchronization with %r: %s", self.repositories[rid], e)
+ yield self.resync(self.repositories[rid])
+ for k in self.uris.iterkeys():
+ if self._inplay.get(k) is self:
+ del self._inplay[k]
+ self.clear()
+
+ @tornado.gen.coroutine
+ def resync(self, repository):
+ logger.info("Attempting resynchronization with %r", repository)
+
+ # A lot of this is copy and paste from .serve_publish_world_now().
+ # Refactor when we have more of a clue about how this should work.
+
+ q_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+ SubElement(q_msg, rpki.publication.tag_list, tag = "list")
+ r_msg = yield repository.call_pubd(self.rpkid, q_msg, length_check = False)
+
+ if not all(r_pdu.tag == rpki.publication.tag_list for r_pdu in r_msg):
+ raise rpki.exceptions.BadPublicationReply("Unexpected XML tag in publication response")
+
+ pubd_objs = dict((r_pdu.get("uri"), r_pdu.get("hash")) for r_pdu in r_msg)
+
+ our_objs = []
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(
+ ca__parent__tenant = repository.tenant, state = "active"):
+ our_objs = [(ca_detail.crl_uri, ca_detail.latest_crl),
+ (ca_detail.manifest_uri, ca_detail.latest_manifest)]
+ our_objs.extend((c.uri, c.cert) for c in ca_detail.child_certs.all())
+ our_objs.extend((r.uri, r.roa) for r in ca_detail.roas.filter(roa__isnull = False))
+ our_objs.extend((g.uri, g.ghostbuster) for g in ca_detail.ghostbusters.all())
+ our_objs.extend((c.uri, c.cert) for c in ca_detail.ee_certificates.all())
+
+ q_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+
+ for uri, obj in our_objs:
+ if uri not in pubd_objs:
+ SubElement(q_msg, rpki.publication.tag_publish, uri = uri).text = obj.get_Base64()
+ else:
+ h = pubd_objs.pop(uri)
+ if h != rpki.x509.sha256(obj.get_DER()).encode("hex"):
+ SubElement(q_msg, rpki.publication.tag_publish,
+ uri = uri, hash = h).text = obj.get_Base64()
+
+ for uri, h in pubd_objs.iteritems():
+ SubElement(q_msg, rpki.publication.tag_withdraw, uri = uri, hash = h)
+
+ yield repository.call_pubd(self.rpkid, q_msg)
+
+ @property
+ def size(self):
+ return sum(len(self.msgs[rid]) for rid in self.repositories)
+
+ def empty(self):
+ return not self.msgs
diff --git a/rpki/rpkid_tasks.py b/rpki/rpkid_tasks.py
index 58b4bcfe..ee4f90d3 100644
--- a/rpki/rpkid_tasks.py
+++ b/rpki/rpkid_tasks.py
@@ -22,9 +22,18 @@ because interactions with rpkid scheduler were getting too complicated.
"""
import logging
+import random
+
+import tornado.gen
+import tornado.web
+import tornado.locks
+import tornado.ioloop
+import tornado.httputil
+import tornado.httpclient
+import tornado.httpserver
+
import rpki.log
import rpki.rpkid
-import rpki.async
import rpki.up_down
import rpki.sundial
import rpki.publication
@@ -35,700 +44,634 @@ logger = logging.getLogger(__name__)
task_classes = ()
def queue_task(cls):
- """
- Class decorator to add a new task class to task_classes.
- """
-
- global task_classes
- task_classes += (cls,)
- return cls
-
-
-class CompletionHandler(object):
- """
- Track one or more scheduled rpkid tasks and execute a callback when
- the last of them terminates.
- """
-
- ## @var debug
- # Debug logging.
-
- debug = False
-
- def __init__(self, cb):
- self.cb = cb
- self.tasks = set()
-
- def register(self, task):
- if self.debug:
- logger.debug("Completion handler %r registering task %r", self, task)
- self.tasks.add(task)
- task.register_completion(self.done)
-
- def done(self, task):
- try:
- self.tasks.remove(task)
- except KeyError:
- logger.warning("Completion handler %r called with unregistered task %r, blundering onwards", self, task)
- else:
- if self.debug:
- logger.debug("Completion handler %r called with registered task %r", self, task)
- if not self.tasks:
- if self.debug:
- logger.debug("Completion handler %r finished, calling %r", self, self.cb)
- self.cb()
-
- @property
- def count(self):
- return len(self.tasks)
+ """
+ Class decorator to add a new task class to task_classes.
+ """
+
+ global task_classes # pylint: disable=W0603
+ task_classes += (cls,)
+ return cls
+
+
+class PostponeTask(Exception):
+ """
+ Exit a task without finishing it. We use this to signal that a
+ long-running task wants to yield to the task loop but hasn't yet
+ run to completion.
+ """
class AbstractTask(object):
- """
- Abstract base class for rpkid scheduler task objects. This just
- handles the scheduler hooks, real work starts in self.start.
-
- NB: This assumes that the rpki.rpkid.rpkid.task_* methods have been
- rewritten to expect instances of subclasses of this class, rather
- than expecting thunks to be wrapped up in the older version of this
- class. Rewrite, rewrite, remove this comment when done, OK!
- """
-
- ## @var timeslice
- # How long before a task really should consider yielding the CPU to
- # let something else run.
-
- timeslice = rpki.sundial.timedelta(seconds = 15)
-
- def __init__(self, s, description = None):
- self.self = s
- self.description = description
- self.completions = []
- self.continuation = None
- self.due_date = None
- self.clear()
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.description)
-
- def register_completion(self, completion):
- self.completions.append(completion)
-
- def exit(self):
- self.self.gctx.sql.sweep()
- while self.completions:
- self.completions.pop(0)(self)
- self.clear()
- self.due_date = None
- self.self.gctx.task_next()
-
- def postpone(self, continuation):
- self.self.gctx.sql.sweep()
- self.continuation = continuation
- self.due_date = None
- self.self.gctx.task_add(self)
- self.self.gctx.task_next()
-
- def __call__(self):
- self.due_date = rpki.sundial.now() + self.timeslice
- if self.continuation is None:
- logger.debug("Running task %r", self)
- self.clear()
- self.start()
- else:
- logger.debug("Restarting task %r at %r", self, self.continuation)
- continuation = self.continuation
- self.continuation = None
- continuation()
-
- @property
- def overdue(self):
- return rpki.sundial.now() > self.due_date
-
- def __getattr__(self, name):
- return getattr(self.self, name)
-
- def start(self):
- raise NotImplementedError
-
- def clear(self):
- pass
+ """
+ Abstract base class for rpkid scheduler task objects.
+ """
+
+ ## @var timeslice
+ # How long before a task really should consider yielding the CPU
+ # to let something else run. Should this be something we can
+ # configure from rpki.conf?
+
+ #timeslice = rpki.sundial.timedelta(seconds = 15)
+ timeslice = rpki.sundial.timedelta(seconds = 120)
+
+ def __init__(self, rpkid, tenant, description = None):
+ self.rpkid = rpkid
+ self.tenant = tenant
+ self.description = description
+ self.done_this = None
+ self.done_next = None
+ self.due_date = None
+ self.started = False
+ self.postponed = False
+ self.clear()
+
+ def __repr__(self):
+ return rpki.log.log_repr(self, self.description)
+
+ @tornado.gen.coroutine
+ def start(self):
+ try:
+ logger.debug("%r: Starting", self)
+ self.due_date = rpki.sundial.now() + self.timeslice
+ self.clear()
+ self.started = True
+ self.postponed = False
+ yield self.main()
+ except PostponeTask:
+ self.postponed = True
+ except:
+ logger.exception("%r: Unhandled exception", self)
+ finally:
+ self.due_date = None
+ self.started = False
+ self.clear()
+ if self.postponed:
+ logger.debug("%r: Postponing", self)
+ self.rpkid.task_add(self)
+ else:
+ logger.debug("%r: Exiting", self)
+ if self.done_this is not None:
+ self.done_this.notify_all()
+ self.done_this = self.done_next
+ self.done_next = None
+
+ def wait(self):
+ done = "done_next" if self.started else "done_this"
+ condition = getattr(self, done)
+ if condition is None:
+ condition = tornado.locks.Condition()
+ setattr(self, done, condition)
+ future = condition.wait()
+ return future
+
+ def waiting(self):
+ return self.done_this is not None
+
+ @tornado.gen.coroutine
+ def overdue(self):
+ yield tornado.gen.moment
+ raise tornado.gen.Return(rpki.sundial.now() > self.due_date and
+ any(not task.postponed for task in self.rpkid.task_ready))
+
+ @tornado.gen.coroutine
+ def main(self):
+ raise NotImplementedError
+
+ def clear(self):
+ pass
@queue_task
class PollParentTask(AbstractTask):
- """
- Run the regular client poll cycle with each of this self's
- parents, in turn.
- """
-
- def clear(self):
- self.parent_iterator = None
- self.parent = None
- self.ca_map = None
- self.class_iterator = None
-
- def start(self):
- self.gctx.checkpoint()
- logger.debug("Self %s[%d] polling parents", self.self_handle, self.self_id)
- rpki.async.iterator(self.parents, self.parent_loop, self.exit)
-
- def parent_loop(self, parent_iterator, parent):
- self.parent_iterator = parent_iterator
- self.parent = parent
- rpki.up_down.list_pdu.query(parent, self.got_list, self.list_failed)
-
- def got_list(self, r_msg):
- self.ca_map = dict((ca.parent_resource_class, ca) for ca in self.parent.cas)
- self.gctx.checkpoint()
- rpki.async.iterator(r_msg.payload.classes, self.class_loop, self.class_done)
-
- def list_failed(self, e):
- logger.exception("Couldn't get resource class list from parent %r, skipping", self.parent)
- self.parent_iterator()
-
- def class_loop(self, class_iterator, rc):
- self.gctx.checkpoint()
- self.class_iterator = class_iterator
- try:
- ca = self.ca_map.pop(rc.class_name)
- except KeyError:
- rpki.rpkid.ca_obj.create(self.parent, rc, class_iterator, self.class_create_failed)
- else:
- ca.check_for_updates(self.parent, rc, class_iterator, self.class_update_failed)
-
- def class_update_failed(self, e):
- logger.exception("Couldn't update class, skipping")
- self.class_iterator()
-
- def class_create_failed(self, e):
- logger.exception("Couldn't create class, skipping")
- self.class_iterator()
-
- def class_done(self):
- rpki.async.iterator(self.ca_map.values(), self.ca_loop, self.ca_done)
-
- def ca_loop(self, iterator, ca):
- self.gctx.checkpoint()
- ca.delete(self.parent, iterator)
-
- def ca_done(self):
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- self.parent_iterator()
+ """
+ Run the regular client poll cycle with each of this tenant's
+ parents, in turn.
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Polling parents", self)
+
+ for parent in rpki.rpkidb.models.Parent.objects.filter(tenant = self.tenant):
+ try:
+ logger.debug("%r: Executing list query", self)
+ list_r_msg = yield parent.up_down_list_query(rpkid = self.rpkid)
+ except:
+ logger.exception("%r: Couldn't get resource class list from %r, skipping", self, parent)
+ continue
+
+ logger.debug("%r: Parsing list response", self)
+
+ ca_map = dict((ca.parent_resource_class, ca) for ca in parent.cas.all())
+
+ for rc in list_r_msg.getiterator(rpki.up_down.tag_class):
+ try:
+ class_name = rc.get("class_name")
+ ca = ca_map.pop(class_name, None)
+ if ca is None:
+ yield self.create(parent = parent, rc = rc, class_name = class_name)
+ else:
+ yield self.update(parent = parent, rc = rc, class_name = class_name, ca = ca)
+ except:
+ logger.exception("Couldn't update resource class %r, skipping", class_name)
+
+ for class_name, ca in ca_map.iteritems():
+ logger.debug("%r: Destroying orphaned %r for resource class %r", self, ca, class_name)
+ yield ca.destroy(rpkid = self.rpkid, parent = parent)
+
+ @tornado.gen.coroutine
+ def create(self, parent, rc, class_name):
+ logger.debug("%r: Creating new CA for resource class %r", self, class_name)
+ ca = rpki.rpkidb.models.CA.objects.create(
+ parent = parent,
+ parent_resource_class = class_name,
+ sia_uri = parent.construct_sia_uri(rc))
+ ca_detail = ca.create_detail()
+ r_msg = yield parent.up_down_issue_query(rpkid = self.rpkid, ca = ca, ca_detail = ca_detail)
+ elt = r_msg.find(rpki.up_down.tag_class).find(rpki.up_down.tag_certificate)
+ uri = elt.get("cert_url")
+ cert = rpki.x509.X509(Base64 = elt.text)
+ logger.debug("%r: %r received certificate %s", self, ca, uri)
+ yield ca_detail.activate(rpkid = self.rpkid, ca = ca, cert = cert, uri = uri)
+
+ @tornado.gen.coroutine
+ def update(self, parent, rc, class_name, ca):
+
+ # pylint: disable=C0330
+
+ logger.debug("%r: Checking updates for %r", self, ca)
+
+ sia_uri = parent.construct_sia_uri(rc)
+ sia_uri_changed = ca.sia_uri != sia_uri
+
+ if sia_uri_changed:
+ logger.debug("SIA changed: was %s now %s", ca.sia_uri, sia_uri)
+ ca.sia_uri = sia_uri
+
+ rc_resources = rpki.resource_set.resource_bag(
+ asn = rc.get("resource_set_as"),
+ v4 = rc.get("resource_set_ipv4"),
+ v6 = rc.get("resource_set_ipv6"),
+ valid_until = rc.get("resource_set_notafter"))
+
+ cert_map = {}
+
+ for c in rc.getiterator(rpki.up_down.tag_certificate):
+ x = rpki.x509.X509(Base64 = c.text)
+ u = rpki.up_down.multi_uri(c.get("cert_url")).rsync()
+ cert_map[x.gSKI()] = (x, u)
+
+ ca_details = ca.ca_details.exclude(state = "revoked")
+
+ if not ca_details:
+ logger.warning("Existing resource class %s to %s from %s with no certificates, rekeying",
+ class_name, parent.tenant.tenant_handle, parent.parent_handle)
+ yield ca.rekey(rpkid = self.rpkid)
+ return
+
+ for ca_detail in ca_details:
+
+ rc_cert, rc_cert_uri = cert_map.pop(ca_detail.public_key.gSKI(), (None, None))
+
+ if rc_cert is None:
+ logger.warning("g(SKI) %s in resource class %s is in database but missing from list_response to %s from %s, "
+ "maybe parent certificate went away?",
+ ca_detail.public_key.gSKI(), class_name, parent.tenant.tenant_handle, parent.parent_handle)
+ publisher = rpki.rpkid.publication_queue(rpkid = self.rpkid)
+ ca_detail.destroy(publisher = publisher)
+ yield publisher.call_pubd()
+ continue
+
+ if ca_detail.state == "active" and ca_detail.ca_cert_uri != rc_cert_uri:
+ logger.debug("AIA changed: was %s now %s", ca_detail.ca_cert_uri, rc_cert_uri)
+ ca_detail.ca_cert_uri = rc_cert_uri
+ ca_detail.save()
+
+ if ca_detail.state not in ("pending", "active"):
+ continue
+
+ if ca_detail.state == "pending":
+ current_resources = rpki.resource_set.resource_bag()
+ else:
+ current_resources = ca_detail.latest_ca_cert.get_3779resources()
+
+ if (ca_detail.state == "pending" or
+ sia_uri_changed or
+ ca_detail.latest_ca_cert != rc_cert or
+ ca_detail.latest_ca_cert.getNotAfter() != rc_resources.valid_until or
+ current_resources.undersized(rc_resources) or
+ current_resources.oversized(rc_resources)):
+
+ yield ca_detail.update(
+ rpkid = self.rpkid,
+ parent = parent,
+ ca = ca,
+ rc = rc,
+ sia_uri_changed = sia_uri_changed,
+ old_resources = current_resources)
+
+ if cert_map:
+ logger.warning("Unknown certificate g(SKI)%s %s in resource class %s in list_response to %s from %s, maybe you want to \"revoke_forgotten\"?",
+ "" if len(cert_map) == 1 else "s", ", ".join(cert_map), class_name, parent.tenant.tenant_handle, parent.parent_handle)
@queue_task
class UpdateChildrenTask(AbstractTask):
- """
- Check for updated IRDB data for all of this self's children and
- issue new certs as necessary. Must handle changes both in
- resources and in expiration date.
- """
-
- def clear(self):
- self.now = None
- self.rsn = None
- self.publisher = None
- self.iterator = None
- self.child = None
- self.child_certs = None
-
- def start(self):
- self.gctx.checkpoint()
- logger.debug("Self %s[%d] updating children", self.self_handle, self.self_id)
- self.now = rpki.sundial.now()
- self.rsn = self.now + rpki.sundial.timedelta(seconds = self.regen_margin)
- self.publisher = rpki.rpkid.publication_queue()
- rpki.async.iterator(self.children, self.loop, self.done)
-
- def loop(self, iterator, child):
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- self.iterator = iterator
- self.child = child
- self.child_certs = child.child_certs
- if self.overdue:
- self.publisher.call_pubd(lambda: self.postpone(self.do_child), self.publication_failed)
- else:
- self.do_child()
-
- def do_child(self):
- if self.child_certs:
- self.gctx.irdb_query_child_resources(self.child.self.self_handle, self.child.child_handle,
- self.got_resources, self.lose)
- else:
- self.iterator()
-
- def lose(self, e):
- logger.exception("Couldn't update child %r, skipping", self.child)
- self.iterator()
-
- def got_resources(self, irdb_resources):
- try:
- for child_cert in self.child_certs:
- ca_detail = child_cert.ca_detail
- ca = ca_detail.ca
- if ca_detail.state == "active":
- old_resources = child_cert.cert.get_3779resources()
- new_resources = old_resources & irdb_resources & ca_detail.latest_ca_cert.get_3779resources()
- old_aia = child_cert.cert.get_AIA()[0]
- new_aia = ca_detail.ca_cert_uri
-
- if new_resources.empty():
- logger.debug("Resources shrank to the null set, revoking and withdrawing child %s certificate SKI %s",
- self.child.child_handle, child_cert.cert.gSKI())
- child_cert.revoke(publisher = self.publisher)
- ca_detail.generate_crl(publisher = self.publisher)
- ca_detail.generate_manifest(publisher = self.publisher)
-
- elif (old_resources != new_resources or
- old_aia != new_aia or
- (old_resources.valid_until < self.rsn and
- irdb_resources.valid_until > self.now and
- old_resources.valid_until != irdb_resources.valid_until)):
-
- logger.debug("Need to reissue child %s certificate SKI %s",
- self.child.child_handle, child_cert.cert.gSKI())
- if old_resources != new_resources:
- logger.debug("Child %s SKI %s resources changed: old %s new %s",
- self.child.child_handle, child_cert.cert.gSKI(),
- old_resources, new_resources)
- if old_resources.valid_until != irdb_resources.valid_until:
- logger.debug("Child %s SKI %s validity changed: old %s new %s",
- self.child.child_handle, child_cert.cert.gSKI(),
- old_resources.valid_until, irdb_resources.valid_until)
-
- new_resources.valid_until = irdb_resources.valid_until
- child_cert.reissue(
- ca_detail = ca_detail,
- resources = new_resources,
- publisher = self.publisher)
-
- elif old_resources.valid_until < self.now:
- logger.debug("Child %s certificate SKI %s has expired: cert.valid_until %s, irdb.valid_until %s",
- self.child.child_handle, child_cert.cert.gSKI(),
- old_resources.valid_until, irdb_resources.valid_until)
- child_cert.sql_delete()
- self.publisher.withdraw(
- cls = rpki.publication.certificate_elt,
- uri = child_cert.uri,
- obj = child_cert.cert,
- repository = ca.parent.repository)
- ca_detail.generate_manifest(publisher = self.publisher)
-
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception, e:
- self.gctx.checkpoint()
- self.lose(e)
- else:
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- self.iterator()
-
- def done(self):
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- self.publisher.call_pubd(self.exit, self.publication_failed)
-
- def publication_failed(self, e):
- logger.exception("Couldn't publish for %s, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
+ """
+ Check for updated IRDB data for all of this tenant's children and
+ issue new certs as necessary. Must handle changes both in
+ resources and in expiration date.
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Updating children", self)
+ now = rpki.sundial.now()
+ rsn = now + rpki.sundial.timedelta(seconds = self.tenant.regen_margin)
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+ postponing = False
+
+ child_certs = rpki.rpkidb.models.ChildCert.objects.filter(child__tenant = self.tenant, ca_detail__state = "active")
+ child_handles = sorted(set(child_cert.child.child_handle for child_cert in child_certs))
+ irdb_resources = dict(zip(child_handles, (yield self.rpkid.irdb_query_children_resources(self.tenant.tenant_handle, child_handles))))
+
+ for child_cert in child_certs:
+ try:
+ ca_detail = child_cert.ca_detail
+ child_handle = child_cert.child.child_handle
+ old_resources = child_cert.cert.get_3779resources()
+ new_resources = old_resources & irdb_resources[child_handle] & ca_detail.latest_ca_cert.get_3779resources()
+ old_aia = child_cert.cert.get_AIA()[0]
+ new_aia = ca_detail.ca_cert_uri
+
+ assert child_cert.gski == child_cert.cert.gSKI()
+
+ if new_resources.empty():
+ logger.debug("Resources shrank to null set, revoking and withdrawing child %s g(SKI) %s",
+ child_handle, child_cert.gski)
+ child_cert.revoke(publisher = publisher)
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+
+ elif (old_resources != new_resources or old_aia != new_aia or
+ (old_resources.valid_until < rsn and
+ irdb_resources[child_handle].valid_until > now and
+ old_resources.valid_until != irdb_resources[child_handle].valid_until)):
+ logger.debug("Need to reissue child %s certificate g(SKI) %s", child_handle,
+ child_cert.gski)
+ if old_resources != new_resources:
+ logger.debug("Child %s g(SKI) %s resources changed: old %s new %s",
+ child_handle, child_cert.gski, old_resources, new_resources)
+ if old_resources.valid_until != irdb_resources[child_handle].valid_until:
+ logger.debug("Child %s g(SKI) %s validity changed: old %s new %s",
+ child_handle, child_cert.gski, old_resources.valid_until,
+ irdb_resources[child_handle].valid_until)
+
+ new_resources.valid_until = irdb_resources[child_handle].valid_until
+ child_cert.reissue(ca_detail = ca_detail, resources = new_resources, publisher = publisher)
+
+ elif old_resources.valid_until < now:
+ logger.debug("Child %s certificate g(SKI) %s has expired: cert.valid_until %s, irdb.valid_until %s",
+ child_handle, child_cert.gski, old_resources.valid_until,
+ irdb_resources[child_handle].valid_until)
+ child_cert.delete()
+ publisher.queue(uri = child_cert.uri,
+ old_obj = child_cert.cert,
+ repository = ca_detail.ca.parent.repository)
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+
+ except:
+ logger.exception("%r: Couldn't update %r, skipping", self, child_cert)
+
+ finally:
+ if (yield self.overdue()):
+ postponing = True
+ break
+ try:
+ yield publisher.call_pubd()
+ except:
+ logger.exception("%r: Couldn't publish, skipping", self)
-@queue_task
-class UpdateROAsTask(AbstractTask):
- """
- Generate or update ROAs for this self.
- """
-
- def clear(self):
- self.orphans = None
- self.updates = None
- self.publisher = None
- self.ca_details = None
- self.count = None
-
- def start(self):
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- logger.debug("Self %s[%d] updating ROAs", self.self_handle, self.self_id)
-
- logger.debug("Issuing query for ROA requests")
- self.gctx.irdb_query_roa_requests(self.self_handle, self.got_roa_requests, self.roa_requests_failed)
-
- def got_roa_requests(self, roa_requests):
- self.gctx.checkpoint()
- logger.debug("Received response to query for ROA requests")
-
- if self.gctx.sql.dirty:
- logger.warning("Unexpected dirty SQL cache, flushing")
- self.gctx.sql.sweep()
-
- roas = {}
- seen = set()
- self.orphans = []
- self.updates = []
- self.publisher = rpki.rpkid.publication_queue()
- self.ca_details = set()
-
- for roa in self.roas:
- k = (roa.asn, str(roa.ipv4), str(roa.ipv6))
- if k not in roas:
- roas[k] = roa
- elif (roa.roa is not None and roa.cert is not None and roa.ca_detail is not None and roa.ca_detail.state == "active" and
- (roas[k].roa is None or roas[k].cert is None or roas[k].ca_detail is None or roas[k].ca_detail.state != "active")):
- self.orphans.append(roas[k])
- roas[k] = roa
- else:
- self.orphans.append(roa)
-
- for roa_request in roa_requests:
- k = (roa_request.asn, str(roa_request.ipv4), str(roa_request.ipv6))
- if k in seen:
- logger.warning("Skipping duplicate ROA request %r", roa_request)
- else:
- seen.add(k)
- roa = roas.pop(k, None)
- if roa is None:
- roa = rpki.rpkid.roa_obj(self.gctx, self.self_id, roa_request.asn, roa_request.ipv4, roa_request.ipv6)
- logger.debug("Created new %r", roa)
- else:
- logger.debug("Found existing %r", roa)
- self.updates.append(roa)
-
- self.orphans.extend(roas.itervalues())
-
- if self.overdue:
- self.postpone(self.begin_loop)
- else:
- self.begin_loop()
-
- def begin_loop(self):
- self.count = 0
- rpki.async.iterator(self.updates, self.loop, self.done, pop_list = True)
-
- def loop(self, iterator, roa):
- self.gctx.checkpoint()
- try:
- roa.update(publisher = self.publisher, fast = True)
- self.ca_details.add(roa.ca_detail)
- self.gctx.sql.sweep()
- except (SystemExit, rpki.async.ExitNow):
- raise
- except rpki.exceptions.NoCoveringCertForROA:
- logger.warning("No covering certificate for %r, skipping", roa)
- except Exception:
- logger.exception("Could not update %r, skipping", roa)
- self.count += 1
- if self.overdue:
- self.publish(lambda: self.postpone(iterator))
- else:
- iterator()
-
- def publish(self, done):
- if not self.publisher.empty():
- for ca_detail in self.ca_details:
- logger.debug("Generating new CRL for %r", ca_detail)
- ca_detail.generate_crl(publisher = self.publisher)
- logger.debug("Generating new manifest for %r", ca_detail)
- ca_detail.generate_manifest(publisher = self.publisher)
- self.ca_details.clear()
- self.gctx.sql.sweep()
- self.gctx.checkpoint()
- self.publisher.call_pubd(done, self.publication_failed)
-
- def publication_failed(self, e):
- logger.exception("Couldn't publish for %s, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
-
- def done(self):
- for roa in self.orphans:
- try:
- self.ca_details.add(roa.ca_detail)
- roa.revoke(publisher = self.publisher, fast = True)
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception:
- logger.exception("Could not revoke %r", roa)
- self.gctx.sql.sweep()
- self.gctx.checkpoint()
- self.publish(self.exit)
-
- def roa_requests_failed(self, e):
- logger.exception("Could not fetch ROA requests for %s, skipping", self.self_handle)
- self.exit()
+ if postponing:
+ raise PostponeTask
@queue_task
-class UpdateGhostbustersTask(AbstractTask):
- """
- Generate or update Ghostbuster records for this self.
-
- This was originally based on the ROA update code. It's possible
- that both could benefit from refactoring, but at this point the
- potential scaling issues for ROAs completely dominate structure of
- the ROA code, and aren't relevant here unless someone is being
- exceptionally silly.
- """
-
- def start(self):
- self.gctx.checkpoint()
- logger.debug("Self %s[%d] updating Ghostbuster records",
- self.self_handle, self.self_id)
-
- self.gctx.irdb_query_ghostbuster_requests(self.self_handle,
- (p.parent_handle for p in self.parents),
- self.got_ghostbuster_requests,
- self.ghostbuster_requests_failed)
-
- def got_ghostbuster_requests(self, ghostbuster_requests):
-
- try:
- self.gctx.checkpoint()
- if self.gctx.sql.dirty:
- logger.warning("Unexpected dirty SQL cache, flushing")
- self.gctx.sql.sweep()
-
- ghostbusters = {}
- orphans = []
- publisher = rpki.rpkid.publication_queue()
- ca_details = set()
- seen = set()
-
- parents = dict((p.parent_handle, p) for p in self.parents)
-
- for ghostbuster in self.ghostbusters:
- k = (ghostbuster.ca_detail_id, ghostbuster.vcard)
- if ghostbuster.ca_detail.state != "active" or k in ghostbusters:
- orphans.append(ghostbuster)
- else:
- ghostbusters[k] = ghostbuster
-
- for ghostbuster_request in ghostbuster_requests:
- if ghostbuster_request.parent_handle not in parents:
- logger.warning("Unknown parent_handle %r in Ghostbuster request, skipping", ghostbuster_request.parent_handle)
- continue
- k = (ghostbuster_request.parent_handle, ghostbuster_request.vcard)
- if k in seen:
- logger.warning("Skipping duplicate Ghostbuster request %r", ghostbuster_request)
- continue
- seen.add(k)
- for ca in parents[ghostbuster_request.parent_handle].cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None:
- ghostbuster = ghostbusters.pop((ca_detail.ca_detail_id, ghostbuster_request.vcard), None)
- if ghostbuster is None:
- ghostbuster = rpki.rpkid.ghostbuster_obj(self.gctx, self.self_id, ca_detail.ca_detail_id, ghostbuster_request.vcard)
- logger.debug("Created new %r for %r", ghostbuster, ghostbuster_request.parent_handle)
+class UpdateROAsTask(AbstractTask):
+ """
+ Generate or update ROAs for this tenant.
+ """
+
+ # XXX This might need rewriting to avoid race conditions.
+ #
+ # There's a theoretical race condition here if we're chugging away
+ # and something else needs to update the manifest or CRL, or if
+ # some back-end operation generates or destroys ROAs. The risk is
+ # fairly low given that we defer CRL and manifest generation until
+ # we're ready to publish, but it's theoretically present.
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Updating ROAs", self)
+
+ try:
+ r_msg = yield self.rpkid.irdb_query_roa_requests(self.tenant.tenant_handle)
+ except:
+ logger.exception("Could not fetch ROA requests for %s, skipping", self.tenant.tenant_handle)
+ return
+
+ logger.debug("%r: Received response to query for ROA requests: %r", self, r_msg)
+
+ roas = {}
+ seen = set()
+ orphans = []
+ creates = []
+ updates = []
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+ ca_details = set()
+
+ for roa in self.tenant.roas.all():
+ k = "{!s} {!s} {!s}".format(roa.asn, roa.ipv4, roa.ipv6)
+ if k not in roas:
+ roas[k] = roa
+ elif roa.roa is not None and roa.cert is not None and roa.ca_detail is not None and roa.ca_detail.state == "active" and (roas[k].roa is None or roas[k].cert is None or roas[k].ca_detail is None or roas[k].ca_detail.state != "active"):
+ orphans.append(roas[k])
+ roas[k] = roa
+ else:
+ orphans.append(roa)
+
+ for r_pdu in r_msg:
+ k = "{!s} {!s} {!s}".format(r_pdu.get("asn"), r_pdu.get("ipv4"), r_pdu.get("ipv6"))
+ if k in seen:
+ logger.warning("%r: Skipping duplicate ROA request %r", self, r_pdu)
+ continue
+ seen.add(k)
+ roa = roas.pop(k, None)
+ if roa is None:
+ roa = rpki.rpkidb.models.ROA(tenant = self.tenant, asn = long(r_pdu.get("asn")), ipv4 = r_pdu.get("ipv4"), ipv6 = r_pdu.get("ipv6"))
+ logger.debug("%r: Try to create %r", self, roa)
+ creates.append(roa)
else:
- logger.debug("Found existing %r for %s", ghostbuster, ghostbuster_request.parent_handle)
- ghostbuster.update(publisher = publisher, fast = True)
- ca_details.add(ca_detail)
+ logger.debug("%r: Found existing %r", self, roa)
+ updates.append(roa)
+
+ orphans.extend(roas.itervalues())
+
+ roas = creates + updates
+
+ r_msg = seen = creates = updates = None
- orphans.extend(ghostbusters.itervalues())
- for ghostbuster in orphans:
- ca_details.add(ghostbuster.ca_detail)
- ghostbuster.revoke(publisher = publisher, fast = True)
+ postponing = False
- for ca_detail in ca_details:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
+ while roas and not postponing:
+ if (yield self.overdue()):
+ postponing = True
+ break
+ roa = roas.pop(0)
+ try:
+ roa.update(publisher = publisher)
+ ca_details.add(roa.ca_detail)
+ except rpki.exceptions.NoCoveringCertForROA:
+ logger.warning("%r: No covering certificate for %r, skipping", self, roa)
+ except:
+ logger.exception("%r: Could not update %r, skipping", self, roa)
- self.gctx.sql.sweep()
+ if not postponing:
+ for roa in orphans:
+ try:
+ ca_details.add(roa.ca_detail)
+ roa.revoke(publisher = publisher)
+ except:
+ logger.exception("%r: Could not revoke %r", self, roa)
- self.gctx.checkpoint()
- publisher.call_pubd(self.exit, self.publication_failed)
+ if not publisher.empty():
+ for ca_detail in ca_details:
+ logger.debug("%r: Generating new CRL and manifest for %r", self, ca_detail)
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+ yield publisher.call_pubd()
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception:
- logger.exception("Could not update Ghostbuster records for %s, skipping", self.self_handle)
- self.exit()
+ if postponing:
+ raise PostponeTask
- def publication_failed(self, e):
- logger.exception("Couldn't publish Ghostbuster updates for %s, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
- def ghostbuster_requests_failed(self, e):
- logger.exception("Could not fetch Ghostbuster record requests for %s, skipping", self.self_handle)
- self.exit()
+@queue_task
+class UpdateGhostbustersTask(AbstractTask):
+ """
+ Generate or update Ghostbuster records for this tenant.
+
+ This was originally based on the ROA update code. It's possible
+ that both could benefit from refactoring, but at this point the
+ potential scaling issues for ROAs completely dominate structure of
+ the ROA code, and aren't relevant here unless someone is being
+ exceptionally silly.
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Updating Ghostbuster records", self)
+ parent_handles = set(p.parent_handle for p in rpki.rpkidb.models.Parent.objects.filter(tenant = self.tenant))
+
+ try:
+ r_msg = yield self.rpkid.irdb_query_ghostbuster_requests(self.tenant.tenant_handle, parent_handles)
+
+ ghostbusters = {}
+ orphans = []
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+ ca_details = set()
+ seen = set()
+
+ for ghostbuster in self.tenant.ghostbusters.all():
+ k = (ghostbuster.ca_detail.pk, ghostbuster.vcard)
+ if ghostbuster.ca_detail.state != "active" or k in ghostbusters:
+ orphans.append(ghostbuster)
+ else:
+ ghostbusters[k] = ghostbuster
+
+ for r_pdu in r_msg:
+ if not rpki.rpkidb.models.Parent.objects.filter(tenant = self.tenant, parent_handle = r_pdu.get("parent_handle")).exists():
+ logger.warning("%r: Unknown parent_handle %r in Ghostbuster request, skipping", self, r_pdu.get("parent_handle"))
+ continue
+ k = (r_pdu.get("parent_handle"), r_pdu.text)
+ if k in seen:
+ logger.warning("%r: Skipping duplicate Ghostbuster request %r", self, r_pdu)
+ continue
+ seen.add(k)
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(ca__parent__parent_handle = r_pdu.get("parent_handle"),
+ ca__parent__tenant = self.tenant,
+ state = "active"):
+ ghostbuster = ghostbusters.pop((ca_detail.pk, r_pdu.text), None)
+ if ghostbuster is None:
+ ghostbuster = rpki.rpkidb.models.Ghostbuster(tenant = self.tenant, ca_detail = ca_detail, vcard = r_pdu.text)
+ logger.debug("%r: Created new %r for %r", self, ghostbuster, r_pdu.get("parent_handle"))
+ else:
+ logger.debug("%r: Found existing %r for %r", self, ghostbuster, r_pdu.get("parent_handle"))
+ ghostbuster.update(publisher = publisher)
+ ca_details.add(ca_detail)
+
+ orphans.extend(ghostbusters.itervalues())
+ for ghostbuster in orphans:
+ ca_details.add(ghostbuster.ca_detail)
+ ghostbuster.revoke(publisher = publisher)
+
+ for ca_detail in ca_details:
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+
+ yield publisher.call_pubd()
+
+ except:
+ logger.exception("Could not update Ghostbuster records for %s, skipping", self.tenant.tenant_handle)
@queue_task
class UpdateEECertificatesTask(AbstractTask):
- """
- Generate or update EE certificates for this self.
-
- Not yet sure what kind of scaling constraints this task might have,
- so keeping it simple for initial version, we can optimize later.
- """
-
- def start(self):
- self.gctx.checkpoint()
- logger.debug("Self %s[%d] updating EE certificates", self.self_handle, self.self_id)
-
- self.gctx.irdb_query_ee_certificate_requests(self.self_handle,
- self.got_requests,
- self.get_requests_failed)
-
- def got_requests(self, requests):
-
- try:
- self.gctx.checkpoint()
- if self.gctx.sql.dirty:
- logger.warning("Unexpected dirty SQL cache, flushing")
- self.gctx.sql.sweep()
-
- publisher = rpki.rpkid.publication_queue()
-
- existing = dict()
- for ee in self.ee_certificates:
- gski = ee.gski
- if gski not in existing:
- existing[gski] = set()
- existing[gski].add(ee)
-
- ca_details = set()
-
- for req in requests:
- ees = existing.pop(req.gski, ())
- resources = rpki.resource_set.resource_bag(
- asn = req.asn,
- v4 = req.ipv4,
- v6 = req.ipv6,
- valid_until = req.valid_until)
- covering = self.find_covering_ca_details(resources)
- ca_details.update(covering)
-
- for ee in ees:
- if ee.ca_detail in covering:
- logger.debug("Updating existing EE certificate for %s %s",
- req.gski, resources)
- ee.reissue(
- resources = resources,
- publisher = publisher)
- covering.remove(ee.ca_detail)
- else:
- logger.debug("Existing EE certificate for %s %s is no longer covered",
- req.gski, resources)
- ee.revoke(publisher = publisher)
-
- for ca_detail in covering:
- logger.debug("No existing EE certificate for %s %s",
- req.gski, resources)
- rpki.rpkid.ee_cert_obj.create(
- ca_detail = ca_detail,
- subject_name = rpki.x509.X501DN.from_cn(req.cn, req.sn),
- subject_key = req.pkcs10.getPublicKey(),
- resources = resources,
- publisher = publisher,
- eku = req.eku or None)
-
- # Anything left is an orphan
- for ees in existing.values():
- for ee in ees:
- ca_details.add(ee.ca_detail)
- ee.revoke(publisher = publisher)
-
- self.gctx.sql.sweep()
-
- for ca_detail in ca_details:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
-
- self.gctx.sql.sweep()
-
- self.gctx.checkpoint()
- publisher.call_pubd(self.exit, self.publication_failed)
-
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception:
- logger.exception("Could not update EE certificates for %s, skipping", self.self_handle)
- self.exit()
-
- def publication_failed(self, e):
- logger.exception("Couldn't publish EE certificate updates for %s, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
-
- def get_requests_failed(self, e):
- logger.exception("Could not fetch EE certificate requests for %s, skipping", self.self_handle)
- self.exit()
+ """
+ Generate or update EE certificates for this tenant.
+
+ Not yet sure what kind of scaling constraints this task might have,
+ so keeping it simple for initial version, we can optimize later.
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Updating EE certificates", self)
+
+ try:
+ r_msg = yield self.rpkid.irdb_query_ee_certificate_requests(self.tenant.tenant_handle)
+
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+
+ logger.debug("%r: Examining EE certificate requests", self)
+
+ existing = dict()
+ for ee in self.tenant.ee_certificates.all():
+ gski = ee.gski
+ if gski not in existing:
+ existing[gski] = set()
+ existing[gski].add(ee)
+
+ ca_details = set()
+
+ for r_pdu in r_msg:
+ gski = r_pdu.get("gski")
+ ees = existing.pop(gski, ())
+
+ resources = rpki.resource_set.resource_bag(
+ asn = r_pdu.get("asn"),
+ v4 = r_pdu.get("ipv4"),
+ v6 = r_pdu.get("ipv6"),
+ valid_until = r_pdu.get("valid_until"))
+ covering = self.tenant.find_covering_ca_details(resources)
+ ca_details.update(covering)
+
+ for ee in ees:
+ if ee.ca_detail in covering:
+ logger.debug("%r: Updating %r for %s %s", self, ee, gski, resources)
+ ee.reissue(resources = resources, publisher = publisher)
+ covering.remove(ee.ca_detail)
+ else:
+ # This probably never happens, as the most likely cause would be a CA certificate
+ # being revoked, which should trigger automatic clean up of issued certificates.
+ logger.debug("%r: %r for %s %s is no longer covered", self, ee, gski, resources)
+ ca_details.add(ee.ca_detail)
+ ee.revoke(publisher = publisher)
+
+ subject_name = rpki.x509.X501DN.from_cn(r_pdu.get("cn"), r_pdu.get("sn"))
+ subject_key = rpki.x509.PKCS10(Base64 = r_pdu[0].text).getPublicKey()
+
+ for ca_detail in covering:
+ logger.debug("%r: No existing EE certificate for %s %s", self, gski, resources)
+ cn, sn = subject_name.extract_cn_and_sn()
+ cert = ca_detail.issue_ee(
+ ca = ca_detail.ca,
+ subject_key = subject_key,
+ sia = None,
+ resources = resources,
+ notAfter = resources.valid_until,
+ cn = cn,
+ sn = sn,
+ eku = r_pdu.get("eku", "").split(",") or None)
+ ee = rpki.rpkidb.models.EECertificate.objects.create(
+ tenant = ca_detail.ca.parent.tenant,
+ ca_detail = ca_detail,
+ cert = cert,
+ gski = subject_key.gSKI())
+ publisher.queue(
+ uri = ee.uri,
+ new_obj = cert,
+ repository = ca_detail.ca.parent.repository,
+ handler = ee.published_callback)
+
+ # Anything left is an orphan
+ for ees in existing.values():
+ for ee in ees:
+ ca_details.add(ee.ca_detail)
+ ee.revoke(publisher = publisher)
+
+ for ca_detail in ca_details:
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+
+ yield publisher.call_pubd()
+
+ except:
+ logger.exception("%r: Could not update EE certificates, skipping", self)
@queue_task
class RegenerateCRLsAndManifestsTask(AbstractTask):
- """
- Generate new CRLs and manifests as necessary for all of this self's
- CAs. Extracting nextUpdate from a manifest is hard at the moment
- due to implementation silliness, so for now we generate a new
- manifest whenever we generate a new CRL
-
- This code also cleans up tombstones left behind by revoked ca_detail
- objects, since we're walking through the relevant portions of the
- database anyway.
- """
-
- def start(self):
- self.gctx.checkpoint()
- logger.debug("Self %s[%d] regenerating CRLs and manifests",
- self.self_handle, self.self_id)
-
- now = rpki.sundial.now()
- crl_interval = rpki.sundial.timedelta(seconds = self.crl_interval)
- regen_margin = max(self.gctx.cron_period * 2, crl_interval / 4)
- publisher = rpki.rpkid.publication_queue()
-
- for parent in self.parents:
- for ca in parent.cas:
+ """
+ Generate new CRLs and manifests as necessary for all of this tenant's
+ CAs. Extracting nextUpdate from a manifest is hard at the moment
+ due to implementation silliness, so for now we generate a new
+ manifest whenever we generate a new CRL
+
+ This code also cleans up tombstones left behind by revoked ca_detail
+ objects, since we're walking through the relevant portions of the
+ database anyway.
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Regenerating CRLs and manifests", self)
+
try:
- for ca_detail in ca.revoked_ca_details:
- if now > ca_detail.latest_crl.getNextUpdate():
- ca_detail.delete(ca = ca, publisher = publisher)
- for ca_detail in ca.active_or_deprecated_ca_details:
- if now + regen_margin > ca_detail.latest_crl.getNextUpdate():
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception:
- logger.exception("Couldn't regenerate CRLs and manifests for CA %r, skipping", ca)
-
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- publisher.call_pubd(self.exit, self.lose)
-
- def lose(self, e):
- logger.exception("Couldn't publish updated CRLs and manifests for self %r, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+ now = rpki.sundial.now()
+
+ ca_details = rpki.rpkidb.models.CADetail.objects.filter(ca__parent__tenant = self.tenant,
+ next_crl_manifest_update__isnull = False)
+
+ for ca_detail in ca_details.filter(next_crl_manifest_update__lt = now,
+ state = "revoked"):
+ ca_detail.destroy(publisher = publisher)
+
+ for ca_detail in ca_details.filter(state__in = ("active", "deprecated"),
+ next_crl_manifest_update__lt = now + max(
+ rpki.sundial.timedelta(seconds = self.tenant.crl_interval) / 4,
+ rpki.sundial.timedelta(seconds = self.rpkid.cron_period ) * 2)):
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+
+ yield publisher.call_pubd()
+
+ except:
+ logger.exception("%r: Couldn't publish updated CRLs and manifests, skipping", self)
@queue_task
class CheckFailedPublication(AbstractTask):
- """
- Periodic check for objects we tried to publish but failed (eg, due
- to pubd being down or unreachable).
- """
-
- def start(self):
- publisher = rpki.rpkid.publication_queue()
- for parent in self.parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None:
- ca_detail.check_failed_publication(publisher)
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- publisher.call_pubd(self.exit, self.publication_failed)
-
- def publication_failed(self, e):
- logger.exception("Couldn't publish for %s, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
+ """
+ Periodic check for objects we tried to publish but failed (eg, due
+ to pubd being down or unreachable).
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Checking for failed publication actions", self)
+
+ try:
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(ca__parent__tenant = self.tenant, state = "active"):
+ ca_detail.check_failed_publication(publisher)
+ yield publisher.call_pubd()
+
+ except:
+ logger.exception("%r: Couldn't run failed publications, skipping", self)
diff --git a/rpki/rpkidb/__init__.py b/rpki/rpkidb/__init__.py
new file mode 100644
index 00000000..7764913c
--- /dev/null
+++ b/rpki/rpkidb/__init__.py
@@ -0,0 +1,3 @@
+# $Id$
+#
+# Placeholder for rpkidb Django models not yet written.
diff --git a/rpki/rpkidb/migrations/0001_initial.py b/rpki/rpkidb/migrations/0001_initial.py
new file mode 100644
index 00000000..274775e3
--- /dev/null
+++ b/rpki/rpkidb/migrations/0001_initial.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import rpki.fields
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='BSC',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('bsc_handle', models.SlugField(max_length=255)),
+ ('private_key_id', rpki.fields.RSAPrivateKeyField()),
+ ('pkcs10_request', rpki.fields.PKCS10Field()),
+ ('hash_alg', rpki.fields.EnumField(default='sha256', choices=[(1, 'sha256')])),
+ ('signing_cert', rpki.fields.CertificateField(null=True)),
+ ('signing_cert_crl', rpki.fields.CRLField(null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='CA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('last_crl_manifest_number', models.BigIntegerField(default=1)),
+ ('last_issued_sn', models.BigIntegerField(default=1)),
+ ('sia_uri', models.TextField(null=True)),
+ ('parent_resource_class', models.TextField(null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='CADetail',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('public_key', rpki.fields.PublicKeyField(null=True)),
+ ('private_key_id', rpki.fields.RSAPrivateKeyField(null=True)),
+ ('latest_crl', rpki.fields.CRLField(null=True)),
+ ('crl_published', rpki.fields.SundialField(null=True)),
+ ('latest_ca_cert', rpki.fields.CertificateField(null=True)),
+ ('manifest_private_key_id', rpki.fields.RSAPrivateKeyField(null=True)),
+ ('manifest_public_key', rpki.fields.PublicKeyField(null=True)),
+ ('latest_manifest', rpki.fields.ManifestField(null=True)),
+ ('manifest_published', rpki.fields.SundialField(null=True)),
+ ('next_crl_manifest_update', rpki.fields.SundialField(null=True)),
+ ('state', rpki.fields.EnumField(choices=[(1, 'pending'), (2, 'active'), (3, 'deprecated'), (4, 'revoked')])),
+ ('ca_cert_uri', models.TextField(null=True)),
+ ('ca', models.ForeignKey(related_name='ca_details', to='rpkidb.CA')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Child',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('child_handle', models.SlugField(max_length=255)),
+ ('bpki_cert', rpki.fields.CertificateField(null=True)),
+ ('bpki_glue', rpki.fields.CertificateField(null=True)),
+ ('last_cms_timestamp', rpki.fields.SundialField(null=True)),
+ ('bsc', models.ForeignKey(related_name='children', to='rpkidb.BSC')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ChildCert',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('cert', rpki.fields.CertificateField()),
+ ('published', rpki.fields.SundialField(null=True)),
+ ('gski', models.CharField(max_length=27)),
+ ('ca_detail', models.ForeignKey(related_name='child_certs', to='rpkidb.CADetail')),
+ ('child', models.ForeignKey(related_name='child_certs', to='rpkidb.Child')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EECertificate',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('gski', models.CharField(max_length=27)),
+ ('cert', rpki.fields.CertificateField()),
+ ('published', rpki.fields.SundialField(null=True)),
+ ('ca_detail', models.ForeignKey(related_name='ee_certificates', to='rpkidb.CADetail')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Ghostbuster',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('vcard', models.TextField()),
+ ('cert', rpki.fields.CertificateField()),
+ ('ghostbuster', rpki.fields.GhostbusterField()),
+ ('published', rpki.fields.SundialField(null=True)),
+ ('ca_detail', models.ForeignKey(related_name='ghostbusters', to='rpkidb.CADetail')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Parent',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('parent_handle', models.SlugField(max_length=255)),
+ ('bpki_cert', rpki.fields.CertificateField(null=True)),
+ ('bpki_glue', rpki.fields.CertificateField(null=True)),
+ ('peer_contact_uri', models.TextField(null=True)),
+ ('sia_base', models.TextField(null=True)),
+ ('sender_name', models.TextField(null=True)),
+ ('recipient_name', models.TextField(null=True)),
+ ('last_cms_timestamp', rpki.fields.SundialField(null=True)),
+ ('bsc', models.ForeignKey(related_name='parents', to='rpkidb.BSC')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Repository',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('repository_handle', models.SlugField(max_length=255)),
+ ('peer_contact_uri', models.TextField(null=True)),
+ ('rrdp_notification_uri', models.TextField(null=True)),
+ ('bpki_cert', rpki.fields.CertificateField(null=True)),
+ ('bpki_glue', rpki.fields.CertificateField(null=True)),
+ ('last_cms_timestamp', rpki.fields.SundialField(null=True)),
+ ('bsc', models.ForeignKey(related_name='repositories', to='rpkidb.BSC')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='RevokedCert',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('serial', models.BigIntegerField()),
+ ('revoked', rpki.fields.SundialField()),
+ ('expires', rpki.fields.SundialField()),
+ ('ca_detail', models.ForeignKey(related_name='revoked_certs', to='rpkidb.CADetail')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ROA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('asn', models.BigIntegerField()),
+ ('ipv4', models.TextField(null=True)),
+ ('ipv6', models.TextField(null=True)),
+ ('cert', rpki.fields.CertificateField()),
+ ('roa', rpki.fields.ROAField()),
+ ('published', rpki.fields.SundialField(null=True)),
+ ('ca_detail', models.ForeignKey(related_name='roas', to='rpkidb.CADetail')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Tenant',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('tenant_handle', models.SlugField(max_length=255)),
+ ('use_hsm', models.BooleanField(default=False)),
+ ('crl_interval', models.BigIntegerField(null=True)),
+ ('regen_margin', models.BigIntegerField(null=True)),
+ ('bpki_cert', rpki.fields.CertificateField(null=True)),
+ ('bpki_glue', rpki.fields.CertificateField(null=True)),
+ ],
+ ),
+ migrations.AddField(
+ model_name='roa',
+ name='tenant',
+ field=models.ForeignKey(related_name='roas', to='rpkidb.Tenant'),
+ ),
+ migrations.AddField(
+ model_name='repository',
+ name='tenant',
+ field=models.ForeignKey(related_name='repositories', to='rpkidb.Tenant'),
+ ),
+ migrations.AddField(
+ model_name='parent',
+ name='repository',
+ field=models.ForeignKey(related_name='parents', to='rpkidb.Repository'),
+ ),
+ migrations.AddField(
+ model_name='parent',
+ name='tenant',
+ field=models.ForeignKey(related_name='parents', to='rpkidb.Tenant'),
+ ),
+ migrations.AddField(
+ model_name='ghostbuster',
+ name='tenant',
+ field=models.ForeignKey(related_name='ghostbusters', to='rpkidb.Tenant'),
+ ),
+ migrations.AddField(
+ model_name='eecertificate',
+ name='tenant',
+ field=models.ForeignKey(related_name='ee_certificates', to='rpkidb.Tenant'),
+ ),
+ migrations.AddField(
+ model_name='child',
+ name='tenant',
+ field=models.ForeignKey(related_name='children', to='rpkidb.Tenant'),
+ ),
+ migrations.AddField(
+ model_name='ca',
+ name='parent',
+ field=models.ForeignKey(related_name='cas', to='rpkidb.Parent'),
+ ),
+ migrations.AddField(
+ model_name='bsc',
+ name='tenant',
+ field=models.ForeignKey(related_name='bscs', to='rpkidb.Tenant'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='repository',
+ unique_together=set([('tenant', 'repository_handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='parent',
+ unique_together=set([('tenant', 'parent_handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='child',
+ unique_together=set([('tenant', 'child_handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='bsc',
+ unique_together=set([('tenant', 'bsc_handle')]),
+ ),
+ ]
diff --git a/rpki/rpkidb/migrations/0002_root.py b/rpki/rpkidb/migrations/0002_root.py
new file mode 100644
index 00000000..de2b95dd
--- /dev/null
+++ b/rpki/rpkidb/migrations/0002_root.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('rpkidb', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='parent',
+ name='root_asn_resources',
+ field=models.TextField(default=''),
+ ),
+ migrations.AddField(
+ model_name='parent',
+ name='root_ipv4_resources',
+ field=models.TextField(default=''),
+ ),
+ migrations.AddField(
+ model_name='parent',
+ name='root_ipv6_resources',
+ field=models.TextField(default=''),
+ ),
+ ]
diff --git a/rpki/rpkidb/migrations/__init__.py b/rpki/rpkidb/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/rpkidb/migrations/__init__.py
diff --git a/rpki/rpkidb/models.py b/rpki/rpkidb/models.py
new file mode 100644
index 00000000..3021a0d4
--- /dev/null
+++ b/rpki/rpkidb/models.py
@@ -0,0 +1,2466 @@
+"""
+Django ORM models for rpkid.
+"""
+
+from __future__ import unicode_literals
+
+import logging
+
+import tornado.gen
+import tornado.web
+import tornado.ioloop
+import tornado.httputil
+import tornado.httpclient
+import tornado.httpserver
+
+from django.db import models
+
+import rpki.left_right
+import rpki.sundial
+
+from rpki.fields import (EnumField, SundialField,
+ CertificateField, RSAPrivateKeyField,
+ PublicKeyField, CRLField, PKCS10Field,
+ ManifestField, ROAField, GhostbusterField)
+
+from lxml.etree import Element, SubElement, tostring as ElementToString
+
+logger = logging.getLogger(__name__)
+
+# pylint: disable=W5101
+
+
+# XXX Temporary hack to help trace call chains so we can clear some of
+# the historical clutter out of this module.
+
+def trace_call_chain():
+ if False:
+ from traceback import extract_stack
+ caller, callee = extract_stack(None, 3)[:2]
+ caller_file, caller_line, caller_name = caller[:3]
+ callee_file, callee_line, callee_name = callee[:3]
+ logger.debug("<Call trace> %s() at %s:%s called by %s() at %s:%s",
+ callee_name, callee_file, callee_line,
+ caller_name, caller_file, caller_line)
+
+
+# The objects available via the left-right protocol allow NULL values
+# in places we wouldn't otherwise (eg, bpki_cert fields), to support
+# existing protocol which allows back-end to build up objects
+# gradually. We may want to rethink this eventually, but that yak can
+# wait for its shave, particularly since disallowing null should be a
+# very simple change given migrations.
+
+class XMLTemplate(object):
+ """
+ Encapsulate all the voodoo for transcoding between lxml and ORM.
+ """
+
+    # Whether to drop XML into the log
+
+ debug = False
+
+ # Type map to simplify declaration of Base64 sub-elements.
+
+ element_type = dict(bpki_cert = rpki.x509.X509,
+ bpki_glue = rpki.x509.X509,
+ rpki_root_cert = rpki.x509.X509,
+ pkcs10_request = rpki.x509.PKCS10,
+ signing_cert = rpki.x509.X509,
+ signing_cert_crl = rpki.x509.CRL)
+
+
+ def __init__(self, name, attributes = (), booleans = (), elements = (), readonly = (), handles = ()):
+ self.name = name
+ self.handles = handles
+ self.attributes = attributes
+ self.booleans = booleans
+ self.elements = elements
+ self.readonly = readonly
+
+
+ def encode(self, obj, q_pdu, r_msg):
+ """
+ Encode an ORM object as XML.
+ """
+
+ r_pdu = SubElement(r_msg, rpki.left_right.xmlns + self.name, nsmap = rpki.left_right.nsmap, action = q_pdu.get("action"))
+ if self.name != "tenant":
+ r_pdu.set("tenant_handle", obj.tenant.tenant_handle)
+ r_pdu.set(self.name + "_handle", getattr(obj, self.name + "_handle"))
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+ for h in self.handles:
+ k = h.xml_template.name
+ v = getattr(obj, k)
+ if v is not None:
+ r_pdu.set(k + "_handle", getattr(v, k + "_handle"))
+ for k in self.attributes:
+ v = getattr(obj, k)
+ if v is not None:
+ r_pdu.set(k, str(v))
+ for k in self.booleans:
+ if getattr(obj, k):
+ r_pdu.set(k, "yes")
+ for k in self.elements + self.readonly:
+ v = getattr(obj, k)
+ if v is not None and not v.empty():
+ SubElement(r_pdu, rpki.left_right.xmlns + k).text = v.get_Base64()
+ if self.debug:
+ logger.debug("XMLTemplate.encode(): %s", ElementToString(r_pdu))
+
+
+ def acknowledge(self, obj, q_pdu, r_msg):
+ """
+ Add an acknowledgement PDU in response to a create, set, or
+ destroy action.
+ """
+
+ assert q_pdu.tag == rpki.left_right.xmlns + self.name
+ action = q_pdu.get("action")
+ r_pdu = SubElement(r_msg, rpki.left_right.xmlns + self.name, nsmap = rpki.left_right.nsmap, action = action)
+ if self.name != "tenant":
+ r_pdu.set("tenant_handle", obj.tenant.tenant_handle)
+ r_pdu.set(self.name + "_handle", getattr(obj, self.name + "_handle"))
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+ if action != "destroy":
+ for k in self.readonly:
+ v = getattr(obj, k)
+ if v is not None and not v.empty():
+ SubElement(r_pdu, rpki.left_right.xmlns + k).text = v.get_Base64()
+ if self.debug:
+ logger.debug("XMLTemplate.acknowledge(): %s", ElementToString(r_pdu))
+
+
+ def decode(self, obj, q_pdu):
+ """
+ Decode XML into an ORM object.
+ """
+
+ if self.debug:
+ logger.debug("XMLTemplate.decode(): %r %s", obj, ElementToString(q_pdu))
+ assert q_pdu.tag == rpki.left_right.xmlns + self.name
+ for h in self.handles:
+ k = h.xml_template.name
+ v = q_pdu.get(k + "_handle")
+ if v is not None:
+ setattr(obj, k, h.objects.get(**{k + "_handle" : v, "tenant" : obj.tenant}))
+ for k in self.attributes:
+ v = q_pdu.get(k)
+ if v is not None:
+ v.encode("ascii")
+ if v.isdigit():
+ v = long(v)
+ setattr(obj, k, v)
+ for k in self.booleans:
+ v = q_pdu.get(k)
+ if v is not None:
+ setattr(obj, k, v == "yes")
+ for k in self.elements:
+ v = q_pdu.findtext(rpki.left_right.xmlns + k)
+ if v and v.strip():
+ setattr(obj, k, self.element_type[k](Base64 = v))
+
+
+class XMLManager(models.Manager):
+ """
+ Add a few methods which locate or create an object or objects
+ corresponding to the handles in an XML element, as appropriate.
+
+ This assumes that models which use it have an "xml_template"
+ class attribute holding an XMLTemplate object (above).
+ """
+
+ # Whether to blather about what we're doing
+
+ debug = False
+
+ # pylint: disable=E1101
+
+ def xml_get_or_create(self, xml):
+ name = self.model.xml_template.name
+ action = xml.get("action")
+ assert xml.tag == rpki.left_right.xmlns + name and action in ("create", "set")
+ d = { name + "_handle" : xml.get(name + "_handle") }
+ if name != "tenant" and action != "create":
+ d["tenant__tenant_handle"] = xml.get("tenant_handle")
+ if self.debug:
+ logger.debug("XMLManager.xml_get_or_create(): name %s action %s filter %r", name, action, d)
+ result = self.model(**d) if action == "create" else self.get(**d)
+ if name != "tenant" and action == "create":
+ result.tenant = Tenant.objects.get(tenant_handle = xml.get("tenant_handle"))
+ if self.debug:
+ logger.debug("XMLManager.xml_get_or_create(): name %s action %s filter %r result %r", name, action, d, result)
+ return result
+
+ def xml_list(self, xml):
+ name = self.model.xml_template.name
+ action = xml.get("action")
+ assert xml.tag == rpki.left_right.xmlns + name and action in ("get", "list")
+ d = {}
+ if action == "get":
+ d[name + "_handle"] = xml.get(name + "_handle")
+ if name != "tenant":
+ d["tenant__tenant_handle"] = xml.get("tenant_handle")
+ if self.debug:
+ logger.debug("XMLManager.xml_list(): name %s action %s filter %r", name, action, d)
+ result = self.filter(**d) if d else self.all()
+ if self.debug:
+ logger.debug("XMLManager.xml_list(): name %s action %s filter %r result %r", name, action, d, result)
+ return result
+
+ def xml_get_for_delete(self, xml):
+ name = self.model.xml_template.name
+ action = xml.get("action")
+ assert xml.tag == rpki.left_right.xmlns + name and action == "destroy"
+ d = { name + "_handle" : xml.get(name + "_handle") }
+ if name != "tenant":
+ d["tenant__tenant_handle"] = xml.get("tenant_handle")
+ if self.debug:
+ logger.debug("XMLManager.xml_get_for_delete(): name %s action %s filter %r", name, action, d)
+ result = self.get(**d)
+ if self.debug:
+ logger.debug("XMLManager.xml_get_for_delete(): name %s action %s filter %r result %r", name, action, d, result)
+ return result
+
+
+def xml_hooks(cls):
+ """
+ Class decorator to add default XML hooks.
+ """
+
+ # Maybe inheritance from an abstract model would work here. Then
+ # again, maybe we could use this decorator to do something prettier
+ # for the XMLTemplate setup. Whatever. Gussie up later.
+
+ def default_xml_pre_save_hook(self, q_pdu):
+ #logger.debug("default_xml_pre_save_hook()")
+ pass
+
+ @tornado.gen.coroutine
+ def default_xml_post_save_hook(self, rpkid, q_pdu):
+ #logger.debug("default_xml_post_save_hook()")
+ pass
+
+ @tornado.gen.coroutine
+ def default_xml_pre_delete_hook(self, rpkid):
+ #logger.debug("default_xml_pre_delete_hook()")
+ pass
+
+ for name, method in (("xml_pre_save_hook", default_xml_pre_save_hook),
+ ("xml_post_save_hook", default_xml_post_save_hook),
+ ("xml_pre_delete_hook", default_xml_pre_delete_hook)):
+ if not hasattr(cls, name):
+ setattr(cls, name, method)
+
+ return cls
+
+
+# Models.
+#
+# There's far too much random code hanging off of model methods, relic
+# of the earlier implementation. Clean up as time permits.
+
+@xml_hooks
+class Tenant(models.Model):
+ tenant_handle = models.SlugField(max_length = 255)
+ use_hsm = models.BooleanField(default = False)
+ crl_interval = models.BigIntegerField(null = True)
+ regen_margin = models.BigIntegerField(null = True)
+ bpki_cert = CertificateField(null = True)
+ bpki_glue = CertificateField(null = True)
+ objects = XMLManager()
+
+ xml_template = XMLTemplate(
+ name = "tenant",
+ attributes = ("crl_interval", "regen_margin"),
+ booleans = ("use_hsm",),
+ elements = ("bpki_cert", "bpki_glue"))
+
+ def __repr__(self):
+ try:
+ return "<Tenant: {}>".format(self.tenant_handle)
+ except:
+ return "<Tenant: Tenant object>"
+
+ @tornado.gen.coroutine
+ def xml_pre_delete_hook(self, rpkid):
+ trace_call_chain()
+ yield [parent.destroy(rpkid = rpkid) for parent in self.parents.all()]
+
+ @tornado.gen.coroutine
+ def xml_post_save_hook(self, rpkid, q_pdu):
+ trace_call_chain()
+
+ rekey = q_pdu.get("rekey")
+ revoke = q_pdu.get("revoke")
+ reissue = q_pdu.get("reissue")
+ revoke_forgotten = q_pdu.get("revoke_forgotten")
+
+ if q_pdu.get("clear_replay_protection"):
+ for parent in self.parents.all():
+ parent.clear_replay_protection()
+ for child in self.children.all():
+ child.clear_replay_protection()
+ for repository in self.repositories.all():
+ repository.clear_replay_protection()
+
+ futures = []
+
+ if rekey or revoke or reissue or revoke_forgotten:
+ for parent in self.parents.all():
+ if rekey:
+ futures.append(parent.serve_rekey(rpkid = rpkid))
+ if revoke:
+ futures.append(parent.serve_revoke(rpkid = rpkid))
+ if reissue:
+ futures.append(parent.serve_reissue(rpkid = rpkid))
+ if revoke_forgotten:
+ futures.append(parent.serve_revoke_forgotten(rpkid = rpkid))
+
+ if q_pdu.get("publish_world_now"):
+ futures.append(self.serve_publish_world_now(rpkid = rpkid))
+ if q_pdu.get("run_now"):
+ futures.append(self.serve_run_now(rpkid = rpkid))
+
+ yield futures
+
+
+ @tornado.gen.coroutine
+ def serve_publish_world_now(self, rpkid):
+ trace_call_chain()
+
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+ objects = dict()
+
+ for repository in self.repositories.all():
+ q_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+ SubElement(q_msg, rpki.publication.tag_list, tag = "list")
+ r_msg = yield repository.call_pubd(rpkid, q_msg, length_check = False)
+ if not all(r_pdu.tag == rpki.publication.tag_list for r_pdu in r_msg):
+ raise rpki.exceptions.BadPublicationReply("Unexpected XML tag in publication response")
+ objs = dict((r_pdu.get("uri"), (r_pdu.get("hash"), repository))
+ for r_pdu in r_msg if r_pdu.tag == rpki.publication.tag_list)
+ if any(uri in objects for uri in objs):
+ for uri in sorted(set(objects) & set(objs)):
+ logger.warning("Duplicated publication URI %s between %r and %r, this should not happen",
+ uri, objects[uri][1], objs[uri][1])
+ objects.update(objs)
+
+ for ca_detail in CADetail.objects.filter(ca__parent__tenant = self, state = "active"):
+ repository = ca_detail.ca.parent.repository
+ objs = [(ca_detail.crl_uri, ca_detail.latest_crl),
+ (ca_detail.manifest_uri, ca_detail.latest_manifest)]
+ objs.extend((c.uri, c.cert) for c in ca_detail.child_certs.all())
+ objs.extend((r.uri, r.roa) for r in ca_detail.roas.filter(roa__isnull = False))
+ objs.extend((g.uri, g.ghostbuster) for g in ca_detail.ghostbusters.all())
+ objs.extend((c.uri, c.cert) for c in ca_detail.ee_certificates.all())
+ for uri, obj in objs:
+ h, r = objects.get(uri, (None, None))
+ if uri in objects and r == repository:
+ publisher.queue(uri = uri, new_obj = obj, repository = repository, old_hash = h)
+ del objects[uri]
+ else:
+ publisher.queue(uri = uri, new_obj = obj, repository = repository)
+
+ for u in objects:
+ h, r = objects[u]
+ publisher.queue(uri = u, old_hash = h, repository = r)
+
+ yield publisher.call_pubd()
+
+
+ @tornado.gen.coroutine
+ def serve_run_now(self, rpkid):
+ trace_call_chain()
+ logger.debug("Forced immediate run of periodic actions for %r", self)
+ tasks = self.cron_tasks(rpkid = rpkid)
+ rpkid.task_add(*tasks)
+ yield [task.wait() for task in tasks]
+
+
+ def cron_tasks(self, rpkid):
+ trace_call_chain()
+ # pylint: disable=W0201
+ try:
+ return self._cron_tasks
+ except AttributeError:
+ self._cron_tasks = tuple(task(rpkid, self) for task in rpki.rpkid_tasks.task_classes)
+ return self._cron_tasks
+
+
+ def find_covering_ca_details(self, resources):
+ """
+ Return all active CADetails for this <tenant/> which cover a
+ particular set of resources.
+
+ If we expected there to be a large number of CADetails, we
+ could add index tables and write fancy SQL query to do this, but
+ for the expected common case where there are only one or two
+ active CADetails per <tenant/>, it's probably not worth it. In
+ any case, this is an optimization we can leave for later.
+ """
+
+ trace_call_chain()
+ return set(ca_detail
+ for ca_detail in CADetail.objects.filter(ca__parent__tenant = self, state = "active")
+ if ca_detail.covers(resources))
+
+
@xml_hooks
class BSC(models.Model):
    """
    BSC (Business Signing Context).

    Holds an RSA keypair, its PKCS #10 request, and the BPKI signing
    certificate/CRL that other objects in this module use to CMS-sign
    outgoing protocol messages.
    """

    bsc_handle = models.SlugField(max_length = 255)
    private_key_id = RSAPrivateKeyField()
    pkcs10_request = PKCS10Field()
    hash_alg = EnumField(choices = ("sha256",), default = "sha256")
    signing_cert = CertificateField(null = True)
    signing_cert_crl = CRLField(null = True)
    tenant = models.ForeignKey(Tenant, related_name = "bscs")
    objects = XMLManager()

    class Meta:
        unique_together = ("tenant", "bsc_handle")

    xml_template = XMLTemplate(
        name = "bsc",
        elements = ("signing_cert", "signing_cert_crl"),
        readonly = ("pkcs10_request",))

    def __repr__(self):
        # repr() must never raise, hence the deliberately broad except.
        try:
            return "<BSC: {}.{}>".format(self.tenant.tenant_handle, self.bsc_handle)
        except:
            return "<BSC: BSC object>"

    def xml_pre_save_hook(self, q_pdu):
        # Handle key generation, only supports RSA with SHA-256 for now.
        if q_pdu.get("generate_keypair"):
            assert q_pdu.get("key_type") in (None, "rsa") and q_pdu.get("hash_alg") in (None, "sha256")
            # Default key length is 2048 bits unless the query says otherwise.
            self.private_key_id = rpki.x509.RSA.generate(keylength = int(q_pdu.get("key_length", 2048)))
            self.pkcs10_request = rpki.x509.PKCS10.create(keypair = self.private_key_id)
+
+
@xml_hooks
class Repository(models.Model):
    """
    Publication repository used by this tenant.

    Stores the contact URI and BPKI trust material needed to talk to
    pubd, plus the CMS replay-protection timestamp for that channel.
    """

    repository_handle = models.SlugField(max_length = 255)
    peer_contact_uri = models.TextField(null = True)
    rrdp_notification_uri = models.TextField(null = True)
    bpki_cert = CertificateField(null = True)
    bpki_glue = CertificateField(null = True)
    last_cms_timestamp = SundialField(null = True)
    bsc = models.ForeignKey(BSC, related_name = "repositories")
    tenant = models.ForeignKey(Tenant, related_name = "repositories")
    objects = XMLManager()

    class Meta:
        unique_together = ("tenant", "repository_handle")

    xml_template = XMLTemplate(
        name = "repository",
        handles = (BSC,),
        attributes = ("peer_contact_uri", "rrdp_notification_uri"),
        elements = ("bpki_cert", "bpki_glue"))

    def __repr__(self):
        # repr() must never raise, hence the deliberately broad excepts.
        try:
            uri = " " + self.peer_contact_uri
        except:
            uri = ""
        try:
            return "<Repository: {}.{}{}>".format(self.tenant.tenant_handle, self.repository_handle, uri)
        except:
            return "<Repository: Repository object>"


    @tornado.gen.coroutine
    def xml_post_save_hook(self, rpkid, q_pdu):
        """
        Process left-right protocol action attributes after saving.
        """

        trace_call_chain()
        if q_pdu.get("clear_replay_protection"):
            self.clear_replay_protection()


    def clear_replay_protection(self):
        """
        Forget the last CMS timestamp seen from this repository, so the
        peer may restart its timestamp sequence.
        """

        trace_call_chain()
        self.last_cms_timestamp = None
        self.save()


    @tornado.gen.coroutine
    def call_pubd(self, rpkid, q_msg, handlers = None, length_check = True):
        """
        Send a message to publication daemon and return the response.

        As a convenience, attempting to send an empty message returns
        immediate success without sending anything.

        handlers is a dict of handler functions to process the
        response PDUs.  If the uri value in the response PDU appears
        in the dict, the associated handler is called to process the
        PDU; otherwise, a default handler is called to check for
        errors.  A handler value of False suppresses calling of the
        default handler.
        """

        trace_call_chain()
        if len(q_msg) == 0:
            return
        if handlers is None:
            handlers = {}
        for q_pdu in q_msg:
            logger.info("Sending %r hash = %s uri = %s to pubd", q_pdu, q_pdu.get("hash"), q_pdu.get("uri"))
        http_request = tornado.httpclient.HTTPRequest(
            url = self.peer_contact_uri,
            method = "POST",
            body = rpki.publication.cms_msg().wrap(q_msg, self.bsc.private_key_id,
                                                   self.bsc.signing_cert, self.bsc.signing_cert_crl),
            headers = { "Content-Type" : rpki.publication.content_type },
            connect_timeout = rpkid.http_client_timeout,
            request_timeout = rpkid.http_client_timeout)
        http_response = yield rpkid.http_fetch(http_request)
        if http_response.headers.get("Content-Type") not in rpki.publication.allowed_content_types:
            # Bug fix: the received Content-Type is the "got" value and our
            # canonical content type is the "expected" value; the two %r
            # arguments were previously swapped in this error message.
            raise rpki.exceptions.BadContentType("HTTP Content-Type %r, expected %r" % (
                http_response.headers.get("Content-Type"), rpki.publication.content_type))
        r_cms = rpki.publication.cms_msg(DER = http_response.body)
        r_msg = r_cms.unwrap((rpkid.bpki_ta, self.tenant.bpki_cert, self.tenant.bpki_glue, self.bpki_cert, self.bpki_glue))
        r_cms.check_replay_sql(self, self.peer_contact_uri)
        for r_pdu in r_msg:
            logger.info("Received %r hash = %s uri = %s from pubd", r_pdu, r_pdu.get("hash"), r_pdu.get("uri"))
            # Per-URI handler if supplied; default handler raises on <report_error/>.
            handler = handlers.get(r_pdu.get("uri"), rpki.publication.raise_if_error)
            if handler:
                logger.debug("Calling pubd handler %r", handler)
                handler(r_pdu)
        if length_check and len(q_msg) != len(r_msg):
            raise rpki.exceptions.BadPublicationReply("Wrong number of response PDUs from pubd: sent %r, got %r" % (q_msg, r_msg))
        raise tornado.gen.Return(r_msg)
+
+
@xml_hooks
class Parent(models.Model):
    """
    Our parent in the RPKI provisioning (up-down) protocol.

    When any of the root_*_resources fields is non-empty, this object
    acts as an internal RPKI root: up-down queries are short-circuited
    into query_up_down_root() instead of going over HTTP to a real
    parent.
    """

    parent_handle = models.SlugField(max_length = 255)
    tenant = models.ForeignKey(Tenant, related_name = "parents")
    repository = models.ForeignKey(Repository, related_name = "parents")
    bpki_cert = CertificateField(null = True)
    bpki_glue = CertificateField(null = True)
    peer_contact_uri = models.TextField(null = True)
    sia_base = models.TextField(null = True)
    sender_name = models.TextField(null = True)
    recipient_name = models.TextField(null = True)
    last_cms_timestamp = SundialField(null = True)
    bsc = models.ForeignKey(BSC, related_name = "parents")
    root_asn_resources = models.TextField(default = "")
    root_ipv4_resources = models.TextField(default = "")
    root_ipv6_resources = models.TextField(default = "")
    objects = XMLManager()

    class Meta:
        unique_together = ("tenant", "parent_handle")

    xml_template = XMLTemplate(
        name = "parent",
        handles = (BSC, Repository),
        attributes = ("peer_contact_uri", "sia_base", "sender_name", "recipient_name",
                      "root_asn_resources", "root_ipv4_resources", "root_ipv6_resources"),
        elements = ("bpki_cert", "bpki_glue"),
        readonly = ("rpki_root_cert",))


    def __repr__(self):
        # repr() must never raise, hence the deliberately broad excepts.
        try:
            uri = " " + self.peer_contact_uri
        except:
            uri = ""
        try:
            return "<Parent: {}.{}{}>".format(self.tenant.tenant_handle, self.parent_handle, uri)
        except:
            return "<Parent: Parent object>"

    @property
    def rpki_root_cert(self):
        """
        Latest active CA certificate when acting as an internal root,
        otherwise None.
        """
        if self.root_asn_resources or self.root_ipv4_resources or self.root_ipv6_resources:
            logger.debug("%r checking for rpki_root_cert", self)
            try:
                return CADetail.objects.get(ca__parent = self, state = "active").latest_ca_cert
            except CADetail.DoesNotExist:
                pass
        return None

    @tornado.gen.coroutine
    def xml_pre_delete_hook(self, rpkid):
        """
        Tear down CA state before the left-right protocol deletes this parent.
        """
        trace_call_chain()
        yield self.destroy(rpkid = rpkid, delete_parent = False)

    @tornado.gen.coroutine
    def xml_post_save_hook(self, rpkid, q_pdu):
        """
        Process left-right protocol action attributes after saving; the
        requested actions run concurrently.
        """
        trace_call_chain()
        if q_pdu.get("clear_replay_protection"):
            self.clear_replay_protection()
        futures = []
        if q_pdu.get("rekey"):
            futures.append(self.serve_rekey(rpkid = rpkid))
        if q_pdu.get("revoke"):
            futures.append(self.serve_revoke(rpkid = rpkid))
        if q_pdu.get("reissue"):
            futures.append(self.serve_reissue(rpkid = rpkid))
        if q_pdu.get("revoke_forgotten"):
            futures.append(self.serve_revoke_forgotten(rpkid = rpkid))
        yield futures

    @tornado.gen.coroutine
    def serve_rekey(self, rpkid):
        """
        Rekey all CAs under this parent.
        """
        trace_call_chain()
        yield [ca.rekey(rpkid = rpkid) for ca in self.cas.all()]

    @tornado.gen.coroutine
    def serve_revoke(self, rpkid):
        """
        Revoke deprecated keys of all CAs under this parent.
        """
        trace_call_chain()
        yield [ca.revoke(rpkid = rpkid) for ca in self.cas.all()]

    @tornado.gen.coroutine
    def serve_reissue(self, rpkid):
        """
        Reissue all certificates issued by CAs under this parent.
        """
        trace_call_chain()
        yield [ca.reissue(rpkid = rpkid) for ca in self.cas.all()]

    def clear_replay_protection(self):
        """
        Forget the last CMS timestamp seen from this parent, so the peer
        may restart its timestamp sequence.
        """
        trace_call_chain()
        self.last_cms_timestamp = None
        self.save()


    @tornado.gen.coroutine
    def get_skis(self, rpkid):
        """
        Fetch SKIs that this parent thinks we have.  In theory this should
        agree with our own database, but in practice stuff can happen, so
        sometimes we need to know what our parent thinks.

        Result is a dictionary with the resource class name as key and a
        set of SKIs as value.

        This, like everything else dealing with SKIs in the up-down
        protocol, is mis-named: we're really dealing with g(SKI) values,
        not raw SKI values.  Sorry.
        """

        trace_call_chain()
        r_msg = yield self.up_down_list_query(rpkid = rpkid)
        ski_map = {}
        for rc in r_msg.getiterator(rpki.up_down.tag_class):
            skis = set()
            for c in rc.getiterator(rpki.up_down.tag_certificate):
                skis.add(rpki.x509.X509(Base64 = c.text).gSKI())
            ski_map[rc.get("class_name")] = skis
        raise tornado.gen.Return(ski_map)


    @tornado.gen.coroutine
    def revoke_skis(self, rpkid, rc_name, skis_to_revoke):
        """
        Revoke a set of SKIs within a particular resource class.
        """

        trace_call_chain()
        for ski in skis_to_revoke:
            logger.debug("Asking parent %r to revoke class %r, g(SKI) %s", self, rc_name, ski)
            yield self.up_down_revoke_query(rpkid = rpkid, class_name = rc_name, ski = ski)


    @tornado.gen.coroutine
    def serve_revoke_forgotten(self, rpkid):
        """
        Handle a left-right revoke_forgotten action for this parent.

        This is a bit fiddly: we have to compare the result of an up-down
        list query with what we have locally and identify the SKIs of any
        certificates that have gone missing.  This should never happen in
        ordinary operation, but can arise if we have somehow lost a
        private key, in which case there is nothing more we can do with
        the issued cert, so we have to clear it.  As this really is not
        supposed to happen, we don't clear it automatically, instead we
        require an explicit trigger.
        """

        trace_call_chain()
        skis_from_parent = yield self.get_skis(rpkid = rpkid)
        for rc_name, skis_to_revoke in skis_from_parent.iteritems():
            # Anything the parent reports that matches a live (non-revoked)
            # CADetail of ours is fine; whatever remains has been forgotten.
            for ca_detail in CADetail.objects.filter(ca__parent = self).exclude(state = "revoked"):
                skis_to_revoke.discard(ca_detail.latest_ca_cert.gSKI())
            yield self.revoke_skis(rpkid, rc_name, skis_to_revoke)


    @tornado.gen.coroutine
    def destroy(self, rpkid, delete_parent = True):
        """
        Delete all the CA stuff under this parent, and perhaps the parent
        itself.
        """

        trace_call_chain()
        yield self.serve_revoke_forgotten(rpkid = rpkid)
        # Bug fix: "cas" is a Django RelatedManager attribute, not a
        # callable; self.cas().all() raised TypeError (compare
        # self.cas.all() in serve_rekey() et al above).
        yield [ca.destroy(rpkid = rpkid, parent = self)
               for ca in self.cas.all()]
        if delete_parent:
            self.delete()


    def _compose_up_down_query(self, query_type):
        """
        Build the outer <message/> element for an up-down query.
        """
        return Element(rpki.up_down.tag_message, nsmap = rpki.up_down.nsmap,
                       version = rpki.up_down.version, type = query_type,
                       sender = self.sender_name, recipient = self.recipient_name)


    @tornado.gen.coroutine
    def up_down_list_query(self, rpkid):
        """
        Send an up-down "list" query to this parent and return the response.
        """
        trace_call_chain()
        q_msg = self._compose_up_down_query("list")
        r_msg = yield self.query_up_down(rpkid, q_msg)
        raise tornado.gen.Return(r_msg)


    @tornado.gen.coroutine
    def up_down_issue_query(self, rpkid, ca, ca_detail):
        """
        Send an up-down "issue" query for the given CA keypair and return
        the response.
        """
        trace_call_chain()
        logger.debug("Parent.up_down_issue_query(): caRepository %r rpkiManifest %r rpkiNotify %r",
                     ca.sia_uri, ca_detail.manifest_uri, ca.parent.repository.rrdp_notification_uri)
        pkcs10 = rpki.x509.PKCS10.create(
            keypair = ca_detail.private_key_id,
            is_ca = True,
            caRepository = ca.sia_uri,
            rpkiManifest = ca_detail.manifest_uri,
            rpkiNotify = ca.parent.repository.rrdp_notification_uri)
        q_msg = self._compose_up_down_query("issue")
        q_pdu = SubElement(q_msg, rpki.up_down.tag_request, class_name = ca.parent_resource_class)
        q_pdu.text = pkcs10.get_Base64()
        r_msg = yield self.query_up_down(rpkid, q_msg)
        raise tornado.gen.Return(r_msg)


    @tornado.gen.coroutine
    def up_down_revoke_query(self, rpkid, class_name, ski):
        """
        Send an up-down "revoke" query for one g(SKI) and return the response.
        """
        trace_call_chain()
        q_msg = self._compose_up_down_query("revoke")
        SubElement(q_msg, rpki.up_down.tag_key, class_name = class_name, ski = ski)
        r_msg = yield self.query_up_down(rpkid, q_msg)
        raise tornado.gen.Return(r_msg)


    @tornado.gen.coroutine
    def query_up_down(self, rpkid, q_msg):
        """
        Send one up-down query to this parent and return the checked
        response, dispatching to the internal root implementation when
        this parent is configured as one.
        """
        trace_call_chain()
        #logger.debug("%r query_up_down(): %s", self, ElementToString(q_msg))
        if self.root_asn_resources or self.root_ipv4_resources or self.root_ipv6_resources:
            r_msg = yield self.query_up_down_root(rpkid, q_msg)
        elif self.bsc is None:
            raise rpki.exceptions.BSCNotFound("Could not find BSC")
        elif self.bsc.signing_cert is None:
            raise rpki.exceptions.BSCNotReady("%r is not yet usable" % self.bsc)
        else:
            http_request = tornado.httpclient.HTTPRequest(
                url = self.peer_contact_uri,
                method = "POST",
                body = rpki.up_down.cms_msg().wrap(q_msg, self.bsc.private_key_id,
                                                   self.bsc.signing_cert,
                                                   self.bsc.signing_cert_crl),
                headers = { "Content-Type" : rpki.up_down.content_type },
                connect_timeout = rpkid.http_client_timeout,
                request_timeout = rpkid.http_client_timeout)
            http_response = yield rpkid.http_fetch(http_request)
            if http_response.headers.get("Content-Type") not in rpki.up_down.allowed_content_types:
                # Bug fix: the received Content-Type is the "got" value and our
                # canonical content type is the "expected" value; the two %r
                # arguments were previously swapped in this error message.
                raise rpki.exceptions.BadContentType("HTTP Content-Type %r, expected %r" % (
                    http_response.headers.get("Content-Type"), rpki.up_down.content_type))
            r_cms = rpki.up_down.cms_msg(DER = http_response.body)
            r_msg = r_cms.unwrap((rpkid.bpki_ta,
                                  self.tenant.bpki_cert, self.tenant.bpki_glue,
                                  self.bpki_cert, self.bpki_glue))
            r_cms.check_replay_sql(self, self.peer_contact_uri)
        #logger.debug("%r query_up_down(): %s", self, ElementToString(r_msg))
        rpki.up_down.check_response(r_msg, q_msg.get("type"))
        raise tornado.gen.Return(r_msg)


    @tornado.gen.coroutine
    def query_up_down_root(self, rpkid, q_msg):
        """
        Internal RPKI root, diverted from the normal up-down client.

        While it looks a bit silly, the simplest way to drop this in
        without rewriting all of the up-down client code is to
        implement a minimal version of the server side of the up-down
        protocol here, XML and all.  This has the additional advantage
        of using a well-defined protocol, one with a formal schema,
        even.  Yes, there's a bit of XML overhead, but we'd be paying
        that in any case for an external root, so it's just a minor
        optimization we've chosen not to take.

        We do skip the CMS wrapper, though, since this is all internal
        not just to a single Tenant but to a single Parent.
        """

        trace_call_chain()
        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)

        r_msg = Element(rpki.up_down.tag_message,
                        nsmap = rpki.up_down.nsmap,
                        version = rpki.up_down.version,
                        sender = self.recipient_name,
                        recipient = self.sender_name)

        try:

            if q_msg.get("type") == "revoke":
                ca_detail = CADetail.objects.get(
                    ca__parent = self,
                    state__in = ("active", "deprecated"),
                    ca__parent_resource_class = q_msg[0].get("class_name"),
                    ca_cert_uri__endswith = q_msg[0].get("ski") + ".cer")
                publisher.queue(
                    uri = ca_detail.ca_cert_uri,
                    old_obj = ca_detail.latest_ca_cert.certificate,
                    repository = self.repository)
                yield publisher.call_pubd()
                r_msg.set("type", "revoke_response")
                SubElement(r_msg, rpki.up_down.tag_key,
                           class_name = q_msg[0].get("class_name"),
                           ski = q_msg[0].get("ski"))

            else:               # Not revocation

                notAfter = rpki.sundial.now() + rpki.sundial.timedelta.parse(
                    rpkid.cfg.get("rpki-root-certificate-lifetime", "1y"))

                bag = rpki.resource_set.resource_bag(
                    asn = self.root_asn_resources,
                    v4 = self.root_ipv4_resources,
                    v6 = self.root_ipv6_resources,
                    valid_until = notAfter)

                rc = SubElement(
                    r_msg, rpki.up_down.tag_class,
                    class_name = self.parent_handle,
                    cert_url = self.sia_base + "root.cer",
                    resource_set_as = str(bag.asn),
                    resource_set_ipv4 = str(bag.v4),
                    resource_set_ipv6 = str(bag.v6),
                    resource_set_notafter = str(bag.valid_until))

                if q_msg.get("type") == "list":
                    r_msg.set("type", "list_response")
                    for ca_detail in CADetail.objects.filter(
                            ca__parent = self,
                            state__in = ("active", "deprecated"),
                            ca__parent_resource_class = self.parent_handle):
                        uri = self.sia_base + ca_detail.latest_ca_cert.gSKI() + ".cer"
                        SubElement(rc, rpki.up_down.tag_certificate,
                                   cert_url = uri).text = ca_detail.latest_ca_cert.get_Base64()

                else:
                    assert q_msg.get("type") == "issue"
                    r_msg.set("type", "issue_response")
                    pkcs10 = rpki.x509.PKCS10(Base64 = q_msg[0].text)
                    pkcs10_key = pkcs10.getPublicKey()
                    pkcs10_sia = pkcs10.get_SIA()
                    pkcs10_gski = pkcs10_key.gSKI()

                    uri = self.sia_base + pkcs10_gski + ".cer"

                    ca_details = dict(
                        (ca_detail.public_key.gSKI(), ca_detail)
                        for ca_detail in CADetail.objects.filter(
                            ca__parent = self,
                            ca__parent_resource_class = q_msg[0].get("class_name"),
                            state__in = ("pending", "active")))

                    ca_detail = ca_details[pkcs10_gski]

                    threshold = rpki.sundial.now() + rpki.sundial.timedelta(
                        seconds = self.tenant.regen_margin)

                    # Issue a new certificate only when something material
                    # changed or the current one is close to expiry.
                    need_to_issue = (
                        ca_detail.state == "pending"              or
                        ca_detail.public_key != pkcs10_key        or
                        ca_detail.latest_ca_cert.get_SIA() != pkcs10_sia or
                        ca_detail.latest_ca_cert.getNotAfter() < threshold)

                    if need_to_issue:
                        cert = rpki.x509.X509.self_certify(
                            keypair = ca_detail.private_key_id,
                            subject_key = pkcs10_key,
                            serial = ca_detail.ca.next_serial_number(),
                            sia = pkcs10_sia,
                            notAfter = bag.valid_until,
                            resources = bag)
                        publisher.queue(
                            uri = uri,
                            new_obj = cert,
                            repository = self.repository)
                        yield publisher.call_pubd()
                        logger.debug("%r Internal root issued, old CADetail %r, new cert %r",
                                     self, ca_detail, cert)
                    else:
                        cert = ca_detail.latest_ca_cert

                    SubElement(rc, rpki.up_down.tag_certificate,
                               cert_url = uri).text = cert.get_Base64()

                SubElement(rc, rpki.up_down.tag_issuer)

        except tornado.gen.Return:
            raise

        except:
            # Any other failure becomes an up-down error_response (2001).
            logger.exception("%r Up-down %s query to internal root failed:",
                             self, q_msg.get("type"))
            del r_msg[:]
            r_msg.set("type", "error_response")
            SubElement(r_msg, rpki.up_down.tag_status).text = "2001"

        raise tornado.gen.Return(r_msg)


    def construct_sia_uri(self, rc):
        """
        Construct the sia_uri value for a CA under this parent given
        configured information and the parent's up-down protocol
        list_response PDU.
        """

        trace_call_chain()
        sia_uri = rc.get("suggested_sia_head", "")
        if not sia_uri.startswith("rsync://") or not sia_uri.startswith(self.sia_base):
            sia_uri = self.sia_base
        if not sia_uri.endswith("/"):
            raise rpki.exceptions.BadURISyntax("SIA URI must end with a slash: %s" % sia_uri)
        return sia_uri
+
+
class CA(models.Model):
    """
    One CA (one resource class) under one parent.

    Most interesting state lives in the related CADetail objects; this
    object tracks the serial and CRL/manifest number counters and the
    SIA URI for the publication point.
    """

    last_crl_manifest_number = models.BigIntegerField(default = 1)
    last_issued_sn = models.BigIntegerField(default = 1)
    sia_uri = models.TextField(null = True)
    parent_resource_class = models.TextField(null = True) # Not sure this should allow NULL
    parent = models.ForeignKey(Parent, related_name = "cas")

    # So it turns out that there's always a 1:1 mapping between the
    # class_name we receive from our parent and the class_name we issue
    # to our children: in spite of the obfuscated way that we used to
    # handle class names, we never actually added a way for the back-end
    # to create new classes.  Not clear we want to encourage this, but
    # if we wanted to support it, simple approach would probably be an
    # optional class_name attribute in the left-right <list_resources/>
    # response; if not present, we'd use parent's class_name as now,
    # otherwise we'd use the supplied class_name.


    def __repr__(self):
        # repr() must never raise, hence the deliberately broad except.
        try:
            return "<CA: {}.{} class {}>".format(self.parent.tenant.tenant_handle,
                                                 self.parent.parent_handle,
                                                 self.parent_resource_class)
        except:
            return "<CA: CA object>"


    @tornado.gen.coroutine
    def destroy(self, rpkid, parent):
        """
        The list of current resource classes received from parent does not
        include the class corresponding to this CA, so we need to delete
        it (and its little dog too...).

        All certs published by this CA are now invalid, so need to
        withdraw them, the CRL, and the manifest from the repository,
        delete all child_cert and ca_detail records associated with this
        CA, then finally delete this CA itself.
        """

        # NOTE(review): the "parent" argument is not used in this body --
        # confirm whether it is kept only for the caller's convenience.
        trace_call_chain()
        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
        for ca_detail in self.ca_details.all():
            ca_detail.destroy(publisher = publisher, allow_failure = True)
        try:
            yield publisher.call_pubd()
        except:
            # Best-effort: if withdrawal fails we keep the row and retry later.
            logger.exception("Could not destroy %r, skipping", self)
        else:
            logger.debug("Destroying %r", self)
            self.delete()


    def next_serial_number(self):
        """
        Allocate a certificate serial number.
        """

        trace_call_chain()
        # Increment-then-save so the allocated number is persistent.
        self.last_issued_sn += 1
        self.save()
        return self.last_issued_sn


    def next_crl_manifest_number(self):
        """
        Allocate a CRL/Manifest number.
        """

        trace_call_chain()
        self.last_crl_manifest_number += 1
        self.save()
        return self.last_crl_manifest_number


    def create_detail(self):
        """
        Create a new CADetail object for this CA.
        """

        trace_call_chain()
        # Separate keypairs for the CA certificate and for manifest signing.
        cer_keypair = rpki.x509.RSA.generate()
        mft_keypair = rpki.x509.RSA.generate()
        return CADetail.objects.create(
            ca = self,
            state = "pending",
            private_key_id = cer_keypair,
            public_key = cer_keypair.get_public(),
            manifest_private_key_id = mft_keypair,
            manifest_public_key = mft_keypair.get_public())


    @tornado.gen.coroutine
    def rekey(self, rpkid):
        """
        Initiate a rekey operation for this CA.  Generate a new keypair.
        Request cert from parent using new keypair.  Mark result as our
        active ca_detail.  Reissue all child certs issued by this CA using
        the new ca_detail.
        """

        trace_call_chain()
        try:
            old_detail = self.ca_details.get(state = "active")
        except CADetail.DoesNotExist:
            # First certificate for this CA: nothing to roll over from.
            old_detail = None
        new_detail = self.create_detail()
        # Second %r is the bound method, identifying the code path in the log.
        logger.debug("Sending issue request to %r from %r", self.parent, self.rekey)
        r_msg = yield self.parent.up_down_issue_query(rpkid = rpkid, ca = self, ca_detail = new_detail)
        c = r_msg[0][0]
        logger.debug("%r received certificate %s", self, c.get("cert_url"))
        yield new_detail.activate(
            rpkid = rpkid,
            ca = self,
            cert = rpki.x509.X509(Base64 = c.text),
            uri = c.get("cert_url"),
            predecessor = old_detail)


    @tornado.gen.coroutine
    def revoke(self, rpkid, revoke_all = False):
        """
        Revoke deprecated ca_detail objects associated with this CA, or
        all ca_details associated with this CA if revoke_all is set.

        For each CADetail, this involves: requesting revocation of the
        keypair by parent; revoking all issued certificates;
        generating final CRL and manifest covering the period one CRL
        cycle past the time that the last certificate would have
        expired; and destroying the keypair.  We leave final CRL and
        manifest in place until their nextupdate time has passed.
        """

        trace_call_chain()

        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)

        if revoke_all:
            ca_details = self.ca_details.all()
        else:
            ca_details = self.ca_details.filter(state = "deprecated")

        for ca_detail in ca_details:

            gski = ca_detail.latest_ca_cert.gSKI()
            logger.debug("Asking parent to revoke CA certificate matching g(SKI) = %s", gski)
            r_msg = yield self.parent.up_down_revoke_query(rpkid = rpkid, class_name = self.parent_resource_class, ski = gski)
            # Sanity-check that the parent revoked what we asked it to.
            if r_msg[0].get("class_name") != self.parent_resource_class:
                raise rpki.exceptions.ResourceClassMismatch
            if r_msg[0].get("ski") != gski:
                raise rpki.exceptions.SKIMismatch
            logger.debug("Parent revoked g(SKI) %s, starting cleanup", gski)

            # The final CRL/manifest must outlive everything this keypair
            # signed, so push nextUpdate past every outstanding expiry.
            nextUpdate = rpki.sundial.now()
            if ca_detail.latest_manifest is not None:
                ca_detail.latest_manifest.extract_if_needed()
                nextUpdate = nextUpdate.later(ca_detail.latest_manifest.getNextUpdate())
            if ca_detail.latest_crl is not None:
                nextUpdate = nextUpdate.later(ca_detail.latest_crl.getNextUpdate())
            for child_cert in ca_detail.child_certs.all():
                nextUpdate = nextUpdate.later(child_cert.cert.getNotAfter())
                child_cert.revoke(publisher = publisher)
            for roa in ca_detail.roas.all():
                nextUpdate = nextUpdate.later(roa.cert.getNotAfter())
                roa.revoke(publisher = publisher)
            for ghostbuster in ca_detail.ghostbusters.all():
                nextUpdate = nextUpdate.later(ghostbuster.cert.getNotAfter())
                ghostbuster.revoke(publisher = publisher)
            for eecert in ca_detail.ee_certificates.all():
                nextUpdate = nextUpdate.later(eecert.cert.getNotAfter())
                eecert.revoke(publisher = publisher)
            nextUpdate += rpki.sundial.timedelta(seconds = self.parent.tenant.crl_interval)

            ca_detail.generate_crl_and_manifest(publisher = publisher, nextUpdate = nextUpdate)
            # Destroy the keypair; the final CRL/manifest remain published.
            ca_detail.private_key_id = None
            ca_detail.manifest_private_key_id = None
            ca_detail.manifest_public_key = None
            ca_detail.state = "revoked"
            ca_detail.save()

        yield publisher.call_pubd()


    @tornado.gen.coroutine
    def reissue(self, rpkid):
        """
        Reissue all current certificates issued by this CA.
        """

        trace_call_chain()
        # NOTE(review): .get() raises CADetail.DoesNotExist when there is no
        # active ca_detail, so the guard below only ever sees a truthy value;
        # if "no active detail" is meant to be a no-op, this wants
        # .filter(state = "active").first() instead -- confirm intent.
        ca_detail = self.ca_details.get(state = "active")
        if ca_detail:
            yield ca_detail.reissue(rpkid = rpkid)
+
+
class CADetail(models.Model):
    """
    Certification state for one keypair of a CA: the CA certificate we
    hold, our signing keys, and the latest CRL and manifest generated
    with them.  A CA may own several CADetails in different states at
    once (e.g. during key rollover, see CA.rekey()).
    """

    public_key = PublicKeyField(null = True)
    private_key_id = RSAPrivateKeyField(null = True)
    latest_crl = CRLField(null = True)
    crl_published = SundialField(null = True)
    latest_ca_cert = CertificateField(null = True)
    manifest_private_key_id = RSAPrivateKeyField(null = True)
    manifest_public_key = PublicKeyField(null = True)
    latest_manifest = ManifestField(null = True)
    manifest_published = SundialField(null = True)
    next_crl_manifest_update = SundialField(null = True)
    state = EnumField(choices = ("pending", "active", "deprecated", "revoked"))
    ca_cert_uri = models.TextField(null = True)
    ca = models.ForeignKey(CA, related_name = "ca_details") # pylint: disable=C0103

    def __repr__(self):
        # repr() must never raise, hence the deliberately broad except.
        try:
            return "<CADetail: {}.{} class {} {} {}>".format(self.ca.parent.tenant.tenant_handle,
                                                             self.ca.parent.parent_handle,
                                                             self.ca.parent_resource_class,
                                                             self.state,
                                                             self.ca_cert_uri)
        except:
            return "<CADetail: CADetail object>"
+
+
+ @property
+ def crl_uri(self):
+ """
+ Return publication URI for this ca_detail's CRL.
+ """
+
+ return self.ca.sia_uri + self.crl_uri_tail
+
+
+ @property
+ def crl_uri_tail(self):
+ """
+ Return tail (filename portion) of publication URI for this ca_detail's CRL.
+ """
+
+ # pylint: disable=E1101
+ return self.public_key.gSKI() + ".crl"
+
+
+ @property
+ def manifest_uri(self):
+ """
+ Return publication URI for this ca_detail's manifest.
+ """
+
+ # pylint: disable=E1101
+ return self.ca.sia_uri + self.public_key.gSKI() + ".mft"
+
+
+ def has_expired(self):
+ """
+ Return whether this ca_detail's certificate has expired.
+ """
+
+ return self.latest_ca_cert.getNotAfter() <= rpki.sundial.now()
+
+
+ def covers(self, target):
+ """
+ Test whether this ca-detail covers a given set of resources.
+ """
+
+ assert not target.asn.inherit and not target.v4.inherit and not target.v6.inherit
+ me = self.latest_ca_cert.get_3779resources()
+ return target.asn <= me.asn and target.v4 <= me.v4 and target.v6 <= me.v6
+
+
    @tornado.gen.coroutine
    def activate(self, rpkid, ca, cert, uri, predecessor = None):
        """
        Activate this ca_detail.

        Record the certificate received from the parent, mark this
        ca_detail active, and generate its first CRL and manifest.
        When a predecessor ca_detail is given (key rollover), the
        predecessor is deprecated and everything it issued is reissued
        or regenerated under this ca_detail, then the predecessor gets
        one more CRL and manifest.  All publication happens in one
        pubd batch at the end.
        """

        trace_call_chain()
        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
        self.latest_ca_cert = cert
        self.ca_cert_uri = uri
        self.state = "active"
        self.generate_crl_and_manifest(publisher = publisher)
        self.save()

        if predecessor is not None:
            predecessor.state = "deprecated"
            predecessor.save()
            for child_cert in predecessor.child_certs.all():
                child_cert.reissue(ca_detail = self, publisher = publisher)
            for roa in predecessor.roas.all():
                roa.regenerate(publisher = publisher)
            for ghostbuster in predecessor.ghostbusters.all():
                ghostbuster.regenerate(publisher = publisher)
            for eecert in predecessor.ee_certificates.all():
                eecert.reissue(publisher = publisher, ca_detail = self)
            # Regenerate the old keypair's CRL/manifest so they reflect the
            # objects just moved off of it.
            predecessor.generate_crl_and_manifest(publisher = publisher)

        yield publisher.call_pubd()
+
+
    def destroy(self, publisher, allow_failure = False):
        """
        Delete this ca_detail and all of the certs it issued.

        If allow_failure is true, we clean up as much as we can but don't
        raise an exception.
        """

        trace_call_chain()
        repository = self.ca.parent.repository
        # handler = False suppresses the default error handler in the
        # publication queue (see Repository.call_pubd()), which is how
        # allow_failure is implemented at the publication layer.
        handler = False if allow_failure else None
        for child_cert in self.child_certs.all():
            publisher.queue(uri = child_cert.uri, old_obj = child_cert.cert, repository = repository, handler = handler)
            child_cert.delete()
        for roa in self.roas.all():
            roa.revoke(publisher = publisher, allow_failure = allow_failure)
        for ghostbuster in self.ghostbusters.all():
            ghostbuster.revoke(publisher = publisher, allow_failure = allow_failure)
        for eecert in self.ee_certificates.all():
            # NOTE(review): allow_failure is not forwarded here, unlike the
            # roa/ghostbuster revocations above -- confirm whether intended.
            eecert.revoke(publisher = publisher)
        if self.latest_manifest is not None:
            publisher.queue(uri = self.manifest_uri, old_obj = self.latest_manifest, repository = repository, handler = handler)
        if self.latest_crl is not None:
            publisher.queue(uri = self.crl_uri, old_obj = self.latest_crl, repository = repository, handler = handler)
        for cert in self.revoked_certs.all():    # + self.child_certs.all()
            logger.debug("Deleting %r", cert)
            cert.delete()
        logger.debug("Deleting %r", self)
        self.delete()
+
+
    @tornado.gen.coroutine
    def update(self, rpkid, parent, ca, rc, sia_uri_changed, old_resources):
        """
        Need to get a new certificate for this ca_detail and perhaps frob
        children of this ca_detail.

        Sends an up-down issue query; if this ca_detail was still
        pending, the response just activates it.  Otherwise we store
        the (possibly changed) certificate, regenerate CRL/manifest,
        and reissue or regenerate whatever subordinate objects are
        affected by SIA, validity, or resource changes.
        """

        trace_call_chain()

        # Second %r is the bound method, identifying the code path in the log.
        logger.debug("Sending issue request to %r from %r", parent, self.update)

        r_msg = yield parent.up_down_issue_query(rpkid = rpkid, ca = ca, ca_detail = self)

        c = r_msg[0][0]

        cert = rpki.x509.X509(Base64 = c.text)
        cert_url = c.get("cert_url")

        logger.debug("%r received certificate %s", self, cert_url)

        if self.state == "pending":
            # First certificate for this ca_detail: activation handles the rest.
            yield self.activate(rpkid = rpkid, ca = ca, cert = cert, uri = cert_url)
            return

        validity_changed = self.latest_ca_cert is None or self.latest_ca_cert.getNotAfter() != cert.getNotAfter()

        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)

        if self.latest_ca_cert != cert:
            self.latest_ca_cert = cert
            self.save()
            self.generate_crl_and_manifest(publisher = publisher)

        new_resources = self.latest_ca_cert.get_3779resources()

        # Shrink child certificates that no longer fit the new resource set.
        if sia_uri_changed or old_resources.oversized(new_resources):
            for child_cert in self.child_certs.all():
                child_resources = child_cert.cert.get_3779resources()
                if sia_uri_changed or child_resources.oversized(new_resources):
                    child_cert.reissue(ca_detail = self, resources = child_resources & new_resources, publisher = publisher)

        if sia_uri_changed or validity_changed or old_resources.oversized(new_resources):
            for roa in self.roas.all():
                roa.update(publisher = publisher)

        if sia_uri_changed or validity_changed:
            for ghostbuster in self.ghostbusters.all():
                ghostbuster.update(publisher = publisher)

        yield publisher.call_pubd()
+
+
+ def issue_ee(self, ca, resources, subject_key, sia,
+ cn = None, sn = None, notAfter = None, eku = None, notBefore = None):
+ """
+ Issue a new EE certificate.
+ """
+
+ trace_call_chain()
+ if notAfter is None:
+ notAfter = self.latest_ca_cert.getNotAfter()
+ return self.latest_ca_cert.issue(
+ keypair = self.private_key_id,
+ subject_key = subject_key,
+ serial = ca.next_serial_number(),
+ sia = sia,
+ aia = self.ca_cert_uri,
+ crldp = self.crl_uri,
+ resources = resources,
+ notBefore = notBefore,
+ notAfter = notAfter,
+ is_ca = False,
+ cn = cn,
+ sn = sn,
+ eku = eku)
+
+
    def issue(self, ca, child, subject_key, sia, resources, publisher, child_cert = None):
        """
        Issue a new certificate to a child.  Optional child_cert argument
        specifies an existing child_cert object to update in place; if not
        specified, we create a new one.  Returns the child_cert object
        containing the newly issued cert.

        Also retries any stale failed publications for this ca_detail,
        queues publication of the new certificate, and regenerates this
        ca_detail's CRL and manifest.
        """

        trace_call_chain()
        self.check_failed_publication(publisher)
        # notAfter comes from the resource bag, not from the CA cert.
        cert = self.latest_ca_cert.issue(
            keypair = self.private_key_id,
            subject_key = subject_key,
            serial = ca.next_serial_number(),
            aia = self.ca_cert_uri,
            crldp = self.crl_uri,
            sia = sia,
            resources = resources,
            notAfter = resources.valid_until)
        if child_cert is None:
            old_cert = None
            child_cert = ChildCert(child = child, ca_detail = self, cert = cert)
            logger.debug("Created new child_cert %r", child_cert)
        else:
            # Reusing an existing row: remember the old certificate so the
            # publication protocol can replace it in place.
            old_cert = child_cert.cert
            child_cert.cert = cert
            child_cert.ca_detail = self
            logger.debug("Reusing existing child_cert %r", child_cert)
        child_cert.gski = cert.gSKI()
        # published timestamp stays set until published_callback clears it.
        child_cert.published = rpki.sundial.now()
        child_cert.save()
        publisher.queue(
            uri = child_cert.uri,
            old_obj = old_cert,
            new_obj = child_cert.cert,
            repository = ca.parent.repository,
            handler = child_cert.published_callback)
        self.generate_crl_and_manifest(publisher = publisher)
        return child_cert
+
+
    def generate_crl_and_manifest(self, publisher, nextUpdate = None):
        """
        Generate a new CRL and a new manifest for this ca_detail.

        At the moment this is unconditional, that is, it is up to the
        caller to decide whether a new CRL is needed.

        We used to handle CRL and manifest as two separate operations,
        but there's no real point, and it's simpler to do them at once.

        CRL and manifest share a single serial number sequence
        (next_crl_manifest_number()) and the same thisUpdate/nextUpdate
        window; nextUpdate defaults to now plus the tenant's configured
        crl_interval.
        """

        trace_call_chain()

        self.check_failed_publication(publisher)

        crl_interval = rpki.sundial.timedelta(seconds = self.ca.parent.tenant.crl_interval)
        now = rpki.sundial.now()
        if nextUpdate is None:
            nextUpdate = now + crl_interval

        # Capture old objects and URIs before we overwrite them, so the
        # publication queue can do replace-in-place.
        old_crl = self.latest_crl
        old_manifest = self.latest_manifest
        crl_uri = self.crl_uri
        manifest_uri = self.manifest_uri

        crl_manifest_number = self.ca.next_crl_manifest_number()

        # EE certificate for the manifest; inherits all resources.
        manifest_cert = self.issue_ee(
            ca = self.ca,
            resources = rpki.resource_set.resource_bag.from_inheritance(),
            subject_key = self.manifest_public_key,
            sia = (None, None, manifest_uri, self.ca.parent.repository.rrdp_notification_uri),
            notBefore = now)

        # Build the revocation list, dropping entries that have been
        # expired for more than one CRL interval.
        certlist = []
        for revoked_cert in self.revoked_certs.all():
            if now > revoked_cert.expires + crl_interval:
                revoked_cert.delete()
            else:
                certlist.append((revoked_cert.serial, revoked_cert.revoked))
        certlist.sort()

        self.latest_crl = rpki.x509.CRL.generate(
            keypair = self.private_key_id,
            issuer = self.latest_ca_cert,
            serial = crl_manifest_number,
            thisUpdate = now,
            nextUpdate = nextUpdate,
            revokedCertificates = certlist)

        # XXX temporary debug logging
        logger.debug("%r Generating manifest, child_certs_all(): %r", self, self.child_certs.all())

        # Manifest contents: the CRL plus every object this ca_detail has
        # issued (child certs, ROAs, Ghostbusters, EE certs).
        objs = [(self.crl_uri_tail, self.latest_crl)]
        objs.extend((c.uri_tail, c.cert) for c in self.child_certs.all())
        objs.extend((r.uri_tail, r.roa) for r in self.roas.filter(roa__isnull = False))
        objs.extend((g.uri_tail, g.ghostbuster) for g in self.ghostbusters.all())
        objs.extend((e.uri_tail, e.cert) for e in self.ee_certificates.all())

        # XXX temporary debug logging
        logger.debug("%r Generating manifest, objs: %r", self, objs)

        self.latest_manifest = rpki.x509.SignedManifest.build(
            serial = crl_manifest_number,
            thisUpdate = now,
            nextUpdate = nextUpdate,
            names_and_objs = objs,
            keypair = self.manifest_private_key_id,
            certs = manifest_cert)

        # Publication timestamps stay set until the callbacks clear them.
        self.crl_published = now
        self.manifest_published = now
        self.next_crl_manifest_update = nextUpdate
        self.save()

        publisher.queue(
            uri = crl_uri,
            old_obj = old_crl,
            new_obj = self.latest_crl,
            repository = self.ca.parent.repository,
            handler = self.crl_published_callback)

        publisher.queue(
            uri = manifest_uri,
            old_obj = old_manifest,
            new_obj = self.latest_manifest,
            repository = self.ca.parent.repository,
            handler = self.manifest_published_callback)
+
+
    def crl_published_callback(self, pdu):
        """
        Check result of CRL publication.

        Raises if pdu reports an error; otherwise clears the
        crl_published timestamp to mark publication as confirmed.
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.crl_published = None
        self.save()
+
    def manifest_published_callback(self, pdu):
        """
        Check result of manifest publication.

        Raises if pdu reports an error; otherwise clears the
        manifest_published timestamp to mark publication as confirmed.
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.manifest_published = None
        self.save()
+
+
    @tornado.gen.coroutine
    def reissue(self, rpkid):
        """
        Reissue all current certificates issued by this ca_detail.

        Forces regeneration of every ROA, Ghostbuster, EE certificate,
        and child certificate, then regenerates CRL and manifest and
        pushes everything to pubd in one batch.
        """

        trace_call_chain()
        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
        self.check_failed_publication(publisher)
        for roa in self.roas.all():
            roa.regenerate(publisher)
        for ghostbuster in self.ghostbusters.all():
            ghostbuster.regenerate(publisher)
        for ee_certificate in self.ee_certificates.all():
            ee_certificate.reissue(publisher, force = True)
        for child_cert in self.child_certs.all():
            child_cert.reissue(self, publisher, force = True)
        self.generate_crl_and_manifest(publisher = publisher)
        # NOTE(review): generate_crl_and_manifest() already calls
        # self.save(); this extra save looks redundant -- confirm.
        self.save()
        yield publisher.call_pubd()
+
+
    def check_failed_publication(self, publisher, check_all = True):
        """
        Check for failed publication of objects issued by this ca_detail.

        All publishable objects have timestamp fields recording time of
        last attempted publication, and callback methods which clear these
        timestamps once publication has succeeded.  Our task here is to
        look for objects issued by this ca_detail which have timestamps
        set (indicating that they have not been published) and for which
        the timestamps are not very recent (for some definition of very
        recent -- intent is to allow a bit of slack in case pubd is just
        being slow).  In such cases, we want to retry publication.

        As an optimization, we can probably skip checking other products
        if manifest and CRL have been published, thus saving ourselves
        several complex SQL queries.  Not sure yet whether this
        optimization is worthwhile.

        For the moment we check everything without optimization, because
        it simplifies testing.

        For the moment our definition of staleness is hardwired; this
        should become configurable.

        Parameters:
          publisher -- publication queue to which retries are posted.
          check_all -- when False, only CRL and manifest are checked;
                       child certs, ROAs, Ghostbusters, and EE certs
                       are skipped.
        """

        trace_call_chain()

        logger.debug("Checking for failed publication for %r", self)

        # Anything still unconfirmed after 60 seconds is considered stale.
        stale = rpki.sundial.now() - rpki.sundial.timedelta(seconds = 60)
        repository = self.ca.parent.repository
        if self.latest_crl is not None and self.crl_published is not None and self.crl_published < stale:
            logger.debug("Retrying publication for %s", self.crl_uri)
            publisher.queue(uri = self.crl_uri,
                            new_obj = self.latest_crl,
                            repository = repository,
                            handler = self.crl_published_callback)
        if self.latest_manifest is not None and self.manifest_published is not None and self.manifest_published < stale:
            logger.debug("Retrying publication for %s", self.manifest_uri)
            publisher.queue(uri = self.manifest_uri,
                            new_obj = self.latest_manifest,
                            repository = repository,
                            handler = self.manifest_published_callback)
        if not check_all:
            return
        # published__isnull = False means an attempt is still pending
        # confirmation; published__lt = stale means it has been pending
        # too long.
        for child_cert in self.child_certs.filter(published__isnull = False, published__lt = stale):
            logger.debug("Retrying publication for %s", child_cert)
            publisher.queue(
                uri = child_cert.uri,
                new_obj = child_cert.cert,
                repository = repository,
                handler = child_cert.published_callback)
        for roa in self.roas.filter(published__isnull = False, published__lt = stale):
            logger.debug("Retrying publication for %s", roa)
            publisher.queue(
                uri = roa.uri,
                new_obj = roa.roa,
                repository = repository,
                handler = roa.published_callback)
        for ghostbuster in self.ghostbusters.filter(published__isnull = False, published__lt = stale):
            logger.debug("Retrying publication for %s", ghostbuster)
            publisher.queue(
                uri = ghostbuster.uri,
                new_obj = ghostbuster.ghostbuster,
                repository = repository,
                handler = ghostbuster.published_callback)
        for ee_cert in self.ee_certificates.filter(published__isnull = False, published__lt = stale):
            logger.debug("Retrying publication for %s", ee_cert)
            publisher.queue(
                uri = ee_cert.uri,
                new_obj = ee_cert.cert,
                repository = repository,
                handler = ee_cert.published_callback)
+
+
+@xml_hooks
+class Child(models.Model):
+ child_handle = models.SlugField(max_length = 255)
+ bpki_cert = CertificateField(null = True)
+ bpki_glue = CertificateField(null = True)
+ last_cms_timestamp = SundialField(null = True)
+ tenant = models.ForeignKey(Tenant, related_name = "children")
+ bsc = models.ForeignKey(BSC, related_name = "children")
+ objects = XMLManager()
+
+ class Meta:
+ unique_together = ("tenant", "child_handle")
+
+ xml_template = XMLTemplate(
+ name = "child",
+ handles = (BSC,),
+ elements = ("bpki_cert", "bpki_glue"))
+
+ def __repr__(self):
+ try:
+ return "<Child: {}.{}>".format(self.tenant.tenant_handle, self.child_handle)
+ except:
+ return "<Child: Child object>"
+
+
+ @tornado.gen.coroutine
+ def xml_pre_delete_hook(self, rpkid):
+ trace_call_chain()
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+ ca_details = set()
+ for child_cert in self.child_certs.all():
+ ca_details.add(child_cert.ca_detail)
+ child_cert.revoke(publisher = publisher)
+ for ca_detail in ca_details:
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+ yield publisher.call_pubd()
+
+
+ @tornado.gen.coroutine
+ def xml_post_save_hook(self, rpkid, q_pdu):
+ trace_call_chain()
+ if q_pdu.get("clear_replay_protection"):
+ self.clear_replay_protection()
+ if q_pdu.get("reissue"):
+ yield self.serve_reissue(rpkid = rpkid)
+
+
+ def serve_reissue(self, rpkid):
+ trace_call_chain()
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+ for child_cert in self.child_certs.all():
+ child_cert.reissue(child_cert.ca_detail, publisher, force = True)
+ yield publisher.call_pubd()
+
+
+ def clear_replay_protection(self):
+ trace_call_chain()
+ self.last_cms_timestamp = None
+ self.save()
+
+
+ @tornado.gen.coroutine
+ def up_down_handle_list(self, rpkid, q_msg, r_msg):
+
+ trace_call_chain()
+ irdb_resources = yield rpkid.irdb_query_child_resources(self.tenant.tenant_handle, self.child_handle)
+ if irdb_resources.valid_until < rpki.sundial.now():
+ logger.debug("Child %s's resources expired %s", self.child_handle, irdb_resources.valid_until)
+ else:
+ for ca_detail in CADetail.objects.filter(ca__parent__tenant = self.tenant, state = "active"):
+ resources = ca_detail.latest_ca_cert.get_3779resources() & irdb_resources
+ if resources.empty():
+ logger.debug("No overlap between received resources and what child %s should get ([%s], [%s])",
+ self.child_handle, ca_detail.latest_ca_cert.get_3779resources(), irdb_resources)
+ continue
+ rc = SubElement(r_msg, rpki.up_down.tag_class,
+ class_name = ca_detail.ca.parent_resource_class,
+ cert_url = ca_detail.ca_cert_uri,
+ resource_set_as = str(resources.asn),
+ resource_set_ipv4 = str(resources.v4),
+ resource_set_ipv6 = str(resources.v6),
+ resource_set_notafter = str(resources.valid_until))
+ for child_cert in self.child_certs.filter(ca_detail = ca_detail):
+ c = SubElement(rc, rpki.up_down.tag_certificate, cert_url = child_cert.uri)
+ c.text = child_cert.cert.get_Base64()
+ SubElement(rc, rpki.up_down.tag_issuer).text = ca_detail.latest_ca_cert.get_Base64()
+
+
+ @tornado.gen.coroutine
+ def up_down_handle_issue(self, rpkid, q_msg, r_msg):
+
+ trace_call_chain()
+
+ req = q_msg[0]
+ assert req.tag == rpki.up_down.tag_request
+
+ # Subsetting not yet implemented, this is the one place where
+ # we have to handle it, by reporting that we're lame.
+
+ if any(req.get(a) for a in ("req_resource_set_as",
+ "req_resource_set_ipv4", "req_resource_set_ipv6")):
+ raise rpki.exceptions.NotImplementedYet("req_* attributes not implemented yet, sorry")
+
+ class_name = req.get("class_name")
+ pkcs10 = rpki.x509.PKCS10(Base64 = req.text)
+ pkcs10.check_valid_request_ca()
+ ca_detail = CADetail.objects.get(ca__parent__tenant = self.tenant,
+ ca__parent_resource_class = class_name,
+ state = "active")
+
+ irdb_resources = yield rpkid.irdb_query_child_resources(self.tenant.tenant_handle,
+ self.child_handle)
+
+ if irdb_resources.valid_until < rpki.sundial.now():
+ raise rpki.exceptions.IRDBExpired("IRDB entry for child %s expired %s" % (
+ self.child_handle, irdb_resources.valid_until))
+
+ resources = irdb_resources & ca_detail.latest_ca_cert.get_3779resources()
+ resources.valid_until = irdb_resources.valid_until
+ req_key = pkcs10.getPublicKey()
+ req_sia = pkcs10.get_SIA()
+
+ # Generate new cert or regenerate old one if necessary
+
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+
+ try:
+ child_cert = self.child_certs.get(ca_detail = ca_detail, gski = req_key.gSKI())
+
+ except ChildCert.DoesNotExist:
+ child_cert = ca_detail.issue(
+ ca = ca_detail.ca,
+ child = self,
+ subject_key = req_key,
+ sia = req_sia,
+ resources = resources,
+ publisher = publisher)
+
+ else:
+ child_cert = child_cert.reissue(
+ ca_detail = ca_detail,
+ sia = req_sia,
+ resources = resources,
+ publisher = publisher)
+
+ yield publisher.call_pubd()
+
+ rc = SubElement(r_msg, rpki.up_down.tag_class,
+ class_name = class_name,
+ cert_url = ca_detail.ca_cert_uri,
+ resource_set_as = str(resources.asn),
+ resource_set_ipv4 = str(resources.v4),
+ resource_set_ipv6 = str(resources.v6),
+ resource_set_notafter = str(resources.valid_until))
+ c = SubElement(rc, rpki.up_down.tag_certificate, cert_url = child_cert.uri)
+ c.text = child_cert.cert.get_Base64()
+ SubElement(rc, rpki.up_down.tag_issuer).text = ca_detail.latest_ca_cert.get_Base64()
+
+
+ @tornado.gen.coroutine
+ def up_down_handle_revoke(self, rpkid, q_msg, r_msg):
+ trace_call_chain()
+ key = q_msg[0]
+ assert key.tag == rpki.up_down.tag_key
+ class_name = key.get("class_name")
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+ ca_details = set()
+ for child_cert in ChildCert.objects.filter(ca_detail__ca__parent__tenant = self.tenant,
+ ca_detail__ca__parent_resource_class = class_name,
+ gski = key.get("ski")):
+ ca_details.add(child_cert.ca_detail)
+ child_cert.revoke(publisher = publisher)
+ for ca_detail in ca_details:
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+ yield publisher.call_pubd()
+ SubElement(r_msg, key.tag, class_name = class_name, ski = key.get("ski"))
+
+
+ @tornado.gen.coroutine
+ def serve_up_down(self, rpkid, q_der):
+ """
+ Outer layer of server handling for one up-down PDU from this child.
+ """
+
+ trace_call_chain()
+
+ if self.bsc is None:
+ raise rpki.exceptions.BSCNotFound("Could not find BSC")
+
+ q_cms = rpki.up_down.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((rpkid.bpki_ta, self.tenant.bpki_cert, self.tenant.bpki_glue, self.bpki_cert, self.bpki_glue))
+ q_cms.check_replay_sql(self, "child", self.child_handle)
+ q_type = q_msg.get("type")
+
+ logger.info("Serving %s query from child %s [sender %s, recipient %s]",
+ q_type, self.child_handle, q_msg.get("sender"), q_msg.get("recipient"))
+
+ if rpki.up_down.enforce_strict_up_down_xml_sender and q_msg.get("sender") != self.child_handle:
+ raise rpki.exceptions.BadSender("Unexpected XML sender %s" % q_msg.get("sender"))
+
+ r_msg = Element(rpki.up_down.tag_message, nsmap = rpki.up_down.nsmap, version = rpki.up_down.version,
+ sender = q_msg.get("recipient"), recipient = q_msg.get("sender"), type = q_type + "_response")
+
+ try:
+ yield getattr(self, "up_down_handle_" + q_type)(rpkid, q_msg, r_msg)
+
+ except Exception, e:
+ logger.exception("Unhandled exception serving child %r", self)
+ rpki.up_down.generate_error_response_from_exception(r_msg, e, q_type)
+
+ r_der = rpki.up_down.cms_msg().wrap(r_msg, self.bsc.private_key_id, self.bsc.signing_cert, self.bsc.signing_cert_crl)
+ raise tornado.gen.Return(r_der)
+
class ChildCert(models.Model):
    """
    One certificate we have issued to a child CA, together with the
    publication bookkeeping needed to keep it in the repository.
    """

    cert = CertificateField()
    # Timestamp of last publication attempt; None once confirmed.
    published = SundialField(null = True)
    gski = models.CharField(max_length = 27) # Assumes SHA-1 -- SHA-256 would be 43, SHA-512 would be 86, etc.
    child = models.ForeignKey(Child, related_name = "child_certs")
    ca_detail = models.ForeignKey(CADetail, related_name = "child_certs")

    def __repr__(self):
        try:
            return "<ChildCert: {}.{} {}>".format(self.child.tenant.tenant_handle,
                                                  self.child.child_handle,
                                                  self.uri)
        except:
            return "<ChildCert: ChildCert object>"


    @property
    def uri_tail(self):
        """
        Return the tail (filename) portion of the URI for this child_cert.
        """

        return self.gski + ".cer"


    @property
    def uri(self):
        """
        Return the publication URI for this child_cert.
        """

        return self.ca_detail.ca.sia_uri + self.uri_tail


    def revoke(self, publisher):
        """
        Revoke a child cert.

        Adds a CRL entry, queues withdrawal of the published object, and
        deletes this database row.  Caller is responsible for
        regenerating CRL and manifest afterwards.
        """

        trace_call_chain()
        ca_detail = self.ca_detail
        logger.debug("Revoking %r", self)
        RevokedCert.revoke(cert = self.cert, ca_detail = ca_detail)
        publisher.queue(uri = self.uri, old_obj = self.cert, repository = ca_detail.ca.parent.repository)
        self.delete()


    def reissue(self, ca_detail, publisher, resources = None, sia = None, force = False):
        """
        Reissue an existing child cert, reusing the public key.  If
        the child cert we would generate is identical to the one we
        already have, we just return the one we already have.  If we
        have to revoke the old child cert when generating the new one,
        we have to generate a new ChildCert, so calling code that
        needs the updated ChildCert must use the return value from
        this method.
        """

        trace_call_chain()
        # pylint: disable=E1101
        ca = ca_detail.ca
        child = self.child
        old_resources = self.cert.get_3779resources()
        old_sia = self.cert.get_SIA()
        old_aia = self.cert.get_AIA()[0]
        old_ca_detail = self.ca_detail
        needed = False
        if resources is None:
            resources = old_resources
        if sia is None:
            sia = old_sia
        # Make sure the SIA includes the repository's RRDP notification
        # URI as its fourth element.
        if len(sia) < 4 or not sia[3]:
            sia = (sia[0], sia[1], sia[2], ca_detail.ca.parent.repository.rrdp_notification_uri)
        assert resources.valid_until is not None and old_resources.valid_until is not None
        if resources.asn != old_resources.asn or resources.v4 != old_resources.v4 or resources.v6 != old_resources.v6:
            logger.debug("Resources changed for %r: old %s new %s", self, old_resources, resources)
            needed = True
        if resources.valid_until != old_resources.valid_until:
            logger.debug("Validity changed for %r: old %s new %s",
                         self, old_resources.valid_until, resources.valid_until)
            needed = True
        if sia != old_sia:
            logger.debug("SIA changed for %r: old %r new %r", self, old_sia, sia)
            needed = True
        if ca_detail != old_ca_detail:
            logger.debug("Issuer changed for %r: old %r new %r", self, old_ca_detail, ca_detail)
            needed = True
        if ca_detail.ca_cert_uri != old_aia:
            logger.debug("AIA changed for %r: old %r new %r", self, old_aia, ca_detail.ca_cert_uri)
            needed = True
        # Shrinking resources or shortening validity means the old cert
        # must be revoked, not just replaced.
        must_revoke = old_resources.oversized(resources) or old_resources.valid_until > resources.valid_until
        if must_revoke:
            logger.debug("Must revoke any existing cert(s) for %r", self)
            needed = True
        if not needed and force:
            logger.debug("No change needed for %r, forcing reissuance anyway", self)
            needed = True
        if not needed:
            logger.debug("No change to %r", self)
            return self
        if must_revoke:
            for child_cert in child.child_certs.filter(ca_detail = ca_detail, gski = self.gski):
                logger.debug("Revoking %r", child_cert)
                child_cert.revoke(publisher = publisher)
            ca_detail.generate_crl_and_manifest(publisher = publisher)
        # After revocation this row is gone, so issue() must create a
        # fresh ChildCert (child_cert = None); otherwise reuse self.
        child_cert = ca_detail.issue(
            ca = ca,
            child = child,
            subject_key = self.cert.getPublicKey(),
            sia = sia,
            resources = resources,
            child_cert = None if must_revoke else self,
            publisher = publisher)
        logger.debug("New %r", child_cert)
        return child_cert


    def published_callback(self, pdu):
        """
        Publication callback: check result and mark published.
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.published = None
        self.save()
+
+
class EECertificate(models.Model):
    """
    An end-entity certificate issued directly by one of this tenant's
    CAs, together with its publication bookkeeping.
    """

    gski = models.CharField(max_length = 27) # Assumes SHA-1 -- SHA-256 would be 43, SHA-512 would be 86, etc.
    cert = CertificateField()
    published = SundialField(null = True)
    tenant = models.ForeignKey(Tenant, related_name = "ee_certificates")
    ca_detail = models.ForeignKey(CADetail, related_name = "ee_certificates")

    def __repr__(self):
        try:
            return "<EECertificate: {} {}>".format(self.tenant.tenant_handle,
                                                   self.uri)
        except:
            return "<EECertificate: EECertificate object>"


    @property
    def uri(self):
        """
        Return the publication URI for this EECertificate.
        """

        return self.ca_detail.ca.sia_uri + self.uri_tail


    @property
    def uri_tail(self):
        """
        Return the tail (filename portion) of the publication URI for this
        EECertificate.
        """

        return self.gski + ".cer"


    def revoke(self, publisher):
        """
        Revoke and withdraw an EE certificate.

        Adds a CRL entry, queues withdrawal of the published object, and
        deletes this database row.
        """

        trace_call_chain()
        ca_detail = self.ca_detail
        logger.debug("Revoking %r", self)
        RevokedCert.revoke(cert = self.cert, ca_detail = ca_detail)
        publisher.queue(uri = self.uri, old_obj = self.cert, repository = ca_detail.ca.parent.repository)
        self.delete()


    def reissue(self, publisher, ca_detail = None, resources = None, force = False):
        """
        Reissue an existing EE cert, reusing the public key.  If the EE
        cert we would generate is identical to the one we already have, we
        just return; if we need to reissue, we reuse this EECertificate and
        just update its contents, as the publication URI will not have
        changed.

        Parameters:
          publisher -- publication queue to which changes are posted.
          ca_detail -- issuing ca_detail; defaults to the current one, and
                       must belong to the same CA.
          resources -- replacement resource bag; defaults to the resources
                       in the current certificate.
          force     -- reissue even when nothing has changed.
        """

        trace_call_chain()
        needed = False
        old_cert = self.cert
        old_ca_detail = self.ca_detail
        if ca_detail is None:
            ca_detail = old_ca_detail
        assert ca_detail.ca is old_ca_detail.ca
        old_resources = old_cert.get_3779resources()
        if resources is None:
            resources = old_resources
        assert resources.valid_until is not None and old_resources.valid_until is not None
        assert ca_detail.covers(resources)
        if ca_detail != self.ca_detail:
            logger.debug("ca_detail changed for %r: old %r new %r", self, self.ca_detail, ca_detail)
            needed = True
        if ca_detail.ca_cert_uri != old_cert.get_AIA()[0]:
            logger.debug("AIA changed for %r: old %s new %s", self, old_cert.get_AIA()[0], ca_detail.ca_cert_uri)
            needed = True
        if resources.valid_until != old_resources.valid_until:
            logger.debug("Validity changed for %r: old %s new %s", self, old_resources.valid_until, resources.valid_until)
            needed = True
        if resources.asn != old_resources.asn or resources.v4 != old_resources.v4 or resources.v6 != old_resources.v6:
            logger.debug("Resources changed for %r: old %s new %s", self, old_resources, resources)
            needed = True
        # Shrinking resources or shortening validity forces revocation of
        # the old certificate.
        must_revoke = old_resources.oversized(resources) or old_resources.valid_until > resources.valid_until
        if must_revoke:
            logger.debug("Must revoke existing cert(s) for %r", self)
            needed = True
        if not needed and force:
            logger.debug("No change needed for %r, forcing reissuance anyway", self)
            needed = True
        if not needed:
            logger.debug("No change to %r", self)
            return
        cn, sn = self.cert.getSubject().extract_cn_and_sn()
        self.cert = ca_detail.issue_ee(
            ca = ca_detail.ca,
            subject_key = self.cert.getPublicKey(),
            eku = self.cert.get_EKU(),
            sia = None,
            resources = resources,
            notAfter = resources.valid_until,
            cn = cn,
            sn = sn)
        self.save()
        publisher.queue(
            uri = self.uri,
            old_obj = old_cert,
            new_obj = self.cert,
            repository = ca_detail.ca.parent.repository,
            handler = self.published_callback)
        if must_revoke:
            # Bug fix: old_cert (saved from self.cert above) is already the
            # certificate object; the previous "old_cert.cert" would have
            # raised AttributeError whenever must_revoke fired.  Pass the
            # certificate itself, matching ChildCert.revoke() and
            # Ghostbuster.revoke().
            RevokedCert.revoke(cert = old_cert, ca_detail = old_ca_detail)
            ca_detail.generate_crl_and_manifest(publisher = publisher)


    def published_callback(self, pdu):
        """
        Publication callback: check result and mark published.
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.published = None
        self.save()
+
+
+
class Ghostbuster(models.Model):
    """
    A Ghostbusters (RFC 6493-style contact) record: the vCard payload,
    the signed object, and the EE certificate used to sign it.
    """

    vcard = models.TextField()
    cert = CertificateField()
    ghostbuster = GhostbusterField()
    # Timestamp of last publication attempt; None once confirmed.
    published = SundialField(null = True)
    tenant = models.ForeignKey(Tenant, related_name = "ghostbusters")
    ca_detail = models.ForeignKey(CADetail, related_name = "ghostbusters")

    def __repr__(self):
        try:
            uri = " " + self.uri
        except:
            uri = ""
        try:
            return "<Ghostbuster: {}{}>".format(self.tenant.tenant_handle, uri)
        except:
            return "<Ghostbuster: Ghostbuster object>"


    def update(self, publisher):
        """
        Bring this Ghostbuster up to date if necessary.

        Generates the record if it doesn't exist yet; regenerates it
        when its EE certificate is nearing expiration (and the issuing
        CA certificate outlives it) or when its AIA no longer matches
        the issuing ca_detail.
        """

        trace_call_chain()

        if self.ghostbuster is None:
            logger.debug("Ghostbuster record doesn't exist, generating")
            return self.generate(publisher = publisher)

        now = rpki.sundial.now()
        regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.tenant.regen_margin)

        if now > regen_time and self.cert.getNotAfter() < self.ca_detail.latest_ca_cert.getNotAfter():
            logger.debug("%r past threshold %s, regenerating", self, regen_time)
            return self.regenerate(publisher = publisher)

        if now > regen_time:
            logger.warning("%r is past threshold %s but so is issuer %r, can't regenerate", self, regen_time, self.ca_detail)

        if self.cert.get_AIA()[0] != self.ca_detail.ca_cert_uri:
            logger.debug("%r AIA changed, regenerating", self)
            return self.regenerate(publisher = publisher)


    def generate(self, publisher):
        """
        Generate a Ghostbuster record

        As with ROAs, we generate a new keypair every time.
        """

        trace_call_chain()
        resources = rpki.resource_set.resource_bag.from_inheritance()
        keypair = rpki.x509.RSA.generate()
        self.cert = self.ca_detail.issue_ee(
            ca = self.ca_detail.ca,
            resources = resources,
            subject_key = keypair.get_public(),
            sia = (None, None, self.uri_from_key(keypair),
                   self.ca_detail.ca.parent.repository.rrdp_notification_uri))
        self.ghostbuster = rpki.x509.Ghostbuster.build(self.vcard, keypair, (self.cert,))
        # published stays set until published_callback clears it.
        self.published = rpki.sundial.now()
        self.save()
        logger.debug("Generating %r", self)
        publisher.queue(
            uri = self.uri,
            new_obj = self.ghostbuster,
            repository = self.ca_detail.ca.parent.repository,
            handler = self.published_callback)


    def published_callback(self, pdu):
        """
        Check publication result.
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.published = None
        self.save()


    def revoke(self, publisher, regenerate = False, allow_failure = False):
        """
        Withdraw Ghostbuster associated with this Ghostbuster.

        In order to preserve make-before-break properties without
        duplicating code, this method also handles generating a
        replacement ghostbuster when requested.

        If allow_failure is set, failing to withdraw the ghostbuster will not be
        considered an error.
        """

        trace_call_chain()
        logger.debug("%s %r", "Regenerating" if regenerate else "Not regenerating", self)
        # Capture old state before generate() overwrites it (make before
        # break).
        old_ca_detail = self.ca_detail
        old_obj = self.ghostbuster
        old_cer = self.cert
        old_uri = self.uri
        if regenerate:
            self.generate(publisher = publisher)
        logger.debug("Withdrawing %r and revoking its EE cert", self)
        RevokedCert.revoke(cert = old_cer, ca_detail = old_ca_detail)
        publisher.queue(
            uri = old_uri,
            old_obj = old_obj,
            repository = old_ca_detail.ca.parent.repository,
            handler = False if allow_failure else None)
        if not regenerate:
            self.delete()


    def regenerate(self, publisher):
        """
        Reissue Ghostbuster associated with this Ghostbuster.
        """

        trace_call_chain()
        if self.ghostbuster is None:
            self.generate(publisher = publisher)
        else:
            self.revoke(publisher = publisher, regenerate = True)


    def uri_from_key(self, key):
        """
        Return publication URI for a public key.
        """

        trace_call_chain()
        return self.ca_detail.ca.sia_uri + key.gSKI() + ".gbr"


    @property
    def uri(self):
        """
        Return the publication URI for this Ghostbuster.
        """

        return self.ca_detail.ca.sia_uri + self.uri_tail


    @property
    def uri_tail(self):
        """
        Return the tail (filename portion) of the publication URI for this
        Ghostbuster.
        """

        return self.cert.gSKI() + ".gbr"
+
+
class RevokedCert(models.Model):
    """
    One entry destined for a ca_detail's CRL: serial number, revocation
    time, and the expiration date after which the entry may be dropped.
    """

    serial = models.BigIntegerField()
    revoked = SundialField()
    expires = SundialField()
    ca_detail = models.ForeignKey(CADetail, related_name = "revoked_certs")

    def __repr__(self):
        try:
            detail = self.ca_detail
            fields = (detail.ca.parent.tenant.tenant_handle,
                      detail.ca.parent.parent_handle,
                      detail.ca.parent_resource_class,
                      detail.crl_uri,
                      self.serial,
                      self.revoked,
                      self.expires)
            return "<RevokedCert: {}.{} class {} {} serial {} revoked {} expires {}>".format(*fields)
        except:
            return "<RevokedCert: RevokedCert object>"


    @classmethod
    def revoke(cls, cert, ca_detail):
        """
        Revoke a certificate.

        Records the revocation by creating the database row that will
        show up on this ca_detail's next CRL.
        """

        trace_call_chain()
        return cls.objects.create(
            ca_detail = ca_detail,
            serial = cert.getSerial(),
            revoked = rpki.sundial.now(),
            expires = cert.getNotAfter())
+
+
class ROA(models.Model):
    """
    A ROA (Route Origin Authorization): one AS number plus IPv4/IPv6
    prefix sets, the signed ROA object, and the EE certificate used to
    sign it.
    """

    asn = models.BigIntegerField()          # Authorized origin AS number
    ipv4 = models.TextField(null = True)    # Textual IPv4 prefix set; may be None
    ipv6 = models.TextField(null = True)    # Textual IPv6 prefix set; may be None
    cert = CertificateField()               # EE certificate that signed the ROA
    roa = ROAField()                        # The signed ROA object
    published = SundialField(null = True)   # Last publication attempt; None once confirmed
    tenant = models.ForeignKey(Tenant, related_name = "roas")
    ca_detail = models.ForeignKey(CADetail, related_name = "roas")
+
    def __repr__(self):
        # Each piece is computed under its own try/except so a partially
        # constructed (or already deleted) ROA still reprs sanely.
        try:
            resources = " {} {}".format(self.asn, ",".join(str(ip) for ip in (self.ipv4, self.ipv6) if ip is not None))
        except:
            resources = ""
        try:
            uri = " " + self.uri
        except:
            uri = ""
        try:
            return "<ROA: {}{}{}>".format(self.tenant.tenant_handle, resources, uri)
        except:
            return "<ROA: ROA object>"
+
+
    def update(self, publisher):
        """
        Bring ROA up to date if necessary.

        Generates the ROA if it doesn't exist yet; regenerates it when
        its issuing ca_detail is missing or inactive, its EE certificate
        is nearing expiration (and the issuing CA certificate outlives
        it), its resources no longer match, or its AIA has changed.
        """

        trace_call_chain()

        if self.roa is None:
            logger.debug("%r doesn't exist, generating", self)
            return self.generate(publisher = publisher)

        if self.ca_detail is None:
            logger.debug("%r has no associated ca_detail, generating", self)
            return self.generate(publisher = publisher)

        if self.ca_detail.state != "active":
            logger.debug("ca_detail associated with %r not active (state %s), regenerating", self, self.ca_detail.state)
            return self.regenerate(publisher = publisher)

        now = rpki.sundial.now()
        regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.tenant.regen_margin)

        if now > regen_time and self.cert.getNotAfter() < self.ca_detail.latest_ca_cert.getNotAfter():
            logger.debug("%r past threshold %s, regenerating", self, regen_time)
            return self.regenerate(publisher = publisher)

        if now > regen_time:
            logger.warning("%r is past threshold %s but so is issuer %r, can't regenerate", self, regen_time, self.ca_detail)

        ca_resources = self.ca_detail.latest_ca_cert.get_3779resources()
        ee_resources = self.cert.get_3779resources()

        if ee_resources.oversized(ca_resources):
            logger.debug("%r oversized with respect to CA, regenerating", self)
            return self.regenerate(publisher = publisher)

        v4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
        v6 = rpki.resource_set.resource_set_ipv6(self.ipv6)

        if ee_resources.v4 != v4 or ee_resources.v6 != v6:
            logger.debug("%r resources do not match EE, regenerating", self)
            return self.regenerate(publisher = publisher)

        if self.cert.get_AIA()[0] != self.ca_detail.ca_cert_uri:
            logger.debug("%r AIA changed, regenerating", self)
            return self.regenerate(publisher = publisher)
+
+
+ def generate(self, publisher):
+ """
+ Generate a ROA.
+
+ At present we have no way of performing a direct lookup from a
+ desired set of resources to a covering certificate, so we have to
+ search. This could be quite slow if we have a lot of active
+ ca_detail objects. Punt on the issue for now, revisit if
+ profiling shows this as a hotspot.
+
+ Once we have the right covering certificate, we generate the ROA
+ payload, generate a new EE certificate, use the EE certificate to
+ sign the ROA payload, publish the result, then throw away the
+ private key for the EE cert, all per the ROA specification. This
+ implies that generating a lot of ROAs will tend to thrash
+ /dev/random, but there is not much we can do about that.
+ """
+
+ trace_call_chain()
+
+ if self.ipv4 is None and self.ipv6 is None:
+ raise rpki.exceptions.EmptyROAPrefixList
+
+ v4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
+ v6 = rpki.resource_set.resource_set_ipv6(self.ipv6)
+
+ # http://stackoverflow.com/questions/26270042/how-do-you-catch-this-exception
+ # "Django is amazing when its not terrifying."
+ try:
+ ca_detail = self.ca_detail
+ except CADetail.DoesNotExist:
+ ca_detail = None
+
+ if ca_detail is not None and ca_detail.state == "active" and not ca_detail.has_expired():
+ logger.debug("Keeping old ca_detail %r for ROA %r", ca_detail, self)
+ else:
+ logger.debug("Searching for new ca_detail for ROA %r", self)
+ for ca_detail in CADetail.objects.filter(ca__parent__tenant = self.tenant, state = "active"):
+ resources = ca_detail.latest_ca_cert.get_3779resources()
+ if not ca_detail.has_expired() and v4.issubset(resources.v4) and v6.issubset(resources.v6):
+ logger.debug("Using %r for ROA %r", ca_detail, self)
+ self.ca_detail = ca_detail
+ break
+ else:
+ raise rpki.exceptions.NoCoveringCertForROA("Could not find a certificate covering %r" % self)
+
+ resources = rpki.resource_set.resource_bag(v4 = v4, v6 = v6)
+ keypair = rpki.x509.RSA.generate()
+
+ self.cert = self.ca_detail.issue_ee(
+ ca = self.ca_detail.ca,
+ resources = resources,
+ subject_key = keypair.get_public(),
+ sia = (None, None, self.uri_from_key(keypair),
+ self.ca_detail.ca.parent.repository.rrdp_notification_uri))
+ self.roa = rpki.x509.ROA.build(self.asn,
+ rpki.resource_set.roa_prefix_set_ipv4(self.ipv4),
+ rpki.resource_set.roa_prefix_set_ipv6(self.ipv6),
+ keypair,
+ (self.cert,))
+ self.published = rpki.sundial.now()
+ self.save()
+
+ logger.debug("Generating %r", self)
+ publisher.queue(uri = self.uri, new_obj = self.roa,
+ repository = self.ca_detail.ca.parent.repository,
+ handler = self.published_callback)
+
+
+ def published_callback(self, pdu):
+ """
+ Check publication result.
+ """
+
+ trace_call_chain()
+ rpki.publication.raise_if_error(pdu)
+ self.published = None
+ self.save()
+
+
+ def revoke(self, publisher, regenerate = False, allow_failure = False):
+ """
+ Withdraw this ROA.
+
+ In order to preserve make-before-break properties without
+ duplicating code, this method also handles generating a
+ replacement ROA when requested.
+
+ If allow_failure is set, failing to withdraw the ROA will not be
+ considered an error.
+ """
+
+ trace_call_chain()
+ logger.debug("%s %r", "Regenerating" if regenerate else "Not regenerating", self)
+ old_ca_detail = self.ca_detail
+ old_obj = self.roa
+ old_cer = self.cert
+ old_uri = self.uri
+ if regenerate:
+ self.generate(publisher = publisher)
+ logger.debug("Withdrawing %r and revoking its EE cert", self)
+ RevokedCert.revoke(cert = old_cer, ca_detail = old_ca_detail)
+ publisher.queue(
+ uri = old_uri,
+ old_obj = old_obj,
+ repository = old_ca_detail.ca.parent.repository,
+ handler = False if allow_failure else None)
+ if not regenerate:
+ self.delete()
+
+
+ def regenerate(self, publisher):
+ """
+ Reissue this ROA.
+ """
+
+ trace_call_chain()
+ if self.ca_detail is None:
+ self.generate(publisher = publisher)
+ else:
+ self.revoke(publisher = publisher, regenerate = True)
+
+
+ def uri_from_key(self, key):
+ """
+ Return publication URI for a public key.
+ """
+
+ trace_call_chain()
+ return self.ca_detail.ca.sia_uri + key.gSKI() + ".roa"
+
+
+ @property
+ def uri(self):
+ """
+ Return the publication URI for this ROA.
+ """
+
+ return self.ca_detail.ca.sia_uri + self.uri_tail
+
+
+ @property
+ def uri_tail(self):
+ """
+ Return the tail (filename portion) of the publication URI for this
+ ROA.
+ """
+
+ return self.cert.gSKI() + ".roa"
diff --git a/rpki/rtr/bgpdump.py b/rpki/rtr/bgpdump.py
index fc3ae9df..22ac0d83 100755
--- a/rpki/rtr/bgpdump.py
+++ b/rpki/rtr/bgpdump.py
@@ -39,292 +39,295 @@ from rpki.rtr.channels import Timestamp
class IgnoreThisRecord(Exception):
- pass
+ pass
class PrefixPDU(rpki.rtr.generator.PrefixPDU):
- @staticmethod
- def from_bgpdump(line, rib_dump):
- try:
- assert isinstance(rib_dump, bool)
- fields = line.split("|")
-
- # Parse prefix, including figuring out IP protocol version
- cls = rpki.rtr.generator.IPv6PrefixPDU if ":" in fields[5] else rpki.rtr.generator.IPv4PrefixPDU
- self = cls()
- self.timestamp = Timestamp(fields[1])
- p, l = fields[5].split("/")
- self.prefix = rpki.POW.IPAddress(p)
- self.prefixlen = self.max_prefixlen = int(l)
-
- # Withdrawals don't have AS paths, so be careful
- assert fields[2] == "B" if rib_dump else fields[2] in ("A", "W")
- if fields[2] == "W":
- self.asn = 0
- self.announce = 0
- else:
- self.announce = 1
- if not fields[6] or "{" in fields[6] or "(" in fields[6]:
- raise IgnoreThisRecord
- a = fields[6].split()[-1]
- if "." in a:
- a = [int(s) for s in a.split(".")]
- if len(a) != 2 or a[0] < 0 or a[0] > 65535 or a[1] < 0 or a[1] > 65535:
- logging.warn("Bad dotted ASNum %r, ignoring record", fields[6])
+ @staticmethod
+ def from_bgpdump(line, rib_dump):
+ try:
+ assert isinstance(rib_dump, bool)
+ fields = line.split("|")
+
+ # Parse prefix, including figuring out IP protocol version
+ cls = rpki.rtr.generator.IPv6PrefixPDU if ":" in fields[5] else rpki.rtr.generator.IPv4PrefixPDU
+ self = cls(version = min(rpki.rtr.pdus.PDU.version_map))
+ self.timestamp = Timestamp(fields[1])
+ p, l = fields[5].split("/")
+ self.prefix = rpki.POW.IPAddress(p)
+ self.prefixlen = self.max_prefixlen = int(l)
+
+ # Withdrawals don't have AS paths, so be careful
+ assert fields[2] == "B" if rib_dump else fields[2] in ("A", "W")
+ if fields[2] == "W":
+ self.asn = 0
+ self.announce = 0
+ else:
+ self.announce = 1
+ if not fields[6] or "{" in fields[6] or "(" in fields[6]:
+ raise IgnoreThisRecord
+ a = fields[6].split()[-1]
+ if "." in a:
+ a = [int(s) for s in a.split(".")]
+ if len(a) != 2 or a[0] < 0 or a[0] > 65535 or a[1] < 0 or a[1] > 65535:
+ logging.warn("Bad dotted ASNum %r, ignoring record", fields[6])
+ raise IgnoreThisRecord
+ a = (a[0] << 16) | a[1]
+ else:
+ a = int(a)
+ self.asn = a
+
+ self.check()
+ return self
+
+ except IgnoreThisRecord:
+ raise
+
+ except Exception, e:
+ logging.warn("Ignoring line %r: %s", line, e)
raise IgnoreThisRecord
- a = (a[0] << 16) | a[1]
- else:
- a = int(a)
- self.asn = a
- self.check()
- return self
- except IgnoreThisRecord:
- raise
+class AXFRSet(rpki.rtr.generator.AXFRSet):
- except Exception, e:
- logging.warn("Ignoring line %r: %s", line, e)
- raise IgnoreThisRecord
+ serial = None
+
+ @staticmethod
+ def read_bgpdump(filename):
+ assert filename.endswith(".bz2")
+ logging.debug("Reading %s", filename)
+ bunzip2 = subprocess.Popen(("bzip2", "-c", "-d", filename), stdout = subprocess.PIPE)
+ bgpdump = subprocess.Popen(("bgpdump", "-m", "-"), stdin = bunzip2.stdout, stdout = subprocess.PIPE)
+ return bgpdump.stdout
+
+ @classmethod
+ def parse_bgpdump_rib_dump(cls, filename):
+ # pylint: disable=W0201
+ assert os.path.basename(filename).startswith("ribs.")
+ self = cls(version = min(rpki.rtr.pdus.PDU.version_map))
+ self.serial = None
+ for line in cls.read_bgpdump(filename):
+ try:
+ pfx = PrefixPDU.from_bgpdump(line, rib_dump = True)
+ except IgnoreThisRecord:
+ continue
+ self.append(pfx)
+ self.serial = pfx.timestamp
+ if self.serial is None:
+ sys.exit("Failed to parse anything useful from %s" % filename)
+ self.sort()
+ for i in xrange(len(self) - 2, -1, -1):
+ if self[i] == self[i + 1]:
+ del self[i + 1]
+ return self
+
+ def parse_bgpdump_update(self, filename):
+ assert os.path.basename(filename).startswith("updates.")
+ for line in self.read_bgpdump(filename):
+ try:
+ pfx = PrefixPDU.from_bgpdump(line, rib_dump = False)
+ except IgnoreThisRecord:
+ continue
+ announce = pfx.announce
+ pfx.announce = 1
+ i = bisect.bisect_left(self, pfx)
+ if announce:
+ if i >= len(self) or pfx != self[i]:
+ self.insert(i, pfx)
+ else:
+ while i < len(self) and pfx.prefix == self[i].prefix and pfx.prefixlen == self[i].prefixlen:
+ del self[i]
+ self.serial = pfx.timestamp
-class AXFRSet(rpki.rtr.generator.AXFRSet):
+def bgpdump_convert_main(args):
+ """
+ * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
+ Simulate route origin data from a set of BGP dump files.
+ argv is an ordered list of filenames. Each file must be a BGP RIB
+ dumps, a BGP UPDATE dumps, or an AXFR dump in the format written by
+ this program's --cronjob command. The first file must be a RIB dump
+ or AXFR dump, it cannot be an UPDATE dump. Output will be a set of
+ AXFR and IXFR files with timestamps derived from the BGP dumps,
+ which can be used as input to this program's --server command for
+ test purposes. SUCH DATA PROVIDE NO SECURITY AT ALL.
+ * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
+ """
+
+ first = True
+ db = None
+ axfrs = []
+ version = max(rpki.rtr.pdus.PDU.version_map.iterkeys())
+
+ for filename in args.files:
+
+ if ".ax.v" in filename:
+ logging.debug("Reading %s", filename)
+ db = AXFRSet.load(filename)
+
+ elif os.path.basename(filename).startswith("ribs."):
+ db = AXFRSet.parse_bgpdump_rib_dump(filename)
+ db.save_axfr()
+
+ elif not first:
+ assert db is not None
+ db.parse_bgpdump_update(filename)
+ db.save_axfr()
- @staticmethod
- def read_bgpdump(filename):
- assert filename.endswith(".bz2")
- logging.debug("Reading %s", filename)
- bunzip2 = subprocess.Popen(("bzip2", "-c", "-d", filename), stdout = subprocess.PIPE)
- bgpdump = subprocess.Popen(("bgpdump", "-m", "-"), stdin = bunzip2.stdout, stdout = subprocess.PIPE)
- return bgpdump.stdout
-
- @classmethod
- def parse_bgpdump_rib_dump(cls, filename):
- assert os.path.basename(filename).startswith("ribs.")
- self = cls()
- self.serial = None
- for line in cls.read_bgpdump(filename):
- try:
- pfx = PrefixPDU.from_bgpdump(line, rib_dump = True)
- except IgnoreThisRecord:
- continue
- self.append(pfx)
- self.serial = pfx.timestamp
- if self.serial is None:
- sys.exit("Failed to parse anything useful from %s" % filename)
- self.sort()
- for i in xrange(len(self) - 2, -1, -1):
- if self[i] == self[i + 1]:
- del self[i + 1]
- return self
-
- def parse_bgpdump_update(self, filename):
- assert os.path.basename(filename).startswith("updates.")
- for line in self.read_bgpdump(filename):
- try:
- pfx = PrefixPDU.from_bgpdump(line, rib_dump = False)
- except IgnoreThisRecord:
- continue
- announce = pfx.announce
- pfx.announce = 1
- i = bisect.bisect_left(self, pfx)
- if announce:
- if i >= len(self) or pfx != self[i]:
- self.insert(i, pfx)
- else:
- while i < len(self) and pfx.prefix == self[i].prefix and pfx.prefixlen == self[i].prefixlen:
- del self[i]
- self.serial = pfx.timestamp
+ else:
+ sys.exit("First argument must be a RIB dump or .ax file, don't know what to do with %s" % filename)
+ logging.debug("DB serial now %d (%s)", db.serial, db.serial)
+ if first and rpki.rtr.server.read_current(version) == (None, None):
+ db.mark_current()
+ first = False
-def bgpdump_convert_main(args):
- """
- * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
- Simulate route origin data from a set of BGP dump files.
- argv is an ordered list of filenames. Each file must be a BGP RIB
- dumps, a BGP UPDATE dumps, or an AXFR dump in the format written by
- this program's --cronjob command. The first file must be a RIB dump
- or AXFR dump, it cannot be an UPDATE dump. Output will be a set of
- AXFR and IXFR files with timestamps derived from the BGP dumps,
- which can be used as input to this program's --server command for
- test purposes. SUCH DATA PROVIDE NO SECURITY AT ALL.
- * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
- """
-
- first = True
- db = None
- axfrs = []
- version = max(rpki.rtr.pdus.PDU.version_map.iterkeys())
-
- for filename in args.files:
-
- if ".ax.v" in filename:
- logging.debug("Reading %s", filename)
- db = AXFRSet.load(filename)
-
- elif os.path.basename(filename).startswith("ribs."):
- db = AXFRSet.parse_bgpdump_rib_dump(filename)
- db.save_axfr()
-
- elif not first:
- assert db is not None
- db.parse_bgpdump_update(filename)
- db.save_axfr()
-
- else:
- sys.exit("First argument must be a RIB dump or .ax file, don't know what to do with %s" % filename)
-
- logging.debug("DB serial now %d (%s)", db.serial, db.serial)
- if first and rpki.rtr.server.read_current(version) == (None, None):
- db.mark_current()
- first = False
-
- for axfr in axfrs:
- logging.debug("Loading %s", axfr)
- ax = AXFRSet.load(axfr)
- logging.debug("Computing changes from %d (%s) to %d (%s)", ax.serial, ax.serial, db.serial, db.serial)
- db.save_ixfr(ax)
- del ax
-
- axfrs.append(db.filename())
+ for axfr in axfrs:
+ logging.debug("Loading %s", axfr)
+ ax = AXFRSet.load(axfr)
+ logging.debug("Computing changes from %d (%s) to %d (%s)", ax.serial, ax.serial, db.serial, db.serial)
+ db.save_ixfr(ax)
+ del ax
+
+ axfrs.append(db.filename())
def bgpdump_select_main(args):
- """
- * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
- Simulate route origin data from a set of BGP dump files.
- Set current serial number to correspond to an .ax file created by
- converting BGP dump files. SUCH DATA PROVIDE NO SECURITY AT ALL.
- * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
- """
+ """
+ * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
+ Simulate route origin data from a set of BGP dump files.
+ Set current serial number to correspond to an .ax file created by
+ converting BGP dump files. SUCH DATA PROVIDE NO SECURITY AT ALL.
+ * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
+ """
- head, sep, tail = os.path.basename(args.ax_file).partition(".")
- if not head.isdigit() or sep != "." or not tail.startswith("ax.v") or not tail[4:].isdigit():
- sys.exit("Argument must be name of a .ax file")
+ head, sep, tail = os.path.basename(args.ax_file).partition(".")
+ if not head.isdigit() or sep != "." or not tail.startswith("ax.v") or not tail[4:].isdigit():
+ sys.exit("Argument must be name of a .ax file")
- serial = Timestamp(head)
- version = int(tail[4:])
+ serial = Timestamp(head)
+ version = int(tail[4:])
- if version not in rpki.rtr.pdus.PDU.version_map:
- sys.exit("Unknown protocol version %d" % version)
+ if version not in rpki.rtr.pdus.PDU.version_map:
+ sys.exit("Unknown protocol version %d" % version)
- nonce = rpki.rtr.server.read_current(version)[1]
- if nonce is None:
- nonce = rpki.rtr.generator.new_nonce()
+ nonce = rpki.rtr.server.read_current(version)[1]
+ if nonce is None:
+ nonce = rpki.rtr.generator.AXFRSet.new_nonce(force_zero_nonce = False)
- rpki.rtr.server.write_current(serial, nonce, version)
- rpki.rtr.generator.kick_all(serial)
+ rpki.rtr.server.write_current(serial, nonce, version)
+ rpki.rtr.generator.kick_all(serial)
class BGPDumpReplayClock(object):
- """
- Internal clock for replaying BGP dump files.
+ """
+ Internal clock for replaying BGP dump files.
- * DANGER WILL ROBINSON! *
- * DEBUGGING AND TEST USE ONLY! *
+ * DANGER WILL ROBINSON! *
+ * DEBUGGING AND TEST USE ONLY! *
- This class replaces the normal on-disk serial number mechanism with
- an in-memory version based on pre-computed data.
+ This class replaces the normal on-disk serial number mechanism with
+ an in-memory version based on pre-computed data.
- bgpdump_server_main() uses this hack to replay historical data for
- testing purposes. DO NOT USE THIS IN PRODUCTION.
+ bgpdump_server_main() uses this hack to replay historical data for
+ testing purposes. DO NOT USE THIS IN PRODUCTION.
- You have been warned.
- """
+ You have been warned.
+ """
- def __init__(self):
- self.timestamps = [Timestamp(int(f.split(".")[0])) for f in glob.iglob("*.ax.v*")]
- self.timestamps.sort()
- self.offset = self.timestamps[0] - int(time.time())
- self.nonce = rpki.rtr.generator.new_nonce()
+ def __init__(self):
+ self.timestamps = [Timestamp(int(f.split(".")[0])) for f in glob.iglob("*.ax.v*")]
+ self.timestamps.sort()
+ self.offset = self.timestamps[0] - int(time.time())
+ self.nonce = rpki.rtr.generator.AXFRSet.new_nonce(force_zero_nonce = False)
- def __nonzero__(self):
- return len(self.timestamps) > 0
+ def __nonzero__(self):
+ return len(self.timestamps) > 0
- def now(self):
- return Timestamp.now(self.offset)
+ def now(self):
+ return Timestamp.now(self.offset)
- def read_current(self, version):
- now = self.now()
- while len(self.timestamps) > 1 and now >= self.timestamps[1]:
- del self.timestamps[0]
- return self.timestamps[0], self.nonce
+ def read_current(self, version):
+ now = self.now()
+ while len(self.timestamps) > 1 and now >= self.timestamps[1]:
+ del self.timestamps[0]
+ return self.timestamps[0], self.nonce
- def siesta(self):
- now = self.now()
- if len(self.timestamps) <= 1:
- return None
- elif now < self.timestamps[1]:
- return self.timestamps[1] - now
- else:
- return 1
+ def siesta(self):
+ now = self.now()
+ if len(self.timestamps) <= 1:
+ return None
+ elif now < self.timestamps[1]:
+ return self.timestamps[1] - now
+ else:
+ return 1
def bgpdump_server_main(args):
- """
- Simulate route origin data from a set of BGP dump files.
+ """
+ Simulate route origin data from a set of BGP dump files.
+
+ * DANGER WILL ROBINSON! *
+ * DEBUGGING AND TEST USE ONLY! *
+
+ This is a clone of server_main() which replaces the external serial
+ number updates triggered via the kickme channel by cronjob_main with
+ an internal clocking mechanism to replay historical test data.
- * DANGER WILL ROBINSON! *
- * DEBUGGING AND TEST USE ONLY! *
+ DO NOT USE THIS IN PRODUCTION.
- This is a clone of server_main() which replaces the external serial
- number updates triggered via the kickme channel by cronjob_main with
- an internal clocking mechanism to replay historical test data.
+ You have been warned.
+ """
- DO NOT USE THIS IN PRODUCTION.
+ logger = logging.LoggerAdapter(logging.root, dict(connection = rpki.rtr.server.hostport_tag()))
- You have been warned.
- """
+ logger.debug("[Starting]")
- logger = logging.LoggerAdapter(logging.root, dict(connection = rpki.rtr.server._hostport_tag()))
+ if args.rpki_rtr_dir:
+ try:
+ os.chdir(args.rpki_rtr_dir)
+ except OSError, e:
+ sys.exit(e)
- logger.debug("[Starting]")
+ # Yes, this really does replace a global function defined in another
+ # module with a bound method to our clock object. Fun stuff, huh?
+ #
+ clock = BGPDumpReplayClock()
+ rpki.rtr.server.read_current = clock.read_current
- if args.rpki_rtr_dir:
try:
- os.chdir(args.rpki_rtr_dir)
- except OSError, e:
- sys.exit(e)
-
- # Yes, this really does replace a global function defined in another
- # module with a bound method to our clock object. Fun stuff, huh?
- #
- clock = BGPDumpReplayClock()
- rpki.rtr.server.read_current = clock.read_current
-
- try:
- server = rpki.rtr.server.ServerChannel(logger = logger)
- old_serial = server.get_serial()
- logger.debug("[Starting at serial %d (%s)]", old_serial, old_serial)
- while clock:
- new_serial = server.get_serial()
- if old_serial != new_serial:
- logger.debug("[Serial bumped from %d (%s) to %d (%s)]", old_serial, old_serial, new_serial, new_serial)
- server.notify()
- old_serial = new_serial
- asyncore.loop(timeout = clock.siesta(), count = 1)
- except KeyboardInterrupt:
- sys.exit(0)
+ server = rpki.rtr.server.ServerChannel(logger = logger, refresh = args.refresh, retry = args.retry, expire = args.expire)
+ old_serial = server.get_serial()
+ logger.debug("[Starting at serial %d (%s)]", old_serial, old_serial)
+ while clock:
+ new_serial = server.get_serial()
+ if old_serial != new_serial:
+ logger.debug("[Serial bumped from %d (%s) to %d (%s)]", old_serial, old_serial, new_serial, new_serial)
+ server.notify()
+ old_serial = new_serial
+ asyncore.loop(timeout = clock.siesta(), count = 1)
+ except KeyboardInterrupt:
+ sys.exit(0)
def argparse_setup(subparsers):
- """
- Set up argparse stuff for commands in this module.
- """
-
- subparser = subparsers.add_parser("bgpdump-convert", description = bgpdump_convert_main.__doc__,
- help = "Convert bgpdump to fake ROAs")
- subparser.set_defaults(func = bgpdump_convert_main, default_log_to = "syslog")
- subparser.add_argument("files", nargs = "+", help = "input files")
-
- subparser = subparsers.add_parser("bgpdump-select", description = bgpdump_select_main.__doc__,
- help = "Set current serial number for fake ROA data")
- subparser.set_defaults(func = bgpdump_select_main, default_log_to = "syslog")
- subparser.add_argument("ax_file", help = "name of the .ax to select")
-
- subparser = subparsers.add_parser("bgpdump-server", description = bgpdump_server_main.__doc__,
- help = "Replay fake ROAs generated from historical data")
- subparser.set_defaults(func = bgpdump_server_main, default_log_to = "syslog")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
+ """
+ Set up argparse stuff for commands in this module.
+ """
+
+ subparser = subparsers.add_parser("bgpdump-convert", description = bgpdump_convert_main.__doc__,
+ help = "Convert bgpdump to fake ROAs")
+ subparser.set_defaults(func = bgpdump_convert_main, default_log_destination = "syslog")
+ subparser.add_argument("files", nargs = "+", help = "input files")
+
+ subparser = subparsers.add_parser("bgpdump-select", description = bgpdump_select_main.__doc__,
+ help = "Set current serial number for fake ROA data")
+ subparser.set_defaults(func = bgpdump_select_main, default_log_destination = "syslog")
+ subparser.add_argument("ax_file", help = "name of the .ax to select")
+
+ subparser = subparsers.add_parser("bgpdump-server", description = bgpdump_server_main.__doc__,
+ help = "Replay fake ROAs generated from historical data")
+ subparser.set_defaults(func = bgpdump_server_main, default_log_destination = "syslog")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
diff --git a/rpki/rtr/channels.py b/rpki/rtr/channels.py
index d14c024d..a4dccbc1 100644
--- a/rpki/rtr/channels.py
+++ b/rpki/rtr/channels.py
@@ -32,215 +32,217 @@ import rpki.rtr.pdus
class Timestamp(int):
- """
- Wrapper around time module.
- """
-
- def __new__(cls, t):
- # __new__() is a static method, not a class method, hence the odd calling sequence.
- return super(Timestamp, cls).__new__(cls, t)
-
- @classmethod
- def now(cls, delta = 0):
- return cls(time.time() + delta)
-
- def __str__(self):
- return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self))
-
-
-class ReadBuffer(object):
- """
- Wrapper around synchronous/asynchronous read state.
-
- This also handles tracking the current protocol version,
- because it has to go somewhere and there's no better place.
- """
-
- def __init__(self):
- self.buffer = ""
- self.version = None
-
- def update(self, need, callback):
"""
- Update count of needed bytes and callback, then dispatch to callback.
+ Wrapper around time module.
"""
- self.need = need
- self.callback = callback
- return self.retry()
+ def __new__(cls, t):
+ # __new__() is a static method, not a class method, hence the odd calling sequence.
+ return super(Timestamp, cls).__new__(cls, t)
- def retry(self):
- """
- Try dispatching to the callback again.
- """
+ @classmethod
+ def now(cls, delta = 0):
+ return cls(time.time() + delta)
- return self.callback(self)
+ def __str__(self):
+ return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self))
- def available(self):
- """
- How much data do we have available in this buffer?
- """
- return len(self.buffer)
-
- def needed(self):
- """
- How much more data does this buffer need to become ready?
+class ReadBuffer(object):
"""
+ Wrapper around synchronous/asynchronous read state.
- return self.need - self.available()
-
- def ready(self):
- """
- Is this buffer ready to read yet?
+ This also handles tracking the current protocol version,
+ because it has to go somewhere and there's no better place.
"""
- return self.available() >= self.need
+ def __init__(self):
+ self.buffer = ""
+ self.version = None
+ self.need = None
+ self.callback = None
- def get(self, n):
- """
- Hand some data to the caller.
- """
+ def update(self, need, callback):
+ """
+ Update count of needed bytes and callback, then dispatch to callback.
+ """
- b = self.buffer[:n]
- self.buffer = self.buffer[n:]
- return b
+ self.need = need
+ self.callback = callback
+ return self.retry()
- def put(self, b):
- """
- Accumulate some data.
- """
+ def retry(self):
+ """
+ Try dispatching to the callback again.
+ """
- self.buffer += b
+ return self.callback(self)
- def check_version(self, version):
- """
- Track version number of PDUs read from this buffer.
- Once set, the version must not change.
- """
+ def available(self):
+ """
+ How much data do we have available in this buffer?
+ """
- if self.version is not None and version != self.version:
- raise rpki.rtr.pdus.CorruptData(
- "Received PDU version %d, expected %d" % (version, self.version))
- if self.version is None and version not in rpki.rtr.pdus.PDU.version_map:
- raise rpki.rtr.pdus.UnsupportedProtocolVersion(
- "Received PDU version %s, known versions %s" % (
- version, ", ".join(str(v) for v in rpki.rtr.pdus.PDU.version_map)))
- self.version = version
+ return len(self.buffer)
+ def needed(self):
+ """
+ How much more data does this buffer need to become ready?
+ """
-class PDUChannel(asynchat.async_chat, object):
- """
- asynchat subclass that understands our PDUs. This just handles
- network I/O. Specific engines (client, server) should be subclasses
- of this with methods that do something useful with the resulting
- PDUs.
- """
-
- def __init__(self, root_pdu_class, sock = None):
- asynchat.async_chat.__init__(self, sock) # Old-style class, can't use super()
- self.reader = ReadBuffer()
- assert issubclass(root_pdu_class, rpki.rtr.pdus.PDU)
- self.root_pdu_class = root_pdu_class
-
- @property
- def version(self):
- return self.reader.version
-
- @version.setter
- def version(self, version):
- self.reader.check_version(version)
-
- def start_new_pdu(self):
- """
- Start read of a new PDU.
- """
-
- try:
- p = self.root_pdu_class.read_pdu(self.reader)
- while p is not None:
- self.deliver_pdu(p)
- p = self.root_pdu_class.read_pdu(self.reader)
- except rpki.rtr.pdus.PDUException, e:
- self.push_pdu(e.make_error_report(version = self.version))
- self.close_when_done()
- else:
- assert not self.reader.ready()
- self.set_terminator(self.reader.needed())
-
- def collect_incoming_data(self, data):
- """
- Collect data into the read buffer.
- """
-
- self.reader.put(data)
-
- def found_terminator(self):
- """
- Got requested data, see if we now have a PDU. If so, pass it
- along, then restart cycle for a new PDU.
- """
-
- p = self.reader.retry()
- if p is None:
- self.set_terminator(self.reader.needed())
- else:
- self.deliver_pdu(p)
- self.start_new_pdu()
-
- def push_pdu(self, pdu):
- """
- Write PDU to stream.
- """
+ return self.need - self.available()
- try:
- self.push(pdu.to_pdu())
- except OSError, e:
- if e.errno != errno.EAGAIN:
- raise
+ def ready(self):
+ """
+ Is this buffer ready to read yet?
+ """
- def log(self, msg):
- """
- Intercept asyncore's logging.
- """
+ return self.available() >= self.need
- logging.info(msg)
+ def get(self, n):
+ """
+ Hand some data to the caller.
+ """
- def log_info(self, msg, tag = "info"):
- """
- Intercept asynchat's logging.
- """
+ b = self.buffer[:n]
+ self.buffer = self.buffer[n:]
+ return b
- logging.info("asynchat: %s: %s", tag, msg)
+ def put(self, b):
+ """
+ Accumulate some data.
+ """
- def handle_error(self):
- """
- Handle errors caught by asyncore main loop.
- """
+ self.buffer += b
- logging.exception("[Unhandled exception]")
- logging.critical("[Exiting after unhandled exception]")
- sys.exit(1)
+ def check_version(self, version):
+ """
+ Track version number of PDUs read from this buffer.
+ Once set, the version must not change.
+ """
- def init_file_dispatcher(self, fd):
- """
- Kludge to plug asyncore.file_dispatcher into asynchat. Call from
- subclass's __init__() method, after calling
- PDUChannel.__init__(), and don't read this on a full stomach.
- """
+ if self.version is not None and version != self.version:
+ raise rpki.rtr.pdus.CorruptData(
+ "Received PDU version %d, expected %d" % (version, self.version))
+ if self.version is None and version not in rpki.rtr.pdus.PDU.version_map:
+ raise rpki.rtr.pdus.UnsupportedProtocolVersion(
+ "Received PDU version %s, known versions %s" % (
+ version, ", ".join(str(v) for v in rpki.rtr.pdus.PDU.version_map)))
+ self.version = version
- self.connected = True
- self._fileno = fd
- self.socket = asyncore.file_wrapper(fd)
- self.add_channel()
- flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
- flags = flags | os.O_NONBLOCK
- fcntl.fcntl(fd, fcntl.F_SETFL, flags)
- def handle_close(self):
- """
- Exit when channel closed.
+class PDUChannel(asynchat.async_chat, object):
"""
-
- asynchat.async_chat.handle_close(self)
- sys.exit(0)
+ asynchat subclass that understands our PDUs. This just handles
+ network I/O. Specific engines (client, server) should be subclasses
+ of this with methods that do something useful with the resulting
+ PDUs.
+ """
+
+ def __init__(self, root_pdu_class, sock = None):
+ asynchat.async_chat.__init__(self, sock) # Old-style class, can't use super()
+ self.reader = ReadBuffer()
+ assert issubclass(root_pdu_class, rpki.rtr.pdus.PDU)
+ self.root_pdu_class = root_pdu_class
+
+ @property
+ def version(self):
+ return self.reader.version
+
+ @version.setter
+ def version(self, version):
+ self.reader.check_version(version)
+
+ def start_new_pdu(self):
+ """
+ Start read of a new PDU.
+ """
+
+ try:
+ p = self.root_pdu_class.read_pdu(self.reader)
+ while p is not None:
+ self.deliver_pdu(p)
+ p = self.root_pdu_class.read_pdu(self.reader)
+ except rpki.rtr.pdus.PDUException, e:
+ self.push_pdu(e.make_error_report(version = self.version))
+ self.close_when_done()
+ else:
+ assert not self.reader.ready()
+ self.set_terminator(self.reader.needed())
+
+ def collect_incoming_data(self, data):
+ """
+ Collect data into the read buffer.
+ """
+
+ self.reader.put(data)
+
+ def found_terminator(self):
+ """
+ Got requested data, see if we now have a PDU. If so, pass it
+ along, then restart cycle for a new PDU.
+ """
+
+ p = self.reader.retry()
+ if p is None:
+ self.set_terminator(self.reader.needed())
+ else:
+ self.deliver_pdu(p)
+ self.start_new_pdu()
+
+ def push_pdu(self, pdu):
+ """
+ Write PDU to stream.
+ """
+
+ try:
+ self.push(pdu.to_pdu())
+ except OSError, e:
+ if e.errno != errno.EAGAIN:
+ raise
+
+ def log(self, msg):
+ """
+ Intercept asyncore's logging.
+ """
+
+ logging.info(msg)
+
+ def log_info(self, msg, tag = "info"):
+ """
+ Intercept asynchat's logging.
+ """
+
+ logging.info("asynchat: %s: %s", tag, msg)
+
+ def handle_error(self):
+ """
+ Handle errors caught by asyncore main loop.
+ """
+
+ logging.exception("[Unhandled exception]")
+ logging.critical("[Exiting after unhandled exception]")
+ sys.exit(1)
+
+ def init_file_dispatcher(self, fd):
+ """
+ Kludge to plug asyncore.file_dispatcher into asynchat. Call from
+ subclass's __init__() method, after calling
+ PDUChannel.__init__(), and don't read this on a full stomach.
+ """
+
+ self.connected = True
+ self._fileno = fd
+ self.socket = asyncore.file_wrapper(fd)
+ self.add_channel()
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
+ flags = flags | os.O_NONBLOCK
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+ def handle_close(self):
+ """
+ Exit when channel closed.
+ """
+
+ asynchat.async_chat.handle_close(self)
+ sys.exit(0)
diff --git a/rpki/rtr/client.py b/rpki/rtr/client.py
index a35ab81d..a8348087 100644
--- a/rpki/rtr/client.py
+++ b/rpki/rtr/client.py
@@ -37,13 +37,13 @@ from rpki.rtr.channels import Timestamp
class PDU(rpki.rtr.pdus.PDU):
- def consume(self, client):
- """
- Handle results in test client. Default behavior is just to print
- out the PDU; data PDU subclasses may override this.
- """
+ def consume(self, client):
+ """
+ Handle results in test client. Default behavior is just to print
+ out the PDU; data PDU subclasses may override this.
+ """
- logging.debug(self)
+ logging.debug(self)
clone_pdu = rpki.rtr.pdus.clone_pdu_root(PDU)
@@ -52,407 +52,407 @@ clone_pdu = rpki.rtr.pdus.clone_pdu_root(PDU)
@clone_pdu
class SerialNotifyPDU(rpki.rtr.pdus.SerialNotifyPDU):
- def consume(self, client):
- """
- Respond to a SerialNotifyPDU with either a SerialQueryPDU or a
- ResetQueryPDU, depending on what we already know.
- """
+ def consume(self, client):
+ """
+ Respond to a SerialNotifyPDU with either a SerialQueryPDU or a
+ ResetQueryPDU, depending on what we already know.
+ """
- logging.debug(self)
- if client.serial is None or client.nonce != self.nonce:
- client.push_pdu(ResetQueryPDU(version = client.version))
- elif self.serial != client.serial:
- client.push_pdu(SerialQueryPDU(version = client.version,
- serial = client.serial,
- nonce = client.nonce))
- else:
- logging.debug("[Notify did not change serial number, ignoring]")
+ logging.debug(self)
+ if client.serial is None or client.nonce != self.nonce:
+ client.push_pdu(ResetQueryPDU(version = client.version))
+ elif self.serial != client.serial:
+ client.push_pdu(SerialQueryPDU(version = client.version,
+ serial = client.serial,
+ nonce = client.nonce))
+ else:
+ logging.debug("[Notify did not change serial number, ignoring]")
@clone_pdu
class CacheResponsePDU(rpki.rtr.pdus.CacheResponsePDU):
- def consume(self, client):
- """
- Handle CacheResponsePDU.
- """
+ def consume(self, client):
+ """
+ Handle CacheResponsePDU.
+ """
- logging.debug(self)
- if self.nonce != client.nonce:
- logging.debug("[Nonce changed, resetting]")
- client.cache_reset()
+ logging.debug(self)
+ if self.nonce != client.nonce:
+ logging.debug("[Nonce changed, resetting]")
+ client.cache_reset()
@clone_pdu
class EndOfDataPDUv0(rpki.rtr.pdus.EndOfDataPDUv0):
- def consume(self, client):
- """
- Handle EndOfDataPDU response.
- """
+ def consume(self, client):
+ """
+ Handle EndOfDataPDU response.
+ """
- logging.debug(self)
- client.end_of_data(self.version, self.serial, self.nonce, self.refresh, self.retry, self.expire)
+ logging.debug(self)
+ client.end_of_data(self.version, self.serial, self.nonce, self.refresh, self.retry, self.expire)
@clone_pdu
class EndOfDataPDUv1(rpki.rtr.pdus.EndOfDataPDUv1):
- def consume(self, client):
- """
- Handle EndOfDataPDU response.
- """
+ def consume(self, client):
+ """
+ Handle EndOfDataPDU response.
+ """
- logging.debug(self)
- client.end_of_data(self.version, self.serial, self.nonce, self.refresh, self.retry, self.expire)
+ logging.debug(self)
+ client.end_of_data(self.version, self.serial, self.nonce, self.refresh, self.retry, self.expire)
@clone_pdu
class CacheResetPDU(rpki.rtr.pdus.CacheResetPDU):
- def consume(self, client):
- """
- Handle CacheResetPDU response, by issuing a ResetQueryPDU.
- """
+ def consume(self, client):
+ """
+ Handle CacheResetPDU response, by issuing a ResetQueryPDU.
+ """
- logging.debug(self)
- client.cache_reset()
- client.push_pdu(ResetQueryPDU(version = client.version))
+ logging.debug(self)
+ client.cache_reset()
+ client.push_pdu(ResetQueryPDU(version = client.version))
class PrefixPDU(rpki.rtr.pdus.PrefixPDU):
- """
- Object representing one prefix. This corresponds closely to one PDU
- in the rpki-router protocol, so closely that we use lexical ordering
- of the wire format of the PDU as the ordering for this class.
-
- This is a virtual class, but the .from_text() constructor
- instantiates the correct concrete subclass (IPv4PrefixPDU or
- IPv6PrefixPDU) depending on the syntax of its input text.
- """
-
- def consume(self, client):
"""
- Handle one incoming prefix PDU
+ Object representing one prefix. This corresponds closely to one PDU
+ in the rpki-router protocol, so closely that we use lexical ordering
+ of the wire format of the PDU as the ordering for this class.
+
+ This is a virtual class, but the .from_text() constructor
+ instantiates the correct concrete subclass (IPv4PrefixPDU or
+ IPv6PrefixPDU) depending on the syntax of its input text.
"""
- logging.debug(self)
- client.consume_prefix(self)
+ def consume(self, client):
+ """
+ Handle one incoming prefix PDU
+ """
+
+ logging.debug(self)
+ client.consume_prefix(self)
@clone_pdu
class IPv4PrefixPDU(PrefixPDU, rpki.rtr.pdus.IPv4PrefixPDU):
- pass
+ pass
@clone_pdu
class IPv6PrefixPDU(PrefixPDU, rpki.rtr.pdus.IPv6PrefixPDU):
- pass
+ pass
@clone_pdu
class ErrorReportPDU(PDU, rpki.rtr.pdus.ErrorReportPDU):
- pass
+ pass
@clone_pdu
class RouterKeyPDU(rpki.rtr.pdus.RouterKeyPDU):
- """
- Router Key PDU.
- """
-
- def consume(self, client):
"""
- Handle one incoming Router Key PDU
+ Router Key PDU.
"""
- logging.debug(self)
- client.consume_routerkey(self)
+ def consume(self, client):
+ """
+ Handle one incoming Router Key PDU
+ """
+ logging.debug(self)
+ client.consume_routerkey(self)
-class ClientChannel(rpki.rtr.channels.PDUChannel):
- """
- Client protocol engine, handles upcalls from PDUChannel.
- """
-
- serial = None
- nonce = None
- sql = None
- host = None
- port = None
- cache_id = None
- refresh = rpki.rtr.pdus.default_refresh
- retry = rpki.rtr.pdus.default_retry
- expire = rpki.rtr.pdus.default_expire
- updated = Timestamp(0)
-
- def __init__(self, sock, proc, killsig, args, host = None, port = None):
- self.killsig = killsig
- self.proc = proc
- self.args = args
- self.host = args.host if host is None else host
- self.port = args.port if port is None else port
- super(ClientChannel, self).__init__(sock = sock, root_pdu_class = PDU)
- if args.force_version is not None:
- self.version = args.force_version
- self.start_new_pdu()
- if args.sql_database:
- self.setup_sql()
-
- @classmethod
- def ssh(cls, args):
- """
- Set up ssh connection and start listening for first PDU.
- """
- if args.port is None:
- argv = ("ssh", "-s", args.host, "rpki-rtr")
- else:
- argv = ("ssh", "-p", args.port, "-s", args.host, "rpki-rtr")
- logging.debug("[Running ssh: %s]", " ".join(argv))
- s = socket.socketpair()
- return cls(sock = s[1],
- proc = subprocess.Popen(argv, executable = "/usr/bin/ssh",
- stdin = s[0], stdout = s[0], close_fds = True),
- killsig = signal.SIGKILL, args = args)
-
- @classmethod
- def tcp(cls, args):
- """
- Set up TCP connection and start listening for first PDU.
+class ClientChannel(rpki.rtr.channels.PDUChannel):
"""
-
- logging.debug("[Starting raw TCP connection to %s:%s]", args.host, args.port)
- try:
- addrinfo = socket.getaddrinfo(args.host, args.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
- except socket.error, e:
- logging.debug("[socket.getaddrinfo() failed: %s]", e)
- else:
- for ai in addrinfo:
- af, socktype, proto, cn, sa = ai # pylint: disable=W0612
- logging.debug("[Trying addr %s port %s]", sa[0], sa[1])
+ Client protocol engine, handles upcalls from PDUChannel.
+ """
+
+ serial = None
+ nonce = None
+ sql = None
+ host = None
+ port = None
+ cache_id = None
+ refresh = rpki.rtr.pdus.default_refresh
+ retry = rpki.rtr.pdus.default_retry
+ expire = rpki.rtr.pdus.default_expire
+ updated = Timestamp(0)
+
+ def __init__(self, sock, proc, killsig, args, host = None, port = None):
+ self.killsig = killsig
+ self.proc = proc
+ self.args = args
+ self.host = args.host if host is None else host
+ self.port = args.port if port is None else port
+ super(ClientChannel, self).__init__(sock = sock, root_pdu_class = PDU)
+ if args.force_version is not None:
+ self.version = args.force_version
+ self.start_new_pdu()
+ if args.sql_database:
+ self.setup_sql()
+
+ @classmethod
+ def ssh(cls, args):
+ """
+ Set up ssh connection and start listening for first PDU.
+ """
+
+ if args.port is None:
+ argv = ("ssh", "-s", args.host, "rpki-rtr")
+ else:
+ argv = ("ssh", "-p", args.port, "-s", args.host, "rpki-rtr")
+ logging.debug("[Running ssh: %s]", " ".join(argv))
+ s = socket.socketpair()
+ return cls(sock = s[1],
+ proc = subprocess.Popen(argv, executable = "/usr/bin/ssh",
+ stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGKILL, args = args)
+
+ @classmethod
+ def tcp(cls, args):
+ """
+ Set up TCP connection and start listening for first PDU.
+ """
+
+ logging.debug("[Starting raw TCP connection to %s:%s]", args.host, args.port)
try:
- s = socket.socket(af, socktype, proto)
+ addrinfo = socket.getaddrinfo(args.host, args.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
except socket.error, e:
- logging.debug("[socket.socket() failed: %s]", e)
- continue
+ logging.debug("[socket.getaddrinfo() failed: %s]", e)
+ else:
+ for ai in addrinfo:
+ af, socktype, proto, cn, sa = ai # pylint: disable=W0612
+ logging.debug("[Trying addr %s port %s]", sa[0], sa[1])
+ try:
+ s = socket.socket(af, socktype, proto)
+ except socket.error, e:
+ logging.debug("[socket.socket() failed: %s]", e)
+ continue
+ try:
+ s.connect(sa)
+ except socket.error, e:
+ logging.exception("[socket.connect() failed: %s]", e)
+ s.close()
+ continue
+ return cls(sock = s, proc = None, killsig = None, args = args)
+ sys.exit(1)
+
+ @classmethod
+ def loopback(cls, args):
+ """
+ Set up loopback connection and start listening for first PDU.
+ """
+
+ s = socket.socketpair()
+ logging.debug("[Using direct subprocess kludge for testing]")
+ argv = (sys.executable, sys.argv[0], "server")
+ return cls(sock = s[1],
+ proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGINT, args = args,
+ host = args.host or "none", port = args.port or "none")
+
+ @classmethod
+ def tls(cls, args):
+ """
+ Set up TLS connection and start listening for first PDU.
+
+ NB: This uses OpenSSL's "s_client" command, which does not
+ check server certificates properly, so this is not suitable for
+ production use. Fixing this would be a trivial change, it just
+ requires using a client program which does check certificates
+ properly (eg, gnutls-cli, or stunnel's client mode if that works
+ for such purposes this week).
+ """
+
+ argv = ("openssl", "s_client", "-tls1", "-quiet", "-connect", "%s:%s" % (args.host, args.port))
+ logging.debug("[Running: %s]", " ".join(argv))
+ s = socket.socketpair()
+ return cls(sock = s[1],
+ proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGKILL, args = args)
+
+ def setup_sql(self):
+ """
+ Set up an SQLite database to contain the table we receive. If
+ necessary, we will create the database.
+ """
+
+ import sqlite3
+ missing = not os.path.exists(self.args.sql_database)
+ self.sql = sqlite3.connect(self.args.sql_database, detect_types = sqlite3.PARSE_DECLTYPES)
+ self.sql.text_factory = str
+ cur = self.sql.cursor()
+ cur.execute("PRAGMA foreign_keys = on")
+ if missing:
+ cur.execute('''
+ CREATE TABLE cache (
+ cache_id INTEGER PRIMARY KEY NOT NULL,
+ host TEXT NOT NULL,
+ port TEXT NOT NULL,
+ version INTEGER,
+ nonce INTEGER,
+ serial INTEGER,
+ updated INTEGER,
+ refresh INTEGER,
+ retry INTEGER,
+ expire INTEGER,
+ UNIQUE (host, port))''')
+ cur.execute('''
+ CREATE TABLE prefix (
+ cache_id INTEGER NOT NULL
+ REFERENCES cache(cache_id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ asn INTEGER NOT NULL,
+ prefix TEXT NOT NULL,
+ prefixlen INTEGER NOT NULL,
+ max_prefixlen INTEGER NOT NULL,
+ UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''')
+ cur.execute('''
+ CREATE TABLE routerkey (
+ cache_id INTEGER NOT NULL
+ REFERENCES cache(cache_id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ asn INTEGER NOT NULL,
+ ski TEXT NOT NULL,
+ key TEXT NOT NULL,
+ UNIQUE (cache_id, asn, ski),
+ UNIQUE (cache_id, asn, key))''')
+ elif self.args.reset_session:
+ cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (self.host, self.port))
+ cur.execute("SELECT cache_id, version, nonce, serial, refresh, retry, expire, updated "
+ "FROM cache WHERE host = ? AND port = ?",
+ (self.host, self.port))
try:
- s.connect(sa)
- except socket.error, e:
- logging.exception("[socket.connect() failed: %s]", e)
- s.close()
- continue
- return cls(sock = s, proc = None, killsig = None, args = args)
- sys.exit(1)
-
- @classmethod
- def loopback(cls, args):
- """
- Set up loopback connection and start listening for first PDU.
- """
-
- s = socket.socketpair()
- logging.debug("[Using direct subprocess kludge for testing]")
- argv = (sys.executable, sys.argv[0], "server")
- return cls(sock = s[1],
- proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
- killsig = signal.SIGINT, args = args,
- host = args.host or "none", port = args.port or "none")
-
- @classmethod
- def tls(cls, args):
- """
- Set up TLS connection and start listening for first PDU.
-
- NB: This uses OpenSSL's "s_client" command, which does not
- check server certificates properly, so this is not suitable for
- production use. Fixing this would be a trivial change, it just
- requires using a client program which does check certificates
- properly (eg, gnutls-cli, or stunnel's client mode if that works
- for such purposes this week).
- """
-
- argv = ("openssl", "s_client", "-tls1", "-quiet", "-connect", "%s:%s" % (args.host, args.port))
- logging.debug("[Running: %s]", " ".join(argv))
- s = socket.socketpair()
- return cls(sock = s[1],
- proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
- killsig = signal.SIGKILL, args = args)
-
- def setup_sql(self):
- """
- Set up an SQLite database to contain the table we receive. If
- necessary, we will create the database.
- """
-
- import sqlite3
- missing = not os.path.exists(self.args.sql_database)
- self.sql = sqlite3.connect(self.args.sql_database, detect_types = sqlite3.PARSE_DECLTYPES)
- self.sql.text_factory = str
- cur = self.sql.cursor()
- cur.execute("PRAGMA foreign_keys = on")
- if missing:
- cur.execute('''
- CREATE TABLE cache (
- cache_id INTEGER PRIMARY KEY NOT NULL,
- host TEXT NOT NULL,
- port TEXT NOT NULL,
- version INTEGER,
- nonce INTEGER,
- serial INTEGER,
- updated INTEGER,
- refresh INTEGER,
- retry INTEGER,
- expire INTEGER,
- UNIQUE (host, port))''')
- cur.execute('''
- CREATE TABLE prefix (
- cache_id INTEGER NOT NULL
- REFERENCES cache(cache_id)
- ON DELETE CASCADE
- ON UPDATE CASCADE,
- asn INTEGER NOT NULL,
- prefix TEXT NOT NULL,
- prefixlen INTEGER NOT NULL,
- max_prefixlen INTEGER NOT NULL,
- UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''')
- cur.execute('''
- CREATE TABLE routerkey (
- cache_id INTEGER NOT NULL
- REFERENCES cache(cache_id)
- ON DELETE CASCADE
- ON UPDATE CASCADE,
- asn INTEGER NOT NULL,
- ski TEXT NOT NULL,
- key TEXT NOT NULL,
- UNIQUE (cache_id, asn, ski),
- UNIQUE (cache_id, asn, key))''')
- elif self.args.reset_session:
- cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (self.host, self.port))
- cur.execute("SELECT cache_id, version, nonce, serial, refresh, retry, expire, updated "
- "FROM cache WHERE host = ? AND port = ?",
- (self.host, self.port))
- try:
- self.cache_id, version, self.nonce, self.serial, refresh, retry, expire, updated = cur.fetchone()
- if version is not None and self.version is not None and version != self.version:
- cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (self.host, self.port))
- raise TypeError # Simulate lookup failure case
- if version is not None:
- self.version = version
- if refresh is not None:
+ self.cache_id, version, self.nonce, self.serial, refresh, retry, expire, updated = cur.fetchone()
+ if version is not None and self.version is not None and version != self.version:
+ cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (self.host, self.port))
+ raise TypeError # Simulate lookup failure case
+ if version is not None:
+ self.version = version
+ if refresh is not None:
+ self.refresh = refresh
+ if retry is not None:
+ self.retry = retry
+ if expire is not None:
+ self.expire = expire
+ if updated is not None:
+ self.updated = Timestamp(updated)
+ except TypeError:
+ cur.execute("INSERT INTO cache (host, port) VALUES (?, ?)", (self.host, self.port))
+ self.cache_id = cur.lastrowid
+ self.sql.commit()
+ logging.info("[Session %d version %s nonce %s serial %s refresh %s retry %s expire %s updated %s]",
+ self.cache_id, self.version, self.nonce,
+ self.serial, self.refresh, self.retry, self.expire, self.updated)
+
+ def cache_reset(self):
+ """
+ Handle CacheResetPDU actions.
+ """
+
+ self.serial = None
+ if self.sql:
+ cur = self.sql.cursor()
+ cur.execute("DELETE FROM prefix WHERE cache_id = ?", (self.cache_id,))
+ cur.execute("DELETE FROM routerkey WHERE cache_id = ?", (self.cache_id,))
+ cur.execute("UPDATE cache SET version = ?, serial = NULL WHERE cache_id = ?", (self.version, self.cache_id))
+ self.sql.commit()
+
+ def end_of_data(self, version, serial, nonce, refresh, retry, expire):
+ """
+ Handle EndOfDataPDU actions.
+ """
+
+ assert version == self.version
+ self.serial = serial
+ self.nonce = nonce
self.refresh = refresh
- if retry is not None:
- self.retry = retry
- if expire is not None:
- self.expire = expire
- if updated is not None:
- self.updated = Timestamp(updated)
- except TypeError:
- cur.execute("INSERT INTO cache (host, port) VALUES (?, ?)", (self.host, self.port))
- self.cache_id = cur.lastrowid
- self.sql.commit()
- logging.info("[Session %d version %s nonce %s serial %s refresh %s retry %s expire %s updated %s]",
- self.cache_id, self.version, self.nonce,
- self.serial, self.refresh, self.retry, self.expire, self.updated)
-
- def cache_reset(self):
- """
- Handle CacheResetPDU actions.
- """
-
- self.serial = None
- if self.sql:
- cur = self.sql.cursor()
- cur.execute("DELETE FROM prefix WHERE cache_id = ?", (self.cache_id,))
- cur.execute("DELETE FROM routerkey WHERE cache_id = ?", (self.cache_id,))
- cur.execute("UPDATE cache SET version = ?, serial = NULL WHERE cache_id = ?", (self.version, self.cache_id))
- self.sql.commit()
-
- def end_of_data(self, version, serial, nonce, refresh, retry, expire):
- """
- Handle EndOfDataPDU actions.
- """
-
- assert version == self.version
- self.serial = serial
- self.nonce = nonce
- self.refresh = refresh
- self.retry = retry
- self.expire = expire
- self.updated = Timestamp.now()
- if self.sql:
- self.sql.execute("UPDATE cache SET"
- " version = ?, serial = ?, nonce = ?,"
- " refresh = ?, retry = ?, expire = ?,"
- " updated = ? "
- "WHERE cache_id = ?",
- (version, serial, nonce, refresh, retry, expire, int(self.updated), self.cache_id))
- self.sql.commit()
-
- def consume_prefix(self, prefix):
- """
- Handle one prefix PDU.
- """
-
- if self.sql:
- values = (self.cache_id, prefix.asn, str(prefix.prefix), prefix.prefixlen, prefix.max_prefixlen)
- if prefix.announce:
- self.sql.execute("INSERT INTO prefix (cache_id, asn, prefix, prefixlen, max_prefixlen) "
- "VALUES (?, ?, ?, ?, ?)",
- values)
- else:
- self.sql.execute("DELETE FROM prefix "
- "WHERE cache_id = ? AND asn = ? AND prefix = ? AND prefixlen = ? AND max_prefixlen = ?",
- values)
-
- def consume_routerkey(self, routerkey):
- """
- Handle one Router Key PDU.
- """
-
- if self.sql:
- values = (self.cache_id, routerkey.asn,
- base64.urlsafe_b64encode(routerkey.ski).rstrip("="),
- base64.b64encode(routerkey.key))
- if routerkey.announce:
- self.sql.execute("INSERT INTO routerkey (cache_id, asn, ski, key) "
- "VALUES (?, ?, ?, ?)",
- values)
- else:
- self.sql.execute("DELETE FROM routerkey "
- "WHERE cache_id = ? AND asn = ? AND (ski = ? OR key = ?)",
- values)
-
- def deliver_pdu(self, pdu):
- """
- Handle received PDU.
- """
-
- pdu.consume(self)
-
- def push_pdu(self, pdu):
- """
- Log outbound PDU then write it to stream.
- """
-
- logging.debug(pdu)
- super(ClientChannel, self).push_pdu(pdu)
-
- def cleanup(self):
- """
- Force clean up this client's child process. If everything goes
- well, child will have exited already before this method is called,
- but we may need to whack it with a stick if something breaks.
- """
-
- if self.proc is not None and self.proc.returncode is None:
- try:
- os.kill(self.proc.pid, self.killsig)
- except OSError:
- pass
-
- def handle_close(self):
- """
- Intercept close event so we can log it, then shut down.
- """
-
- logging.debug("Server closed channel")
- super(ClientChannel, self).handle_close()
+ self.retry = retry
+ self.expire = expire
+ self.updated = Timestamp.now()
+ if self.sql:
+ self.sql.execute("UPDATE cache SET"
+ " version = ?, serial = ?, nonce = ?,"
+ " refresh = ?, retry = ?, expire = ?,"
+ " updated = ? "
+ "WHERE cache_id = ?",
+ (version, serial, nonce, refresh, retry, expire, int(self.updated), self.cache_id))
+ self.sql.commit()
+
+ def consume_prefix(self, prefix):
+ """
+ Handle one prefix PDU.
+ """
+
+ if self.sql:
+ values = (self.cache_id, prefix.asn, str(prefix.prefix), prefix.prefixlen, prefix.max_prefixlen)
+ if prefix.announce:
+ self.sql.execute("INSERT INTO prefix (cache_id, asn, prefix, prefixlen, max_prefixlen) "
+ "VALUES (?, ?, ?, ?, ?)",
+ values)
+ else:
+ self.sql.execute("DELETE FROM prefix "
+ "WHERE cache_id = ? AND asn = ? AND prefix = ? AND prefixlen = ? AND max_prefixlen = ?",
+ values)
+
+ def consume_routerkey(self, routerkey):
+ """
+ Handle one Router Key PDU.
+ """
+
+ if self.sql:
+ values = (self.cache_id, routerkey.asn,
+ base64.urlsafe_b64encode(routerkey.ski).rstrip("="),
+ base64.b64encode(routerkey.key))
+ if routerkey.announce:
+ self.sql.execute("INSERT INTO routerkey (cache_id, asn, ski, key) "
+ "VALUES (?, ?, ?, ?)",
+ values)
+ else:
+ self.sql.execute("DELETE FROM routerkey "
+ "WHERE cache_id = ? AND asn = ? AND (ski = ? OR key = ?)",
+ values)
+
+ def deliver_pdu(self, pdu):
+ """
+ Handle received PDU.
+ """
+
+ pdu.consume(self)
+
+ def push_pdu(self, pdu):
+ """
+ Log outbound PDU then write it to stream.
+ """
+
+ logging.debug(pdu)
+ super(ClientChannel, self).push_pdu(pdu)
+
+ def cleanup(self):
+ """
+ Force clean up this client's child process. If everything goes
+ well, child will have exited already before this method is called,
+ but we may need to whack it with a stick if something breaks.
+ """
+
+ if self.proc is not None and self.proc.returncode is None:
+ try:
+ os.kill(self.proc.pid, self.killsig)
+ except OSError:
+ pass
+
+ def handle_close(self):
+ """
+ Intercept close event so we can log it, then shut down.
+ """
+
+ logging.debug("Server closed channel")
+ super(ClientChannel, self).handle_close()
# Hack to let us subclass this from scripts without needing to rewrite client_main().
@@ -460,73 +460,73 @@ class ClientChannel(rpki.rtr.channels.PDUChannel):
ClientChannelClass = ClientChannel
def client_main(args):
- """
- Test client, intended primarily for debugging.
- """
+ """
+ Test client, intended primarily for debugging.
+ """
- logging.debug("[Startup]")
+ logging.debug("[Startup]")
- assert issubclass(ClientChannelClass, ClientChannel)
- constructor = getattr(ClientChannelClass, args.protocol)
+ assert issubclass(ClientChannelClass, ClientChannel)
+ constructor = getattr(ClientChannelClass, args.protocol)
- client = None
- try:
- client = constructor(args)
+ client = None
+ try:
+ client = constructor(args)
- polled = client.updated
- wakeup = None
+ polled = client.updated
+ wakeup = None
- while True:
+ while True:
- now = Timestamp.now()
+ now = Timestamp.now()
- if client.serial is not None and now > client.updated + client.expire:
- logging.info("[Expiring client data: serial %s, last updated %s, expire %s]",
- client.serial, client.updated, client.expire)
- client.cache_reset()
+ if client.serial is not None and now > client.updated + client.expire:
+ logging.info("[Expiring client data: serial %s, last updated %s, expire %s]",
+ client.serial, client.updated, client.expire)
+ client.cache_reset()
- if client.serial is None or client.nonce is None:
- polled = now
- client.push_pdu(ResetQueryPDU(version = client.version))
+ if client.serial is None or client.nonce is None:
+ polled = now
+ client.push_pdu(ResetQueryPDU(version = client.version))
- elif now >= client.updated + client.refresh:
- polled = now
- client.push_pdu(SerialQueryPDU(version = client.version,
- serial = client.serial,
- nonce = client.nonce))
+ elif now >= client.updated + client.refresh:
+ polled = now
+ client.push_pdu(SerialQueryPDU(version = client.version,
+ serial = client.serial,
+ nonce = client.nonce))
- remaining = 1
+ remaining = 1
- while remaining > 0:
- now = Timestamp.now()
- timer = client.retry if (now >= client.updated + client.refresh) else client.refresh
- wokeup = wakeup
- wakeup = max(now, Timestamp(max(polled, client.updated) + timer))
- remaining = wakeup - now
- if wakeup != wokeup:
- logging.info("[Last client poll %s, next %s]", polled, wakeup)
- asyncore.loop(timeout = remaining, count = 1)
+ while remaining > 0:
+ now = Timestamp.now()
+ timer = client.retry if (now >= client.updated + client.refresh) else client.refresh
+ wokeup = wakeup
+ wakeup = max(now, Timestamp(max(polled, client.updated) + timer))
+ remaining = wakeup - now
+ if wakeup != wokeup:
+ logging.info("[Last client poll %s, next %s]", polled, wakeup)
+ asyncore.loop(timeout = remaining, count = 1)
- except KeyboardInterrupt:
- sys.exit(0)
+ except KeyboardInterrupt:
+ sys.exit(0)
- finally:
- if client is not None:
- client.cleanup()
+ finally:
+ if client is not None:
+ client.cleanup()
def argparse_setup(subparsers):
- """
- Set up argparse stuff for commands in this module.
- """
-
- subparser = subparsers.add_parser("client", description = client_main.__doc__,
- help = "Test client for RPKI-RTR protocol")
- subparser.set_defaults(func = client_main, default_log_to = "stderr")
- subparser.add_argument("--sql-database", help = "filename for sqlite3 database of client state")
- subparser.add_argument("--force-version", type = int, choices = PDU.version_map, help = "force specific protocol version")
- subparser.add_argument("--reset-session", action = "store_true", help = "reset any existing session found in sqlite3 database")
- subparser.add_argument("protocol", choices = ("loopback", "tcp", "ssh", "tls"), help = "connection protocol")
- subparser.add_argument("host", nargs = "?", help = "server host")
- subparser.add_argument("port", nargs = "?", help = "server port")
- return subparser
+ """
+ Set up argparse stuff for commands in this module.
+ """
+
+ subparser = subparsers.add_parser("client", description = client_main.__doc__,
+ help = "Test client for RPKI-RTR protocol")
+ subparser.set_defaults(func = client_main, default_log_destination = "stderr")
+ subparser.add_argument("--sql-database", help = "filename for sqlite3 database of client state")
+ subparser.add_argument("--force-version", type = int, choices = PDU.version_map, help = "force specific protocol version")
+ subparser.add_argument("--reset-session", action = "store_true", help = "reset any existing session found in sqlite3 database")
+ subparser.add_argument("protocol", choices = ("loopback", "tcp", "ssh", "tls"), help = "connection protocol")
+ subparser.add_argument("host", nargs = "?", help = "server host")
+ subparser.add_argument("port", nargs = "?", help = "server port")
+ return subparser
diff --git a/rpki/rtr/generator.py b/rpki/rtr/generator.py
index 26e25b6e..4536de30 100644
--- a/rpki/rtr/generator.py
+++ b/rpki/rtr/generator.py
@@ -36,540 +36,553 @@ import rpki.rtr.server
from rpki.rtr.channels import Timestamp
-class PrefixPDU(rpki.rtr.pdus.PrefixPDU):
- """
- Object representing one prefix. This corresponds closely to one PDU
- in the rpki-router protocol, so closely that we use lexical ordering
- of the wire format of the PDU as the ordering for this class.
-
- This is a virtual class, but the .from_text() constructor
- instantiates the correct concrete subclass (IPv4PrefixPDU or
- IPv6PrefixPDU) depending on the syntax of its input text.
- """
-
- @staticmethod
- def from_text(version, asn, addr):
- """
- Construct a prefix from its text form.
- """
+from rpki.rcynicdb.iterator import authenticated_objects
- cls = IPv6PrefixPDU if ":" in addr else IPv4PrefixPDU
- self = cls(version = version)
- self.asn = long(asn)
- p, l = addr.split("/")
- self.prefix = rpki.POW.IPAddress(p)
- if "-" in l:
- self.prefixlen, self.max_prefixlen = tuple(int(i) for i in l.split("-"))
- else:
- self.prefixlen = self.max_prefixlen = int(l)
- self.announce = 1
- self.check()
- return self
-
- @staticmethod
- def from_roa(version, asn, prefix_tuple):
- """
- Construct a prefix from a ROA.
+class PrefixPDU(rpki.rtr.pdus.PrefixPDU):
"""
-
- address, length, maxlength = prefix_tuple
- cls = IPv6PrefixPDU if address.version == 6 else IPv4PrefixPDU
- self = cls(version = version)
- self.asn = asn
- self.prefix = address
- self.prefixlen = length
- self.max_prefixlen = length if maxlength is None else maxlength
- self.announce = 1
- self.check()
- return self
+ Object representing one prefix. This corresponds closely to one PDU
+ in the rpki-router protocol, so closely that we use lexical ordering
+ of the wire format of the PDU as the ordering for this class.
+
+ This is a virtual class, but the .from_text() constructor
+ instantiates the correct concrete subclass (IPv4PrefixPDU or
+ IPv6PrefixPDU) depending on the syntax of its input text.
+ """
+
+ @staticmethod
+ def from_text(version, asn, addr):
+ """
+ Construct a prefix from its text form.
+ """
+
+ cls = IPv6PrefixPDU if ":" in addr else IPv4PrefixPDU
+ self = cls(version = version)
+ self.asn = long(asn)
+ p, l = addr.split("/")
+ self.prefix = rpki.POW.IPAddress(p)
+ if "-" in l:
+ self.prefixlen, self.max_prefixlen = tuple(int(i) for i in l.split("-"))
+ else:
+ self.prefixlen = self.max_prefixlen = int(l)
+ self.announce = 1
+ self.check()
+ return self
+
+ @staticmethod
+ def from_roa(version, asn, prefix_tuple):
+ """
+ Construct a prefix from a ROA.
+ """
+
+ address, length, maxlength = prefix_tuple
+ cls = IPv6PrefixPDU if address.version == 6 else IPv4PrefixPDU
+ self = cls(version = version)
+ self.asn = asn
+ self.prefix = address
+ self.prefixlen = length
+ self.max_prefixlen = length if maxlength is None else maxlength
+ self.announce = 1
+ self.check()
+ return self
class IPv4PrefixPDU(PrefixPDU):
- """
- IPv4 flavor of a prefix.
- """
+ """
+ IPv4 flavor of a prefix.
+ """
- pdu_type = 4
- address_byte_count = 4
+ pdu_type = 4
+ address_byte_count = 4
class IPv6PrefixPDU(PrefixPDU):
- """
- IPv6 flavor of a prefix.
- """
-
- pdu_type = 6
- address_byte_count = 16
-
-class RouterKeyPDU(rpki.rtr.pdus.RouterKeyPDU):
- """
- Router Key PDU.
- """
-
- @classmethod
- def from_text(cls, version, asn, gski, key):
"""
- Construct a router key from its text form.
+ IPv6 flavor of a prefix.
"""
- self = cls(version = version)
- self.asn = long(asn)
- self.ski = base64.urlsafe_b64decode(gski + "=")
- self.key = base64.b64decode(key)
- self.announce = 1
- self.check()
- return self
+ pdu_type = 6
+ address_byte_count = 16
- @classmethod
- def from_certificate(cls, version, asn, ski, key):
+class RouterKeyPDU(rpki.rtr.pdus.RouterKeyPDU):
"""
- Construct a router key from a certificate.
+ Router Key PDU.
"""
- self = cls(version = version)
- self.asn = asn
- self.ski = ski
- self.key = key
- self.announce = 1
- self.check()
- return self
+ announce = None
+ ski = None
+ asn = None
+ key = None
+ @classmethod
+ def from_text(cls, version, asn, gski, key):
+ """
+ Construct a router key from its text form.
+ """
-class ROA(rpki.POW.ROA): # pylint: disable=W0232
- """
- Minor additions to rpki.POW.ROA.
- """
-
- @classmethod
- def derReadFile(cls, fn): # pylint: disable=E1002
- self = super(ROA, cls).derReadFile(fn)
- self.extractWithoutVerifying()
- return self
-
- @property
- def prefixes(self):
- v4, v6 = self.getPrefixes()
- if v4 is not None:
- for p in v4:
- yield p
- if v6 is not None:
- for p in v6:
- yield p
+ self = cls(version = version)
+ self.asn = long(asn)
+ self.ski = base64.urlsafe_b64decode(gski + "=")
+ self.key = base64.b64decode(key)
+ self.announce = 1
+ self.check()
+ return self
-class X509(rpki.POW.X509): # pylint: disable=W0232
- """
- Minor additions to rpki.POW.X509.
- """
+ @classmethod
+ def from_certificate(cls, version, asn, ski, key):
+ """
+ Construct a router key from a certificate.
+ """
- @property
- def asns(self):
- resources = self.getRFC3779()
- if resources is not None and resources[0] is not None:
- for min_asn, max_asn in resources[0]:
- for asn in xrange(min_asn, max_asn + 1):
- yield asn
+ self = cls(version = version)
+ self.asn = asn
+ self.ski = ski
+ self.key = key
+ self.announce = 1
+ self.check()
+ return self
-class PDUSet(list):
- """
- Object representing a set of PDUs, that is, one versioned and
- (theoretically) consistant set of prefixes and router keys extracted
- from rcynic's output.
- """
-
- def __init__(self, version):
- assert version in rpki.rtr.pdus.PDU.version_map
- super(PDUSet, self).__init__()
- self.version = version
-
- @classmethod
- def _load_file(cls, filename, version):
+class ROA(rpki.POW.ROA): # pylint: disable=W0232
"""
- Low-level method to read PDUSet from a file.
+ Minor additions to rpki.POW.ROA.
"""
- self = cls(version = version)
- f = open(filename, "rb")
- r = rpki.rtr.channels.ReadBuffer()
- while True:
- p = rpki.rtr.pdus.PDU.read_pdu(r)
- while p is None:
- b = f.read(r.needed())
- if b == "":
- assert r.available() == 0
- return self
- r.put(b)
- p = r.retry()
- assert p.version == self.version
- self.append(p)
-
- @staticmethod
- def seq_ge(a, b):
- return ((a - b) % (1 << 32)) < (1 << 31)
+ @classmethod
+ def derReadFile(cls, fn):
+ # pylint: disable=E1002
+ self = super(ROA, cls).derReadFile(fn)
+ self.extractWithoutVerifying()
+ return self
+ @property
+ def prefixes(self):
+ v4, v6 = self.getPrefixes() # pylint: disable=E1101
+ if v4 is not None:
+ for p in v4:
+ yield p
+ if v6 is not None:
+ for p in v6:
+ yield p
-class AXFRSet(PDUSet):
- """
- Object representing a complete set of PDUs, that is, one versioned
- and (theoretically) consistant set of prefixes and router
- certificates extracted from rcynic's output, all with the announce
- field set.
- """
-
- @classmethod
- def parse_rcynic(cls, rcynic_dir, version, scan_roas = None, scan_routercerts = None):
+class X509(rpki.POW.X509): # pylint: disable=W0232
"""
- Parse ROAS and router certificates fetched (and validated!) by
- rcynic to create a new AXFRSet.
-
- In normal operation, we use os.walk() and the rpki.POW library to
- parse these data directly, but we can, if so instructed, use
- external programs instead, for testing, simulation, or to provide
- a way to inject local data.
-
- At some point the ability to parse these data from external
- programs may move to a separate constructor function, so that we
- can make this one a bit simpler and faster.
+ Minor additions to rpki.POW.X509.
"""
- self = cls(version = version)
- self.serial = rpki.rtr.channels.Timestamp.now()
-
- include_routercerts = RouterKeyPDU.pdu_type in rpki.rtr.pdus.PDU.version_map[version]
-
- if scan_roas is None or (scan_routercerts is None and include_routercerts):
- for root, dirs, files in os.walk(rcynic_dir): # pylint: disable=W0612
- for fn in files:
- if scan_roas is None and fn.endswith(".roa"):
- roa = ROA.derReadFile(os.path.join(root, fn))
- asn = roa.getASID()
- self.extend(PrefixPDU.from_roa(version = version, asn = asn, prefix_tuple = prefix_tuple)
- for prefix_tuple in roa.prefixes)
- if include_routercerts and scan_routercerts is None and fn.endswith(".cer"):
- x = X509.derReadFile(os.path.join(root, fn))
- eku = x.getEKU()
- if eku is not None and rpki.oids.id_kp_bgpsec_router in eku:
- ski = x.getSKI()
- key = x.getPublicKey().derWritePublic()
- self.extend(RouterKeyPDU.from_certificate(version = version, asn = asn, ski = ski, key = key)
- for asn in x.asns)
-
- if scan_roas is not None:
- try:
- p = subprocess.Popen((scan_roas, rcynic_dir), stdout = subprocess.PIPE)
- for line in p.stdout:
- line = line.split()
- asn = line[1]
- self.extend(PrefixPDU.from_text(version = version, asn = asn, addr = addr)
- for addr in line[2:])
- except OSError, e:
- sys.exit("Could not run %s: %s" % (scan_roas, e))
-
- if include_routercerts and scan_routercerts is not None:
- try:
- p = subprocess.Popen((scan_routercerts, rcynic_dir), stdout = subprocess.PIPE)
- for line in p.stdout:
- line = line.split()
- gski = line[0]
- key = line[-1]
- self.extend(RouterKeyPDU.from_text(version = version, asn = asn, gski = gski, key = key)
- for asn in line[1:-1])
- except OSError, e:
- sys.exit("Could not run %s: %s" % (scan_routercerts, e))
-
- self.sort()
- for i in xrange(len(self) - 2, -1, -1):
- if self[i] == self[i + 1]:
- del self[i + 1]
- return self
-
- @classmethod
- def load(cls, filename):
- """
- Load an AXFRSet from a file, parse filename to obtain version and serial.
- """
+ @property
+ def asns(self):
+ resources = self.getRFC3779() # pylint: disable=E1101
+ if resources is not None and resources[0] is not None:
+ for min_asn, max_asn in resources[0]:
+ for asn in xrange(min_asn, max_asn + 1):
+ yield asn
- fn1, fn2, fn3 = os.path.basename(filename).split(".")
- assert fn1.isdigit() and fn2 == "ax" and fn3.startswith("v") and fn3[1:].isdigit()
- version = int(fn3[1:])
- self = cls._load_file(filename, version)
- self.serial = rpki.rtr.channels.Timestamp(fn1)
- return self
- def filename(self):
- """
- Generate filename for this AXFRSet.
+class PDUSet(list):
"""
+ Object representing a set of PDUs, that is, one versioned and
+ (theoretically) consistent set of prefixes and router keys extracted
+ from rcynic's output.
+ """
+
+ def __init__(self, version):
+ assert version in rpki.rtr.pdus.PDU.version_map
+ super(PDUSet, self).__init__()
+ self.version = version
+
+ @classmethod
+ def _load_file(cls, filename, version):
+ """
+ Low-level method to read PDUSet from a file.
+ """
+
+ self = cls(version = version)
+ f = open(filename, "rb")
+ r = rpki.rtr.channels.ReadBuffer()
+ while True:
+ p = rpki.rtr.pdus.PDU.read_pdu(r)
+ while p is None:
+ b = f.read(r.needed())
+ if b == "":
+ assert r.available() == 0
+ return self
+ r.put(b)
+ p = r.retry()
+ assert p.version == self.version
+ self.append(p)
+
+ @staticmethod
+ def seq_ge(a, b):
+ return ((a - b) % (1 << 32)) < (1 << 31)
- return "%d.ax.v%d" % (self.serial, self.version)
- @classmethod
- def load_current(cls, version):
- """
- Load current AXFRSet. Return None if can't.
+class AXFRSet(PDUSet):
"""
+ Object representing a complete set of PDUs, that is, one versioned
+ and (theoretically) consistent set of prefixes and router
+ certificates extracted from rcynic's output, all with the announce
+ field set.
+ """
+
+ class_map = dict(cer = X509, roa = ROA)
+
+ serial = None
+
+ @classmethod
+ def parse_rcynic(cls, rcynic_dir, version, scan_roas = None, scan_routercerts = None):
+ """
+ Parse ROAs and router certificates fetched (and validated!) by
+ rcynic to create a new AXFRSet.
+
+ In normal operation, we parse these data directly from whatever rcynic is using
+ as a validator this week, but we can, if so instructed, use external programs
+ instead, for testing, simulation, or to provide a way to inject local data.
+
+ At some point the ability to parse these data from external
+ programs may move to a separate constructor function, so that we
+ can make this one a bit simpler and faster.
+ """
+
+ self = cls(version = version)
+ self.serial = rpki.rtr.channels.Timestamp.now()
+
+ include_routercerts = RouterKeyPDU.pdu_type in rpki.rtr.pdus.PDU.version_map[version]
+
+ if scan_roas is None:
+ for uri, roa in authenticated_objects(rcynic_dir, uri_suffix = ".roa", class_map = self.class_map):
+ roa.extractWithoutVerifying()
+ asn = roa.getASID()
+ self.extend(PrefixPDU.from_roa(version = version, asn = asn, prefix_tuple = prefix_tuple)
+ for prefix_tuple in roa.prefixes)
+
+ if scan_routercerts is None and include_routercerts:
+ for uri, cer in authenticated_objects(rcynic_dir, uri_suffix = ".cer", class_map = self.class_map):
+ eku = cer.getEKU()
+ if eku is not None and rpki.oids.id_kp_bgpsec_router in eku:
+ ski = cer.getSKI()
+ key = cer.getPublicKey().derWritePublic()
+ self.extend(RouterKeyPDU.from_certificate(version = version, asn = asn, ski = ski, key = key)
+ for asn in cer.asns)
+
+ if scan_roas is not None:
+ try:
+ p = subprocess.Popen((scan_roas, rcynic_dir), stdout = subprocess.PIPE)
+ for line in p.stdout:
+ line = line.split()
+ asn = line[1]
+ self.extend(PrefixPDU.from_text(version = version, asn = asn, addr = addr)
+ for addr in line[2:])
+ except OSError, e:
+ sys.exit("Could not run %s: %s" % (scan_roas, e))
+
+ if include_routercerts and scan_routercerts is not None:
+ try:
+ p = subprocess.Popen((scan_routercerts, rcynic_dir), stdout = subprocess.PIPE)
+ for line in p.stdout:
+ line = line.split()
+ gski = line[0]
+ key = line[-1]
+ self.extend(RouterKeyPDU.from_text(version = version, asn = asn, gski = gski, key = key)
+ for asn in line[1:-1])
+ except OSError, e:
+ sys.exit("Could not run %s: %s" % (scan_routercerts, e))
+
+ self.sort()
+ for i in xrange(len(self) - 2, -1, -1):
+ if self[i] == self[i + 1]:
+ del self[i + 1]
+ return self
+
+ @classmethod
+ def load(cls, filename):
+ """
+ Load an AXFRSet from a file, parse filename to obtain version and serial.
+ """
+
+ fn1, fn2, fn3 = os.path.basename(filename).split(".")
+ assert fn1.isdigit() and fn2 == "ax" and fn3.startswith("v") and fn3[1:].isdigit()
+ version = int(fn3[1:])
+ self = cls._load_file(filename, version)
+ self.serial = rpki.rtr.channels.Timestamp(fn1)
+ return self
+
+ def filename(self):
+ """
+ Generate filename for this AXFRSet.
+ """
+
+ return "%d.ax.v%d" % (self.serial, self.version)
+
+ @classmethod
+ def load_current(cls, version):
+ """
+ Load current AXFRSet. Return None if can't.
+ """
+
+ serial = rpki.rtr.server.read_current(version)[0]
+ if serial is None:
+ return None
+ try:
+ return cls.load("%d.ax.v%d" % (serial, version))
+ except IOError:
+ return None
+
+ def save_axfr(self):
+ """
+ Write AXFRSet to file with magic filename.
+ """
+
+ f = open(self.filename(), "wb")
+ for p in self:
+ f.write(p.to_pdu())
+ f.close()
+
+ def destroy_old_data(self):
+ """
+ Destroy old data files, presumably because our nonce changed and
+ the old serial numbers are no longer valid.
+ """
+
+ for i in glob.iglob("*.ix.*.v%d" % self.version):
+ os.unlink(i)
+ for i in glob.iglob("*.ax.v%d" % self.version):
+ if i != self.filename():
+ os.unlink(i)
+
+ @staticmethod
+ def new_nonce(force_zero_nonce):
+ """
+ Create and return a new nonce value.
+ """
+
+ if force_zero_nonce:
+ return 0
+ try:
+ return int(random.SystemRandom().getrandbits(16))
+ except NotImplementedError:
+ return int(random.getrandbits(16))
+
+ def mark_current(self, force_zero_nonce = False):
+ """
+ Save current serial number and nonce, creating new nonce if
+ necessary. Creating a new nonce triggers cleanup of old state, as
+ the new nonce invalidates all old serial numbers.
+ """
+
+ assert self.version in rpki.rtr.pdus.PDU.version_map
+ old_serial, nonce = rpki.rtr.server.read_current(self.version)
+ if old_serial is None or self.seq_ge(old_serial, self.serial):
+ logging.debug("Creating new nonce and deleting stale data")
+ nonce = self.new_nonce(force_zero_nonce)
+ self.destroy_old_data()
+ rpki.rtr.server.write_current(self.serial, nonce, self.version)
+
+ def save_ixfr(self, other):
+ """
+ Compare this AXFRSet with an older one and write the resulting
+ IXFRSet to file with magic filename. Since we store PDUSets
+ in sorted order, computing the difference is a trivial linear
+ comparison.
+ """
+
+ f = open("%d.ix.%d.v%d" % (self.serial, other.serial, self.version), "wb")
+ old = other
+ new = self
+ len_old = len(old)
+ len_new = len(new)
+ i_old = i_new = 0
+ while i_old < len_old and i_new < len_new:
+ if old[i_old] < new[i_new]:
+ f.write(old[i_old].to_pdu(announce = 0))
+ i_old += 1
+ elif old[i_old] > new[i_new]:
+ f.write(new[i_new].to_pdu(announce = 1))
+ i_new += 1
+ else:
+ i_old += 1
+ i_new += 1
+ for i in xrange(i_old, len_old):
+ f.write(old[i].to_pdu(announce = 0))
+ for i in xrange(i_new, len_new):
+ f.write(new[i].to_pdu(announce = 1))
+ f.close()
+
+ def show(self):
+ """
+ Print this AXFRSet.
+ """
+
+ logging.debug("# AXFR %d (%s) v%d", self.serial, self.serial, self.version)
+ for p in self:
+ logging.debug(p)
- serial = rpki.rtr.server.read_current(version)[0]
- if serial is None:
- return None
- try:
- return cls.load("%d.ax.v%d" % (serial, version))
- except IOError:
- return None
- def save_axfr(self):
+class IXFRSet(PDUSet):
"""
- Write AXFRSet to file with magic filename.
+ Object representing an incremental set of PDUs, that is, the
+ differences between one versioned and (theoretically) consistent set
+ of prefixes and router certificates extracted from rcynic's output
+ and another, with the announce fields set or cleared as necessary to
+ indicate the changes.
"""
- f = open(self.filename(), "wb")
- for p in self:
- f.write(p.to_pdu())
- f.close()
+ from_serial = None
+ to_serial = None
- def destroy_old_data(self):
- """
- Destroy old data files, presumably because our nonce changed and
- the old serial numbers are no longer valid.
- """
+ @classmethod
+ def load(cls, filename):
+ """
+ Load an IXFRSet from a file, parse filename to obtain version and serials.
+ """
- for i in glob.iglob("*.ix.*.v%d" % self.version):
- os.unlink(i)
- for i in glob.iglob("*.ax.v%d" % self.version):
- if i != self.filename():
- os.unlink(i)
+ fn1, fn2, fn3, fn4 = os.path.basename(filename).split(".")
+ assert fn1.isdigit() and fn2 == "ix" and fn3.isdigit() and fn4.startswith("v") and fn4[1:].isdigit()
+ version = int(fn4[1:])
+ self = cls._load_file(filename, version)
+ self.from_serial = rpki.rtr.channels.Timestamp(fn3)
+ self.to_serial = rpki.rtr.channels.Timestamp(fn1)
+ return self
- @staticmethod
- def new_nonce(force_zero_nonce):
- """
- Create and return a new nonce value.
- """
+ def filename(self):
+ """
+ Generate filename for this IXFRSet.
+ """
- if force_zero_nonce:
- return 0
- try:
- return int(random.SystemRandom().getrandbits(16))
- except NotImplementedError:
- return int(random.getrandbits(16))
+ return "%d.ix.%d.v%d" % (self.to_serial, self.from_serial, self.version)
- def mark_current(self, force_zero_nonce = False):
- """
- Save current serial number and nonce, creating new nonce if
- necessary. Creating a new nonce triggers cleanup of old state, as
- the new nonce invalidates all old serial numbers.
- """
+ def show(self):
+ """
+ Print this IXFRSet.
+ """
- assert self.version in rpki.rtr.pdus.PDU.version_map
- old_serial, nonce = rpki.rtr.server.read_current(self.version)
- if old_serial is None or self.seq_ge(old_serial, self.serial):
- logging.debug("Creating new nonce and deleting stale data")
- nonce = self.new_nonce(force_zero_nonce)
- self.destroy_old_data()
- rpki.rtr.server.write_current(self.serial, nonce, self.version)
+ logging.debug("# IXFR %d (%s) -> %d (%s) v%d",
+ self.from_serial, self.from_serial,
+ self.to_serial, self.to_serial,
+ self.version)
+ for p in self:
+ logging.debug(p)
- def save_ixfr(self, other):
- """
- Comparing this AXFRSet with an older one and write the resulting
- IXFRSet to file with magic filename. Since we store PDUSets
- in sorted order, computing the difference is a trivial linear
- comparison.
- """
- f = open("%d.ix.%d.v%d" % (self.serial, other.serial, self.version), "wb")
- old = other
- new = self
- len_old = len(old)
- len_new = len(new)
- i_old = i_new = 0
- while i_old < len_old and i_new < len_new:
- if old[i_old] < new[i_new]:
- f.write(old[i_old].to_pdu(announce = 0))
- i_old += 1
- elif old[i_old] > new[i_new]:
- f.write(new[i_new].to_pdu(announce = 1))
- i_new += 1
- else:
- i_old += 1
- i_new += 1
- for i in xrange(i_old, len_old):
- f.write(old[i].to_pdu(announce = 0))
- for i in xrange(i_new, len_new):
- f.write(new[i].to_pdu(announce = 1))
- f.close()
-
- def show(self):
+def kick_all(serial):
"""
- Print this AXFRSet.
+ Kick any existing server processes to wake them up.
"""
- logging.debug("# AXFR %d (%s) v%d", self.serial, self.serial, self.version)
- for p in self:
- logging.debug(p)
+ try:
+ os.stat(rpki.rtr.server.kickme_dir)
+ except OSError:
+ logging.debug('# Creating directory "%s"', rpki.rtr.server.kickme_dir)
+ os.makedirs(rpki.rtr.server.kickme_dir)
+
+ msg = "Good morning, serial %d is ready" % serial
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ for name in glob.iglob("%s.*" % rpki.rtr.server.kickme_base):
+ try:
+ logging.debug("# Kicking %s", name)
+ sock.sendto(msg, name)
+ except socket.error:
+ try:
+ logging.exception("# Failed to kick %s, probably dead socket, attempting cleanup", name)
+ os.unlink(name)
+ except Exception, e:
+ logging.exception("# Couldn't unlink suspected dead socket %s: %s", name, e)
+ except Exception, e:
+ logging.warning("# Failed to kick %s and don't understand why: %s", name, e)
+ sock.close()
-class IXFRSet(PDUSet):
- """
- Object representing an incremental set of PDUs, that is, the
- differences between one versioned and (theoretically) consistant set
- of prefixes and router certificates extracted from rcynic's output
- and another, with the announce fields set or cleared as necessary to
- indicate the changes.
- """
-
- @classmethod
- def load(cls, filename):
- """
- Load an IXFRSet from a file, parse filename to obtain version and serials.
+def cronjob_main(args):
"""
+ Run this right after running rcynic to wade through the ROAs and
+ router certificates that rcynic collects and translate that data
+ into the form used in the rpki-router protocol. Output is an
+ updated database containing both full dumps (AXFR) and incremental
+ dumps against a specific prior version (IXFR). After updating the
+ database, kicks any active servers, so that they can notify their
+ clients that a new version is available.
+ """
+
+ if args.rpki_rtr_dir:
+ try:
+ if not os.path.isdir(args.rpki_rtr_dir):
+ os.makedirs(args.rpki_rtr_dir)
+ os.chdir(args.rpki_rtr_dir)
+ except OSError, e:
+ logging.critical(str(e))
+ sys.exit(1)
+
+ for version in sorted(rpki.rtr.server.PDU.version_map.iterkeys(), reverse = True):
+
+ logging.debug("# Generating updates for protocol version %d", version)
+
+ old_ixfrs = glob.glob("*.ix.*.v%d" % version)
+
+ current = rpki.rtr.server.read_current(version)[0]
+ cutoff = Timestamp.now(-(24 * 60 * 60))
+ for f in glob.iglob("*.ax.v%d" % version):
+ t = Timestamp(int(f.split(".")[0]))
+ if t < cutoff and t != current:
+ logging.debug("# Deleting old file %s, timestamp %s", f, t)
+ os.unlink(f)
+
+ pdus = rpki.rtr.generator.AXFRSet.parse_rcynic(args.rcynic_dir, version, args.scan_roas, args.scan_routercerts)
+ if pdus == rpki.rtr.generator.AXFRSet.load_current(version):
+ logging.debug("# No change, new serial not needed")
+ continue
+ pdus.save_axfr()
+ for axfr in glob.iglob("*.ax.v%d" % version):
+ if axfr != pdus.filename():
+ pdus.save_ixfr(rpki.rtr.generator.AXFRSet.load(axfr))
+ pdus.mark_current(args.force_zero_nonce)
+
+ logging.debug("# New serial is %d (%s)", pdus.serial, pdus.serial)
+
+ rpki.rtr.generator.kick_all(pdus.serial)
+
+ old_ixfrs.sort()
+ for ixfr in old_ixfrs:
+ try:
+ logging.debug("# Deleting old file %s", ixfr)
+ os.unlink(ixfr)
+ except OSError:
+ pass
- fn1, fn2, fn3, fn4 = os.path.basename(filename).split(".")
- assert fn1.isdigit() and fn2 == "ix" and fn3.isdigit() and fn4.startswith("v") and fn4[1:].isdigit()
- version = int(fn4[1:])
- self = cls._load_file(filename, version)
- self.from_serial = rpki.rtr.channels.Timestamp(fn3)
- self.to_serial = rpki.rtr.channels.Timestamp(fn1)
- return self
- def filename(self):
+def show_main(args):
"""
- Generate filename for this IXFRSet.
+ Display current rpki-rtr server database in textual form.
"""
- return "%d.ix.%d.v%d" % (self.to_serial, self.from_serial, self.version)
+ if args.rpki_rtr_dir:
+ try:
+ os.chdir(args.rpki_rtr_dir)
+ except OSError, e:
+ sys.exit(e)
- def show(self):
- """
- Print this IXFRSet.
- """
+ g = glob.glob("*.ax.v*")
+ g.sort()
+ for f in g:
+ rpki.rtr.generator.AXFRSet.load(f).show()
- logging.debug("# IXFR %d (%s) -> %d (%s) v%d",
- self.from_serial, self.from_serial,
- self.to_serial, self.to_serial,
- self.version)
- for p in self:
- logging.debug(p)
+ g = glob.glob("*.ix.*.v*")
+ g.sort()
+ for f in g:
+ rpki.rtr.generator.IXFRSet.load(f).show()
+def argparse_setup(subparsers):
+ """
+ Set up argparse stuff for commands in this module.
+ """
-def kick_all(serial):
- """
- Kick any existing server processes to wake them up.
- """
-
- try:
- os.stat(rpki.rtr.server.kickme_dir)
- except OSError:
- logging.debug('# Creating directory "%s"', rpki.rtr.server.kickme_dir)
- os.makedirs(rpki.rtr.server.kickme_dir)
-
- msg = "Good morning, serial %d is ready" % serial
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- for name in glob.iglob("%s.*" % rpki.rtr.server.kickme_base):
- try:
- logging.debug("# Kicking %s", name)
- sock.sendto(msg, name)
- except socket.error:
- try:
- logging.exception("# Failed to kick %s, probably dead socket, attempting cleanup", name)
- os.unlink(name)
- except Exception, e:
- logging.exception("# Couldn't unlink suspected dead socket %s: %s", name, e)
- except Exception, e:
- logging.warning("# Failed to kick %s and don't understand why: %s", name, e)
- sock.close()
-
-
-def cronjob_main(args):
- """
- Run this right after running rcynic to wade through the ROAs and
- router certificates that rcynic collects and translate that data
- into the form used in the rpki-router protocol. Output is an
- updated database containing both full dumps (AXFR) and incremental
- dumps against a specific prior version (IXFR). After updating the
- database, kicks any active servers, so that they can notify their
- clients that a new version is available.
- """
-
- if args.rpki_rtr_dir:
- try:
- if not os.path.isdir(args.rpki_rtr_dir):
- os.makedirs(args.rpki_rtr_dir)
- os.chdir(args.rpki_rtr_dir)
- except OSError, e:
- logging.critical(str(e))
- sys.exit(1)
-
- for version in sorted(rpki.rtr.server.PDU.version_map.iterkeys(), reverse = True):
-
- logging.debug("# Generating updates for protocol version %d", version)
-
- old_ixfrs = glob.glob("*.ix.*.v%d" % version)
-
- current = rpki.rtr.server.read_current(version)[0]
- cutoff = Timestamp.now(-(24 * 60 * 60))
- for f in glob.iglob("*.ax.v%d" % version):
- t = Timestamp(int(f.split(".")[0]))
- if t < cutoff and t != current:
- logging.debug("# Deleting old file %s, timestamp %s", f, t)
- os.unlink(f)
-
- pdus = rpki.rtr.generator.AXFRSet.parse_rcynic(args.rcynic_dir, version, args.scan_roas, args.scan_routercerts)
- if pdus == rpki.rtr.generator.AXFRSet.load_current(version):
- logging.debug("# No change, new serial not needed")
- continue
- pdus.save_axfr()
- for axfr in glob.iglob("*.ax.v%d" % version):
- if axfr != pdus.filename():
- pdus.save_ixfr(rpki.rtr.generator.AXFRSet.load(axfr))
- pdus.mark_current(args.force_zero_nonce)
-
- logging.debug("# New serial is %d (%s)", pdus.serial, pdus.serial)
-
- rpki.rtr.generator.kick_all(pdus.serial)
-
- old_ixfrs.sort()
- for ixfr in old_ixfrs:
- try:
- logging.debug("# Deleting old file %s", ixfr)
- os.unlink(ixfr)
- except OSError:
- pass
-
-
-def show_main(args):
- """
- Display current rpki-rtr server database in textual form.
- """
-
- if args.rpki_rtr_dir:
- try:
- os.chdir(args.rpki_rtr_dir)
- except OSError, e:
- sys.exit(e)
-
- g = glob.glob("*.ax.v*")
- g.sort()
- for f in g:
- rpki.rtr.generator.AXFRSet.load(f).show()
-
- g = glob.glob("*.ix.*.v*")
- g.sort()
- for f in g:
- rpki.rtr.generator.IXFRSet.load(f).show()
+ subparser = subparsers.add_parser("cronjob", description = cronjob_main.__doc__,
+ help = "Generate RPKI-RTR database from rcynic output")
+ subparser.set_defaults(func = cronjob_main, default_log_destination = "syslog")
+ subparser.add_argument("--scan-roas", help = "specify an external scan_roas program")
+ subparser.add_argument("--scan-routercerts", help = "specify an external scan_routercerts program")
+ subparser.add_argument("--force_zero_nonce", action = "store_true", help = "force nonce value of zero")
+ subparser.add_argument("rcynic_dir", nargs = "?", help = "directory containing validated rcynic output tree")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
-def argparse_setup(subparsers):
- """
- Set up argparse stuff for commands in this module.
- """
-
- subparser = subparsers.add_parser("cronjob", description = cronjob_main.__doc__,
- help = "Generate RPKI-RTR database from rcynic output")
- subparser.set_defaults(func = cronjob_main, default_log_to = "syslog")
- subparser.add_argument("--scan-roas", help = "specify an external scan_roas program")
- subparser.add_argument("--scan-routercerts", help = "specify an external scan_routercerts program")
- subparser.add_argument("--force_zero_nonce", action = "store_true", help = "force nonce value of zero")
- subparser.add_argument("rcynic_dir", help = "directory containing validated rcynic output tree")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
-
- subparser = subparsers.add_parser("show", description = show_main.__doc__,
- help = "Display content of RPKI-RTR database")
- subparser.set_defaults(func = show_main, default_log_to = "stderr")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
+ subparser = subparsers.add_parser("show", description = show_main.__doc__,
+ help = "Display content of RPKI-RTR database")
+ subparser.set_defaults(func = show_main, default_log_destination = "stderr")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
diff --git a/rpki/rtr/main.py b/rpki/rtr/main.py
index 12de30cc..b915f809 100644
--- a/rpki/rtr/main.py
+++ b/rpki/rtr/main.py
@@ -25,70 +25,35 @@ import os
import sys
import time
import logging
-import logging.handlers
-import argparse
+import rpki.config
-class Formatter(logging.Formatter):
-
- converter = time.gmtime
-
- def __init__(self, debug, fmt, datefmt):
- self.debug = debug
- super(Formatter, self).__init__(fmt, datefmt)
-
- def format(self, record):
- if getattr(record, "connection", None) is None:
- record.connection = ""
- return super(Formatter, self).format(record)
-
- def formatException(self, ei):
- if self.debug:
- return super(Formatter, self).formatException(ei)
- else:
- return str(ei[1])
def main():
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- from rpki.rtr.server import argparse_setup as argparse_setup_server
- from rpki.rtr.client import argparse_setup as argparse_setup_client
- from rpki.rtr.generator import argparse_setup as argparse_setup_generator
+ os.environ["TZ"] = "UTC"
+ time.tzset()
- if "rpki.rtr.bgpdump" in sys.modules:
- from rpki.rtr.bgpdump import argparse_setup as argparse_setup_bgpdump
- else:
- def argparse_setup_bgpdump(ignored):
- pass
+ from rpki.rtr.server import argparse_setup as argparse_setup_server
+ from rpki.rtr.client import argparse_setup as argparse_setup_client
+ from rpki.rtr.generator import argparse_setup as argparse_setup_generator
- argparser = argparse.ArgumentParser(description = __doc__)
- argparser.add_argument("--debug", action = "store_true", help = "debugging mode")
- argparser.add_argument("--log-level", default = "debug",
- choices = ("debug", "info", "warning", "error", "critical"),
- type = lambda s: s.lower())
- argparser.add_argument("--log-to",
- choices = ("syslog", "stderr"))
- subparsers = argparser.add_subparsers(title = "Commands", metavar = "", dest = "mode")
- argparse_setup_server(subparsers)
- argparse_setup_client(subparsers)
- argparse_setup_generator(subparsers)
- argparse_setup_bgpdump(subparsers)
- args = argparser.parse_args()
-
- fmt = "rpki-rtr/" + args.mode + "%(connection)s[%(process)d] %(message)s"
-
- if (args.log_to or args.default_log_to) == "stderr":
- handler = logging.StreamHandler()
- fmt = "%(asctime)s " + fmt
- elif os.path.exists("/dev/log"):
- handler = logging.handlers.SysLogHandler("/dev/log")
- else:
- handler = logging.handlers.SysLogHandler()
-
- handler.setFormatter(Formatter(args.debug, fmt, "%Y-%m-%dT%H:%M:%SZ"))
- logging.root.addHandler(handler)
- logging.root.setLevel(int(getattr(logging, args.log_level.upper())))
-
- return args.func(args)
+ if "rpki.rtr.bgpdump" in sys.modules:
+ from rpki.rtr.bgpdump import argparse_setup as argparse_setup_bgpdump
+ else:
+ def argparse_setup_bgpdump(ignored):
+ pass
+
+ cfg = rpki.config.argparser(section = "rpki-rtr", doc = __doc__)
+ cfg.argparser.add_argument("--debug", action = "store_true", help = "debugging mode")
+ cfg.add_logging_arguments()
+ subparsers = cfg.argparser.add_subparsers(title = "Commands", metavar = "", dest = "mode")
+ argparse_setup_server(subparsers)
+ argparse_setup_client(subparsers)
+ argparse_setup_generator(subparsers)
+ argparse_setup_bgpdump(subparsers)
+ args = cfg.argparser.parse_args()
+
+ cfg.configure_logging(args = args, ident = "rpki-rtr/" + args.mode)
+
+ return args.func(args)
diff --git a/rpki/rtr/pdus.py b/rpki/rtr/pdus.py
index 0d2e5928..3fb7457d 100644
--- a/rpki/rtr/pdus.py
+++ b/rpki/rtr/pdus.py
@@ -28,292 +28,300 @@ import rpki.POW
# Exceptions
class PDUException(Exception):
- """
- Parent exception type for exceptions that signal particular protocol
- errors. String value of exception instance will be the message to
- put in the ErrorReportPDU, error_report_code value of exception
- will be the numeric code to use.
- """
-
- def __init__(self, msg = None, pdu = None):
- super(PDUException, self).__init__()
- assert msg is None or isinstance(msg, (str, unicode))
- self.error_report_msg = msg
- self.error_report_pdu = pdu
-
- def __str__(self):
- return self.error_report_msg or self.__class__.__name__
-
- def make_error_report(self, version):
- return ErrorReportPDU(version = version,
- errno = self.error_report_code,
- errmsg = self.error_report_msg,
- errpdu = self.error_report_pdu)
+ """
+ Parent exception type for exceptions that signal particular protocol
+ errors. String value of exception instance will be the message to
+ put in the ErrorReportPDU, error_report_code value of exception
+ will be the numeric code to use.
+ """
+
+ def __init__(self, msg = None, pdu = None):
+ super(PDUException, self).__init__()
+ assert msg is None or isinstance(msg, (str, unicode))
+ self.error_report_msg = msg
+ self.error_report_pdu = pdu
+
+ def __str__(self):
+ return self.error_report_msg or self.__class__.__name__
+
+ def make_error_report(self, version):
+ return ErrorReportPDU(version = version,
+ errno = self.error_report_code,
+ errmsg = self.error_report_msg,
+ errpdu = self.error_report_pdu)
class UnsupportedProtocolVersion(PDUException):
- error_report_code = 4
+ error_report_code = 4
class UnsupportedPDUType(PDUException):
- error_report_code = 5
+ error_report_code = 5
class CorruptData(PDUException):
- error_report_code = 0
+ error_report_code = 0
# Decorators
def wire_pdu(cls, versions = None):
- """
- Class decorator to add a PDU class to the set of known PDUs
- for all supported protocol versions.
- """
+ """
+ Class decorator to add a PDU class to the set of known PDUs
+ for all supported protocol versions.
+ """
- for v in PDU.version_map.iterkeys() if versions is None else versions:
- assert cls.pdu_type not in PDU.version_map[v]
- PDU.version_map[v][cls.pdu_type] = cls
- return cls
+ for v in PDU.version_map.iterkeys() if versions is None else versions:
+ assert cls.pdu_type not in PDU.version_map[v]
+ PDU.version_map[v][cls.pdu_type] = cls
+ return cls
def wire_pdu_only(*versions):
- """
- Class decorator to add a PDU class to the set of known PDUs
- for specific protocol versions.
- """
+ """
+ Class decorator to add a PDU class to the set of known PDUs
+ for specific protocol versions.
+ """
- assert versions and all(v in PDU.version_map for v in versions)
- return lambda cls: wire_pdu(cls, versions)
+ assert versions and all(v in PDU.version_map for v in versions)
+ return lambda cls: wire_pdu(cls, versions)
def clone_pdu_root(root_pdu_class):
- """
- Replace a PDU root class's version_map with a two-level deep copy of itself,
- and return a class decorator which subclasses can use to replace their
- parent classes with themselves in the resulting cloned version map.
+ """
+ Replace a PDU root class's version_map with a two-level deep copy of itself,
+ and return a class decorator which subclasses can use to replace their
+ parent classes with themselves in the resulting cloned version map.
- This function is not itself a decorator, it returns one.
- """
+ This function is not itself a decorator, it returns one.
+ """
- root_pdu_class.version_map = dict((k, v.copy()) for k, v in root_pdu_class.version_map.iteritems())
+ root_pdu_class.version_map = dict((k, v.copy()) for k, v in root_pdu_class.version_map.iteritems())
- def decorator(cls):
- for pdu_map in root_pdu_class.version_map.itervalues():
- for pdu_type, pdu_class in pdu_map.items():
- if pdu_class in cls.__bases__:
- pdu_map[pdu_type] = cls
- return cls
+ def decorator(cls):
+ for pdu_map in root_pdu_class.version_map.itervalues():
+ for pdu_type, pdu_class in pdu_map.items():
+ if pdu_class in cls.__bases__:
+ pdu_map[pdu_type] = cls
+ return cls
- return decorator
+ return decorator
# PDUs
class PDU(object):
- """
- Base PDU. Real PDUs are subclasses of this class.
- """
-
- version_map = {0 : {}, 1 : {}} # Updated by @wire_pdu
-
- _pdu = None # Cached when first generated
+ """
+ Base PDU. Real PDUs are subclasses of this class.
+ """
- header_struct = struct.Struct("!BB2xL")
+ version_map = {0 : {}, 1 : {}} # Updated by @wire_pdu
- def __init__(self, version):
- assert version in self.version_map
- self.version = version
+ _pdu = None # Cached when first generated
- def __cmp__(self, other):
- return cmp(self.to_pdu(), other.to_pdu())
+ header_struct = struct.Struct("!BB2xL")
- @property
- def default_version(self):
- return max(self.version_map.iterkeys())
+ pdu_type = None
- def check(self):
- pass
+ def __init__(self, version):
+ assert version in self.version_map
+ self.version = version
- @classmethod
- def read_pdu(cls, reader):
- return reader.update(need = cls.header_struct.size, callback = cls.got_header)
+ def __cmp__(self, other):
+ return cmp(self.to_pdu(), other.to_pdu())
- @classmethod
- def got_header(cls, reader):
- if not reader.ready():
- return None
- assert reader.available() >= cls.header_struct.size
- version, pdu_type, length = cls.header_struct.unpack(reader.buffer[:cls.header_struct.size])
- reader.check_version(version)
- if pdu_type not in cls.version_map[version]:
- raise UnsupportedPDUType(
- "Received unsupported PDU type %d" % pdu_type)
- if length < 8:
- raise CorruptData(
- "Received PDU with length %d, which is too short to be valid" % length)
- self = cls.version_map[version][pdu_type](version = version)
- return reader.update(need = length, callback = self.got_pdu)
+ def to_pdu(self, announce = None):
+ return NotImplementedError
+ @property
+ def default_version(self):
+ return max(self.version_map.iterkeys())
-class PDUWithSerial(PDU):
- """
- Base class for PDUs consisting of just a serial number and nonce.
- """
+ def check(self):
+ pass
- header_struct = struct.Struct("!BBHLL")
+ @classmethod
+ def read_pdu(cls, reader):
+ return reader.update(need = cls.header_struct.size, callback = cls.got_header)
- def __init__(self, version, serial = None, nonce = None):
- super(PDUWithSerial, self).__init__(version)
- if serial is not None:
- assert isinstance(serial, int)
- self.serial = serial
- if nonce is not None:
- assert isinstance(nonce, int)
- self.nonce = nonce
+ @classmethod
+ def got_header(cls, reader):
+ if not reader.ready():
+ return None
+ assert reader.available() >= cls.header_struct.size
+ version, pdu_type, length = cls.header_struct.unpack(reader.buffer[:cls.header_struct.size])
+ reader.check_version(version)
+ if pdu_type not in cls.version_map[version]:
+ raise UnsupportedPDUType(
+ "Received unsupported PDU type %d" % pdu_type)
+ if length < 8:
+ raise CorruptData(
+ "Received PDU with length %d, which is too short to be valid" % length)
+ self = cls.version_map[version][pdu_type](version = version)
+ return reader.update(need = length, callback = self.got_pdu)
- def __str__(self):
- return "[%s, serial #%d nonce %d]" % (self.__class__.__name__, self.serial, self.nonce)
- def to_pdu(self):
+class PDUWithSerial(PDU):
"""
- Generate the wire format PDU.
+ Base class for PDUs consisting of just a serial number and nonce.
"""
- if self._pdu is None:
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
- self.header_struct.size, self.serial)
- return self._pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b = reader.get(self.header_struct.size)
- version, pdu_type, self.nonce, length, self.serial = self.header_struct.unpack(b)
- assert version == self.version and pdu_type == self.pdu_type
- if length != 12:
- raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
- assert b == self.to_pdu()
- return self
+ header_struct = struct.Struct("!BBHLL")
+
+ def __init__(self, version, serial = None, nonce = None):
+ super(PDUWithSerial, self).__init__(version)
+ if serial is not None:
+ assert isinstance(serial, int)
+ self.serial = serial
+ if nonce is not None:
+ assert isinstance(nonce, int)
+ self.nonce = nonce
+
+ def __str__(self):
+ return "[%s, serial #%d nonce %d]" % (self.__class__.__name__, self.serial, self.nonce)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU.
+ """
+
+ assert announce is None
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
+ self.header_struct.size, self.serial)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, self.nonce, length, self.serial = self.header_struct.unpack(b)
+ assert version == self.version and pdu_type == self.pdu_type
+ if length != 12:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
class PDUWithNonce(PDU):
- """
- Base class for PDUs consisting of just a nonce.
- """
-
- header_struct = struct.Struct("!BBHL")
-
- def __init__(self, version, nonce = None):
- super(PDUWithNonce, self).__init__(version)
- if nonce is not None:
- assert isinstance(nonce, int)
- self.nonce = nonce
-
- def __str__(self):
- return "[%s, nonce %d]" % (self.__class__.__name__, self.nonce)
-
- def to_pdu(self):
"""
- Generate the wire format PDU.
+ Base class for PDUs consisting of just a nonce.
"""
- if self._pdu is None:
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, self.header_struct.size)
- return self._pdu
+ header_struct = struct.Struct("!BBHL")
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b = reader.get(self.header_struct.size)
- version, pdu_type, self.nonce, length = self.header_struct.unpack(b)
- assert version == self.version and pdu_type == self.pdu_type
- if length != 8:
- raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
- assert b == self.to_pdu()
- return self
+ def __init__(self, version, nonce = None):
+ super(PDUWithNonce, self).__init__(version)
+ if nonce is not None:
+ assert isinstance(nonce, int)
+ self.nonce = nonce
+ def __str__(self):
+ return "[%s, nonce %d]" % (self.__class__.__name__, self.nonce)
-class PDUEmpty(PDU):
- """
- Base class for empty PDUs.
- """
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU.
+ """
- header_struct = struct.Struct("!BBHL")
+ assert announce is None
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, self.header_struct.size)
+ return self._pdu
- def __str__(self):
- return "[%s]" % self.__class__.__name__
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, self.nonce, length = self.header_struct.unpack(b)
+ assert version == self.version and pdu_type == self.pdu_type
+ if length != 8:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
- def to_pdu(self):
+
+class PDUEmpty(PDU):
"""
- Generate the wire format PDU for this prefix.
+ Base class for empty PDUs.
"""
- if self._pdu is None:
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, 0, self.header_struct.size)
- return self._pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b = reader.get(self.header_struct.size)
- version, pdu_type, zero, length = self.header_struct.unpack(b)
- assert version == self.version and pdu_type == self.pdu_type
- if zero != 0:
- raise CorruptData("Must-be-zero field isn't zero" % length, pdu = self)
- if length != 8:
- raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
- assert b == self.to_pdu()
- return self
+ header_struct = struct.Struct("!BBHL")
+
+ def __str__(self):
+ return "[%s]" % self.__class__.__name__
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU for this prefix.
+ """
+
+ assert announce is None
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, 0, self.header_struct.size)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, zero, length = self.header_struct.unpack(b)
+ assert version == self.version and pdu_type == self.pdu_type
+ if zero != 0:
+ raise CorruptData("Must-be-zero field isn't zero" % length, pdu = self)
+ if length != 8:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
@wire_pdu
class SerialNotifyPDU(PDUWithSerial):
- """
- Serial Notify PDU.
- """
+ """
+ Serial Notify PDU.
+ """
- pdu_type = 0
+ pdu_type = 0
@wire_pdu
class SerialQueryPDU(PDUWithSerial):
- """
- Serial Query PDU.
- """
+ """
+ Serial Query PDU.
+ """
- pdu_type = 1
+ pdu_type = 1
- def __init__(self, version, serial = None, nonce = None):
- super(SerialQueryPDU, self).__init__(self.default_version if version is None else version, serial, nonce)
+ def __init__(self, version, serial = None, nonce = None):
+ super(SerialQueryPDU, self).__init__(self.default_version if version is None else version, serial, nonce)
@wire_pdu
class ResetQueryPDU(PDUEmpty):
- """
- Reset Query PDU.
- """
+ """
+ Reset Query PDU.
+ """
- pdu_type = 2
+ pdu_type = 2
- def __init__(self, version):
- super(ResetQueryPDU, self).__init__(self.default_version if version is None else version)
+ def __init__(self, version):
+ super(ResetQueryPDU, self).__init__(self.default_version if version is None else version)
@wire_pdu
class CacheResponsePDU(PDUWithNonce):
- """
- Cache Response PDU.
- """
+ """
+ Cache Response PDU.
+ """
- pdu_type = 3
+ pdu_type = 3
def EndOfDataPDU(version, *args, **kwargs):
- """
- Factory for the EndOfDataPDU classes, which take different forms in
- different protocol versions.
- """
+ """
+ Factory for the EndOfDataPDU classes, which take different forms in
+ different protocol versions.
+ """
- if version == 0:
- return EndOfDataPDUv0(version, *args, **kwargs)
- if version == 1:
- return EndOfDataPDUv1(version, *args, **kwargs)
- raise NotImplementedError
+ if version == 0:
+ return EndOfDataPDUv0(version, *args, **kwargs)
+ if version == 1:
+ return EndOfDataPDUv1(version, *args, **kwargs)
+ raise NotImplementedError
# Min, max, and default values, from the current RFC 6810 bis I-D.
@@ -324,325 +332,345 @@ def EndOfDataPDU(version, *args, **kwargs):
default_refresh = 3600
def valid_refresh(refresh):
- if not isinstance(refresh, int) or refresh < 120 or refresh > 86400:
- raise ValueError
- return refresh
+ if not isinstance(refresh, int) or refresh < 120 or refresh > 86400:
+ raise ValueError
+ return refresh
default_retry = 600
def valid_retry(retry):
- if not isinstance(retry, int) or retry < 120 or retry > 7200:
- raise ValueError
- return retry
+ if not isinstance(retry, int) or retry < 120 or retry > 7200:
+ raise ValueError
+ return retry
default_expire = 7200
def valid_expire(expire):
- if not isinstance(expire, int) or expire < 600 or expire > 172800:
- raise ValueError
- return expire
+ if not isinstance(expire, int) or expire < 600 or expire > 172800:
+ raise ValueError
+ return expire
@wire_pdu_only(0)
class EndOfDataPDUv0(PDUWithSerial):
- """
- End of Data PDU, protocol version 0.
- """
+ """
+ End of Data PDU, protocol version 0.
+ """
- pdu_type = 7
+ pdu_type = 7
- def __init__(self, version, serial = None, nonce = None, refresh = None, retry = None, expire = None):
- super(EndOfDataPDUv0, self).__init__(version, serial, nonce)
- self.refresh = valid_refresh(default_refresh if refresh is None else refresh)
- self.retry = valid_retry( default_retry if retry is None else retry)
- self.expire = valid_expire( default_expire if expire is None else expire)
+ def __init__(self, version, serial = None, nonce = None, refresh = None, retry = None, expire = None):
+ super(EndOfDataPDUv0, self).__init__(version, serial, nonce)
+ self.refresh = valid_refresh(default_refresh if refresh is None else refresh)
+ self.retry = valid_retry( default_retry if retry is None else retry)
+ self.expire = valid_expire( default_expire if expire is None else expire)
@wire_pdu_only(1)
class EndOfDataPDUv1(EndOfDataPDUv0):
- """
- End of Data PDU, protocol version 1.
- """
-
- header_struct = struct.Struct("!BBHLLLLL")
-
- def __str__(self):
- return "[%s, serial #%d nonce %d refresh %d retry %d expire %d]" % (
- self.__class__.__name__, self.serial, self.nonce, self.refresh, self.retry, self.expire)
-
- def to_pdu(self):
"""
- Generate the wire format PDU.
+ End of Data PDU, protocol version 1.
"""
- if self._pdu is None:
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
- self.header_struct.size, self.serial,
- self.refresh, self.retry, self.expire)
- return self._pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b = reader.get(self.header_struct.size)
- version, pdu_type, self.nonce, length, self.serial, self.refresh, self.retry, self.expire \
- = self.header_struct.unpack(b)
- assert version == self.version and pdu_type == self.pdu_type
- if length != 24:
- raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
- assert b == self.to_pdu()
- return self
+ header_struct = struct.Struct("!BBHLLLLL")
+
+ def __str__(self):
+ return "[%s, serial #%d nonce %d refresh %d retry %d expire %d]" % (
+ self.__class__.__name__, self.serial, self.nonce, self.refresh, self.retry, self.expire)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU.
+ """
+
+ assert announce is None
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
+ self.header_struct.size, self.serial,
+ self.refresh, self.retry, self.expire)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, self.nonce, length, self.serial, self.refresh, self.retry, self.expire \
+ = self.header_struct.unpack(b)
+ assert version == self.version and pdu_type == self.pdu_type
+ if length != 24:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
@wire_pdu
class CacheResetPDU(PDUEmpty):
- """
- Cache reset PDU.
- """
+ """
+ Cache reset PDU.
+ """
- pdu_type = 8
+ pdu_type = 8
class PrefixPDU(PDU):
- """
- Object representing one prefix. This corresponds closely to one PDU
- in the rpki-router protocol, so closely that we use lexical ordering
- of the wire format of the PDU as the ordering for this class.
-
- This is a virtual class, but the .from_text() constructor
- instantiates the correct concrete subclass (IPv4PrefixPDU or
- IPv6PrefixPDU) depending on the syntax of its input text.
- """
-
- header_struct = struct.Struct("!BB2xLBBBx")
- asnum_struct = struct.Struct("!L")
-
- def __str__(self):
- plm = "%s/%s-%s" % (self.prefix, self.prefixlen, self.max_prefixlen)
- return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, plm,
- ":".join(("%02X" % ord(b) for b in self.to_pdu())))
-
- def show(self):
- logging.debug("# Class: %s", self.__class__.__name__)
- logging.debug("# ASN: %s", self.asn)
- logging.debug("# Prefix: %s", self.prefix)
- logging.debug("# Prefixlen: %s", self.prefixlen)
- logging.debug("# MaxPrefixlen: %s", self.max_prefixlen)
- logging.debug("# Announce: %s", self.announce)
-
- def check(self):
- """
- Check attributes to make sure they're within range.
- """
-
- if self.announce not in (0, 1):
- raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
- if self.prefix.bits != self.address_byte_count * 8:
- raise CorruptData("IP address length %d does not match expectation" % self.prefix.bits, pdu = self)
- if self.prefixlen < 0 or self.prefixlen > self.prefix.bits:
- raise CorruptData("Implausible prefix length %d" % self.prefixlen, pdu = self)
- if self.max_prefixlen < self.prefixlen or self.max_prefixlen > self.prefix.bits:
- raise CorruptData("Implausible max prefix length %d" % self.max_prefixlen, pdu = self)
- pdulen = self.header_struct.size + self.prefix.bits/8 + self.asnum_struct.size
- if len(self.to_pdu()) != pdulen:
- raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
-
- def to_pdu(self, announce = None):
- """
- Generate the wire format PDU for this prefix.
- """
-
- if announce is not None:
- assert announce in (0, 1)
- elif self._pdu is not None:
- return self._pdu
- pdulen = self.header_struct.size + self.prefix.bits/8 + self.asnum_struct.size
- pdu = (self.header_struct.pack(self.version, self.pdu_type, pdulen,
- announce if announce is not None else self.announce,
- self.prefixlen, self.max_prefixlen) +
- self.prefix.toBytes() +
- self.asnum_struct.pack(self.asn))
- if announce is None:
- assert self._pdu is None
- self._pdu = pdu
- return pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b1 = reader.get(self.header_struct.size)
- b2 = reader.get(self.address_byte_count)
- b3 = reader.get(self.asnum_struct.size)
- version, pdu_type, length, self.announce, self.prefixlen, self.max_prefixlen = self.header_struct.unpack(b1)
- assert version == self.version and pdu_type == self.pdu_type
- if length != len(b1) + len(b2) + len(b3):
- raise CorruptData("Got PDU length %d, expected %d" % (length, len(b1) + len(b2) + len(b3)), pdu = self)
- self.prefix = rpki.POW.IPAddress.fromBytes(b2)
- self.asn = self.asnum_struct.unpack(b3)[0]
- assert b1 + b2 + b3 == self.to_pdu()
- return self
+ """
+ Object representing one prefix. This corresponds closely to one PDU
+ in the rpki-router protocol, so closely that we use lexical ordering
+ of the wire format of the PDU as the ordering for this class.
+
+ This is a virtual class, but the .from_text() constructor
+ instantiates the correct concrete subclass (IPv4PrefixPDU or
+ IPv6PrefixPDU) depending on the syntax of its input text.
+ """
+
+ header_struct = struct.Struct("!BB2xLBBBx")
+ asnum_struct = struct.Struct("!L")
+ address_byte_count = 0
+
+ def __init__(self, version):
+ super(PrefixPDU, self).__init__(version)
+ self.asn = None
+ self.prefix = None
+ self.prefixlen = None
+ self.max_prefixlen = None
+ self.announce = None
+
+ def __str__(self):
+ plm = "%s/%s-%s" % (self.prefix, self.prefixlen, self.max_prefixlen)
+ return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, plm,
+ ":".join(("%02X" % ord(b) for b in self.to_pdu())))
+
+ def show(self):
+ logging.debug("# Class: %s", self.__class__.__name__)
+ logging.debug("# ASN: %s", self.asn)
+ logging.debug("# Prefix: %s", self.prefix)
+ logging.debug("# Prefixlen: %s", self.prefixlen)
+ logging.debug("# MaxPrefixlen: %s", self.max_prefixlen)
+ logging.debug("# Announce: %s", self.announce)
+
+ def check(self):
+ """
+ Check attributes to make sure they're within range.
+ """
+
+ if self.announce not in (0, 1):
+ raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
+ if self.prefix.bits != self.address_byte_count * 8:
+ raise CorruptData("IP address length %d does not match expectation" % self.prefix.bits, pdu = self)
+ if self.prefixlen < 0 or self.prefixlen > self.prefix.bits:
+ raise CorruptData("Implausible prefix length %d" % self.prefixlen, pdu = self)
+ if self.max_prefixlen < self.prefixlen or self.max_prefixlen > self.prefix.bits:
+ raise CorruptData("Implausible max prefix length %d" % self.max_prefixlen, pdu = self)
+ pdulen = self.header_struct.size + self.prefix.bits/8 + self.asnum_struct.size
+ if len(self.to_pdu()) != pdulen:
+ raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU for this prefix.
+ """
+
+ if announce is not None:
+ assert announce in (0, 1)
+ elif self._pdu is not None:
+ return self._pdu
+ pdulen = self.header_struct.size + self.prefix.bits/8 + self.asnum_struct.size
+ pdu = (self.header_struct.pack(self.version, self.pdu_type, pdulen,
+ announce if announce is not None else self.announce,
+ self.prefixlen, self.max_prefixlen) +
+ self.prefix.toBytes() +
+ self.asnum_struct.pack(self.asn))
+ if announce is None:
+ assert self._pdu is None
+ self._pdu = pdu
+ return pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b1 = reader.get(self.header_struct.size)
+ b2 = reader.get(self.address_byte_count)
+ b3 = reader.get(self.asnum_struct.size)
+ version, pdu_type, length, self.announce, self.prefixlen, self.max_prefixlen = self.header_struct.unpack(b1)
+ assert version == self.version and pdu_type == self.pdu_type
+ if length != len(b1) + len(b2) + len(b3):
+ raise CorruptData("Got PDU length %d, expected %d" % (length, len(b1) + len(b2) + len(b3)), pdu = self)
+ self.prefix = rpki.POW.IPAddress.fromBytes(b2)
+ self.asn = self.asnum_struct.unpack(b3)[0]
+ assert b1 + b2 + b3 == self.to_pdu()
+ return self
@wire_pdu
class IPv4PrefixPDU(PrefixPDU):
- """
- IPv4 flavor of a prefix.
- """
+ """
+ IPv4 flavor of a prefix.
+ """
- pdu_type = 4
- address_byte_count = 4
+ pdu_type = 4
+ address_byte_count = 4
@wire_pdu
class IPv6PrefixPDU(PrefixPDU):
- """
- IPv6 flavor of a prefix.
- """
+ """
+ IPv6 flavor of a prefix.
+ """
- pdu_type = 6
- address_byte_count = 16
+ pdu_type = 6
+ address_byte_count = 16
@wire_pdu_only(1)
class RouterKeyPDU(PDU):
- """
- Router Key PDU.
- """
-
- pdu_type = 9
-
- header_struct = struct.Struct("!BBBxL20sL")
-
- def __str__(self):
- return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn,
- base64.urlsafe_b64encode(self.ski).rstrip("="),
- ":".join(("%02X" % ord(b) for b in self.to_pdu())))
-
- def check(self):
- """
- Check attributes to make sure they're within range.
- """
-
- if self.announce not in (0, 1):
- raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
- if len(self.ski) != 20:
- raise CorruptData("Implausible SKI length %d" % len(self.ski), pdu = self)
- pdulen = self.header_struct.size + len(self.key)
- if len(self.to_pdu()) != pdulen:
- raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
-
- def to_pdu(self, announce = None):
- if announce is not None:
- assert announce in (0, 1)
- elif self._pdu is not None:
- return self._pdu
- pdulen = self.header_struct.size + len(self.key)
- pdu = (self.header_struct.pack(self.version,
- self.pdu_type,
- announce if announce is not None else self.announce,
- pdulen,
- self.ski,
- self.asn)
- + self.key)
- if announce is None:
- assert self._pdu is None
- self._pdu = pdu
- return pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- header = reader.get(self.header_struct.size)
- version, pdu_type, self.announce, length, self.ski, self.asn = self.header_struct.unpack(header)
- assert version == self.version and pdu_type == self.pdu_type
- remaining = length - self.header_struct.size
- if remaining <= 0:
- raise CorruptData("Got PDU length %d, minimum is %d" % (length, self.header_struct.size + 1), pdu = self)
- self.key = reader.get(remaining)
- assert header + self.key == self.to_pdu()
- return self
+ """
+ Router Key PDU.
+ """
+
+ pdu_type = 9
+
+ header_struct = struct.Struct("!BBBxL20sL")
+
+ def __init__(self, version):
+ super(RouterKeyPDU, self).__init__(version)
+ self.announce = None
+ self.ski = None
+ self.asn = None
+ self.key = None
+
+ def __str__(self):
+ return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn,
+ base64.urlsafe_b64encode(self.ski).rstrip("="),
+ ":".join(("%02X" % ord(b) for b in self.to_pdu())))
+
+ def check(self):
+ """
+ Check attributes to make sure they're within range.
+ """
+
+ if self.announce not in (0, 1):
+ raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
+ if len(self.ski) != 20:
+ raise CorruptData("Implausible SKI length %d" % len(self.ski), pdu = self)
+ pdulen = self.header_struct.size + len(self.key)
+ if len(self.to_pdu()) != pdulen:
+ raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
+
+ def to_pdu(self, announce = None):
+ if announce is not None:
+ assert announce in (0, 1)
+ elif self._pdu is not None:
+ return self._pdu
+ pdulen = self.header_struct.size + len(self.key)
+ pdu = (self.header_struct.pack(self.version,
+ self.pdu_type,
+ announce if announce is not None else self.announce,
+ pdulen,
+ self.ski,
+ self.asn)
+ + self.key)
+ if announce is None:
+ assert self._pdu is None
+ self._pdu = pdu
+ return pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ header = reader.get(self.header_struct.size)
+ version, pdu_type, self.announce, length, self.ski, self.asn = self.header_struct.unpack(header)
+ assert version == self.version and pdu_type == self.pdu_type
+ remaining = length - self.header_struct.size
+ if remaining <= 0:
+ raise CorruptData("Got PDU length %d, minimum is %d" % (length, self.header_struct.size + 1), pdu = self)
+ self.key = reader.get(remaining)
+ assert header + self.key == self.to_pdu()
+ return self
@wire_pdu
class ErrorReportPDU(PDU):
- """
- Error Report PDU.
- """
-
- pdu_type = 10
-
- header_struct = struct.Struct("!BBHL")
- string_struct = struct.Struct("!L")
-
- errors = {
- 2 : "No Data Available" }
-
- fatal = {
- 0 : "Corrupt Data",
- 1 : "Internal Error",
- 3 : "Invalid Request",
- 4 : "Unsupported Protocol Version",
- 5 : "Unsupported PDU Type",
- 6 : "Withdrawal of Unknown Record",
- 7 : "Duplicate Announcement Received" }
-
- assert set(errors) & set(fatal) == set()
-
- errors.update(fatal)
-
- codes = dict((v, k) for k, v in errors.items())
-
- def __init__(self, version, errno = None, errpdu = None, errmsg = None):
- super(ErrorReportPDU, self).__init__(version)
- assert errno is None or errno in self.errors
- self.errno = errno
- self.errpdu = errpdu
- self.errmsg = errmsg if errmsg is not None or errno is None else self.errors[errno]
-
- def __str__(self):
- return "[%s, error #%s: %r]" % (self.__class__.__name__, self.errno, self.errmsg)
-
- def to_counted_string(self, s):
- return self.string_struct.pack(len(s)) + s
-
- def read_counted_string(self, reader, remaining):
- assert remaining >= self.string_struct.size
- n = self.string_struct.unpack(reader.get(self.string_struct.size))[0]
- assert remaining >= self.string_struct.size + n
- return n, reader.get(n), (remaining - self.string_struct.size - n)
-
- def to_pdu(self):
- """
- Generate the wire format PDU for this error report.
- """
-
- if self._pdu is None:
- assert isinstance(self.errno, int)
- assert not isinstance(self.errpdu, ErrorReportPDU)
- p = self.errpdu
- if p is None:
- p = ""
- elif isinstance(p, PDU):
- p = p.to_pdu()
- assert isinstance(p, str)
- pdulen = self.header_struct.size + self.string_struct.size * 2 + len(p) + len(self.errmsg)
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.errno, pdulen)
- self._pdu += self.to_counted_string(p)
- self._pdu += self.to_counted_string(self.errmsg.encode("utf8"))
- return self._pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- header = reader.get(self.header_struct.size)
- version, pdu_type, self.errno, length = self.header_struct.unpack(header)
- assert version == self.version and pdu_type == self.pdu_type
- remaining = length - self.header_struct.size
- self.pdulen, self.errpdu, remaining = self.read_counted_string(reader, remaining)
- self.errlen, self.errmsg, remaining = self.read_counted_string(reader, remaining)
- if length != self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen:
- raise CorruptData("Got PDU length %d, expected %d" % (
- length, self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen))
- assert (header
- + self.to_counted_string(self.errpdu)
- + self.to_counted_string(self.errmsg.encode("utf8"))
- == self.to_pdu())
- return self
+ """
+ Error Report PDU.
+ """
+
+ pdu_type = 10
+
+ header_struct = struct.Struct("!BBHL")
+ string_struct = struct.Struct("!L")
+
+ errors = {
+ 2 : "No Data Available" }
+
+ fatal = {
+ 0 : "Corrupt Data",
+ 1 : "Internal Error",
+ 3 : "Invalid Request",
+ 4 : "Unsupported Protocol Version",
+ 5 : "Unsupported PDU Type",
+ 6 : "Withdrawal of Unknown Record",
+ 7 : "Duplicate Announcement Received" }
+
+ assert set(errors) & set(fatal) == set()
+
+ errors.update(fatal)
+
+ codes = dict((v, k) for k, v in errors.items())
+
+ def __init__(self, version, errno = None, errpdu = None, errmsg = None):
+ super(ErrorReportPDU, self).__init__(version)
+ assert errno is None or errno in self.errors
+ self.errno = errno
+ self.errpdu = errpdu
+ self.errmsg = errmsg if errmsg is not None or errno is None else self.errors[errno]
+ self.pdulen = None
+ self.errlen = None
+
+ def __str__(self):
+ return "[%s, error #%s: %r]" % (self.__class__.__name__, self.errno, self.errmsg)
+
+ def to_counted_string(self, s):
+ return self.string_struct.pack(len(s)) + s
+
+ def read_counted_string(self, reader, remaining):
+ assert remaining >= self.string_struct.size
+ n = self.string_struct.unpack(reader.get(self.string_struct.size))[0]
+ assert remaining >= self.string_struct.size + n
+ return n, reader.get(n), (remaining - self.string_struct.size - n)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU for this error report.
+ """
+
+ assert announce is None
+ if self._pdu is None:
+ assert isinstance(self.errno, int)
+ assert not isinstance(self.errpdu, ErrorReportPDU)
+ p = self.errpdu
+ if p is None:
+ p = ""
+ elif isinstance(p, PDU):
+ p = p.to_pdu()
+ assert isinstance(p, str)
+ pdulen = self.header_struct.size + self.string_struct.size * 2 + len(p) + len(self.errmsg)
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.errno, pdulen)
+ self._pdu += self.to_counted_string(p)
+ self._pdu += self.to_counted_string(self.errmsg.encode("utf8"))
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ header = reader.get(self.header_struct.size)
+ version, pdu_type, self.errno, length = self.header_struct.unpack(header)
+ assert version == self.version and pdu_type == self.pdu_type
+ remaining = length - self.header_struct.size
+ self.pdulen, self.errpdu, remaining = self.read_counted_string(reader, remaining)
+ self.errlen, self.errmsg, remaining = self.read_counted_string(reader, remaining)
+ if length != self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen:
+ raise CorruptData("Got PDU length %d, expected %d" % (
+ length, self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen))
+ assert (header
+ + self.to_counted_string(self.errpdu)
+ + self.to_counted_string(self.errmsg.encode("utf8"))
+ == self.to_pdu())
+ return self
diff --git a/rpki/rtr/server.py b/rpki/rtr/server.py
index 2ea3a040..c08320fc 100644
--- a/rpki/rtr/server.py
+++ b/rpki/rtr/server.py
@@ -44,37 +44,37 @@ kickme_base = os.path.join(kickme_dir, "kickme")
class PDU(rpki.rtr.pdus.PDU):
- """
- Generic server PDU.
- """
-
- def send_file(self, server, filename):
"""
+ Send the content of a file as a cache response. Caller should catch IOError.
+ Generic server PDU.
"""
- fn2 = os.path.splitext(filename)[1]
- assert fn2.startswith(".v") and fn2[2:].isdigit() and int(fn2[2:]) == server.version
-
- f = open(filename, "rb")
- server.push_pdu(CacheResponsePDU(version = server.version,
- nonce = server.current_nonce))
- server.push_file(f)
- server.push_pdu(EndOfDataPDU(version = server.version,
- serial = server.current_serial,
- nonce = server.current_nonce,
- refresh = server.refresh,
- retry = server.retry,
- expire = server.expire))
-
- def send_nodata(self, server):
- """
- Send a nodata error.
- """
+ def send_file(self, server, filename):
+ """
+ Send the content of a file as a cache response. Caller should catch IOError.
+ """
+
+ fn2 = os.path.splitext(filename)[1]
+ assert fn2.startswith(".v") and fn2[2:].isdigit() and int(fn2[2:]) == server.version
- server.push_pdu(ErrorReportPDU(version = server.version,
- errno = ErrorReportPDU.codes["No Data Available"],
- errpdu = self))
+ f = open(filename, "rb")
+ server.push_pdu(CacheResponsePDU(version = server.version,
+ nonce = server.current_nonce))
+ server.push_file(f)
+ server.push_pdu(EndOfDataPDU(version = server.version,
+ serial = server.current_serial,
+ nonce = server.current_nonce,
+ refresh = server.refresh,
+ retry = server.retry,
+ expire = server.expire))
+
+ def send_nodata(self, server):
+ """
+ Send a nodata error.
+ """
+
+ server.push_pdu(ErrorReportPDU(version = server.version,
+ errno = ErrorReportPDU.codes["No Data Available"],
+ errpdu = self))
clone_pdu = clone_pdu_root(PDU)
@@ -82,513 +82,513 @@ clone_pdu = clone_pdu_root(PDU)
@clone_pdu
class SerialQueryPDU(PDU, rpki.rtr.pdus.SerialQueryPDU):
- """
- Serial Query PDU.
- """
-
- def serve(self, server):
- """
- Received a serial query, send incremental transfer in response.
- If client is already up to date, just send an empty incremental
- transfer.
"""
-
- server.logger.debug(self)
- if server.get_serial() is None:
- self.send_nodata(server)
- elif server.current_nonce != self.nonce:
- server.logger.info("[Client requested wrong nonce, resetting client]")
- server.push_pdu(CacheResetPDU(version = server.version))
- elif server.current_serial == self.serial:
- server.logger.debug("[Client is already current, sending empty IXFR]")
- server.push_pdu(CacheResponsePDU(version = server.version,
- nonce = server.current_nonce))
- server.push_pdu(EndOfDataPDU(version = server.version,
- serial = server.current_serial,
- nonce = server.current_nonce,
- refresh = server.refresh,
- retry = server.retry,
- expire = server.expire))
- elif disable_incrementals:
- server.push_pdu(CacheResetPDU(version = server.version))
- else:
- try:
- self.send_file(server, "%d.ix.%d.v%d" % (server.current_serial, self.serial, server.version))
- except IOError:
- server.push_pdu(CacheResetPDU(version = server.version))
+ Serial Query PDU.
+ """
+
+ def serve(self, server):
+ """
+ Received a serial query, send incremental transfer in response.
+ If client is already up to date, just send an empty incremental
+ transfer.
+ """
+
+ server.logger.debug(self)
+ if server.get_serial() is None:
+ self.send_nodata(server)
+ elif server.current_nonce != self.nonce:
+ server.logger.info("[Client requested wrong nonce, resetting client]")
+ server.push_pdu(CacheResetPDU(version = server.version))
+ elif server.current_serial == self.serial:
+ server.logger.debug("[Client is already current, sending empty IXFR]")
+ server.push_pdu(CacheResponsePDU(version = server.version,
+ nonce = server.current_nonce))
+ server.push_pdu(EndOfDataPDU(version = server.version,
+ serial = server.current_serial,
+ nonce = server.current_nonce,
+ refresh = server.refresh,
+ retry = server.retry,
+ expire = server.expire))
+ elif disable_incrementals:
+ server.push_pdu(CacheResetPDU(version = server.version))
+ else:
+ try:
+ self.send_file(server, "%d.ix.%d.v%d" % (server.current_serial, self.serial, server.version))
+ except IOError:
+ server.push_pdu(CacheResetPDU(version = server.version))
@clone_pdu
class ResetQueryPDU(PDU, rpki.rtr.pdus.ResetQueryPDU):
- """
- Reset Query PDU.
- """
-
- def serve(self, server):
"""
- Received a reset query, send full current state in response.
+ Reset Query PDU.
"""
- server.logger.debug(self)
- if server.get_serial() is None:
- self.send_nodata(server)
- else:
- try:
- fn = "%d.ax.v%d" % (server.current_serial, server.version)
- self.send_file(server, fn)
- except IOError:
- server.push_pdu(ErrorReportPDU(version = server.version,
- errno = ErrorReportPDU.codes["Internal Error"],
- errpdu = self,
- errmsg = "Couldn't open %s" % fn))
+ def serve(self, server):
+ """
+ Received a reset query, send full current state in response.
+ """
+
+ server.logger.debug(self)
+ if server.get_serial() is None:
+ self.send_nodata(server)
+ else:
+ try:
+ fn = "%d.ax.v%d" % (server.current_serial, server.version)
+ self.send_file(server, fn)
+ except IOError:
+ server.push_pdu(ErrorReportPDU(version = server.version,
+ errno = ErrorReportPDU.codes["Internal Error"],
+ errpdu = self,
+ errmsg = "Couldn't open %s" % fn))
@clone_pdu
class ErrorReportPDU(rpki.rtr.pdus.ErrorReportPDU):
- """
- Error Report PDU.
- """
-
- def serve(self, server):
"""
- Received an ErrorReportPDU from client. Not much we can do beyond
- logging it, then killing the connection if error was fatal.
+ Error Report PDU.
"""
- server.logger.error(self)
- if self.errno in self.fatal:
- server.logger.error("[Shutting down due to reported fatal protocol error]")
- sys.exit(1)
+ def serve(self, server):
+ """
+ Received an ErrorReportPDU from client. Not much we can do beyond
+ logging it, then killing the connection if error was fatal.
+ """
+
+ server.logger.error(self)
+ if self.errno in self.fatal:
+ server.logger.error("[Shutting down due to reported fatal protocol error]")
+ sys.exit(1)
def read_current(version):
- """
- Read current serial number and nonce. Return None for both if
- serial and nonce not recorded. For backwards compatibility, treat
- file containing just a serial number as having a nonce of zero.
- """
-
- if version is None:
- return None, None
- try:
- with open("current.v%d" % version, "r") as f:
- values = tuple(int(s) for s in f.read().split())
- return values[0], values[1]
- except IndexError:
- return values[0], 0
- except IOError:
- return None, None
+ """
+ Read current serial number and nonce. Return None for both if
+ serial and nonce not recorded. For backwards compatibility, treat
+ file containing just a serial number as having a nonce of zero.
+ """
+
+ if version is None:
+ return None, None
+ try:
+ with open("current.v%d" % version, "r") as f:
+ values = tuple(int(s) for s in f.read().split())
+ return values[0], values[1]
+ except IndexError:
+ return values[0], 0
+ except IOError:
+ return None, None
def write_current(serial, nonce, version):
- """
- Write serial number and nonce.
- """
+ """
+ Write serial number and nonce.
+ """
- curfn = "current.v%d" % version
- tmpfn = curfn + "%d.tmp" % os.getpid()
- with open(tmpfn, "w") as f:
- f.write("%d %d\n" % (serial, nonce))
- os.rename(tmpfn, curfn)
+ curfn = "current.v%d" % version
+ tmpfn = curfn + "%d.tmp" % os.getpid()
+ with open(tmpfn, "w") as f:
+ f.write("%d %d\n" % (serial, nonce))
+ os.rename(tmpfn, curfn)
class FileProducer(object):
- """
- File-based producer object for asynchat.
- """
+ """
+ File-based producer object for asynchat.
+ """
- def __init__(self, handle, buffersize):
- self.handle = handle
- self.buffersize = buffersize
+ def __init__(self, handle, buffersize):
+ self.handle = handle
+ self.buffersize = buffersize
- def more(self):
- return self.handle.read(self.buffersize)
+ def more(self):
+ return self.handle.read(self.buffersize)
class ServerWriteChannel(rpki.rtr.channels.PDUChannel):
- """
- Kludge to deal with ssh's habit of sometimes (compile time option)
- invoking us with two unidirectional pipes instead of one
- bidirectional socketpair. All the server logic is in the
- ServerChannel class, this class just deals with sending the
- server's output to a different file descriptor.
- """
-
- def __init__(self):
"""
- Set up stdout.
+ Kludge to deal with ssh's habit of sometimes (compile time option)
+ invoking us with two unidirectional pipes instead of one
+ bidirectional socketpair. All the server logic is in the
+ ServerChannel class, this class just deals with sending the
+ server's output to a different file descriptor.
"""
- super(ServerWriteChannel, self).__init__(root_pdu_class = PDU)
- self.init_file_dispatcher(sys.stdout.fileno())
+ def __init__(self):
+ """
+ Set up stdout.
+ """
- def readable(self):
- """
- This channel is never readable.
- """
+ super(ServerWriteChannel, self).__init__(root_pdu_class = PDU)
+ self.init_file_dispatcher(sys.stdout.fileno())
- return False
+ def readable(self):
+ """
+ This channel is never readable.
+ """
- def push_file(self, f):
- """
- Write content of a file to stream.
- """
+ return False
- try:
- self.push_with_producer(FileProducer(f, self.ac_out_buffer_size))
- except OSError, e:
- if e.errno != errno.EAGAIN:
- raise
+ def push_file(self, f):
+ """
+ Write content of a file to stream.
+ """
+ try:
+ self.push_with_producer(FileProducer(f, self.ac_out_buffer_size))
+ except OSError, e:
+ if e.errno != errno.EAGAIN:
+ raise
-class ServerChannel(rpki.rtr.channels.PDUChannel):
- """
- Server protocol engine, handles upcalls from PDUChannel to
- implement protocol logic.
- """
- def __init__(self, logger, refresh, retry, expire):
+class ServerChannel(rpki.rtr.channels.PDUChannel):
"""
- Set up stdin and stdout as connection and start listening for
- first PDU.
+ Server protocol engine, handles upcalls from PDUChannel to
+ implement protocol logic.
"""
- super(ServerChannel, self).__init__(root_pdu_class = PDU)
- self.init_file_dispatcher(sys.stdin.fileno())
- self.writer = ServerWriteChannel()
- self.logger = logger
- self.refresh = refresh
- self.retry = retry
- self.expire = expire
- self.get_serial()
- self.start_new_pdu()
-
- def writable(self):
- """
- This channel is never writable.
- """
+ def __init__(self, logger, refresh, retry, expire):
+ """
+ Set up stdin and stdout as connection and start listening for
+ first PDU.
+ """
- return False
+ super(ServerChannel, self).__init__(root_pdu_class = PDU)
+ self.init_file_dispatcher(sys.stdin.fileno())
+ self.writer = ServerWriteChannel()
+ self.logger = logger
+ self.refresh = refresh
+ self.retry = retry
+ self.expire = expire
+ self.get_serial()
+ self.start_new_pdu()
- def push(self, data):
- """
- Redirect to writer channel.
- """
+ def writable(self):
+ """
+ This channel is never writable.
+ """
- return self.writer.push(data)
+ return False
- def push_with_producer(self, producer):
- """
- Redirect to writer channel.
- """
+ def push(self, data):
+ """
+ Redirect to writer channel.
+ """
- return self.writer.push_with_producer(producer)
+ return self.writer.push(data)
- def push_pdu(self, pdu):
- """
- Redirect to writer channel.
- """
+ def push_with_producer(self, producer):
+ """
+ Redirect to writer channel.
+ """
- return self.writer.push_pdu(pdu)
+ return self.writer.push_with_producer(producer)
- def push_file(self, f):
- """
- Redirect to writer channel.
- """
+ def push_pdu(self, pdu):
+ """
+ Redirect to writer channel.
+ """
- return self.writer.push_file(f)
+ return self.writer.push_pdu(pdu)
- def deliver_pdu(self, pdu):
- """
- Handle received PDU.
- """
+ def push_file(self, f):
+ """
+ Redirect to writer channel.
+ """
- pdu.serve(self)
+ return self.writer.push_file(f)
- def get_serial(self):
- """
- Read, cache, and return current serial number, or None if we can't
- find the serial number file. The latter condition should never
- happen, but maybe we got started in server mode while the cronjob
- mode instance is still building its database.
- """
+ def deliver_pdu(self, pdu):
+ """
+ Handle received PDU.
+ """
- self.current_serial, self.current_nonce = read_current(self.version)
- return self.current_serial
+ pdu.serve(self)
- def check_serial(self):
- """
- Check for a new serial number.
- """
+ def get_serial(self):
+ """
+ Read, cache, and return current serial number, or None if we can't
+ find the serial number file. The latter condition should never
+ happen, but maybe we got started in server mode while the cronjob
+ mode instance is still building its database.
+ """
- old_serial = self.current_serial
- return old_serial != self.get_serial()
+ self.current_serial, self.current_nonce = read_current(self.version)
+ return self.current_serial
- def notify(self, data = None, force = False):
- """
- Cronjob instance kicked us: check whether our serial number has
- changed, and send a notify message if so.
+ def check_serial(self):
+ """
+ Check for a new serial number.
+ """
- We have to check rather than just blindly notifying when kicked
- because the cronjob instance has no good way of knowing which
- protocol version we're running, thus has no good way of knowing
- whether we care about a particular change set or not.
- """
+ old_serial = self.current_serial
+ return old_serial != self.get_serial()
- if force or self.check_serial():
- self.push_pdu(SerialNotifyPDU(version = self.version,
- serial = self.current_serial,
- nonce = self.current_nonce))
- else:
- self.logger.debug("Cronjob kicked me but I see no serial change, ignoring")
+ def notify(self, data = None, force = False):
+ """
+ Cronjob instance kicked us: check whether our serial number has
+ changed, and send a notify message if so.
+
+ We have to check rather than just blindly notifying when kicked
+ because the cronjob instance has no good way of knowing which
+ protocol version we're running, thus has no good way of knowing
+ whether we care about a particular change set or not.
+ """
+
+ if force or self.check_serial():
+ self.push_pdu(SerialNotifyPDU(version = self.version,
+ serial = self.current_serial,
+ nonce = self.current_nonce))
+ else:
+ self.logger.debug("Cronjob kicked me but I see no serial change, ignoring")
class KickmeChannel(asyncore.dispatcher, object):
- """
- asyncore dispatcher for the PF_UNIX socket that cronjob mode uses to
- kick servers when it's time to send notify PDUs to clients.
- """
-
- def __init__(self, server):
- asyncore.dispatcher.__init__(self) # Old-style class
- self.server = server
- self.sockname = "%s.%d" % (kickme_base, os.getpid())
- self.create_socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- try:
- self.bind(self.sockname)
- os.chmod(self.sockname, 0660)
- except socket.error, e:
- self.server.logger.exception("Couldn't bind() kickme socket: %r", e)
- self.close()
- except OSError, e:
- self.server.logger.exception("Couldn't chmod() kickme socket: %r", e)
-
- def writable(self):
"""
- This socket is read-only, never writable.
+ asyncore dispatcher for the PF_UNIX socket that cronjob mode uses to
+ kick servers when it's time to send notify PDUs to clients.
"""
- return False
+ def __init__(self, server):
+ asyncore.dispatcher.__init__(self) # Old-style class
+ self.server = server
+ self.sockname = "%s.%d" % (kickme_base, os.getpid())
+ self.create_socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ try:
+ self.bind(self.sockname)
+ os.chmod(self.sockname, 0660)
+ except socket.error, e:
+ self.server.logger.exception("Couldn't bind() kickme socket: %r", e)
+ self.close()
+ except OSError, e:
+ self.server.logger.exception("Couldn't chmod() kickme socket: %r", e)
+
+ def writable(self):
+ """
+ This socket is read-only, never writable.
+ """
+
+ return False
+
+ def handle_connect(self):
+ """
+ Ignore connect events (not very useful on datagram socket).
+ """
+
+ pass
+
+ def handle_read(self):
+ """
+ Handle receipt of a datagram.
+ """
+
+ data = self.recv(512)
+ self.server.notify(data)
+
+ def cleanup(self):
+ """
+ Clean up this dispatcher's socket.
+ """
+
+ self.close()
+ try:
+ os.unlink(self.sockname)
+ except:
+ pass
- def handle_connect(self):
- """
- Ignore connect events (not very useful on datagram socket).
- """
+ def log(self, msg):
+ """
+ Intercept asyncore's logging.
+ """
- pass
+ self.server.logger.info(msg)
- def handle_read(self):
- """
- Handle receipt of a datagram.
- """
+ def log_info(self, msg, tag = "info"):
+ """
+ Intercept asyncore's logging.
+ """
- data = self.recv(512)
- self.server.notify(data)
+ self.server.logger.info("asyncore: %s: %s", tag, msg)
- def cleanup(self):
- """
- Clean up this dispatcher's socket.
- """
+ def handle_error(self):
+ """
+ Handle errors caught by asyncore main loop.
+ """
+
+ self.server.logger.exception("[Unhandled exception]")
+ self.server.logger.critical("[Exiting after unhandled exception]")
+ sys.exit(1)
- self.close()
- try:
- os.unlink(self.sockname)
- except: # pylint: disable=W0702
- pass
- def log(self, msg):
+def hostport_tag():
"""
- Intercept asyncore's logging.
+ Construct hostname/address + port when we're running under a
+ protocol we understand well enough to do that. This is all
+ kludgery. Just grit your teeth, or perhaps just close your eyes.
"""
- self.server.logger.info(msg)
+ proto = None
- def log_info(self, msg, tag = "info"):
- """
- Intercept asyncore's logging.
- """
+ if proto is None:
+ try:
+ host, port = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM).getpeername()
+ proto = "tcp"
+ except:
+ pass
+
+ if proto is None:
+ try:
+ host, port = socket.fromfd(0, socket.AF_INET6, socket.SOCK_STREAM).getpeername()[0:2]
+ proto = "tcp"
+ except:
+ pass
+
+ if proto is None:
+ try:
+ host, port = os.environ["SSH_CONNECTION"].split()[0:2]
+ proto = "ssh"
+ except:
+ pass
+
+ if proto is None:
+ try:
+ host, port = os.environ["REMOTE_HOST"], os.getenv("REMOTE_PORT")
+ proto = "ssl"
+ except:
+ pass
+
+ if proto is None:
+ return ""
+ elif not port:
+ return "/%s/%s" % (proto, host)
+ elif ":" in host:
+ return "/%s/%s.%s" % (proto, host, port)
+ else:
+ return "/%s/%s:%s" % (proto, host, port)
- self.server.logger.info("asyncore: %s: %s", tag, msg)
- def handle_error(self):
+def server_main(args):
"""
- Handle errors caught by asyncore main loop.
+ Implement the server side of the rpki-router protocol. Other than
+ one PF_UNIX socket inode, this doesn't write anything to disk, so it
+ can be run with minimal privileges. Most of the work has already
+ been done by the database generator, so all this server has to do is
+ pass the results along to a client.
"""
- self.server.logger.exception("[Unhandled exception]")
- self.server.logger.critical("[Exiting after unhandled exception]")
- sys.exit(1)
-
+ logger = logging.LoggerAdapter(logging.root, dict(connection = hostport_tag()))
-def _hostport_tag():
- """
- Construct hostname/address + port when we're running under a
- protocol we understand well enough to do that. This is all
- kludgery. Just grit your teeth, or perhaps just close your eyes.
- """
+ logger.debug("[Starting]")
- proto = None
+ if args.rpki_rtr_dir:
+ try:
+ os.chdir(args.rpki_rtr_dir)
+ except OSError, e:
+ logger.error("[Couldn't chdir(%r), exiting: %s]", args.rpki_rtr_dir, e)
+ sys.exit(1)
- if proto is None:
+ kickme = None
try:
- host, port = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM).getpeername()
- proto = "tcp"
- except: # pylint: disable=W0702
- pass
+ server = rpki.rtr.server.ServerChannel(logger = logger, refresh = args.refresh, retry = args.retry, expire = args.expire)
+ kickme = rpki.rtr.server.KickmeChannel(server = server)
+ asyncore.loop(timeout = None)
+ signal.signal(signal.SIGINT, signal.SIG_IGN) # Theorized race condition
+ except KeyboardInterrupt:
+ sys.exit(0)
+ finally:
+ signal.signal(signal.SIGINT, signal.SIG_IGN) # Observed race condition
+ if kickme is not None:
+ kickme.cleanup()
- if proto is None:
- try:
- host, port = socket.fromfd(0, socket.AF_INET6, socket.SOCK_STREAM).getpeername()[0:2]
- proto = "tcp"
- except: # pylint: disable=W0702
- pass
- if proto is None:
- try:
- host, port = os.environ["SSH_CONNECTION"].split()[0:2]
- proto = "ssh"
- except: # pylint: disable=W0702
- pass
+def listener_main(args):
+ """
+ Totally insecure TCP listener for rpki-rtr protocol. We only
+ implement this because it's all that the routers currently support.
+ In theory, we will all be running TCP-AO in the future, at which
+ point this listener will go away or become a TCP-AO listener.
+ """
- if proto is None:
- try:
- host, port = os.environ["REMOTE_HOST"], os.getenv("REMOTE_PORT")
- proto = "ssl"
- except: # pylint: disable=W0702
- pass
+ # Perhaps we should daemonize? Deal with that later.
- if proto is None:
- return ""
- elif not port:
- return "/%s/%s" % (proto, host)
- elif ":" in host:
- return "/%s/%s.%s" % (proto, host, port)
- else:
- return "/%s/%s:%s" % (proto, host, port)
+ # server_main() handles args.rpki_rtr_dir.
+ listener = None
+ try:
+ listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ listener.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
+ except:
+ if listener is not None:
+ listener.close()
+ listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except AttributeError:
+ pass
+ listener.bind(("", args.port))
+ listener.listen(5)
+ logging.debug("[Listening on port %s]", args.port)
+ while True:
+ try:
+ s, ai = listener.accept()
+ except KeyboardInterrupt:
+ sys.exit(0)
+ logging.debug("[Received connection from %r]", ai)
+ pid = os.fork()
+ if pid == 0:
+ os.dup2(s.fileno(), 0) # pylint: disable=E1101
+ os.dup2(s.fileno(), 1) # pylint: disable=E1101
+ s.close()
+ #os.closerange(3, os.sysconf("SC_OPEN_MAX"))
+ server_main(args)
+ sys.exit()
+ else:
+ logging.debug("[Spawned server %d]", pid)
+ while True:
+ try:
+ pid, status = os.waitpid(0, os.WNOHANG)
+ if pid:
+ logging.debug("[Server %s exited with status 0x%x]", pid, status)
+ continue
+ except:
+ pass
+ break
-def server_main(args):
- """
- Implement the server side of the rpkk-router protocol. Other than
- one PF_UNIX socket inode, this doesn't write anything to disk, so it
- can be run with minimal privileges. Most of the work has already
- been done by the database generator, so all this server has to do is
- pass the results along to a client.
- """
- logger = logging.LoggerAdapter(logging.root, dict(connection = _hostport_tag()))
+def argparse_setup(subparsers):
+ """
+ Set up argparse stuff for commands in this module.
+ """
- logger.debug("[Starting]")
+ # These could have been lambdas, but doing it this way results in
+ # more useful error messages on argparse failures.
- if args.rpki_rtr_dir:
- try:
- os.chdir(args.rpki_rtr_dir)
- except OSError, e:
- logger.error("[Couldn't chdir(%r), exiting: %s]", args.rpki_rtr_dir, e)
- sys.exit(1)
-
- kickme = None
- try:
- server = rpki.rtr.server.ServerChannel(logger = logger, refresh = args.refresh, retry = args.retry, expire = args.expire)
- kickme = rpki.rtr.server.KickmeChannel(server = server)
- asyncore.loop(timeout = None)
- signal.signal(signal.SIGINT, signal.SIG_IGN) # Theorized race condition
- except KeyboardInterrupt:
- sys.exit(0)
- finally:
- signal.signal(signal.SIGINT, signal.SIG_IGN) # Observed race condition
- if kickme is not None:
- kickme.cleanup()
+ def refresh(v):
+ return rpki.rtr.pdus.valid_refresh(int(v))
+ def retry(v):
+ return rpki.rtr.pdus.valid_retry(int(v))
-def listener_main(args):
- """
- Totally insecure TCP listener for rpki-rtr protocol. We only
- implement this because it's all that the routers currently support.
- In theory, we will all be running TCP-AO in the future, at which
- point this listener will go away or become a TCP-AO listener.
- """
-
- # Perhaps we should daemonize? Deal with that later.
-
- # server_main() handles args.rpki_rtr_dir.
-
- listener = None
- try:
- listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
- listener.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
- except: # pylint: disable=W0702
- if listener is not None:
- listener.close()
- listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- try:
- listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- except AttributeError:
- pass
- listener.bind(("", args.port))
- listener.listen(5)
- logging.debug("[Listening on port %s]", args.port)
- while True:
- try:
- s, ai = listener.accept()
- except KeyboardInterrupt:
- sys.exit(0)
- logging.debug("[Received connection from %r]", ai)
- pid = os.fork()
- if pid == 0:
- os.dup2(s.fileno(), 0) # pylint: disable=E1103
- os.dup2(s.fileno(), 1) # pylint: disable=E1103
- s.close()
- #os.closerange(3, os.sysconf("SC_OPEN_MAX"))
- server_main(args)
- sys.exit()
- else:
- logging.debug("[Spawned server %d]", pid)
- while True:
- try:
- pid, status = os.waitpid(0, os.WNOHANG) # pylint: disable=W0612
- if pid:
- logging.debug("[Server %s exited]", pid)
- continue
- except: # pylint: disable=W0702
- pass
- break
+ def expire(v):
+ return rpki.rtr.pdus.valid_expire(int(v))
+ # Some duplication of arguments here, not enough to be worth huge
+ # effort to clean up, worry about it later in any case.
-def argparse_setup(subparsers):
- """
- Set up argparse stuff for commands in this module.
- """
-
- # These could have been lambdas, but doing it this way results in
- # more useful error messages on argparse failures.
-
- def refresh(v):
- return rpki.rtr.pdus.valid_refresh(int(v))
-
- def retry(v):
- return rpki.rtr.pdus.valid_retry(int(v))
-
- def expire(v):
- return rpki.rtr.pdus.valid_expire(int(v))
-
- # Some duplication of arguments here, not enough to be worth huge
- # effort to clean up, worry about it later in any case.
-
- subparser = subparsers.add_parser("server", description = server_main.__doc__,
- help = "RPKI-RTR protocol server")
- subparser.set_defaults(func = server_main, default_log_to = "syslog")
- subparser.add_argument("--refresh", type = refresh, help = "override default refresh timer")
- subparser.add_argument("--retry", type = retry, help = "override default retry timer")
- subparser.add_argument("--expire", type = expire, help = "override default expire timer")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
-
- subparser = subparsers.add_parser("listener", description = listener_main.__doc__,
- help = "TCP listener for RPKI-RTR protocol server")
- subparser.set_defaults(func = listener_main, default_log_to = "syslog")
- subparser.add_argument("--refresh", type = refresh, help = "override default refresh timer")
- subparser.add_argument("--retry", type = retry, help = "override default retry timer")
- subparser.add_argument("--expire", type = expire, help = "override default expire timer")
- subparser.add_argument("port", type = int, help = "TCP port on which to listen")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
+ subparser = subparsers.add_parser("server", description = server_main.__doc__,
+ help = "RPKI-RTR protocol server")
+ subparser.set_defaults(func = server_main, default_log_destination = "syslog")
+ subparser.add_argument("--refresh", type = refresh, help = "override default refresh timer")
+ subparser.add_argument("--retry", type = retry, help = "override default retry timer")
+ subparser.add_argument("--expire", type = expire, help = "override default expire timer")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
+
+ subparser = subparsers.add_parser("listener", description = listener_main.__doc__,
+ help = "TCP listener for RPKI-RTR protocol server")
+ subparser.set_defaults(func = listener_main, default_log_destination = "syslog")
+ subparser.add_argument("--refresh", type = refresh, help = "override default refresh timer")
+ subparser.add_argument("--retry", type = retry, help = "override default retry timer")
+ subparser.add_argument("--expire", type = expire, help = "override default expire timer")
+ subparser.add_argument("port", type = int, help = "TCP port on which to listen")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
diff --git a/rpki/sql.py b/rpki/sql.py
deleted file mode 100644
index 96c8d086..00000000
--- a/rpki/sql.py
+++ /dev/null
@@ -1,437 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2009-2013 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-"""
-SQL interface code.
-"""
-
-import logging
-import weakref
-
-from rpki.mysql_import import (MySQLdb, _mysql_exceptions)
-
-import rpki.x509
-import rpki.resource_set
-import rpki.sundial
-import rpki.log
-
-logger = logging.getLogger(__name__)
-
-class session(object):
- """
- SQL session layer.
- """
-
- ## @var ping_threshold
- # Timeout after which we should issue a ping command before the real
- # one. Intent is to keep the MySQL connection alive without pinging
- # before every single command.
-
- ping_threshold = rpki.sundial.timedelta(seconds = 60)
-
- def __init__(self, cfg):
-
- self.username = cfg.get("sql-username")
- self.database = cfg.get("sql-database")
- self.password = cfg.get("sql-password")
-
- self.conv = MySQLdb.converters.conversions.copy()
- self.conv.update({
- rpki.sundial.datetime : MySQLdb.converters.DateTime2literal,
- MySQLdb.converters.FIELD_TYPE.DATETIME : rpki.sundial.datetime.DateTime_or_None })
-
- self.cache = weakref.WeakValueDictionary()
- self.dirty = set()
-
- self.connect()
-
- def connect(self):
- self.db = MySQLdb.connect(user = self.username,
- db = self.database,
- passwd = self.password,
- conv = self.conv)
- self.cur = self.db.cursor()
- self.db.autocommit(True)
- self.timestamp = rpki.sundial.now()
-
- # Try this as a workaround for MySQL 5.6 UTF8 characterset
- # braindamage, in which MySQL starts rejecting ASN.1 DER because
- # it's not valid UTF-8. Twits.
- #
- # Except that it breaks MySQL 5.5, so wrap it and ignore errors. Twits ** 2.
- try:
- self.execute("charset = latin1")
- except:
- logger.info("Whacking charset to Latin1 to save MySQL 5.6 from its own confusion failed, blundering onwards")
-
- def close(self):
- if self.cur:
- self.cur.close()
- self.cur = None
- if self.db:
- self.db.close()
- self.db = None
-
- def _wrap_execute(self, func, query, args):
- try:
- now = rpki.sundial.now()
- if now > self.timestamp + self.ping_threshold:
- self.db.ping(True)
- self.timestamp = now
- return func(query, args)
- except _mysql_exceptions.MySQLError:
- if self.dirty:
- logger.warning("MySQL exception with dirty objects in SQL cache!")
- raise
-
- def execute(self, query, args = None):
- return self._wrap_execute(self.cur.execute, query, args)
-
- def executemany(self, query, args):
- return self._wrap_execute(self.cur.executemany, query, args)
-
- def fetchall(self):
- return self.cur.fetchall()
-
- def lastrowid(self):
- return self.cur.lastrowid
-
- def cache_clear(self):
- """
- Clear the SQL object cache. Shouldn't be necessary now that the
- cache uses weak references, but should be harmless.
- """
- logger.debug("Clearing SQL cache")
- self.assert_pristine()
- self.cache.clear()
-
- def assert_pristine(self):
- """
- Assert that there are no dirty objects in the cache.
- """
- assert not self.dirty, "Dirty objects in SQL cache: %s" % self.dirty
-
- def sweep(self):
- """
- Write any dirty objects out to SQL.
- """
- for s in self.dirty.copy():
- #if s.sql_cache_debug:
- logger.debug("Sweeping (%s) %r", "deleting" if s.sql_deleted else "storing", s)
- if s.sql_deleted:
- s.sql_delete()
- else:
- s.sql_store()
- self.assert_pristine()
-
-class template(object):
- """
- SQL template generator.
- """
-
- def __init__(self, table_name, index_column, *data_columns):
- """
- Build a SQL template.
- """
- type_map = dict((x[0], x[1]) for x in data_columns if isinstance(x, tuple))
- data_columns = tuple(isinstance(x, tuple) and x[0] or x for x in data_columns)
- columns = (index_column,) + data_columns
- self.table = table_name
- self.index = index_column
- self.columns = columns
- self.map = type_map
- self.select = "SELECT %s FROM %s" % (", ".join("%s.%s" % (table_name, c) for c in columns), table_name)
- self.insert = "INSERT %s (%s) VALUES (%s)" % (table_name,
- ", ".join(data_columns),
- ", ".join("%(" + s + ")s" for s in data_columns))
- self.update = "UPDATE %s SET %s WHERE %s = %%(%s)s" % (table_name,
- ", ".join(s + " = %(" + s + ")s" for s in data_columns),
- index_column,
- index_column)
- self.delete = "DELETE FROM %s WHERE %s = %%s" % (table_name, index_column)
-
-class sql_persistent(object):
- """
- Mixin for persistent class that needs to be stored in SQL.
- """
-
- ## @var sql_in_db
- # Whether this object is already in SQL or not.
-
- sql_in_db = False
-
- ## @var sql_deleted
- # Whether our cached copy of this object has been deleted.
-
- sql_deleted = False
-
- ## @var sql_debug
- # Enable logging of SQL actions
-
- sql_debug = False
-
- ## @var sql_cache_debug
- # Enable debugging of SQL cache actions
-
- sql_cache_debug = False
-
- @classmethod
- def sql_fetch(cls, gctx, id): # pylint: disable=W0622
- """
- Fetch one object from SQL, based on its primary key.
-
- Since in this one case we know that the primary index is also the
- cache key, we check for a cache hit directly in the hope of
- bypassing the SQL lookup entirely.
-
- This method is usually called via a one-line class-specific
- wrapper. As a convenience, we also accept an id of None, and just
- return None in this case.
- """
-
- if id is None:
- return None
- assert isinstance(id, (int, long)), "id should be an integer, was %r" % type(id)
- key = (cls, id)
- if key in gctx.sql.cache:
- return gctx.sql.cache[key]
- else:
- return cls.sql_fetch_where1(gctx, "%s = %%s" % cls.sql_template.index, (id,))
-
- @classmethod
- def sql_fetch_where1(cls, gctx, where, args = None, also_from = None):
- """
- Fetch one object from SQL, based on an arbitrary SQL WHERE expression.
- """
- results = cls.sql_fetch_where(gctx, where, args, also_from)
- if len(results) == 0:
- return None
- elif len(results) == 1:
- return results[0]
- else:
- raise rpki.exceptions.DBConsistancyError(
- "Database contained multiple matches for %s where %s: %r" %
- (cls.__name__, where % tuple(repr(a) for a in args), results))
-
- @classmethod
- def sql_fetch_all(cls, gctx):
- """
- Fetch all objects of this type from SQL.
- """
- return cls.sql_fetch_where(gctx, None)
-
- @classmethod
- def sql_fetch_where(cls, gctx, where, args = None, also_from = None):
- """
- Fetch objects of this type matching an arbitrary SQL WHERE expression.
- """
- if where is None:
- assert args is None and also_from is None
- if cls.sql_debug:
- logger.debug("sql_fetch_where(%r)", cls.sql_template.select)
- gctx.sql.execute(cls.sql_template.select)
- else:
- query = cls.sql_template.select
- if also_from is not None:
- query += "," + also_from
- query += " WHERE " + where
- if cls.sql_debug:
- logger.debug("sql_fetch_where(%r, %r)", query, args)
- gctx.sql.execute(query, args)
- results = []
- for row in gctx.sql.fetchall():
- key = (cls, row[0])
- if key in gctx.sql.cache:
- results.append(gctx.sql.cache[key])
- else:
- results.append(cls.sql_init(gctx, row, key))
- return results
-
- @classmethod
- def sql_init(cls, gctx, row, key):
- """
- Initialize one Python object from the result of a SQL query.
- """
- self = cls()
- self.gctx = gctx
- self.sql_decode(dict(zip(cls.sql_template.columns, row)))
- gctx.sql.cache[key] = self
- self.sql_in_db = True
- self.sql_fetch_hook()
- return self
-
- def sql_mark_dirty(self):
- """
- Mark this object as needing to be written back to SQL.
- """
- if self.sql_cache_debug and not self.sql_is_dirty:
- logger.debug("Marking %r SQL dirty", self)
- self.gctx.sql.dirty.add(self)
-
- def sql_mark_clean(self):
- """
- Mark this object as not needing to be written back to SQL.
- """
- if self.sql_cache_debug and self.sql_is_dirty:
- logger.debug("Marking %r SQL clean", self)
- self.gctx.sql.dirty.discard(self)
-
- @property
- def sql_is_dirty(self):
- """
- Query whether this object needs to be written back to SQL.
- """
- return self in self.gctx.sql.dirty
-
- def sql_mark_deleted(self):
- """
- Mark this object as needing to be deleted in SQL.
- """
- self.sql_deleted = True
- self.sql_mark_dirty()
-
- def sql_store(self):
- """
- Store this object to SQL.
- """
- args = self.sql_encode()
- if not self.sql_in_db:
- if self.sql_debug:
- logger.debug("sql_store(%r, %r)", self.sql_template.insert, args)
- self.gctx.sql.execute(self.sql_template.insert, args)
- setattr(self, self.sql_template.index, self.gctx.sql.lastrowid())
- self.gctx.sql.cache[(self.__class__, self.gctx.sql.lastrowid())] = self
- self.sql_insert_hook()
- else:
- if self.sql_debug:
- logger.debug("sql_store(%r, %r)", self.sql_template.update, args)
- self.gctx.sql.execute(self.sql_template.update, args)
- self.sql_update_hook()
- key = (self.__class__, getattr(self, self.sql_template.index))
- assert key in self.gctx.sql.cache and self.gctx.sql.cache[key] == self
- self.sql_mark_clean()
- self.sql_in_db = True
-
- def sql_delete(self):
- """
- Delete this object from SQL.
- """
- if self.sql_in_db:
- id = getattr(self, self.sql_template.index) # pylint: disable=W0622
- if self.sql_debug:
- logger.debug("sql_delete(%r, %r)", self.sql_template.delete, id)
- self.sql_delete_hook()
- self.gctx.sql.execute(self.sql_template.delete, (id,))
- key = (self.__class__, id)
- if self.gctx.sql.cache.get(key) == self:
- del self.gctx.sql.cache[key]
- self.sql_in_db = False
- self.sql_mark_clean()
-
- def sql_encode(self):
- """
- Convert object attributes into a dict for use with canned SQL
- queries. This is a default version that assumes a one-to-one
- mapping between column names in SQL and attribute names in Python.
- If you need something fancier, override this.
- """
- d = dict((a, getattr(self, a, None)) for a in self.sql_template.columns)
- for i in self.sql_template.map:
- if d.get(i) is not None:
- d[i] = self.sql_template.map[i].to_sql(d[i])
- return d
-
- def sql_decode(self, vals):
- """
- Initialize an object with values returned by self.sql_fetch().
- This is a default version that assumes a one-to-one mapping
- between column names in SQL and attribute names in Python. If you
- need something fancier, override this.
- """
- for a in self.sql_template.columns:
- if vals.get(a) is not None and a in self.sql_template.map:
- setattr(self, a, self.sql_template.map[a].from_sql(vals[a]))
- else:
- setattr(self, a, vals[a])
-
- def sql_fetch_hook(self):
- """
- Customization hook.
- """
- pass
-
- def sql_insert_hook(self):
- """
- Customization hook.
- """
- pass
-
- def sql_update_hook(self):
- """
- Customization hook.
- """
- self.sql_delete_hook()
- self.sql_insert_hook()
-
- def sql_delete_hook(self):
- """
- Customization hook.
- """
- pass
-
-
-def cache_reference(func):
- """
- Decorator for use with property methods which just do an SQL lookup based on an ID.
- Check for an existing reference to the object, just return that if we find it,
- otherwise perform the SQL lookup.
-
- Not 100% certain this is a good idea, but I //think// it should work well with the
- current weak reference SQL cache, so long as we create no circular references.
- So don't do that.
- """
-
- attr_name = "_" + func.__name__
-
- def wrapped(self):
- try:
- value = getattr(self, attr_name)
- assert value is not None
- except AttributeError:
- value = func(self)
- if value is not None:
- setattr(self, attr_name, value)
- return value
-
- wrapped.__name__ = func.__name__
- wrapped.__doc__ = func.__doc__
- wrapped.__dict__.update(func.__dict__)
-
- return wrapped
diff --git a/rpki/sql_schemas.py b/rpki/sql_schemas.py
deleted file mode 100644
index 07037970..00000000
--- a/rpki/sql_schemas.py
+++ /dev/null
@@ -1,319 +0,0 @@
-# Automatically generated, do not edit.
-
-## @var rpkid
-## SQL schema rpkid
-rpkid = '''-- $Id: rpkid.sql 5845 2014-05-29 22:31:15Z sra $
-
--- Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- Copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- SQL objects needed by the RPKI engine (rpkid.py).
-
--- DROP TABLE commands must be in correct (reverse dependency) order
--- to satisfy FOREIGN KEY constraints.
-
-DROP TABLE IF EXISTS ee_cert;
-DROP TABLE IF EXISTS ghostbuster;
-DROP TABLE IF EXISTS roa_prefix;
-DROP TABLE IF EXISTS roa;
-DROP TABLE IF EXISTS revoked_cert;
-DROP TABLE IF EXISTS child_cert;
-DROP TABLE IF EXISTS child;
-DROP TABLE IF EXISTS ca_detail;
-DROP TABLE IF EXISTS ca;
-DROP TABLE IF EXISTS parent;
-DROP TABLE IF EXISTS repository;
-DROP TABLE IF EXISTS bsc;
-DROP TABLE IF EXISTS self;
-
-CREATE TABLE self (
- self_id SERIAL NOT NULL,
- self_handle VARCHAR(255) NOT NULL,
- use_hsm BOOLEAN NOT NULL DEFAULT FALSE,
- crl_interval BIGINT UNSIGNED,
- regen_margin BIGINT UNSIGNED,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- PRIMARY KEY (self_id),
- UNIQUE (self_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE bsc (
- bsc_id SERIAL NOT NULL,
- bsc_handle VARCHAR(255) NOT NULL,
- private_key_id LONGBLOB,
- pkcs10_request LONGBLOB,
- hash_alg ENUM ('sha256'),
- signing_cert LONGBLOB,
- signing_cert_crl LONGBLOB,
- self_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (bsc_id),
- CONSTRAINT bsc_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, bsc_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE repository (
- repository_id SERIAL NOT NULL,
- repository_handle VARCHAR(255) NOT NULL,
- peer_contact_uri TEXT,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- bsc_id BIGINT UNSIGNED NOT NULL,
- self_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (repository_id),
- CONSTRAINT repository_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT repository_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- UNIQUE (self_id, repository_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE parent (
- parent_id SERIAL NOT NULL,
- parent_handle VARCHAR(255) NOT NULL,
- bpki_cms_cert LONGBLOB,
- bpki_cms_glue LONGBLOB,
- peer_contact_uri TEXT,
- sia_base TEXT,
- sender_name TEXT,
- recipient_name TEXT,
- last_cms_timestamp DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- bsc_id BIGINT UNSIGNED NOT NULL,
- repository_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (parent_id),
- CONSTRAINT parent_repository_id
- FOREIGN KEY (repository_id) REFERENCES repository (repository_id) ON DELETE CASCADE,
- CONSTRAINT parent_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- CONSTRAINT parent_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, parent_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE ca (
- ca_id SERIAL NOT NULL,
- last_crl_sn BIGINT UNSIGNED NOT NULL,
- last_manifest_sn BIGINT UNSIGNED NOT NULL,
- next_manifest_update DATETIME,
- next_crl_update DATETIME,
- last_issued_sn BIGINT UNSIGNED NOT NULL,
- sia_uri TEXT,
- parent_resource_class TEXT,
- parent_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ca_id),
- CONSTRAINT ca_parent_id
- FOREIGN KEY (parent_id) REFERENCES parent (parent_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ca_detail (
- ca_detail_id SERIAL NOT NULL,
- public_key LONGBLOB,
- private_key_id LONGBLOB,
- latest_crl LONGBLOB,
- crl_published DATETIME,
- latest_ca_cert LONGBLOB,
- manifest_private_key_id LONGBLOB,
- manifest_public_key LONGBLOB,
- latest_manifest_cert LONGBLOB,
- latest_manifest LONGBLOB,
- manifest_published DATETIME,
- state ENUM ('pending', 'active', 'deprecated', 'revoked') NOT NULL,
- ca_cert_uri TEXT,
- ca_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ca_detail_id),
- CONSTRAINT ca_detail_ca_id
- FOREIGN KEY (ca_id) REFERENCES ca (ca_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE child (
- child_id SERIAL NOT NULL,
- child_handle VARCHAR(255) NOT NULL,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- bsc_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (child_id),
- CONSTRAINT child_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- CONSTRAINT child_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, child_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE child_cert (
- child_cert_id SERIAL NOT NULL,
- cert LONGBLOB NOT NULL,
- published DATETIME,
- ski TINYBLOB NOT NULL,
- child_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (child_cert_id),
- CONSTRAINT child_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE,
- CONSTRAINT child_cert_child_id
- FOREIGN KEY (child_id) REFERENCES child (child_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE revoked_cert (
- revoked_cert_id SERIAL NOT NULL,
- serial BIGINT UNSIGNED NOT NULL,
- revoked DATETIME NOT NULL,
- expires DATETIME NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (revoked_cert_id),
- CONSTRAINT revoked_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE roa (
- roa_id SERIAL NOT NULL,
- asn BIGINT UNSIGNED NOT NULL,
- cert LONGBLOB NOT NULL,
- roa LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (roa_id),
- CONSTRAINT roa_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT roa_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE roa_prefix (
- prefix VARCHAR(40) NOT NULL,
- prefixlen TINYINT UNSIGNED NOT NULL,
- max_prefixlen TINYINT UNSIGNED NOT NULL,
- version TINYINT UNSIGNED NOT NULL,
- roa_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (roa_id, prefix, prefixlen, max_prefixlen),
- CONSTRAINT roa_prefix_roa_id
- FOREIGN KEY (roa_id) REFERENCES roa (roa_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ghostbuster (
- ghostbuster_id SERIAL NOT NULL,
- vcard LONGBLOB NOT NULL,
- cert LONGBLOB NOT NULL,
- ghostbuster LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ghostbuster_id),
- CONSTRAINT ghostbuster_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT ghostbuster_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ee_cert (
- ee_cert_id SERIAL NOT NULL,
- ski BINARY(20) NOT NULL,
- cert LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ee_cert_id),
- CONSTRAINT ee_cert_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT ee_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
--- Local Variables:
--- indent-tabs-mode: nil
--- End:
-'''
-
-## @var pubd
-## SQL schema pubd
-pubd = '''-- $Id: pubd.sql 5757 2014-04-05 22:42:12Z sra $
-
--- Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- Copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- SQL objects needed by pubd.py.
-
--- The config table is weird because we're really only using it
--- to store one BPKI CRL, but putting this here lets us use a lot of
--- existing machinery and the alternatives are whacky in other ways.
-
-DROP TABLE IF EXISTS client;
-DROP TABLE IF EXISTS config;
-
-CREATE TABLE config (
- config_id SERIAL NOT NULL,
- bpki_crl LONGBLOB,
- PRIMARY KEY (config_id)
-) ENGINE=InnoDB;
-
-CREATE TABLE client (
- client_id SERIAL NOT NULL,
- client_handle VARCHAR(255) NOT NULL,
- base_uri TEXT,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- PRIMARY KEY (client_id),
- UNIQUE (client_handle)
-) ENGINE=InnoDB;
-
--- Local Variables:
--- indent-tabs-mode: nil
--- End:
-'''
-
diff --git a/rpki/sundial.py b/rpki/sundial.py
index 7be122c8..b788940d 100644
--- a/rpki/sundial.py
+++ b/rpki/sundial.py
@@ -48,242 +48,289 @@ import datetime as pydatetime
import re
def now():
- """
- Get current timestamp.
- """
- return datetime.utcnow()
-
-class ParseFailure(Exception):
- """
- Parse failure constructing timedelta.
- """
-
-class datetime(pydatetime.datetime):
- """
- RPKI extensions to standard datetime.datetime class. All work here
- is in UTC, so we use naive datetime objects.
- """
-
- def totimestamp(self):
"""
- Convert to seconds from epoch (like time.time()). Conversion
- method is a bit silly, but avoids time module timezone whackiness.
+ Get current timestamp.
"""
- return int(self.strftime("%s"))
- @classmethod
- def fromXMLtime(cls, x):
+ return datetime.utcnow()
+
+class ParseFailure(Exception):
"""
- Convert from XML time representation.
+ Parse failure constructing timedelta.
"""
- if x is None:
- return None
- else:
- return cls.strptime(x, "%Y-%m-%dT%H:%M:%SZ")
- def toXMLtime(self):
+class datetime(pydatetime.datetime):
"""
- Convert to XML time representation.
+ RPKI extensions to standard datetime.datetime class. All work here
+ is in UTC, so we use naive datetime objects.
"""
- return self.strftime("%Y-%m-%dT%H:%M:%SZ")
- def __str__(self):
- return self.toXMLtime()
+ def totimestamp(self):
+ """
+ Convert to seconds from epoch (like time.time()). Conversion
+ method is a bit silly, but avoids time module timezone whackiness.
+ """
- @classmethod
- def from_datetime(cls, x):
- """
- Convert a datetime.datetime object into this subclass. This is
- whacky due to the weird constructors for datetime.
- """
- return cls.combine(x.date(), x.time())
+ return int(self.strftime("%s"))
- def to_datetime(self):
- """
- Convert to a datetime.datetime object. In most cases this
- shouldn't be necessary, but convincing SQL interfaces to use
- subclasses of datetime can be hard.
- """
- return pydatetime.datetime(year = self.year, month = self.month, day = self.day,
- hour = self.hour, minute = self.minute, second = self.second,
- microsecond = 0, tzinfo = None)
+ @classmethod
+ def fromXMLtime(cls, x):
+ """
+ Convert from XML time representation.
+ """
+ if x is None:
+ return None
+ else:
+ return cls.strptime(x, "%Y-%m-%dT%H:%M:%SZ")
+
+ def toXMLtime(self):
+ """
+ Convert to XML time representation.
+ """
- @classmethod
- def fromOpenSSL(cls, x):
- """
- Convert from the format OpenSSL's command line tool uses into this
- subclass. May require rewriting if we run into locale problems.
- """
- if x.startswith("notBefore=") or x.startswith("notAfter="):
- x = x.partition("=")[2]
- return cls.strptime(x, "%b %d %H:%M:%S %Y GMT")
+ return self.strftime("%Y-%m-%dT%H:%M:%SZ")
- @classmethod
- def from_sql(cls, x):
- """
- Convert from SQL storage format.
- """
- return cls.from_datetime(x)
+ def __str__(self):
+ return self.toXMLtime()
- def to_sql(self):
- """
- Convert to SQL storage format.
- """
- return self.to_datetime()
+ @classmethod
+ def from_datetime(cls, x):
+ """
+ Convert a datetime.datetime object into this subclass. This is
+ whacky due to the weird constructors for datetime.
+ """
- def later(self, other):
- """
- Return the later of two timestamps.
- """
- return other if other > self else self
+ return cls.combine(x.date(), x.time())
- def earlier(self, other):
- """
- Return the earlier of two timestamps.
- """
- return other if other < self else self
+ def to_datetime(self):
+ """
+ Convert to a datetime.datetime object. In most cases this
+ shouldn't be necessary, but convincing SQL interfaces to use
+ subclasses of datetime can be hard.
+ """
- def __add__(self, y): return _cast(pydatetime.datetime.__add__(self, y))
- def __radd__(self, y): return _cast(pydatetime.datetime.__radd__(self, y))
- def __rsub__(self, y): return _cast(pydatetime.datetime.__rsub__(self, y))
- def __sub__(self, y): return _cast(pydatetime.datetime.__sub__(self, y))
+ return pydatetime.datetime(year = self.year, month = self.month, day = self.day,
+ hour = self.hour, minute = self.minute, second = self.second,
+ microsecond = 0, tzinfo = None)
- @classmethod
- def DateTime_or_None(cls, s):
- """
- MySQLdb converter. Parse as this class if we can, let the default
- MySQLdb DateTime_or_None() converter deal with failure cases.
- """
- for sep in " T":
- d, _, t = s.partition(sep) # pylint: disable=W0612
- if t:
- try:
- return cls(*[int(x) for x in d.split("-") + t.split(":")])
- except: # pylint: disable=W0702
- break
+ @classmethod
+ def fromOpenSSL(cls, x):
+ """
+ Convert from the format OpenSSL's command line tool uses into this
+ subclass. May require rewriting if we run into locale problems.
+ """
+
+ if x.startswith("notBefore=") or x.startswith("notAfter="):
+ x = x.partition("=")[2]
+ return cls.strptime(x, "%b %d %H:%M:%S %Y GMT")
+
+ @classmethod
+ def from_sql(cls, x):
+ """
+ Convert from SQL storage format.
+ """
+
+ return cls.from_datetime(x)
+
+ def to_sql(self):
+ """
+ Convert to SQL storage format.
+ """
+
+ return self.to_datetime()
+
+ def later(self, other):
+ """
+ Return the later of two timestamps.
+ """
+
+ return other if other > self else self
+
+ def earlier(self, other):
+ """
+ Return the earlier of two timestamps.
+ """
+
+ return other if other < self else self
+
+ def __add__(self, y):
+ return _cast(pydatetime.datetime.__add__(self, y))
+
+ def __radd__(self, y):
+ return _cast(pydatetime.datetime.__radd__(self, y))
+
+ def __rsub__(self, y):
+ return _cast(pydatetime.datetime.__rsub__(self, y))
- from rpki.mysql_import import MySQLdb
- return MySQLdb.times.DateTime_or_None(s)
+ def __sub__(self, y):
+ return _cast(pydatetime.datetime.__sub__(self, y))
+
+ @classmethod
+ def DateTime_or_None(cls, s):
+ """
+ MySQLdb converter. Parse as this class if we can, let the default
+ MySQLdb DateTime_or_None() converter deal with failure cases.
+ """
+
+ for sep in " T":
+ d, _, t = s.partition(sep) # pylint: disable=W0612
+ if t:
+ try:
+ return cls(*[int(x) for x in d.split("-") + t.split(":")])
+ except:
+ break
+
+ from rpki.mysql_import import MySQLdb
+ return MySQLdb.times.DateTime_or_None(s)
class timedelta(pydatetime.timedelta):
- """
- Timedelta with text parsing. This accepts two input formats:
-
- - A simple integer, indicating a number of seconds.
-
- - A string of the form "uY vW wD xH yM zS" where u, v, w, x, y, and z
- are integers and Y, W, D, H, M, and S indicate years, weeks, days,
- hours, minutes, and seconds. All of the fields are optional, but
- at least one must be specified. Eg,"3D4H" means "three days plus
- four hours".
-
- There is no "months" format, because the definition of a month is too
- fuzzy to be useful (what day is six months from August 30th?)
-
- Similarly, the "years" conversion may produce surprising results, as
- "one year" in conventional English does not refer to a fixed interval
- but rather a fixed (and in some cases undefined) offset within the
- Gregorian calendar (what day is one year from February 29th?) 1Y as
- implemented by this code refers to a specific number of seconds.
- If you mean 365 days or 52 weeks, say that instead.
- """
-
- ## @var regexp
- # Hideously ugly regular expression to parse the complex text form.
- # Tags are intended for use with re.MatchObject.groupdict() and map
- # directly to the keywords expected by the timedelta constructor.
-
- regexp = re.compile("\\s*".join(("^",
- "(?:(?P<years>\\d+)Y)?",
- "(?:(?P<weeks>\\d+)W)?",
- "(?:(?P<days>\\d+)D)?",
- "(?:(?P<hours>\\d+)H)?",
- "(?:(?P<minutes>\\d+)M)?",
- "(?:(?P<seconds>\\d+)S)?",
- "$")),
- re.I)
-
- ## @var years_to_seconds
- # Conversion factor from years to seconds (value furnished by the
- # "units" program).
-
- years_to_seconds = 31556926
-
- @classmethod
- def parse(cls, arg):
- """
- Parse text into a timedelta object.
"""
- if not isinstance(arg, str):
- return cls(seconds = arg)
- elif arg.isdigit():
- return cls(seconds = int(arg))
- else:
- match = cls.regexp.match(arg)
- if match:
- #return cls(**dict((k, int(v)) for (k, v) in match.groupdict().items() if v is not None))
- d = match.groupdict("0")
- for k, v in d.iteritems():
- d[k] = int(v)
- d["days"] += d.pop("weeks") * 7
- d["seconds"] += d.pop("years") * cls.years_to_seconds
- return cls(**d)
- else:
- raise ParseFailure("Couldn't parse timedelta %r" % (arg,))
-
- def convert_to_seconds(self):
- """
- Convert a timedelta interval to seconds.
+ Timedelta with text parsing. This accepts two input formats:
+
+ - A simple integer, indicating a number of seconds.
+
+ - A string of the form "uY vW wD xH yM zS" where u, v, w, x, y, and z
+ are integers and Y, W, D, H, M, and S indicate years, weeks, days,
+ hours, minutes, and seconds. All of the fields are optional, but
+ at least one must be specified. Eg,"3D4H" means "three days plus
+ four hours".
+
+ There is no "months" format, because the definition of a month is too
+ fuzzy to be useful (what day is six months from August 30th?)
+
+ Similarly, the "years" conversion may produce surprising results, as
+ "one year" in conventional English does not refer to a fixed interval
+ but rather a fixed (and in some cases undefined) offset within the
+ Gregorian calendar (what day is one year from February 29th?) 1Y as
+ implemented by this code refers to a specific number of seconds.
+ If you mean 365 days or 52 weeks, say that instead.
"""
- return self.days * 24 * 60 * 60 + self.seconds
- @classmethod
- def fromtimedelta(cls, x):
+ ## @var regexp
+ # Hideously ugly regular expression to parse the complex text form.
+ # Tags are intended for use with re.MatchObject.groupdict() and map
+ # directly to the keywords expected by the timedelta constructor.
+
+ regexp = re.compile("\\s*".join(("^",
+ "(?:(?P<years>\\d+)Y)?",
+ "(?:(?P<weeks>\\d+)W)?",
+ "(?:(?P<days>\\d+)D)?",
+ "(?:(?P<hours>\\d+)H)?",
+ "(?:(?P<minutes>\\d+)M)?",
+ "(?:(?P<seconds>\\d+)S)?",
+ "$")),
+ re.I)
+
+ ## @var years_to_seconds
+ # Conversion factor from years to seconds (value furnished by the
+ # "units" program).
+
+ years_to_seconds = 31556926
+
+ @classmethod
+ def parse(cls, arg):
+ """
+ Parse text into a timedelta object.
+ """
+
+ if not isinstance(arg, (str, unicode)):
+ return cls(seconds = arg)
+ elif arg.isdigit():
+ return cls(seconds = int(arg))
+ else:
+ match = cls.regexp.match(arg)
+ if match:
+ #return cls(**dict((k, int(v)) for (k, v) in match.groupdict().items() if v is not None))
+ d = match.groupdict("0")
+ for k, v in d.iteritems():
+ d[k] = int(v)
+ d["days"] += d.pop("weeks") * 7
+ d["seconds"] += d.pop("years") * cls.years_to_seconds
+ return cls(**d)
+ else:
+ raise ParseFailure("Couldn't parse timedelta %r" % (arg,))
+
+ def convert_to_seconds(self):
+ """
+ Convert a timedelta interval to seconds.
+ """
+
+ return self.days * 24 * 60 * 60 + self.seconds
+
+ @classmethod
+ def fromtimedelta(cls, x):
+ """
+ Convert a datetime.timedelta object into this subclass.
+ """
+
+ return cls(days = x.days, seconds = x.seconds, microseconds = x.microseconds)
+
+ def __abs__(self):
+ return _cast(pydatetime.timedelta.__abs__(self))
+
+ def __add__(self, x):
+ return _cast(pydatetime.timedelta.__add__(self, x))
+
+ def __div__(self, x):
+ return _cast(pydatetime.timedelta.__div__(self, x))
+
+ def __floordiv__(self, x):
+ return _cast(pydatetime.timedelta.__floordiv__(self, x))
+
+ def __mul__(self, x):
+ return _cast(pydatetime.timedelta.__mul__(self, x))
+
+ def __neg__(self):
+ return _cast(pydatetime.timedelta.__neg__(self))
+
+ def __pos__(self):
+ return _cast(pydatetime.timedelta.__pos__(self))
+
+ def __radd__(self, x):
+ return _cast(pydatetime.timedelta.__radd__(self, x))
+
+ def __rdiv__(self, x):
+ return _cast(pydatetime.timedelta.__rdiv__(self, x))
+
+ def __rfloordiv__(self, x):
+ return _cast(pydatetime.timedelta.__rfloordiv__(self, x))
+
+ def __rmul__(self, x):
+ return _cast(pydatetime.timedelta.__rmul__(self, x))
+
+ def __rsub__(self, x):
+ return _cast(pydatetime.timedelta.__rsub__(self, x))
+
+ def __sub__(self, x):
+ return _cast(pydatetime.timedelta.__sub__(self, x))
+
+def _cast(x):
"""
- Convert a datetime.timedelta object into this subclass.
+ Cast result of arithmetic operations back into correct subtype.
"""
- return cls(days = x.days, seconds = x.seconds, microseconds = x.microseconds)
-
- def __abs__(self): return _cast(pydatetime.timedelta.__abs__(self))
- def __add__(self, x): return _cast(pydatetime.timedelta.__add__(self, x))
- def __div__(self, x): return _cast(pydatetime.timedelta.__div__(self, x))
- def __floordiv__(self, x): return _cast(pydatetime.timedelta.__floordiv__(self, x))
- def __mul__(self, x): return _cast(pydatetime.timedelta.__mul__(self, x))
- def __neg__(self): return _cast(pydatetime.timedelta.__neg__(self))
- def __pos__(self): return _cast(pydatetime.timedelta.__pos__(self))
- def __radd__(self, x): return _cast(pydatetime.timedelta.__radd__(self, x))
- def __rdiv__(self, x): return _cast(pydatetime.timedelta.__rdiv__(self, x))
- def __rfloordiv__(self, x): return _cast(pydatetime.timedelta.__rfloordiv__(self, x))
- def __rmul__(self, x): return _cast(pydatetime.timedelta.__rmul__(self, x))
- def __rsub__(self, x): return _cast(pydatetime.timedelta.__rsub__(self, x))
- def __sub__(self, x): return _cast(pydatetime.timedelta.__sub__(self, x))
-def _cast(x):
- """
- Cast result of arithmetic operations back into correct subtype.
- """
- if isinstance(x, pydatetime.datetime):
- return datetime.from_datetime(x)
- if isinstance(x, pydatetime.timedelta):
- return timedelta.fromtimedelta(x)
- return x
+ if isinstance(x, pydatetime.datetime):
+ return datetime.from_datetime(x)
+ if isinstance(x, pydatetime.timedelta):
+ return timedelta.fromtimedelta(x)
+ return x
if __name__ == "__main__":
- def test(t):
- print
- print "str: ", t
- print "repr: ", repr(t)
- print "seconds since epoch:", t.strftime("%s")
- print "XMLtime: ", t.toXMLtime()
- print
+ def test(t):
+ print
+ print "str: ", t
+ print "repr: ", repr(t)
+ print "seconds since epoch:", t.strftime("%s")
+ print "XMLtime: ", t.toXMLtime()
+ print
- print
- print "Testing time conversion routines"
- test(now())
- test(now() + timedelta(days = 30))
- test(now() + timedelta.parse("3d5s"))
- test(now() + timedelta.parse(" 3d 5s "))
- test(now() + timedelta.parse("1y3d5h"))
+ print
+ print "Testing time conversion routines"
+ test(now())
+ test(now() + timedelta(days = 30))
+ test(now() + timedelta.parse("3d5s"))
+ test(now() + timedelta.parse(" 3d 5s "))
+ test(now() + timedelta.parse("1y3d5h"))
diff --git a/rpki/up_down.py b/rpki/up_down.py
index 5339e9a7..e2292efb 100644
--- a/rpki/up_down.py
+++ b/rpki/up_down.py
@@ -21,520 +21,78 @@
RPKI "up-down" protocol.
"""
-import base64
import logging
-import lxml.etree
import rpki.resource_set
import rpki.x509
import rpki.exceptions
import rpki.log
-import rpki.xml_utils
import rpki.relaxng
+from lxml.etree import SubElement, tostring as ElementToString
+
logger = logging.getLogger(__name__)
-xmlns = rpki.relaxng.up_down.xmlns
-nsmap = rpki.relaxng.up_down.nsmap
+xmlns = rpki.relaxng.up_down.xmlns
+nsmap = rpki.relaxng.up_down.nsmap
+version = "1"
## @var content_type
# MIME content type to use when sending up-down queries.
-#content_type = "application/rpki-updown"
-content_type = "application/x-rpki"
+content_type = "application/rpki-updown"
+#content_type = "application/x-rpki"
## @var allowed_content_types
# MIME content types which we consider acceptable for incoming up-down
# queries.
allowed_content_types = ("application/rpki-updown", "application/x-rpki")
-class base_elt(object):
- """
- Generic PDU object.
-
- Virtual class, just provides some default methods.
- """
-
- def startElement(self, stack, name, attrs):
- """
- Ignore startElement() if there's no specific handler.
-
- Some elements have no attributes and we only care about their
- text content.
- """
- pass
-
- def endElement(self, stack, name, text):
- """
- Ignore endElement() if there's no specific handler.
-
- If we don't need to do anything else, just pop the stack.
- """
- stack.pop()
+## @var enforce_strict_up_down_xml_sender
+# Enforce strict checking of XML "sender" field in up-down protocol
- def make_elt(self, name, *attrs):
- """
- Construct a element, copying over a set of attributes.
- """
- elt = lxml.etree.Element(xmlns + name, nsmap = nsmap)
- for key in attrs:
- val = getattr(self, key, None)
- if val is not None:
- elt.set(key, str(val))
- return elt
-
- def make_b64elt(self, elt, name, value):
- """
- Construct a sub-element with Base64 text content.
- """
- if value is not None and not value.empty():
- lxml.etree.SubElement(elt, xmlns + name, nsmap = nsmap).text = value.get_Base64()
+enforce_strict_up_down_xml_sender = False
- def serve_pdu(self, q_msg, r_msg, child, callback, errback):
- """
- Default PDU handler to catch unexpected types.
- """
- raise rpki.exceptions.BadQuery("Unexpected query type %s" % q_msg.type)
+tag_certificate = xmlns + "certificate"
+tag_class = xmlns + "class"
+tag_description = xmlns + "description"
+tag_issuer = xmlns + "issuer"
+tag_key = xmlns + "key"
+tag_message = xmlns + "message"
+tag_request = xmlns + "request"
+tag_status = xmlns + "status"
- def check_response(self):
- """
- Placeholder for response checking.
- """
- pass
class multi_uri(list):
- """
- Container for a set of URIs.
- """
-
- def __init__(self, ini):
- """
- Initialize a set of URIs, which includes basic some syntax checking.
- """
- list.__init__(self)
- if isinstance(ini, (list, tuple)):
- self[:] = ini
- elif isinstance(ini, str):
- self[:] = ini.split(",")
- for s in self:
- if s.strip() != s or "://" not in s:
- raise rpki.exceptions.BadURISyntax("Bad URI \"%s\"" % s)
- else:
- raise TypeError
-
- def __str__(self):
"""
- Convert a multi_uri back to a string representation.
+ Container for a set of URIs. This probably could be simplified.
"""
- return ",".join(self)
- def rsync(self):
- """
- Find first rsync://... URI in self.
- """
- for s in self:
- if s.startswith("rsync://"):
- return s
- return None
+ def __init__(self, ini):
+ list.__init__(self)
+ if isinstance(ini, (list, tuple)):
+ self[:] = ini
+ elif isinstance(ini, str):
+ self[:] = ini.split(",")
+ for s in self:
+ if s.strip() != s or "://" not in s:
+ raise rpki.exceptions.BadURISyntax("Bad URI \"%s\"" % s)
+ else:
+ raise TypeError
-class certificate_elt(base_elt):
- """
- Up-Down protocol representation of an issued certificate.
- """
+ def __str__(self):
+ return ",".join(self)
- def startElement(self, stack, name, attrs):
- """
- Handle attributes of <certificate/> element.
- """
- assert name == "certificate", "Unexpected name %s, stack %s" % (name, stack)
- self.cert_url = multi_uri(attrs["cert_url"])
- self.req_resource_set_as = rpki.resource_set.resource_set_as(attrs.get("req_resource_set_as"))
- self.req_resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs.get("req_resource_set_ipv4"))
- self.req_resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs.get("req_resource_set_ipv6"))
+ def rsync(self):
+ """
+ Find first rsync://... URI in self.
+ """
- def endElement(self, stack, name, text):
- """
- Handle text content of a <certificate/> element.
- """
- assert name == "certificate", "Unexpected name %s, stack %s" % (name, stack)
- self.cert = rpki.x509.X509(Base64 = text)
- stack.pop()
+ for s in self:
+ if s.startswith("rsync://"):
+ return s
+ return None
- def toXML(self):
- """
- Generate a <certificate/> element.
- """
- elt = self.make_elt("certificate", "cert_url",
- "req_resource_set_as", "req_resource_set_ipv4", "req_resource_set_ipv6")
- elt.text = self.cert.get_Base64()
- return elt
-
-class class_elt(base_elt):
- """
- Up-Down protocol representation of a resource class.
- """
-
- issuer = None
-
- def __init__(self):
- """
- Initialize class_elt.
- """
- base_elt.__init__(self)
- self.certs = []
-
- def startElement(self, stack, name, attrs):
- """
- Handle <class/> elements and their children.
- """
- if name == "certificate":
- cert = certificate_elt()
- self.certs.append(cert)
- stack.append(cert)
- cert.startElement(stack, name, attrs)
- elif name != "issuer":
- assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
- self.class_name = attrs["class_name"]
- self.cert_url = multi_uri(attrs["cert_url"])
- self.suggested_sia_head = attrs.get("suggested_sia_head")
- self.resource_set_as = rpki.resource_set.resource_set_as(attrs["resource_set_as"])
- self.resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs["resource_set_ipv4"])
- self.resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs["resource_set_ipv6"])
- self.resource_set_notafter = rpki.sundial.datetime.fromXMLtime(attrs.get("resource_set_notafter"))
-
- def endElement(self, stack, name, text):
- """
- Handle <class/> elements and their children.
- """
- if name == "issuer":
- self.issuer = rpki.x509.X509(Base64 = text)
- else:
- assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
- def toXML(self):
- """
- Generate a <class/> element.
- """
- elt = self.make_elt("class", "class_name", "cert_url", "resource_set_as",
- "resource_set_ipv4", "resource_set_ipv6",
- "resource_set_notafter", "suggested_sia_head")
- elt.extend([i.toXML() for i in self.certs])
- self.make_b64elt(elt, "issuer", self.issuer)
- return elt
-
- def to_resource_bag(self):
- """
- Build a resource_bag from from this <class/> element.
- """
- return rpki.resource_set.resource_bag(self.resource_set_as,
- self.resource_set_ipv4,
- self.resource_set_ipv6,
- self.resource_set_notafter)
-
- def from_resource_bag(self, bag):
- """
- Set resources of this class element from a resource_bag.
- """
- self.resource_set_as = bag.asn
- self.resource_set_ipv4 = bag.v4
- self.resource_set_ipv6 = bag.v6
- self.resource_set_notafter = bag.valid_until
-
-class list_pdu(base_elt):
- """
- Up-Down protocol "list" PDU.
- """
-
- def toXML(self):
- """Generate (empty) payload of "list" PDU."""
- return []
-
- def serve_pdu(self, q_msg, r_msg, child, callback, errback):
- """
- Serve one "list" PDU.
- """
-
- def handle(irdb_resources):
-
- r_msg.payload = list_response_pdu()
-
- if irdb_resources.valid_until < rpki.sundial.now():
- logger.debug("Child %s's resources expired %s", child.child_handle, irdb_resources.valid_until)
- else:
- for parent in child.parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if not ca_detail:
- logger.debug("No active ca_detail, can't issue to %s", child.child_handle)
- continue
- resources = ca_detail.latest_ca_cert.get_3779resources() & irdb_resources
- if resources.empty():
- logger.debug("No overlap between received resources and what child %s should get ([%s], [%s])",
- child.child_handle, ca_detail.latest_ca_cert.get_3779resources(), irdb_resources)
- continue
- rc = class_elt()
- rc.class_name = str(ca.ca_id)
- rc.cert_url = multi_uri(ca_detail.ca_cert_uri)
- rc.from_resource_bag(resources)
- for child_cert in child.fetch_child_certs(ca_detail = ca_detail):
- c = certificate_elt()
- c.cert_url = multi_uri(child_cert.uri)
- c.cert = child_cert.cert
- rc.certs.append(c)
- rc.issuer = ca_detail.latest_ca_cert
- r_msg.payload.classes.append(rc)
-
- callback()
-
- self.gctx.irdb_query_child_resources(child.self.self_handle, child.child_handle, handle, errback)
-
- @classmethod
- def query(cls, parent, cb, eb):
- """
- Send a "list" query to parent.
- """
- try:
- logger.info('Sending "list" request to parent %s', parent.parent_handle)
- parent.query_up_down(cls(), cb, eb)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- eb(e)
-
-class class_response_syntax(base_elt):
- """
- Syntax for Up-Down protocol "list_response" and "issue_response" PDUs.
- """
-
- def __init__(self):
- """
- Initialize class_response_syntax.
- """
- base_elt.__init__(self)
- self.classes = []
-
- def startElement(self, stack, name, attrs):
- """
- Handle "list_response" and "issue_response" PDUs.
- """
- assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
- c = class_elt()
- self.classes.append(c)
- stack.append(c)
- c.startElement(stack, name, attrs)
-
- def toXML(self):
- """Generate payload of "list_response" and "issue_response" PDUs."""
- return [c.toXML() for c in self.classes]
-
-class list_response_pdu(class_response_syntax):
- """
- Up-Down protocol "list_response" PDU.
- """
- pass
-
-class issue_pdu(base_elt):
- """
- Up-Down protocol "issue" PDU.
- """
-
- def startElement(self, stack, name, attrs):
- """
- Handle "issue" PDU.
- """
- assert name == "request", "Unexpected name %s, stack %s" % (name, stack)
- self.class_name = attrs["class_name"]
- self.req_resource_set_as = rpki.resource_set.resource_set_as(attrs.get("req_resource_set_as"))
- self.req_resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs.get("req_resource_set_ipv4"))
- self.req_resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs.get("req_resource_set_ipv6"))
-
- def endElement(self, stack, name, text):
- """
- Handle "issue" PDU.
- """
- assert name == "request", "Unexpected name %s, stack %s" % (name, stack)
- self.pkcs10 = rpki.x509.PKCS10(Base64 = text)
- stack.pop()
-
- def toXML(self):
- """
- Generate payload of "issue" PDU.
- """
- elt = self.make_elt("request", "class_name", "req_resource_set_as",
- "req_resource_set_ipv4", "req_resource_set_ipv6")
- elt.text = self.pkcs10.get_Base64()
- return [elt]
-
- def serve_pdu(self, q_msg, r_msg, child, callback, errback):
- """
- Serve one issue request PDU.
- """
-
- # Subsetting not yet implemented, this is the one place where we
- # have to handle it, by reporting that we're lame.
-
- if self.req_resource_set_as or \
- self.req_resource_set_ipv4 or \
- self.req_resource_set_ipv6:
- raise rpki.exceptions.NotImplementedYet("req_* attributes not implemented yet, sorry")
-
- # Check the request
- self.pkcs10.check_valid_request_ca()
- ca = child.ca_from_class_name(self.class_name)
- ca_detail = ca.active_ca_detail
- if ca_detail is None:
- raise rpki.exceptions.NoActiveCA("No active CA for class %r" % self.class_name)
-
- # Check current cert, if any
-
- def got_resources(irdb_resources):
-
- if irdb_resources.valid_until < rpki.sundial.now():
- raise rpki.exceptions.IRDBExpired("IRDB entry for child %s expired %s" % (
- child.child_handle, irdb_resources.valid_until))
-
- resources = irdb_resources & ca_detail.latest_ca_cert.get_3779resources()
- resources.valid_until = irdb_resources.valid_until
- req_key = self.pkcs10.getPublicKey()
- req_sia = self.pkcs10.get_SIA()
- child_cert = child.fetch_child_certs(ca_detail = ca_detail, ski = req_key.get_SKI(), unique = True)
-
- # Generate new cert or regenerate old one if necessary
-
- publisher = rpki.rpkid.publication_queue()
-
- if child_cert is None:
- child_cert = ca_detail.issue(
- ca = ca,
- child = child,
- subject_key = req_key,
- sia = req_sia,
- resources = resources,
- publisher = publisher)
- else:
- child_cert = child_cert.reissue(
- ca_detail = ca_detail,
- sia = req_sia,
- resources = resources,
- publisher = publisher)
-
- def done():
- c = certificate_elt()
- c.cert_url = multi_uri(child_cert.uri)
- c.cert = child_cert.cert
- rc = class_elt()
- rc.class_name = self.class_name
- rc.cert_url = multi_uri(ca_detail.ca_cert_uri)
- rc.from_resource_bag(resources)
- rc.certs.append(c)
- rc.issuer = ca_detail.latest_ca_cert
- r_msg.payload = issue_response_pdu()
- r_msg.payload.classes.append(rc)
- callback()
-
- self.gctx.sql.sweep()
- assert child_cert and child_cert.sql_in_db
- publisher.call_pubd(done, errback)
-
- self.gctx.irdb_query_child_resources(child.self.self_handle, child.child_handle, got_resources, errback)
-
- @classmethod
- def query(cls, parent, ca, ca_detail, callback, errback):
- """
- Send an "issue" request to parent associated with ca.
- """
- assert ca_detail is not None and ca_detail.state in ("pending", "active")
- self = cls()
- self.class_name = ca.parent_resource_class
- self.pkcs10 = rpki.x509.PKCS10.create(
- keypair = ca_detail.private_key_id,
- is_ca = True,
- caRepository = ca.sia_uri,
- rpkiManifest = ca_detail.manifest_uri)
- logger.info('Sending "issue" request to parent %s', parent.parent_handle)
- parent.query_up_down(self, callback, errback)
-
-class issue_response_pdu(class_response_syntax):
- """
- Up-Down protocol "issue_response" PDU.
- """
-
- def check_response(self):
- """
- Check whether this looks like a reasonable issue_response PDU.
- XML schema should be tighter for this response.
- """
- if len(self.classes) != 1 or len(self.classes[0].certs) != 1:
- raise rpki.exceptions.BadIssueResponse
-
-class revoke_syntax(base_elt):
- """
- Syntax for Up-Down protocol "revoke" and "revoke_response" PDUs.
- """
-
- def startElement(self, stack, name, attrs):
- """Handle "revoke" PDU."""
- self.class_name = attrs["class_name"]
- self.ski = attrs["ski"]
-
- def toXML(self):
- """Generate payload of "revoke" PDU."""
- return [self.make_elt("key", "class_name", "ski")]
-
-class revoke_pdu(revoke_syntax):
- """
- Up-Down protocol "revoke" PDU.
- """
-
- def get_SKI(self):
- """
- Convert g(SKI) encoding from PDU back to raw SKI.
- """
- return base64.urlsafe_b64decode(self.ski + "=")
-
- def serve_pdu(self, q_msg, r_msg, child, cb, eb):
- """
- Serve one revoke request PDU.
- """
-
- def done():
- r_msg.payload = revoke_response_pdu()
- r_msg.payload.class_name = self.class_name
- r_msg.payload.ski = self.ski
- cb()
-
- ca = child.ca_from_class_name(self.class_name)
- publisher = rpki.rpkid.publication_queue()
- for ca_detail in ca.ca_details:
- for child_cert in child.fetch_child_certs(ca_detail = ca_detail, ski = self.get_SKI()):
- child_cert.revoke(publisher = publisher)
- self.gctx.sql.sweep()
- publisher.call_pubd(done, eb)
-
- @classmethod
- def query(cls, ca, gski, cb, eb):
- """
- Send a "revoke" request for certificate(s) named by gski to parent associated with ca.
- """
- parent = ca.parent
- self = cls()
- self.class_name = ca.parent_resource_class
- self.ski = gski
- logger.info('Sending "revoke" request for SKI %s to parent %s', gski, parent.parent_handle)
- parent.query_up_down(self, cb, eb)
-
-class revoke_response_pdu(revoke_syntax):
- """
- Up-Down protocol "revoke_response" PDU.
- """
-
- pass
-
-class error_response_pdu(base_elt):
- """
- Up-Down protocol "error_response" PDU.
- """
-
- codes = {
+error_response_codes = {
1101 : "Already processing request",
1102 : "Version number error",
1103 : "Unrecognised request type",
@@ -545,200 +103,71 @@ class error_response_pdu(base_elt):
1302 : "Revoke - no such key",
2001 : "Internal Server Error - Request not performed" }
- exceptions = {
- rpki.exceptions.NoActiveCA : 1202,
- (rpki.exceptions.ClassNameUnknown, revoke_pdu) : 1301,
- rpki.exceptions.ClassNameUnknown : 1201,
- (rpki.exceptions.NotInDatabase, revoke_pdu) : 1302 }
- def __init__(self, exception = None, request_payload = None):
- """
- Initialize an error_response PDU from an exception object.
- """
- base_elt.__init__(self)
- if exception is not None:
- logger.debug("Constructing up-down error response from exception %s", exception)
- exception_type = type(exception)
- request_type = None if request_payload is None else type(request_payload)
- logger.debug("Constructing up-down error response: exception_type %s, request_type %s",
- exception_type, request_type)
- if False:
- self.status = self.exceptions.get((exception_type, request_type),
- self.exceptions.get(exception_type, 2001))
- else:
- self.status = self.exceptions.get((exception_type, request_type))
- if self.status is None:
- logger.debug("No request-type-specific match, trying exception match")
- self.status = self.exceptions.get(exception_type)
- if self.status is None:
- logger.debug("No exception match either, defaulting")
- self.status = 2001
- self.description = str(exception)
- logger.debug("Chosen status code: %s", self.status)
-
- def endElement(self, stack, name, text):
- """
- Handle "error_response" PDU.
- """
- if name == "status":
- code = int(text)
- if code not in self.codes:
- raise rpki.exceptions.BadStatusCode("%s is not a known status code" % code)
- self.status = code
- elif name == "description":
- self.description = text
- else:
- assert name == "message", "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
- stack[-1].endElement(stack, name, text)
-
- def toXML(self):
- """
- Generate payload of "error_response" PDU.
- """
- assert self.status in self.codes
- elt = self.make_elt("status")
- elt.text = str(self.status)
- payload = [elt]
- if self.description:
- elt = self.make_elt("description")
- elt.text = str(self.description)
- elt.set("{http://www.w3.org/XML/1998/namespace}lang", "en-US")
- payload.append(elt)
- return payload
-
- def check_response(self):
+exception_map = {
+ rpki.exceptions.NoActiveCA : 1202,
+ (rpki.exceptions.ClassNameUnknown, "revoke") : 1301,
+ rpki.exceptions.ClassNameUnknown : 1201,
+ (rpki.exceptions.NotInDatabase, "revoke") : 1302 }
+
+
+def check_response(r_msg, q_type):
"""
- Handle an error response. For now, just raise an exception,
- perhaps figure out something more clever to do later.
+ Additional checks beyond the XML schema for whether this looks like
+ a reasonable up-down response message.
"""
- raise rpki.exceptions.UpstreamError(self.codes[self.status])
-class message_pdu(base_elt):
- """
- Up-Down protocol message wrapper PDU.
- """
+ r_type = r_msg.get("type")
- version = 1
+ if r_type == "error_response":
+ raise rpki.exceptions.UpstreamError(error_response_codes[int(r_msg.findtext(tag_status))])
- name2type = {
- "list" : list_pdu,
- "list_response" : list_response_pdu,
- "issue" : issue_pdu,
- "issue_response" : issue_response_pdu,
- "revoke" : revoke_pdu,
- "revoke_response" : revoke_response_pdu,
- "error_response" : error_response_pdu }
+ if r_type != q_type + "_response":
+ raise rpki.exceptions.UnexpectedUpDownResponse
- type2name = dict((v, k) for k, v in name2type.items())
+ if r_type == "issue_response" and (len(r_msg) != 1 or len(r_msg[0]) != 2):
+ logger.debug("Weird issue_response %r: len(r_msg) %s len(r_msg[0]) %s",
+ r_msg, len(r_msg), len(r_msg[0]) if len(r_msg) else None)
+ logger.debug("Offending message\n%s", ElementToString(r_msg))
+ raise rpki.exceptions.BadIssueResponse
- error_pdu_type = error_response_pdu
- def toXML(self):
+def generate_error_response(r_msg, status = 2001, description = None):
"""
- Generate payload of message PDU.
+ Generate an error response. If status is given, it specifies the
+ numeric code to use, otherwise we default to "internal error".
+ If description is specified, we use it as the description, otherwise
+ we just use the default string associated with status.
"""
- elt = self.make_elt("message", "version", "sender", "recipient", "type")
- elt.extend(self.payload.toXML())
- return elt
- def startElement(self, stack, name, attrs):
- """
- Handle message PDU.
+ assert status in error_response_codes
+ del r_msg[:]
+ r_msg.set("type", "error_response")
+ SubElement(r_msg, tag_status).text = str(status)
+ se = SubElement(r_msg, tag_description)
+ se.set("{http://www.w3.org/XML/1998/namespace}lang", "en-US")
+ se.text = str(description or error_response_codes[status])
- Payload of the <message/> element varies depending on the "type"
- attribute, so after some basic checks we have to instantiate the
- right class object to handle whatever kind of PDU this is.
- """
- assert name == "message", "Unexpected name %s, stack %s" % (name, stack)
- assert self.version == int(attrs["version"])
- self.sender = attrs["sender"]
- self.recipient = attrs["recipient"]
- self.type = attrs["type"]
- self.payload = self.name2type[attrs["type"]]()
- stack.append(self.payload)
-
- def __str__(self):
- """
- Convert a message PDU to a string.
- """
- return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "UTF-8")
- def serve_top_level(self, child, callback):
+def generate_error_response_from_exception(r_msg, e, q_type):
"""
- Serve one message request PDU.
+ Construct an error response from an exception. q_type
+ specifies the kind of query to which this is a response, since the
+ same exception can generate different codes in response to different
+ queries.
"""
- r_msg = message_pdu()
- r_msg.sender = self.recipient
- r_msg.recipient = self.sender
+ t = type(e)
+ code = (exception_map.get((t, q_type)) or exception_map.get(t) or 2001)
+ generate_error_response(r_msg, code, e)
- def done():
- r_msg.type = self.type2name[type(r_msg.payload)]
- callback(r_msg)
- def lose(e):
- logger.exception("Unhandled exception serving child %r", child)
- callback(self.serve_error(e))
-
- try:
- self.log_query(child)
- self.payload.serve_pdu(self, r_msg, child, done, lose)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- lose(e)
-
- def log_query(self, child):
- """
- Log query we're handling. Separate method so rootd can override.
- """
- logger.info("Serving %s query from child %s [sender %s, recipient %s]", self.type, child.child_handle, self.sender, self.recipient)
-
- def serve_error(self, exception):
- """
- Generate an error_response message PDU.
- """
- r_msg = message_pdu()
- r_msg.sender = self.recipient
- r_msg.recipient = self.sender
- r_msg.payload = self.error_pdu_type(exception, self.payload)
- r_msg.type = self.type2name[type(r_msg.payload)]
- return r_msg
-
- @classmethod
- def make_query(cls, payload, sender, recipient):
+class cms_msg(rpki.x509.XML_CMS_object):
"""
- Construct one message PDU.
+ CMS-signed up-down PDU.
"""
- assert not cls.type2name[type(payload)].endswith("_response")
- if sender is None:
- sender = "tweedledee"
- if recipient is None:
- recipient = "tweedledum"
- self = cls()
- self.sender = sender
- self.recipient = recipient
- self.payload = payload
- self.type = self.type2name[type(payload)]
- return self
-
-class sax_handler(rpki.xml_utils.sax_handler):
- """
- SAX handler for Up-Down protocol.
- """
-
- pdu = message_pdu
- name = "message"
- version = "1"
-class cms_msg(rpki.x509.XML_CMS_object):
- """
- Class to hold a CMS-signed up-down PDU.
- """
-
- encoding = "UTF-8"
- schema = rpki.relaxng.up_down
- saxify = sax_handler.saxify
- allow_extra_certs = True
- allow_extra_crls = True
+ encoding = "UTF-8"
+ schema = rpki.relaxng.up_down
+ allow_extra_certs = True
+ allow_extra_crls = True
diff --git a/rpki/x509.py b/rpki/x509.py
index a7e4d17a..0acb3859 100644
--- a/rpki/x509.py
+++ b/rpki/x509.py
@@ -47,1065 +47,1185 @@ import rpki.resource_set
import rpki.oids
import rpki.sundial
import rpki.log
-import rpki.async
import rpki.relaxng
logger = logging.getLogger(__name__)
def base64_with_linebreaks(der):
- """
- Encode DER (really, anything) as Base64 text, with linebreaks to
- keep the result (sort of) readable.
- """
- b = base64.b64encode(der)
- n = len(b)
- return "\n" + "\n".join(b[i : min(i + 64, n)] for i in xrange(0, n, 64)) + "\n"
-
-def looks_like_PEM(text):
- """
- Guess whether text looks like a PEM encoding.
- """
-
- i = text.find("-----BEGIN ")
- return i >= 0 and text.find("\n-----END ", i) > i
-
-def first_rsync_uri(xia):
- """
- Find first rsync URI in a sequence of AIA or SIA URIs.
- Returns the URI if found, otherwise None.
- """
-
- if xia is not None:
- for uri in xia:
- if uri.startswith("rsync://"):
- return uri
- return None
-
-class X501DN(object):
- """
- Class to hold an X.501 Distinguished Name.
-
- This is nothing like a complete implementation, just enough for our
- purposes. See RFC 5280 4.1.2.4 for the ASN.1 details. In brief:
-
- - A DN is a SEQUENCE OF RDNs.
-
- - A RDN is a SET OF AttributeAndValues; in practice, multi-value
- RDNs are rare, so an RDN is almost always a set with a single
- element.
-
- - An AttributeAndValue is a SEQUENCE consisting of a OID and a
- value, where a whole bunch of things including both syntax and
- semantics of the value are determined by the OID.
-
- - The value is some kind of ASN.1 string; there are far too many
- encoding options options, most of which are either strongly
- discouraged or outright forbidden by the PKIX profile, but which
- persist for historical reasons. The only ones PKIX actually
- likes are PrintableString and UTF8String, but there are nuances
- and special cases where some of the others are required.
-
- The RPKI profile further restricts DNs to a single mandatory
- CommonName attribute with a single optional SerialNumber attribute
- (not to be confused with the certificate serial number).
-
- BPKI certificates should (we hope) follow the general PKIX guideline
- but the ones we construct ourselves are likely to be relatively
- simple.
- """
-
- def __str__(self):
- return "".join("/" + "+".join("%s=%s" % (rpki.oids.oid2name(a[0]), a[1])
- for a in rdn)
- for rdn in self.dn)
-
- def __cmp__(self, other):
- return cmp(self.dn, other.dn)
-
- def __repr__(self):
- return rpki.log.log_repr(self, str(self))
-
- def _debug(self):
- logger.debug("++ %r %r", self, self.dn)
-
- @classmethod
- def from_cn(cls, cn, sn = None):
- assert isinstance(cn, (str, unicode))
- if isinstance(sn, (int, long)):
- sn = "%08X" % sn
- elif isinstance(sn, (str, unicode)):
- assert all(c in "0123456789abcdefABCDEF" for c in sn)
- sn = str(sn)
- self = cls()
- if sn is not None:
- self.dn = (((rpki.oids.commonName, cn),), ((rpki.oids.serialNumber, sn),))
- else:
- self.dn = (((rpki.oids.commonName, cn),),)
- return self
-
- @classmethod
- def from_POW(cls, t):
- assert isinstance(t, tuple)
- self = cls()
- self.dn = t
- return self
-
- def get_POW(self):
- return self.dn
-
- def extract_cn_and_sn(self):
- cn = None
- sn = None
-
- for rdn in self.dn:
- if len(rdn) == 1 and len(rdn[0]) == 2:
- oid = rdn[0][0]
- val = rdn[0][1]
- if oid == rpki.oids.commonName and cn is None:
- cn = val
- continue
- if oid == rpki.oids.serialNumber and sn is None:
- sn = val
- continue
- raise rpki.exceptions.BadX510DN("Bad subject name: %s" % (self.dn,))
-
- if cn is None:
- raise rpki.exceptions.BadX510DN("Subject name is missing CN: %s" % (self.dn,))
-
- return cn, sn
-
-
-class DER_object(object):
- """
- Virtual class to hold a generic DER object.
- """
-
- ## @var formats
- # Formats supported in this object. This is kind of redundant now
- # that we're down to a single ASN.1 package and everything supports
- # the same DER and POW formats, it's mostly historical baggage from
- # the days when we had three different ASN.1 encoders, each with its
- # own low-level Python object format. Clean up, some day.
- formats = ("DER", "POW")
-
- ## @var POW_class
- # Class of underlying POW object. Concrete subclasses must supply this.
- POW_class = None
-
- ## Other attributes that self.clear() should whack.
- other_clear = ()
-
- ## @var DER
- # DER value of this object
- DER = None
-
- ## @var failure_threshold
- # Rate-limiting interval between whines about Auto_update objects.
- failure_threshold = rpki.sundial.timedelta(minutes = 5)
-
- def empty(self):
"""
- Test whether this object is empty.
+ Encode DER (really, anything) as Base64 text, with linebreaks to
+ keep the result (sort of) readable.
"""
- return all(getattr(self, a, None) is None for a in self.formats)
- def clear(self):
- """
- Make this object empty.
- """
- for a in self.formats + self.other_clear:
- setattr(self, a, None)
- self.filename = None
- self.timestamp = None
- self.lastfail = None
+ b = base64.b64encode(der)
+ n = len(b)
+ return "\n" + "\n".join(b[i : min(i + 64, n)] for i in xrange(0, n, 64)) + "\n"
- def __init__(self, **kw):
+def looks_like_PEM(text):
"""
- Initialize a DER_object.
+ Guess whether text looks like a PEM encoding.
"""
- self.clear()
- if len(kw):
- self.set(**kw)
- def set(self, **kw):
- """
- Set this object by setting one of its known formats.
+ i = text.find("-----BEGIN ")
+ return i >= 0 and text.find("\n-----END ", i) > i
- This method only allows one to set one format at a time.
- Subsequent calls will clear the object first. The point of all
- this is to let the object's internal converters handle mustering
- the object into whatever format you need at the moment.
+def first_uri_matching_prefix(xia, prefix):
+ """
+ Find first URI in a sequence of AIA or SIA URIs which matches a
+ particular prefix string. Returns the URI if found, otherwise None.
"""
- if len(kw) == 1:
- name = kw.keys()[0]
- if name in self.formats:
- self.clear()
- setattr(self, name, kw[name])
- return
- if name == "PEM":
- self.clear()
- self._set_PEM(kw[name])
- return
- if name == "Base64":
- self.clear()
- self.DER = base64.b64decode(kw[name])
- return
- if name == "Auto_update":
- self.filename = kw[name]
- self.check_auto_update()
- return
- if name in ("PEM_file", "DER_file", "Auto_file"):
- f = open(kw[name], "rb")
- value = f.read()
- f.close()
- self.clear()
- if name == "PEM_file" or (name == "Auto_file" and looks_like_PEM(value)):
- self._set_PEM(value)
- else:
- self.DER = value
- return
- raise rpki.exceptions.DERObjectConversionError("Can't honor conversion request %r" % (kw,))
+ if xia is not None:
+ for uri in xia:
+ if uri.startswith(prefix):
+ return uri
+ return None
- def check_auto_update(self):
- """
- Check for updates to a DER object that auto-updates from a file.
- """
- if self.filename is None:
- return
- try:
- filename = self.filename
- timestamp = os.stat(self.filename).st_mtime
- if self.timestamp is None or self.timestamp < timestamp:
- logger.debug("Updating %s, timestamp %s",
- filename, rpki.sundial.datetime.fromtimestamp(timestamp))
- f = open(filename, "rb")
- value = f.read()
- f.close()
- self.clear()
- if looks_like_PEM(value):
- self._set_PEM(value)
- else:
- self.DER = value
- self.filename = filename
- self.timestamp = timestamp
- except (IOError, OSError), e:
- now = rpki.sundial.now()
- if self.lastfail is None or now > self.lastfail + self.failure_threshold:
- logger.warning("Could not auto_update %r (last failure %s): %s", self, self.lastfail, e)
- self.lastfail = now
- else:
- self.lastfail = None
-
- def check(self):
+def first_rsync_uri(xia):
"""
- Perform basic checks on a DER object.
+ Find first rsync URI in a sequence of AIA or SIA URIs.
+ Returns the URI if found, otherwise None.
"""
- self.check_auto_update()
- assert not self.empty()
- def _set_PEM(self, pem):
- """
- Set the POW value of this object based on a PEM input value.
- Subclasses may need to override this.
- """
- assert self.empty()
- self.POW = self.POW_class.pemRead(pem)
+ return first_uri_matching_prefix(xia, "rsync://")
- def get_DER(self):
- """
- Get the DER value of this object.
- Subclasses may need to override this method.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWrite()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
+def first_http_uri(xia):
"""
- Get the rpki.POW value of this object.
- Subclasses may need to override this method.
+ Find first HTTP URI in a sequence of AIA or SIA URIs.
+ Returns the URI if found, otherwise None.
"""
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = self.POW_class.derRead(self.get_DER())
- return self.POW
- def get_Base64(self):
- """
- Get the Base64 encoding of the DER value of this object.
- """
- return base64_with_linebreaks(self.get_DER())
+ return first_uri_matching_prefix(xia, "http://")
- def get_PEM(self):
+def first_https_uri(xia):
"""
- Get the PEM representation of this object.
+ Find first HTTPS URI in a sequence of AIA or SIA URIs.
+ Returns the URI if found, otherwise None.
"""
- return self.get_POW().pemWrite()
- def __cmp__(self, other):
- """
- Compare two DER-encoded objects.
- """
- if self is None and other is None:
- return 0
- elif self is None:
- return -1
- elif other is None:
- return 1
- elif isinstance(other, str):
- return cmp(self.get_DER(), other)
- else:
- return cmp(self.get_DER(), other.get_DER())
-
- def hSKI(self):
- """
- Return hexadecimal string representation of SKI for this object.
- Only work for subclasses that implement get_SKI().
- """
- ski = self.get_SKI()
- return ":".join(("%02X" % ord(i) for i in ski)) if ski else ""
+ return first_uri_matching_prefix(xia, "https://")
- def gSKI(self):
+def sha1(data):
"""
- Calculate g(SKI) for this object. Only work for subclasses
- that implement get_SKI().
+ Calculate SHA-1 digest of some data.
+ Convenience wrapper around rpki.POW.Digest class.
"""
- return base64.urlsafe_b64encode(self.get_SKI()).rstrip("=")
- def hAKI(self):
- """
- Return hexadecimal string representation of AKI for this
- object. Only work for subclasses that implement get_AKI().
- """
- aki = self.get_AKI()
- return ":".join(("%02X" % ord(i) for i in aki)) if aki else ""
+ d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)
+ d.update(data)
+ return d.digest()
- def gAKI(self):
+def sha256(data):
"""
- Calculate g(AKI) for this object. Only work for subclasses
- that implement get_AKI().
+ Calculate SHA-256 digest of some data.
+ Convenience wrapper around rpki.POW.Digest class.
"""
- return base64.urlsafe_b64encode(self.get_AKI()).rstrip("=")
- def get_AKI(self):
- """
- Get the AKI extension from this object, if supported.
- """
- return self.get_POW().getAKI()
+ d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
+ d.update(data)
+ return d.digest()
- def get_SKI(self):
- """
- Get the SKI extension from this object, if supported.
- """
- return self.get_POW().getSKI()
- def get_EKU(self):
- """
- Get the Extended Key Usage extension from this object, if supported.
+class X501DN(object):
"""
- return self.get_POW().getEKU()
+ Class to hold an X.501 Distinguished Name.
- def get_SIA(self):
- """
- Get the SIA extension from this object. Only works for subclasses
- that support getSIA().
- """
- return self.get_POW().getSIA()
+ This is nothing like a complete implementation, just enough for our
+ purposes. See RFC 5280 4.1.2.4 for the ASN.1 details. In brief:
- def get_sia_directory_uri(self):
- """
- Get SIA directory (id-ad-caRepository) URI from this object.
- Only works for subclasses that support getSIA().
- """
- sia = self.get_POW().getSIA()
- return None if sia is None else first_rsync_uri(sia[0])
+ - A DN is a SEQUENCE OF RDNs.
- def get_sia_manifest_uri(self):
- """
- Get SIA manifest (id-ad-rpkiManifest) URI from this object.
- Only works for subclasses that support getSIA().
- """
- sia = self.get_POW().getSIA()
- return None if sia is None else first_rsync_uri(sia[1])
+ - A RDN is a SET OF AttributeAndValues; in practice, multi-value
+ RDNs are rare, so an RDN is almost always a set with a single
+ element.
- def get_sia_object_uri(self):
- """
- Get SIA object (id-ad-signedObject) URI from this object.
- Only works for subclasses that support getSIA().
- """
- sia = self.get_POW().getSIA()
- return None if sia is None else first_rsync_uri(sia[2])
+ - An AttributeAndValue is a SEQUENCE consisting of a OID and a
+ value, where a whole bunch of things including both syntax and
+ semantics of the value are determined by the OID.
- def get_AIA(self):
- """
- Get the SIA extension from this object. Only works for subclasses
- that support getAIA().
- """
- return self.get_POW().getAIA()
+ - The value is some kind of ASN.1 string; there are far too many
+ encoding options options, most of which are either strongly
+ discouraged or outright forbidden by the PKIX profile, but which
+ persist for historical reasons. The only ones PKIX actually
+ likes are PrintableString and UTF8String, but there are nuances
+ and special cases where some of the others are required.
- def get_aia_uri(self):
- """
- Get AIA (id-ad-caIssuers) URI from this object.
- Only works for subclasses that support getAIA().
- """
- return first_rsync_uri(self.get_POW().getAIA())
+ The RPKI profile further restricts DNs to a single mandatory
+ CommonName attribute with a single optional SerialNumber attribute
+ (not to be confused with the certificate serial number).
- def get_basicConstraints(self):
- """
- Get the basicConstraints extension from this object. Only works
- for subclasses that support getExtension().
+ BPKI certificates should (we hope) follow the general PKIX guideline
+ but the ones we construct ourselves are likely to be relatively
+ simple.
"""
- return self.get_POW().getBasicConstraints()
- def is_CA(self):
- """
- Return True if and only if object has the basicConstraints
- extension and its cA value is true.
- """
- basicConstraints = self.get_basicConstraints()
- return basicConstraints is not None and basicConstraints[0]
+ def __init__(self, dn):
+ assert isinstance(dn, tuple)
+ self.dn = dn
- def get_3779resources(self):
- """
- Get RFC 3779 resources as rpki.resource_set objects.
- """
- resources = rpki.resource_set.resource_bag.from_POW_rfc3779(self.get_POW().getRFC3779())
- try:
- resources.valid_until = self.getNotAfter()
- except AttributeError:
- pass
- return resources
-
- @classmethod
- def from_sql(cls, x):
- """
- Convert from SQL storage format.
- """
- return cls(DER = x)
+ def __str__(self):
+ return "".join("/" + "+".join("%s=%s" % (rpki.oids.oid2name(a[0]), a[1])
+ for a in rdn)
+ for rdn in self.dn)
- def to_sql(self):
- """
- Convert to SQL storage format.
- """
- return self.get_DER()
+ def __cmp__(self, other):
+ return cmp(self.dn, other.dn)
- def dumpasn1(self):
- """
- Pretty print an ASN.1 DER object using cryptlib dumpasn1 tool.
- Use a temporary file rather than popen4() because dumpasn1 uses
- seek() when decoding ASN.1 content nested in OCTET STRING values.
- """
+ def __repr__(self):
+ return rpki.log.log_repr(self, str(self))
- ret = None
- fn = "dumpasn1.%d.tmp" % os.getpid()
- try:
- f = open(fn, "wb")
- f.write(self.get_DER())
- f.close()
- p = subprocess.Popen(("dumpasn1", "-a", fn), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
- ret = "\n".join(x for x in p.communicate()[0].splitlines() if x.startswith(" "))
- except Exception, e:
- ret = "[Could not run dumpasn1: %s]" % e
- finally:
- os.unlink(fn)
- return ret
-
- def tracking_data(self, uri):
- """
- Return a string containing data we want to log when tracking how
- objects move through the RPKI system. Subclasses may wrap this to
- provide more information, but should make sure to include at least
- this information at the start of the tracking line.
- """
- try:
- d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)
- d.update(self.get_DER())
- return "%s %s %s" % (uri, self.creation_timestamp,
- "".join(("%02X" % ord(b) for b in d.digest())))
- except: # pylint: disable=W0702
- return uri
-
- def __getstate__(self):
- """
- Pickling protocol -- pickle the DER encoding.
- """
- return self.get_DER()
+ def _debug(self):
+ logger.debug("++ %r %r", self, self.dn)
- def __setstate__(self, state):
- """
- Pickling protocol -- unpickle the DER encoding.
- """
- self.set(DER = state)
+ @classmethod
+ def from_cn(cls, cn, sn = None):
+ assert isinstance(cn, (str, unicode))
+ if isinstance(sn, (int, long)):
+ sn = "%08X" % sn
+ elif isinstance(sn, (str, unicode)):
+ assert all(c in "0123456789abcdefABCDEF" for c in sn)
+ sn = str(sn)
+ if sn is not None:
+ dn = (((rpki.oids.commonName, cn),), ((rpki.oids.serialNumber, sn),))
+ else:
+ dn = (((rpki.oids.commonName, cn),),)
+ return cls(dn)
-class X509(DER_object):
- """
- X.509 certificates.
+ @classmethod
+ def from_POW(cls, dn):
+ return cls(dn)
- This class is designed to hold all the different representations of
- X.509 certs we're using and convert between them. X.509 support in
- Python a nasty maze of half-cooked stuff (except perhaps for
- cryptlib, which is just different). Users of this module should not
- have to care about this implementation nightmare.
- """
+ def get_POW(self):
+ return self.dn
- POW_class = rpki.POW.X509
+ def extract_cn_and_sn(self):
+ cn = None
+ sn = None
- def getIssuer(self):
- """
- Get the issuer of this certificate.
- """
- return X501DN.from_POW(self.get_POW().getIssuer())
+ for rdn in self.dn:
+ if len(rdn) == 1 and len(rdn[0]) == 2:
+ oid = rdn[0][0]
+ val = rdn[0][1]
+ if oid == rpki.oids.commonName and cn is None:
+ cn = val
+ continue
+ if oid == rpki.oids.serialNumber and sn is None:
+ sn = val
+ continue
+ raise rpki.exceptions.BadX510DN("Bad subject name: %s" % (self.dn,))
- def getSubject(self):
- """
- Get the subject of this certificate.
- """
- return X501DN.from_POW(self.get_POW().getSubject())
+ if cn is None:
+ raise rpki.exceptions.BadX510DN("Subject name is missing CN: %s" % (self.dn,))
- def getNotBefore(self):
- """
- Get the inception time of this certificate.
- """
- return self.get_POW().getNotBefore()
+ return cn, sn
- def getNotAfter(self):
- """
- Get the expiration time of this certificate.
- """
- return self.get_POW().getNotAfter()
- def getSerial(self):
+class DER_object(object):
"""
- Get the serial number of this certificate.
+ Virtual class to hold a generic DER object.
"""
- return self.get_POW().getSerial()
- def getPublicKey(self):
- """
- Extract the public key from this certificate.
- """
- return PublicKey(POW = self.get_POW().getPublicKey())
+ ## @var formats
+ # Formats supported in this object. This is kind of redundant now
+ # that we're down to a single ASN.1 package and everything supports
+ # the same DER and POW formats, it's mostly historical baggage from
+ # the days when we had three different ASN.1 encoders, each with its
+ # own low-level Python object format. Clean up, some day.
+ formats = ("DER", "POW")
- def get_SKI(self):
- """
- Get the SKI extension from this object.
- """
- return self.get_POW().getSKI()
+ ## @var POW_class
+ # Class of underlying POW object. Concrete subclasses must supply this.
+ POW_class = None
- def expired(self):
- """
- Test whether this certificate has expired.
- """
- return self.getNotAfter() <= rpki.sundial.now()
+ ## Other attributes that self.clear() should whack.
+ other_clear = ()
- def issue(self, keypair, subject_key, serial, sia, aia, crldp, notAfter,
- cn = None, resources = None, is_ca = True, notBefore = None,
- sn = None, eku = None):
- """
- Issue an RPKI certificate.
- """
+ ## @var DER
+ # DER value of this object
+ DER = None
- assert aia is not None and crldp is not None
-
- assert eku is None or not is_ca
-
- return self._issue(
- keypair = keypair,
- subject_key = subject_key,
- serial = serial,
- sia = sia,
- aia = aia,
- crldp = crldp,
- notBefore = notBefore,
- notAfter = notAfter,
- cn = cn,
- sn = sn,
- resources = resources,
- is_ca = is_ca,
- aki = self.get_SKI(),
- issuer_name = self.getSubject(),
- eku = eku)
-
-
- @classmethod
- def self_certify(cls, keypair, subject_key, serial, sia, notAfter,
- cn = None, resources = None, notBefore = None,
- sn = None):
- """
- Generate a self-certified RPKI certificate.
- """
+ ## @var failure_threshold
+ # Rate-limiting interval between whines about Auto_update objects.
+ failure_threshold = rpki.sundial.timedelta(minutes = 5)
- ski = subject_key.get_SKI()
-
- if cn is None:
- cn = "".join(("%02X" % ord(i) for i in ski))
-
- return cls._issue(
- keypair = keypair,
- subject_key = subject_key,
- serial = serial,
- sia = sia,
- aia = None,
- crldp = None,
- notBefore = notBefore,
- notAfter = notAfter,
- cn = cn,
- sn = sn,
- resources = resources,
- is_ca = True,
- aki = ski,
- issuer_name = X501DN.from_cn(cn, sn),
- eku = None)
-
-
- @classmethod
- def _issue(cls, keypair, subject_key, serial, sia, aia, crldp, notAfter,
- cn, sn, resources, is_ca, aki, issuer_name, notBefore, eku):
- """
- Common code to issue an RPKI certificate.
- """
+ def empty(self):
+ """
+ Test whether this object is empty.
+ """
- now = rpki.sundial.now()
- ski = subject_key.get_SKI()
+ return all(getattr(self, a, None) is None for a in self.formats)
- if notBefore is None:
- notBefore = now
+ def clear(self):
+ """
+ Make this object empty.
+ """
- if cn is None:
- cn = "".join(("%02X" % ord(i) for i in ski))
+ for a in self.formats + self.other_clear:
+ setattr(self, a, None)
+ self.filename = None
+ self.timestamp = None
+ self.lastfail = None
- if now >= notAfter:
- raise rpki.exceptions.PastNotAfter("notAfter value %s is already in the past" % notAfter)
+ def __init__(self, **kw):
+ """
+ Initialize a DER_object.
+ """
- if notBefore >= notAfter:
- raise rpki.exceptions.NullValidityInterval("notAfter value %s predates notBefore value %s" %
- (notAfter, notBefore))
+ self.clear()
+ if len(kw):
+ self.set(**kw)
+
+ def set(self, **kw):
+ """
+ Set this object by setting one of its known formats.
+
+ This method only allows one to set one format at a time.
+ Subsequent calls will clear the object first. The point of all
+ this is to let the object's internal converters handle mustering
+ the object into whatever format you need at the moment.
+ """
+
+ if len(kw) == 1:
+ name = kw.keys()[0]
+ if name in self.formats:
+ self.clear()
+ setattr(self, name, kw[name])
+ return
+ if name == "PEM":
+ self.clear()
+ self._set_PEM(kw[name])
+ return
+ if name == "Base64":
+ self.clear()
+ self.DER = base64.b64decode(kw[name])
+ return
+ if name == "Auto_update":
+ self.filename = kw[name]
+ self.check_auto_update()
+ return
+ if name in ("PEM_file", "DER_file", "Auto_file"):
+ f = open(kw[name], "rb")
+ value = f.read()
+ f.close()
+ self.clear()
+ if name == "PEM_file" or (name == "Auto_file" and looks_like_PEM(value)):
+ self._set_PEM(value)
+ else:
+ self.DER = value
+ return
+ raise rpki.exceptions.DERObjectConversionError("Can't honor conversion request %r" % (kw,))
+
+ def check_auto_update(self):
+ """
+ Check for updates to a DER object that auto-updates from a file.
+ """
+
+ # pylint: disable=W0201
+
+ if self.filename is None:
+ return
+ try:
+ filename = self.filename
+ timestamp = os.stat(self.filename).st_mtime
+ if self.timestamp is None or self.timestamp < timestamp:
+ logger.debug("Updating %s, timestamp %s",
+ filename, rpki.sundial.datetime.fromtimestamp(timestamp))
+ f = open(filename, "rb")
+ value = f.read()
+ f.close()
+ self.clear()
+ if looks_like_PEM(value):
+ self._set_PEM(value)
+ else:
+ self.DER = value
+ self.filename = filename
+ self.timestamp = timestamp
+ except (IOError, OSError), e:
+ now = rpki.sundial.now()
+ if self.lastfail is None or now > self.lastfail + self.failure_threshold:
+ logger.warning("Could not auto_update %r (last failure %s): %s", self, self.lastfail, e)
+ self.lastfail = now
+ else:
+ self.lastfail = None
- cert = rpki.POW.X509()
+ @property
+ def mtime(self):
+ """
+ Retrieve os.stat().st_mtime for auto-update files.
+ """
- cert.setVersion(2)
- cert.setSerial(serial)
- cert.setIssuer(issuer_name.get_POW())
- cert.setSubject(X501DN.from_cn(cn, sn).get_POW())
- cert.setNotBefore(notBefore)
- cert.setNotAfter(notAfter)
- cert.setPublicKey(subject_key.get_POW())
- cert.setSKI(ski)
- cert.setAKI(aki)
- cert.setCertificatePolicies((rpki.oids.id_cp_ipAddr_asNumber,))
+ return os.stat(self.filename).st_mtime
- if crldp is not None:
- cert.setCRLDP((crldp,))
+ def check(self):
+ """
+ Perform basic checks on a DER object.
+ """
- if aia is not None:
- cert.setAIA((aia,))
+ self.check_auto_update()
+ assert not self.empty()
+
+ def _set_PEM(self, pem):
+ """
+ Set the POW value of this object based on a PEM input value.
+ Subclasses may need to override this.
+ """
+
+ # pylint: disable=W0201
+ assert self.empty()
+ self.POW = self.POW_class.pemRead(pem)
+
+ def get_DER(self):
+ """
+ Get the DER value of this object.
+ Subclasses may need to override this method.
+ """
+
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
+
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this object.
+ Subclasses may need to override this method.
+ """
+
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = self.POW_class.derRead(self.get_DER())
+ return self.POW
+
+ def get_Base64(self):
+ """
+ Get the Base64 encoding of the DER value of this object.
+ """
+
+ return base64_with_linebreaks(self.get_DER())
+
+ def get_PEM(self):
+ """
+ Get the PEM representation of this object.
+ """
+
+ return self.get_POW().pemWrite()
+
+ def __cmp__(self, other):
+ """
+ Compare two DER-encoded objects.
+ """
+
+ if self is None and other is None:
+ return 0
+ elif self is None:
+ return -1
+ elif other is None:
+ return 1
+ elif isinstance(other, str):
+ return cmp(self.get_DER(), other)
+ else:
+ return cmp(self.get_DER(), other.get_DER())
- if is_ca:
- cert.setBasicConstraints(True, None)
- cert.setKeyUsage(frozenset(("keyCertSign", "cRLSign")))
+ def hSKI(self):
+ """
+ Return hexadecimal string representation of SKI for this object.
+ Only work for subclasses that implement get_SKI().
+ """
+
+ ski = self.get_SKI()
+ return ":".join(("%02X" % ord(i) for i in ski)) if ski else ""
- else:
- cert.setKeyUsage(frozenset(("digitalSignature",)))
+ def gSKI(self):
+ """
+ Calculate g(SKI) for this object. Only work for subclasses
+ that implement get_SKI().
+ """
+
+ return base64.urlsafe_b64encode(self.get_SKI()).rstrip("=")
- assert sia is not None or not is_ca
+ def hAKI(self):
+ """
+ Return hexadecimal string representation of AKI for this
+ object. Only work for subclasses that implement get_AKI().
+ """
+
+ aki = self.get_AKI()
+ return ":".join(("%02X" % ord(i) for i in aki)) if aki else ""
+
+ def gAKI(self):
+ """
+ Calculate g(AKI) for this object. Only work for subclasses
+ that implement get_AKI().
+ """
+
+ return base64.urlsafe_b64encode(self.get_AKI()).rstrip("=")
+
+ def get_AKI(self):
+ """
+ Get the AKI extension from this object, if supported.
+ """
+
+ return self.get_POW().getAKI()
+
+ def get_SKI(self):
+ """
+ Get the SKI extension from this object, if supported.
+ """
+
+ return self.get_POW().getSKI()
+
+ def get_EKU(self):
+ """
+ Get the Extended Key Usage extension from this object, if supported.
+ """
+
+ return self.get_POW().getEKU()
+
+ def get_SIA(self):
+ """
+ Get the SIA extension from this object. Only works for subclasses
+ that support getSIA().
+ """
+
+ return self.get_POW().getSIA()
+
+ def get_sia_directory_uri(self):
+ """
+ Get SIA directory (id-ad-caRepository) URI from this object.
+ Only works for subclasses that support getSIA().
+ """
+
+ sia = self.get_POW().getSIA()
+ return None if sia is None else first_rsync_uri(sia[0])
+
+ def get_sia_manifest_uri(self):
+ """
+ Get SIA manifest (id-ad-rpkiManifest) URI from this object.
+ Only works for subclasses that support getSIA().
+ """
+
+ sia = self.get_POW().getSIA()
+ return None if sia is None else first_rsync_uri(sia[1])
+
+ def get_sia_object_uri(self):
+ """
+ Get SIA object (id-ad-signedObject) URI from this object.
+ Only works for subclasses that support getSIA().
+ """
+
+ sia = self.get_POW().getSIA()
+ return None if sia is None else first_rsync_uri(sia[2])
+
+ def get_sia_rrdp_notify(self):
+ """
+ Get SIA RRDP (id-ad-rpkiNotify) URI from this object.
+ We prefer HTTPS over HTTP if both are present.
+ Only works for subclasses that support getSIA().
+ """
+
+ sia = self.get_POW().getSIA()
+ return None if sia is None else first_https_uri(sia[3]) or first_http_uri(sia[3])
+
+ def get_AIA(self):
+ """
+ Get the SIA extension from this object. Only works for subclasses
+ that support getAIA().
+ """
+
+ return self.get_POW().getAIA()
+
+ def get_aia_uri(self):
+ """
+ Get AIA (id-ad-caIssuers) URI from this object.
+ Only works for subclasses that support getAIA().
+ """
+
+ return first_rsync_uri(self.get_POW().getAIA())
+
+ def get_basicConstraints(self):
+ """
+ Get the basicConstraints extension from this object. Only works
+ for subclasses that support getExtension().
+ """
+
+ return self.get_POW().getBasicConstraints()
+
+ def is_CA(self):
+ """
+ Return True if and only if object has the basicConstraints
+ extension and its cA value is true.
+ """
+
+ basicConstraints = self.get_basicConstraints()
+ return basicConstraints is not None and basicConstraints[0]
+
+ def get_3779resources(self):
+ """
+ Get RFC 3779 resources as rpki.resource_set objects.
+ """
+
+ resources = rpki.resource_set.resource_bag.from_POW_rfc3779(self.get_POW().getRFC3779())
+ try:
+ resources.valid_until = self.getNotAfter() # pylint: disable=E1101
+ except AttributeError:
+ pass
+ return resources
+
+ @classmethod
+ def from_sql(cls, x):
+ """
+ Convert from SQL storage format.
+ """
+
+ return cls(DER = x)
+
+ def to_sql(self):
+ """
+ Convert to SQL storage format.
+ """
+
+ return self.get_DER()
+
+ def dumpasn1(self):
+ """
+ Pretty print an ASN.1 DER object using cryptlib dumpasn1 tool.
+ Use a temporary file rather than popen4() because dumpasn1 uses
+ seek() when decoding ASN.1 content nested in OCTET STRING values.
+ """
+
+ ret = None
+ fn = "dumpasn1.%d.tmp" % os.getpid()
+ try:
+ f = open(fn, "wb")
+ f.write(self.get_DER())
+ f.close()
+ p = subprocess.Popen(("dumpasn1", "-a", fn), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+ ret = "\n".join(x for x in p.communicate()[0].splitlines() if x.startswith(" "))
+ except Exception, e:
+ ret = "[Could not run dumpasn1: %s]" % e
+ finally:
+ os.unlink(fn)
+ return ret
+
+ def tracking_data(self, uri):
+ """
+ Return a string containing data we want to log when tracking how
+ objects move through the RPKI system. Subclasses may wrap this to
+ provide more information, but should make sure to include at least
+ this information at the start of the tracking line.
+ """
+
+ # pylint: disable=E1101
+
+ try:
+ return "%s %s %s" % (uri, self.creation_timestamp, "".join(("%02X" % ord(b) for b in sha1(self.get_DER()))))
+ except:
+ return uri
+
+ def __getstate__(self):
+ """
+ Pickling protocol -- pickle the DER encoding.
+ """
+
+ return self.get_DER()
+
+ def __setstate__(self, state):
+ """
+ Pickling protocol -- unpickle the DER encoding.
+ """
+
+ self.set(DER = state)
- if sia is not None:
- caRepository, rpkiManifest, signedObject = sia
- cert.setSIA(
- (caRepository,) if isinstance(caRepository, str) else caRepository,
- (rpkiManifest,) if isinstance(rpkiManifest, str) else rpkiManifest,
- (signedObject,) if isinstance(signedObject, str) else signedObject)
class X509(DER_object):
    """
    X.509 certificates.

    This class is designed to hold all the different representations of
    X.509 certs we're using and convert between them.  X.509 support in
    Python is a nasty maze of half-cooked stuff (except perhaps for
    cryptlib, which is just different).  Users of this module should not
    have to care about this implementation nightmare.
    """

    POW_class = rpki.POW.X509

    def getIssuer(self):
        """
        Get the issuer of this certificate as an X501DN object.
        """

        return X501DN.from_POW(self.get_POW().getIssuer())

    def getSubject(self):
        """
        Get the subject of this certificate as an X501DN object.
        """

        return X501DN.from_POW(self.get_POW().getSubject())

    def getNotBefore(self):
        """
        Get the inception time of this certificate.
        """

        return self.get_POW().getNotBefore()

    def getNotAfter(self):
        """
        Get the expiration time of this certificate.
        """

        return self.get_POW().getNotAfter()

    def getSerial(self):
        """
        Get the serial number of this certificate.
        """

        return self.get_POW().getSerial()

    def getPublicKey(self):
        """
        Extract the public key from this certificate.
        """

        return PublicKey(POW = self.get_POW().getPublicKey())

    def get_SKI(self):
        """
        Get the SKI extension from this object.
        """

        return self.get_POW().getSKI()

    def expired(self):
        """
        Test whether this certificate has expired.
        """

        return self.getNotAfter() <= rpki.sundial.now()

    def issue(self, keypair, subject_key, serial, sia, aia, crldp, notAfter,
              cn = None, resources = None, is_ca = True, notBefore = None,
              sn = None, eku = None):
        """
        Issue an RPKI certificate with this certificate as issuer.

        keypair is the issuer's private key, subject_key the subject's
        public key; sia is a 4-tuple (caRepository, rpkiManifest,
        signedObject, rpkiNotify).  AIA and CRLDP are mandatory for
        issued (non-self-signed) certificates, and EKU is only legal
        for EE certificates.
        """

        assert aia is not None and crldp is not None

        # RPKI profile: EKU is not allowed on CA certificates.
        assert eku is None or not is_ca

        return self._issue(
            keypair      = keypair,
            subject_key  = subject_key,
            serial       = serial,
            sia          = sia,
            aia          = aia,
            crldp        = crldp,
            notBefore    = notBefore,
            notAfter     = notAfter,
            cn           = cn,
            sn           = sn,
            resources    = resources,
            is_ca        = is_ca,
            aki          = self.get_SKI(),
            issuer_name  = self.getSubject(),
            eku          = eku)

    @classmethod
    def self_certify(cls, keypair, subject_key, serial, sia, notAfter,
                     cn = None, resources = None, notBefore = None,
                     sn = None):
        """
        Generate a self-certified RPKI certificate.  Self-signed certs
        get no AIA or CRLDP, are always CAs, and use the subject key's
        SKI as the AKI.
        """

        ski = subject_key.get_SKI()

        # Default CN is the hex representation of the SKI.
        if cn is None:
            cn = "".join(("%02X" % ord(i) for i in ski))

        return cls._issue(
            keypair      = keypair,
            subject_key  = subject_key,
            serial       = serial,
            sia          = sia,
            aia          = None,
            crldp        = None,
            notBefore    = notBefore,
            notAfter     = notAfter,
            cn           = cn,
            sn           = sn,
            resources    = resources,
            is_ca        = True,
            aki          = ski,
            issuer_name  = X501DN.from_cn(cn, sn),
            eku          = None)

    @classmethod
    def _issue(cls, keypair, subject_key, serial, sia, aia, crldp, notAfter,
               cn, sn, resources, is_ca, aki, issuer_name, notBefore, eku):
        """
        Common code to issue an RPKI certificate.  Do the real work
        after one of the wrapper methods (issue(), self_certify()) has
        extracted the relevant fields.

        Raises PastNotAfter or NullValidityInterval for bad validity
        intervals.
        """

        if sia is not None:
            # NOTE(review): this insists that sia[3] (rpkiNotify) be
            # non-empty -- confirm that every caller supplies an RRDP URI.
            assert len(sia) == 4 and sia[3]
            # Normalize each SIA element to a tuple of strings.
            sia = tuple((str(s),) if isinstance(s, (str, unicode)) else s for s in sia)

        now = rpki.sundial.now()
        ski = subject_key.get_SKI()

        if notBefore is None:
            notBefore = now

        # Default CN is the hex representation of the subject's SKI.
        if cn is None:
            cn = "".join(("%02X" % ord(i) for i in ski))

        if now >= notAfter:
            raise rpki.exceptions.PastNotAfter("notAfter value %s is already in the past" % notAfter)

        if notBefore >= notAfter:
            raise rpki.exceptions.NullValidityInterval("notAfter value %s predates notBefore value %s" %
                                                       (notAfter, notBefore))

        cert = rpki.POW.X509()

        cert.setVersion(2)              # X.509v3
        cert.setSerial(serial)
        cert.setIssuer(issuer_name.get_POW())
        cert.setSubject(X501DN.from_cn(cn, sn).get_POW())
        cert.setNotBefore(notBefore)
        cert.setNotAfter(notAfter)
        cert.setPublicKey(subject_key.get_POW())
        cert.setSKI(ski)
        cert.setAKI(aki)
        cert.setCertificatePolicies((rpki.oids.id_cp_ipAddr_asNumber,))

        if crldp is not None:
            cert.setCRLDP((crldp,))

        if aia is not None:
            cert.setAIA((aia,))

        if is_ca:
            cert.setBasicConstraints(True, None)
            cert.setKeyUsage(frozenset(("keyCertSign", "cRLSign")))

        else:
            cert.setKeyUsage(frozenset(("digitalSignature",)))

        # CA certificates must carry SIA per the RPKI profile.
        assert sia is not None or not is_ca

        if sia is not None:
            caRepository, rpkiManifest, signedObject, rpkiNotify = sia
            cert.setSIA(
                (caRepository,) if isinstance(caRepository, str) else caRepository,
                (rpkiManifest,) if isinstance(rpkiManifest, str) else rpkiManifest,
                (signedObject,) if isinstance(signedObject, str) else signedObject,
                (rpkiNotify,)   if isinstance(rpkiNotify,   str) else rpkiNotify)

        if resources is not None:
            cert.setRFC3779(
                asn  = ("inherit" if resources.asn.inherit else
                        ((r.min, r.max) for r in resources.asn)),
                ipv4 = ("inherit" if resources.v4.inherit else
                        ((r.min, r.max) for r in resources.v4)),
                ipv6 = ("inherit" if resources.v6.inherit else
                        ((r.min, r.max) for r in resources.v6)))

        if eku is not None:
            assert not is_ca
            cert.setEKU(eku)

        cert.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)

        return cls(POW = cert)

    def bpki_cross_certify(self, keypair, source_cert, serial, notAfter,
                           now = None, pathLenConstraint = 0):
        """
        Issue a BPKI certificate with values taken from an existing
        certificate (cross-certification).
        """

        return self.bpki_certify(
            keypair           = keypair,
            subject_name      = source_cert.getSubject(),
            subject_key       = source_cert.getPublicKey(),
            serial            = serial,
            notAfter          = notAfter,
            now               = now,
            pathLenConstraint = pathLenConstraint,
            is_ca             = True)

    @classmethod
    def bpki_self_certify(cls, keypair, subject_name, serial, notAfter,
                          now = None, pathLenConstraint = None):
        """
        Issue a self-signed BPKI CA certificate.
        """

        return cls._bpki_certify(
            keypair           = keypair,
            issuer_name       = subject_name,
            subject_name      = subject_name,
            subject_key       = keypair.get_public(),
            serial            = serial,
            now               = now,
            notAfter          = notAfter,
            pathLenConstraint = pathLenConstraint,
            is_ca             = True)

    def bpki_certify(self, keypair, subject_name, subject_key, serial, notAfter, is_ca,
                     now = None, pathLenConstraint = None):
        """
        Issue a normal BPKI certificate with this certificate as issuer.
        """

        # keypair must be the private half of this certificate's key.
        assert keypair.get_public() == self.getPublicKey()
        return self._bpki_certify(
            keypair           = keypair,
            issuer_name       = self.getSubject(),
            subject_name      = subject_name,
            subject_key       = subject_key,
            serial            = serial,
            now               = now,
            notAfter          = notAfter,
            pathLenConstraint = pathLenConstraint,
            is_ca             = is_ca)

    @classmethod
    def _bpki_certify(cls, keypair, issuer_name, subject_name, subject_key,
                      serial, now, notAfter, pathLenConstraint, is_ca):
        """
        Issue a BPKI certificate.  This internal method does the real
        work, after one of the wrapper methods has extracted the relevant
        fields.
        """

        if now is None:
            now = rpki.sundial.now()

        issuer_key = keypair.get_public()

        # Self-signed iff issuer and subject are the same identity.
        assert (issuer_key == subject_key) == (issuer_name == subject_name)
        assert is_ca or issuer_name != subject_name
        assert is_ca or pathLenConstraint is None
        assert pathLenConstraint is None or (isinstance(pathLenConstraint, (int, long)) and
                                             pathLenConstraint >= 0)

        cert = rpki.POW.X509()
        cert.setVersion(2)              # X.509v3
        cert.setSerial(serial)
        cert.setIssuer(issuer_name.get_POW())
        cert.setSubject(subject_name.get_POW())
        cert.setNotBefore(now)
        cert.setNotAfter(notAfter)
        cert.setPublicKey(subject_key.get_POW())
        cert.setSKI(subject_key.get_POW().calculateSKI())
        # Self-signed certificates get no AKI.
        if issuer_key != subject_key:
            cert.setAKI(issuer_key.get_POW().calculateSKI())
        if is_ca:
            cert.setBasicConstraints(True, pathLenConstraint)
        cert.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)
        return cls(POW = cert)

    @classmethod
    def normalize_chain(cls, chain):
        """
        Normalize a chain of certificates into a tuple of X509 objects.
        Given all the glue certificates needed for BPKI cross
        certification, it's easiest to allow sloppy arguments to the CMS
        validation methods and provide a single method that normalizes the
        allowed cases.  So this method allows X509, None, lists, and
        tuples, and returns a tuple of X509 objects.
        """

        if isinstance(chain, cls):
            chain = (chain,)
        return tuple(x for x in chain if x is not None)

    @property
    def creation_timestamp(self):
        """
        Time at which this object was created (its notBefore value).
        """

        return self.getNotBefore()
class PKCS10(DER_object):
    """
    Class to hold a PKCS #10 request.
    """

    POW_class = rpki.POW.PKCS10

    ## @var expected_ca_keyUsage
    # KeyUsage extension flags expected for CA requests.

    expected_ca_keyUsage = frozenset(("keyCertSign", "cRLSign"))

    ## @var allowed_extensions
    # Extensions allowed by RPKI profile.

    allowed_extensions = frozenset((rpki.oids.basicConstraints,
                                    rpki.oids.keyUsage,
                                    rpki.oids.subjectInfoAccess,
                                    rpki.oids.extendedKeyUsage))

    def get_DER(self):
        """
        Get the DER value of this certification request.

        Raises DERObjectConversionError if no representation from which
        DER can be derived is available.
        """

        self.check()
        if self.DER:
            return self.DER
        if self.POW:
            self.DER = self.POW.derWrite()
            return self.get_DER()
        raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")

    def get_POW(self):
        """
        Get the rpki.POW value of this certification request, decoding
        it from DER on first use.
        """

        # pylint: disable=E0203,W0201
        self.check()
        if not self.POW:
            self.POW = rpki.POW.PKCS10.derRead(self.get_DER())
        return self.POW

    def getSubject(self):
        """
        Extract the subject name from this certification request.
        """

        return X501DN.from_POW(self.get_POW().getSubject())

    def getPublicKey(self):
        """
        Extract the public key from this certification request.
        """

        return PublicKey(POW = self.get_POW().getPublicKey())

    def get_SKI(self):
        """
        Compute SKI for public key from this certification request.
        """

        return self.getPublicKey().get_SKI()

    def check_valid_request_common(self):
        """
        Common code for checking this certification requests to see
        whether they conform to the RPKI certificate profile.

        Throws an exception if the request isn't valid, so if this method
        returns at all, the request is ok.

        You probably don't want to call this directly, as it only performs
        the checks that are common to all RPKI certificates.
        """

        if not self.get_POW().verify():
            raise rpki.exceptions.BadPKCS10("PKCS #10 signature check failed")

        # PKCS #10 requests are always version 0 (v1).
        ver = self.get_POW().getVersion()
        if ver != 0:
            raise rpki.exceptions.BadPKCS10("PKCS #10 request has bad version number %s" % ver)

        ku = self.get_POW().getKeyUsage()

        if ku is not None and self.expected_ca_keyUsage != ku:
            raise rpki.exceptions.BadPKCS10("PKCS #10 keyUsage doesn't match profile: %r" % ku)

        forbidden_extensions = self.get_POW().getExtensionOIDs() - self.allowed_extensions

        if forbidden_extensions:
            raise rpki.exceptions.BadPKCS10("Forbidden extension%s in PKCS #10 certificate request: %s" % (
                "" if len(forbidden_extensions) == 1 else "s",
                ", ".join(forbidden_extensions)))

    def check_valid_request_ca(self):
        """
        Check this certification request to see whether it's a valid
        request for an RPKI CA certificate.

        Throws an exception if the request isn't valid, so if this method
        returns at all, the request is ok.
        """

        self.check_valid_request_common()

        alg = self.get_POW().getSignatureAlgorithm()
        bc  = self.get_POW().getBasicConstraints()
        eku = self.get_POW().getEKU()
        sia = self.get_POW().getSIA()

        if alg != rpki.oids.sha256WithRSAEncryption:
            raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for CA: %s" % alg)

        # CA requests must assert cA with no pathLenConstraint.
        if bc is None or not bc[0] or bc[1] is not None:
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA bad basicConstraints")

        if eku is not None:
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA EKU not allowed")

        if sia is None:
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA missing")

        caRepository, rpkiManifest, signedObject, rpkiNotify = sia

        logger.debug("check_valid_request_ca(): sia: %r", sia)

        if signedObject:
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must not have id-ad-signedObject")

        if not caRepository:
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must have id-ad-caRepository")

        if not any(uri.startswith("rsync://") for uri in caRepository):
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-caRepository contains no rsync URIs")

        # rsync directory URIs must end with a slash.
        if any(uri.startswith("rsync://") and not uri.endswith("/") for uri in caRepository):
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-caRepository does not end with slash")

        if not rpkiManifest:
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must have id-ad-rpkiManifest")

        if not any(uri.startswith("rsync://") for uri in rpkiManifest):
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-rpkiManifest contains no rsync URIs")

        # A manifest URI names a file, so it must NOT end with a slash.
        if any(uri.startswith("rsync://") and uri.endswith("/") for uri in rpkiManifest):
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-rpkiManifest ends with slash")

        # NOTE(review): unlike the EE check below, rpkiNotify is iterated
        # unguarded here -- confirm getSIA() always returns a sequence
        # (possibly empty) for each element rather than None.
        if any(not uri.startswith("http://") and not uri.startswith("https://") for uri in rpkiNotify):
            raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-rpkiNotify neither HTTP nor HTTPS")

    def check_valid_request_ee(self):
        """
        Check this certification request to see whether it's a valid
        request for an RPKI EE certificate.

        Throws an exception if the request isn't valid, so if this method
        returns at all, the request is ok.

        We're a bit less strict here than we are for either CA
        certificates or BGPSEC router certificates, because the profile is
        less tightly nailed down for unspecified-use RPKI EE certificates.
        Future specific purposes may impose tighter constraints.

        Note that this method does NOT apply to so-called "infrastructure"
        EE certificates (eg, the EE certificates embedded in manifests and
        ROAs); those are constrained fairly tightly, but they're also
        generated internally so we don't need to check them as user or
        protocol input.
        """

        self.check_valid_request_common()

        alg = self.get_POW().getSignatureAlgorithm()
        bc  = self.get_POW().getBasicConstraints()
        sia = self.get_POW().getSIA()

        logger.debug("check_valid_request_ee(): sia: %r", sia)

        caRepository, rpkiManifest, signedObject, rpkiNotify = sia or (None, None, None, None)

        if alg not in (rpki.oids.sha256WithRSAEncryption, rpki.oids.ecdsa_with_SHA256):
            raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for EE: %s" % alg)

        # basicConstraints, if present at all, must not assert cA.
        if bc is not None and (bc[0] or bc[1] is not None):
            raise rpki.exceptions.BadPKCS10("PKCS #10 EE has bad basicConstraints")

        if caRepository:
            raise rpki.exceptions.BadPKCS10("PKCS #10 EE must not have id-ad-caRepository")

        if rpkiManifest:
            raise rpki.exceptions.BadPKCS10("PKCS #10 EE must not have id-ad-rpkiManifest")

        if signedObject and not any(uri.startswith("rsync://") for uri in signedObject):
            raise rpki.exceptions.BadPKCS10("PKCS #10 EE SIA id-ad-signedObject contains no rsync URIs")

        if rpkiNotify and any(not uri.startswith("http://") and not uri.startswith("https://") for uri in rpkiNotify):
            raise rpki.exceptions.BadPKCS10("PKCS #10 EE SIA id-ad-rpkiNotify neither HTTP nor HTTPS")

    def check_valid_request_router(self):
        """
        Check this certification request to see whether it's a valid
        request for a BGPSEC router certificate.

        Throws an exception if the request isn't valid, so if this method
        returns at all, the request is ok.

        draft-ietf-sidr-bgpsec-pki-profiles 3.2 says follow RFC 6487 3
        except where explicitly overriden, and does not override for SIA.
        But draft-ietf-sidr-bgpsec-pki-profiles also says that router
        certificates don't get SIA, while RFC 6487 requires SIA.  So what
        do we do with SIA in PKCS #10 for router certificates?

        For the moment, ignore it, but make sure we don't include it in
        the certificate when we get to the code that generates that.
        """

        self.check_valid_request_ee()

        alg = self.get_POW().getSignatureAlgorithm()
        eku = self.get_POW().getEKU()

        if alg != rpki.oids.ecdsa_with_SHA256:
            raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for router: %s" % alg)

        # Not really clear to me whether PKCS #10 should have EKU or not, so allow
        # either, but insist that it be the right one if present.

        if eku is not None and rpki.oids.id_kp_bgpsec_router not in eku:
            raise rpki.exceptions.BadPKCS10("PKCS #10 router must have EKU")

    @classmethod
    def create(cls, keypair, exts = None, is_ca = False,
               caRepository = None, rpkiManifest = None, signedObject = None,
               cn = None, sn = None, eku = None, rpkiNotify = None):
        """
        Create a new request for a given keypair.

        NOTE(review): `exts` appears to be retained only for backwards
        signature compatibility; the old assertion rejecting it is gone
        and the parameter is now ignored -- confirm this is intentional.
        """

        # Default CN is the hex representation of the key's SKI.
        if cn is None:
            cn = "".join(("%02X" % ord(i) for i in keypair.get_SKI()))

        req = rpki.POW.PKCS10()
        req.setVersion(0)
        req.setSubject(X501DN.from_cn(cn, sn).get_POW())
        req.setPublicKey(keypair.get_POW())

        if is_ca:
            req.setBasicConstraints(True, None)
            req.setKeyUsage(cls.expected_ca_keyUsage)

        # Only set SIA if at least one element was supplied; normalize
        # bare strings to single-element lists as setSIA() expects.
        sia = (caRepository, rpkiManifest, signedObject, rpkiNotify)
        if not all(s is None for s in sia):
            req.setSIA(*tuple([str(s)] if isinstance(s, (str, unicode)) else s for s in sia))

        if eku:
            req.setEKU(eku)

        req.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)
        return cls(POW = req)
## @var generate_insecure_debug_only_rsa_key
# Debugging hack to let us save throwaway RSA keys from one debug
@@ -1115,913 +1235,981 @@ generate_insecure_debug_only_rsa_key = None
class insecure_debug_only_rsa_key_generator(object):
    """
    Debugging hack: hand out RSA keys from a DBM file so that throwaway
    keys can be reused from one debug session to the next.

    As the name says, this is INSECURE and for debugging only: private
    keys stored this way are not protected in any fashion.
    """

    def __init__(self, filename, keyno = 0):
        # Open (creating if necessary) the key database; prefer gdbm
        # when available, fall back to plain dbm.  Any failure here is
        # logged and re-raised so the hack fails loudly rather than
        # silently handing out fresh keys.
        try:
            try:
                import gdbm as dbm_du_jour
            except ImportError:
                import dbm as dbm_du_jour
            self.keyno = long(keyno)
            self.filename = filename
            self.db = dbm_du_jour.open(filename, "c")
        except:
            # Bare except is tolerable here because we re-raise.
            logger.warning("insecure_debug_only_rsa_key_generator initialization FAILED, hack inoperative")
            raise

    def __call__(self):
        """
        Return the next key: the stored key for the current key number
        if present, otherwise a freshly generated 2048-bit RSA key which
        is saved for future sessions.  Advances the key number.
        """

        k = str(self.keyno)
        try:
            v = rpki.POW.Asymmetric.derReadPrivate(self.db[k])
        except KeyError:
            v = rpki.POW.Asymmetric.generateRSA(2048)
            self.db[k] = v.derWritePrivate()
        self.keyno += 1
        return v
class PrivateKey(DER_object):
- """
- Class to hold a Public/Private key pair.
- """
-
- POW_class = rpki.POW.Asymmetric
-
- def get_DER(self):
"""
- Get the DER value of this keypair.
+ Class to hold a Public/Private key pair.
"""
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWritePrivate()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
- """
- Get the rpki.POW value of this keypair.
- """
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = rpki.POW.Asymmetric.derReadPrivate(self.get_DER())
- return self.POW
- def get_PEM(self):
- """
- Get the PEM representation of this keypair.
- """
- return self.get_POW().pemWritePrivate()
+ POW_class = rpki.POW.Asymmetric
- def _set_PEM(self, pem):
- """
- Set the POW value of this keypair from a PEM string.
- """
- assert self.empty()
- self.POW = self.POW_class.pemReadPrivate(pem)
+ def get_DER(self):
+ """
+ Get the DER value of this keypair.
+ """
- def get_public_DER(self):
- """
- Get the DER encoding of the public key from this keypair.
- """
- return self.get_POW().derWritePublic()
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWritePrivate()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
- def get_SKI(self):
- """
- Calculate the SKI of this keypair.
- """
- return self.get_POW().calculateSKI()
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this keypair.
+ """
- def get_public(self):
- """
- Convert the public key of this keypair into a PublicKey object.
- """
- return PublicKey(DER = self.get_public_DER())
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = rpki.POW.Asymmetric.derReadPrivate(self.get_DER())
+ return self.POW
-class PublicKey(DER_object):
- """
- Class to hold a public key.
- """
+ def get_PEM(self):
+ """
+ Get the PEM representation of this keypair.
+ """
- POW_class = rpki.POW.Asymmetric
+ return self.get_POW().pemWritePrivate()
- def get_DER(self):
- """
- Get the DER value of this public key.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWritePublic()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
+ def _set_PEM(self, pem):
+ """
+ Set the POW value of this keypair from a PEM string.
+ """
+
+ # pylint: disable=W0201
+ assert self.empty()
+ self.POW = self.POW_class.pemReadPrivate(pem)
+
+ def get_public_DER(self):
+ """
+ Get the DER encoding of the public key from this keypair.
+ """
+
+ return self.get_POW().derWritePublic()
+
+ def get_SKI(self):
+ """
+ Calculate the SKI of this keypair.
+ """
+
+ return self.get_POW().calculateSKI()
+
+ def get_public(self):
+ """
+ Convert the public key of this keypair into a PublicKey object.
+ """
+
+ return PublicKey(DER = self.get_public_DER())
+
+class PublicKey(DER_object):
"""
- Get the rpki.POW value of this public key.
+ Class to hold a public key.
"""
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = rpki.POW.Asymmetric.derReadPublic(self.get_DER())
- return self.POW
- def get_PEM(self):
+ POW_class = rpki.POW.Asymmetric
+
+ def get_DER(self):
+ """
+ Get the DER value of this public key.
+ """
+
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWritePublic()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
+
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this public key.
+ """
+
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = rpki.POW.Asymmetric.derReadPublic(self.get_DER())
+ return self.POW
+
+ def get_PEM(self):
+ """
+ Get the PEM representation of this public key.
+ """
+
+ return self.get_POW().pemWritePublic()
+
+ def _set_PEM(self, pem):
+ """
+ Set the POW value of this public key from a PEM string.
+ """
+
+ # pylint: disable=W0201
+ assert self.empty()
+ self.POW = self.POW_class.pemReadPublic(pem)
+
+ def get_SKI(self):
+ """
+ Calculate the SKI of this public key.
+ """
+
+ return self.get_POW().calculateSKI()
+
+class KeyParams(DER_object):
"""
- Get the PEM representation of this public key.
+ Wrapper for OpenSSL's asymmetric key parameter classes.
"""
- return self.get_POW().pemWritePublic()
- def _set_PEM(self, pem):
+ POW_class = rpki.POW.AsymmetricParams
+
+ @classmethod
+ def generateEC(cls, curve = rpki.POW.EC_P256_CURVE):
+ return cls(POW = rpki.POW.AsymmetricParams.generateEC(curve = curve))
+
+class RSA(PrivateKey):
"""
- Set the POW value of this public key from a PEM string.
+ Class to hold an RSA key pair.
"""
- assert self.empty()
- self.POW = self.POW_class.pemReadPublic(pem)
- def get_SKI(self):
+ @classmethod
+ def generate(cls, keylength = 2048, quiet = False):
+ """
+ Generate a new keypair.
+ """
+
+ if not quiet:
+ logger.debug("Generating new %d-bit RSA key", keylength)
+ if generate_insecure_debug_only_rsa_key is not None:
+ return cls(POW = generate_insecure_debug_only_rsa_key())
+ else:
+ return cls(POW = rpki.POW.Asymmetric.generateRSA(keylength))
+
+class ECDSA(PrivateKey):
"""
- Calculate the SKI of this public key.
+ Class to hold an ECDSA key pair.
"""
- return self.get_POW().calculateSKI()
-class KeyParams(DER_object):
- """
- Wrapper for OpenSSL's asymmetric key parameter classes.
- """
+ @classmethod
+ def generate(cls, params = None, quiet = False):
+ """
+ Generate a new keypair.
+ """
- POW_class = rpki.POW.AsymmetricParams
+ if params is None:
+ if not quiet:
+ logger.debug("Generating new ECDSA key parameters")
+ params = KeyParams.generateEC()
- @classmethod
- def generateEC(cls, curve = rpki.POW.EC_P256_CURVE):
- return cls(POW = rpki.POW.AsymmetricParams.generateEC(curve = curve))
+ assert isinstance(params, KeyParams)
-class RSA(PrivateKey):
- """
- Class to hold an RSA key pair.
- """
+ if not quiet:
+ logger.debug("Generating new ECDSA key")
+
+ return cls(POW = rpki.POW.Asymmetric.generateFromParams(params.get_POW()))
- @classmethod
- def generate(cls, keylength = 2048, quiet = False):
+class CMS_object(DER_object):
"""
- Generate a new keypair.
+ Abstract class to hold a CMS object.
"""
- if not quiet:
- logger.debug("Generating new %d-bit RSA key", keylength)
- if generate_insecure_debug_only_rsa_key is not None:
- return cls(POW = generate_insecure_debug_only_rsa_key())
- else:
- return cls(POW = rpki.POW.Asymmetric.generateRSA(keylength))
-class ECDSA(PrivateKey):
- """
- Class to hold an ECDSA key pair.
- """
+ econtent_oid = rpki.oids.id_data
+ POW_class = rpki.POW.CMS
- @classmethod
- def generate(cls, params = None, quiet = False):
- """
- Generate a new keypair.
- """
+ ## @var dump_on_verify_failure
+ # Set this to True to get dumpasn1 dumps of ASN.1 on CMS verify failures.
- if params is None:
- if not quiet:
- logger.debug("Generating new ECDSA key parameters")
- params = KeyParams.generateEC()
+ dump_on_verify_failure = True
- assert isinstance(params, KeyParams)
+ ## @var debug_cms_certs
+ # Set this to True to log a lot of chatter about CMS certificates.
- if not quiet:
- logger.debug("Generating new ECDSA key")
+ debug_cms_certs = False
- return cls(POW = rpki.POW.Asymmetric.generateFromParams(params.get_POW()))
+ ## @var dump_using_dumpasn1
+ # Set this to use external dumpasn1 program, which is prettier and
+ # more informative than OpenSSL's CMS text dump, but which won't
+ # work if the dumpasn1 program isn't installed.
-class CMS_object(DER_object):
- """
- Abstract class to hold a CMS object.
- """
+ dump_using_dumpasn1 = False
- econtent_oid = rpki.oids.id_data
- POW_class = rpki.POW.CMS
+ ## @var require_crls
+ # Set this to False to make CMS CRLs optional in the cases where we
+ # would otherwise require them. Some day this option should go away
+    # and CRLs should be unconditionally mandatory in such cases.
- ## @var dump_on_verify_failure
- # Set this to True to get dumpasn1 dumps of ASN.1 on CMS verify failures.
+ require_crls = False
- dump_on_verify_failure = True
+ ## @var allow_extra_certs
+ # Set this to True to allow CMS messages to contain CA certificates.
- ## @var debug_cms_certs
- # Set this to True to log a lot of chatter about CMS certificates.
+ allow_extra_certs = False
- debug_cms_certs = False
+ ## @var allow_extra_crls
+ # Set this to True to allow CMS messages to contain multiple CRLs.
- ## @var dump_using_dumpasn1
- # Set this to use external dumpasn1 program, which is prettier and
- # more informative than OpenSSL's CMS text dump, but which won't
- # work if the dumpasn1 program isn't installed.
+ allow_extra_crls = False
- dump_using_dumpasn1 = False
+ ## @var print_on_der_error
+ # Set this to True to log alleged DER when we have trouble parsing
+ # it, in case it's really a Perl backtrace or something.
- ## @var require_crls
- # Set this to False to make CMS CRLs optional in the cases where we
- # would otherwise require them. Some day this option should go away
- # and CRLs should be uncondtionally mandatory in such cases.
+ print_on_der_error = True
- require_crls = False
+ def get_DER(self):
+ """
+ Get the DER value of this CMS_object.
+ """
- ## @var allow_extra_certs
- # Set this to True to allow CMS messages to contain CA certificates.
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
- allow_extra_certs = False
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this CMS_object.
+ """
- ## @var allow_extra_crls
- # Set this to True to allow CMS messages to contain multiple CRLs.
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = self.POW_class.derRead(self.get_DER())
+ return self.POW
- allow_extra_crls = False
+ def get_signingTime(self):
+ """
+ Extract signingTime from CMS signed attributes.
+ """
- ## @var print_on_der_error
- # Set this to True to log alleged DER when we have trouble parsing
- # it, in case it's really a Perl backtrace or something.
+ return self.get_POW().signingTime()
- print_on_der_error = True
+ def verify(self, ta):
+ """
+ Verify CMS wrapper and store inner content.
+ """
- def get_DER(self):
- """
- Get the DER value of this CMS_object.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWrite()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
- """
- Get the rpki.POW value of this CMS_object.
- """
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = self.POW_class.derRead(self.get_DER())
- return self.POW
+ try:
+ cms = self.get_POW()
+ except:
+ if self.print_on_der_error:
+ logger.debug("Problem parsing DER CMS message, might not really be DER: %r",
+ self.get_DER())
+ raise rpki.exceptions.UnparsableCMSDER
- def get_signingTime(self):
- """
- Extract signingTime from CMS signed attributes.
- """
- return self.get_POW().signingTime()
+ if cms.eContentType() != self.econtent_oid:
+ raise rpki.exceptions.WrongEContentType("Got CMS eContentType %s, expected %s" % (
+ cms.eContentType(), self.econtent_oid))
- def verify(self, ta):
- """
- Verify CMS wrapper and store inner content.
- """
+ certs = [X509(POW = x) for x in cms.certs()]
+ crls = [CRL(POW = c) for c in cms.crls()]
- try:
- cms = self.get_POW()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- if self.print_on_der_error:
- logger.debug("Problem parsing DER CMS message, might not really be DER: %r",
- self.get_DER())
- raise rpki.exceptions.UnparsableCMSDER
-
- if cms.eContentType() != self.econtent_oid:
- raise rpki.exceptions.WrongEContentType("Got CMS eContentType %s, expected %s" % (
- cms.eContentType(), self.econtent_oid))
-
- certs = [X509(POW = x) for x in cms.certs()]
- crls = [CRL(POW = c) for c in cms.crls()]
-
- if self.debug_cms_certs:
- for x in certs:
- logger.debug("Received CMS cert issuer %s subject %s SKI %s",
- x.getIssuer(), x.getSubject(), x.hSKI())
- for c in crls:
- logger.debug("Received CMS CRL issuer %r", c.getIssuer())
-
- store = rpki.POW.X509Store()
-
- now = rpki.sundial.now()
-
- trusted_ee = None
-
- for x in X509.normalize_chain(ta):
- if self.debug_cms_certs:
- logger.debug("CMS trusted cert issuer %s subject %s SKI %s",
- x.getIssuer(), x.getSubject(), x.hSKI())
- if x.getNotAfter() < now:
- raise rpki.exceptions.TrustedCMSCertHasExpired("Trusted CMS certificate has expired",
- "%s (%s)" % (x.getSubject(), x.hSKI()))
- if not x.is_CA():
- if trusted_ee is None:
- trusted_ee = x
- else:
- raise rpki.exceptions.MultipleCMSEECert("Multiple CMS EE certificates", *("%s (%s)" % (
- x.getSubject(), x.hSKI()) for x in ta if not x.is_CA()))
- store.addTrust(x.get_POW())
-
- if trusted_ee:
- if self.debug_cms_certs:
- logger.debug("Trusted CMS EE cert issuer %s subject %s SKI %s",
- trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI())
- if len(certs) > 1 or (len(certs) == 1 and
- (certs[0].getSubject() != trusted_ee.getSubject() or
- certs[0].getPublicKey() != trusted_ee.getPublicKey())):
- raise rpki.exceptions.UnexpectedCMSCerts("Unexpected CMS certificates", *("%s (%s)" % (
- x.getSubject(), x.hSKI()) for x in certs))
- if crls:
- raise rpki.exceptions.UnexpectedCMSCRLs("Unexpected CRLs", *("%s (%s)" % (
- c.getIssuer(), c.hAKI()) for c in crls))
-
- else:
- untrusted_ee = [x for x in certs if not x.is_CA()]
- if len(untrusted_ee) < 1:
- raise rpki.exceptions.MissingCMSEEcert
- if len(untrusted_ee) > 1 or (not self.allow_extra_certs and len(certs) > len(untrusted_ee)):
- raise rpki.exceptions.UnexpectedCMSCerts("Unexpected CMS certificates", *("%s (%s)" % (
- x.getSubject(), x.hSKI()) for x in certs))
- if len(crls) < 1:
- if self.require_crls:
- raise rpki.exceptions.MissingCMSCRL
- else:
- logger.warning("MISSING CMS CRL! Ignoring per self.require_crls setting")
- if len(crls) > 1 and not self.allow_extra_crls:
- raise rpki.exceptions.UnexpectedCMSCRLs("Unexpected CRLs", *("%s (%s)" % (
- c.getIssuer(), c.hAKI()) for c in crls))
-
- for x in certs:
- if x.getNotAfter() < now:
- raise rpki.exceptions.CMSCertHasExpired("CMS certificate has expired", "%s (%s)" % (
- x.getSubject(), x.hSKI()))
-
- for c in crls:
- if c.getNextUpdate() < now:
- logger.warning("Stale BPKI CMS CRL (%s %s %s)", c.getNextUpdate(), c.getIssuer(), c.hAKI())
-
- try:
- content = cms.verify(store)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- if self.dump_on_verify_failure:
- if self.dump_using_dumpasn1:
- dbg = self.dumpasn1()
- else:
- dbg = cms.pprint()
- logger.warning("CMS verification failed, dumping ASN.1 (%d octets):", len(self.get_DER()))
- for line in dbg.splitlines():
- logger.warning(line)
- raise rpki.exceptions.CMSVerificationFailed("CMS verification failed")
+ if self.debug_cms_certs:
+ for x in certs:
+ logger.debug("Received CMS cert issuer %s subject %s SKI %s",
+ x.getIssuer(), x.getSubject(), x.hSKI())
+ for c in crls:
+ logger.debug("Received CMS CRL issuer %r", c.getIssuer())
+
+ now = rpki.sundial.now()
- return content
+ trusted_ee = None
+ trusted_ca = []
+ untrusted_ee = None
+
+ for x in X509.normalize_chain(ta):
+ if self.debug_cms_certs:
+ logger.debug("CMS trusted cert issuer %s subject %s SKI %s",
+ x.getIssuer(), x.getSubject(), x.hSKI())
+ if x.getNotAfter() < now:
+ raise rpki.exceptions.TrustedCMSCertHasExpired("Trusted CMS certificate has expired",
+ "%s (%s)" % (x.getSubject(), x.hSKI()))
+ if x.is_CA():
+ trusted_ca.append(x)
+ else:
+ if trusted_ee is None:
+ trusted_ee = x
+ else:
+ raise rpki.exceptions.MultipleCMSEECert("Multiple CMS EE certificates", *("%s (%s)" % (
+ x.getSubject(), x.hSKI()) for x in ta if not x.is_CA()))
+
+ if trusted_ee:
+ if self.debug_cms_certs:
+ logger.debug("Trusted CMS EE cert issuer %s subject %s SKI %s",
+ trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI())
+ if len(certs) > 1 or (len(certs) == 1 and
+ (certs[0].getSubject() != trusted_ee.getSubject() or
+ certs[0].getPublicKey() != trusted_ee.getPublicKey())):
+ raise rpki.exceptions.UnexpectedCMSCerts("Unexpected CMS certificates", *("%s (%s)" % (
+ x.getSubject(), x.hSKI()) for x in certs))
+ if crls:
+ raise rpki.exceptions.UnexpectedCMSCRLs("Unexpected CRLs", *("%s (%s)" % (
+ c.getIssuer(), c.hAKI()) for c in crls))
- def extract(self):
- """
- Extract and store inner content from CMS wrapper without verifying
- the CMS.
+ else:
+ untrusted_ee = [x for x in certs if not x.is_CA()]
+ if len(untrusted_ee) < 1:
+ raise rpki.exceptions.MissingCMSEEcert
+ if len(untrusted_ee) > 1 or (not self.allow_extra_certs and len(certs) > len(untrusted_ee)):
+ raise rpki.exceptions.UnexpectedCMSCerts("Unexpected CMS certificates", *("%s (%s)" % (
+ x.getSubject(), x.hSKI()) for x in certs))
+ untrusted_ee = untrusted_ee[0]
+ if len(crls) < 1:
+ if self.require_crls:
+ raise rpki.exceptions.MissingCMSCRL
+ else:
+ logger.warning("MISSING CMS CRL! Ignoring per self.require_crls setting")
+ if len(crls) > 1 and not self.allow_extra_crls:
+ raise rpki.exceptions.UnexpectedCMSCRLs("Unexpected CRLs", *("%s (%s)" % (
+ c.getIssuer(), c.hAKI()) for c in crls))
+
+ for x in certs:
+ if x.getNotAfter() < now:
+ raise rpki.exceptions.CMSCertHasExpired("CMS certificate has expired", "%s (%s)" % (
+ x.getSubject(), x.hSKI()))
+
+ for c in crls:
+ if c.getNextUpdate() < now:
+ logger.warning("Stale BPKI CMS CRL (%s %s %s)", c.getNextUpdate(), c.getIssuer(), c.hAKI())
+
+ # XXX Verify certificate chain via X.509 machinery, not CMS
+ # machinery. Awful mess due to history, needs cleanup, but
+ # get it working again first.
+
+ cert = (trusted_ee or untrusted_ee).get_POW()
+
+ cert.verify(trusted = (x.get_POW() for x in trusted_ca),
+ crl = crls[0].get_POW() if untrusted_ee and crls else None)
+
+ try:
+            # XXX This isn't right yet, but let's test before getting more complicated
+ #
+ # Aside from all the type and exception abominations, the
+ # main problem here is that we're no longer verifying the
+ # certificate chain, just the CMS signature. Certificate
+            # verification is a separate step under the new scheme,
+ # and probably comes before this, but let's write down
+ # what the problem is before it gets lost...
+
+ content = cms.verify(certs = (x.get_POW() for x in X509.normalize_chain(ta)),
+ flags = rpki.POW.CMS_NO_SIGNER_CERT_VERIFY)
+ except:
+ if self.dump_on_verify_failure:
+ if self.dump_using_dumpasn1:
+ dbg = self.dumpasn1()
+ else:
+ dbg = cms.pprint()
+ logger.warning("CMS verification failed, dumping ASN.1 (%d octets):", len(self.get_DER()))
+ for line in dbg.splitlines():
+ logger.warning(line)
+
+ # XXX Old code replaced rpki.POW exception with this. For
+ # debugging I'd rather see what POW has to say; decide
+ # later whether to keep this change.
+ #
+ #raise rpki.exceptions.CMSVerificationFailed("CMS verification failed")
+ raise
+
+ return content
+
+ def extract(self):
+ """
+ Extract and store inner content from CMS wrapper without verifying
+ the CMS.
+
+ DANGER WILL ROBINSON!!!
+
+ Do not use this method on unvalidated data. Use the verify()
+ method instead.
+
+ If you don't understand this warning, don't use this method.
+ """
+
+ try:
+ cms = self.get_POW()
+ except:
+ raise rpki.exceptions.UnparsableCMSDER
+
+ if cms.eContentType() != self.econtent_oid:
+ raise rpki.exceptions.WrongEContentType("Got CMS eContentType %s, expected %s" % (
+ cms.eContentType(), self.econtent_oid))
+
+ return cms.verify(flags = (rpki.POW.CMS_NOCRL | rpki.POW.CMS_NO_SIGNER_CERT_VERIFY |
+ rpki.POW.CMS_NO_ATTR_VERIFY | rpki.POW.CMS_NO_CONTENT_VERIFY))
+
+
+ def sign(self, keypair, certs, crls = None, no_certs = False):
+ """
+ Sign and wrap inner content.
+ """
+
+ if isinstance(certs, X509):
+ cert = certs
+ certs = ()
+ else:
+ cert = certs[0]
+ certs = certs[1:]
- DANGER WILL ROBINSON!!!
+ if crls is None:
+ crls = ()
+ elif isinstance(crls, CRL):
+ crls = (crls,)
- Do not use this method on unvalidated data. Use the verify()
- method instead.
+ if self.debug_cms_certs:
+ logger.debug("Signing with cert issuer %s subject %s SKI %s",
+ cert.getIssuer(), cert.getSubject(), cert.hSKI())
+ for i, c in enumerate(certs):
+ logger.debug("Additional cert %d issuer %s subject %s SKI %s",
+ i, c.getIssuer(), c.getSubject(), c.hSKI())
- If you don't understand this warning, don't use this method.
- """
+ self._sign(cert.get_POW(),
+ keypair.get_POW(),
+ [x.get_POW() for x in certs],
+ [c.get_POW() for c in crls],
+ rpki.POW.CMS_NOCERTS if no_certs else 0)
- try:
- cms = self.get_POW()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- raise rpki.exceptions.UnparsableCMSDER
+ def _sign(self, cert, keypair, certs, crls, flags):
+ raise NotImplementedError
- if cms.eContentType() != self.econtent_oid:
- raise rpki.exceptions.WrongEContentType("Got CMS eContentType %s, expected %s" % (
- cms.eContentType(), self.econtent_oid))
+ @property
+ def creation_timestamp(self):
+ """
+ Time at which this object was created.
+ """
- return cms.verify(rpki.POW.X509Store(), None,
- (rpki.POW.CMS_NOCRL | rpki.POW.CMS_NO_SIGNER_CERT_VERIFY |
- rpki.POW.CMS_NO_ATTR_VERIFY | rpki.POW.CMS_NO_CONTENT_VERIFY))
+ return self.get_signingTime()
- def sign(self, keypair, certs, crls = None, no_certs = False):
- """
- Sign and wrap inner content.
+class Wrapped_CMS_object(CMS_object):
"""
+ Abstract class to hold CMS objects wrapping non-DER content (eg, XML
+ or VCard).
- if isinstance(certs, X509):
- cert = certs
- certs = ()
- else:
- cert = certs[0]
- certs = certs[1:]
-
- if crls is None:
- crls = ()
- elif isinstance(crls, CRL):
- crls = (crls,)
-
- if self.debug_cms_certs:
- logger.debug("Signing with cert issuer %s subject %s SKI %s",
- cert.getIssuer(), cert.getSubject(), cert.hSKI())
- for i, c in enumerate(certs):
- logger.debug("Additional cert %d issuer %s subject %s SKI %s",
- i, c.getIssuer(), c.getSubject(), c.hSKI())
-
- self._sign(cert.get_POW(),
- keypair.get_POW(),
- [x.get_POW() for x in certs],
- [c.get_POW() for c in crls],
- rpki.POW.CMS_NOCERTS if no_certs else 0)
-
- @property
- def creation_timestamp(self):
- """
- Time at which this object was created.
+ CMS-wrapped objects are a little different from the other DER_object
+ types because the signed object is CMS wrapping some other kind of
+ inner content. A Wrapped_CMS_object is the outer CMS wrapped object
+ so that the usual DER and PEM operations do the obvious things, and
+    the inner content is handled via separate methods.
"""
- return self.get_signingTime()
+ other_clear = ("content",)
-class Wrapped_CMS_object(CMS_object):
- """
- Abstract class to hold CMS objects wrapping non-DER content (eg, XML
- or VCard).
+ def get_content(self):
+ """
+ Get the inner content of this Wrapped_CMS_object.
+ """
- CMS-wrapped objects are a little different from the other DER_object
- types because the signed object is CMS wrapping some other kind of
- inner content. A Wrapped_CMS_object is the outer CMS wrapped object
- so that the usual DER and PEM operations do the obvious things, and
- the inner content is handle via separate methods.
- """
+ if self.content is None:
+ raise rpki.exceptions.CMSContentNotSet("Inner content of CMS object %r is not set" % self)
+ return self.content
- other_clear = ("content",)
+ def set_content(self, content):
+ """
+ Set the (inner) content of this Wrapped_CMS_object, clearing the wrapper.
+ """
- def get_content(self):
- """
- Get the inner content of this Wrapped_CMS_object.
- """
- if self.content is None:
- raise rpki.exceptions.CMSContentNotSet("Inner content of CMS object %r is not set" % self)
- return self.content
+ # pylint: disable=W0201
+ self.clear()
+ self.content = content
- def set_content(self, content):
- """
- Set the (inner) content of this Wrapped_CMS_object, clearing the wrapper.
- """
- self.clear()
- self.content = content
+ def verify(self, ta):
+ """
+ Verify CMS wrapper and store inner content.
+ """
- def verify(self, ta):
- """
- Verify CMS wrapper and store inner content.
- """
+ self.decode(CMS_object.verify(self, ta))
+ return self.get_content()
- self.decode(CMS_object.verify(self, ta))
- return self.get_content()
+ def extract(self):
+ """
+ Extract and store inner content from CMS wrapper without verifying
+ the CMS.
- def extract(self):
- """
- Extract and store inner content from CMS wrapper without verifying
- the CMS.
+ DANGER WILL ROBINSON!!!
- DANGER WILL ROBINSON!!!
+ Do not use this method on unvalidated data. Use the verify()
+ method instead.
- Do not use this method on unvalidated data. Use the verify()
- method instead.
+ If you don't understand this warning, don't use this method.
+ """
- If you don't understand this warning, don't use this method.
- """
+ self.decode(CMS_object.extract(self))
+ return self.get_content()
- self.decode(CMS_object.extract(self))
- return self.get_content()
+ def extract_if_needed(self):
+ """
+ Extract inner content if needed. See caveats for .extract(), do
+ not use unless you really know what you are doing.
+ """
- def extract_if_needed(self):
- """
- Extract inner content if needed. See caveats for .extract(), do
- not use unless you really know what you are doing.
- """
+ if self.content is None:
+ self.extract()
+
+ def _sign(self, cert, keypair, certs, crls, flags):
+ """
+ Internal method to call POW to do CMS signature. This is split
+ out from the .sign() API method to handle differences in how
+ different CMS-based POW classes handle the inner content.
+ """
- if self.content is None:
- self.extract()
+ # pylint: disable=W0201
+ cms = self.POW_class()
+ cms.sign(cert, keypair, self.encode(), certs, crls, self.econtent_oid, flags)
+ self.POW = cms
- def _sign(self, cert, keypair, certs, crls, flags):
+ def decode(self, whatever):
+ raise NotImplementedError
+
+ def encode(self):
+ raise NotImplementedError
+
+
+class DER_CMS_object(CMS_object):
"""
- Internal method to call POW to do CMS signature. This is split
- out from the .sign() API method to handle differences in how
- different CMS-based POW classes handle the inner content.
+ Abstract class for CMS-based objects with DER-encoded content
+ handled by C-level subclasses of rpki.POW.CMS.
"""
- cms = self.POW_class()
- cms.sign(cert, keypair, self.encode(), certs, crls, self.econtent_oid, flags)
- self.POW = cms
+ def _sign(self, cert, keypair, certs, crls, flags):
+ self.get_POW().sign(cert, keypair, certs, crls, self.econtent_oid, flags)
-class DER_CMS_object(CMS_object):
- """
- Abstract class for CMS-based objects with DER-encoded content
- handled by C-level subclasses of rpki.POW.CMS.
- """
+ def extract_if_needed(self):
+ """
+ Extract inner content if needed. See caveats for .extract(), do
+ not use unless you really know what you are doing.
+ """
- def _sign(self, cert, keypair, certs, crls, flags):
- self.get_POW().sign(cert, keypair, certs, crls, self.econtent_oid, flags)
+ try:
+ self.get_POW().getVersion()
+ except rpki.POW.NotVerifiedError:
+ self.extract()
- def extract_if_needed(self):
+class SignedManifest(DER_CMS_object):
"""
- Extract inner content if needed. See caveats for .extract(), do
- not use unless you really know what you are doing.
+ Class to hold a signed manifest.
"""
- try:
- self.get_POW().getVersion()
- except rpki.POW.NotVerifiedError:
- self.extract()
+ econtent_oid = rpki.oids.id_ct_rpkiManifest
+ POW_class = rpki.POW.Manifest
+ def getThisUpdate(self):
+ """
+ Get thisUpdate value from this manifest.
+ """
-class SignedManifest(DER_CMS_object):
- """
- Class to hold a signed manifest.
- """
+ return self.get_POW().getThisUpdate()
- econtent_oid = rpki.oids.id_ct_rpkiManifest
- POW_class = rpki.POW.Manifest
+ def getNextUpdate(self):
+ """
+ Get nextUpdate value from this manifest.
+ """
- def getThisUpdate(self):
- """
- Get thisUpdate value from this manifest.
- """
- return self.get_POW().getThisUpdate()
+ return self.get_POW().getNextUpdate()
- def getNextUpdate(self):
+ @classmethod
+ def build(cls, serial, thisUpdate, nextUpdate, names_and_objs, keypair, certs, version = 0):
+ """
+ Build a signed manifest.
+ """
+
+ filelist = []
+ for name, obj in names_and_objs:
+ filelist.append((name.rpartition("/")[2], sha256(obj.get_DER())))
+ filelist.sort(key = lambda x: x[0])
+
+ obj = cls.POW_class()
+ obj.setVersion(version)
+ obj.setManifestNumber(serial)
+ obj.setThisUpdate(thisUpdate)
+ obj.setNextUpdate(nextUpdate)
+ obj.setAlgorithm(rpki.oids.id_sha256)
+ obj.addFiles(filelist)
+
+ self = cls(POW = obj)
+ self.sign(keypair, certs)
+ return self
+
+class ROA(DER_CMS_object):
"""
- Get nextUpdate value from this manifest.
+ Class to hold a signed ROA.
+ """
+
+ econtent_oid = rpki.oids.id_ct_routeOriginAttestation
+ POW_class = rpki.POW.ROA
+
+ @classmethod
+ def build(cls, asn, ipv4, ipv6, keypair, certs, version = 0):
+ """
+ Build a ROA.
+ """
+
+ ipv4 = ipv4.to_POW_roa_tuple() if ipv4 else None
+ ipv6 = ipv6.to_POW_roa_tuple() if ipv6 else None
+ obj = cls.POW_class()
+ obj.setVersion(version)
+ obj.setASID(asn)
+ obj.setPrefixes(ipv4 = ipv4, ipv6 = ipv6)
+ self = cls(POW = obj)
+ self.sign(keypair, certs)
+ return self
+
+ def tracking_data(self, uri):
+ """
+ Return a string containing data we want to log when tracking how
+ objects move through the RPKI system.
+ """
+
+ msg = DER_CMS_object.tracking_data(self, uri)
+ try:
+ self.extract_if_needed()
+ asn = self.get_POW().getASID()
+ text = []
+ for prefixes in self.get_POW().getPrefixes():
+ if prefixes is not None:
+ for prefix, prefixlen, maxprefixlen in prefixes:
+ if maxprefixlen is None or prefixlen == maxprefixlen:
+ text.append("%s/%s" % (prefix, prefixlen))
+ else:
+ text.append("%s/%s-%s" % (prefix, prefixlen, maxprefixlen))
+ text.sort()
+ msg = "%s %s %s" % (msg, asn, ",".join(text))
+ except:
+ pass
+ return msg
+
+class DeadDrop(object):
"""
- return self.get_POW().getNextUpdate()
+ Dead-drop utility for storing copies of CMS messages for debugging or
+ audit. At the moment this uses Maildir mailbox format, as it has
+ approximately the right properties and a number of useful tools for
+ manipulating it already exist.
+ """
+
+ def __init__(self, name):
+ self.name = name
+ self.pid = os.getpid()
+ self.maildir = mailbox.Maildir(name, factory = None, create = True)
+ self.warned = False
+
+ def dump(self, obj):
+ try:
+ now = time.time()
+ msg = email.mime.application.MIMEApplication(obj.get_DER(), "x-rpki")
+ msg["Date"] = email.utils.formatdate(now)
+ msg["Subject"] = "Process %s dump of %r" % (self.pid, obj)
+ msg["Message-ID"] = email.utils.make_msgid()
+ msg["X-RPKI-PID"] = str(self.pid)
+ msg["X-RPKI-Object"] = repr(obj)
+ msg["X-RPKI-Timestamp"] = "%f" % now
+ self.maildir.add(msg)
+ self.warned = False
+ except Exception, e:
+ if not self.warned:
+ logger.warning("Could not write to mailbox %s: %s", self.name, e)
+ self.warned = True
- @classmethod
- def build(cls, serial, thisUpdate, nextUpdate, names_and_objs, keypair, certs, version = 0):
+class XML_CMS_object(Wrapped_CMS_object):
"""
- Build a signed manifest.
+ Class to hold CMS-wrapped XML protocol data.
"""
- filelist = []
- for name, obj in names_and_objs:
- d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
- d.update(obj.get_DER())
- filelist.append((name.rpartition("/")[2], d.digest()))
- filelist.sort(key = lambda x: x[0])
-
- obj = cls.POW_class()
- obj.setVersion(version)
- obj.setManifestNumber(serial)
- obj.setThisUpdate(thisUpdate)
- obj.setNextUpdate(nextUpdate)
- obj.setAlgorithm(rpki.oids.id_sha256)
- obj.addFiles(filelist)
-
- self = cls(POW = obj)
- self.sign(keypair, certs)
- return self
+ econtent_oid = rpki.oids.id_ct_xml
+ encoding = None
+ schema = None
-class ROA(DER_CMS_object):
- """
- Class to hold a signed ROA.
- """
+ ## @var dump_outbound_cms
+ # If set, we write all outbound XML-CMS PDUs to disk, for debugging.
+ # If set, value should be a DeadDrop object.
- econtent_oid = rpki.oids.id_ct_routeOriginAttestation
- POW_class = rpki.POW.ROA
+ dump_outbound_cms = None
- @classmethod
- def build(cls, asn, ipv4, ipv6, keypair, certs, version = 0):
- """
- Build a ROA.
- """
- ipv4 = ipv4.to_POW_roa_tuple() if ipv4 else None
- ipv6 = ipv6.to_POW_roa_tuple() if ipv6 else None
- obj = cls.POW_class()
- obj.setVersion(version)
- obj.setASID(asn)
- obj.setPrefixes(ipv4 = ipv4, ipv6 = ipv6)
- self = cls(POW = obj)
- self.sign(keypair, certs)
- return self
-
- def tracking_data(self, uri):
- """
- Return a string containing data we want to log when tracking how
- objects move through the RPKI system.
- """
- msg = DER_CMS_object.tracking_data(self, uri)
- try:
- self.extract_if_needed()
- asn = self.get_POW().getASID()
- text = []
- for prefixes in self.get_POW().getPrefixes():
- if prefixes is not None:
- for prefix, prefixlen, maxprefixlen in prefixes:
- if maxprefixlen is None or prefixlen == maxprefixlen:
- text.append("%s/%s" % (prefix, prefixlen))
- else:
- text.append("%s/%s-%s" % (prefix, prefixlen, maxprefixlen))
- text.sort()
- msg = "%s %s %s" % (msg, asn, ",".join(text))
- except: # pylint: disable=W0702
- pass
- return msg
+ ## @var dump_inbound_cms
+ # If set, we write all inbound XML-CMS PDUs to disk, for debugging.
+ # If set, value should be a DeadDrop object.
-class DeadDrop(object):
- """
- Dead-drop utility for storing copies of CMS messages for debugging or
- audit. At the moment this uses Maildir mailbox format, as it has
- approximately the right properties and a number of useful tools for
- manipulating it already exist.
- """
-
- def __init__(self, name):
- self.name = name
- self.pid = os.getpid()
- self.maildir = mailbox.Maildir(name, factory = None, create = True)
- self.warned = False
-
- def dump(self, obj):
- try:
- now = time.time()
- msg = email.mime.application.MIMEApplication(obj.get_DER(), "x-rpki")
- msg["Date"] = email.utils.formatdate(now)
- msg["Subject"] = "Process %s dump of %r" % (self.pid, obj)
- msg["Message-ID"] = email.utils.make_msgid()
- msg["X-RPKI-PID"] = str(self.pid)
- msg["X-RPKI-Object"] = repr(obj)
- msg["X-RPKI-Timestamp"] = "%f" % now
- self.maildir.add(msg)
- self.warned = False
- except Exception, e:
- if not self.warned:
- logger.warning("Could not write to mailbox %s: %s", self.name, e)
- self.warned = True
+ dump_inbound_cms = None
-class XML_CMS_object(Wrapped_CMS_object):
- """
- Class to hold CMS-wrapped XML protocol data.
- """
+ ## @var check_inbound_schema
+ # If set, perform RelaxNG schema check on inbound messages.
- econtent_oid = rpki.oids.id_ct_xml
+ check_inbound_schema = True
- ## @var dump_outbound_cms
- # If set, we write all outbound XML-CMS PDUs to disk, for debugging.
- # If set, value should be a DeadDrop object.
+ ## @var check_outbound_schema
+ # If set, perform RelaxNG schema check on outbound messages.
- dump_outbound_cms = None
+ check_outbound_schema = True
- ## @var dump_inbound_cms
- # If set, we write all inbound XML-CMS PDUs to disk, for debugging.
- # If set, value should be a DeadDrop object.
+ def encode(self):
+ """
+ Encode inner content for signing.
+ """
- dump_inbound_cms = None
+ return lxml.etree.tostring(self.get_content(),
+ pretty_print = True,
+ encoding = self.encoding,
+ xml_declaration = True)
- ## @var check_inbound_schema
- # If set, perform RelaxNG schema check on inbound messages.
+ def decode(self, xml):
+ """
+ Decode XML and set inner content.
+ """
- check_inbound_schema = True
+ # pylint: disable=W0201
+ self.content = lxml.etree.fromstring(xml)
- ## @var check_outbound_schema
- # If set, perform RelaxNG schema check on outbound messages.
+ def pretty_print_content(self):
+ """
+ Pretty print XML content of this message.
+ """
- check_outbound_schema = False
+ return lxml.etree.tostring(self.get_content(),
+ pretty_print = True,
+ encoding = self.encoding,
+ xml_declaration = True)
- def encode(self):
- """
- Encode inner content for signing.
- """
- return lxml.etree.tostring(self.get_content(),
- pretty_print = True,
- encoding = self.encoding,
- xml_declaration = True)
+ def schema_check(self):
+ """
+ Handle XML RelaxNG schema check.
+ """
- def decode(self, xml):
- """
- Decode XML and set inner content.
- """
- self.content = lxml.etree.fromstring(xml)
+ try:
+ self.schema.assertValid(self.get_content())
+ except lxml.etree.DocumentInvalid:
+ logger.error("PDU failed schema check")
+ for line in self.pretty_print_content().splitlines():
+ logger.warning(line)
+ raise
- def pretty_print_content(self):
- """
- Pretty print XML content of this message.
- """
- return lxml.etree.tostring(self.get_content(),
- pretty_print = True,
- encoding = self.encoding,
- xml_declaration = True)
+ def dump_to_disk(self, prefix):
+ """
+ Write DER of current message to disk, for debugging.
+ """
- def schema_check(self):
- """
- Handle XML RelaxNG schema check.
- """
- try:
- self.schema.assertValid(self.get_content())
- except lxml.etree.DocumentInvalid:
- logger.error("PDU failed schema check")
- for line in self.pretty_print_content().splitlines():
- logger.warning(line)
- raise
-
- def dump_to_disk(self, prefix):
- """
- Write DER of current message to disk, for debugging.
- """
- f = open(prefix + rpki.sundial.now().isoformat() + "Z.cms", "wb")
- f.write(self.get_DER())
- f.close()
+ f = open(prefix + rpki.sundial.now().isoformat() + "Z.cms", "wb")
+ f.write(self.get_DER())
+ f.close()
- def wrap(self, msg, keypair, certs, crls = None):
- """
- Wrap an XML PDU in CMS and return its DER encoding.
- """
- if self.saxify is None:
- self.set_content(msg)
- else:
- self.set_content(msg.toXML())
- if self.check_outbound_schema:
- self.schema_check()
- self.sign(keypair, certs, crls)
- if self.dump_outbound_cms:
- self.dump_outbound_cms.dump(self)
- return self.get_DER()
-
- def unwrap(self, ta):
- """
- Unwrap a CMS-wrapped XML PDU and return Python objects.
- """
- if self.dump_inbound_cms:
- self.dump_inbound_cms.dump(self)
- self.verify(ta)
- if self.check_inbound_schema:
- self.schema_check()
- if self.saxify is None:
- return self.get_content()
- else:
- return self.saxify(self.get_content()) # pylint: disable=E1102
-
- def check_replay(self, timestamp, *context):
- """
- Check CMS signing-time in this object against a recorded
- timestamp. Raises an exception if the recorded timestamp is more
- recent, otherwise returns the new timestamp.
- """
- new_timestamp = self.get_signingTime()
- if timestamp is not None and timestamp > new_timestamp:
- if context:
- context = " (" + " ".join(context) + ")"
- raise rpki.exceptions.CMSReplay(
- "CMS replay: last message %s, this message %s%s" % (
- timestamp, new_timestamp, context))
- return new_timestamp
-
- def check_replay_sql(self, obj, *context):
+ def wrap(self, msg, keypair, certs, crls = None):
+ """
+ Wrap an XML PDU in CMS and return its DER encoding.
+ """
+
+ self.set_content(msg)
+ if self.check_outbound_schema:
+ self.schema_check()
+ self.sign(keypair, certs, crls)
+ if self.dump_outbound_cms:
+ self.dump_outbound_cms.dump(self)
+ return self.get_DER()
+
+ def unwrap(self, ta):
+ """
+ Unwrap a CMS-wrapped XML PDU and return Python objects.
+ """
+
+ if self.dump_inbound_cms:
+ self.dump_inbound_cms.dump(self)
+ self.verify(ta)
+ if self.check_inbound_schema:
+ self.schema_check()
+ return self.get_content()
+
+ def check_replay(self, timestamp, *context):
+ """
+ Check CMS signing-time in this object against a recorded
+ timestamp. Raises an exception if the recorded timestamp is more
+ recent, otherwise returns the new timestamp.
+ """
+
+ new_timestamp = self.get_signingTime()
+ if timestamp is not None and timestamp > new_timestamp:
+ if context:
+ context = " (" + " ".join(context) + ")"
+ raise rpki.exceptions.CMSReplay(
+ "CMS replay: last message %s, this message %s%s" % (
+ timestamp, new_timestamp, context))
+ return new_timestamp
+
+ def check_replay_sql(self, obj, *context):
+ """
+ Like .check_replay() but gets recorded timestamp from
+ "last_cms_timestamp" field of an SQL object and stores the new
+ timestamp back in that same field.
+ """
+
+ obj.last_cms_timestamp = self.check_replay(obj.last_cms_timestamp, *context)
+ obj.save()
+
+class SignedReferral(XML_CMS_object):
+ encoding = "us-ascii"
+ schema = rpki.relaxng.oob_setup
+
+class Ghostbuster(Wrapped_CMS_object):
"""
- Like .check_replay() but gets recorded timestamp from
- "last_cms_timestamp" field of an SQL object and stores the new
- timestamp back in that same field.
+ Class to hold Ghostbusters record (CMS-wrapped VCard). This is
+ quite minimal because we treat the VCard as an opaque byte string
+ managed by the back-end.
"""
- obj.last_cms_timestamp = self.check_replay(obj.last_cms_timestamp, *context)
- obj.sql_mark_dirty()
- ## @var saxify
- # SAX handler hook. Subclasses can set this to a SAX handler, in
- # which case .unwrap() will call it and return the result.
- # Otherwise, .unwrap() just returns a verified element tree.
+ econtent_oid = rpki.oids.id_ct_rpkiGhostbusters
- saxify = None
+ def encode(self):
+ """
+ Encode inner content for signing. At the moment we're treating
+ the VCard as an opaque byte string, so no encoding needed here.
+ """
-class SignedReferral(XML_CMS_object):
- encoding = "us-ascii"
- schema = rpki.relaxng.myrpki
- saxify = None
+ return self.get_content()
-class Ghostbuster(Wrapped_CMS_object):
- """
- Class to hold Ghostbusters record (CMS-wrapped VCard). This is
- quite minimal because we treat the VCard as an opaque byte string
- managed by the back-end.
- """
+ def decode(self, vcard):
+ """
+ Decode XML and set inner content. At the moment we're treating
+ the VCard as an opaque byte string, so no encoding needed here.
+ """
- econtent_oid = rpki.oids.id_ct_rpkiGhostbusters
+ # pylint: disable=W0201
+ self.content = vcard
- def encode(self):
- """
- Encode inner content for signing. At the moment we're treating
- the VCard as an opaque byte string, so no encoding needed here.
- """
- return self.get_content()
+ @classmethod
+ def build(cls, vcard, keypair, certs):
+ """
+ Build a Ghostbuster record.
+ """
+
+ self = cls()
+ self.set_content(vcard)
+ self.sign(keypair, certs)
+ return self
- def decode(self, vcard):
- """
- Decode XML and set inner content. At the moment we're treating
- the VCard as an opaque byte string, so no encoding needed here.
- """
- self.content = vcard
- @classmethod
- def build(cls, vcard, keypair, certs):
+class CRL(DER_object):
"""
- Build a Ghostbuster record.
+ Class to hold a Certificate Revocation List.
"""
- self = cls()
- self.set_content(vcard)
- self.sign(keypair, certs)
- return self
+ POW_class = rpki.POW.CRL
-class CRL(DER_object):
- """
- Class to hold a Certificate Revocation List.
- """
+ def get_DER(self):
+ """
+ Get the DER value of this CRL.
+ """
- POW_class = rpki.POW.CRL
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
- def get_DER(self):
- """
- Get the DER value of this CRL.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWrite()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
- """
- Get the rpki.POW value of this CRL.
- """
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = rpki.POW.CRL.derRead(self.get_DER())
- return self.POW
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this CRL.
+ """
- def getThisUpdate(self):
- """
- Get thisUpdate value from this CRL.
- """
- return self.get_POW().getThisUpdate()
+ # pylint: disable=W0201,E0203
+ self.check()
+ if not self.POW:
+ self.POW = rpki.POW.CRL.derRead(self.get_DER())
+ return self.POW
- def getNextUpdate(self):
- """
- Get nextUpdate value from this CRL.
- """
- return self.get_POW().getNextUpdate()
+ def getThisUpdate(self):
+ """
+ Get thisUpdate value from this CRL.
+ """
- def getIssuer(self):
- """
- Get issuer value of this CRL.
- """
- return X501DN.from_POW(self.get_POW().getIssuer())
+ return self.get_POW().getThisUpdate()
- def getCRLNumber(self):
- """
- Get CRL Number value for this CRL.
- """
- return self.get_POW().getCRLNumber()
+ def getNextUpdate(self):
+ """
+ Get nextUpdate value from this CRL.
+ """
- @classmethod
- def generate(cls, keypair, issuer, serial, thisUpdate, nextUpdate, revokedCertificates, version = 1):
- """
- Generate a new CRL.
- """
- crl = rpki.POW.CRL()
- crl.setVersion(version)
- crl.setIssuer(issuer.getSubject().get_POW())
- crl.setThisUpdate(thisUpdate)
- crl.setNextUpdate(nextUpdate)
- crl.setAKI(issuer.get_SKI())
- crl.setCRLNumber(serial)
- crl.addRevocations(revokedCertificates)
- crl.sign(keypair.get_POW())
- return cls(POW = crl)
-
- @property
- def creation_timestamp(self):
- """
- Time at which this object was created.
- """
- return self.getThisUpdate()
+ return self.get_POW().getNextUpdate()
+
+ def getIssuer(self):
+ """
+ Get issuer value of this CRL.
+ """
+
+ return X501DN.from_POW(self.get_POW().getIssuer())
+
+ def getCRLNumber(self):
+ """
+ Get CRL Number value for this CRL.
+ """
+
+ return self.get_POW().getCRLNumber()
+
+ @classmethod
+ def generate(cls, keypair, issuer, serial, thisUpdate, nextUpdate, revokedCertificates, version = 1):
+ """
+ Generate a new CRL.
+ """
+
+ crl = rpki.POW.CRL()
+ crl.setVersion(version)
+ crl.setIssuer(issuer.getSubject().get_POW())
+ crl.setThisUpdate(thisUpdate)
+ crl.setNextUpdate(nextUpdate)
+ crl.setAKI(issuer.get_SKI())
+ crl.setCRLNumber(serial)
+ crl.addRevocations(revokedCertificates)
+ crl.sign(keypair.get_POW())
+ return cls(POW = crl)
+
+ @property
+ def creation_timestamp(self):
+ """
+ Time at which this object was created.
+ """
+
+ return self.getThisUpdate()
## @var uri_dispatch_map
# Map of known URI filename extensions and corresponding classes.
uri_dispatch_map = {
- ".cer" : X509,
- ".crl" : CRL,
- ".gbr" : Ghostbuster,
- ".mft" : SignedManifest,
- ".mnf" : SignedManifest,
- ".roa" : ROA,
- }
+ ".cer" : X509,
+ ".crl" : CRL,
+ ".gbr" : Ghostbuster,
+ ".mft" : SignedManifest,
+ ".mnf" : SignedManifest,
+ ".roa" : ROA }
def uri_dispatch(uri):
- """
- Return the Python class object corresponding to a given URI.
- """
- return uri_dispatch_map[os.path.splitext(uri)[1]]
+ """
+ Return the Python class object corresponding to a given URI.
+ """
+
+ return uri_dispatch_map[os.path.splitext(uri)[1]]
diff --git a/rpki/xml_utils.py b/rpki/xml_utils.py
deleted file mode 100644
index c276ce98..00000000
--- a/rpki/xml_utils.py
+++ /dev/null
@@ -1,494 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2009-2012 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-"""
-XML utilities.
-"""
-
-import xml.sax
-import lxml.sax
-import lxml.etree
-import rpki.exceptions
-
-class sax_handler(xml.sax.handler.ContentHandler):
- """
- SAX handler for RPKI protocols.
-
- This class provides some basic amenities for parsing protocol XML of
- the kind we use in the RPKI protocols, including whacking all the
- protocol element text into US-ASCII, simplifying accumulation of
- text fields, and hiding some of the fun relating to XML namespaces.
-
- General assumption: by the time this parsing code gets invoked, the
- XML has already passed RelaxNG validation, so we only have to check
- for errors that the schema can't catch, and we don't have to play as
- many XML namespace games.
- """
-
- def __init__(self):
- """
- Initialize SAX handler.
- """
- xml.sax.handler.ContentHandler.__init__(self)
- self.text = ""
- self.stack = []
-
- def startElementNS(self, name, qname, attrs):
- """
- Redirect startElementNS() events to startElement().
- """
- return self.startElement(name[1], attrs)
-
- def endElementNS(self, name, qname):
- """
- Redirect endElementNS() events to endElement().
- """
- return self.endElement(name[1])
-
- def characters(self, content):
- """
- Accumulate a chuck of element content (text).
- """
- self.text += content
-
- def startElement(self, name, attrs):
- """
- Handle startElement() events.
-
- We maintain a stack of nested elements under construction so that
- we can feed events directly to the current element rather than
- having to pass them through all the nesting elements.
-
- If the stack is empty, this event is for the outermost element, so
- we call a virtual method to create the corresponding object and
- that's the object we'll be returning as our final result.
- """
-
- a = dict()
- for k, v in attrs.items():
- if isinstance(k, tuple):
- if k == ("http://www.w3.org/XML/1998/namespace", "lang"):
- k = "xml:lang"
- else:
- assert k[0] is None
- k = k[1]
- a[k.encode("ascii")] = v.encode("ascii")
- if len(self.stack) == 0:
- assert not hasattr(self, "result")
- self.result = self.create_top_level(name, a)
- self.stack.append(self.result)
- self.stack[-1].startElement(self.stack, name, a)
-
- def endElement(self, name):
- """
- Handle endElement() events. Mostly this means handling any
- accumulated element text.
- """
- text = self.text.encode("ascii").strip()
- self.text = ""
- self.stack[-1].endElement(self.stack, name, text)
-
- @classmethod
- def saxify(cls, elt):
- """
- Create a one-off SAX parser, parse an ETree, return the result.
- """
- self = cls()
- lxml.sax.saxify(elt, self)
- return self.result
-
- def create_top_level(self, name, attrs):
- """
- Handle top-level PDU for this protocol.
- """
- assert name == self.name and attrs["version"] == self.version
- return self.pdu()
-
-class base_elt(object):
- """
- Virtual base class for XML message elements. The left-right and
- publication protocols use this. At least for now, the up-down
- protocol does not, due to different design assumptions.
- """
-
- ## @var attributes
- # XML attributes for this element.
- attributes = ()
-
- ## @var elements
- # XML elements contained by this element.
- elements = ()
-
- ## @var booleans
- # Boolean attributes (value "yes" or "no") for this element.
- booleans = ()
-
- def startElement(self, stack, name, attrs):
- """
- Default startElement() handler: just process attributes.
- """
- if name not in self.elements:
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- self.read_attrs(attrs)
-
- def endElement(self, stack, name, text):
- """
- Default endElement() handler: just pop the stack.
- """
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
-
- def toXML(self):
- """
- Default toXML() element generator.
- """
- return self.make_elt()
-
- def read_attrs(self, attrs):
- """
- Template-driven attribute reader.
- """
- for key in self.attributes:
- val = attrs.get(key, None)
- if isinstance(val, str) and val.isdigit() and not key.endswith("_handle"):
- val = long(val)
- setattr(self, key, val)
- for key in self.booleans:
- setattr(self, key, attrs.get(key, False))
-
- def make_elt(self):
- """
- XML element constructor.
- """
- elt = lxml.etree.Element(self.xmlns + self.element_name, nsmap = self.nsmap)
- for key in self.attributes:
- val = getattr(self, key, None)
- if val is not None:
- elt.set(key, str(val))
- for key in self.booleans:
- if getattr(self, key, False):
- elt.set(key, "yes")
- return elt
-
- def make_b64elt(self, elt, name, value):
- """
- Constructor for Base64-encoded subelement.
- """
- if value is not None and not value.empty():
- lxml.etree.SubElement(elt, self.xmlns + name, nsmap = self.nsmap).text = value.get_Base64()
-
- def __str__(self):
- """
- Convert a base_elt object to string format.
- """
- return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "us-ascii")
-
- @classmethod
- def make_pdu(cls, **kargs):
- """
- Generic PDU constructor.
- """
- self = cls()
- for k, v in kargs.items():
- if isinstance(v, bool):
- v = 1 if v else 0
- setattr(self, k, v)
- return self
-
-class text_elt(base_elt):
- """
- Virtual base class for XML message elements that contain text.
- """
-
- ## @var text_attribute
- # Name of the class attribute that holds the text value.
- text_attribute = None
-
- def endElement(self, stack, name, text):
- """
- Extract text from parsed XML.
- """
- base_elt.endElement(self, stack, name, text)
- setattr(self, self.text_attribute, text)
-
- def toXML(self):
- """
- Insert text into generated XML.
- """
- elt = self.make_elt()
- elt.text = getattr(self, self.text_attribute) or None
- return elt
-
-class data_elt(base_elt):
- """
- Virtual base class for PDUs that map to SQL objects. These objects
- all implement the create/set/get/list/destroy action attribute.
- """
-
- def endElement(self, stack, name, text):
- """
- Default endElement handler for SQL-based objects. This assumes
- that sub-elements are Base64-encoded using the sql_template
- mechanism.
- """
- if name in self.elements:
- elt_type = self.sql_template.map.get(name)
- assert elt_type is not None, "Couldn't find element type for %s, stack %s" % (name, stack)
- setattr(self, name, elt_type(Base64 = text))
- else:
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
-
- def toXML(self):
- """
- Default element generator for SQL-based objects. This assumes
- that sub-elements are Base64-encoded DER objects.
- """
- elt = self.make_elt()
- for i in self.elements:
- self.make_b64elt(elt, i, getattr(self, i, None))
- return elt
-
- def make_reply(self, r_pdu = None):
- """
- Construct a reply PDU.
- """
- if r_pdu is None:
- r_pdu = self.__class__()
- self.make_reply_clone_hook(r_pdu)
- handle_name = self.element_name + "_handle"
- setattr(r_pdu, handle_name, getattr(self, handle_name, None))
- else:
- self.make_reply_clone_hook(r_pdu)
- for b in r_pdu.booleans:
- setattr(r_pdu, b, False)
- r_pdu.action = self.action
- r_pdu.tag = self.tag
- return r_pdu
-
- def make_reply_clone_hook(self, r_pdu):
- """
- Overridable hook.
- """
- pass
-
- def serve_fetch_one(self):
- """
- Find the object on which a get, set, or destroy method should
- operate.
- """
- r = self.serve_fetch_one_maybe()
- if r is None:
- raise rpki.exceptions.NotFound
- return r
-
- def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Overridable hook.
- """
- cb()
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Overridable hook.
- """
- cb()
-
- def serve_create(self, r_msg, cb, eb):
- """
- Handle a create action.
- """
-
- r_pdu = self.make_reply()
-
- def one():
- self.sql_store()
- setattr(r_pdu, self.sql_template.index, getattr(self, self.sql_template.index))
- self.serve_post_save_hook(self, r_pdu, two, eb)
-
- def two():
- r_msg.append(r_pdu)
- cb()
-
- oops = self.serve_fetch_one_maybe()
- if oops is not None:
- raise rpki.exceptions.DuplicateObject("Object already exists: %r[%r] %r[%r]" % (self, getattr(self, self.element_name + "_handle"),
- oops, getattr(oops, oops.element_name + "_handle")))
-
- self.serve_pre_save_hook(self, r_pdu, one, eb)
-
- def serve_set(self, r_msg, cb, eb):
- """
- Handle a set action.
- """
-
- db_pdu = self.serve_fetch_one()
- r_pdu = self.make_reply()
- for a in db_pdu.sql_template.columns[1:]:
- v = getattr(self, a, None)
- if v is not None:
- setattr(db_pdu, a, v)
- db_pdu.sql_mark_dirty()
-
- def one():
- db_pdu.sql_store()
- db_pdu.serve_post_save_hook(self, r_pdu, two, eb)
-
- def two():
- r_msg.append(r_pdu)
- cb()
-
- db_pdu.serve_pre_save_hook(self, r_pdu, one, eb)
-
- def serve_get(self, r_msg, cb, eb):
- """
- Handle a get action.
- """
- r_pdu = self.serve_fetch_one()
- self.make_reply(r_pdu)
- r_msg.append(r_pdu)
- cb()
-
- def serve_list(self, r_msg, cb, eb):
- """
- Handle a list action for non-self objects.
- """
- for r_pdu in self.serve_fetch_all():
- self.make_reply(r_pdu)
- r_msg.append(r_pdu)
- cb()
-
- def serve_destroy_hook(self, cb, eb):
- """
- Overridable hook.
- """
- cb()
-
- def serve_destroy(self, r_msg, cb, eb):
- """
- Handle a destroy action.
- """
- def done():
- db_pdu.sql_delete()
- r_msg.append(self.make_reply())
- cb()
- db_pdu = self.serve_fetch_one()
- db_pdu.serve_destroy_hook(done, eb)
-
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Action dispatch handler.
- """
- dispatch = { "create" : self.serve_create,
- "set" : self.serve_set,
- "get" : self.serve_get,
- "list" : self.serve_list,
- "destroy" : self.serve_destroy }
- if self.action not in dispatch:
- raise rpki.exceptions.BadQuery("Unexpected query: action %s" % self.action)
- dispatch[self.action](r_msg, cb, eb)
-
- def unimplemented_control(self, *controls):
- """
- Uniform handling for unimplemented control operations.
- """
- unimplemented = [x for x in controls if getattr(self, x, False)]
- if unimplemented:
- raise rpki.exceptions.NotImplementedYet("Unimplemented control %s" % ", ".join(unimplemented))
-
-class msg(list):
- """
- Generic top-level PDU.
- """
-
- def startElement(self, stack, name, attrs):
- """
- Handle top-level PDU.
- """
- if name == "msg":
- assert self.version == int(attrs["version"])
- self.type = attrs["type"]
- else:
- elt = self.pdus[name]()
- self.append(elt)
- stack.append(elt)
- elt.startElement(stack, name, attrs)
-
- def endElement(self, stack, name, text):
- """
- Handle top-level PDU.
- """
- assert name == "msg", "Unexpected name %s, stack %s" % (name, stack)
- assert len(stack) == 1
- stack.pop()
-
- def __str__(self):
- """
- Convert msg object to string.
- """
- return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "us-ascii")
-
- def toXML(self):
- """
- Generate top-level PDU.
- """
- elt = lxml.etree.Element(self.xmlns + "msg", nsmap = self.nsmap, version = str(self.version), type = self.type)
- elt.extend([i.toXML() for i in self])
- return elt
-
- @classmethod
- def query(cls, *args):
- """
- Create a query PDU.
- """
- self = cls(args)
- self.type = "query"
- return self
-
- @classmethod
- def reply(cls, *args):
- """
- Create a reply PDU.
- """
- self = cls(args)
- self.type = "reply"
- return self
-
- def is_query(self):
- """
- Is this msg a query?
- """
- return self.type == "query"
-
- def is_reply(self):
- """
- Is this msg a reply?
- """
- return self.type == "reply"