aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Makefile.in121
-rw-r--r--buildtools/build-debian-packages.py18
-rw-r--r--buildtools/build-freebsd-ports.py172
l---------buildtools/build-ubuntu-ports.py1
-rwxr-xr-xbuildtools/config.guess1266
-rwxr-xr-xbuildtools/config.sub553
-rw-r--r--buildtools/debian-skeleton/control13
-rw-r--r--buildtools/debian-skeleton/rpki-ca.default10
-rw-r--r--buildtools/debian-skeleton/rpki-ca.init.d115
-rw-r--r--buildtools/debian-skeleton/rpki-ca.install5
-rw-r--r--buildtools/debian-skeleton/rpki-ca.postinst81
-rw-r--r--buildtools/debian-skeleton/rpki-ca.postrm7
-rw-r--r--buildtools/debian-skeleton/rpki-ca.prerm55
-rw-r--r--buildtools/debian-skeleton/rpki-ca.upstart53
-rw-r--r--buildtools/debian-skeleton/rpki-rp.install7
-rw-r--r--buildtools/debian-skeleton/rpki-rp.postinst73
-rw-r--r--buildtools/debian-skeleton/rpki-rp.postrm7
-rw-r--r--buildtools/debian-skeleton/rpki-rp.prerm15
-rw-r--r--buildtools/debian-skeleton/rules5
-rw-r--r--buildtools/defstack.py26
-rw-r--r--buildtools/freebsd-skeleton/rpki-ca/Makefile9
-rw-r--r--buildtools/freebsd-skeleton/rpki-ca/files/rpki-ca.in9
-rw-r--r--buildtools/freebsd-skeleton/rpki-ca/pkg-install3
-rw-r--r--buildtools/freebsd-skeleton/rpki-rp/Makefile17
-rw-r--r--buildtools/freebsd-skeleton/rpki-rp/files/pkg-install.in65
-rw-r--r--buildtools/make-rcynic-script.py4
-rw-r--r--buildtools/make-relaxng.py16
-rw-r--r--buildtools/make-sql-schemas.py18
-rw-r--r--buildtools/make-version.py38
-rw-r--r--buildtools/pull-doc-from-wiki.py189
-rw-r--r--buildtools/pylint.rc32
-rw-r--r--ca/Makefile.in76
-rwxr-xr-xca/irbe_cli466
-rwxr-xr-xca/irdbd4
-rwxr-xr-xca/pubd4
-rwxr-xr-xca/rootd4
-rwxr-xr-xca/rpki-confgen291
-rwxr-xr-xca/rpki-manage13
-rwxr-xr-xca/rpki-nanny217
-rwxr-xr-xca/rpki-sql-backup53
-rwxr-xr-xca/rpki-sql-setup311
-rwxr-xr-xca/rpki-start-servers39
-rw-r--r--ca/rpki.wsgi6
-rwxr-xr-xca/rpkic59
-rwxr-xr-xca/rpkid4
-rwxr-xr-xca/rpkigui-apache-conf-gen765
-rwxr-xr-xca/rpkigui-import-routes1
-rwxr-xr-xca/rpkigui-query-routes17
-rwxr-xr-xca/rpkigui-rcynic18
-rw-r--r--ca/tests/Makefile.in42
-rwxr-xr-xca/tests/bgpsec-yaml.py52
-rw-r--r--ca/tests/left-right-protocol-samples.xml442
-rw-r--r--ca/tests/myrpki-xml-parse-test.py74
-rw-r--r--ca/tests/old_irdbd.py4
-rw-r--r--ca/tests/publication-control-protocol-samples.xml155
-rw-r--r--ca/tests/publication-protocol-samples.xml451
-rw-r--r--ca/tests/rcynic.conf17
-rw-r--r--ca/tests/rrdp-samples.xml81
-rw-r--r--ca/tests/smoketest.6.yaml3
-rw-r--r--ca/tests/smoketest.py2317
-rw-r--r--ca/tests/sql-cleaner.py44
-rw-r--r--ca/tests/sql-dumper.py22
-rwxr-xr-xca/tests/test-rrdp.py123
-rw-r--r--ca/tests/testpoke.py120
-rw-r--r--ca/tests/xml-parse-test.py136
-rw-r--r--ca/tests/yamlconf.py1419
-rw-r--r--ca/tests/yamltest-test-all.sh26
-rwxr-xr-x[-rw-r--r--]ca/tests/yamltest.py1617
-rw-r--r--ca/upgrade-scripts/upgrade-rpkid-to-0.5709.py40
-rwxr-xr-xconfigure308
-rw-r--r--configure.ac233
-rw-r--r--doc/doc.RPKI.CA.Configuration.web_portal7
-rw-r--r--doc/doc.RPKI.Installation10
-rw-r--r--doc/doc.RPKI.RP.rcynic2
-rw-r--r--doc/manual.pdfbin344052 -> 760086 bytes
-rw-r--r--ext/POW.c2347
-rw-r--r--h/rpki/manifest.h7
-rw-r--r--h/rpki/roa.h14
-rw-r--r--h/rpki/sk_manifest.h2
-rw-r--r--h/rpki/sk_roa.h2
-rw-r--r--potpourri/analyze-rcynic-history.py428
-rw-r--r--potpourri/analyze-transition.py102
-rw-r--r--potpourri/apnic-to-csv.py38
-rw-r--r--potpourri/arin-to-csv.py64
-rwxr-xr-xpotpourri/ca-unpickle.py1283
-rw-r--r--potpourri/cross_certify.py20
-rw-r--r--potpourri/csvgrep.py56
-rw-r--r--potpourri/django-legacy-database.README4
-rw-r--r--potpourri/django-legacy-database.tar.xzbin0 -> 10076 bytes
-rw-r--r--potpourri/expand-roa-prefixes.py76
-rw-r--r--potpourri/extract-key.py18
-rw-r--r--potpourri/fakerootd.py11
-rw-r--r--potpourri/format-application-x-rpki.py126
-rw-r--r--potpourri/gc_summary.py158
-rw-r--r--potpourri/generate-ripe-root-cert.py32
-rwxr-xr-xpotpourri/generate-root-certificate62
-rw-r--r--potpourri/gski.py4
-rw-r--r--potpourri/guess-roas-from-routeviews.py36
-rw-r--r--potpourri/iana-to-csv.py56
-rw-r--r--potpourri/missing-oids.py32
-rw-r--r--potpourri/object-dates.py38
-rw-r--r--potpourri/oob-translate.xsl81
-rw-r--r--potpourri/print-profile.py2
-rw-r--r--potpourri/rcynic-diff.py150
-rw-r--r--potpourri/ripe-asns-to-csv.py132
-rw-r--r--potpourri/ripe-to-csv.py154
-rw-r--r--potpourri/roa-to-irr.py236
-rw-r--r--potpourri/rrd-rcynic-history.py288
-rwxr-xr-xpotpourri/rrdp-fetch-from-tal229
-rwxr-xr-xpotpourri/rrdp-fetch.py68
-rwxr-xr-xpotpourri/rrdp-test-tool135
-rw-r--r--potpourri/show-key-identifiers.py60
-rw-r--r--potpourri/show-tracking-data.py18
-rw-r--r--potpourri/signed-object-dates.py26
-rw-r--r--potpourri/testbed-rootcert.py14
-rw-r--r--potpourri/translate-handles.py18
-rw-r--r--potpourri/upgrade-add-ghostbusters.py34
-rwxr-xr-xpotpourri/validation-status-sql.py333
-rw-r--r--potpourri/whack-ripe-asns.py46
-rw-r--r--potpourri/whack-ripe-prefixes.py66
-rw-r--r--potpourri/x509-dot.py156
-rw-r--r--rp/Makefile.in2
-rw-r--r--rp/config/Makefile.in88
l---------rp/config/rpki1
-rwxr-xr-xrp/config/rpki-confgen281
-rw-r--r--rp/config/rpki-confgen.xml (renamed from ca/rpki-confgen.xml)606
-rwxr-xr-xrp/config/rpki-generate-root-certificate77
-rwxr-xr-xrp/config/rpki-manage46
-rwxr-xr-xrp/config/rpki-sql-backup63
-rwxr-xr-xrp/config/rpki-sql-setup348
-rw-r--r--rp/rcynic/Makefile.in96
-rwxr-xr-xrp/rcynic/rc-scripts/darwin/RCynic42
-rw-r--r--rp/rcynic/rc-scripts/darwin/StartupParameters.plist19
-rwxr-xr-xrp/rcynic/rc-scripts/freebsd/rc.d.rcynic44
-rwxr-xr-xrp/rcynic/rcynic-cron90
-rwxr-xr-xrp/rcynic/rcynic-dump95
-rwxr-xr-xrp/rcynic/rcynic-html934
-rwxr-xr-xrp/rcynic/rcynic-svn107
-rwxr-xr-xrp/rcynic/rcynic-text144
-rw-r--r--rp/rcynic/rcynic.c4
-rwxr-xr-xrp/rcynic/rcynicng1478
-rw-r--r--rp/rcynic/rpki-torrent.py1166
-rw-r--r--rp/rcynic/rules.darwin.mk114
-rw-r--r--rp/rcynic/rules.freebsd.mk51
-rw-r--r--rp/rcynic/rules.linux.mk85
-rw-r--r--rp/rcynic/rules.unknown.mk2
-rw-r--r--rp/rcynic/sample-trust-anchors/apnic-testbed.tal.disabled (renamed from rp/rcynic/sample-trust-anchors/apnic-testbed.tal)0
-rw-r--r--rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal.disabled (renamed from rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal)0
-rw-r--r--rp/rcynic/static-rsync/Makefile.in44
-rw-r--r--rp/rcynic/static-rsync/README15
-rw-r--r--rp/rcynic/static-rsync/patches/patch-CVE-2007-409160
-rw-r--r--rp/rcynic/static-rsync/rsync-2.6.9.tar.gzbin811841 -> 0 bytes
-rwxr-xr-xrp/rcynic/validation_status15
-rwxr-xr-xrp/rpki-rtr/rpki-rtr4
-rw-r--r--rp/rpki-rtr/rules.freebsd.mk4
-rw-r--r--rp/rpki-rtr/rules.linux.mk2
-rwxr-xr-xrp/utils/find_roa233
-rwxr-xr-xrp/utils/hashdir60
-rwxr-xr-xrp/utils/print_roa89
-rwxr-xr-xrp/utils/print_rpki_manifest44
-rwxr-xr-xrp/utils/scan_roas67
-rwxr-xr-xrp/utils/scan_routercerts39
-rwxr-xr-xrp/utils/uri80
-rw-r--r--rpki/POW/__init__.py192
-rw-r--r--rpki/adns.py590
-rw-r--r--rpki/async.py433
-rw-r--r--rpki/cli.py423
-rw-r--r--rpki/config.py833
-rw-r--r--rpki/csv_utils.py164
-rw-r--r--rpki/daemonize.py89
-rw-r--r--rpki/django_settings/__init__.py (renamed from rpki/gui/cacheview/__init__.py)0
-rw-r--r--rpki/django_settings/common.py125
-rw-r--r--rpki/django_settings/gui.py159
-rw-r--r--rpki/django_settings/irdb.py47
-rw-r--r--rpki/django_settings/pubd.py45
-rw-r--r--rpki/django_settings/rcynic.py68
-rw-r--r--rpki/django_settings/rpkid.py45
-rw-r--r--rpki/exceptions.py300
-rw-r--r--rpki/fields.py205
-rw-r--r--rpki/gui/app/check_expired.py53
-rw-r--r--rpki/gui/app/forms.py194
-rw-r--r--rpki/gui/app/glue.py88
-rw-r--r--rpki/gui/app/migrations/0001_initial.py439
-rw-r--r--rpki/gui/app/models.py9
-rwxr-xr-xrpki/gui/app/range_list.py2
-rw-r--r--rpki/gui/app/south_migrations/0001_initial.py192
-rw-r--r--rpki/gui/app/south_migrations/0002_auto__add_field_resourcecert_conf.py (renamed from rpki/gui/app/migrations/0002_auto__add_field_resourcecert_conf.py)0
-rw-r--r--rpki/gui/app/south_migrations/0003_set_conf_from_parent.py (renamed from rpki/gui/app/migrations/0003_set_conf_from_parent.py)0
-rw-r--r--rpki/gui/app/south_migrations/0004_auto__chg_field_resourcecert_conf.py (renamed from rpki/gui/app/migrations/0004_auto__chg_field_resourcecert_conf.py)0
-rw-r--r--rpki/gui/app/south_migrations/0005_auto__chg_field_resourcecert_parent.py (renamed from rpki/gui/app/migrations/0005_auto__chg_field_resourcecert_parent.py)0
-rw-r--r--rpki/gui/app/south_migrations/0006_add_conf_acl.py (renamed from rpki/gui/app/migrations/0006_add_conf_acl.py)0
-rw-r--r--rpki/gui/app/south_migrations/0007_default_acls.py (renamed from rpki/gui/app/migrations/0007_default_acls.py)0
-rw-r--r--rpki/gui/app/south_migrations/0008_add_alerts.py (renamed from rpki/gui/app/migrations/0008_add_alerts.py)0
-rw-r--r--rpki/gui/app/south_migrations/__init__.py0
-rw-r--r--rpki/gui/app/views.py135
-rw-r--r--rpki/gui/cacheview/forms.py51
-rw-r--r--rpki/gui/cacheview/misc.py31
-rw-r--r--rpki/gui/cacheview/templates/cacheview/addressrange_detail.html18
-rw-r--r--rpki/gui/cacheview/templates/cacheview/cacheview_base.html10
-rw-r--r--rpki/gui/cacheview/templates/cacheview/cert_detail.html105
-rw-r--r--rpki/gui/cacheview/templates/cacheview/ghostbuster_detail.html13
-rw-r--r--rpki/gui/cacheview/templates/cacheview/global_summary.html26
-rw-r--r--rpki/gui/cacheview/templates/cacheview/query_result.html21
-rw-r--r--rpki/gui/cacheview/templates/cacheview/roa_detail.html18
-rw-r--r--rpki/gui/cacheview/templates/cacheview/search_form.html17
-rw-r--r--rpki/gui/cacheview/templates/cacheview/search_result.html42
-rw-r--r--rpki/gui/cacheview/templates/cacheview/signedobject_detail.html58
-rw-r--r--rpki/gui/cacheview/tests.py23
-rw-r--r--rpki/gui/cacheview/urls.py32
-rw-r--r--rpki/gui/cacheview/util.py441
-rw-r--r--rpki/gui/cacheview/views.py172
-rw-r--r--rpki/gui/default_settings.py188
-rw-r--r--rpki/gui/gui_rpki_cache/__init__.py0
-rw-r--r--rpki/gui/gui_rpki_cache/migrations/0001_initial.py136
-rw-r--r--rpki/gui/gui_rpki_cache/migrations/0002_auto_20160411_2311.py41
-rw-r--r--rpki/gui/gui_rpki_cache/migrations/0003_auto_20160420_2146.py24
-rw-r--r--rpki/gui/gui_rpki_cache/migrations/__init__.py0
-rw-r--r--rpki/gui/gui_rpki_cache/models.py (renamed from rpki/gui/cacheview/models.py)121
-rw-r--r--rpki/gui/gui_rpki_cache/util.py308
-rw-r--r--rpki/gui/models.py110
-rw-r--r--rpki/gui/routeview/api.py2
-rw-r--r--rpki/gui/routeview/models.py8
-rw-r--r--rpki/gui/routeview/util.py56
-rw-r--r--rpki/gui/script_util.py43
-rw-r--r--rpki/gui/urls.py3
-rw-r--r--rpki/http.py1058
-rw-r--r--rpki/http_simple.py138
-rw-r--r--rpki/ipaddrs.py151
-rw-r--r--rpki/irdb/__init__.py3
-rw-r--r--rpki/irdb/migrations/0001_initial.py362
-rw-r--r--rpki/irdb/migrations/__init__.py0
-rw-r--r--rpki/irdb/models.py854
-rw-r--r--rpki/irdb/router.py131
-rw-r--r--rpki/irdb/zookeeper.py2936
-rw-r--r--rpki/irdbd.py444
-rw-r--r--rpki/left_right.py1291
-rw-r--r--rpki/log.py266
-rw-r--r--rpki/myrpki.py4
-rw-r--r--rpki/mysql_import.py8
-rw-r--r--rpki/oids.py26
-rw-r--r--rpki/old_irdbd.py497
-rw-r--r--rpki/pubd.py350
-rw-r--r--rpki/pubdb/__init__.py0
-rw-r--r--rpki/pubdb/migrations/0001_initial.py69
-rw-r--r--rpki/pubdb/migrations/0002_auto_20160221_0617.py22
-rw-r--r--rpki/pubdb/migrations/0003_remove_delta_xml.py18
-rw-r--r--rpki/pubdb/migrations/__init__.py0
-rw-r--r--rpki/pubdb/models.py329
-rw-r--r--rpki/publication.py484
-rw-r--r--rpki/publication_control.py74
-rw-r--r--rpki/rcynic.py447
-rw-r--r--rpki/rcynicdb/__init__.py0
-rw-r--r--rpki/rcynicdb/iterator.py49
-rw-r--r--rpki/rcynicdb/migrations/0001_initial.py58
-rw-r--r--rpki/rcynicdb/migrations/0002_auto_20160227_2003.py29
-rw-r--r--rpki/rcynicdb/migrations/0003_auto_20160301_0333.py24
-rw-r--r--rpki/rcynicdb/migrations/__init__.py0
-rw-r--r--rpki/rcynicdb/models.py81
-rw-r--r--rpki/relaxng.py983
-rw-r--r--rpki/relaxng_parser.py32
-rw-r--r--rpki/resource_set.py1948
-rw-r--r--rpki/rootd.py757
-rw-r--r--rpki/rpkic.py1549
-rw-r--r--rpki/rpkid.py2990
-rw-r--r--rpki/rpkid_tasks.py1265
-rw-r--r--rpki/rpkidb/__init__.py3
-rw-r--r--rpki/rpkidb/migrations/0001_initial.py222
-rw-r--r--rpki/rpkidb/migrations/0002_root.py29
-rw-r--r--rpki/rpkidb/migrations/__init__.py0
-rw-r--r--rpki/rpkidb/models.py2466
-rwxr-xr-xrpki/rtr/bgpdump.py485
-rw-r--r--rpki/rtr/channels.py366
-rw-r--r--rpki/rtr/client.py816
-rw-r--r--rpki/rtr/generator.py959
-rw-r--r--rpki/rtr/main.py85
-rw-r--r--rpki/rtr/pdus.py990
-rw-r--r--rpki/rtr/server.py874
-rw-r--r--rpki/sql.py437
-rw-r--r--rpki/sql_schemas.py319
-rw-r--r--rpki/sundial.py459
-rw-r--r--rpki/up_down.py745
-rw-r--r--rpki/x509.py3494
-rw-r--r--rpki/xml_utils.py494
-rw-r--r--schemas/relaxng/left-right.rnc (renamed from schemas/relaxng/left-right-schema.rnc)182
-rw-r--r--schemas/relaxng/left-right.rng (renamed from schemas/relaxng/left-right-schema.rng)241
-rw-r--r--schemas/relaxng/myrpki.rng2
-rw-r--r--schemas/relaxng/oob-setup.rnc68
-rw-r--r--schemas/relaxng/oob-setup.rng168
-rw-r--r--schemas/relaxng/publication-control.rnc (renamed from schemas/relaxng/publication-schema.rnc)55
-rw-r--r--schemas/relaxng/publication-control.rng280
-rw-r--r--schemas/relaxng/publication-schema.rng577
-rw-r--r--schemas/relaxng/publication.rnc111
-rw-r--r--schemas/relaxng/publication.rng201
-rw-r--r--schemas/relaxng/router-certificate.rnc (renamed from schemas/relaxng/router-certificate-schema.rnc)0
-rw-r--r--schemas/relaxng/router-certificate.rng (renamed from schemas/relaxng/router-certificate-schema.rng)2
-rw-r--r--schemas/relaxng/rrdp.rnc81
-rw-r--r--schemas/relaxng/rrdp.rng150
-rw-r--r--schemas/relaxng/up-down.rnc (renamed from schemas/relaxng/up-down-schema.rnc)0
-rw-r--r--schemas/relaxng/up-down.rng (renamed from schemas/relaxng/up-down-schema.rng)2
-rw-r--r--schemas/sql/pubd.sql59
-rw-r--r--schemas/sql/rpkid.sql250
-rw-r--r--setup.py191
-rw-r--r--setup_extensions.py130
303 files changed, 36687 insertions, 33078 deletions
diff --git a/Makefile.in b/Makefile.in
index 8908ae32..ef929dd7 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -7,6 +7,7 @@ LIBS = @LIBS@
PYTHON = @PYTHON@
INSTALL = @INSTALL@ -m 555
TRANG = @TRANG@
+SUDO = @SUDO@
prefix = @prefix@
exec_prefix = @exec_prefix@
@@ -32,8 +33,8 @@ CA_TARGET = @CA_TARGET@
WSGI_DAEMON_PROCESS = @WSGI_DAEMON_PROCESS@
WSGI_PROCESS_GROUP = @WSGI_PROCESS_GROUP@
RCYNIC_DIR = @RCYNIC_DIR@
-RCYNIC_USER = @RCYNIC_USER@
-RCYNIC_GROUP = @RCYNIC_GROUP@
+RPKI_USER = @RPKI_USER@
+RPKI_GROUP = @RPKI_GROUP@
RCYNIC_HTML_DIR = @RCYNIC_HTML_DIR@
RRDTOOL = @RRDTOOL@
APACHE_VERSION = @APACHE_VERSION@
@@ -45,14 +46,14 @@ SETUP_PY_ROOT = `${PYTHON} -c 'import sys; print "--root " + sys.argv[1] if sys.
POW_SO = rpki/POW/_POW.so
-RNGS = schemas/relaxng/left-right-schema.rng \
- schemas/relaxng/up-down-schema.rng \
- schemas/relaxng/publication-schema.rng \
+RNGS = schemas/relaxng/left-right.rng \
+ schemas/relaxng/up-down.rng \
+ schemas/relaxng/publication.rng \
+ schemas/relaxng/publication-control.rng \
schemas/relaxng/myrpki.rng \
- schemas/relaxng/router-certificate-schema.rng
-
-SQLS = schemas/sql/rpkid.sql \
- schemas/sql/pubd.sql
+ schemas/relaxng/router-certificate.rng \
+ schemas/relaxng/rrdp.rng \
+ schemas/relaxng/oob-setup.rng
default: all
@@ -103,7 +104,6 @@ rpki-all: \
${abs_top_srcdir}/h/rpki/sk_manifest.h \
${abs_top_srcdir}/h/rpki/sk_roa.h \
${abs_top_srcdir}/rpki/relaxng.py \
- ${abs_top_srcdir}/rpki/sql_schemas.py \
${POW_SO} \
build/stamp
@@ -128,38 +128,39 @@ VERSION: .FORCE
${PYTHON} buildtools/make-version.py
rpki/autoconf.py: Makefile
- @echo 'Generating $@'; \
- (echo '# Automatically generated. DO NOT EDIT.'; \
- echo ; \
- echo 'bindir = "${bindir}"'; \
- echo 'datarootdir = "${datarootdir}"'; \
- echo 'localstatedir = "${localstatedir}"'; \
- echo 'sbindir = "${sbindir}"'; \
- echo 'sharedstatedir = "${sharedstatedir}"'; \
- echo 'sysconfdir = "${sysconfdir}"'; \
- echo 'libexecdir = "${libexecdir}"'; \
- echo ; \
- echo 'WSGI_DAEMON_PROCESS = "${WSGI_DAEMON_PROCESS}"'; \
- echo 'WSGI_PROCESS_GROUP = "${WSGI_PROCESS_GROUP}"'; \
- echo 'RCYNIC_DIR = "${RCYNIC_DIR}"'; \
- echo 'RCYNIC_USER = "${RCYNIC_USER}"'; \
- echo 'RCYNIC_GROUP = "${RCYNIC_GROUP}"'; \
- echo 'RCYNIC_HTML_DIR = "${RCYNIC_HTML_DIR}"'; \
- echo 'RRDTOOL = "${RRDTOOL}"'; \
- echo 'APACHE_VERSION = "${APACHE_VERSION}"'; \
- echo 'WSGI_PYTHON_EGG_CACHE_DIR = "${WSGI_PYTHON_EGG_CACHE_DIR}"'; \
- echo 'WSGI_PYTHON_EGG_CACHE_USER = "${WSGI_PYTHON_EGG_CACHE_USER}"'; \
+ @echo 'Generating $@'; \
+ (echo '# Automatically generated. DO NOT EDIT.'; \
+ echo ; \
+ echo 'bindir = "${bindir}"'; \
+ echo 'datarootdir = "${datarootdir}"'; \
+ echo 'localstatedir = "${localstatedir}"'; \
+ echo 'sbindir = "${sbindir}"'; \
+ echo 'sharedstatedir = "${sharedstatedir}"'; \
+ echo 'sysconfdir = "${sysconfdir}"'; \
+ echo 'libexecdir = "${libexecdir}"'; \
+ echo ; \
+ echo 'WSGI_DAEMON_PROCESS = "${WSGI_DAEMON_PROCESS}"'; \
+ echo 'WSGI_PROCESS_GROUP = "${WSGI_PROCESS_GROUP}"'; \
+ echo 'RCYNIC_DIR = "${RCYNIC_DIR}"'; \
+ echo 'RPKI_USER = "${RPKI_USER}"'; \
+ echo 'RPKI_GROUP = "${RPKI_GROUP}"'; \
+ echo 'RCYNIC_HTML_DIR = "${RCYNIC_HTML_DIR}"'; \
+ echo 'RRDTOOL = "${RRDTOOL}"'; \
+ echo 'APACHE_VERSION = "${APACHE_VERSION}"'; \
+ echo 'WSGI_PYTHON_EGG_CACHE_DIR = "${WSGI_PYTHON_EGG_CACHE_DIR}"'; \
+ echo 'WSGI_PYTHON_EGG_CACHE_USER = "${WSGI_PYTHON_EGG_CACHE_USER}"'; \
+ echo 'SUDO = "${SUDO}"'; \
) > $@
setup_autoconf.py: rpki/autoconf.py
- @echo 'Generating $@'; \
- (cat rpki/autoconf.py; \
- echo ; \
+ @echo 'Generating $@'; \
+ (cat rpki/autoconf.py; \
+ echo ; \
echo 'CFLAGS = """${CFLAGS}"""'; \
echo 'LDFLAGS = """${LDFLAGS}"""'; \
echo 'LIBS = """${LIBS}"""'; \
- echo 'RP_TARGET = """${RP_TARGET}"""'; \
- echo 'CA_TARGET = """${CA_TARGET}"""'; \
+ echo 'RP_TARGET = """${RP_TARGET}"""'; \
+ echo 'CA_TARGET = """${CA_TARGET}"""'; \
) > $@
.FORCE:
@@ -183,33 +184,43 @@ ${abs_top_srcdir}/rpki/relaxng.py: buildtools/make-relaxng.py ${RNGS}
cd schemas/relaxng; ${PYTHON} ${abs_top_srcdir}/buildtools/make-relaxng.py *.rng >$@.tmp
mv $@.tmp $@
-${abs_top_srcdir}/rpki/sql_schemas.py: buildtools/make-sql-schemas.py ${SQLS}
- cd schemas/sql; ${PYTHON} ${abs_top_srcdir}/buildtools/make-sql-schemas.py >$@.tmp
- mv $@.tmp $@
+schemas/relaxng/left-right.rng: schemas/relaxng/left-right.rnc
+ ${TRANG} schemas/relaxng/left-right.rnc schemas/relaxng/left-right.rng
-schemas/relaxng/left-right-schema.rng: schemas/relaxng/left-right-schema.rnc
- ${TRANG} schemas/relaxng/left-right-schema.rnc schemas/relaxng/left-right-schema.rng
+schemas/relaxng/up-down.rng: schemas/relaxng/up-down.rnc
+ ${TRANG} schemas/relaxng/up-down.rnc schemas/relaxng/up-down.rng
-schemas/relaxng/up-down-schema.rng: schemas/relaxng/up-down-schema.rnc
- ${TRANG} schemas/relaxng/up-down-schema.rnc schemas/relaxng/up-down-schema.rng
+schemas/relaxng/publication.rng: schemas/relaxng/publication.rnc
+ ${TRANG} schemas/relaxng/publication.rnc schemas/relaxng/publication.rng
-schemas/relaxng/publication-schema.rng: schemas/relaxng/publication-schema.rnc
- ${TRANG} schemas/relaxng/publication-schema.rnc schemas/relaxng/publication-schema.rng
+schemas/relaxng/publication-control.rng: schemas/relaxng/publication-control.rnc
+ ${TRANG} schemas/relaxng/publication-control.rnc schemas/relaxng/publication-control.rng
schemas/relaxng/myrpki.rng: schemas/relaxng/myrpki.rnc
${TRANG} schemas/relaxng/myrpki.rnc schemas/relaxng/myrpki.rng
-schemas/relaxng/router-certificate-schema.rng: schemas/relaxng/router-certificate-schema.rnc
- ${TRANG} schemas/relaxng/router-certificate-schema.rnc schemas/relaxng/router-certificate-schema.rng
+schemas/relaxng/router-certificate.rng: schemas/relaxng/router-certificate.rnc
+ ${TRANG} schemas/relaxng/router-certificate.rnc schemas/relaxng/router-certificate.rng
+
+schemas/relaxng/rrdp.rng: schemas/relaxng/rrdp.rnc
+ ${TRANG} schemas/relaxng/rrdp.rnc schemas/relaxng/rrdp.rng
+
+schemas/relaxng/oob-setup.rng: schemas/relaxng/oob-setup.rnc
+ ${TRANG} schemas/relaxng/oob-setup.rnc schemas/relaxng/oob-setup.rng
# Eg: PYLINT_FLAGS='--disable=W0311'
-lint:
- { find rpki rp ca -name '*.py' -print; find rp ca -type f -perm -1 -print | xargs grep -El '^#!.+python'; } | \
- sort -u | xargs pylint --rcfile ${abs_top_srcdir}/buildtools/pylint.rc ${PYLINT_FLAGS}
+lint: .FORCE
+ pylint --rcfile ${abs_top_srcdir}/buildtools/pylint.rc ${PYLINT_FLAGS} rpki `find rp ca -type f -perm -1 ! -name '*~' -print | xargs grep -El '^#!.+python'`
-tags: Makefile
- find rpki rp ca schemas -type f \
- \( -name '*.[ch]' -o -name '*.py' -o -name '*.sql' -o -name '*.rnc' \) \
- ! -name relaxng.py ! -name sql_schemas.py -print | \
- etags -
+tags: Makefile .FORCE
+ { find rpki rp ca schemas -type f \( -name '*.[ch]' -o -name '*.py' -o -name '*.sql' -o -name '*.rnc' \) ! -name relaxng.py -print; find rp ca -type f -perm -1 ! -name '*~' -print | xargs grep -El '^#!.+python'; } | etags -
+
+makemigrations:
+ PYTHONPATH=. RPKI_CONF=makemigrations.conf.$$$$ TEMP_DB=makemigrations.db.$$$$; export PYTHONPATH RPKI_CONF TEMP_DB; trap "rm -f $$RPKI_CONF $$TEMP_DB" 0; \
+ ${PYTHON} rp/config/rpki-confgen --read-xml rp/config/rpki-confgen.xml --autoconf --set myrpki::shared_sql_engine=sqlite3 \
+ --set myrpki::rpkid_sql_database=$$TEMP_DB --set myrpki::irdbd_sql_database=$$TEMP_DB --set myrpki::pubd_sql_database=$$TEMP_DB \
+ --pwgen myrpki::shared_sql_password --pwgen web_portal::secret-key --write-conf $$RPKI_CONF; \
+ for i in rpkid pubd irdb rcynic; do django-admin makemigrations --settings rpki.django_settings.$$i; done
+
+.FORCE:
diff --git a/buildtools/build-debian-packages.py b/buildtools/build-debian-packages.py
index 0a326da8..ee2cfdd2 100644
--- a/buildtools/build-debian-packages.py
+++ b/buildtools/build-debian-packages.py
@@ -2,11 +2,11 @@
#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2013 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
@@ -39,7 +39,19 @@ parser.add_argument("-s", "--version-suffix", nargs = "?", const = platform.linu
help = "suffix to add to version string")
args = parser.parse_args()
-version = "0." + subprocess.check_output(("svnversion", "-c")).strip().split(":")[-1]
+if os.path.exists(".svn"):
+ version = "0.{rev}".format(
+ rev = subprocess.check_output(("svnversion", "-c")).strip().split(":")[-1])
+elif os.path.exists(".git/svn"):
+ git_svn_log = subprocess.check_output(("git", "svn", "log", "--show-commit", "--oneline", "--limit=1")).split()
+ version = "0.{rev}.{count}.{time}.{commit}".format(
+ rev = git_svn_log[0][1:],
+ count = subprocess.check_output(("git", "rev-list", "--count", git_svn_log[2] + "..HEAD")).strip(),
+ time = subprocess.check_output(("git", "show", "--no-patch", "--format=%ct", "HEAD")).strip(),
+ commit = subprocess.check_output(("git", "rev-parse", "HEAD")).strip())
+ del git_svn_log
+else:
+ sys.exit("Sorry, don't know how to extract version number from this source tree")
if os.path.exists("debian"):
shutil.rmtree("debian")
diff --git a/buildtools/build-freebsd-ports.py b/buildtools/build-freebsd-ports.py
index fc35c94b..bf0b2c47 100644
--- a/buildtools/build-freebsd-ports.py
+++ b/buildtools/build-freebsd-ports.py
@@ -2,11 +2,11 @@
#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2012-2013 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
@@ -23,124 +23,134 @@ This is a script because we need to generate package lists and update
version numbers in the Makefiles.
"""
-import sys
import os
import re
-import subprocess
-import errno
+import sys
import glob
+import errno
import shutil
import argparse
+import subprocess
def check_dir(s):
- if not os.path.isdir(s):
- raise argparse.ArgumentTypeError("%r is not a directory" % s)
- return s
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("--allow-dirty", action = "store_true",
- help = "don't insist on pristine subversion checkout")
+ if not os.path.isdir(s):
+ raise argparse.ArgumentTypeError("%r is not a directory" % s)
+ return s
+
+parser = argparse.ArgumentParser(description = __doc__,
+ formatter_class = argparse.ArgumentDefaultsHelpFormatter)
+parser.add_argument("--local-dist", action = "store_true",
+ help = "generate local distribution from subversion working tree (implies --make-package)")
parser.add_argument("--make-package", action = "store_true",
help = "build binary package")
parser.add_argument("--no-clean", action = "store_true",
help = "don't clean port after staging etc (implies --no-tarball)")
parser.add_argument("--no-tarball", action = "store_true",
help = "don't create tarball of generated port")
+parser.add_argument("--portsdir", type = os.path.abspath,
+ default = os.path.abspath("freebsd-ports"),
+ help = "where to build FreeBSD port trees")
parser.add_argument("svndir", metavar = "subversion-working-directory", type = check_dir,
help = "directory containing subversion working tree")
args = parser.parse_args()
svnversion = subprocess.check_output(("svnversion", "-c", args.svndir)).strip().split(":")[-1]
-if args.allow_dirty:
- svnversion = svnversion.translate(None, "M")
+if args.local_dist:
+ svnversion = svnversion.translate(None, "M")
if not svnversion.isdigit():
- sys.exit("Sources don't look pristine, not building (%r)" % svnversion)
+ sys.exit("Sources don't look pristine, not building (%r)" % svnversion)
branch = os.path.basename(args.svndir.rstrip(os.path.sep))
if branch != "trunk" and (branch[:2] != "tk" or not branch[2:].isdigit()):
- sys.exit("Could not parse branch from working directory name, not building (%r)" % branch)
+ sys.exit("Could not parse branch from working directory name, not building (%r)" % branch)
version = "0." + svnversion
tarname = "rpki-%s-r%s" % (branch, svnversion)
tarball = tarname + ".tar.xz"
-url = "http://download.rpki.net/" + tarball
-
-portsdir = os.path.abspath("freebsd-ports")
-portsdir_old = portsdir + ".old"
-# Could perhaps use distutils.sysconfig.get_python_lib() instead of
-# this regexp hack, but would be just as complicated in its own way,
-# so just go with this for the moment.
-
-py_lib = re.compile(r"^lib/python\d+\.\d+")
-py_sitelib = re.compile(r"^lib/python\d+\.\d+/site-packages")
+portsdir_old = args.portsdir + ".old"
if os.path.isdir(portsdir_old):
- shutil.rmtree(portsdir_old)
-
-if os.path.isdir(portsdir):
- os.rename(portsdir, portsdir_old)
-
-shutil.copytree(os.path.join(args.svndir, "buildtools", "freebsd-skeleton"), portsdir)
-
-if os.path.exists(os.path.join(portsdir_old, tarball)):
- os.link(os.path.join(portsdir_old, tarball), os.path.join(portsdir, tarball))
+ shutil.rmtree(portsdir_old)
+
+if os.path.isdir(args.portsdir):
+ os.rename(args.portsdir, portsdir_old)
+
+shutil.copytree(os.path.join(args.svndir, "buildtools", "freebsd-skeleton"), args.portsdir)
+
+if args.local_dist:
+ subprocess.check_call(("svn", "export", args.svndir, os.path.join(args.portsdir, tarname)))
+ for fn, fmt in (("VERSION", "%s\n"), ("rpki/version.py", "VERSION = \"%s\"\n")):
+ with open(os.path.join(args.portsdir, tarname, fn), "w") as f:
+ f.write(fmt % version)
+ subprocess.check_call(("tar", "cJvvf", tarball, tarname), cwd = args.portsdir)
+ shutil.rmtree(os.path.join(args.portsdir, tarname))
+elif os.path.exists(os.path.join(portsdir_old, tarball)):
+ os.link(os.path.join(portsdir_old, tarball), os.path.join(args.portsdir, tarball))
elif os.path.exists(os.path.join("/usr/ports/distfiles", tarball)):
- shutil.copy(os.path.join("/usr/ports/distfiles", tarball), os.path.join(portsdir, tarball))
+ shutil.copy(os.path.join("/usr/ports/distfiles", tarball), os.path.join(args.portsdir, tarball))
if os.path.isdir(portsdir_old):
- shutil.rmtree(portsdir_old)
+ shutil.rmtree(portsdir_old)
+
+if args.make_package or args.local_dist:
+ pkgdir = os.path.join(args.portsdir, "packages")
+ os.mkdir(pkgdir)
+
+py_lib = re.compile(r"^lib/python\d+\.\d+")
+py_sitelib = re.compile(r"^lib/python\d+\.\d+/site-packages")
-if args.make_package:
- pkgdir = os.path.join(portsdir, "packages")
- os.mkdir(pkgdir)
+if args.local_dist:
+ master_site = "file://" + args.portsdir + "/"
+else:
+ master_site = "http://download.rpki.net/"
-formatdict = dict(SVNVERSION = svnversion, SVNBRANCH = branch)
+formatdict = dict(SVNVERSION = svnversion, SVNBRANCH = branch, MASTER_SITE = master_site)
keepdirs = ("usr", "etc", "bin", "var", "lib", "libexec", "sbin", "share", "etc/rc.d", "%%PYTHON_SITELIBDIR%%")
for port in ("rpki-rp", "rpki-ca"):
- base = os.path.join(portsdir, port)
- stage = os.path.join(base, "work", "stage")
- fn = os.path.join(portsdir, port, "Makefile")
- with open(fn, "r") as f:
- template = f.read()
- with open(fn, "w") as f:
- f.write(template % formatdict)
-
- subprocess.check_call(("make", "makesum", "stage", "DISTDIR=" + portsdir, "NO_DEPENDS=yes"),
- cwd = base)
-
- with open(os.path.join(base, "pkg-plist"), "w") as f:
- usr_local = None
- for dirpath, dirnames, filenames in os.walk(stage, topdown = False):
- dn = dirpath[len(stage)+1:]
- if dn.startswith("usr/local"):
- if not usr_local and usr_local is not None:
- f.write("@cwd\n")
- usr_local = True
- dn = dn[len("usr/local/"):]
- dn = py_sitelib.sub("%%PYTHON_SITELIBDIR%%", dn)
- if dn == "etc/rc.d":
- continue
- else:
- if usr_local:
- f.write("@cwd /\n")
- usr_local = False
- for fn in filenames:
- f.write(os.path.join(dn, fn) + "\n")
- if dn and dn not in keepdirs and not py_lib.match(dn):
- f.write("@dirrm %s\n" % dn)
-
- if args.make_package:
- subprocess.check_call(("make", "clean", "package", "PKGREPOSITORY=" + pkgdir), cwd = base)
-
- if not args.no_clean:
- subprocess.check_call(("make", "clean"), cwd = base)
-
- if not args.no_tarball and not args.no_clean:
- subprocess.check_call(("tar", "czf", "%s-port.tgz" % port, port), cwd = portsdir)
+ base = os.path.join(args.portsdir, port)
+ stage = os.path.join(base, "work", "stage")
+ fn = os.path.join(args.portsdir, port, "Makefile")
+ with open(fn, "r") as f:
+ template = f.read()
+ with open(fn, "w") as f:
+ f.write(template % formatdict)
+
+ subprocess.check_call(("make", "makesum", "stage", "DISTDIR=" + args.portsdir, "NO_DEPENDS=yes"),
+ cwd = base)
+
+ with open(os.path.join(base, "pkg-plist"), "w") as f:
+ usr_local = None
+ for dirpath, dirnames, filenames in os.walk(stage, topdown = False):
+ dn = dirpath[len(stage)+1:]
+ if dn.startswith("usr/local"):
+ if not usr_local and usr_local is not None:
+ f.write("@cwd\n")
+ usr_local = True
+ dn = dn[len("usr/local/"):]
+ dn = py_sitelib.sub("%%PYTHON_SITELIBDIR%%", dn)
+ if dn == "etc/rc.d":
+ continue
+ else:
+ if usr_local:
+ f.write("@cwd /\n")
+ usr_local = False
+ for fn in filenames:
+ f.write(os.path.join(dn, fn) + "\n")
+ if dn and dn not in keepdirs and not py_lib.match(dn):
+ f.write("@dirrm %s\n" % dn)
+
+ if args.make_package or args.local_dist:
+ subprocess.check_call(("make", "clean", "package", "DISTDIR=" + args.portsdir, "PKGREPOSITORY=" + pkgdir), cwd = base)
+
+ if not args.no_clean:
+ subprocess.check_call(("make", "clean"), cwd = base)
+
+ if not args.no_tarball and not args.no_clean:
+ subprocess.check_call(("tar", "czf", "%s-port.tgz" % port, port), cwd = args.portsdir)
diff --git a/buildtools/build-ubuntu-ports.py b/buildtools/build-ubuntu-ports.py
deleted file mode 120000
index 26a3dba7..00000000
--- a/buildtools/build-ubuntu-ports.py
+++ /dev/null
@@ -1 +0,0 @@
-build-debian-packages.py \ No newline at end of file
diff --git a/buildtools/config.guess b/buildtools/config.guess
index 0e30d56e..16592509 100755
--- a/buildtools/config.guess
+++ b/buildtools/config.guess
@@ -1,13 +1,12 @@
#! /bin/sh
# Attempt to guess a canonical system name.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+# Copyright 1992-2015 Free Software Foundation, Inc.
-timestamp='2003-07-02'
+timestamp='2015-08-20'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
@@ -16,24 +15,22 @@ timestamp='2003-07-02'
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-# Originally written by Per Bothner <per@bothner.com>.
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+#
+# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
#
-# The plan is that this can be called by configure scripts if you
-# don't specify an explicit build system type.
+# Please send patches to <config-patches@gnu.org>.
+
me=`echo "$0" | sed -e 's,.*/,,'`
@@ -53,8 +50,7 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
-Free Software Foundation, Inc.
+Copyright 1992-2015 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -66,11 +62,11 @@ Try \`$me --help' for more information."
while test $# -gt 0 ; do
case $1 in
--time-stamp | --time* | -t )
- echo "$timestamp" ; exit 0 ;;
+ echo "$timestamp" ; exit ;;
--version | -v )
- echo "$version" ; exit 0 ;;
+ echo "$version" ; exit ;;
--help | --h* | -h )
- echo "$usage"; exit 0 ;;
+ echo "$usage"; exit ;;
-- ) # Stop option processing
shift; break ;;
- ) # Use stdin as input.
@@ -104,7 +100,7 @@ set_cc_for_build='
trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d -q "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
{ test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
{ tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
{ echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
@@ -123,7 +119,7 @@ case $CC_FOR_BUILD,$HOST_CC,$CC in
;;
,,*) CC_FOR_BUILD=$CC ;;
,*,*) CC_FOR_BUILD=$HOST_CC ;;
-esac ;'
+esac ; set_cc_for_build= ;'
# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
# (ghazi@noc.rutgers.edu 1994-08-24)
@@ -136,12 +132,33 @@ UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+case "${UNAME_SYSTEM}" in
+Linux|GNU|GNU/*)
+ # If the system lacks a compiler, then just pick glibc.
+ # We could probably try harder.
+ LIBC=gnu
+
+ eval $set_cc_for_build
+ cat <<-EOF > $dummy.c
+ #include <features.h>
+ #if defined(__UCLIBC__)
+ LIBC=uclibc
+ #elif defined(__dietlibc__)
+ LIBC=dietlibc
+ #else
+ LIBC=gnu
+ #endif
+ EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`
+ ;;
+esac
+
# Note: order is significant - the case branches are not exclusive.
case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
*:NetBSD:*:*)
# NetBSD (nbsd) targets should (where applicable) match one or
- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
# *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
# switched to ELF, *-*-netbsd* would select the old
# object file format. This provides both forward
@@ -151,22 +168,30 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# Note: NetBSD doesn't particularly care about the vendor
# portion of the name. We always set it to "unknown".
sysctl="sysctl -n hw.machine_arch"
- UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
- /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \
+ /sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || \
+ echo unknown)`
case "${UNAME_MACHINE_ARCH}" in
armeb) machine=armeb-unknown ;;
arm*) machine=arm-unknown ;;
sh3el) machine=shl-unknown ;;
sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ earmv*)
+ arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
+ endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'`
+ machine=${arch}${endian}-unknown
+ ;;
*) machine=${UNAME_MACHINE_ARCH}-unknown ;;
esac
# The Operating System including object format, if it has switched
# to ELF recently, or will in the future.
case "${UNAME_MACHINE_ARCH}" in
- arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ arm*|earm*|i386|m68k|ns32k|sh3*|sparc|vax)
eval $set_cc_for_build
if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep __ELF__ >/dev/null
+ | grep -q __ELF__
then
# Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
# Return netbsd for either. FIX?
@@ -176,7 +201,14 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
fi
;;
*)
- os=netbsd
+ os=netbsd
+ ;;
+ esac
+ # Determine ABI tags.
+ case "${UNAME_MACHINE_ARCH}" in
+ earm*)
+ expr='s/^earmv[0-9]/-eabi/;s/eb$//'
+ abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"`
;;
esac
# The OS release
@@ -189,57 +221,46 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
release='-gnu'
;;
*)
- release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2`
;;
esac
# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
# contains redundant information, the shorter form:
# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
- echo "${machine}-${os}${release}"
- exit 0 ;;
- amiga:OpenBSD:*:*)
- echo m68k-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- arc:OpenBSD:*:*)
- echo mipsel-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- hp300:OpenBSD:*:*)
- echo m68k-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- mac68k:OpenBSD:*:*)
- echo m68k-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- macppc:OpenBSD:*:*)
- echo powerpc-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- mvme68k:OpenBSD:*:*)
- echo m68k-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- mvme88k:OpenBSD:*:*)
- echo m88k-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- mvmeppc:OpenBSD:*:*)
- echo powerpc-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- pmax:OpenBSD:*:*)
- echo mipsel-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- sgi:OpenBSD:*:*)
- echo mipseb-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- sun3:OpenBSD:*:*)
- echo m68k-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
- wgrisc:OpenBSD:*:*)
- echo mipsel-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
+ echo "${machine}-${os}${release}${abi}"
+ exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ exit ;;
*:OpenBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-openbsd${UNAME_RELEASE}
- exit 0 ;;
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:Sortix:*:*)
+ echo ${UNAME_MACHINE}-unknown-sortix
+ exit ;;
alpha:OSF1:*:*)
- if test $UNAME_RELEASE = "V4.0"; then
+ case $UNAME_RELEASE in
+ *4.0)
UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
- fi
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
# According to Compaq, /usr/sbin/psrinfo has been available on
# OSF/1 and Tru64 systems produced since 1995. I hope that
# covers most systems running today. This code pipes the CPU
@@ -277,42 +298,52 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
"EV7.9 (21364A)")
UNAME_MACHINE="alphaev79" ;;
esac
+ # A Pn.n version is a patched version.
# A Vn.n version is a released version.
# A Tn.n version is a released field test version.
# A Xn.n version is an unreleased experimental baselevel.
# 1.2 uses "1.2" for uname -r.
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[VTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- exit 0 ;;
- Alpha*:OpenVMS:*:*)
- echo alpha-hp-vms
- exit 0 ;;
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
Alpha\ *:Windows_NT*:*)
# How do we know it's Interix rather than the generic POSIX subsystem?
# Should we change UNAME_MACHINE based on the output of uname instead
# of the specific Alpha model?
echo alpha-pc-interix
- exit 0 ;;
+ exit ;;
21064:Windows_NT:50:3)
echo alpha-dec-winnt3.5
- exit 0 ;;
+ exit ;;
Amiga*:UNIX_System_V:4.0:*)
echo m68k-unknown-sysv4
- exit 0;;
+ exit ;;
*:[Aa]miga[Oo][Ss]:*:*)
echo ${UNAME_MACHINE}-unknown-amigaos
- exit 0 ;;
+ exit ;;
*:[Mm]orph[Oo][Ss]:*:*)
echo ${UNAME_MACHINE}-unknown-morphos
- exit 0 ;;
+ exit ;;
*:OS/390:*:*)
echo i370-ibm-openedition
- exit 0 ;;
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
echo arm-acorn-riscix${UNAME_RELEASE}
- exit 0;;
+ exit ;;
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
echo hppa1.1-hitachi-hiuxmpp
- exit 0;;
+ exit ;;
Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
# akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
if test "`(/bin/universe) 2>/dev/null`" = att ; then
@@ -320,32 +351,51 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
else
echo pyramid-pyramid-bsd
fi
- exit 0 ;;
+ exit ;;
NILE*:*:*:dcosx)
echo pyramid-pyramid-svr4
- exit 0 ;;
+ exit ;;
DRS?6000:unix:4.0:6*)
echo sparc-icl-nx6
- exit 0 ;;
- DRS?6000:UNIX_SV:4.2*:7*)
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
case `/usr/bin/uname -p` in
- sparc) echo sparc-icl-nx7 && exit 0 ;;
+ sparc) echo sparc-icl-nx7; exit ;;
esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
sun4H:SunOS:5.*:*)
echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit 0 ;;
+ exit ;;
sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit 0 ;;
- i86pc:SunOS:5.*:*)
- echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit 0 ;;
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval $set_cc_for_build
+ SUN_ARCH="i386"
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH="x86_64"
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
sun4*:SunOS:6*:*)
# According to config.sub, this is the proper way to canonicalize
# SunOS6. Hard to guess exactly what SunOS6 will be like, but
# it's likely to be more like Solaris than SunOS4.
echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit 0 ;;
+ exit ;;
sun4*:SunOS:*:*)
case "`/usr/bin/arch -k`" in
Series*|S4*)
@@ -354,10 +404,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
esac
# Japanese Language versions have a version number like `4.1.3-JL'.
echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
- exit 0 ;;
+ exit ;;
sun3*:SunOS:*:*)
echo m68k-sun-sunos${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
sun*:*:4.2BSD:*)
UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
@@ -369,10 +419,10 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
echo sparc-sun-sunos${UNAME_RELEASE}
;;
esac
- exit 0 ;;
+ exit ;;
aushp:SunOS:*:*)
echo sparc-auspex-sunos${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
# The situation for MiNT is a little confusing. The machine name
# can be virtually everything (everything which is not
# "atarist" or "atariste" at least should have a processor
@@ -382,38 +432,41 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# MiNT. But MiNT is downward compatible to TOS, so this should
# be no problem.
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit 0 ;;
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
echo m68k-atari-mint${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
*falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit 0 ;;
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
- echo m68k-milan-mint${UNAME_RELEASE}
- exit 0 ;;
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
- echo m68k-hades-mint${UNAME_RELEASE}
- exit 0 ;;
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
*:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
- echo m68k-unknown-mint${UNAME_RELEASE}
- exit 0 ;;
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
powerpc:machten:*:*)
echo powerpc-apple-machten${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
RISC*:Mach:*:*)
echo mips-dec-mach_bsd4.3
- exit 0 ;;
+ exit ;;
RISC*:ULTRIX:*:*)
echo mips-dec-ultrix${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
VAX*:ULTRIX*:*:*)
echo vax-dec-ultrix${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
2020:CLIX:*:* | 2430:CLIX:*:*)
echo clipper-intergraph-clix${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
mips:*:*:UMIPS | mips:*:*:RISCos)
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
@@ -437,35 +490,36 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
exit (-1);
}
EOF
- $CC_FOR_BUILD -o $dummy $dummy.c \
- && $dummy `echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` \
- && exit 0
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
echo mips-mips-riscos${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
Motorola:PowerMAX_OS:*:*)
echo powerpc-motorola-powermax
- exit 0 ;;
+ exit ;;
Motorola:*:4.3:PL8-*)
echo powerpc-harris-powermax
- exit 0 ;;
+ exit ;;
Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
echo powerpc-harris-powermax
- exit 0 ;;
+ exit ;;
Night_Hawk:Power_UNIX:*:*)
echo powerpc-harris-powerunix
- exit 0 ;;
+ exit ;;
m88k:CX/UX:7*:*)
echo m88k-harris-cxux7
- exit 0 ;;
+ exit ;;
m88k:*:4*:R4*)
echo m88k-motorola-sysv4
- exit 0 ;;
+ exit ;;
m88k:*:3*:R3*)
echo m88k-motorola-sysv3
- exit 0 ;;
+ exit ;;
AViiON:dgux:*:*)
- # DG/UX returns AViiON for all architectures
- UNAME_PROCESSOR=`/usr/bin/uname -p`
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
then
if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
@@ -478,29 +532,29 @@ EOF
else
echo i586-dg-dgux${UNAME_RELEASE}
fi
- exit 0 ;;
+ exit ;;
M88*:DolphinOS:*:*) # DolphinOS (SVR3)
echo m88k-dolphin-sysv3
- exit 0 ;;
+ exit ;;
M88*:*:R3*:*)
# Delta 88k system running SVR3
echo m88k-motorola-sysv3
- exit 0 ;;
+ exit ;;
XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
echo m88k-tektronix-sysv3
- exit 0 ;;
+ exit ;;
Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
echo m68k-tektronix-bsd
- exit 0 ;;
+ exit ;;
*:IRIX*:*:*)
echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
- exit 0 ;;
+ exit ;;
????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
- exit 0 ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
i*86:AIX:*:*)
echo i386-ibm-aix
- exit 0 ;;
+ exit ;;
ia64:AIX:*:*)
if [ -x /usr/bin/oslevel ] ; then
IBM_REV=`/usr/bin/oslevel`
@@ -508,7 +562,7 @@ EOF
IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
fi
echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
- exit 0 ;;
+ exit ;;
*:AIX:2:3)
if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
eval $set_cc_for_build
@@ -523,49 +577,54 @@ EOF
exit(0);
}
EOF
- $CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0
- echo rs6000-ibm-aix3.2.5
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
echo rs6000-ibm-aix3.2.4
else
echo rs6000-ibm-aix3.2
fi
- exit 0 ;;
- *:AIX:*:[45])
+ exit ;;
+ *:AIX:*:[4567])
IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
IBM_ARCH=rs6000
else
IBM_ARCH=powerpc
fi
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
+ if [ -x /usr/bin/lslpp ] ; then
+ IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
+ awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
else
IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
fi
echo ${IBM_ARCH}-ibm-aix${IBM_REV}
- exit 0 ;;
+ exit ;;
*:AIX:*:*)
echo rs6000-ibm-aix
- exit 0 ;;
+ exit ;;
ibmrt:4.4BSD:*|romp-ibm:BSD:*)
echo romp-ibm-bsd4.4
- exit 0 ;;
+ exit ;;
ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
- exit 0 ;; # report: romp-ibm BSD 4.3
+ exit ;; # report: romp-ibm BSD 4.3
*:BOSX:*:*)
echo rs6000-bull-bosx
- exit 0 ;;
+ exit ;;
DPX/2?00:B.O.S.:*:*)
echo m68k-bull-sysv3
- exit 0 ;;
+ exit ;;
9000/[34]??:4.3bsd:1.*:*)
echo m68k-hp-bsd
- exit 0 ;;
+ exit ;;
hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
echo m68k-hp-bsd4.4
- exit 0 ;;
+ exit ;;
9000/[34678]??:HP-UX:*:*)
HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
case "${UNAME_MACHINE}" in
@@ -574,52 +633,52 @@ EOF
9000/[678][0-9][0-9])
if [ -x /usr/bin/getconf ]; then
sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
- sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
- case "${sc_cpu_version}" in
- 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
- 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
- 532) # CPU_PA_RISC2_0
- case "${sc_kernel_bits}" in
- 32) HP_ARCH="hppa2.0n" ;;
- 64) HP_ARCH="hppa2.0w" ;;
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
'') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
- esac ;;
- esac
+ esac ;;
+ esac
fi
if [ "${HP_ARCH}" = "" ]; then
eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
+ sed 's/^ //' << EOF >$dummy.c
- #define _HPUX_SOURCE
- #include <stdlib.h>
- #include <unistd.h>
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
- int main ()
- {
- #if defined(_SC_KERNEL_BITS)
- long bits = sysconf(_SC_KERNEL_BITS);
- #endif
- long cpu = sysconf (_SC_CPU_VERSION);
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
- case CPU_PA_RISC2_0:
- #if defined(_SC_KERNEL_BITS)
- switch (bits)
- {
- case 64: puts ("hppa2.0w"); break;
- case 32: puts ("hppa2.0n"); break;
- default: puts ("hppa2.0"); break;
- } break;
- #else /* !defined(_SC_KERNEL_BITS) */
- puts ("hppa2.0"); break;
- #endif
- default: puts ("hppa1.0"); break;
- }
- exit (0);
- }
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
EOF
(CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
test -z "$HP_ARCH" && HP_ARCH=hppa
@@ -627,9 +686,19 @@ EOF
esac
if [ ${HP_ARCH} = "hppa2.0w" ]
then
- # avoid double evaluation of $set_cc_for_build
- test -n "$CC_FOR_BUILD" || eval $set_cc_for_build
- if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E -) | grep __LP64__ >/dev/null
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
then
HP_ARCH="hppa2.0w"
else
@@ -637,11 +706,11 @@ EOF
fi
fi
echo ${HP_ARCH}-hp-hpux${HPUX_REV}
- exit 0 ;;
+ exit ;;
ia64:HP-UX:*:*)
HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
echo ia64-hp-hpux${HPUX_REV}
- exit 0 ;;
+ exit ;;
3050*:HI-UX:*:*)
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
@@ -669,321 +738,348 @@ EOF
exit (0);
}
EOF
- $CC_FOR_BUILD -o $dummy $dummy.c && $dummy && exit 0
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
echo unknown-hitachi-hiuxwe2
- exit 0 ;;
+ exit ;;
9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
echo hppa1.1-hp-bsd
- exit 0 ;;
+ exit ;;
9000/8??:4.3bsd:*:*)
echo hppa1.0-hp-bsd
- exit 0 ;;
+ exit ;;
*9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
echo hppa1.0-hp-mpeix
- exit 0 ;;
+ exit ;;
hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
echo hppa1.1-hp-osf
- exit 0 ;;
+ exit ;;
hp8??:OSF1:*:*)
echo hppa1.0-hp-osf
- exit 0 ;;
+ exit ;;
i*86:OSF1:*:*)
if [ -x /usr/sbin/sysversion ] ; then
echo ${UNAME_MACHINE}-unknown-osf1mk
else
echo ${UNAME_MACHINE}-unknown-osf1
fi
- exit 0 ;;
+ exit ;;
parisc*:Lites*:*:*)
echo hppa1.1-hp-lites
- exit 0 ;;
+ exit ;;
C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
echo c1-convex-bsd
- exit 0 ;;
+ exit ;;
C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
if getsysinfo -f scalar_acc
then echo c32-convex-bsd
else echo c2-convex-bsd
fi
- exit 0 ;;
+ exit ;;
C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
echo c34-convex-bsd
- exit 0 ;;
+ exit ;;
C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
echo c38-convex-bsd
- exit 0 ;;
+ exit ;;
C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
echo c4-convex-bsd
- exit 0 ;;
+ exit ;;
CRAY*Y-MP:*:*:*)
echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit 0 ;;
+ exit ;;
CRAY*[A-Z]90:*:*:*)
echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
| sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
-e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
-e 's/\.[^.]*$/.X/'
- exit 0 ;;
+ exit ;;
CRAY*TS:*:*:*)
echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit 0 ;;
+ exit ;;
CRAY*T3E:*:*:*)
echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit 0 ;;
+ exit ;;
CRAY*SV1:*:*:*)
echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit 0 ;;
+ exit ;;
*:UNICOS/mp:*:*)
- echo nv1-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit 0 ;;
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
- echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit 0 ;;
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
sparc*:BSD/OS:*:*)
echo sparc-unknown-bsdi${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
*:BSD/OS:*:*)
echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
- exit 0 ;;
- *:FreeBSD:*:*|*:GNU/FreeBSD:*:*)
- # Determine whether the default compiler uses glibc.
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <features.h>
- #if __GLIBC__ >= 2
- LIBC=gnu
- #else
- LIBC=
- #endif
-EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
- # GNU/FreeBSD systems have a "k" prefix to indicate we are using
- # FreeBSD's kernel, but not the complete OS.
- case ${LIBC} in gnu) kernel_only='k' ;; esac
- echo ${UNAME_MACHINE}-unknown-${kernel_only}freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`${LIBC:+-$LIBC}
- exit 0 ;;
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
i*:CYGWIN*:*)
echo ${UNAME_MACHINE}-pc-cygwin
- exit 0 ;;
- i*:MINGW*:*)
+ exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
+ *:MINGW*:*)
echo ${UNAME_MACHINE}-pc-mingw32
- exit 0 ;;
+ exit ;;
+ *:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
i*:PW*:*)
echo ${UNAME_MACHINE}-pc-pw32
- exit 0 ;;
- x86:Interix*:[34]*)
- echo i586-pc-interix${UNAME_RELEASE}|sed -e 's/\..*//'
- exit 0 ;;
+ exit ;;
+ *:Interix*:*)
+ case ${UNAME_MACHINE} in
+ x86)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ esac ;;
[345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
echo i${UNAME_MACHINE}-pc-mks
- exit 0 ;;
+ exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
# How do we know it's Interix rather than the generic POSIX subsystem?
# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
# UNAME_MACHINE based on the output of uname instead of i386?
echo i586-pc-interix
- exit 0 ;;
+ exit ;;
i*:UWIN*:*)
echo ${UNAME_MACHINE}-pc-uwin
- exit 0 ;;
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
p*:CYGWIN*:*)
echo powerpcle-unknown-cygwin
- exit 0 ;;
+ exit ;;
prep*:SunOS:5.*:*)
echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit 0 ;;
+ exit ;;
*:GNU:*:*)
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
- exit 0 ;;
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+ exit ;;
i*86:Minix:*:*)
echo ${UNAME_MACHINE}-pc-minix
- exit 0 ;;
+ exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC="gnulibc1" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
arm*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit 0 ;;
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
cris:Linux:*:*)
- echo cris-axis-linux-gnu
- exit 0 ;;
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ crisv32:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ e2k:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ frv:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:Linux:*:*)
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ exit ;;
ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit 0 ;;
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit 0 ;;
- mips:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
#undef CPU
- #undef mips
- #undef mipsel
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mipsel
+ CPU=${UNAME_MACHINE}el
#else
#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips
+ CPU=${UNAME_MACHINE}
#else
CPU=
#endif
#endif
EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=`
- test x"${CPU}" != x && echo "${CPU}-unknown-linux-gnu" && exit 0
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
;;
- mips64:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips64
- #undef mips64el
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mips64el
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips64
- #else
- CPU=
- #endif
- #endif
-EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^CPU=`
- test x"${CPU}" != x && echo "${CPU}-unknown-linux-gnu" && exit 0
- ;;
- ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
- exit 0 ;;
- ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
- exit 0 ;;
- alpha:Linux:*:*)
- case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
- EV5) UNAME_MACHINE=alphaev5 ;;
- EV56) UNAME_MACHINE=alphaev56 ;;
- PCA56) UNAME_MACHINE=alphapca56 ;;
- PCA57) UNAME_MACHINE=alphapca56 ;;
- EV6) UNAME_MACHINE=alphaev6 ;;
- EV67) UNAME_MACHINE=alphaev67 ;;
- EV68*) UNAME_MACHINE=alphaev68 ;;
- esac
- objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
- exit 0 ;;
+ openrisc*:Linux:*:*)
+ echo or1k-unknown-linux-${LIBC}
+ exit ;;
+ or32:Linux:*:* | or1k*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-${LIBC}
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-${LIBC}
+ exit ;;
parisc:Linux:*:* | hppa:Linux:*:*)
# Look for CPU level
case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-gnu ;;
- PA8*) echo hppa2.0-unknown-linux-gnu ;;
- *) echo hppa-unknown-linux-gnu ;;
+ PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+ PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+ *) echo hppa-unknown-linux-${LIBC} ;;
esac
- exit 0 ;;
- parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
- exit 0 ;;
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-${LIBC}
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-${LIBC}
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-${LIBC}
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-${LIBC}
+ exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
- exit 0 ;;
+ echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
+ exit ;;
sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit 0 ;;
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit 0 ;;
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit 0 ;;
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-${LIBC}
+ exit ;;
x86_64:Linux:*:*)
- echo x86_64-unknown-linux-gnu
- exit 0 ;;
- i*86:Linux:*:*)
- # The BFD linker knows what the default object file format is, so
- # first see if it will tell us. cd to the root directory to prevent
- # problems with other programs or directories called `ld' in the path.
- # Set LC_ALL=C to ensure ld outputs messages in English.
- ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
- | sed -ne '/supported targets:/!d
- s/[ ][ ]*/ /g
- s/.*supported targets: *//
- s/ .*//
- p'`
- case "$ld_supported_targets" in
- elf32-i386)
- TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
- ;;
- a.out-i386-linux)
- echo "${UNAME_MACHINE}-pc-linux-gnuaout"
- exit 0 ;;
- coff-i386)
- echo "${UNAME_MACHINE}-pc-linux-gnucoff"
- exit 0 ;;
- "")
- # Either a pre-BFD a.out linker (linux-gnuoldld) or
- # one that does not give us useful --help.
- echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
- exit 0 ;;
- esac
- # Determine whether the default compiler is a.out or elf
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <features.h>
- #ifdef __ELF__
- # ifdef __GLIBC__
- # if __GLIBC__ >= 2
- LIBC=gnu
- # else
- LIBC=gnulibc1
- # endif
- # else
- LIBC=gnulibc1
- # endif
- #else
- #ifdef __INTEL_COMPILER
- LIBC=gnu
- #else
- LIBC=gnuaout
- #endif
- #endif
-EOF
- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep ^LIBC=`
- test x"${LIBC}" != x && echo "${UNAME_MACHINE}-pc-linux-${LIBC}" && exit 0
- test x"${TENTATIVE}" != x && echo "${TENTATIVE}" && exit 0
- ;;
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
i*86:DYNIX/ptx:4*:*)
# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
# earlier versions are messed up and put the nodename in both
# sysname and nodename.
echo i386-sequent-sysv4
- exit 0 ;;
+ exit ;;
i*86:UNIX_SV:4.2MP:2.*)
- # Unixware is an offshoot of SVR4, but it has its own version
- # number series starting with 2...
- # I am not positive that other SVR4 systems won't match this,
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
# I just have to hope. -- rms.
- # Use sysv4.2uw... so that sysv4* matches it.
+ # Use sysv4.2uw... so that sysv4* matches it.
echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
- exit 0 ;;
+ exit ;;
i*86:OS/2:*:*)
# If we were able to find `uname', then EMX Unix compatibility
# is probably installed.
echo ${UNAME_MACHINE}-pc-os2-emx
- exit 0 ;;
+ exit ;;
i*86:XTS-300:*:STOP)
echo ${UNAME_MACHINE}-unknown-stop
- exit 0 ;;
+ exit ;;
i*86:atheos:*:*)
echo ${UNAME_MACHINE}-unknown-atheos
- exit 0 ;;
- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
echo i386-unknown-lynxos${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
i*86:*DOS:*:*)
echo ${UNAME_MACHINE}-pc-msdosdjgpp
- exit 0 ;;
+ exit ;;
i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
@@ -991,15 +1087,16 @@ EOF
else
echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
fi
- exit 0 ;;
- i*86:*:5:[78]*)
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
case `/bin/uname -X | grep "^Machine"` in
*486*) UNAME_MACHINE=i486 ;;
*Pentium) UNAME_MACHINE=i586 ;;
*Pent*|*Celeron) UNAME_MACHINE=i686 ;;
esac
echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
- exit 0 ;;
+ exit ;;
i*86:*:3.2:*)
if test -f /usr/options/cb.name; then
UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
@@ -1017,73 +1114,86 @@ EOF
else
echo ${UNAME_MACHINE}-pc-sysv32
fi
- exit 0 ;;
+ exit ;;
pc:*:*:*)
# Left here for compatibility:
- # uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i386.
- echo i386-pc-msdosdjgpp
- exit 0 ;;
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configury will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
Intel:Mach:3*:*)
echo i386-pc-mach3
- exit 0 ;;
+ exit ;;
paragon:*:*:*)
echo i860-intel-osf1
- exit 0 ;;
+ exit ;;
i860:*:4.*:*) # i860-SVR4
if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
else # Add other i860-SVR4 vendors below as they are discovered.
echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
fi
- exit 0 ;;
+ exit ;;
mini*:CTIX:SYS*5:*)
# "miniframe"
echo m68010-convergent-sysv
- exit 0 ;;
+ exit ;;
mc68k:UNIX:SYSTEM5:3.51m)
echo m68k-convergent-sysv
- exit 0 ;;
+ exit ;;
M680?0:D-NIX:5.3:*)
echo m68k-diab-dnix
- exit 0 ;;
- M68*:*:R3V[567]*:*)
- test -r /sysV68 && echo 'm68k-motorola-sysv' && exit 0 ;;
- 3[34]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0)
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
OS_REL=''
test -r /etc/.relid \
&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && echo i486-ncr-sysv4.3${OS_REL} && exit 0
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && echo i586-ncr-sysv4.3${OS_REL} && exit 0 ;;
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && echo i486-ncr-sysv4 && exit 0 ;;
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
echo m68k-unknown-lynxos${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
mc68030:UNIX_System_V:4.*:*)
echo m68k-atari-sysv4
- exit 0 ;;
+ exit ;;
TSUNAMI:LynxOS:2.*:*)
echo sparc-unknown-lynxos${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
rs6000:LynxOS:2.*:*)
echo rs6000-unknown-lynxos${UNAME_RELEASE}
- exit 0 ;;
- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
echo powerpc-unknown-lynxos${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
SM[BE]S:UNIX_SV:*:*)
echo mips-dde-sysv${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
RM*:ReliantUNIX-*:*:*)
echo mips-sni-sysv4
- exit 0 ;;
+ exit ;;
RM*:SINIX-*:*:*)
echo mips-sni-sysv4
- exit 0 ;;
+ exit ;;
*:SINIX-*:*:*)
if uname -p 2>/dev/null >/dev/null ; then
UNAME_MACHINE=`(uname -p) 2>/dev/null`
@@ -1091,68 +1201,109 @@ EOF
else
echo ns32k-sni-sysv
fi
- exit 0 ;;
- PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
- # says <Richard.M.Bartel@ccMail.Census.GOV>
- echo i586-unisys-sysv4
- exit 0 ;;
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
*:UNIX_System_V:4*:FTX*)
# From Gerald Hewes <hewes@openmarket.com>.
# How about differentiating between stratus architectures? -djm
echo hppa1.1-stratus-sysv4
- exit 0 ;;
+ exit ;;
*:*:*:FTX*)
# From seanf@swdc.stratus.com.
echo i860-stratus-sysv4
- exit 0 ;;
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
*:VOS:*:*)
# From Paul.Green@stratus.com.
echo hppa1.1-stratus-vos
- exit 0 ;;
+ exit ;;
mc68*:A/UX:*:*)
echo m68k-apple-aux${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
news*:NEWS-OS:6*:*)
echo mips-sony-newsos6
- exit 0 ;;
+ exit ;;
R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
if [ -d /usr/nec ]; then
- echo mips-nec-sysv${UNAME_RELEASE}
+ echo mips-nec-sysv${UNAME_RELEASE}
else
- echo mips-unknown-sysv${UNAME_RELEASE}
+ echo mips-unknown-sysv${UNAME_RELEASE}
fi
- exit 0 ;;
+ exit ;;
BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
echo powerpc-be-beos
- exit 0 ;;
+ exit ;;
BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
echo powerpc-apple-beos
- exit 0 ;;
+ exit ;;
BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
echo i586-pc-beos
- exit 0 ;;
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
SX-4:SUPER-UX:*:*)
echo sx4-nec-superux${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
SX-5:SUPER-UX:*:*)
echo sx5-nec-superux${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
SX-6:SUPER-UX:*:*)
echo sx6-nec-superux${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux${UNAME_RELEASE}
+ exit ;;
Power*:Rhapsody:*:*)
echo powerpc-apple-rhapsody${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
*:Rhapsody:*:*)
echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
*:Darwin:*:*)
- case `uname -p` in
- *86) UNAME_PROCESSOR=i686 ;;
- powerpc) UNAME_PROCESSOR=powerpc ;;
- esac
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ eval $set_cc_for_build
+ if test "$UNAME_PROCESSOR" = unknown ; then
+ UNAME_PROCESSOR=powerpc
+ fi
+ if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ fi
+ elif test "$UNAME_PROCESSOR" = i386 ; then
+ # Avoid executing cc on OS X 10.9, as it ships with a stub
+ # that puts up a graphical alert prompting to install
+ # developer tools. Any system running Mac OS X 10.7 or
+ # later (Darwin 11 and later) is required to have a 64-bit
+ # processor. This is not true of the ARM version of Darwin
+ # that Apple uses in portable devices.
+ UNAME_PROCESSOR=x86_64
+ fi
echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
*:procnto*:*:* | *:QNX:[0123456789]*:*)
UNAME_PROCESSOR=`uname -p`
if test "$UNAME_PROCESSOR" = "x86"; then
@@ -1160,22 +1311,28 @@ EOF
UNAME_MACHINE=pc
fi
echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
*:QNX:*:4*)
echo i386-pc-qnx
- exit 0 ;;
- NSR-[DGKLNPTVW]:NONSTOP_KERNEL:*:*)
+ exit ;;
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-*:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
echo nsr-tandem-nsk${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
*:NonStop-UX:*:*)
echo mips-compaq-nonstopux
- exit 0 ;;
+ exit ;;
BS2000:POSIX*:*:*)
echo bs2000-siemens-sysv
- exit 0 ;;
+ exit ;;
DS/*:UNIX_System_V:*:*)
echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
- exit 0 ;;
+ exit ;;
*:Plan9:*:*)
# "uname -m" is not consistent, so use $cputype instead. 386
# is converted to i386 for consistency with other x86
@@ -1186,180 +1343,55 @@ EOF
UNAME_MACHINE="$cputype"
fi
echo ${UNAME_MACHINE}-unknown-plan9
- exit 0 ;;
+ exit ;;
*:TOPS-10:*:*)
echo pdp10-unknown-tops10
- exit 0 ;;
+ exit ;;
*:TENEX:*:*)
echo pdp10-unknown-tenex
- exit 0 ;;
+ exit ;;
KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
echo pdp10-dec-tops20
- exit 0 ;;
+ exit ;;
XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
echo pdp10-xkl-tops20
- exit 0 ;;
+ exit ;;
*:TOPS-20:*:*)
echo pdp10-unknown-tops20
- exit 0 ;;
+ exit ;;
*:ITS:*:*)
echo pdp10-unknown-its
- exit 0 ;;
+ exit ;;
SEI:*:*:SEIUX)
- echo mips-sei-seiux${UNAME_RELEASE}
- exit 0 ;;
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
esac
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
-eval $set_cc_for_build
-cat >$dummy.c <<EOF
-#ifdef _SEQUENT_
-# include <sys/types.h>
-# include <sys/utsname.h>
-#endif
-main ()
-{
-#if defined (sony)
-#if defined (MIPSEB)
- /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
- I don't know.... */
- printf ("mips-sony-bsd\n"); exit (0);
-#else
-#include <sys/param.h>
- printf ("m68k-sony-newsos%s\n",
-#ifdef NEWSOS4
- "4"
-#else
- ""
-#endif
- ); exit (0);
-#endif
-#endif
-
-#if defined (__arm) && defined (__acorn) && defined (__unix)
- printf ("arm-acorn-riscix"); exit (0);
-#endif
-
-#if defined (hp300) && !defined (hpux)
- printf ("m68k-hp-bsd\n"); exit (0);
-#endif
-
-#if defined (NeXT)
-#if !defined (__ARCHITECTURE__)
-#define __ARCHITECTURE__ "m68k"
-#endif
- int version;
- version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
- if (version < 4)
- printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
- else
- printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
- exit (0);
-#endif
-
-#if defined (MULTIMAX) || defined (n16)
-#if defined (UMAXV)
- printf ("ns32k-encore-sysv\n"); exit (0);
-#else
-#if defined (CMU)
- printf ("ns32k-encore-mach\n"); exit (0);
-#else
- printf ("ns32k-encore-bsd\n"); exit (0);
-#endif
-#endif
-#endif
-
-#if defined (__386BSD__)
- printf ("i386-pc-bsd\n"); exit (0);
-#endif
-
-#if defined (sequent)
-#if defined (i386)
- printf ("i386-sequent-dynix\n"); exit (0);
-#endif
-#if defined (ns32000)
- printf ("ns32k-sequent-dynix\n"); exit (0);
-#endif
-#endif
-
-#if defined (_SEQUENT_)
- struct utsname un;
-
- uname(&un);
-
- if (strncmp(un.version, "V2", 2) == 0) {
- printf ("i386-sequent-ptx2\n"); exit (0);
- }
- if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
- printf ("i386-sequent-ptx1\n"); exit (0);
- }
- printf ("i386-sequent-ptx\n"); exit (0);
-
-#endif
-
-#if defined (vax)
-# if !defined (ultrix)
-# include <sys/param.h>
-# if defined (BSD)
-# if BSD == 43
- printf ("vax-dec-bsd4.3\n"); exit (0);
-# else
-# if BSD == 199006
- printf ("vax-dec-bsd4.3reno\n"); exit (0);
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# endif
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# else
- printf ("vax-dec-ultrix\n"); exit (0);
-# endif
-#endif
-
-#if defined (alliant) && defined (i860)
- printf ("i860-alliant-bsd\n"); exit (0);
-#endif
-
- exit (1);
-}
-EOF
-
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && $dummy && exit 0
-
-# Apollos put the system type in the environment.
-
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit 0; }
-
-# Convex versions that predate uname can use getsysinfo(1)
-
-if [ -x /usr/convex/getsysinfo ]
-then
- case `getsysinfo -f cpu_type` in
- c1*)
- echo c1-convex-bsd
- exit 0 ;;
- c2*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit 0 ;;
- c34*)
- echo c34-convex-bsd
- exit 0 ;;
- c38*)
- echo c38-convex-bsd
- exit 0 ;;
- c4*)
- echo c4-convex-bsd
- exit 0 ;;
- esac
-fi
-
cat >&2 <<EOF
$0: unable to guess system type
@@ -1367,7 +1399,9 @@ This script, last modified $timestamp, has failed to recognize
the operating system you are using. It is advised that you
download the most up to date version of the config scripts from
- ftp://ftp.gnu.org/pub/gnu/config/
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
If the version you run ($0) is already up to date, please
send the following data and any information you think might be
diff --git a/buildtools/config.sub b/buildtools/config.sub
index 9d7f7339..1acc966a 100755
--- a/buildtools/config.sub
+++ b/buildtools/config.sub
@@ -1,42 +1,40 @@
#! /bin/sh
# Configuration validation subroutine script.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
+# Copyright 1992-2015 Free Software Foundation, Inc.
-timestamp='2003-07-04'
+timestamp='2015-08-20'
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330,
-# Boston, MA 02111-1307, USA.
-
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
+
+# Please send patches to <config-patches@gnu.org>.
#
# Configuration subroutine to validate and canonicalize a configuration type.
# Supply the specified configuration type as an argument.
# If it is invalid, we print an error message on stderr and exit with code 1.
# Otherwise, we print the canonical config type on stdout and succeed.
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
# This file is supposed to be the same for all GNU packages
# and recognize all the CPU types, system types and aliases
# that are meaningful with *any* GNU software.
@@ -70,8 +68,7 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
-Free Software Foundation, Inc.
+Copyright 1992-2015 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -83,11 +80,11 @@ Try \`$me --help' for more information."
while test $# -gt 0 ; do
case $1 in
--time-stamp | --time* | -t )
- echo "$timestamp" ; exit 0 ;;
+ echo "$timestamp" ; exit ;;
--version | -v )
- echo "$version" ; exit 0 ;;
+ echo "$version" ; exit ;;
--help | --h* | -h )
- echo "$usage"; exit 0 ;;
+ echo "$usage"; exit ;;
-- ) # Stop option processing
shift; break ;;
- ) # Use stdin as input.
@@ -99,7 +96,7 @@ while test $# -gt 0 ; do
*local*)
# First pass through any local machine types.
echo $1
- exit 0;;
+ exit ;;
* )
break ;;
@@ -118,10 +115,18 @@ esac
# Here we must recognize all the valid KERNEL-OS combinations.
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
- nto-qnx* | linux-gnu* | kfreebsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*)
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \
+ kopensolaris*-gnu* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
;;
+ android-linux)
+ os=-linux-android
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+ ;;
*)
basic_machine=`echo $1 | sed 's/-[^-]*$//'`
if [ $basic_machine != $1 ]
@@ -144,10 +149,13 @@ case $os in
-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis)
+ -apple | -axis | -knuth | -cray | -microblaze*)
os=
basic_machine=$1
;;
+ -bluegene*)
+ os=-cnk
+ ;;
-sim | -cisco | -oki | -wec | -winbond)
os=
basic_machine=$1
@@ -162,13 +170,17 @@ case $os in
os=-chorusos
basic_machine=$1
;;
- -chorusrdb)
- os=-chorusrdb
+ -chorusrdb)
+ os=-chorusrdb
basic_machine=$1
- ;;
+ ;;
-hiux*)
os=-hiuxwe2
;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
-sco5)
os=-sco3.2v5
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
@@ -185,6 +197,10 @@ case $os in
# Don't forget version if it is 3.2v4 or newer.
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
-sco*)
os=-sco3.2v2
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
@@ -202,6 +218,12 @@ case $os in
-isc*)
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
-lynx*)
os=-lynxos
;;
@@ -226,55 +248,114 @@ case $basic_machine in
# Some are omitted here because they have special meanings below.
1750a | 580 \
| a29k \
+ | aarch64 | aarch64_be \
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr \
- | c4x | clipper \
+ | am33_2.0 \
+ | arc | arceb \
+ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+ | avr | avr32 \
+ | ba \
+ | be32 | be64 \
+ | bfin \
+ | c4x | c8051 | clipper \
| d10v | d30v | dlx | dsp16xx \
- | fr30 | frv \
+ | e2k | epiphany \
+ | fido | fr30 | frv | ft32 \
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
| i370 | i860 | i960 | ia64 \
- | ip2k \
- | m32r | m68000 | m68k | m88k | mcore \
+ | ip2k | iq2000 \
+ | k1om \
+ | le32 | le64 \
+ | lm32 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
| mips | mipsbe | mipseb | mipsel | mipsle \
| mips16 \
| mips64 | mips64el \
- | mips64vr | mips64vrel \
+ | mips64octeon | mips64octeonel \
| mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
| mips64vr4100 | mips64vr4100el \
| mips64vr4300 | mips64vr4300el \
| mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
| mipsisa32 | mipsisa32el \
| mipsisa32r2 | mipsisa32r2el \
+ | mipsisa32r6 | mipsisa32r6el \
| mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64r6 | mipsisa64r6el \
| mipsisa64sb1 | mipsisa64sb1el \
| mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
| mipstx39 | mipstx39el \
| mn10200 | mn10300 \
+ | moxie \
+ | mt \
| msp430 \
+ | nds32 | nds32le | nds32be \
+ | nios | nios2 | nios2eb | nios2el \
| ns16k | ns32k \
- | openrisc | or32 \
+ | open8 | or1k | or1knd | or32 \
| pdp10 | pdp11 | pj | pjl \
- | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
+ | powerpc | powerpc64 | powerpc64le | powerpcle \
| pyramid \
- | sh | sh[1234] | sh[23]e | sh[34]eb | shbe | shle | sh[1234]le | sh3ele \
+ | riscv32 | riscv64 \
+ | rl78 | rx \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
| sh64 | sh64le \
- | sparc | sparc64 | sparc86x | sparclet | sparclite | sparcv9 | sparcv9b \
- | strongarm \
- | tahoe | thumb | tic4x | tic80 | tron \
- | v850 | v850e \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu \
+ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+ | ubicom32 \
+ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+ | visium \
| we32k \
- | x86 | xscale | xstormy16 | xtensa \
- | z8k)
+ | x86 | xc16x | xstormy16 | xtensa \
+ | z8k | z80)
basic_machine=$basic_machine-unknown
;;
- m6811 | m68hc11 | m6812 | m68hc12)
- # Motorola 68HC11/12.
+ c54x)
+ basic_machine=tic54x-unknown
+ ;;
+ c55x)
+ basic_machine=tic55x-unknown
+ ;;
+ c6x)
+ basic_machine=tic6x-unknown
+ ;;
+ leon|leon[3-9])
+ basic_machine=sparc-$basic_machine
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
basic_machine=$basic_machine-unknown
os=-none
;;
m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+ ;;
+ xgate)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ xscaleeb)
+ basic_machine=armeb-unknown
+ ;;
+
+ xscaleel)
+ basic_machine=armel-unknown
+ ;;
# We use `pc' rather than `unknown'
# because (1) that's what they normally are, and
@@ -290,58 +371,89 @@ case $basic_machine in
# Recognize the basic CPU types with company name.
580-* \
| a29k-* \
+ | aarch64-* | aarch64_be-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | amd64-* | arc-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
| arm-* | armbe-* | armle-* | armeb-* | armv*-* \
- | avr-* \
- | bs2000-* \
- | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
- | clipper-* | cydra-* \
+ | avr-* | avr32-* \
+ | ba-* \
+ | be32-* | be64-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* \
+ | c8051-* | clipper-* | craynv-* | cydra-* \
| d10v-* | d30v-* | dlx-* \
- | elxsi-* \
- | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \
+ | e2k-* | elxsi-* \
+ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
| h8300-* | h8500-* \
| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | hexagon-* \
| i*86-* | i860-* | i960-* | ia64-* \
- | ip2k-* \
- | m32r-* \
+ | ip2k-* | iq2000-* \
+ | k1om-* \
+ | le32-* | le64-* \
+ | lm32-* \
+ | m32c-* | m32r-* | m32rle-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | mcore-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | microblaze-* | microblazeel-* \
| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
| mips16-* \
| mips64-* | mips64el-* \
- | mips64vr-* | mips64vrel-* \
+ | mips64octeon-* | mips64octeonel-* \
| mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
| mips64vr4100-* | mips64vr4100el-* \
| mips64vr4300-* | mips64vr4300el-* \
| mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
| mipsisa32-* | mipsisa32el-* \
| mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa32r6-* | mipsisa32r6el-* \
| mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64r6-* | mipsisa64r6el-* \
| mipsisa64sb1-* | mipsisa64sb1el-* \
| mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipsr5900-* | mipsr5900el-* \
| mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
| msp430-* \
- | none-* | np1-* | nv1-* | ns16k-* | ns32k-* \
+ | nds32-* | nds32le-* | nds32be-* \
+ | nios-* | nios2-* | nios2eb-* | nios2el-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | open8-* \
+ | or1k*-* \
| orion-* \
| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
| pyramid-* \
- | romp-* | rs6000-* \
- | sh-* | sh[1234]-* | sh[23]e-* | sh[34]eb-* | shbe-* \
+ | riscv32-* | riscv64-* \
+ | rl78-* | romp-* | rs6000-* | rx-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
- | sparc-* | sparc64-* | sparc86x-* | sparclet-* | sparclite-* \
- | sparcv9-* | sparcv9b-* | strongarm-* | sv1-* | sx?-* \
- | tahoe-* | thumb-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \
+ | tahoe-* \
| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile*-* \
| tron-* \
- | v850-* | v850e-* | vax-* \
+ | ubicom32-* \
+ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+ | vax-* \
+ | visium-* \
| we32k-* \
- | x86-* | x86_64-* | xps100-* | xscale-* | xstormy16-* \
- | xtensa-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* \
+ | xstormy16-* | xtensa*-* \
| ymp-* \
- | z8k-*)
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
;;
# Recognize the various machine names and aliases which stand
# for a CPU type and a company and sometimes even an OS.
@@ -359,6 +471,9 @@ case $basic_machine in
basic_machine=a29k-amd
os=-udi
;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
adobe68k)
basic_machine=m68010-adobe
os=-scout
@@ -376,6 +491,9 @@ case $basic_machine in
amd64)
basic_machine=x86_64-pc
;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
amdahl)
basic_machine=580-amdahl
os=-sysv
@@ -399,6 +517,13 @@ case $basic_machine in
basic_machine=m68k-apollo
os=-bsd
;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
+ asmjs)
+ basic_machine=asmjs-unknown
+ ;;
aux)
basic_machine=m68k-apple
os=-aux
@@ -407,10 +532,35 @@ case $basic_machine in
basic_machine=ns32k-sequent
os=-dynix
;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
+ c54x-*)
+ basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c55x-*)
+ basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c6x-*)
+ basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
c90)
basic_machine=c90-cray
os=-unicos
;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
convex-c1)
basic_machine=c1-convex
os=-bsd
@@ -435,12 +585,27 @@ case $basic_machine in
basic_machine=j90-cray
os=-unicos
;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16 | cr16-*)
+ basic_machine=cr16-unknown
+ os=-elf
+ ;;
crds | unos)
basic_machine=m68k-crds
;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
cris | cris-* | etrax*)
basic_machine=cris-axis
;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
da30 | da30-*)
basic_machine=m68k-da30
;;
@@ -463,6 +628,14 @@ case $basic_machine in
basic_machine=m88k-motorola
os=-sysv3
;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
dpx20 | dpx20-*)
basic_machine=rs6000-bull
os=-bosx
@@ -574,7 +747,6 @@ case $basic_machine in
i370-ibm* | ibm*)
basic_machine=i370-ibm
;;
-# I'm not sure what "Sysv32" means. Should this be sysv3.2?
i*86v32)
basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
os=-sysv32
@@ -613,6 +785,17 @@ case $basic_machine in
basic_machine=m68k-isi
os=-sysv
;;
+ leon-*|leon[3-9]-*)
+ basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'`
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
m88k-omron*)
basic_machine=m88k-omron
;;
@@ -624,10 +807,21 @@ case $basic_machine in
basic_machine=ns32k-utek
os=-sysv
;;
+ microblaze*)
+ basic_machine=microblaze-xilinx
+ ;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
mingw32)
- basic_machine=i386-pc
+ basic_machine=i686-pc
os=-mingw32
;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
miniframe)
basic_machine=m68000-convergent
;;
@@ -641,10 +835,6 @@ case $basic_machine in
mips3*)
basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
;;
- mmix*)
- basic_machine=mmix-knuth
- os=-mmixware
- ;;
monitor)
basic_machine=m68k-rom68k
os=-coff
@@ -653,14 +843,29 @@ case $basic_machine in
basic_machine=powerpc-unknown
os=-morphos
;;
+ moxiebox)
+ basic_machine=moxie-unknown
+ os=-moxiebox
+ ;;
msdos)
basic_machine=i386-pc
os=-msdos
;;
+ ms1-*)
+ basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+ ;;
+ msys)
+ basic_machine=i686-pc
+ os=-msys
+ ;;
mvs)
basic_machine=i370-ibm
os=-mvs
;;
+ nacl)
+ basic_machine=le32-unknown
+ os=-nacl
+ ;;
ncr3000)
basic_machine=i486-ncr
os=-sysv4
@@ -725,9 +930,11 @@ case $basic_machine in
np1)
basic_machine=np1-gould
;;
- nv1)
- basic_machine=nv1-cray
- os=-unicosmp
+ neo-tandem)
+ basic_machine=neo-tandem
+ ;;
+ nse-tandem)
+ basic_machine=nse-tandem
;;
nsr-tandem)
basic_machine=nsr-tandem
@@ -736,9 +943,12 @@ case $basic_machine in
basic_machine=hppa1.1-oki
os=-proelf
;;
- or32 | or32-*)
+ openrisc | openrisc-*)
basic_machine=or32-unknown
- os=-coff
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
;;
OSE68000 | ose68000)
basic_machine=m68000-ericsson
@@ -756,6 +966,14 @@ case $basic_machine in
basic_machine=i860-intel
os=-osf
;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
pbd)
basic_machine=sparc-tti
;;
@@ -765,6 +983,12 @@ case $basic_machine in
pc532 | pc532-*)
basic_machine=ns32k-pc532
;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
pentium | p5 | k5 | k6 | nexgen | viac3)
basic_machine=i586-pc
;;
@@ -794,9 +1018,10 @@ case $basic_machine in
;;
power) basic_machine=power-ibm
;;
- ppc) basic_machine=powerpc-unknown
+ ppc | ppcbe) basic_machine=powerpc-unknown
;;
- ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ppc-* | ppcbe-*)
+ basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
ppcle | powerpclittle | ppc-le | powerpc-little)
basic_machine=powerpcle-unknown
@@ -821,6 +1046,14 @@ case $basic_machine in
basic_machine=i586-unknown
os=-pw32
;;
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=-rdos
+ ;;
+ rdos32)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
rom68k)
basic_machine=m68k-rom68k
os=-coff
@@ -847,6 +1080,10 @@ case $basic_machine in
sb1el)
basic_machine=mipsisa64sb1el-unknown
;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
sei)
basic_machine=mips-sei
os=-seiux
@@ -858,6 +1095,9 @@ case $basic_machine in
basic_machine=sh-hitachi
os=-hms
;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
sh64)
basic_machine=sh64-unknown
;;
@@ -879,6 +1119,9 @@ case $basic_machine in
basic_machine=i860-stratus
os=-sysv4
;;
+ strongarm-* | thumb-*)
+ basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
sun2)
basic_machine=m68000-sun
;;
@@ -935,17 +1178,9 @@ case $basic_machine in
basic_machine=t90-cray
os=-unicos
;;
- tic54x | c54x*)
- basic_machine=tic54x-unknown
- os=-coff
- ;;
- tic55x | c55x*)
- basic_machine=tic55x-unknown
- os=-coff
- ;;
- tic6x | c6x*)
- basic_machine=tic6x-unknown
- os=-coff
+ tile*)
+ basic_machine=$basic_machine-unknown
+ os=-linux-gnu
;;
tx39)
basic_machine=mipstx39-unknown
@@ -960,6 +1195,10 @@ case $basic_machine in
tower | tower-32)
basic_machine=m68k-ncr
;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
udi29k)
basic_machine=a29k-amd
os=-udi
@@ -1003,9 +1242,16 @@ case $basic_machine in
basic_machine=hppa1.1-winbond
os=-proelf
;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
xps | xps100)
basic_machine=xps100-honeywell
;;
+ xscale-* | xscalee[bl]-*)
+ basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+ ;;
ymp)
basic_machine=ymp-cray
os=-unicos
@@ -1014,6 +1260,10 @@ case $basic_machine in
basic_machine=z8k-unknown
os=-sim
;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
none)
basic_machine=none-none
os=-none
@@ -1033,6 +1283,9 @@ case $basic_machine in
romp)
basic_machine=romp-ibm
;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
rs6000)
basic_machine=rs6000-ibm
;;
@@ -1049,13 +1302,10 @@ case $basic_machine in
we32k)
basic_machine=we32k-att
;;
- sh3 | sh4 | sh[34]eb | sh[1234]le | sh[23]ele)
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
basic_machine=sh-unknown
;;
- sh64)
- basic_machine=sh64-unknown
- ;;
- sparc | sparcv9 | sparcv9b)
+ sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
basic_machine=sparc-sun
;;
cydra)
@@ -1099,9 +1349,12 @@ esac
if [ x"$os" != x"" ]
then
case $os in
- # First match some system type aliases
- # that might get confused with valid system types.
+ # First match some system type aliases
+ # that might get confused with valid system types.
# -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
-solaris1 | -solaris1.*)
os=`echo $os | sed -e 's|solaris1|sunos4|'`
;;
@@ -1122,25 +1375,31 @@ case $os in
# Each alternative MUST END IN A *, to match a version number.
# -sysv* is not here because it comes later, after sysvr4.
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* | -plan9* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* \
+ | -aos* | -aros* | -cloudabi* | -sortix* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
- | -hiux* | -386bsd* | -netbsd* | -openbsd* | -kfreebsd* | -freebsd* | -riscix* \
- | -lynxos* | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* \
- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
+ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \
| -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
| -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
| -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
| -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
| -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
- | -powermax* | -dnix* | -nx6 | -nx7 | -sei*)
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* | -tirtos*)
# Remember, each alternative MUST END IN *, to match a version number.
;;
-qnx*)
@@ -1158,12 +1417,15 @@ case $os in
os=`echo $os | sed -e 's|nto|nto-qnx|'`
;;
-sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
- | -windows* | -osx | -abug | -netware* | -os9* | -beos* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
| -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
;;
-mac*)
os=`echo $os | sed -e 's|mac|macos|'`
;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
-linux*)
os=`echo $os | sed -e 's|linux|linux-gnu|'`
;;
@@ -1176,6 +1438,9 @@ case $os in
-opened*)
os=-openedition
;;
+ -os400*)
+ os=-os400
+ ;;
-wince*)
os=-wince
;;
@@ -1197,6 +1462,9 @@ case $os in
-atheos*)
os=-atheos
;;
+ -syllable*)
+ os=-syllable
+ ;;
-386bsd)
os=-bsd
;;
@@ -1219,6 +1487,9 @@ case $os in
-sinix*)
os=-sysv4
;;
+ -tpf*)
+ os=-tpf
+ ;;
-triton*)
os=-sysv3
;;
@@ -1252,8 +1523,13 @@ case $os in
-aros*)
os=-aros
;;
- -kaos*)
- os=-kaos
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -dicos*)
+ os=-dicos
+ ;;
+ -nacl*)
;;
-none)
;;
@@ -1277,6 +1553,12 @@ else
# system, and we'll never get to this point.
case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
*-acorn)
os=-riscix1.2
;;
@@ -1286,9 +1568,24 @@ case $basic_machine in
arm*-semi)
os=-aout
;;
- c4x-* | tic4x-*)
- os=-coff
- ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ c8051-*)
+ os=-elf
+ ;;
+ hexagon-*)
+ os=-elf
+ ;;
+ tic54x-*)
+ os=-coff
+ ;;
+ tic55x-*)
+ os=-coff
+ ;;
+ tic6x-*)
+ os=-coff
+ ;;
# This must come before the *-dec entry.
pdp10-*)
os=-tops20
@@ -1307,13 +1604,13 @@ case $basic_machine in
;;
m68000-sun)
os=-sunos3
- # This also exists in the configure program, but was not the
- # default.
- # os=-sunos4
;;
m68*-cisco)
os=-aout
;;
+ mep-*)
+ os=-elf
+ ;;
mips*-cisco)
os=-elf
;;
@@ -1332,9 +1629,15 @@ case $basic_machine in
*-be)
os=-beos
;;
+ *-haiku)
+ os=-haiku
+ ;;
*-ibm)
os=-aix
;;
+ *-knuth)
+ os=-mmixware
+ ;;
*-wec)
os=-proelf
;;
@@ -1437,7 +1740,7 @@ case $basic_machine in
-sunos*)
vendor=sun
;;
- -aix*)
+ -cnk*|-aix*)
vendor=ibm
;;
-beos*)
@@ -1467,9 +1770,15 @@ case $basic_machine in
-mvs* | -opened*)
vendor=ibm
;;
+ -os400*)
+ vendor=ibm
+ ;;
-ptx*)
vendor=sequent
;;
+ -tpf*)
+ vendor=ibm
+ ;;
-vxsim* | -vxworks* | -windiss*)
vendor=wrs
;;
@@ -1494,7 +1803,7 @@ case $basic_machine in
esac
echo $basic_machine$os
-exit 0
+exit
# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
diff --git a/buildtools/debian-skeleton/control b/buildtools/debian-skeleton/control
index a91c21ca..f8e1b846 100644
--- a/buildtools/debian-skeleton/control
+++ b/buildtools/debian-skeleton/control
@@ -1,8 +1,15 @@
+# Source package requires lxml because it builds an initial rpki.conf.
+
+# rp package requires xinetd for historical reasons, could use "rpki-rtr server" instead.
+
+# Division between rp and ca packages is somewhat arbitrary, and becomes less relevant as we move more and
+# more of the code from C into Python. Some day we may just collapse down to a single binary package again.
+
Source: rpki
Section: net
Priority: extra
Maintainer: Rob Austein <sra@hactrn.net>
-Build-Depends: debhelper (>= 8.0.0), autotools-dev, rsync, rrdtool, xsltproc, python (>= 2.7), python-all-dev, python-setuptools, python-lxml, libxml2-utils, mysql-client, mysql-server, python-mysqldb, python-vobject, python-yaml, python-django (>= 1.3.7), python-django-south (>= 0.7.5)
+Build-Depends: debhelper (>= 8.0.0), autotools-dev, python (>= 2.7), python-all-dev, python-setuptools, python-lxml
Standards-Version: 3.9.3
Homepage: http://trac.rpki.net/
Vcs-Svn: http://subvert-rpki.hactrn.net/
@@ -11,7 +18,7 @@ X-Python-Version: 2.7
Package: rpki-rp
Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, python (>= 2.7), rrdtool, rsync, xinetd, apache2, ssl-cert
+Depends: ${shlibs:Depends}, ${misc:Depends}, python (>= 2.7), apache2, ssl-cert, python-lxml, postgresql, postgresql-client, python-psycopg2, python-django (>= 1.8.0), python-tornado, rrdtool, rsync, xinetd
Description: rpki.net relying party tools
"Relying party" validation tools from the rpki.net toolkit.
See the online documentation at http://rpki.net/.
@@ -19,7 +26,7 @@ Replaces: rpki-ca (<= 0.5767)
Package: rpki-ca
Architecture: any
-Depends: ${shlibs:Depends}, ${misc:Depends}, rpki-rp (= ${binary:Version}), xsltproc, python (>= 2.7), python-lxml, libxml2-utils, mysql-client, mysql-server, python-mysqldb, python-vobject, python-yaml, python-django (>= 1.3.7), python-django-south (>= 0.7.5), apache2, libapache2-mod-wsgi, python-netifaces, ssl-cert
+Depends: ${shlibs:Depends}, ${misc:Depends}, python (>= 2.7), apache2, ssl-cert, python-lxml, postgresql, postgresql-client, python-psycopg2, python-django (>= 1.8.0), python-tornado, libapache2-mod-wsgi, python-vobject, python-yaml, python-netifaces, rpki-rp (= ${binary:Version}), lsb-base (>= 3.2-14)
Description: rpki.net certification authority tools
"Certification authority" tools for issuing RPKI certificates and
related objects using the rpki.net toolkit.
diff --git a/buildtools/debian-skeleton/rpki-ca.default b/buildtools/debian-skeleton/rpki-ca.default
new file mode 100644
index 00000000..94a92844
--- /dev/null
+++ b/buildtools/debian-skeleton/rpki-ca.default
@@ -0,0 +1,10 @@
+# Defaults for rpki-ca initscript
+# sourced by /etc/init.d/rpki-ca
+# installed at /etc/default/rpki-ca by the maintainer scripts
+
+#
+# This is a POSIX shell fragment
+#
+
+# Additional arguments that are passed to rpki-nanny.
+DAEMON_ARGS=""
diff --git a/buildtools/debian-skeleton/rpki-ca.init.d b/buildtools/debian-skeleton/rpki-ca.init.d
new file mode 100644
index 00000000..22feba38
--- /dev/null
+++ b/buildtools/debian-skeleton/rpki-ca.init.d
@@ -0,0 +1,115 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: rpki-ca
+# Required-Start: $local_fs $network $remote_fs $syslog postgresql
+# Required-Stop: $local_fs $network $remote_fs $syslog postgresql
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: RPKI CA Servers
+### END INIT INFO
+
+# Author: Rob Austein <sra@hactrn.net>
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="rpki-ca"
+NAME=rpki-nanny
+PIDDIR=/var/run/rpki
+LOGDIR=/var/log/rpki
+DAEMON=/usr/lib/rpki/$NAME
+SCRIPTNAME=/etc/init.d/rpki-ca
+PIDFILE=$PIDDIR/$NAME.pid
+
+# Exit if the package is not installed
+test -x "$DAEMON" || exit 0
+
+# Read configuration variable file if it is present
+test -r /etc/default/rpki-ca && . /etc/default/rpki-ca
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+ # Return
+ # 0 if daemon has been started
+ # 1 if daemon was already running
+ # 2 if daemon could not be started
+
+ test -f /etc/rpki.conf || return 2
+
+ for dir in $PIDDIR $LOGDIR /usr/share/rpki/publication /usr/share/rpki/rrdp-publication
+ do
+ test -d $dir || install -d -o rpki -g rpki $dir || return 2
+ done
+
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --startas $DAEMON --name $NAME --test > /dev/null || return 1
+ start-stop-daemon --start --quiet --pidfile $PIDFILE --startas $DAEMON --name $NAME -- $DAEMON_ARGS || return 2
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+ # Return
+ # 0 if daemon has been stopped
+ # 1 if daemon was already stopped
+ # 2 if daemon could not be stopped
+ # other if a failure occurred
+
+ start-stop-daemon --stop --quiet --oknodo --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
+}
+
+case "$1" in
+ start)
+ test "$VERBOSE" != no && log_daemon_msg "Starting $DESC" "$NAME"
+ do_start
+ case "$?" in
+ 0|1) test "$VERBOSE" != no && log_end_msg 0 ;;
+ 2) test "$VERBOSE" != no && log_end_msg 1 ;;
+ esac
+ ;;
+ stop)
+ test "$VERBOSE" != no && log_daemon_msg "Stopping $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1) test "$VERBOSE" != no && log_end_msg 0 ;;
+ 2) test "$VERBOSE" != no && log_end_msg 1 ;;
+ esac
+ ;;
+ status)
+ status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
+ ;;
+ restart|force-reload)
+ log_daemon_msg "Restarting $DESC" "$NAME"
+ do_stop
+ case "$?" in
+ 0|1)
+ do_start
+ case "$?" in
+ 0) log_end_msg 0 ;;
+ 1) log_end_msg 1 ;; # Old process is still running
+ *) log_end_msg 1 ;; # Failed to start
+ esac
+ ;;
+ *)
+ # Failed to stop
+ log_end_msg 1
+ ;;
+ esac
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
+:
diff --git a/buildtools/debian-skeleton/rpki-ca.install b/buildtools/debian-skeleton/rpki-ca.install
index ffae5103..62cf9922 100644
--- a/buildtools/debian-skeleton/rpki-ca.install
+++ b/buildtools/debian-skeleton/rpki-ca.install
@@ -1,4 +1,5 @@
-etc/rpki/rpki-confgen.xml
usr/lib/rpki
-usr/sbin
+usr/sbin/irbe_cli
+usr/sbin/rpkic
+usr/sbin/rpkigui-query-routes
usr/share/rpki
diff --git a/buildtools/debian-skeleton/rpki-ca.postinst b/buildtools/debian-skeleton/rpki-ca.postinst
index c94e052c..4c72d148 100644
--- a/buildtools/debian-skeleton/rpki-ca.postinst
+++ b/buildtools/debian-skeleton/rpki-ca.postinst
@@ -5,73 +5,46 @@
set -e
-setup_rpkid_user() {
- if ! getent passwd rpkid >/dev/null
- then
- useradd -g rpkid -M -N -d /nonexistent -s /sbin/nologin -c "RPKI certification authority engine(s)" rpkid
- fi
-}
-
-setup_rpkid_group() {
- if ! getent group rpkid >/dev/null
- then
- groupadd rpkid
- fi
+setup_directories() {
+ install -o rpki -g rpki -d /usr/share/rpki/bpki /usr/share/rpki/publication /usr/share/rpki/rrdp-publication
}
setup_apache() {
/usr/lib/rpki/rpkigui-apache-conf-gen --install --verbose
}
-setup_rpki_conf() {
- # Update /etc/rpki.conf.sample for this system, and copy it to
- # /etc/rpki.conf if no configuration file exists yet.
-
- # We don't (yet) have the ability to merge in settings from an
- # existing rpki.conf, so we generate a new secret_key and a new
- # SQL password every time, but that's harmless so long as we're
- # careful not to overwrite an existing configuration.
-
- rpki-confgen --read-xml /etc/rpki/rpki-confgen.xml \
- --autoconf \
- --set myrpki::handle=`hostname -f | sed 's/[.]/_/g'` \
- --set myrpki::rpkid_server_host=`hostname -f` \
- --set myrpki::pubd_server_host=`hostname -f` \
- --pwgen myrpki::shared_sql_password \
- --pwgen web_portal::secret-key \
- --write-conf /etc/rpki.conf.sample
-
- if test ! -f /etc/rpki.conf
+setup_config() {
+
+ rpki-confgen --read-xml /etc/rpki/rpki.rp.xml \
+ --set myrpki::run_rpkid=yes \
+ --set myrpki::run_pubd=yes \
+ --write-xml /etc/rpki/rpki.ca.xml \
+ --write-conf /etc/rpki/rpki.ca.conf.sample
+
+ if test ! -f /etc/rpki.conf || cmp -s /etc/rpki.conf /etc/rpki/rpki.rp.conf.sample
then
- cp -p /etc/rpki.conf.sample /etc/rpki.conf
+ cp -p /etc/rpki/rpki.ca.conf.sample /etc/rpki.conf
fi
}
-setup_mysql() {
- rpki-sql-setup --create-if-missing --mysql-defaults /etc/mysql/debian.cnf
- rpki-sql-setup --apply-upgrades --verbose
+setup_sql() {
+ rpki-sql-setup --debug --verbose --postgresql-root-username postgres create
+ sudo -u rpki rpki-manage migrate rpkidb --settings rpki.django_settings.rpkid
+ sudo -u rpki rpki-manage migrate pubdb --settings rpki.django_settings.pubd
+ sudo -u rpki rpki-manage migrate irdb --settings rpki.django_settings.irdb
+ sudo -u rpki rpki-manage migrate --settings rpki.django_settings.gui
}
setup_bpki() {
rpkic initialize_server_bpki
}
-setup_django() {
- rpki-manage syncdb --noinput
- rpki-manage migrate app
-}
-
setup_cron() {
- t=$(hexdump -n 1 -e '"%u"' /dev/urandom) && echo "$(($t % 60)) */2 * * * nobody /usr/lib/rpki/rpkigui-import-routes" > /etc/cron.d/rpkigui-routeviews
- chmod 644 /etc/cron.d/rpkigui-routeviews
- ln -sf /usr/lib/rpki/rpkigui-check-expired /etc/cron.daily/rpkigui-check-expired
-
- # This should be user rpkid, but I don't have permissions set up
- # properly for that yet. Arguably this should be integrated with
- # rpkigui-check-expired anyway, not there yet either.
-
- echo "30 3 * * * root /usr/sbin/rpkic update_bpki" >/etc/cron.d/rpki-update-bpki
- chmod 644 /etc/cron.d/rpki-update-bpki
+ t=$(( $(hexdump -n 1 -e '"%u"' /dev/urandom) % 60 )) || exit
+ echo "$t */2 * * * rpki /usr/lib/rpki/rpkigui-import-routes" > /etc/cron.d/rpkigui-routeviews
+ echo "@daily rpki /usr/lib/rpki/rpkigui-check-expired" > /etc/cron.d/rpkigui-check-expired
+ echo "30 3 * * * rpki /usr/sbin/rpkic update_bpki" > /etc/cron.d/rpki-update-bpki
+ chmod 644 /etc/cron.d/rpkigui-routeviews /etc/cron.d/rpkigui-check-expired /etc/cron.d/rpki-update-bpki
}
# summary of how this script can be called:
@@ -89,13 +62,11 @@ setup_cron() {
case "$1" in
configure)
- setup_rpkid_group
- setup_rpkid_user
+ setup_directories
setup_apache
- setup_rpki_conf
- setup_mysql
+ setup_config
+ setup_sql
setup_bpki
- setup_django
setup_cron
;;
diff --git a/buildtools/debian-skeleton/rpki-ca.postrm b/buildtools/debian-skeleton/rpki-ca.postrm
index c93f84df..c9418cdb 100644
--- a/buildtools/debian-skeleton/rpki-ca.postrm
+++ b/buildtools/debian-skeleton/rpki-ca.postrm
@@ -22,12 +22,11 @@ set -e
case "$1" in
purge)
- sql=/etc/rpki/drop_databases.sql
- if test -f $sql
+ if cmp -s /etc/rpki.conf /etc/rpki/rpki.ca.conf.sample
then
- mysql --defaults-file=/etc/mysql/debian.cnf --execute "source $sql"
+ cp -p /etc/rpki/rpki.rp.conf.sample /etc/rpki.conf
fi
- rm -f /etc/rpki.conf /etc/rpki.conf.sample $sql
+ rm -f /etc/rpki/rpki.ca.conf.sample /etc/rpki/rpki.ca.xml
rm -f /etc/rpki/apache.conf /etc/rpki/apache.cer /etc/rpki/apache.key
;;
diff --git a/buildtools/debian-skeleton/rpki-ca.prerm b/buildtools/debian-skeleton/rpki-ca.prerm
index 8b4d3945..c341502b 100644
--- a/buildtools/debian-skeleton/rpki-ca.prerm
+++ b/buildtools/debian-skeleton/rpki-ca.prerm
@@ -21,62 +21,19 @@ case "$1" in
remove)
# Clean up BPKI files. These all come from SQL, so we can
- # regenerate them easily if appropriate.
+ # regenerate them easily, if appropriate.
- rm -f /usr/share/rpki/ca.cer
- rm -f /usr/share/rpki/irbe.cer
- rm -f /usr/share/rpki/irdbd.cer
- rm -f /usr/share/rpki/pubd.cer
- rm -f /usr/share/rpki/pubd.key
- rm -f /usr/share/rpki/rpkid.cer
- rm -f /usr/share/rpki/rpkid.key
-
- # Record what will be needed to drop the databases completely,
- # while we still have the necessary configuration data, but
- # postpone dropping the databases until the postrm script,
- # since that's where we find out whether this is a purge.
-
- rpki-sql-setup --mysql-defaults /etc/mysql/debian.cnf --script-drop >/etc/rpki/drop_databases.sql
+ rm -rf /usr/share/rpki/bpki
# Clean up our cron jobs.
rm -f /etc/cron.d/rpkigui-routeviews
- rm -f /etc/cron.daily/rpkigui-check-expired
+ rm -f /etc/cron.d/rpkigui-check-expired
rm -f /etc/cron.d/rpki-update-bpki
- # Clean up what we did to Apache. Modern version of this is
- # just invocation of a Python script, but for now we also
- # retain code needed to clean up nasty mess we created in the
- # past, to avoid breaking old installations on upgrade.
-
- # Remove the old stuff first, if the containing file even exists.
-
- f=/etc/apache2/sites-available/default-ssl
- if test -r $f
- then
- awk < $f > ${f}.tmp '
- BEGIN {
- conf_file = "/etc/rpki/apache.conf";
- conf_regexp = "^[ \t]*Include[ \t]+" conf_file "[ \t]*$";
- }
- $0 !~ conf_regexp {
- print;
- }'
-
- if cmp -s ${f}.tmp ${f}.orig
- then
- mv -f ${f}.orig $f
- rm -f ${f}.tmp
- else
- mv -f ${f}.tmp $f
- fi
- fi
-
- # At this point we've cleaned up our Apache config mess.
- # Not sure whether we should do "service apache2 reload"
- # here, one could make a case either way. Skip for now.
-
- # Now remove the new stuff.
+ rm -f /etc/cron.daily/rpkigui-check-expired
+
+ # Clean up what we did to Apache.
/usr/lib/rpki/rpkigui-apache-conf-gen --remove --verbose
diff --git a/buildtools/debian-skeleton/rpki-ca.upstart b/buildtools/debian-skeleton/rpki-ca.upstart
deleted file mode 100644
index 36a792ee..00000000
--- a/buildtools/debian-skeleton/rpki-ca.upstart
+++ /dev/null
@@ -1,53 +0,0 @@
-# RPKI CA Service
-
-description "RPKI CA Servers"
-author "Rob Austein <sra@hactrn.net>"
-
-# This is almost certainly wrong. Suggestions on how to improve this
-# welcome, but please first read the Python code to understand what it
-# is doing.
-
-# Our only real dependencies are on mysqld and our config file.
-
-start on started mysql
-stop on stopping mysql
-
-pre-start script
- if test -f /etc/rpki.conf &&
- test -f /usr/share/rpki/ca.cer &&
- test -f /usr/share/rpki/irbe.cer &&
- test -f /usr/share/rpki/irdbd.cer &&
- test -f /usr/share/rpki/rpkid.cer &&
- test -f /usr/share/rpki/rpkid.key
- then
- install -m 755 -o rpkid -g rpkid -d /var/run/rpki /usr/share/rpki/publication
-
- # This should be running as user rpkid, but I haven't got all
- # the pesky details worked out yet. Most testing to date has
- # either been all under a single non-root user or everything
- # as root, so, eg, running "rpkic initialize" as root will not
- # leave things in a sane state for rpkid running as user
- # rpkid.
- #
- # In the interest of debugging the rest of this before trying
- # to break new ground, run daemons as root for the moment,
- # with the intention of coming back to fix this later.
- #
- #sudo -u rpkid /usr/sbin/rpki-start-servers
- /usr/sbin/rpki-start-servers
-
- else
- stop
- exit 0
- fi
-end script
-
-post-stop script
- for i in rpkid pubd irdbd rootd
- do
- if test -f /var/run/rpki/$i.pid
- then
- kill `cat /var/run/rpki/$i.pid`
- fi
- done
-end script
diff --git a/buildtools/debian-skeleton/rpki-rp.install b/buildtools/debian-skeleton/rpki-rp.install
index ce17bb14..8490936a 100644
--- a/buildtools/debian-skeleton/rpki-rp.install
+++ b/buildtools/debian-skeleton/rpki-rp.install
@@ -1,6 +1,11 @@
-etc/rcynic.conf
+etc/rpki/rpki-confgen.xml
etc/rpki/trust-anchors
etc/xinetd.d/rpki-rtr
usr/bin
usr/lib/python2.7
+usr/sbin/rpki-confgen
+usr/sbin/rpki-generate-root-certificate
+usr/sbin/rpki-manage
+usr/sbin/rpki-sql-backup
+usr/sbin/rpki-sql-setup
var/rcynic
diff --git a/buildtools/debian-skeleton/rpki-rp.postinst b/buildtools/debian-skeleton/rpki-rp.postinst
index b9f666a2..a3c1bb68 100644
--- a/buildtools/debian-skeleton/rpki-rp.postinst
+++ b/buildtools/debian-skeleton/rpki-rp.postinst
@@ -5,54 +5,64 @@
set -e
-setup_groups() {
- if ! getent group rcynic >/dev/null
+setup_user() {
+ if ! getent group rpki >/dev/null
then
- groupadd rcynic
+ groupadd rpki
fi
- if ! getent group rpkirtr >/dev/null
+ if ! getent passwd rpki >/dev/null
then
- groupadd rpkirtr
+ useradd -g rpki -M -N -d /var/rcynic -s /sbin/nologin -c "RPKI system software" rpki
fi
}
-setup_users() {
- if ! getent passwd rcynic >/dev/null
- then
- useradd -g rcynic -M -N -d /var/rcynic -s /sbin/nologin -c "RPKI validation system" rcynic
- fi
- if ! getent passwd rpkirtr >/dev/null
- then
- useradd -g rpkirtr -M -N -d /var/rcynic/rpki-rtr -s /sbin/nologin -c "RPKI router server" rpkirtr
- fi
- usermod -a -G rpkirtr rcynic
+setup_directories() {
+ install -o rpki -g rpki -d /var/rcynic/data /var/rcynic/rpki-rtr /var/rcynic/rpki-rtr/sockets /var/www/html/rcynic
}
-setup_directories() {
- install -o rcynic -g rcynic -d /var/rcynic/data /var/rcynic/rpki-rtr
- if test -d /var/www/html && test -d /var/www/rcynic && test ! -d /var/www/html/rcynic
- then
- mv /var/www/rcynic /var/www/html/rcynic
- elif test -d /var/www/html
+setup_config() {
+
+ rpki-confgen --read-xml /etc/rpki/rpki-confgen.xml \
+ --autoconf \
+ --set myrpki::handle=`hostname -f | sed 's/[.]/_/g'` \
+ --set myrpki::rpkid_server_host=`hostname -f` \
+ --set myrpki::pubd_server_host=`hostname -f` \
+ --set myrpki::shared_sql_engine=postgresql \
+ --set myrpki::rcynic_sql_database=rpki \
+ --set myrpki::rpkid_sql_database=rpki \
+ --set myrpki::irdbd_sql_database=rpki \
+ --set myrpki::pubd_sql_database=rpki \
+ --pwgen myrpki::shared_sql_password \
+ --pwgen web_portal::secret-key \
+ --set myrpki::run_rpkid=no \
+ --set myrpki::run_pubd=no \
+ --write-xml /etc/rpki/rpki.rp.xml \
+ --write-conf /etc/rpki/rpki.rp.conf.sample
+
+ if test ! -f /etc/rpki.conf
then
- install -o rcynic -g rcynic -d /var/www/html/rcynic
+ cp -p /etc/rpki/rpki.rp.conf.sample /etc/rpki.conf
fi
- install -o rpkirtr -g rcynic -m 775 -d /var/rcynic/rpki-rtr/sockets
+}
+
+setup_sql() {
+ rpki-sql-setup --debug --verbose --postgresql-root-username postgres create
+ sudo -u rpki rpki-manage migrate rcynicdb --settings rpki.django_settings.rcynic
}
# We want to pick a *random* minute for rcynic to run, to spread load
# on repositories, which is why we don't just use a package crontab.
-setup_rcynic_cron() {
- if test "X`crontab -l -u rcynic 2>/dev/null`" = "X"
+setup_cron() {
+ if test "X`crontab -l -u rpki 2>/dev/null`" = "X"
then
awk -v t=`hexdump -n 2 -e '"%u\n"' /dev/urandom` '
BEGIN {printf "MAILTO=root\n%u * * * *\texec /usr/bin/rcynic-cron\n", t % 60}' |
- crontab -u rcynic -
+ crontab -u rpki -
fi
}
-setup_rpkirtr_listener() {
+setup_xinetd() {
if test -f /var/run/xinetd.pid
then
kill -HUP `cat /var/run/xinetd.pid`
@@ -74,11 +84,12 @@ setup_rpkirtr_listener() {
case "$1" in
configure)
- setup_groups
- setup_users
+ setup_user
setup_directories
- setup_rcynic_cron
- setup_rpkirtr_listener
+ setup_config
+ setup_sql
+ setup_cron
+ setup_xinetd
;;
abort-upgrade|abort-remove|abort-deconfigure)
diff --git a/buildtools/debian-skeleton/rpki-rp.postrm b/buildtools/debian-skeleton/rpki-rp.postrm
index ad4ed1b9..ff8eafab 100644
--- a/buildtools/debian-skeleton/rpki-rp.postrm
+++ b/buildtools/debian-skeleton/rpki-rp.postrm
@@ -23,6 +23,13 @@ case "$1" in
purge)
rm -rf /var/rcynic
+ sql=/etc/rpki/drop_databases.sql
+ if test -f $sql
+ then
+ sudo -u postgres psql -f $sql
+ rm -f $sql
+ fi
+ rm -f /etc/rpki/rpki.rp.conf.sample /etc/rpki/rpki.rp.xml /etc/rpki.conf
;;
remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
diff --git a/buildtools/debian-skeleton/rpki-rp.prerm b/buildtools/debian-skeleton/rpki-rp.prerm
index 37b111f2..b7e7cbde 100644
--- a/buildtools/debian-skeleton/rpki-rp.prerm
+++ b/buildtools/debian-skeleton/rpki-rp.prerm
@@ -20,16 +20,25 @@ set -e
case "$1" in
remove)
- crontab -l -u rcynic 2>/dev/null | awk '
+ # Record what will be needed to drop the databases completely,
+ # while we still have the necessary configuration data, but
+ # postpone dropping the databases until the postrm script,
+ # since that's where we find out whether this is a purge.
+
+ rpki-sql-setup --debug --verbose --postgresql-root-username postgres script-drop /etc/rpki/drop_databases.sql
+
+ # Clean up our cron job.
+
+ crontab -l -u rpki 2>/dev/null | awk '
$0 !~ "exec /usr/bin/rcynic-cron" {
line[++n] = $0;
}
END {
if (n)
for (i = 1; i <= n; i++)
- print line[i] | "crontab -u rcynic -";
+ print line[i] | "crontab -u rpki -";
else
- system("crontab -u rcynic -r");
+ system("crontab -u rpki -r");
}'
;;
diff --git a/buildtools/debian-skeleton/rules b/buildtools/debian-skeleton/rules
index ea2b043f..4849f2de 100644
--- a/buildtools/debian-skeleton/rules
+++ b/buildtools/debian-skeleton/rules
@@ -8,7 +8,10 @@ export DH_VERBOSE=1
dh $@ --with python2
override_dh_auto_configure:
- dh_auto_configure -- --disable-target-installation --enable-python-install-layout=deb
+ dh_auto_configure -- --disable-target-installation --enable-python-install-layout=deb --disable-runtime-dependencies --enable-wsgi-daemon-mode=rpki
+
+override_dh_auto_build:
+ dh_auto_build -- SUDO=/usr/bin/sudo RRDTOOL=/usr/bin/rrdtool
override_dh_auto_test:
@true
diff --git a/buildtools/defstack.py b/buildtools/defstack.py
index 757516f3..b3df0777 100644
--- a/buildtools/defstack.py
+++ b/buildtools/defstack.py
@@ -8,11 +8,11 @@
# code with code maintained by humans, so "nasty" is a relative term.
#
# Copyright (C) 2011-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -68,7 +68,7 @@ template = '''
'''
if len(sys.argv) < 2:
- sys.exit("Usage: %s source.c [source.c ...]" % sys.argv[0])
+ sys.exit("Usage: %s source.c [source.c ...]" % sys.argv[0])
splitter = re.compile("[() \t]+").split
@@ -76,16 +76,16 @@ token = None
for line in fileinput.input():
- if token is None:
- path = fileinput.filename().split(os.path.sep)
- path = os.path.join(path[-2], path[-1]) if len(path) > 1 else path[-1]
- token = "".join(c if c.isalnum() else "_" for c in path.upper())
- sys.stdout.write(header.replace("%", token))
+ if token is None:
+ path = fileinput.filename().split(os.path.sep)
+ path = os.path.join(path[-2], path[-1]) if len(path) > 1 else path[-1]
+ token = "".join(c if c.isalnum() else "_" for c in path.upper())
+ sys.stdout.write(header.replace("%", token))
- if "DECLARE_STACK_OF" in line:
- words = splitter(line)
- if len(words) > 1 and words[0] == "DECLARE_STACK_OF":
- sys.stdout.write(template.replace("%", words[1]))
+ if "DECLARE_STACK_OF" in line:
+ words = splitter(line)
+ if len(words) > 1 and words[0] == "DECLARE_STACK_OF":
+ sys.stdout.write(template.replace("%", words[1]))
if token is not None:
- sys.stdout.write(footer.replace("%", token))
+ sys.stdout.write(footer.replace("%", token))
diff --git a/buildtools/freebsd-skeleton/rpki-ca/Makefile b/buildtools/freebsd-skeleton/rpki-ca/Makefile
index 1edb962d..b48ae73c 100644
--- a/buildtools/freebsd-skeleton/rpki-ca/Makefile
+++ b/buildtools/freebsd-skeleton/rpki-ca/Makefile
@@ -1,7 +1,7 @@
PORTNAME= rpki-ca
PORTVERSION= 0.%(SVNVERSION)s
CATEGORIES= net
-MASTER_SITES= http://download.rpki.net/
+MASTER_SITES= %(MASTER_SITE)s
DISTFILES= rpki-%(SVNBRANCH)s-r%(SVNVERSION)s.tar.xz
WRKSRC= ${WRKDIR}/rpki-%(SVNBRANCH)s-r%(SVNVERSION)s
MAINTAINER= sra@hactrn.net
@@ -30,13 +30,12 @@ USE_PERL5_BUILD=yes
# For building OpenSSL, not needed otherwise
BUILD_DEPENDS+= makedepend>0:${PORTSDIR}/devel/makedepend
-RPKID_DEPENDS= ${PYTHON_PKGNAMEPREFIX}lxml>0:${PORTSDIR}/devel/py-lxml \
- ${PYTHON_PKGNAMEPREFIX}MySQLdb>0:${PORTSDIR}/databases/py-MySQLdb \
- ${PYTHON_PKGNAMEPREFIX}django16>=1.6:${PORTSDIR}/www/py-django16 \
+RPKID_DEPENDS= ${PYTHON_PKGNAMEPREFIX}lxml>0:${PORTSDIR}/devel/py-lxml \
+ ${PYTHON_PKGNAMEPREFIX}django18>=1.8:${PORTSDIR}/www/py-django18 \
${PYTHON_PKGNAMEPREFIX}vobject>0:${PORTSDIR}/deskutils/py-vobject \
${PYTHON_PKGNAMEPREFIX}yaml>0:${PORTSDIR}/devel/py-yaml \
${PYTHON_PKGNAMEPREFIX}netifaces>0:${PORTSDIR}/net/py-netifaces \
- ${PYTHON_PKGNAMEPREFIX}south>=0.7.6:${PORTSDIR}/databases/py-south
+ ${PYTHON_PKGNAMEPREFIX}tornado>0:${PORTSDIR}/www/py-tornado
BUILD_DEPENDS+= ${RPKID_DEPENDS}
RUN_DEPENDS+= ${RPKID_DEPENDS}
diff --git a/buildtools/freebsd-skeleton/rpki-ca/files/rpki-ca.in b/buildtools/freebsd-skeleton/rpki-ca/files/rpki-ca.in
index d6234a12..0c021e6d 100644
--- a/buildtools/freebsd-skeleton/rpki-ca/files/rpki-ca.in
+++ b/buildtools/freebsd-skeleton/rpki-ca/files/rpki-ca.in
@@ -22,13 +22,12 @@ stop_cmd="rpkica_stop"
load_rc_config $name
: ${rpkica_enable="NO"}
-
: ${rpkica_pid_dir="/var/run/rpki"}
rpkica_start()
{
- /usr/bin/install -m 755 -d $rpkica_pid_dir
- /usr/local/sbin/rpki-start-servers
+ /usr/bin/install -m 755 -d ${rpkica_pid_dir}
+ /usr/local/sbin/rpki-start-servers ${rpkica_flags}
return 0
}
@@ -36,9 +35,9 @@ rpkica_stop()
{
for i in rpkid pubd irdbd rootd
do
- if /bin/test -f $rpkica_pid_dir/$i.pid
+ if /bin/test -f ${rpkica_pid_dir}/${i}.pid
then
- /bin/kill `/bin/cat $rpkica_pid_dir/$i.pid`
+ /bin/kill `/bin/cat ${rpkica_pid_dir}/${i}.pid`
fi
done
return 0
diff --git a/buildtools/freebsd-skeleton/rpki-ca/pkg-install b/buildtools/freebsd-skeleton/rpki-ca/pkg-install
index 157b3ced..10c44eda 100644
--- a/buildtools/freebsd-skeleton/rpki-ca/pkg-install
+++ b/buildtools/freebsd-skeleton/rpki-ca/pkg-install
@@ -29,8 +29,7 @@ POST-INSTALL)
/usr/bin/install -o root -g wheel -d /usr/local/share/rpki/publication
/usr/bin/install -o www -g www -d /usr/local/share/rpki/python-eggs
- /usr/local/sbin/rpki-sql-setup --create-if-missing
- /usr/local/sbin/rpki-sql-setup --apply-upgrades --verbose
+ /usr/local/sbin/rpki-sql-setup create
/usr/local/sbin/rpki-manage syncdb --noinput
/usr/local/sbin/rpki-manage migrate app
diff --git a/buildtools/freebsd-skeleton/rpki-rp/Makefile b/buildtools/freebsd-skeleton/rpki-rp/Makefile
index 16537fdc..5b27f211 100644
--- a/buildtools/freebsd-skeleton/rpki-rp/Makefile
+++ b/buildtools/freebsd-skeleton/rpki-rp/Makefile
@@ -1,7 +1,7 @@
PORTNAME= rpki-rp
PORTVERSION= 0.%(SVNVERSION)s
CATEGORIES= net
-MASTER_SITES= http://download.rpki.net/
+MASTER_SITES= %(MASTER_SITE)s
DISTFILES= rpki-%(SVNBRANCH)s-r%(SVNVERSION)s.tar.xz
WRKSRC= ${WRKDIR}/rpki-%(SVNBRANCH)s-r%(SVNVERSION)s
MAINTAINER= sra@hactrn.net
@@ -24,15 +24,14 @@ USE_PERL5_BUILD=yes
# For building OpenSSL, not needed otherwise
BUILD_DEPENDS+= makedepend>0:${PORTSDIR}/devel/makedepend
-# Needed at build to keep ./configure from complaining;
-# needed at runtime for rcynic to do anything useful.
-BUILD_DEPENDS+= rsync>0:${PORTSDIR}/net/rsync
-RUN_DEPENDS+= rsync>0:${PORTSDIR}/net/rsync
+RCYNIC_DEPENDS= rsync>0:${PORTSDIR}/net/rsync \
+ rrdtool>0:${PORTSDIR}/databases/rrdtool \
+ ${PYTHON_PKGNAMEPREFIX}lxml>0:${PORTSDIR}/devel/py-lxml \
+ ${PYTHON_PKGNAMEPREFIX}django18>=1.8:${PORTSDIR}/www/py-django18 \
+ ${PYTHON_PKGNAMEPREFIX}tornado>0:${PORTSDIR}/www/py-tornado
-# Needed at build to keep ./configure from complaining;
-# used at runtime by rcynic-html.
-BUILD_DEPENDS+= rrdtool>0:${PORTSDIR}/databases/rrdtool
-RUN_DEPENDS+= rrdtool>0:${PORTSDIR}/databases/rrdtool
+BUILD_DEPENDS+= ${RCYNIC_DEPENDS}
+RUN_DEPENDS+= ${RCYNIC_DEPENDS}
# Just want relying party tools, try to use system OpenSSL if we can.
diff --git a/buildtools/freebsd-skeleton/rpki-rp/files/pkg-install.in b/buildtools/freebsd-skeleton/rpki-rp/files/pkg-install.in
index 0ba70633..ace1426f 100644
--- a/buildtools/freebsd-skeleton/rpki-rp/files/pkg-install.in
+++ b/buildtools/freebsd-skeleton/rpki-rp/files/pkg-install.in
@@ -3,47 +3,25 @@
case $2 in
PRE-INSTALL)
- if /usr/sbin/pw groupshow "rcynic" 2>/dev/null
+ if /usr/sbin/pw groupshow "rpki" 2>/dev/null
then
- echo "You already have a group \"rcynic\", so I will use it."
- elif /usr/sbin/pw groupadd rcynic
+ echo "You already have a group \"rpki\", so I will use it."
+ elif /usr/sbin/pw groupadd rpki
then
- echo "Added group \"rcynic\"."
+ echo "Added group \"rpki\"."
else
- echo "Adding group \"rcynic\" failed..."
+ echo "Adding group \"rpki\" failed..."
echo "Please create it, then try again."
exit 1
fi
- if /usr/sbin/pw usershow "rcynic" 2>/dev/null
+ if /usr/sbin/pw usershow "rpki" 2>/dev/null
then
- echo "You already have a user \"rcynic\", so I will use it."
- elif /usr/sbin/pw useradd rcynic -g rcynic -h - -d /nonexistant -s /usr/sbin/nologin -c "RPKI validation system"
+ echo "You already have a user \"rpki\", so I will use it."
+ elif /usr/sbin/pw useradd rpki -g rpki -h - -d /nonexistant -s /usr/sbin/nologin -c "RPKI system daemons"
then
- echo "Added user \"rcynic\"."
+ echo "Added user \"rpki\"."
else
- echo "Adding user \"rcynic\" failed..."
- echo "Please create it, then try again."
- exit 1
- fi
- if /usr/sbin/pw groupshow "rpkirtr" 2>/dev/null
- then
- echo "You already have a group \"rpkirtr\", so I will use it."
- elif /usr/sbin/pw groupadd rpkirtr
- then
- echo "Added group \"rpkirtr\"."
- else
- echo "Adding group \"rpkirtr\" failed..."
- echo "Please create it, then try again."
- exit 1
- fi
- if /usr/sbin/pw usershow "rpkirtr" 2>/dev/null
- then
- echo "You already have a user \"rpkirtr\", so I will use it."
- elif /usr/sbin/pw useradd rpkirtr -g rpkirtr -h - -d /nonexistant -s /usr/sbin/nologin -c "RPKI router server"
- then
- echo "Added user \"rpkirtr\"."
- else
- echo "Adding user \"rpkirtr\" failed..."
+ echo "Adding user \"rpki\" failed..."
echo "Please create it, then try again."
exit 1
fi
@@ -55,25 +33,24 @@ POST-INSTALL)
echo "Creating /var/rcynic"
/usr/bin/install -o root -g wheel -d /var/rcynic
fi
- for dir in /var/rcynic/data /var/rcynic/rpki-rtr
+ for dir in /var/rcynic/data /var/rcynic/rpki-rtr /var/rcynic/rpki-rtr/sockets
do
- /usr/bin/install -o rcynic -g rcynic -d $dir
+ /usr/bin/install -o rpki -g rpki -d $dir
done
- /usr/bin/install -o rpkirtr -g rcynic -m 775 -d /var/rcynic/rpki-rtr/sockets
- if test ! -f /usr/local/etc/rcynic.conf
+ if test ! -f /usr/local/etc/rpki.conf
then
- /bin/cp -p /usr/local/etc/rcynic.conf.sample /usr/local/etc/rcynic.conf
+ /bin/cp -p /usr/local/etc/rpki.conf.sample /usr/local/etc/rpki.conf
fi
htmldir=/usr/local/www/apache%%APACHE_VERSION%%/data/rcynic
- /usr/bin/install -o rcynic -g rcynic -d $htmldir
- if test "X`/usr/bin/crontab -l -u rcynic 2>/dev/null`" != "X"
+ /usr/bin/install -o rpki -g rpki -d $htmldir
+ if test "X`/usr/bin/crontab -l -u rpki 2>/dev/null`" != "X"
then
- echo "rcynic user already has a crontab, leaving it alone"
+ echo "rpki user already has a crontab, leaving it alone"
else
- echo "Setting up rcynic's crontab to run rcynic-cron script"
+ echo "Setting up rpki's crontab to run rcynic-cron script"
/usr/bin/awk -v t=`/usr/bin/hexdump -n 2 -e '"%u\n"' /dev/random` '
BEGIN {printf "MAILTO=root\n%u * * * *\texec /usr/local/bin/rcynic-cron\n", t % 60}' |
- /usr/bin/crontab -u rcynic -
+ /usr/bin/crontab -u rpki -
fi
echo "Setting up rpki-rtr listener under inetd"
if /usr/bin/egrep -q '^rpki-rtr' /etc/services
@@ -101,7 +78,7 @@ POST-INSTALL)
if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp[ ]" /etc/inetd.conf
then
echo "You already have an /etc/inetd.conf entry for rpki-rtr on TCPv4, so I will use it."
- elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpkirtr /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"
+ elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpki /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"
then
echo "Added rpki-rtr for TCPv4 to /etc/inetd.conf."
else
@@ -111,7 +88,7 @@ POST-INSTALL)
if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp6[ ]" /etc/inetd.conf
then
echo "You already have an /etc/inetd.conf entry for rpki-rtr on TCPv6, so I will use it."
- elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpkirtr /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"
+ elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpki /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"
then
echo "Added rpki-rtr for TCPv6 to /etc/inetd.conf."
else
diff --git a/buildtools/make-rcynic-script.py b/buildtools/make-rcynic-script.py
index 94fb6f32..fdfb3d6b 100644
--- a/buildtools/make-rcynic-script.py
+++ b/buildtools/make-rcynic-script.py
@@ -24,8 +24,8 @@ sys.stdout.write('''\
''' % os.environ)
for k, v in os.environ.iteritems():
- if k.startswith("AC_") and k != "AC_PYTHON_INTERPRETER":
- sys.stdout.write("%s = '''%s'''\n" % (k.lower(), v))
+ if k.startswith("AC_") and k != "AC_PYTHON_INTERPRETER":
+ sys.stdout.write("%s = '''%s'''\n" % (k.lower(), v))
sys.stdout.write('''\
diff --git a/buildtools/make-relaxng.py b/buildtools/make-relaxng.py
index d540fa9a..a8b562fa 100644
--- a/buildtools/make-relaxng.py
+++ b/buildtools/make-relaxng.py
@@ -1,5 +1,5 @@
# $Id$
-#
+#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
@@ -26,6 +26,8 @@ import sys
header = """\
# Automatically generated, do not edit.
+# pylint: skip-file
+
from rpki.relaxng_parser import RelaxNGParser
"""
@@ -40,13 +42,13 @@ del RelaxNGParser
"""
def symbol(s):
- for suffix in (".rng", "-schema"):
- if s.endswith(suffix):
- s = s[:-len(suffix)]
- return s.replace("-", "_")
+ for suffix in (".rng", "-schema"):
+ if s.endswith(suffix):
+ s = s[:-len(suffix)]
+ return s.replace("-", "_")
sys.stdout.write(header)
for fn in sys.argv[1:]:
- with open(fn, "r") as f:
- sys.stdout.write(format % dict(name = symbol(fn), rng = f.read()))
+ with open(fn, "r") as f:
+ sys.stdout.write(format % dict(name = symbol(fn), rng = f.read()))
sys.stdout.write(footer)
diff --git a/buildtools/make-sql-schemas.py b/buildtools/make-sql-schemas.py
index 0df775c2..051f17e8 100644
--- a/buildtools/make-sql-schemas.py
+++ b/buildtools/make-sql-schemas.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2009-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -13,13 +13,13 @@
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
-#
+#
# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -47,6 +47,6 @@ format_2 = """\
print format_1
for name in schemas:
- print format_2 % {
- "name" : name,
- "sql" : open(name + ".sql").read() }
+ print format_2 % {
+ "name" : name,
+ "sql" : open(name + ".sql").read() }
diff --git a/buildtools/make-version.py b/buildtools/make-version.py
index a73a89ab..09d43801 100644
--- a/buildtools/make-version.py
+++ b/buildtools/make-version.py
@@ -2,11 +2,11 @@
# $Id$
# Copyright (C) 2013 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -37,33 +37,33 @@ import sys
unknown = "Unknown"
try:
- v = subprocess.Popen(("svnversion", "-c"), stdout = subprocess.PIPE).communicate()[0]
- err = None
+ v = subprocess.Popen(("svnversion", "-c"), stdout = subprocess.PIPE).communicate()[0]
+ err = None
except Exception, e:
- v = unknown
- err = e
+ v = unknown
+ err = e
if any(s in v for s in ("Unversioned", "Uncommitted", unknown)):
- v = unknown
+ v = unknown
else:
- v = "0." + v.strip().split(":")[-1].translate(None, "SMP")
+ v = "0." + v.strip().split(":")[-1].translate(None, "SMP")
try:
- old = open("VERSION", "r").read().strip()
+ old = open("VERSION", "r").read().strip()
except:
- old = None
+ old = None
if err is not None and (old is None or old == unknown):
- sys.stderr.write("Warning: No saved version and svnversion failed: %s\n" % err)
+ sys.stderr.write("Warning: No saved version and svnversion failed: %s\n" % err)
if v == unknown:
- if old is not None and old != unknown:
- v = old
- else:
- sys.stderr.write("Warning: Could not determine software version\n")
+ if old is not None and old != unknown:
+ v = old
+ else:
+ sys.stderr.write("Warning: Could not determine software version\n")
if old is None or v != old:
- with open("rpki/version.py", "w") as f:
- f.write("VERSION = \"%s\"\n" % v)
- with open("VERSION", "w") as f:
- f.write(v + "\n")
+ with open("rpki/version.py", "w") as f:
+ f.write("VERSION = \"%s\"\n" % v)
+ with open("VERSION", "w") as f:
+ f.write(v + "\n")
diff --git a/buildtools/pull-doc-from-wiki.py b/buildtools/pull-doc-from-wiki.py
index 7acc82a8..5995823a 100644
--- a/buildtools/pull-doc-from-wiki.py
+++ b/buildtools/pull-doc-from-wiki.py
@@ -1,12 +1,12 @@
# $Id$
-#
+#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
@@ -45,95 +45,94 @@ import tempfile
def main():
- base = "https://trac.rpki.net"
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-b", "--base_url",
- default = base,
- help = "base URL for documentation web site")
- parser.add_argument("-t", "--toc",
- default = base + "/wiki/doc/RPKI/TOC",
- help = "table of contents URL")
- parser.add_argument("-d", "--directory",
- default = ".",
- help = "output directory")
- parser.add_argument("-p", "--pdf_file",
- default = "manual.pdf",
- help = "output PDF file")
- parser.add_argument("-r", "--html2textrc",
- default = os.path.join(os.path.dirname(sys.argv[0]), "html2textrc"),
- help = "html2textrc rules file")
- args = parser.parse_args()
-
- urls = str(xsl_get_toc(lxml.etree.parse(urllib.urlopen(args.toc)).getroot(),
- basename = repr(args.base_url))).splitlines()
-
- assert all(urlparse.urlparse(url).path.startswith("/wiki/") for url in urls)
-
- htmldoc = subprocess.Popen(
- ("htmldoc", "--book", "--title", "--outfile", args.pdf_file, "--format", "pdf",
- "--firstpage", "p1", "--size", "Universal", "--no-duplex",
- "--fontsize", "11.0", "--fontspacing", "1.1", "--headfootsize", "11.0",
- "--headingfont", "Helvetica", "--bodyfont", "Times", "--headfootfont", "Helvetica-Oblique",
- "-"), stdin = subprocess.PIPE)
-
- lxml.etree.ElementTree(xml_title).write(htmldoc.stdin)
-
- png_fns = []
-
- for url in urls:
- path = urlparse.urlparse(url).path
- page = xsl_get_page(lxml.etree.parse(urllib.urlopen(url)).getroot(),
- basename = repr(args.base_url),
- path = repr(path))
-
- for img in page.xpath("//img | //object | //embed"):
- attr = "data" if img.tag == "object" else "src"
- img_url = img.get(attr)
- sys.stderr.write("Image URL: {}\n".format(img_url))
- if img_url.endswith(".svg"):
- png_fd, png_fn = tempfile.mkstemp(suffix = ".png")
- sys.stderr.write("Converting {} to {}\n".format(img_url, png_fn))
- subprocess.Popen(("svg2png", "-h", "700", "-w", "600", "-", "-"),
- stdout = png_fd,
- stdin = subprocess.PIPE).communicate(urllib.urlopen(img_url).read())
- os.close(png_fd)
- img.set(attr, png_fn)
- png_fns.append(png_fn)
-
- page.write(htmldoc.stdin)
-
- html2text = subprocess.Popen(("html2text", "-rcfile", args.html2textrc, "-nobs", "-ascii"),
- stdin = subprocess.PIPE,
- stdout = subprocess.PIPE)
- page.write(html2text.stdin)
- html2text.stdin.close()
- lines = html2text.stdout.readlines()
- html2text.stdout.close()
- html2text.wait()
-
- while lines and lines[0].isspace():
- del lines[0]
-
- fn = os.path.join(args.directory, path[len("/wiki/"):].replace("/", "."))
- f = open(fn, "w")
- want_blank = False
- for line in lines:
- blank = line.isspace()
- if want_blank and not blank:
- f.write("\n")
- if not blank:
- f.write(line)
- want_blank = blank
- f.close()
- sys.stderr.write("Wrote %s\n" % fn)
-
- htmldoc.stdin.close()
- htmldoc.wait()
- sys.stderr.write("Wrote %s\n" % args.pdf_file)
-
- for png_fn in png_fns:
- os.unlink(png_fn)
+ base = "https://trac.rpki.net"
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("-b", "--base_url",
+ default = base,
+ help = "base URL for documentation web site")
+ parser.add_argument("-t", "--toc",
+ default = base + "/wiki/doc/RPKI/TOC",
+ help = "table of contents URL")
+ parser.add_argument("-d", "--directory",
+ default = ".",
+ help = "output directory")
+ parser.add_argument("-p", "--pdf_file",
+ default = "manual.pdf",
+ help = "output PDF file")
+ parser.add_argument("-r", "--html2textrc",
+ default = os.path.join(os.path.dirname(sys.argv[0]), "html2textrc"),
+ help = "html2textrc rules file")
+ args = parser.parse_args()
+
+ urls = str(xsl_get_toc(lxml.etree.parse(urllib.urlopen(args.toc)).getroot(),
+ basename = repr(args.base_url))).splitlines()
+
+ assert all(urlparse.urlparse(url).path.startswith("/wiki/") for url in urls)
+
+ htmldoc = subprocess.Popen(
+ ("htmldoc", "--book", "--title", "--outfile", args.pdf_file, "--format", "pdf",
+ "--firstpage", "p1", "--size", "Universal", "--no-duplex",
+ "--fontsize", "11.0", "--fontspacing", "1.1", "--headfootsize", "11.0",
+ "--headingfont", "Helvetica", "--bodyfont", "Times", "--headfootfont", "Helvetica-Oblique",
+ "-"), stdin = subprocess.PIPE)
+
+ lxml.etree.ElementTree(xml_title).write(htmldoc.stdin)
+
+ png_fns = []
+
+ for url in urls:
+ path = urlparse.urlparse(url).path
+ page = xsl_get_page(lxml.etree.parse(urllib.urlopen(url)).getroot(),
+ basename = repr(args.base_url),
+ path = repr(path))
+
+ for img in page.xpath("//img | //object | //embed"):
+ attr = "data" if img.tag == "object" else "src"
+ img_url = img.get(attr)
+ if img_url.endswith(".svg"):
+ #sys.stderr.write("Converting %s to PNG\n" % img_url)
+ png_fd, png_fn = tempfile.mkstemp(suffix = ".png")
+ subprocess.Popen(("svg2png", "-h", "700", "-w", "600", "-", "-"),
+ stdout = png_fd,
+ stdin = subprocess.PIPE).communicate(urllib.urlopen(img_url).read())
+ os.close(png_fd)
+ img.set(attr, png_fn)
+ png_fns.append(png_fn)
+
+ page.write(htmldoc.stdin)
+
+ html2text = subprocess.Popen(("html2text", "-rcfile", args.html2textrc, "-nobs", "-ascii"),
+ stdin = subprocess.PIPE,
+ stdout = subprocess.PIPE)
+ page.write(html2text.stdin)
+ html2text.stdin.close()
+ lines = html2text.stdout.readlines()
+ html2text.stdout.close()
+ html2text.wait()
+
+ while lines and lines[0].isspace():
+ del lines[0]
+
+ fn = os.path.join(args.directory, path[len("/wiki/"):].replace("/", "."))
+ f = open(fn, "w")
+ want_blank = False
+ for line in lines:
+ blank = line.isspace()
+ if want_blank and not blank:
+ f.write("\n")
+ if not blank:
+ f.write(line)
+ want_blank = blank
+ f.close()
+ sys.stderr.write("Wrote %s\n" % fn)
+
+ htmldoc.stdin.close()
+ htmldoc.wait()
+ sys.stderr.write("Wrote %s\n" % args.pdf_file)
+
+ for png_fn in png_fns:
+ os.unlink(png_fn)
# HTMLDOC title page. At some point we might want to generate this
# dynamically as an ElementTree, but static content will do for the
@@ -189,7 +188,7 @@ xsl_get_toc = lxml.etree.XSLT(lxml.etree.XML('''\
# we care, and this seems to work.
#
# Original author's explanation:
-#
+#
# The rather convoluted XPath expression for selecting the following
# sibling aaa nodes which are merged with the current one:
#
@@ -322,12 +321,12 @@ xsl_get_page = lxml.etree.XSLT(lxml.etree.XML('''\
<xsl:otherwise>
<xsl:value-of select="$s"/>
</xsl:otherwise>
- </xsl:choose>
+ </xsl:choose>
</xsl:template>
<xsl:template match="ol">
<xsl:if test="not(preceding-sibling::*[1]/self::ol)">
- <xsl:variable name="following"
+ <xsl:variable name="following"
select="following-sibling::ol[
not(preceding-sibling::*[
not(self::ol) and
diff --git a/buildtools/pylint.rc b/buildtools/pylint.rc
index ed296108..ac893ad6 100644
--- a/buildtools/pylint.rc
+++ b/buildtools/pylint.rc
@@ -19,10 +19,22 @@
[MASTER]
profile=no
-ignore=.svn
+
+# Including "gui" here is a temporary measure: it's risky, but so is
+# making ten zillion cosmetic changes in a co-worker's code on a
+# long-running development branch.
+
+ignore=.svn,.git,migrations,south_migrations,gui
+
persistent=yes
cache-size=500
-load-plugins=
+
+# We make heavy use of Django, which confuses pylint. Fortunately, there's a plug-in.
+load-plugins=pylint_django
+
+# Extension (C, etc) modules that pylint should trust enough to import.
+
+extension-pkg-whitelist=lxml,rpki.POW
[MESSAGES CONTROL]
@@ -44,14 +56,24 @@ disable-msg-cat=
#enable-msg=
# Disable the message(s) with the given id(s).
-disable=R0801,R0903,R0913,C0321,R0904,W0201,E1101,W0614,C0301,R0901,C0302,R0902,R0201,W0613,R0912,R0915,W0703,W0212,R0914,W0603,W0142,I0011,C0111,C0103,R0401,C0326,R0911,C0325
+#
+# I0011 is (sort of) special, in that it marks places where we've used
+# inline overrides in the code to control pylint's behavior.
+# Ordinarily we leave this turned off, but it's a good idea to run
+# with it enabled every once in a while to see what we've overridden.
+#
+disable=I0011,I0013,R0801,C0111,C0301,C0326,W0702,R0902,R0913,W0703,R0912,R0903,R0915,R0914,C0302,W0613,R0201,R0901,R0904,C0325,R0911,C0103,R0401
+#
+# Additional messages we used to have disabled but now appear to be
+# able to leave alone.
+#
+#disable=C0321,W0201,E1101,W0614,W0212,W0603,W0142,C0330,W0311,E1124
[REPORTS]
#output-format=parseable
msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}
-include-ids=yes
files-output=no
reports=no
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
@@ -196,7 +218,7 @@ max-line-length=80
max-module-lines=1000
# String used as indentation unit.
-indent-string=' '
+indent-string=' '
[SIMILARITIES]
diff --git a/ca/Makefile.in b/ca/Makefile.in
index 70cce62e..6a2c8d3f 100644
--- a/ca/Makefile.in
+++ b/ca/Makefile.in
@@ -35,7 +35,11 @@ WSGI_PYTHON_EGG_CACHE_USER = @WSGI_PYTHON_EGG_CACHE_USER@
CA_INSTALL_TARGETS = @CA_INSTALL_TARGETS@
-all:: examples/rpki.conf
+# Apache configuration may end up moving to common runtime config, but
+# that yak will have to wait for another day to get its shave.
+
+all::
+ @true
clean::
cd tests; $(MAKE) $@
@@ -43,11 +47,9 @@ clean::
install:: ${CA_INSTALL_TARGETS}
install-always:: all
- @echo
- @echo "== Default configuration file location is ${sysconfdir}/rpki.conf =="
- @echo
- ${INSTALL} examples/rpki.conf ${DESTDIR}${sysconfdir}/rpki.conf.sample
${INSTALL} -d ${DESTDIR}${datarootdir}/rpki/publication
+ ${INSTALL} -d ${DESTDIR}${datarootdir}/rpki/rrdp-publication
+ ${INSTALL} -d ${DESTDIR}${datarootdir}/rpki/bpki
uninstall deinstall::
-${libexecdir}/rpkigui-apache-conf-gen --remove --verbose
@@ -62,30 +64,8 @@ distclean:: clean
cd tests; ${MAKE} $@
rm -f Makefile
-examples/rpki.conf: ${abs_top_srcdir}/rpki/autoconf.py rpki-confgen rpki-confgen.xml
- ${PYTHON} rpki-confgen \
- --read-xml rpki-confgen.xml \
- --autoconf \
- --set myrpki::handle=`hostname -f | sed 's/[.]/_/g'` \
- --set myrpki::rpkid_server_host=`hostname -f` \
- --set myrpki::pubd_server_host=`hostname -f` \
- --pwgen myrpki::shared_sql_password \
- --pwgen web_portal::secret-key \
- --write-conf $@
-
-clean::
- rm -f examples/rpki.conf
-
install-postconf: \
- install-user install-egg-cache install-conf install-apache install-mysql install-django install-bpki install-cron
-
-# This should create user "rpkid" and group "rpkid", but as we have
-# not yet tested our ability to run in such a configuration, this
-# would be a little premature. Can steal Makefile code for this from
-# rcynic when we're ready to do something with it.
-
-install-user:
- @true
+ install-egg-cache install-conf install-apache install-bpki install-cron
# This is only necessary on some platforms (currently FreeBSD,
# due to shortcomings in the way that Python eggs are installed
@@ -99,38 +79,25 @@ install-egg-cache:
fi; \
fi
-# We used to play the FreeBSD game of copying rpki.conf.sample to
-# rpki.conf on install and removing rpki.conf if it's identical to
-# rpki.conf.sample in uninstall, but that turns out to work poorly
-# with generated passwords. So now we copy rpki.conf.sample if and
-# only if rpki.conf does not exist, and we leave removal of rpki.conf
-# for the user to deal with. This in turn leaves us with a different
-# problem of how to upgrade rpki.conf, but at least in the FreeBSD
-# universe we're supposed to leave that problem for the user.
-
install-conf:
- if test -f ${DESTDIR}${sysconfdir}/rpki.conf; \
+ ${sbindir}/rpki-confgen \
+ --read-xml ${DESTDIR}${sysconfdir}/rpki/rpki.rp.xml \
+ --set myrpki::run_rpkid=yes \
+ --set myrpki::run_pubd=yes \
+ --write-xml ${DESTDIR}${sysconfdir}/rpki/rpki.ca.xml \
+ --write-conf ${DESTDIR}${sysconfdir}/rpki/rpki.ca.conf.sample
+ if test ! -f ${DESTDIR}${sysconfdir}/rpki.conf ||\
+ cmp -s ${DESTDIR}${sysconfdir}/rpki.conf ${DESTDIR}${sysconfdir}/rpki/rpki.rp.conf.sample;\
then \
- true; \
- else \
- cp -p ${DESTDIR}${sysconfdir}/rpki.conf.sample ${DESTDIR}${sysconfdir}/rpki.conf; \
+ cp -p ${DESTDIR}${sysconfdir}/rpki/rpki.ca.conf.sample ${DESTDIR}${sysconfdir}/rpki.conf
fi
-uninstall deinstall::
-# if cmp -s ${DESTDIR}${sysconfdir}/rpki.conf ${DESTDIR}${sysconfdir}/rpki.conf.sample; then rm -f ${DESTDIR}${sysconfdir}/rpki.conf; else true; fi
- rm -f ${DESTDIR}${sysconfdir}/rpki.conf.sample
+#uninstall deinstall::
+# rm -f ${DESTDIR}${sysconfdir}/rpki/rpki.ca.xml ${DESTDIR}${sysconfdir}/rpki/rpki.ca.conf.sample
install-apache:
${libexecdir}/rpkigui-apache-conf-gen --install --verbose
-install-mysql:
- ${sbindir}/rpki-sql-setup --create-if-missing
- ${sbindir}/rpki-sql-setup --apply-upgrades --verbose
-
-install-django:
- ${sbindir}/rpki-manage syncdb --noinput
- ${sbindir}/rpki-manage migrate app
-
install-bpki:
${sbindir}/rpkic initialize_server_bpki
@@ -151,11 +118,8 @@ uninstall deinstall:: uninstall-cron-using-crontab
# Code for setting up and tearing down cron jobs using the crontab(1)
# program. We don't use this on all platforms, but we do use it on
# more than one, so it's broken out here as common code.
-#
-# CRONTAB_USER really should be rpkid, but we don't have the rest of
-# the package set up for that yet, so run it as root for now.
-CRONTAB_USER = root
+CRONTAB_USER = rpki
install-cron-using-crontab:
@crontab -l -u ${CRONTAB_USER} 2>/dev/null | \
diff --git a/ca/irbe_cli b/ca/irbe_cli
index 91c12aa9..7d62db9d 100755
--- a/ca/irbe_cli
+++ b/ca/irbe_cli
@@ -37,311 +37,293 @@ Command line IR back-end control program for rpkid and pubd.
# Command line processing of this program is too complex and
# idiosyncratic to be worth trying to reimplement using argparse.
+import os
import sys
import getopt
import textwrap
import rpki.left_right
-import rpki.http
+import rpki.http_simple
import rpki.x509
import rpki.config
import rpki.log
import rpki.publication
-import rpki.async
pem_out = None
-class UsageWrapper(textwrap.TextWrapper):
- """
- Call interface around Python textwrap.Textwrapper class.
- """
+# This program needs a complete rewrite. In the meantime, shut up about lint.
+# pylint: skip-file
- def __call__(self, *args):
+class UsageWrapper(textwrap.TextWrapper):
"""
- Format arguments, with TextWrapper indentation.
+ Call interface around Python textwrap.Textwrapper class.
"""
- return self.fill(textwrap.dedent(" ".join(args)))
+
+ def __call__(self, *args):
+ """
+ Format arguments, with TextWrapper indentation.
+ """
+ return self.fill(textwrap.dedent(" ".join(args)))
usage_fill = UsageWrapper(subsequent_indent = " " * 4)
class reply_elt_mixin(object):
- """
- Protocol mix-in for printout of reply PDUs.
- """
-
- is_cmd = False
-
- def client_reply_decode(self):
- pass
-
- def client_reply_show(self):
- print self.element_name
- for i in self.attributes + self.elements:
- if getattr(self, i) is not None:
- print " %s: %s" % (i, getattr(self, i))
-
-class cmd_elt_mixin(reply_elt_mixin):
- """
- Protocol mix-in for command line client element PDUs.
- """
-
- is_cmd = True
-
- ## @var excludes
- # XML attributes and elements that should not be allowed as command
- # line arguments.
- excludes = ()
-
- @classmethod
- def usage(cls):
"""
- Generate usage message for this PDU.
+ Protocol mix-in for printout of reply PDUs.
"""
- args = " ".join("--" + x + "=" for x in cls.attributes + cls.elements if x not in cls.excludes)
- bools = " ".join("--" + x for x in cls.booleans)
- if args and bools:
- return args + " " + bools
- else:
- return args or bools
- def client_getopt(self, argv):
- """
- Parse options for this class.
- """
- # pylint: disable=W0621
- opts, argv = getopt.getopt(argv, "", [x + "=" for x in self.attributes + self.elements if x not in self.excludes] + list(self.booleans))
- for o, a in opts:
- o = o[2:]
- handler = getattr(self, "client_query_" + o, None)
- if handler is not None:
- handler(a)
- elif o in self.booleans:
- setattr(self, o, True)
- else:
- assert o in self.attributes
- setattr(self, o, a)
- return argv
-
- def client_query_bpki_cert(self, arg):
- """
- Special handler for --bpki_cert option.
- """
- self.bpki_cert = rpki.x509.X509(Auto_file = arg)
+ is_cmd = False
- def client_query_glue(self, arg):
- """
- Special handler for --bpki_glue option.
- """
- self.bpki_glue = rpki.x509.X509(Auto_file = arg)
+ def client_reply_decode(self):
+ pass
- def client_query_bpki_cms_cert(self, arg):
- """
- Special handler for --bpki_cms_cert option.
- """
- self.bpki_cms_cert = rpki.x509.X509(Auto_file = arg)
+ def client_reply_show(self):
+ print self.element_name
+ for i in self.attributes + self.elements:
+ if getattr(self, i) is not None:
+ print " %s: %s" % (i, getattr(self, i))
- def client_query_cms_glue(self, arg):
+class cmd_elt_mixin(reply_elt_mixin):
"""
- Special handler for --bpki_cms_glue option.
+ Protocol mix-in for command line client element PDUs.
"""
- self.bpki_cms_glue = rpki.x509.X509(Auto_file = arg)
-class cmd_msg_mixin(object):
- """
- Protocol mix-in for command line client message PDUs.
- """
+ is_cmd = True
+
+ ## @var excludes
+ # XML attributes and elements that should not be allowed as command
+ # line arguments.
+ excludes = ()
+
+ @classmethod
+ def usage(cls):
+ """
+ Generate usage message for this PDU.
+ """
+ args = " ".join("--" + x + "=" for x in cls.attributes + cls.elements if x not in cls.excludes)
+ bools = " ".join("--" + x for x in cls.booleans)
+ if args and bools:
+ return args + " " + bools
+ else:
+ return args or bools
+
+ def client_getopt(self, argv):
+ """
+ Parse options for this class.
+ """
+ # pylint: disable=W0621
+ opts, argv = getopt.getopt(argv, "", [x + "=" for x in self.attributes + self.elements if x not in self.excludes] + list(self.booleans))
+ for o, a in opts:
+ o = o[2:]
+ handler = getattr(self, "client_query_" + o, None)
+ if handler is not None:
+ handler(a)
+ elif o in self.booleans:
+ setattr(self, o, True)
+ else:
+ assert o in self.attributes
+ setattr(self, o, a)
+ return argv
+
+ def client_query_bpki_cert(self, arg):
+ """
+ Special handler for --bpki_cert option.
+ """
+ self.bpki_cert = rpki.x509.X509(Auto_file = arg)
+
+ def client_query_glue(self, arg):
+ """
+ Special handler for --bpki_glue option.
+ """
+ self.bpki_glue = rpki.x509.X509(Auto_file = arg)
+
+ def client_query_bpki_cms_cert(self, arg):
+ """
+ Special handler for --bpki_cms_cert option.
+ """
+ self.bpki_cms_cert = rpki.x509.X509(Auto_file = arg)
+
+ def client_query_cms_glue(self, arg):
+ """
+ Special handler for --bpki_cms_glue option.
+ """
+ self.bpki_cms_glue = rpki.x509.X509(Auto_file = arg)
- @classmethod
- def usage(cls):
+class cmd_msg_mixin(object):
"""
- Generate usage message for this PDU.
+ Protocol mix-in for command line client message PDUs.
"""
- for k, v in cls.pdus.items():
- if v.is_cmd:
- print usage_fill(k, v.usage())
+
+ @classmethod
+ def usage(cls):
+ """
+ Generate usage message for this PDU.
+ """
+ for k, v in cls.pdus.items():
+ if v.is_cmd:
+ print usage_fill(k, v.usage())
# left-right protcol
class left_right_msg(cmd_msg_mixin, rpki.left_right.msg):
- class self_elt(cmd_elt_mixin, rpki.left_right.self_elt):
- pass
+ class self_elt(cmd_elt_mixin, rpki.left_right.self_elt):
+ pass
- class bsc_elt(cmd_elt_mixin, rpki.left_right.bsc_elt):
+ class bsc_elt(cmd_elt_mixin, rpki.left_right.bsc_elt):
- excludes = ("pkcs10_request",)
+ excludes = ("pkcs10_request",)
- def client_query_signing_cert(self, arg):
- """--signing_cert option."""
- self.signing_cert = rpki.x509.X509(Auto_file = arg)
+ def client_query_signing_cert(self, arg):
+ """--signing_cert option."""
+ self.signing_cert = rpki.x509.X509(Auto_file = arg)
- def client_query_signing_cert_crl(self, arg):
- """--signing_cert_crl option."""
- self.signing_cert_crl = rpki.x509.CRL(Auto_file = arg)
+ def client_query_signing_cert_crl(self, arg):
+ """--signing_cert_crl option."""
+ self.signing_cert_crl = rpki.x509.CRL(Auto_file = arg)
- def client_reply_decode(self):
- global pem_out
- if pem_out is not None and self.pkcs10_request is not None:
- if isinstance(pem_out, str):
- pem_out = open(pem_out, "w")
- pem_out.write(self.pkcs10_request.get_PEM())
+ def client_reply_decode(self):
+ global pem_out
+ if pem_out is not None and self.pkcs10_request is not None:
+ if isinstance(pem_out, str):
+ pem_out = open(pem_out, "w")
+ pem_out.write(self.pkcs10_request.get_PEM())
- class parent_elt(cmd_elt_mixin, rpki.left_right.parent_elt):
- pass
+ class parent_elt(cmd_elt_mixin, rpki.left_right.parent_elt):
+ pass
- class child_elt(cmd_elt_mixin, rpki.left_right.child_elt):
- pass
+ class child_elt(cmd_elt_mixin, rpki.left_right.child_elt):
+ pass
- class repository_elt(cmd_elt_mixin, rpki.left_right.repository_elt):
- pass
+ class repository_elt(cmd_elt_mixin, rpki.left_right.repository_elt):
+ pass
- class list_published_objects_elt(cmd_elt_mixin, rpki.left_right.list_published_objects_elt):
- excludes = ("uri",)
+ class list_published_objects_elt(cmd_elt_mixin, rpki.left_right.list_published_objects_elt):
+ excludes = ("uri",)
- class list_received_resources_elt(cmd_elt_mixin, rpki.left_right.list_received_resources_elt):
- excludes = ("parent_handle", "notBefore", "notAfter", "uri", "sia_uri", "aia_uri", "asn", "ipv4", "ipv6")
+ class list_received_resources_elt(cmd_elt_mixin, rpki.left_right.list_received_resources_elt):
+ excludes = ("parent_handle", "notBefore", "notAfter", "uri", "sia_uri", "aia_uri", "asn", "ipv4", "ipv6")
- class report_error_elt(reply_elt_mixin, rpki.left_right.report_error_elt):
- pass
+ class report_error_elt(reply_elt_mixin, rpki.left_right.report_error_elt):
+ pass
- pdus = dict((x.element_name, x)
- for x in (self_elt, bsc_elt, parent_elt, child_elt, repository_elt,
- list_published_objects_elt, list_received_resources_elt, report_error_elt))
+ pdus = dict((x.element_name, x)
+ for x in (self_elt, bsc_elt, parent_elt, child_elt, repository_elt,
+ list_published_objects_elt, list_received_resources_elt, report_error_elt))
class left_right_sax_handler(rpki.left_right.sax_handler):
- pdu = left_right_msg
+ pdu = left_right_msg
class left_right_cms_msg(rpki.left_right.cms_msg):
- saxify = left_right_sax_handler.saxify
+ saxify = left_right_sax_handler.saxify
# Publication protocol
class publication_msg(cmd_msg_mixin, rpki.publication.msg):
- class config_elt(cmd_elt_mixin, rpki.publication.config_elt):
+ class config_elt(cmd_elt_mixin, rpki.publication.config_elt):
- def client_query_bpki_crl(self, arg):
- """
- Special handler for --bpki_crl option.
- """
- self.bpki_crl = rpki.x509.CRL(Auto_file = arg)
+ def client_query_bpki_crl(self, arg):
+ """
+ Special handler for --bpki_crl option.
+ """
+ self.bpki_crl = rpki.x509.CRL(Auto_file = arg)
- class client_elt(cmd_elt_mixin, rpki.publication.client_elt):
- pass
+ class client_elt(cmd_elt_mixin, rpki.publication.client_elt):
+ pass
- class certificate_elt(cmd_elt_mixin, rpki.publication.certificate_elt):
- pass
+ class certificate_elt(cmd_elt_mixin, rpki.publication.certificate_elt):
+ pass
- class crl_elt(cmd_elt_mixin, rpki.publication.crl_elt):
- pass
+ class crl_elt(cmd_elt_mixin, rpki.publication.crl_elt):
+ pass
- class manifest_elt(cmd_elt_mixin, rpki.publication.manifest_elt):
- pass
+ class manifest_elt(cmd_elt_mixin, rpki.publication.manifest_elt):
+ pass
- class roa_elt(cmd_elt_mixin, rpki.publication.roa_elt):
- pass
+ class roa_elt(cmd_elt_mixin, rpki.publication.roa_elt):
+ pass
- class report_error_elt(reply_elt_mixin, rpki.publication.report_error_elt):
- pass
+ class report_error_elt(reply_elt_mixin, rpki.publication.report_error_elt):
+ pass
- class ghostbuster_elt(cmd_elt_mixin, rpki.publication.ghostbuster_elt):
- pass
+ class ghostbuster_elt(cmd_elt_mixin, rpki.publication.ghostbuster_elt):
+ pass
- pdus = dict((x.element_name, x)
- for x in (config_elt, client_elt, certificate_elt, crl_elt,
- manifest_elt, roa_elt, report_error_elt,
- ghostbuster_elt))
+ pdus = dict((x.element_name, x)
+ for x in (config_elt, client_elt, certificate_elt, crl_elt,
+ manifest_elt, roa_elt, report_error_elt,
+ ghostbuster_elt))
class publication_sax_handler(rpki.publication.sax_handler):
- pdu = publication_msg
+ pdu = publication_msg
class publication_cms_msg(rpki.publication.cms_msg):
- saxify = publication_sax_handler.saxify
+ saxify = publication_sax_handler.saxify
# Usage
top_opts = ["config=", "help", "pem_out=", "quiet", "verbose"]
def usage(code = 1):
- if __doc__ is not None:
- print __doc__.strip()
+ if __doc__ is not None:
+ print __doc__.strip()
+ print
+ print "Usage:"
print
- print "Usage:"
- print
- print "# Top-level options:"
- print usage_fill(*["--" + x for x in top_opts])
- print
- print "# left-right protocol:"
- left_right_msg.usage()
- print
- print "# publication protocol:"
- publication_msg.usage()
- sys.exit(code)
+ print "# Top-level options:"
+ print usage_fill(*["--" + x for x in top_opts])
+ print
+ print "# left-right protocol:"
+ left_right_msg.usage()
+ print
+ print "# publication protocol:"
+ publication_msg.usage()
+ sys.exit(code)
# Main program
-rpki.log.init("irbe_cli")
-
argv = sys.argv[1:]
if not argv:
- usage(0)
+ usage(0)
cfg_file = None
verbose = True
opts, argv = getopt.getopt(argv, "c:hpqv?", top_opts)
for o, a in opts:
- if o in ("-?", "-h", "--help"):
- usage(0)
- elif o in ("-c", "--config"):
- cfg_file = a
- elif o in ("-p", "--pem_out"):
- pem_out = a
- elif o in ("-q", "--quiet"):
- verbose = False
- elif o in ("-v", "--verbose"):
- verbose = True
+ if o in ("-?", "-h", "--help"):
+ usage(0)
+ elif o in ("-c", "--config"):
+ cfg_file = a
+ elif o in ("-p", "--pem_out"):
+ pem_out = a
+ elif o in ("-q", "--quiet"):
+ verbose = False
+ elif o in ("-v", "--verbose"):
+ verbose = True
if not argv:
- usage(1)
+ usage(1)
-cfg = rpki.config.parser(cfg_file, "irbe_cli")
+cfg = rpki.config.parser(set_filename = cfg_file, section = "irbe_cli")
q_msg_left_right = []
q_msg_publication = []
while argv:
- if argv[0] in left_right_msg.pdus:
- q_pdu = left_right_msg.pdus[argv[0]]()
- q_msg = q_msg_left_right
- elif argv[0] in publication_msg.pdus:
- q_pdu = publication_msg.pdus[argv[0]]()
- q_msg = q_msg_publication
- else:
- usage(1)
- argv = q_pdu.client_getopt(argv[1:])
- q_msg.append(q_pdu)
-
-import django
-
-from django.conf import settings
-
-settings.configure(
- DATABASES = { "default" : {
- "ENGINE" : "django.db.backends.mysql",
- "NAME" : cfg.get("sql-database", section = "irdbd"),
- "USER" : cfg.get("sql-username", section = "irdbd"),
- "PASSWORD" : cfg.get("sql-password", section = "irdbd"),
- "HOST" : "",
- "PORT" : "",
- "OPTIONS" : { "init_command": "SET storage_engine=INNODB" }}},
- INSTALLED_APPS = ("rpki.irdb",),
- MIDDLEWARE_CLASSES = (),
-)
-
-if django.VERSION >= (1, 7):
- from django.apps import apps
- apps.populate(settings.INSTALLED_APPS)
+ if argv[0] in left_right_msg.pdus:
+ q_pdu = left_right_msg.pdus[argv[0]]()
+ q_msg = q_msg_left_right
+ elif argv[0] in publication_msg.pdus:
+ q_pdu = publication_msg.pdus[argv[0]]()
+ q_msg = q_msg_publication
+ else:
+ usage(1)
+ argv = q_pdu.client_getopt(argv[1:])
+ q_msg.append(q_pdu)
+
+os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb")
import rpki.irdb
@@ -350,46 +332,36 @@ irbe = server_ca.ee_certificates.get(purpose = "irbe")
if q_msg_left_right:
- class left_right_proto(object):
- cms_msg = left_right_cms_msg
- msg = left_right_msg
-
- rpkid = server_ca.ee_certificates.get(purpose = "rpkid")
+ rpkid = server_ca.ee_certificates.get(purpose = "rpkid")
- rpkid_url = "http://%s:%s/left-right/" % (
- cfg.get("server-host", section = "rpkid"),
- cfg.get("server-port", section = "rpkid"))
+ rpkid_url = "http://%s:%s/left-right/" % (
+ cfg.get("server-host", section = "rpkid"),
+ cfg.get("server-port", section = "rpkid"))
- call_rpkid = rpki.async.sync_wrapper(rpki.http.caller(
- proto = left_right_proto,
- client_key = irbe.private_key,
- client_cert = irbe.certificate,
- server_ta = server_ca.certificate,
- server_cert = rpkid.certificate,
- url = rpkid_url,
- debug = verbose))
-
- call_rpkid(*q_msg_left_right)
+ rpki.http_simple.client(
+ proto_cms_msg = left_right_cms_msg,
+ client_key = irbe.private_key,
+ client_cert = irbe.certificate,
+ server_ta = server_ca.certificate,
+ server_cert = rpkid.certificate,
+ url = rpkid_url,
+ debug = verbose,
+ q_msg = left_right_msg(*q_msg_left_right))
if q_msg_publication:
- class publication_proto(object):
- msg = publication_msg
- cms_msg = publication_cms_msg
-
- pubd = server_ca.ee_certificates.get(purpose = "pubd")
-
- pubd_url = "http://%s:%s/control/" % (
- cfg.get("server-host", section = "pubd"),
- cfg.get("server-port", section = "pubd"))
-
- call_pubd = rpki.async.sync_wrapper(rpki.http.caller(
- proto = publication_proto,
- client_key = irbe.private_key,
- client_cert = irbe.certificate,
- server_ta = server_ca.certificate,
- server_cert = pubd.certificate,
- url = pubd_url,
- debug = verbose))
-
- call_pubd(*q_msg_publication)
+ pubd = server_ca.ee_certificates.get(purpose = "pubd")
+
+ pubd_url = "http://%s:%s/control/" % (
+ cfg.get("server-host", section = "pubd"),
+ cfg.get("server-port", section = "pubd"))
+
+ rpki.http_simple.client(
+ proto_cms_msg = publication_cms_msg,
+ client_key = irbe.private_key,
+ client_cert = irbe.certificate,
+ server_ta = server_ca.certificate,
+ server_cert = pubd.certificate,
+ url = pubd_url,
+ debug = verbose,
+ q_msg = publication_msg(*q_msg_publication))
diff --git a/ca/irdbd b/ca/irdbd
index 493e3d72..c5ca652c 100755
--- a/ca/irdbd
+++ b/ca/irdbd
@@ -17,5 +17,5 @@
# PERFORMANCE OF THIS SOFTWARE.
if __name__ == "__main__":
- import rpki.irdbd
- rpki.irdbd.main()
+ import rpki.irdbd
+ rpki.irdbd.main()
diff --git a/ca/pubd b/ca/pubd
index 7d8ecbfa..69d3b476 100755
--- a/ca/pubd
+++ b/ca/pubd
@@ -17,5 +17,5 @@
# PERFORMANCE OF THIS SOFTWARE.
if __name__ == "__main__":
- import rpki.pubd
- rpki.pubd.main()
+ import rpki.pubd
+ rpki.pubd.main()
diff --git a/ca/rootd b/ca/rootd
index cb59f958..6caeb88f 100755
--- a/ca/rootd
+++ b/ca/rootd
@@ -17,5 +17,5 @@
# PERFORMANCE OF THIS SOFTWARE.
if __name__ == "__main__":
- import rpki.rootd
- rpki.rootd.main()
+ import rpki.rootd
+ rpki.rootd.main()
diff --git a/ca/rpki-confgen b/ca/rpki-confgen
deleted file mode 100755
index 07c87f0f..00000000
--- a/ca/rpki-confgen
+++ /dev/null
@@ -1,291 +0,0 @@
-#!/usr/bin/env python
-
-# $Id$
-#
-# Copyright (C) 2014 Dragon Research Labs ("DRL")
-# Portions copyright (C) 2013 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notices and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
-# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
-# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
-# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
-# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-import os
-import sys
-import argparse
-import base64
-import textwrap
-
-from lxml.etree import Element, SubElement, ElementTree
-
-space4 = " " * 4
-space6 = " " * 6
-space8 = " " * 8
-star78 = "*" * 78
-
-wiki_wrapper = textwrap.TextWrapper()
-conf_wrapper = textwrap.TextWrapper(initial_indent = "# ", subsequent_indent = "# ")
-xml6_wrapper = textwrap.TextWrapper(initial_indent = space6, subsequent_indent = space6)
-xml8_wrapper = textwrap.TextWrapper(initial_indent = space8, subsequent_indent = space8)
-
-class Option(object):
-
- def __init__(self, name, value, doc):
- self.name = name
- self.value = value
- self.doc = doc
-
- @property
- def width(self):
- return len(self.name)
-
- def to_xml(self):
- x = Element("option", name = self.name)
- if self.value is not None:
- x.set("value", self.value)
- for d in self.doc:
- SubElement(x, "doc").text = "\n" + xml8_wrapper.fill(d) + "\n" + space6
- return x
-
- def to_wiki(self, f):
- f.write("\n== %s == #%s\n" % (self.name, self.name))
- for d in self.doc:
- f.write("\n%s\n" % wiki_wrapper.fill(d))
- if self.value is None:
- f.write("\n%s\n" % wiki_wrapper.fill("No default value."))
- else:
- f.write("\n{{{\n#!ini\n%s = %s\n}}}\n" % (self.name, self.value))
-
- def to_conf(self, f, width):
- for i, d in enumerate(self.doc):
- f.write("%s\n%s\n" % ("" if i == 0 else "#", conf_wrapper.fill(d)))
- if self.value is None:
- f.write("\n#%-*s = ???\n" % (width - 1, self.name))
- else:
- f.write("\n%-*s = %s\n" % (width, self.name, self.value))
-
-class Section(object):
-
- def __init__(self, name):
- self.name = name
- self.doc = []
- self.options = []
-
- @property
- def width(self):
- return max(o.width for o in self.options)
-
- @classmethod
- def from_xml(cls, elt):
- self = cls(name = elt.get("name"))
- for x in elt.iterchildren("doc"):
- self.doc.append(" ".join(x.text.split()))
- for x in elt.iterchildren("option"):
- self.options.append(Option(name = x.get("name"), value = x.get("value"),
- doc = [" ".join(d.text.split())
- for d in x.iterchildren("doc")]))
- return self
-
- def to_xml(self):
- x = Element("section", name = self.name)
- for d in self.doc:
- SubElement(x, "doc").text = "\n" + xml6_wrapper.fill(d) + "\n" + space4
- x.extend(o.to_xml() for o in self.options)
- return x
-
- def to_wiki(self, f):
- f.write("\n= [%s] section = #%s\n" % (self.name, self.name))
- for d in self.doc:
- f.write("\n%s\n" % wiki_wrapper.fill(d))
- for o in self.options:
- o.to_wiki(f)
-
- def to_conf(self, f, width):
- f.write("\n" + "#" * 78 + "\n\n[" + self.name + "]\n")
- if self.doc:
- f.write("\n##")
- for i, d in enumerate(self.doc):
- f.write("%s\n%s\n" % ("" if i == 0 else "#", conf_wrapper.fill(d)))
- f.write("##\n")
- for o in self.options:
- o.to_conf(f, width)
-
-def wiki_header(f, ident, toc):
- f.write("\n".join((
- "{{{",
- "#!comment",
- "",
- star78,
- "THIS PAGE WAS GENERATED AUTOMATICALLY, DO NOT EDIT.",
- "",
- "Generated from " + ident,
- " by $Id$",
- star78,
- "",
- "}}}",
- "")))
- if toc is not None:
- f.write("[[TracNav(%s)]]\n" % toc)
- f.write("[[PageOutline]]\n")
-
-def conf_header(f, ident):
- f.write("\n".join((
- "# Automatically generated. Edit as needed, but be careful of overwriting.",
- "#",
- "# Generated from " + ident,
- "# by $Id$",
- "")))
-
-
-# http://stackoverflow.com/questions/9027028/argparse-argument-order
-
-class CustomAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- if not "ordered_args" in namespace:
- namespace.ordered_args = []
- namespace.ordered_args.append((self.dest, values))
-
-class CustomFlagAction(argparse.Action):
- def __init__(self, option_strings, dest, default = None,
- required = False, help = None): # pylint: disable=W0622
- super(CustomFlagAction, self).__init__(
- option_strings = option_strings, dest = dest, nargs = 0,
- const = None, default = default, required = required, help = help)
- def __call__(self, parser, namespace, values, option_string = None):
- if not "ordered_args" in namespace:
- namespace.ordered_args = []
- namespace.ordered_args.append((self.dest, None))
-
-
-class main(object):
-
- def __init__(self):
- self.sections = []
- self.section_map = None
- self.option_map = None
- self.ident = None
- self.toc = None
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("--read-xml", metavar = "FILE", action = CustomAction,
- required = True, type = argparse.FileType("r"),
- help = "XML input file defining sections and options")
- parser.add_argument("--write-xml", metavar = "FILE", action = CustomAction,
- help = "XML file to write")
- parser.add_argument("--write-wiki", metavar = "FILE", action = CustomAction,
- help = "TracWiki file to write")
- parser.add_argument("--write-conf", metavar = "FILE", action = CustomAction,
- help = "rpki.conf configuration file to write")
- parser.add_argument("--set", metavar = "VARVAL", action = CustomAction,
- help = "variable setting in form \"VAR=VAL\"")
- parser.add_argument("--pwgen", metavar = "VAR", action = CustomAction,
- help = "set variable to generated password")
- parser.add_argument("--toc", metavar = "TRACNAV", action = CustomAction,
- help = "set TOC value to use with TracNav plugin")
- parser.add_argument("--autoconf", action = CustomFlagAction,
- help = "configure [autoconf] section")
- args = parser.parse_args()
-
- for cmd, arg in args.ordered_args:
- getattr(self, "do_" + cmd)(arg)
-
-
- def do_read_xml(self, arg):
- self.option_map = None
- root = ElementTree(file = arg).getroot()
- self.ident = root.get("ident")
- self.sections.extend(Section.from_xml(x) for x in root)
- self.option_map = {}
- self.section_map = {}
- for section in self.sections:
- if section.name in self.section_map:
- sys.exit("Duplicate section %s" % section.name)
- self.section_map[section.name] = section
- for option in section.options:
- name = (section.name, option.name)
- if name in self.option_map:
- sys.exit("Duplicate option %s::%s" % name)
- self.option_map[name] = option
-
-
- def do_set(self, arg):
- try:
- name, value = arg.split("=", 1)
- section, option = name.split("::")
- except ValueError:
- sys.exit("Couldn't parse --set specification \"%s\"" % arg)
- name = (section, option)
- if name not in self.option_map:
- sys.exit("Couldn't find option %s::%s" % name)
- self.option_map[name].value = value
-
-
- def do_pwgen(self, arg):
- try:
- section, option = arg.split("::")
- except ValueError:
- sys.exit("Couldn't parse --pwgen specification \"%s\"" % arg)
- name = (section, option)
- if name not in self.option_map:
- sys.exit("Couldn't find option %s::%s" % name)
- self.option_map[name].value = base64.urlsafe_b64encode(os.urandom(66))
-
-
- def do_autoconf(self, ignored):
- try:
- import rpki.autoconf
- for option in self.section_map["autoconf"].options:
- try:
- option.value = getattr(rpki.autoconf, option.name)
- except AttributeError:
- pass
- except ImportError:
- sys.exit("rpki.autoconf module is not available")
- except KeyError:
- sys.exit("Couldn't find autoconf section")
-
-
- def do_write_xml(self, arg):
- x = Element("configuration", ident = self.ident)
- x.extend(s.to_xml() for s in self.sections)
- ElementTree(x).write(arg, pretty_print = True, encoding = "us-ascii")
-
-
- def do_write_wiki(self, arg):
- if "%" in arg:
- for section in self.sections:
- with open(arg % section.name, "w") as f:
- wiki_header(f, self.ident, self.toc)
- section.to_wiki(f)
- else:
- with open(arg, "w") as f:
- for i, section in enumerate(self.sections):
- if i == 0:
- wiki_header(f, self.ident, self.toc)
- else:
- f.write("\f\n")
- section.to_wiki(f)
-
-
- def do_write_conf(self, arg):
- with open(arg, "w") as f:
- conf_header(f, self.ident)
- width = max(s.width for s in self.sections)
- for section in self.sections:
- section.to_conf(f, width)
-
-
- def do_toc(self, arg):
- self.toc = arg
-
-
-if __name__ == "__main__":
- main()
diff --git a/ca/rpki-manage b/ca/rpki-manage
deleted file mode 100755
index 0d581ce9..00000000
--- a/ca/rpki-manage
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-
-import os
-from django.core.management import execute_from_command_line
-
-# django-admin seems to have problems creating the superuser account when
-# $LANG is unset or is set to something totally incompatible with UTF-8.
-if os.environ.get('LANG') in (None, "", "C"):
- os.environ['LANG'] = 'en_US.UTF-8'
-
-os.environ['DJANGO_SETTINGS_MODULE'] = 'rpki.gui.default_settings'
-
-execute_from_command_line()
diff --git a/ca/rpki-nanny b/ca/rpki-nanny
new file mode 100755
index 00000000..914c8584
--- /dev/null
+++ b/ca/rpki-nanny
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2009--2013 Internet Systems Consortium ("ISC")
+# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
+# ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Start servers, using config file to figure out which servers the user
+wants started.
+"""
+
+import os
+import pwd
+import sys
+import time
+import signal
+import logging
+import argparse
+import subprocess
+
+import rpki.log
+import rpki.config
+import rpki.autoconf
+import rpki.daemonize
+
+from logging.handlers import SysLogHandler
+
+logger = logging.getLogger(__name__)
+
+signames = dict((getattr(signal, sig), sig)
+ for sig in dir(signal)
+ if sig.startswith("SIG")
+ and sig.isalnum()
+ and sig.isupper()
+ and isinstance(getattr(signal, sig), int))
+
+
+class Daemon(object):
+ """
+ Representation and control of one daemon under our care.
+ """
+
+ def __init__(self, name):
+ self.name = name
+ self.proc = None
+ self.next_restart = 0
+ if cfg.getboolean(option = "start_" + name, section = "myrpki", default = False):
+ self.cmd = (os.path.join(rpki.autoconf.libexecdir, name), "--foreground")
+ else:
+ self.cmd = ()
+
+ def start_maybe(self, output):
+ if self.cmd and self.proc is None and time.time() > self.next_restart:
+ try:
+ self.proc = subprocess.Popen(self.cmd, stdout = output, stderr = output)
+ self.next_restart = int(time.time() + args.restart_delay)
+ logger.debug("Started %s[%s]", self.name, self.proc.pid)
+ except:
+ logger.exception("Trouble starting %s", self.name)
+
+ def terminate(self):
+ if self.proc is not None:
+ try:
+ logger.debug("Terminating daemon %s[%s]", self.name, self.proc.pid)
+ self.proc.terminate()
+ except:
+ logger.exception("Trouble terminating %s[%s]", self.name, self.proc.pid)
+
+ def delay(self):
+ return max(0, int(self.next_restart - time.time())) if self.cmd and self.proc is None else 0
+
+ def reap(self):
+ if self.proc is not None and self.proc.poll() is not None:
+ code = self.proc.wait()
+ if code < 0:
+ logger.warn("%s[%s] exited on signal %s",
+ self.name, self.proc.pid, signames.get(-code, "???"))
+ else:
+ logger.warn("%s[%s] exited with status %s",
+ self.name, self.proc.pid, code)
+ self.proc = None
+
+
+class Signals(object):
+ """
+
+ Convert POSIX signals into something we can use in a loop at main
+ program level. Assumes that we use signal.pause() to block, so
+ simply receiving the signal is enough to wake us up.
+
+ Calling the constructed Signals object with one or more signal
+ numbers returns True if any of those signals have been received,
+ and clears the internal flag for the first such signal.
+ """
+
+ def __init__(self, *sigs):
+ self._active = set()
+ for sig in sigs:
+ signal.signal(sig, self._handler)
+
+ def _handler(self, sig, frame):
+ self._active.add(sig)
+ #logger.debug("Received %s", signames.get(sig, "???"))
+
+ def __call__(self, *sigs):
+ for sig in sigs:
+ try:
+ self._active.remove(sig)
+ return True
+ except KeyError:
+ pass
+ return False
+
+
+def non_negative_integer(s):
+ if int(s) < 0:
+ raise ValueError
+ return s
+
+def positive_integer(s):
+ if int(s) <= 0:
+ raise ValueError
+ return s
+
+
+if __name__ == "__main__":
+
+ os.environ.update(TZ = "UTC")
+ time.tzset()
+
+ cfg = rpki.config.argparser(section = "rpki-nanny", doc = __doc__)
+
+ cfg.add_argument("--restart-delay", type = positive_integer, default = 60,
+ help = "how long to wait before restarting a crashed daemon")
+ cfg.add_argument("--pidfile",
+ default = os.path.join(rpki.daemonize.default_pid_directory, "rpki-nanny.pid"),
+ help = "override default location of pid file")
+ cfg.add_boolean_argument("--foreground", default = False,
+ help = "whether to stay in foreground rather than daemonizing")
+ cfg.add_boolean_argument("--capture-stdout-stderr", default = True,
+ help = "whether to capture output incorrectly sent to stdout/stderr")
+ cfg.add_logging_arguments()
+
+ args = cfg.argparser.parse_args()
+
+ # Drop privs before daemonizing or opening log file
+ pw = pwd.getpwnam(rpki.autoconf.RPKI_USER)
+ os.setgid(pw.pw_gid)
+ os.setuid(pw.pw_uid)
+
+ cfg.configure_logging(ident = "rpki-nanny", args = args)
+
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
+
+ if args.capture_stdout_stderr:
+ try:
+ logger_pipe = os.pipe()
+ logger_pid = os.fork()
+ if logger_pid == 0:
+ os.close(logger_pipe[1])
+ with os.fdopen(logger_pipe[0]) as f:
+ for line in f:
+ logger.warn("Captured: %s", line.rstrip())
+ # Should never get here, but just in case
+ logger.error("[Unexpected EOF in stdout/stderr capture logger]")
+ sys.exit(1)
+ else:
+ os.close(logger_pipe[0])
+ except:
+ logger.exception("Trouble setting up stdout/stderr capture process")
+ sys.exit(1)
+
+ daemon_output = logger_pipe[1] if args.capture_stdout_stderr else None
+
+ signals = Signals(signal.SIGALRM, signal.SIGCHLD, signal.SIGTERM, signal.SIGINT)
+ daemons = [Daemon(name) for name in ("irdbd", "rpkid", "pubd", "rootd")]
+ exiting = False
+
+ try:
+ while not exiting or not all(daemon.proc is None for daemon in daemons):
+ if not exiting and signals(signal.SIGTERM, signal.SIGINT):
+ logger.info("Received exit signal")
+ exiting = True
+ for daemon in daemons:
+ daemon.terminate()
+ if not exiting:
+ for daemon in daemons:
+ daemon.start_maybe(daemon_output)
+ alarms = tuple(daemon.delay() for daemon in daemons)
+ signal.alarm(min(a for a in alarms if a > 0) + 1 if any(alarms) else 0)
+ if not signals(signal.SIGCHLD, signal.SIGALRM):
+ signal.pause()
+ for daemon in daemons:
+ daemon.reap()
+ if args.capture_stdout_stderr:
+ os.kill(logger_pid, signal.SIGTERM)
+ except:
+ logger.exception("Unhandled exception in main loop")
+ for daemon in daemons:
+ daemon.terminate()
+ sys.exit(1)
diff --git a/ca/rpki-sql-backup b/ca/rpki-sql-backup
deleted file mode 100755
index e60f9ae3..00000000
--- a/ca/rpki-sql-backup
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-# $Id$
-#
-# Copyright (C) 2014 Dragon Research Labs ("DRL")
-# Portions copyright (C) 2010-2013 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notices and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
-# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
-# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
-# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
-# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Back up data from SQL databases, looking at config file to figure out
-which databases and what credentials to use with them.
-"""
-
-import subprocess
-import os
-import argparse
-import sys
-import time
-import rpki.config
-
-os.environ["TZ"] = "UTC"
-time.tzset()
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
-parser.add_argument("-o", "--output",
- type = argparse.FileType("wb"), default = sys.stdout,
- help = "destination for SQL dump (default: stdout)")
-args = parser.parse_args()
-
-cfg = rpki.config.parser(args.config, "myrpki")
-
-for name in ("rpkid", "irdbd", "pubd"):
- if cfg.getboolean("start_" + name, False):
- subprocess.check_call(
- ("mysqldump", "--add-drop-database",
- "-u", cfg.get("sql-username", section = name),
- "-p" + cfg.get("sql-password", section = name),
- "-B", cfg.get("sql-database", section = name)),
- stdout = args.output)
diff --git a/ca/rpki-sql-setup b/ca/rpki-sql-setup
deleted file mode 100755
index edc2c242..00000000
--- a/ca/rpki-sql-setup
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/env python
-
-# $Id$
-#
-# Copyright (C) 2014 Dragon Research Labs ("DRL")
-# Portions copyright (C) 2009-2013 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notices and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
-# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
-# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
-# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
-# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-import os
-import sys
-import glob
-import getpass
-import argparse
-import datetime
-import rpki.config
-import rpki.version
-import rpki.autoconf
-import rpki.sql_schemas
-
-from rpki.mysql_import import MySQLdb, _mysql_exceptions
-
-ER_NO_SUCH_TABLE = 1146 # See mysqld_ername.h
-
-
-class RootDB(object):
- """
- Class to wrap MySQL actions that require root-equivalent access so
- we can defer such actions until we're sure they're really needed.
- Overall goal here is to prompt the user for the root password once
- at most, and not at all when not necessary.
- """
-
- def __init__(self, mysql_defaults = None):
- self.initialized = False
- self.mysql_defaults = mysql_defaults
-
- def __getattr__(self, name):
- if self.initialized:
- raise AttributeError
- if self.mysql_defaults is None:
- self.db = MySQLdb.connect(db = "mysql",
- user = "root",
- passwd = getpass.getpass("Please enter your MySQL root password: "))
- else:
- mysql_cfg = rpki.config.parser(self.mysql_defaults, "client")
- self.db = MySQLdb.connect(db = "mysql",
- user = mysql_cfg.get("user"),
- passwd = mysql_cfg.get("password"))
- self.cur = self.db.cursor()
- self.cur.execute("SHOW DATABASES")
- self.databases = set(d[0] for d in self.cur.fetchall())
- self.initialized = True
- return getattr(self, name)
-
- def close(self):
- if self.initialized:
- self.db.close()
-
-
-class UserDB(object):
- """
- Class to wrap MySQL access parameters for a particular database.
-
- NB: The SQL definitions for the upgrade_version table is embedded in
- this class rather than being declared in any of the .sql files.
- This is deliberate: nothing but the upgrade system should ever touch
- this table, and it's simpler to keep everything in one place.
-
- We have to be careful about SQL commits here, because CREATE TABLE
- implies an automatic commit. So presence of the magic table per se
- isn't significant, only its content (or lack thereof).
- """
-
- upgrade_version_table_schema = """
- CREATE TABLE upgrade_version (
- version TEXT NOT NULL,
- updated DATETIME NOT NULL
- ) ENGINE=InnoDB
- """
-
- def __init__(self, name):
- self.name = name
- self.database = cfg.get("sql-database", section = name)
- self.username = cfg.get("sql-username", section = name)
- self.password = cfg.get("sql-password", section = name)
- self.db = None
- self.cur = None
-
- def open(self):
- self.db = MySQLdb.connect(db = self.database, user = self.username, passwd = self.password)
- self.db.autocommit(False)
- self.cur = self.db.cursor()
-
- def close(self):
- if self.cur is not None:
- self.cur.close()
- self.cur = None
- if self.db is not None:
- self.db.commit()
- self.db.close()
- self.db = None
-
- @property
- def exists_and_accessible(self):
- try:
- MySQLdb.connect(db = self.database, user = self.username, passwd = self.password).close()
- except: # pylint: disable=W0702
- return False
- else:
- return True
-
- @property
- def version(self):
- try:
- self.cur.execute("SELECT version FROM upgrade_version")
- v = self.cur.fetchone()
- return Version(None if v is None else v[0])
- except _mysql_exceptions.ProgrammingError, e:
- if e.args[0] != ER_NO_SUCH_TABLE:
- raise
- log("Creating upgrade_version table in %s" % self.name)
- self.cur.execute(self.upgrade_version_table_schema)
- return Version(None)
-
- @version.setter
- def version(self, v):
- if v > self.version:
- self.cur.execute("DELETE FROM upgrade_version")
- self.cur.execute("INSERT upgrade_version (version, updated) VALUES (%s, %s)", (v, datetime.datetime.now()))
- self.db.commit()
- log("Updated %s to %s" % (self.name, v))
-
- @property
- def schema(self):
- lines = []
- for line in getattr(rpki.sql_schemas, self.name, "").splitlines():
- line = " ".join(line.split())
- if line and not line.startswith("--"):
- lines.append(line)
- return [statement.strip() for statement in " ".join(lines).rstrip(";").split(";") if statement.strip()]
-
-
-class Version(object):
- """
- A version number. This is a class in its own right to force the
- comparision and string I/O behavior we want.
- """
-
- def __init__(self, v):
- if v is None:
- v = "0.0"
- self.v = tuple(v.lower().split("."))
-
- def __str__(self):
- return ".".join(self.v)
-
- def __cmp__(self, other):
- return cmp(self.v, other.v)
-
-
-class Upgrade(object):
- """
- One upgrade script. Really, just its filename and the Version
- object we parse from its filename, we don't need to read the script
- itself except when applying it, but we do need to sort all the
- available upgrade scripts into version order.
- """
-
- @classmethod
- def load_all(cls, name, dn):
- g = os.path.join(dn, "upgrade-%s-to-*.py" % name)
- for fn in glob.iglob(g):
- yield cls(g, fn)
-
- def __init__(self, g, fn):
- head, sep, tail = g.partition("*") # pylint: disable=W0612
- self.fn = fn
- self.version = Version(fn[len(head):-len(tail)])
-
- def __cmp__(self, other):
- return cmp(self.version, other.version)
-
- def apply(self, db):
- # db is an argument here primarily so the script we exec can get at it
- log("Applying %s to %s" % (self.fn, db.name))
- with open(self.fn, "r") as f:
- exec f # pylint: disable=W0122
-
-
-def do_drop(name):
- db = UserDB(name)
- if db.database in root.databases:
- log("DROP DATABASE %s" % db.database)
- root.cur.execute("DROP DATABASE %s" % db.database)
- root.db.commit()
-
-def do_create(name):
- db = UserDB(name)
- log("CREATE DATABASE %s" % db.database)
- root.cur.execute("CREATE DATABASE %s" % db.database)
- log("GRANT ALL ON %s.* TO %s@localhost IDENTIFIED BY ###" % (db.database, db.username))
- root.cur.execute("GRANT ALL ON %s.* TO %s@localhost IDENTIFIED BY %%s" % (db.database, db.username),
- (db.password,))
- root.db.commit()
- db.open()
- for statement in db.schema:
- if not statement.upper().startswith("DROP TABLE"):
- log(statement)
- db.cur.execute(statement)
- db.version = current_version
- db.close()
-
-def do_script_drop(name):
- db = UserDB(name)
- print "DROP DATABASE IF EXISTS %s;" % db.database
-
-def do_drop_and_create(name):
- do_drop(name)
- do_create(name)
-
-def do_fix_grants(name):
- db = UserDB(name)
- if not db.exists_and_accessible:
- log("GRANT ALL ON %s.* TO %s@localhost IDENTIFIED BY ###" % (db.database, db.username))
- root.cur.execute("GRANT ALL ON %s.* TO %s@localhost IDENTIFIED BY %%s" % (db.database, db.username),
- (db.password,))
- root.db.commit()
-
-def do_create_if_missing(name):
- db = UserDB(name)
- if not db.exists_and_accessible:
- do_create(name)
-
-def do_apply_upgrades(name):
- upgrades = sorted(Upgrade.load_all(name, args.upgrade_scripts))
- if upgrades:
- db = UserDB(name)
- db.open()
- log("Current version of %s is %s" % (db.name, db.version))
- for upgrade in upgrades:
- if upgrade.version > db.version:
- upgrade.apply(db)
- db.version = upgrade.version
- db.version = current_version
- db.close()
-
-def log(text):
- if args.verbose:
- print "#", text
-
-parser = argparse.ArgumentParser(description = """\
-Automated setup of all SQL stuff used by the RPKI CA tools. Pulls
-configuration from rpki.conf, prompts for MySQL password when needed.
-""")
-group = parser.add_mutually_exclusive_group()
-parser.add_argument("-c", "--config",
- help = "specify alternate location for rpki.conf")
-parser.add_argument("-v", "--verbose", action = "store_true",
- help = "whistle while you work")
-parser.add_argument("--mysql-defaults",
- help = "specify MySQL root access credentials via a configuration file")
-parser.add_argument("--upgrade-scripts",
- default = os.path.join(rpki.autoconf.datarootdir, "rpki", "upgrade-scripts"),
- help = "override default location of upgrade scripts")
-group.add_argument("--create",
- action = "store_const", dest = "dispatch", const = do_create,
- help = "create databases and load schemas")
-group.add_argument("--drop",
- action = "store_const", dest = "dispatch", const = do_drop,
- help = "drop databases")
-group.add_argument("--script-drop",
- action = "store_const", dest = "dispatch", const = do_script_drop,
- help = "send SQL commands to drop databases to standard output")
-group.add_argument("--drop-and-create",
- action = "store_const", dest = "dispatch", const = do_drop_and_create,
- help = "drop databases then recreate them and load schemas")
-group.add_argument("--fix-grants",
- action = "store_const", dest = "dispatch", const = do_fix_grants,
- help = "whack database access to match current configuration file")
-group.add_argument("--create-if-missing",
- action = "store_const", dest = "dispatch", const = do_create_if_missing,
- help = "create databases and load schemas if they don't exist already")
-group.add_argument("--apply-upgrades",
- action = "store_const", dest = "dispatch", const = do_apply_upgrades,
- help = "apply upgrade scripts to existing databases")
-parser.set_defaults(dispatch = do_create_if_missing)
-args = parser.parse_args()
-
-try:
- cfg = rpki.config.parser(args.config, "myrpki")
- root = RootDB(args.mysql_defaults)
- current_version = Version(rpki.version.VERSION)
- for program_name in ("irdbd", "rpkid", "pubd"):
- if cfg.getboolean("start_" + program_name, False):
- args.dispatch(program_name)
- root.close()
-except Exception, e:
- #raise
- sys.exit(str(e))
diff --git a/ca/rpki-start-servers b/ca/rpki-start-servers
index 8a745896..1d7befb6 100755
--- a/ca/rpki-start-servers
+++ b/ca/rpki-start-servers
@@ -38,9 +38,9 @@ os.environ["TZ"] = "UTC"
time.tzset()
def non_negative_integer(s):
- if int(s) < 0:
- raise ValueError
- return s
+ if int(s) < 0:
+ raise ValueError
+ return s
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument("-c", "--config",
@@ -64,26 +64,25 @@ group.add_argument("--log-syslog", default = "daemon", nargs = "?",
help = "log syslog")
args = parser.parse_args()
-cfg = rpki.config.parser(args.config, "myrpki")
+cfg = rpki.config.parser(set_filename = args.config, section = "myrpki")
def run(name, old_flag = None):
- if cfg.getboolean("start_" + name, cfg.getboolean("run_" + name if old_flag is None else old_flag, False)):
- # pylint: disable=E1103
- log_file = os.path.join(args.log_directory, name + ".log")
- cmd = (os.path.join(rpki.autoconf.libexecdir, name), "--config", cfg.filename, "--log-level", args.log_level)
- if args.log_file:
- cmd += ("--log-file", log_file)
- elif args.log_rotating_file_kbytes:
- cmd += ("--log-rotating-file", log_file, args.log_rotating_file_kbytes, args.log_backup_count)
- elif args.log_rotating_file_hours:
- cmd += ("--log-timed-rotating-file", log_file, args.log_rotating_file_hours, args.log_backup_count)
- else:
- cmd += ("--log-syslog", args.log_syslog)
- proc = subprocess.Popen(cmd)
- if proc.wait() != 0:
- sys.exit("Problem starting %s, pid %s" % (name, proc.pid))
+ if cfg.getboolean("start_" + name, cfg.getboolean("run_" + name if old_flag is None else old_flag, False)):
+ log_file = os.path.join(args.log_directory, name + ".log")
+ cmd = (rpki.autoconf.SUDO, "-u", rpki.autoconf.RPKI_USER,
+ os.path.join(rpki.autoconf.libexecdir, name), "--log-level", args.log_level)
+ if args.log_file:
+ cmd += ("--log-file", log_file)
+ elif args.log_rotating_file_kbytes:
+ cmd += ("--log-rotating-file", log_file, args.log_rotating_file_kbytes, args.log_backup_count)
+ elif args.log_rotating_file_hours:
+ cmd += ("--log-timed-rotating-file", log_file, args.log_rotating_file_hours, args.log_backup_count)
+ else:
+ cmd += ("--log-syslog", args.log_syslog)
+ proc = subprocess.Popen(cmd)
+ if proc.wait() != 0:
+ sys.exit("Problem starting %s, pid %s" % (name, proc.pid))
run("irdbd", "run_rpkid")
run("rpkid")
run("pubd")
-run("rootd")
diff --git a/ca/rpki.wsgi b/ca/rpki.wsgi
index 72ba75ac..487650f7 100644
--- a/ca/rpki.wsgi
+++ b/ca/rpki.wsgi
@@ -21,7 +21,7 @@ import sys
import os
import rpki.autoconf
-os.environ['DJANGO_SETTINGS_MODULE'] = 'rpki.gui.default_settings'
+os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.gui")
# Needed for local_settings.py
sys.path.insert(1, rpki.autoconf.sysconfdir + '/rpki')
@@ -39,7 +39,7 @@ os.environ['DISABLE_SETPROCTITLE'] = 'yes'
if not os.environ.get('PYTHON_EGG_CACHE') and rpki.autoconf.WSGI_PYTHON_EGG_CACHE_DIR:
os.environ['PYTHON_EGG_CACHE'] = rpki.autoconf.WSGI_PYTHON_EGG_CACHE_DIR
-import django.core.handlers.wsgi
-application = django.core.handlers.wsgi.WSGIHandler()
+from django.core.wsgi import get_wsgi_application
+application = get_wsgi_application()
# vim:ft=python
diff --git a/ca/rpkic b/ca/rpkic
index 333a5eb7..77c65c62 100755
--- a/ca/rpkic
+++ b/ca/rpkic
@@ -1,21 +1,48 @@
#!/usr/bin/env python
-# $Id$
+# Using a Python script to run sudo to run a Python script is a bit
+# silly, but it lets us use rpki.autoconf to locate sudo, lets us
+# avoid needing a custom setuid wrapper, lets us avoid another pass
+# through the adventures of shell quoting and tokenization, and
+# generally is just a lot simpler to implement correctly.
#
-# Copyright (C) 2010-2011 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
+# OK, it's probably a few milliseconds slower. Big deal.
if __name__ == "__main__":
- import rpki.rpkic
- rpki.rpkic.main()
+
+ import os
+ import pwd
+ import sys
+ import rpki.autoconf
+
+ argv = [sys.executable, os.path.abspath(sys.argv[0])]
+ argv.extend(sys.argv[1:])
+
+ already_ran_sudo = os.getenv("SUDO_COMMAND") == " ".join(argv)
+
+ euid = os.geteuid()
+
+ try:
+ puid = pwd.getpwnam(rpki.autoconf.RPKI_USER).pw_uid
+ except KeyError:
+ puid = None
+ print "Warning: User \"{}\" not found, not dropping privileges".format(rpki.autoconf.RPKI_USER)
+
+ if puid is not None and already_ran_sudo:
+ try:
+ os.setgid( int(os.environ["SUDO_GID"]))
+ os.setreuid(int(os.environ["SUDO_UID"]), puid)
+ except OSError as e:
+ sys.exit("Couldn't drop privs to user {}: {!s}".format(rpki.autoconf.RPKI_USER, e))
+
+ if already_ran_sudo or puid in (None, euid):
+ import rpki.rpkic
+ rpki.rpkic.main()
+
+ else:
+ try:
+ argv.insert(0, rpki.autoconf.SUDO)
+ os.execv(argv[0], argv)
+ sys.exit("rpkic startup failure, no exception so don't know why, sorry")
+ except Exception as e:
+ sys.exit("Couldn't exec sudo python rpkic: {!s}".format(e))
diff --git a/ca/rpkid b/ca/rpkid
index a4cc6cd3..f63e441e 100755
--- a/ca/rpkid
+++ b/ca/rpkid
@@ -17,5 +17,5 @@
# PERFORMANCE OF THIS SOFTWARE.
if __name__ == "__main__":
- import rpki.rpkid
- rpki.rpkid.main()
+ import rpki.rpkid
+ rpki.rpkid.main()
diff --git a/ca/rpkigui-apache-conf-gen b/ca/rpkigui-apache-conf-gen
index 6f71c7b1..4e17a9d7 100755
--- a/ca/rpkigui-apache-conf-gen
+++ b/ca/rpkigui-apache-conf-gen
@@ -29,6 +29,52 @@ import rpki.autoconf
fqdn = socket.getfqdn()
vhost_template = """\
+
+#
+# Stuff that should be visible with both HTTP and HTTPS is (now)
+# outside the vhost block (see if this works properly...).
+#
+
+#
+# Allow access to the directory where rcynic-html writes
+# its output files.
+#
+<Directory %(RCYNIC_HTML_DIR)s>
+%(allow)s
+</Directory>
+
+#
+# Add alias pointing to rcynic-html's output files.
+#
+# If for some reason you need to change this, be careful to leave
+# the trailing slash off the URL, otherwise /rcynic will be
+# swallowed by the WSGIScriptAlias
+#
+Alias /rcynic %(RCYNIC_HTML_DIR)s/
+
+#
+# Allow access to the directory where pubd writes RRDP files.
+#
+<Directory %(datarootdir)s/rpki/rrdp-publication/>
+%(allow)s
+</Directory>
+
+#
+# Add alias pointing to pubd's RRDP output files.
+#
+Alias /rrdp %(datarootdir)s/rpki/rrdp-publication/
+
+#
+# RRDP "notification" file needs a short expiration: this is
+# a critical part of how RRDP interacts with HTTP caching.
+# Timeout is per current RRDP I-D, this will need to track
+# any changes as the specification evolves.
+#
+<LocationMatch ^/rrdp/notify[.]xml$>
+ ExpiresActive on
+ ExpiresDefault "access plus 1 minute"
+</LocationMatch>
+
#
# By default, this configuration assumes that you use name-based
# virtual hosting. If that's not what you want, you may need
@@ -78,23 +124,6 @@ vhost_template = """\
Alias /site_media/ %(datarootdir)s/rpki/media/
#
- # Allow access to the directory where rcynic-html writes
- # its output files.
- #
- <Directory %(RCYNIC_HTML_DIR)s>
-%(allow)s
- </Directory>
-
- #
- # Add alias pointing to rcynic-html's output files.
- #
- # If for some reason you need to change this, be careful to leave
- # the trailing slash off the URL, otherwise /rcynic will be
- # swallowed by the WSGIScriptAlias
- #
- Alias /rcynic %(RCYNIC_HTML_DIR)s/
-
- #
# Redirect to the GUI dashboard when someone hits the bare vhost.
#
RedirectMatch ^/$ /rpki/
@@ -102,7 +131,7 @@ vhost_template = """\
#
# Enable HTTPS
#
- SSLEngine on
+ SSLEngine on
#
# Specify HTTPS server certificate and key files for this virtual host.
@@ -161,369 +190,387 @@ NameVirtualHost *:443
'''
def Guess(args):
- """
- Guess what platform this is and dispatch to platform constructor.
- """
-
- system = platform.system()
- if system == "FreeBSD":
- return FreeBSD(args)
- if system == "Darwin":
- return Darwin(args)
- if system == "Linux":
- distro = platform.linux_distribution()[0].lower()
- if distro == "debian":
- return Debian(args)
- if distro == "ubuntu":
- return Ubuntu(args)
- if distro in ("fedora", "centos"):
- return Redhat(args)
- raise NotImplementedError("Can't guess what platform this is, sorry")
+ """
+ Guess what platform this is and dispatch to platform constructor.
+ """
+
+ system = platform.system()
+ if system == "FreeBSD":
+ return FreeBSD(args)
+ if system == "Darwin":
+ return Darwin(args)
+ if system == "Linux":
+ distro = platform.linux_distribution()[0].lower()
+ if distro == "debian":
+ return Debian(args)
+ if distro == "ubuntu":
+ return Ubuntu(args)
+ if distro in ("fedora", "centos"):
+ return Redhat(args)
+ raise NotImplementedError("Can't guess what platform this is, sorry")
+
class Platform(object):
- """
- Abstract base class representing an operating system platform.
- """
-
- apache_cer = os.path.join(rpki.autoconf.sysconfdir, "rpki", "apache.cer")
- apache_key = os.path.join(rpki.autoconf.sysconfdir, "rpki", "apache.key")
-
- apache_conf = os.path.join(rpki.autoconf.sysconfdir, "rpki", "apache.conf")
- apache_conf_sample = apache_conf + ".sample"
-
- apache_conf_preface = ""
-
- def __init__(self, args):
- self.args = args
- self.log("RPKI Apache configuration: platform \"%s\", action \"%s\"" % (
- self.__class__.__name__, args.action))
- getattr(self, args.action)()
-
- def log(self, msg):
- if self.args.verbose:
- print msg
-
- def run(self, *cmd, **kwargs):
- self.log("Running %s" % " ".join(cmd))
- subprocess.check_call(cmd, **kwargs)
-
- req_cmd = ("openssl", "req", "-new",
- "-config", "/dev/stdin",
- "-out", "/dev/stdout",
- "-keyout", apache_key,
- "-newkey", "rsa:2048")
-
- x509_cmd = ("openssl", "x509", "-req", "-sha256",
- "-signkey", apache_key,
- "-in", "/dev/stdin",
- "-out", apache_cer,
- "-days", "3650")
-
- req_conf = '''\
- [req]
- default_bits = 2048
- default_md = sha256
- distinguished_name = req_dn
- prompt = no
- encrypt_key = no
- [req_dn]
- CN = %s
- ''' % fqdn
-
- def unlink(self, fn, silent = False):
- if os.path.lexists(fn):
- if not silent:
- self.log("Removing %s" % fn)
- os.unlink(fn)
- elif not silent:
- self.log("Would have removed %s if it existed" % fn)
-
- def del_certs(self, silent = False):
- self.unlink(self.apache_cer, silent)
- self.unlink(self.apache_key, silent)
-
- def add_certs(self):
- if os.path.exists(self.apache_cer) and os.path.exists(self.apache_key):
- return
- self.del_certs()
- req = subprocess.Popen(self.req_cmd,
- stdin = subprocess.PIPE,
- stdout = subprocess.PIPE,
- stderr = open("/dev/null", "w"))
- x509 = subprocess.Popen(self.x509_cmd,
- stdin = req.stdout,
- stderr = open("/dev/null", "w"))
- req.stdin.write(self.req_conf)
- req.stdin.close()
- if req.wait():
- raise subprocess.CalledProcessError(req.returncode, self.req_cmd)
- if x509.wait():
- raise subprocess.CalledProcessError(x509.returncode, self.x509_cmd)
- self.log("Created %s and %s, chmoding %s" % (
- self.apache_cer, self.apache_key, self.apache_key))
- os.chmod(self.apache_key, 0600)
-
- _vhost = None
-
- @property
- def vhost(self):
- if self._vhost is None:
- allow = allow_22_template if self.args.apache_version <= 22 else allow_24_template
- self._vhost = vhost_template % dict(rpki.autoconf.__dict__, fqdn = fqdn, allow = allow)
- return self._vhost
-
- @property
- def name_virtual_host(self):
- return name_virtual_host_template if self.args.apache_version <= 22 else ""
-
- @property
- def too_complex(self):
- return textwrap.dedent('''\
- # It looks like you already have HTTPS enabled in your
- # Apache configuration, which makes your configuration too
- # complex for us to enable support for the RPKI GUI automatically.
- #
- # To enable support, take a look at %s
- # and copy what you need from that file into %s,
- # paying attention to the comments which mark the bits that
- # you might (or might not) need to change or omit, depending
- # on the details of your particular Apache configuration.
- ''' % (self.apache_conf_sample, self.apache_conf))
-
- def install(self):
- with open(self.apache_conf_sample, "w") as f:
- self.log("Writing %s" % f.name)
- f.write(self.apache_conf_preface)
- f.write(self.name_virtual_host)
- f.write(self.vhost)
- if not os.path.exists(self.apache_conf):
- self.unlink(self.apache_conf)
- with open(self.apache_conf, "w") as f:
- self.log("Writing %s" % f.name)
- if self.test_url("https://%s/" % fqdn):
- f.write(self.too_complex)
- sys.stdout.write(self.too_complex)
- else:
- if not self.test_tcp("localhost", 443):
+ """
+ Abstract base class representing an operating system platform.
+ """
+
+ apache_cer = os.path.join(rpki.autoconf.sysconfdir, "rpki", "apache.cer")
+ apache_key = os.path.join(rpki.autoconf.sysconfdir, "rpki", "apache.key")
+
+ apache_conf = os.path.join(rpki.autoconf.sysconfdir, "rpki", "apache.conf")
+ apache_conf_sample = apache_conf + ".sample"
+
+ apache_conf_preface = ""
+
+ def __init__(self, args):
+ self.args = args
+ self.log("RPKI Apache configuration: platform \"%s\", action \"%s\"" % (
+ self.__class__.__name__, args.action))
+ getattr(self, args.action)()
+
+ def log(self, msg):
+ if self.args.verbose:
+ print msg
+
+ def run(self, *cmd, **kwargs):
+ self.log("Running %s" % " ".join(cmd))
+ subprocess.check_call(cmd, **kwargs)
+
+ req_cmd = ("openssl", "req", "-new",
+ "-config", "/dev/stdin",
+ "-out", "/dev/stdout",
+ "-keyout", apache_key,
+ "-newkey", "rsa:2048")
+
+ x509_cmd = ("openssl", "x509", "-req", "-sha256",
+ "-signkey", apache_key,
+ "-in", "/dev/stdin",
+ "-out", apache_cer,
+ "-days", "3650")
+
+ req_conf = '''\
+ [req]
+ default_bits = 2048
+ default_md = sha256
+ distinguished_name = req_dn
+ prompt = no
+ encrypt_key = no
+ [req_dn]
+ CN = %s
+ ''' % fqdn
+
+ def unlink(self, fn, silent = False):
+ if os.path.lexists(fn):
+ if not silent:
+ self.log("Removing %s" % fn)
+ os.unlink(fn)
+ elif not silent:
+ self.log("Would have removed %s if it existed" % fn)
+
+ def del_certs(self, silent = False):
+ self.unlink(self.apache_cer, silent)
+ self.unlink(self.apache_key, silent)
+
+ def add_certs(self):
+ if os.path.exists(self.apache_cer) and os.path.exists(self.apache_key):
+ return
+ self.del_certs()
+ req = subprocess.Popen(self.req_cmd,
+ stdin = subprocess.PIPE,
+ stdout = subprocess.PIPE,
+ stderr = open("/dev/null", "w"))
+ x509 = subprocess.Popen(self.x509_cmd,
+ stdin = req.stdout,
+ stderr = open("/dev/null", "w"))
+ req.stdin.write(self.req_conf)
+ req.stdin.close()
+ if req.wait():
+ raise subprocess.CalledProcessError(req.returncode, self.req_cmd)
+ if x509.wait():
+ raise subprocess.CalledProcessError(x509.returncode, self.x509_cmd)
+ self.log("Created %s and %s, chmoding %s" % (
+ self.apache_cer, self.apache_key, self.apache_key))
+ os.chmod(self.apache_key, 0600)
+
+ _vhost = None
+
+ @property
+ def vhost(self):
+ if self._vhost is None:
+ allow = allow_22_template if self.args.apache_version <= 22 else allow_24_template
+ self._vhost = vhost_template % dict(rpki.autoconf.__dict__, fqdn = fqdn, allow = allow)
+ return self._vhost
+
+ @property
+ def name_virtual_host(self):
+ return name_virtual_host_template if self.args.apache_version <= 22 else ""
+
+ @property
+ def too_complex(self):
+ return textwrap.dedent('''\
+ # It looks like you already have HTTPS enabled in your
+ # Apache configuration, which makes your configuration too
+ # complex for us to enable support for the RPKI GUI automatically.
+ #
+ # To enable support, take a look at %s
+ # and copy what you need from that file into %s,
+ # paying attention to the comments which mark the bits that
+ # you might (or might not) need to change or omit, depending
+ # on the details of your particular Apache configuration.
+ ''' % (self.apache_conf_sample, self.apache_conf))
+
+ @property
+ def apache_conf_target(self):
+ raise NotImplementedError
+
+ def restart(self):
+ raise NotImplementedError
+
+ def install(self):
+ with open(self.apache_conf_sample, "w") as f:
+ self.log("Writing %s" % f.name)
f.write(self.apache_conf_preface)
f.write(self.name_virtual_host)
- f.write(self.vhost)
- if not os.path.exists(self.apache_conf_target):
- self.unlink(self.apache_conf_target)
- self.log("Symlinking %s to %s" % (
- self.apache_conf_target, self.apache_conf))
- os.symlink(self.apache_conf, self.apache_conf_target)
- self.add_certs()
- self.enable()
- self.restart()
-
- def enable(self):
- pass
-
- def disable(self):
- pass
-
- def remove(self):
- try:
- same = open(self.apache_conf, "r").read() == open(self.apache_conf_sample, "r").read()
- except: # pylint: disable=W0702
- same = False
- self.unlink(self.apache_conf_sample)
- if same:
- self.unlink(self.apache_conf)
- self.unlink(self.apache_conf_target)
- self.disable()
- self.restart()
-
- def purge(self):
- self.remove()
- self.unlink(self.apache_conf)
- self.del_certs()
-
- @staticmethod
- def test_url(url = "https://localhost/"):
- try:
- urllib2.urlopen(url).close()
- except IOError:
- return False
- else:
- return True
-
- @staticmethod
- def test_tcp(host = "localhost", port = 443, family = socket.AF_UNSPEC, proto = socket.SOCK_STREAM):
- try:
- addrinfo = socket.getaddrinfo(host, port, family, proto)
- except socket.error:
- return False
- for af, socktype, proto, canon, sa in addrinfo: # pylint: disable=W0612
- try:
- s = socket.socket(af, socktype, proto)
- s.connect(sa)
- s.close()
- except socket.error:
- continue
- else:
- return True
- return False
+ f.write(self.vhost)
+ if not os.path.exists(self.apache_conf):
+ self.unlink(self.apache_conf)
+ with open(self.apache_conf, "w") as f:
+ self.log("Writing %s" % f.name)
+ if self.test_url("https://%s/" % fqdn):
+ f.write(self.too_complex)
+ sys.stdout.write(self.too_complex)
+ else:
+ if not self.test_tcp("localhost", 443):
+ f.write(self.apache_conf_preface)
+ f.write(self.name_virtual_host)
+ f.write(self.vhost)
+ if not os.path.exists(self.apache_conf_target):
+ self.unlink(self.apache_conf_target)
+ self.log("Symlinking %s to %s" % (
+ self.apache_conf_target, self.apache_conf))
+ os.symlink(self.apache_conf, self.apache_conf_target)
+ self.add_certs()
+ self.enable()
+ self.restart()
+
+ def enable(self):
+ pass
+
+ def disable(self):
+ pass
+
+ def remove(self):
+ try:
+ same = open(self.apache_conf, "r").read() == open(self.apache_conf_sample, "r").read()
+ except:
+ same = False
+ self.unlink(self.apache_conf_sample)
+ if same:
+ self.unlink(self.apache_conf)
+ self.unlink(self.apache_conf_target)
+ self.disable()
+ self.restart()
+
+ def purge(self):
+ self.remove()
+ self.unlink(self.apache_conf)
+ self.del_certs()
+
+ @staticmethod
+ def test_url(url = "https://localhost/"):
+ try:
+ urllib2.urlopen(url).close()
+ except IOError:
+ return False
+ else:
+ return True
+
+ @staticmethod
+ def test_tcp(host = "localhost", port = 443, family = socket.AF_UNSPEC, proto = socket.SOCK_STREAM):
+ try:
+ addrinfo = socket.getaddrinfo(host, port, family, proto)
+ except socket.error:
+ return False
+ for af, socktype, proto, canon, sa in addrinfo: # pylint: disable=W0612
+ try:
+ s = socket.socket(af, socktype, proto)
+ s.connect(sa)
+ s.close()
+ except socket.error:
+ continue
+ else:
+ return True
+ return False
+
class FreeBSD(Platform):
- """
- FreeBSD.
- """
+ """
+ FreeBSD.
+ """
- # On FreeBSD we have to ask httpd what version it is before we know
- # where to put files or what to call the service. In FreeBSD's makefiles,
- # this value is called APACHE_VERSION, and is calculated thusly:
- #
- # httpd -V | sed -ne 's/^Server version: Apache\/\([0-9]\)\.\([0-9]*\).*/\1\2/p'
+ # On FreeBSD we have to ask httpd what version it is before we know
+ # where to put files or what to call the service. In FreeBSD's makefiles,
+ # this value is called APACHE_VERSION, and is calculated thusly:
+ #
+ # httpd -V | sed -ne 's/^Server version: Apache\/\([0-9]\)\.\([0-9]*\).*/\1\2/p'
+
+ _apache_name = None
- _apache_name = None
+ @property
+ def apache_name(self):
+ if self._apache_name is None:
+ self._apache_name = "apache%s" % self.args.apache_version
+ return self._apache_name
- @property
- def apache_name(self):
- if self._apache_name is None:
- self._apache_name = "apache%s" % self.args.apache_version
- return self._apache_name
+ @property
+ def apache_conf_target(self):
+ return "/usr/local/etc/%s/Includes/rpki.conf" % self.apache_name
- @property
- def apache_conf_target(self):
- return "/usr/local/etc/%s/Includes/rpki.conf" % self.apache_name
+ apache_conf_preface = textwrap.dedent('''\
+ # These directives tell Apache to listen on the HTTPS port
+ # and to enable name-based virtual hosting. If you already
+ # have HTTPS enabled elsewhere in your configuration, you may
+ # need to remove these.
- apache_conf_preface = textwrap.dedent('''\
- # These directives tell Apache to listen on the HTTPS port
- # and to enable name-based virtual hosting. If you already
- # have HTTPS enabled elsewhere in your configuration, you may
- # need to remove these.
+ Listen [::]:443
+ Listen 0.0.0.0:443
+ ''')
- Listen [::]:443
- Listen 0.0.0.0:443
- ''')
+ def restart(self):
+ self.run("service", self.apache_name, "restart")
- def restart(self):
- self.run("service", self.apache_name, "restart")
class Debian(Platform):
- """
- Debian and related platforms like Ubuntu.
- """
+ """
+ Debian and related platforms like Ubuntu.
+ """
+
+ # Pull the current version number for released code. Use
+ # something very large when there is no version (eg, "sid").
+ @property
+ def distribution_version(self):
+ v = platform.linux_distribution()[1].split(".")
+ if all(d.isdigit() for d in v):
+ return tuple(int(d) for d in v)
+ else:
+ return (99999999, 0)
+
+ # On Debian, the filename must end in .conf on Stretch and must not
+ # end in .conf on Wheezy. Haven't checked Jessie yet, will need to
+ # update this if we ever sort out the version skew mess on Jessie.
+ @property
+ def apache_conf_target(self):
+ if self.distribution_version < (8, 0):
+ return "/etc/apache2/sites-available/rpki"
+ else:
+ return "/etc/apache2/sites-available/rpki.conf"
- @property
- def distribution_version(self):
- return tuple(int(v) for v in platform.linux_distribution()[1].split("."))
+ snake_oil_cer = "/etc/ssl/certs/ssl-cert-snakeoil.pem"
+ snake_oil_key = "/etc/ssl/private/ssl-cert-snakeoil.key"
- # As of Wheezy, Debian still wants the configuration filename
- # without the .conf suffix. Keep an eye on this, as it may change
- # in future releases.
- #
- @property
- def apache_conf_target(self):
- return "/etc/apache2/sites-available/rpki"
-
- snake_oil_cer = "/etc/ssl/certs/ssl-cert-snakeoil.pem"
- snake_oil_key = "/etc/ssl/private/ssl-cert-snakeoil.key"
-
- def add_certs(self):
- if not os.path.exists(self.snake_oil_cer) or not os.path.exists(self.snake_oil_key):
- return Platform.add_certs(self)
- if not os.path.exists(self.apache_cer):
- self.unlink(self.apache_cer)
- os.symlink(self.snake_oil_cer, self.apache_cer)
- if not os.path.exists(self.apache_key):
- self.unlink(self.apache_key)
- os.symlink(self.snake_oil_key, self.apache_key)
-
- def enable(self):
- self.run("a2enmod", "ssl")
- self.run("a2ensite", "rpki")
- #
- # In light of BREACH and CRIME attacks, mod_deflate is looking
- # like a bad idea, so make sure it's off.
- self.run("a2dismod", "deflate")
+ def add_certs(self):
+ if not os.path.exists(self.snake_oil_cer) or not os.path.exists(self.snake_oil_key):
+ return Platform.add_certs(self)
+ if not os.path.exists(self.apache_cer):
+ self.unlink(self.apache_cer)
+ os.symlink(self.snake_oil_cer, self.apache_cer)
+ if not os.path.exists(self.apache_key):
+ self.unlink(self.apache_key)
+ os.symlink(self.snake_oil_key, self.apache_key)
+
+ def enable(self):
+ self.run("a2enmod", "ssl")
+ self.run("a2enmod", "expires")
+ self.run("a2ensite", "rpki")
+ #
+ # In light of BREACH and CRIME attacks, mod_deflate is looking
+ # like a bad idea, so make sure it's off.
+ self.run("a2dismod", "-f", "deflate")
- def disable(self):
- self.run("a2dissite", "rpki")
+ def disable(self):
+ self.run("a2dissite", "rpki")
+
+ def restart(self):
+ self.run("service", "apache2", "restart")
- def restart(self):
- self.run("service", "apache2", "restart")
class Ubuntu(Debian):
-
- # On Ubuntu, the filename must end in .conf on Trusty and must not
- # end in .conf on Precise.
- @property
- def apache_conf_target(self):
- if self.distribution_version >= (14, 0):
- return "/etc/apache2/sites-available/rpki.conf"
- else:
- return "/etc/apache2/sites-available/rpki"
-
-class NIY(Platform):
- def __init__(self, args):
- super(NIY, self).__init__(args)
- raise NotImplementedError("Platform %s not implemented yet, sorry" % self.__class__.__name__)
-
-class Redhat(NIY):
- """
- Redhat family of Linux distributions (Fedora, CentOS).
- """
-
-class Darwin(NIY):
- """
- Mac OS X (aka Darwin).
- """
+
+ # On Ubuntu, the filename must end in .conf on Trusty and must not
+ # end in .conf on Precise.
+ @property
+ def apache_conf_target(self):
+ if self.distribution_version < (14, 0):
+ return "/etc/apache2/sites-available/rpki"
+ else:
+ return "/etc/apache2/sites-available/rpki.conf"
+
+
+class NIY(Platform): # pylint: disable=W0223
+ def __init__(self, args):
+ super(NIY, self).__init__(args)
+ raise NotImplementedError("Platform %s not implemented yet, sorry" % self.__class__.__name__)
+
+class Redhat(NIY): # pylint: disable=W0223
+ "Redhat family of Linux distributions (Fedora, CentOS)."
+
+class Darwin(NIY): # pylint: disable=W0223
+ "Mac OS X (aka Darwin)."
+
def main():
- """
- Generate and (de)install configuration suitable for using Apache httpd
- to drive the RPKI web interface under WSGI.
- """
-
- parser = argparse.ArgumentParser(description = __doc__)
- group1 = parser.add_mutually_exclusive_group()
- group2 = parser.add_mutually_exclusive_group()
-
- parser.add_argument("-v", "--verbose",
- help = "whistle while you work", action = "store_true")
- parser.add_argument("--apache-version",
- help = "Apache version (default " + rpki.autoconf.APACHE_VERSION + ")",
- type = int, default = rpki.autoconf.APACHE_VERSION)
-
- group1.add_argument("--freebsd",
- help = "configure for FreeBSD",
- action = "store_const", dest = "platform", const = FreeBSD)
- group1.add_argument("--debian",
- help = "configure for Debian",
- action = "store_const", dest = "platform", const = Debian)
- group1.add_argument("--ubuntu",
- help = "configure for Ubuntu",
- action = "store_const", dest = "platform", const = Ubuntu)
- group1.add_argument("--redhat", "--fedora", "--centos",
- help = "configure for Redhat/Fedora/CentOS",
- action = "store_const", dest = "platform", const = Redhat)
- group1.add_argument("--macosx", "--darwin",
- help = "configure for Mac OS X (Darwin)",
- action = "store_const", dest = "platform", const = Darwin)
- group1.add_argument("--guess",
- help = "guess which platform configuration to use",
- action = "store_const", dest = "platform", const = Guess)
-
- group2.add_argument("-i", "--install",
- help = "install configuration",
- action = "store_const", dest = "action", const = "install")
- group2.add_argument("-r", "--remove", "--deinstall", "--uninstall",
- help = "remove configuration",
- action = "store_const", dest = "action", const = "remove")
- group2.add_argument("-P", "--purge",
- help = "remove configuration with extreme prejudice",
- action = "store_const", dest = "action", const = "purge")
-
- parser.set_defaults(platform = Guess, action = "install")
- args = parser.parse_args()
-
- try:
- args.platform(args)
- except Exception, e:
- sys.exit(str(e))
+ """
+ Generate and (de)install configuration suitable for using Apache httpd
+ to drive the RPKI web interface under WSGI.
+ """
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ group1 = parser.add_mutually_exclusive_group()
+ group2 = parser.add_mutually_exclusive_group()
+
+ parser.add_argument("-v", "--verbose",
+ help = "whistle while you work", action = "store_true")
+ parser.add_argument("--apache-version",
+ help = "Apache version (default " + rpki.autoconf.APACHE_VERSION + ")",
+ type = int, default = rpki.autoconf.APACHE_VERSION)
+
+ group1.add_argument("--freebsd",
+ help = "configure for FreeBSD",
+ action = "store_const", dest = "platform", const = FreeBSD)
+ group1.add_argument("--debian",
+ help = "configure for Debian",
+ action = "store_const", dest = "platform", const = Debian)
+ group1.add_argument("--ubuntu",
+ help = "configure for Ubuntu",
+ action = "store_const", dest = "platform", const = Ubuntu)
+ group1.add_argument("--redhat", "--fedora", "--centos",
+ help = "configure for Redhat/Fedora/CentOS",
+ action = "store_const", dest = "platform", const = Redhat)
+ group1.add_argument("--macosx", "--darwin",
+ help = "configure for Mac OS X (Darwin)",
+ action = "store_const", dest = "platform", const = Darwin)
+ group1.add_argument("--guess",
+ help = "guess which platform configuration to use",
+ action = "store_const", dest = "platform", const = Guess)
+
+ group2.add_argument("-i", "--install",
+ help = "install configuration",
+ action = "store_const", dest = "action", const = "install")
+ group2.add_argument("-r", "--remove", "--deinstall", "--uninstall",
+ help = "remove configuration",
+ action = "store_const", dest = "action", const = "remove")
+ group2.add_argument("-P", "--purge",
+ help = "remove configuration with extreme prejudice",
+ action = "store_const", dest = "action", const = "purge")
+
+ parser.set_defaults(platform = Guess, action = "install")
+ args = parser.parse_args()
+
+ try:
+ args.platform(args)
+ except Exception, e:
+ sys.exit(str(e))
if __name__ == "__main__":
- main()
+ main()
diff --git a/ca/rpkigui-import-routes b/ca/rpkigui-import-routes
index 0fbe0126..fb8e381e 100755
--- a/ca/rpkigui-import-routes
+++ b/ca/rpkigui-import-routes
@@ -110,4 +110,3 @@ automatically.""")
except Exception as e:
logging.exception(e)
sys.exit(1)
-
diff --git a/ca/rpkigui-query-routes b/ca/rpkigui-query-routes
index 1f698f23..dc2835a0 100755
--- a/ca/rpkigui-query-routes
+++ b/ca/rpkigui-query-routes
@@ -49,18 +49,17 @@ qs = rv.RouteOrigin.objects.filter(
prefix_max__gte=r.max
)
-
-def validity_marker(route, roa, roa_prefix):
- "Return + if the roa would cause the route to be accepted, or - if not"
- # we already know the ROA covers this route because they are returned
- # from RouteOrigin.roas, so just check the ASN and max prefix length
- return '-' if (roa.asid == 0 or route.asn != roa.asid or
- route.prefixlen > roa_prefix.max_length) else '+'
-
# xxx.xxx.xxx.xxx/xx-xx is 22 characters
+# we already know the ROA covers this route because they are returned
+# from RouteOrigin.roas, so just check the ASN and max prefix length
+
for route in qs:
print route.as_resource_range(), route.asn, route.status
for pfx in route.roa_prefixes:
for roa in pfx.roas.all():
- print validity_marker(route, roa, pfx), pfx.as_roa_prefix(), roa.asid, roa.repo.uri
+ if roa.asid == 0 or route.asn != roa.asid or route.prefixlen > pfx.max_length:
+ validity_marker = '-'
+ else:
+ validity_marker = '+'
+ print validity_marker, pfx.as_roa_prefix(), roa.asid, roa.repo.uri
print
diff --git a/ca/rpkigui-rcynic b/ca/rpkigui-rcynic
index 79afb15f..c753fc5e 100755
--- a/ca/rpkigui-rcynic
+++ b/ca/rpkigui-rcynic
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# Copyright (C) 2011 SPARTA, Inc. dba Cobham
-# Copyright (C) 2012, 2013 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2013, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -17,17 +17,13 @@
__version__ = '$Id$'
-# probably should be exported from rpki.gui.cacheview.util
-default_logfile = '/var/rcynic/data/rcynic.xml'
-default_root = '/var/rcynic/data'
-
import logging
import sys
from rpki.gui.script_util import setup
setup()
-from rpki.gui.cacheview.util import import_rcynic_xml
+from rpki.gui.gui_rpki_cache.util import update_cache
if __name__ == '__main__':
import optparse
@@ -35,20 +31,12 @@ if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-l", "--level", dest="log_level", default='ERROR',
help="specify the logging level [default: %default]")
- parser.add_option(
- "-f", "--file", dest="logfile",
- help="specify the rcynic XML file to parse [default: %default]",
- default=default_logfile)
- parser.add_option(
- "-r", "--root",
- help="specify the chroot directory for the rcynic jail [default: %default]",
- metavar="DIR", default=default_root)
options, args = parser.parse_args(sys.argv)
v = getattr(logging, options.log_level.upper())
logging.basicConfig(level=v)
logging.info('log level set to %s', logging.getLevelName(v))
- import_rcynic_xml(options.root, options.logfile)
+ update_cache()
logging.shutdown()
diff --git a/ca/tests/Makefile.in b/ca/tests/Makefile.in
index 9796dd2b..eb4357b1 100644
--- a/ca/tests/Makefile.in
+++ b/ca/tests/Makefile.in
@@ -3,12 +3,11 @@
PYTHON = @PYTHON@
abs_top_builddir = @abs_top_builddir@
-all: protocol-samples
+all:
+ @true
clean:
- rm -rf smoketest.dir left-right-protocol-samples publication-protocol-samples yamltest.dir rcynic.xml rcynic-data
-
-protocol-samples: left-right-protocol-samples/.stamp publication-protocol-samples/.stamp
+ rm -rf smoketest.dir left-right-protocol-samples publication-protocol-samples publication-control-protocol-samples rrdp-samples yamltest.dir rcynic.xml rcynic-data
left-right-protocol-samples/.stamp: left-right-protocol-samples.xml split-protocol-samples.xsl
rm -rf left-right-protocol-samples
@@ -16,20 +15,44 @@ left-right-protocol-samples/.stamp: left-right-protocol-samples.xml split-protoc
xsltproc --param verbose 0 --stringparam dir left-right-protocol-samples split-protocol-samples.xsl left-right-protocol-samples.xml
touch $@
+left-right-relaxng: left-right-protocol-samples/.stamp
+ xmllint --noout --relaxng ../../schemas/relaxng/left-right.rng left-right-protocol-samples/*.xml
+
publication-protocol-samples/.stamp: publication-protocol-samples.xml split-protocol-samples.xsl
rm -rf publication-protocol-samples
mkdir publication-protocol-samples
xsltproc --param verbose 0 --stringparam dir publication-protocol-samples split-protocol-samples.xsl publication-protocol-samples.xml
touch $@
-relaxng: protocol-samples
- xmllint --noout --relaxng ../../schemas/relaxng/left-right-schema.rng left-right-protocol-samples/*.xml
- xmllint --noout --relaxng ../../schemas/relaxng/up-down-schema.rng up-down-protocol-samples/*.xml
- xmllint --noout --relaxng ../../schemas/relaxng/publication-schema.rng publication-protocol-samples/*.xml
+publication-relaxng: publication-protocol-samples/.stamp
+ xmllint --noout --relaxng ../../schemas/relaxng/publication.rng publication-protocol-samples/*.xml
+
+publication-control-protocol-samples/.stamp: publication-control-protocol-samples.xml split-protocol-samples.xsl
+ rm -rf publication-control-protocol-samples
+ mkdir publication-control-protocol-samples
+ xsltproc --param verbose 0 --stringparam dir publication-control-protocol-samples split-protocol-samples.xsl publication-control-protocol-samples.xml
+ touch $@
+
+publication-control-relaxng: publication-control-protocol-samples/.stamp
+ xmllint --noout --relaxng ../../schemas/relaxng/publication-control.rng publication-control-protocol-samples/*.xml
+
+rrdp-samples/.stamp: rrdp-samples.xml split-protocol-samples.xsl
+ rm -rf rrdp-samples
+ mkdir rrdp-samples
+ xsltproc --param verbose 0 --stringparam dir rrdp-samples split-protocol-samples.xsl rrdp-samples.xml
+ touch $@
+
+rrdp-relaxng: rrdp-samples/.stamp
+ xmllint --noout --relaxng ../../schemas/relaxng/rrdp.rng rrdp-samples/*.xml
+
+up-down-relaxng:
+ xmllint --noout --relaxng ../../schemas/relaxng/up-down.rng up-down-protocol-samples/*.xml
+
+relaxng: up-down-relaxng left-right-relaxng publication-relaxng publication-control-relaxng rrdp-relaxng
all-tests:: relaxng
-parse-test: protocol-samples
+parse-test: left-right-protocol-samples publication-protocol-samples publication-control-protocol-samples
${PYTHON} xml-parse-test.py
all-tests:: parse-test
@@ -67,7 +90,6 @@ YAMLTEST_CONFIG = smoketest.1.yaml
yamltest:
rm -rf yamltest.dir rcynic-data
- ${PYTHON} sql-cleaner.py
${PYTHON} yamltest.py ${YAMLTEST_CONFIG}
YAMLCONF_CONFIG = ${YAMLTEST_CONFIG}
diff --git a/ca/tests/bgpsec-yaml.py b/ca/tests/bgpsec-yaml.py
index 1562f86e..500d2b9d 100755
--- a/ca/tests/bgpsec-yaml.py
+++ b/ca/tests/bgpsec-yaml.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
#
# $Id$
-#
+#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
#
# Permission to use, copy, modify, and distribute this software for any
@@ -30,28 +30,28 @@ root = "Root"
class Kid(object):
- def __init__(self, n):
- self.name = "ISP-%03d" % n
- self.ipv4 = "10.%d.0.0/16" % n
- self.asn = n
- self.router_id = n * 10000
+ def __init__(self, i):
+ self.name = "ISP-%03d" % i
+ self.ipv4 = "10.%d.0.0/16" % i
+ self.asn = i
+ self.router_id = i * 10000
- @property
- def declare(self):
- return dict(name = self.name,
- ipv4 = self.ipv4,
- asn = self.asn,
- hosted_by = root,
- roa_request = [dict(asn = self.asn, ipv4 = self.ipv4)],
- router_cert = [dict(asn = self.asn, router_id = self.router_id)])
+ @property
+ def declare(self):
+ return dict(name = self.name,
+ ipv4 = self.ipv4,
+ asn = self.asn,
+ hosted_by = root,
+ roa_request = [dict(asn = self.asn, ipv4 = self.ipv4)],
+ router_cert = [dict(asn = self.asn, router_id = self.router_id)])
- @property
- def del_routercert(self):
- return dict(name = self.name, router_cert_del = [dict(asn = self.asn, router_id = self.router_id)])
+ @property
+ def del_routercert(self):
+ return dict(name = self.name, router_cert_del = [dict(asn = self.asn, router_id = self.router_id)])
- @property
- def add_routercert(self):
- return dict(name = self.name, router_cert_add = [dict(asn = self.asn, router_id = self.router_id)])
+ @property
+ def add_routercert(self):
+ return dict(name = self.name, router_cert_add = [dict(asn = self.asn, router_id = self.router_id)])
kids = [Kid(n + 1) for n in xrange(200)]
@@ -72,14 +72,14 @@ docs.append([shell_first,
gym = kids[50:70]
for kid in gym:
- docs.append([shell_next,
- kid.del_routercert,
- sleeper])
+ docs.append([shell_next,
+ kid.del_routercert,
+ sleeper])
for kid in gym:
- docs.append([shell_next,
- kid.add_routercert,
- sleeper])
+ docs.append([shell_next,
+ kid.add_routercert,
+ sleeper])
print '''\
# This configuration was generated by a script. Edit at your own risk.
diff --git a/ca/tests/left-right-protocol-samples.xml b/ca/tests/left-right-protocol-samples.xml
index 7b97386d..c3d24b9d 100644
--- a/ca/tests/left-right-protocol-samples.xml
+++ b/ca/tests/left-right-protocol-samples.xml
@@ -2,11 +2,11 @@
- $Id$
-
- Copyright (C) 2010 Internet Systems Consortium ("ISC")
- -
+ -
- Permission to use, copy, modify, and distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
- copyright notice and this permission notice appear in all copies.
- -
+ -
- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -14,7 +14,7 @@
- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- PERFORMANCE OF THIS SOFTWARE.
- -
+ -
- Portions copyright (C) 2007-2008 American Registry for Internet Numbers ("ARIN")
-
- Permission to use, copy, modify, and distribute this software for any
@@ -35,17 +35,17 @@
-->
<completely_gratuitous_wrapper_element_to_let_me_run_this_through_xmllint>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="create" tag="a000" self_handle="42"/>
+ <tenant action="create" tag="a000" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="create" tag="a000" self_handle="42"/>
+ <tenant action="create" tag="a000" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="set" self_handle="42"
+ <tenant action="set" tenant_handle="42"
rekey="yes"
reissue="yes"
revoke="yes"
@@ -92,19 +92,19 @@
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
</bpki_glue>
- </self>
+ </tenant>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="set" self_handle="42"/>
+ <tenant action="set" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="get" self_handle="42"/>
+ <tenant action="get" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="get" self_handle="42">
+ <tenant action="get" tenant_handle="42">
<bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
@@ -143,15 +143,15 @@
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
</bpki_glue>
- </self>
+ </tenant>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="list"/>
+ <tenant action="list"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="list" self_handle="42">
+ <tenant action="list" tenant_handle="42">
<bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
@@ -190,22 +190,22 @@
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
</bpki_glue>
- </self>
- <self action="list" self_handle="99"/>
+ </tenant>
+ <tenant action="list" tenant_handle="99"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="destroy" self_handle="42"/>
+ <tenant action="destroy" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <self action="destroy" self_handle="42"/>
+ <tenant action="destroy" tenant_handle="42"/>
</msg>
-
+
<!-- ==== -->
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="create" self_handle="42" bsc_handle="17"
+ <bsc action="create" tenant_handle="42" bsc_handle="17"
generate_keypair="yes"
key_type="rsa"
hash_alg="sha256"
@@ -231,15 +231,15 @@
</signing_cert>
</bsc>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="create" self_handle="42" bsc_handle="17">
+ <bsc action="create" tenant_handle="42" bsc_handle="17">
<pkcs10_request>cmVxdWVzdAo=</pkcs10_request>
</bsc>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="set" self_handle="42" bsc_handle="17">
+ <bsc action="set" tenant_handle="42" bsc_handle="17">
<signing_cert>
MIIDHTCCAgWgAwIBAgIJAKUUCoKn9ovVMA0GCSqGSIb3DQEBBQUAMCYxJDAiBgNV
BAMTG1Rlc3QgQ2VydGlmaWNhdGUgQWxpY2UgUm9vdDAeFw0wNzA4MDExOTUzMDda
@@ -272,17 +272,17 @@
</signing_cert_crl>
</bsc>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="set" self_handle="42" bsc_handle="17"/>
+ <bsc action="set" tenant_handle="42" bsc_handle="17"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="get" self_handle="42" bsc_handle="17"/>
+ <bsc action="get" tenant_handle="42" bsc_handle="17"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="get" self_handle="42" bsc_handle="17">
+ <bsc action="get" tenant_handle="42" bsc_handle="17">
<signing_cert>
MIIDHTCCAgWgAwIBAgIJAKUUCoKn9ovVMA0GCSqGSIb3DQEBBQUAMCYxJDAiBgNV
BAMTG1Rlc3QgQ2VydGlmaWNhdGUgQWxpY2UgUm9vdDAeFw0wNzA4MDExOTUzMDda
@@ -304,13 +304,13 @@
</signing_cert>
</bsc>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="list" self_handle="42"/>
+ <bsc action="list" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="get" self_handle="42" bsc_handle="17">
+ <bsc action="get" tenant_handle="42" bsc_handle="17">
<signing_cert>
MIIDHTCCAgWgAwIBAgIJAKUUCoKn9ovVMA0GCSqGSIb3DQEBBQUAMCYxJDAiBgNV
BAMTG1Rlc3QgQ2VydGlmaWNhdGUgQWxpY2UgUm9vdDAeFw0wNzA4MDExOTUzMDda
@@ -332,26 +332,26 @@
</signing_cert>
</bsc>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="destroy" self_handle="42" bsc_handle="17"/>
+ <bsc action="destroy" tenant_handle="42" bsc_handle="17"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <bsc action="destroy" self_handle="42" bsc_handle="17"/>
+ <bsc action="destroy" tenant_handle="42" bsc_handle="17"/>
</msg>
-
+
<!-- ==== -->
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="create" self_handle="42" parent_handle="666"
+ <parent action="create" tenant_handle="42" parent_handle="666"
peer_contact_uri="https://re.bar.example/bandicoot/"
sia_base="rsync://repo.foo.example/wombat/"
bsc_handle="17"
repository_handle="120"
sender_name="tweedledee"
recipient_name="tweedledum">
- <bpki_cms_cert>
+ <bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
@@ -369,8 +369,8 @@
sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cms_cert>
- <bpki_cms_glue>
+ </bpki_cert>
+ <bpki_glue>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
@@ -388,16 +388,61 @@
sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cms_glue>
+ </bpki_glue>
</parent>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="create" self_handle="42" parent_handle="666"/>
+ <parent action="create" tenant_handle="42" parent_handle="666"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="set" self_handle="42" parent_handle="666"
+ <parent action="create" tenant_handle="42" parent_handle="666"
+ peer_contact_uri="https://re.bar.example/bandicoot/"
+ sia_base="rsync://repo.foo.example/wombat/"
+ bsc_handle="17"
+ repository_handle="120"
+ sender_name="tweedledee"
+ recipient_name="tweedledum"
+ root_asn_resources="17,42,666"
+ root_ipv4_resources="10.0.0.0/8,192.168.0.0/16"
+ root_ipv6_resources="">
+ </parent>
+ </msg>
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
+ <parent action="create" tenant_handle="42" parent_handle="666">
+ <rpki_root_cert>
+ MIIEaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyhFRjE5
+ QTdDQUE3MDg0NUZCNkYzRjlEOUFBOEE4OTBDRTg5QTgxMUQzMB4XDTE2MDQxNDIy
+ NTE0N1oXDTE3MDQxNDIyNTE0N1owMzExMC8GA1UEAxMoRUYxOUE3Q0FBNzA4NDVG
+ QjZGM0Y5RDlBQThBODkwQ0U4OUE4MTFEMzCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ ADCCAQoCggEBAKLVZcQPCPyKX21GYGuB5OPNh224HY9ndD1TU5r4yzJWCDN7Rfku
+ ftDPOG9qVOp8EMEgr+xsH7tm5LcyuJt1+KHhQD0JT44J6LCUtn2xJPINIDQCbaXm
+ L+RGAX+GlWesC9yxjnGBjLFSQJ9qN2QeR1MBeaL8iP/vyXq9hgEbuHTQliUqg/6n
+ bZ7+JShIQHrgNvv4wTLtrD7JoL+hDvnIhpt3OSwwzb1QvTfUEZ9mv8IE/Zpe2Kk6
+ MXcbFjhWgmGS1ZpQjfWQYkbHnSrkZd7IhGKyPj/x/mV3P7tmHBKXo1TnRbKd/ij5
+ ZjNDSnW144CvAiTcpj9xnKgtQLqfbsEt4ccCAwEAAaOCAYUwggGBMB0GA1UdDgQW
+ BBTvGafKpwhF+28/nZqoqJDOiagR0zAfBgNVHSMEGDAWgBTvGafKpwhF+28/nZqo
+ qJDOiagR0zAYBgNVHSABAf8EDjAMMAoGCCsGAQUFBw4CMA8GA1UdEwEB/wQFMAMB
+ Af8wDgYDVR0PAQH/BAQDAgEGMIG3BggrBgEFBQcBCwSBqjCBpzA2BggrBgEFBQcw
+ BYYqcnN5bmM6Ly9sb2NhbGhvc3Q6NDQxMC9ycGtpL1JJUi1yb290L3Jvb3QvMD4G
+ CCsGAQUFBzAKhjJyc3luYzovL2xvY2FsaG9zdDo0NDEwL3Jwa2kvUklSLXJvb3Qv
+ cm9vdC9yb290Lm1mdDAtBggrBgEFBQcwDYYhaHR0cHM6Ly9sb2NhbGhvc3Q6NDQx
+ MS9ub3RpZnkueG1sMCEGCCsGAQUFBwEIAQH/BBIwEKAOMAwwCgIBAAIFAP////8w
+ JwYIKwYBBQUHAQcBAf8EGDAWMAkEAgABMAMDAQAwCQQCAAIwAwMBADANBgkqhkiG
+ 9w0BAQsFAAOCAQEAADpTJlaW/YBhvM4d8+VJwGMNgRy1gIbNfikXbDJunIsfVvQH
+ 6Cvu+G9LHwzr41S31gLDPiI5xqlYIcOLNmD4kFF+FkI5pmdZaYyE7cmUrV9LfJSp
+ 6AjwNGhOlFDQJbfvndxAmTpAimvC/eKdB4nsbun3ewddIBbz7meq8FD/anrsU9F7
+ ezLgQuChwzshV29wqyM97RQ1J8xeBdadWv8DKxrYj2OkHAZCzQNoYp33i6B/qHf2
+ +350IE4Shix6fGfOuhq3BKSMEzBFUUK6RDmUrBfJlyCwD9+DWkgXb6gdw4MHLEEK
+ 34fI46rg3JkTm9LK4glhTSSdXNuSgQNiNt1sYA==
+ </rpki_root_cert>
+ </parent>
+ </msg>
+
+ <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
+ <parent action="set" tenant_handle="42" parent_handle="666"
peer_contact_uri="https://re.bar.example/bandicoot/"
sia_base="rsync://repo.foo.example/wombat/"
bsc_handle="17"
@@ -405,7 +450,7 @@
rekey="yes"
reissue="yes"
revoke="yes">
- <bpki_cms_cert>
+ <bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
@@ -423,8 +468,8 @@
sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cms_cert>
- <bpki_cms_glue>
+ </bpki_cert>
+ <bpki_glue>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
@@ -442,25 +487,68 @@
sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cms_glue>
+ </bpki_glue>
</parent>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="set" self_handle="42" parent_handle="666"/>
+ <parent action="set" tenant_handle="42" parent_handle="666"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="get" self_handle="42" parent_handle="666"/>
+ <parent action="set" tenant_handle="42" parent_handle="666"
+ peer_contact_uri="https://re.bar.example/bandicoot/"
+ sia_base="rsync://repo.foo.example/wombat/"
+ bsc_handle="17"
+ repository_handle="120"
+ root_asn_resources="17,42,666"
+ root_ipv4_resources="10.0.0.0/8,192.168.0.0/16"
+ root_ipv6_resources="">
+ </parent>
+ </msg>
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
+ <parent action="set" tenant_handle="42" parent_handle="666">
+ <rpki_root_cert>
+ MIIEaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyhFRjE5
+ QTdDQUE3MDg0NUZCNkYzRjlEOUFBOEE4OTBDRTg5QTgxMUQzMB4XDTE2MDQxNDIy
+ NTE0N1oXDTE3MDQxNDIyNTE0N1owMzExMC8GA1UEAxMoRUYxOUE3Q0FBNzA4NDVG
+ QjZGM0Y5RDlBQThBODkwQ0U4OUE4MTFEMzCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ ADCCAQoCggEBAKLVZcQPCPyKX21GYGuB5OPNh224HY9ndD1TU5r4yzJWCDN7Rfku
+ ftDPOG9qVOp8EMEgr+xsH7tm5LcyuJt1+KHhQD0JT44J6LCUtn2xJPINIDQCbaXm
+ L+RGAX+GlWesC9yxjnGBjLFSQJ9qN2QeR1MBeaL8iP/vyXq9hgEbuHTQliUqg/6n
+ bZ7+JShIQHrgNvv4wTLtrD7JoL+hDvnIhpt3OSwwzb1QvTfUEZ9mv8IE/Zpe2Kk6
+ MXcbFjhWgmGS1ZpQjfWQYkbHnSrkZd7IhGKyPj/x/mV3P7tmHBKXo1TnRbKd/ij5
+ ZjNDSnW144CvAiTcpj9xnKgtQLqfbsEt4ccCAwEAAaOCAYUwggGBMB0GA1UdDgQW
+ BBTvGafKpwhF+28/nZqoqJDOiagR0zAfBgNVHSMEGDAWgBTvGafKpwhF+28/nZqo
+ qJDOiagR0zAYBgNVHSABAf8EDjAMMAoGCCsGAQUFBw4CMA8GA1UdEwEB/wQFMAMB
+ Af8wDgYDVR0PAQH/BAQDAgEGMIG3BggrBgEFBQcBCwSBqjCBpzA2BggrBgEFBQcw
+ BYYqcnN5bmM6Ly9sb2NhbGhvc3Q6NDQxMC9ycGtpL1JJUi1yb290L3Jvb3QvMD4G
+ CCsGAQUFBzAKhjJyc3luYzovL2xvY2FsaG9zdDo0NDEwL3Jwa2kvUklSLXJvb3Qv
+ cm9vdC9yb290Lm1mdDAtBggrBgEFBQcwDYYhaHR0cHM6Ly9sb2NhbGhvc3Q6NDQx
+ MS9ub3RpZnkueG1sMCEGCCsGAQUFBwEIAQH/BBIwEKAOMAwwCgIBAAIFAP////8w
+ JwYIKwYBBQUHAQcBAf8EGDAWMAkEAgABMAMDAQAwCQQCAAIwAwMBADANBgkqhkiG
+ 9w0BAQsFAAOCAQEAADpTJlaW/YBhvM4d8+VJwGMNgRy1gIbNfikXbDJunIsfVvQH
+ 6Cvu+G9LHwzr41S31gLDPiI5xqlYIcOLNmD4kFF+FkI5pmdZaYyE7cmUrV9LfJSp
+ 6AjwNGhOlFDQJbfvndxAmTpAimvC/eKdB4nsbun3ewddIBbz7meq8FD/anrsU9F7
+ ezLgQuChwzshV29wqyM97RQ1J8xeBdadWv8DKxrYj2OkHAZCzQNoYp33i6B/qHf2
+ +350IE4Shix6fGfOuhq3BKSMEzBFUUK6RDmUrBfJlyCwD9+DWkgXb6gdw4MHLEEK
+ 34fI46rg3JkTm9LK4glhTSSdXNuSgQNiNt1sYA==
+ </rpki_root_cert>
+ </parent>
+ </msg>
+
+ <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
+ <parent action="get" tenant_handle="42" parent_handle="666"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="get" self_handle="42" parent_handle="666"
+ <parent action="get" tenant_handle="42" parent_handle="666"
peer_contact_uri="https://re.bar.example/bandicoot/"
sia_base="rsync://repo.foo.example/wombat/"
bsc_handle="17"
repository_handle="120">
- <bpki_cms_cert>
+ <bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
@@ -478,8 +566,8 @@
sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cms_cert>
- <bpki_cms_glue>
+ </bpki_cert>
+ <bpki_glue>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
@@ -497,21 +585,47 @@
sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cms_glue>
+ </bpki_glue>
+ <rpki_root_cert>
+ MIIEaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyhFRjE5
+ QTdDQUE3MDg0NUZCNkYzRjlEOUFBOEE4OTBDRTg5QTgxMUQzMB4XDTE2MDQxNDIy
+ NTE0N1oXDTE3MDQxNDIyNTE0N1owMzExMC8GA1UEAxMoRUYxOUE3Q0FBNzA4NDVG
+ QjZGM0Y5RDlBQThBODkwQ0U4OUE4MTFEMzCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ ADCCAQoCggEBAKLVZcQPCPyKX21GYGuB5OPNh224HY9ndD1TU5r4yzJWCDN7Rfku
+ ftDPOG9qVOp8EMEgr+xsH7tm5LcyuJt1+KHhQD0JT44J6LCUtn2xJPINIDQCbaXm
+ L+RGAX+GlWesC9yxjnGBjLFSQJ9qN2QeR1MBeaL8iP/vyXq9hgEbuHTQliUqg/6n
+ bZ7+JShIQHrgNvv4wTLtrD7JoL+hDvnIhpt3OSwwzb1QvTfUEZ9mv8IE/Zpe2Kk6
+ MXcbFjhWgmGS1ZpQjfWQYkbHnSrkZd7IhGKyPj/x/mV3P7tmHBKXo1TnRbKd/ij5
+ ZjNDSnW144CvAiTcpj9xnKgtQLqfbsEt4ccCAwEAAaOCAYUwggGBMB0GA1UdDgQW
+ BBTvGafKpwhF+28/nZqoqJDOiagR0zAfBgNVHSMEGDAWgBTvGafKpwhF+28/nZqo
+ qJDOiagR0zAYBgNVHSABAf8EDjAMMAoGCCsGAQUFBw4CMA8GA1UdEwEB/wQFMAMB
+ Af8wDgYDVR0PAQH/BAQDAgEGMIG3BggrBgEFBQcBCwSBqjCBpzA2BggrBgEFBQcw
+ BYYqcnN5bmM6Ly9sb2NhbGhvc3Q6NDQxMC9ycGtpL1JJUi1yb290L3Jvb3QvMD4G
+ CCsGAQUFBzAKhjJyc3luYzovL2xvY2FsaG9zdDo0NDEwL3Jwa2kvUklSLXJvb3Qv
+ cm9vdC9yb290Lm1mdDAtBggrBgEFBQcwDYYhaHR0cHM6Ly9sb2NhbGhvc3Q6NDQx
+ MS9ub3RpZnkueG1sMCEGCCsGAQUFBwEIAQH/BBIwEKAOMAwwCgIBAAIFAP////8w
+ JwYIKwYBBQUHAQcBAf8EGDAWMAkEAgABMAMDAQAwCQQCAAIwAwMBADANBgkqhkiG
+ 9w0BAQsFAAOCAQEAADpTJlaW/YBhvM4d8+VJwGMNgRy1gIbNfikXbDJunIsfVvQH
+ 6Cvu+G9LHwzr41S31gLDPiI5xqlYIcOLNmD4kFF+FkI5pmdZaYyE7cmUrV9LfJSp
+ 6AjwNGhOlFDQJbfvndxAmTpAimvC/eKdB4nsbun3ewddIBbz7meq8FD/anrsU9F7
+ ezLgQuChwzshV29wqyM97RQ1J8xeBdadWv8DKxrYj2OkHAZCzQNoYp33i6B/qHf2
+ +350IE4Shix6fGfOuhq3BKSMEzBFUUK6RDmUrBfJlyCwD9+DWkgXb6gdw4MHLEEK
+ 34fI46rg3JkTm9LK4glhTSSdXNuSgQNiNt1sYA==
+ </rpki_root_cert>
</parent>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="list" self_handle="42"/>
+ <parent action="list" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="list" self_handle="42" parent_handle="666"
+ <parent action="list" tenant_handle="42" parent_handle="666"
peer_contact_uri="https://re.bar.example/bandicoot/"
sia_base="rsync://repo.foo.example/wombat/"
bsc_handle="17"
repository_handle="120">
- <bpki_cms_cert>
+ <bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
@@ -529,8 +643,8 @@
sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cms_cert>
- <bpki_cms_glue>
+ </bpki_cert>
+ <bpki_glue>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
@@ -548,22 +662,48 @@
sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cms_glue>
+ </bpki_glue>
+ <rpki_root_cert>
+ MIIEaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyhFRjE5
+ QTdDQUE3MDg0NUZCNkYzRjlEOUFBOEE4OTBDRTg5QTgxMUQzMB4XDTE2MDQxNDIy
+ NTE0N1oXDTE3MDQxNDIyNTE0N1owMzExMC8GA1UEAxMoRUYxOUE3Q0FBNzA4NDVG
+ QjZGM0Y5RDlBQThBODkwQ0U4OUE4MTFEMzCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ ADCCAQoCggEBAKLVZcQPCPyKX21GYGuB5OPNh224HY9ndD1TU5r4yzJWCDN7Rfku
+ ftDPOG9qVOp8EMEgr+xsH7tm5LcyuJt1+KHhQD0JT44J6LCUtn2xJPINIDQCbaXm
+ L+RGAX+GlWesC9yxjnGBjLFSQJ9qN2QeR1MBeaL8iP/vyXq9hgEbuHTQliUqg/6n
+ bZ7+JShIQHrgNvv4wTLtrD7JoL+hDvnIhpt3OSwwzb1QvTfUEZ9mv8IE/Zpe2Kk6
+ MXcbFjhWgmGS1ZpQjfWQYkbHnSrkZd7IhGKyPj/x/mV3P7tmHBKXo1TnRbKd/ij5
+ ZjNDSnW144CvAiTcpj9xnKgtQLqfbsEt4ccCAwEAAaOCAYUwggGBMB0GA1UdDgQW
+ BBTvGafKpwhF+28/nZqoqJDOiagR0zAfBgNVHSMEGDAWgBTvGafKpwhF+28/nZqo
+ qJDOiagR0zAYBgNVHSABAf8EDjAMMAoGCCsGAQUFBw4CMA8GA1UdEwEB/wQFMAMB
+ Af8wDgYDVR0PAQH/BAQDAgEGMIG3BggrBgEFBQcBCwSBqjCBpzA2BggrBgEFBQcw
+ BYYqcnN5bmM6Ly9sb2NhbGhvc3Q6NDQxMC9ycGtpL1JJUi1yb290L3Jvb3QvMD4G
+ CCsGAQUFBzAKhjJyc3luYzovL2xvY2FsaG9zdDo0NDEwL3Jwa2kvUklSLXJvb3Qv
+ cm9vdC9yb290Lm1mdDAtBggrBgEFBQcwDYYhaHR0cHM6Ly9sb2NhbGhvc3Q6NDQx
+ MS9ub3RpZnkueG1sMCEGCCsGAQUFBwEIAQH/BBIwEKAOMAwwCgIBAAIFAP////8w
+ JwYIKwYBBQUHAQcBAf8EGDAWMAkEAgABMAMDAQAwCQQCAAIwAwMBADANBgkqhkiG
+ 9w0BAQsFAAOCAQEAADpTJlaW/YBhvM4d8+VJwGMNgRy1gIbNfikXbDJunIsfVvQH
+ 6Cvu+G9LHwzr41S31gLDPiI5xqlYIcOLNmD4kFF+FkI5pmdZaYyE7cmUrV9LfJSp
+ 6AjwNGhOlFDQJbfvndxAmTpAimvC/eKdB4nsbun3ewddIBbz7meq8FD/anrsU9F7
+ ezLgQuChwzshV29wqyM97RQ1J8xeBdadWv8DKxrYj2OkHAZCzQNoYp33i6B/qHf2
+ +350IE4Shix6fGfOuhq3BKSMEzBFUUK6RDmUrBfJlyCwD9+DWkgXb6gdw4MHLEEK
+ 34fI46rg3JkTm9LK4glhTSSdXNuSgQNiNt1sYA==
+ </rpki_root_cert>
</parent>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="destroy" self_handle="42"
+ <parent action="destroy" tenant_handle="42"
parent_handle="666"/> </msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <parent action="destroy" self_handle="42" parent_handle="666"/>
+ <parent action="destroy" tenant_handle="42" parent_handle="666"/>
</msg>
-
+
<!-- ==== -->
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="create" self_handle="42" child_handle="3"
+ <child action="create" tenant_handle="42" child_handle="3"
bsc_handle="17">
<bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
@@ -586,13 +726,13 @@
</bpki_cert>
</child>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="create" self_handle="42" child_handle="3"/>
+ <child action="create" tenant_handle="42" child_handle="3"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="set" self_handle="42" child_handle="3"
+ <child action="set" tenant_handle="42" child_handle="3"
bsc_handle="17"
reissue="yes">
<bpki_cert>
@@ -616,17 +756,17 @@
</bpki_cert>
</child>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="set" self_handle="42" child_handle="3"/>
+ <child action="set" tenant_handle="42" child_handle="3"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="get" self_handle="42" child_handle="3"/>
+ <child action="get" tenant_handle="42" child_handle="3"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="get" self_handle="42" child_handle="3"
+ <child action="get" tenant_handle="42" child_handle="3"
bsc_handle="17">
<bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
@@ -649,13 +789,13 @@
</bpki_cert>
</child>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="list" self_handle="42"/>
+ <child action="list" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="list" self_handle="42" child_handle="3"
+ <child action="list" tenant_handle="42" child_handle="3"
bsc_handle="17">
<bpki_cert>
MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
@@ -678,19 +818,19 @@
</bpki_cert>
</child>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="destroy" self_handle="42" child_handle="3"/>
+ <child action="destroy" tenant_handle="42" child_handle="3"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <child action="destroy" self_handle="42" child_handle="3"/>
+ <child action="destroy" tenant_handle="42" child_handle="3"/>
</msg>
-
+
<!-- ==== -->
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="create" self_handle="42" repository_handle="120"
+ <repository action="create" tenant_handle="42" repository_handle="120"
peer_contact_uri="https://re.bar.example/bandicoot/"
bsc_handle="17">
<bpki_cert>
@@ -733,13 +873,13 @@
</bpki_glue>
</repository>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="create" self_handle="42" repository_handle="120"/>
+ <repository action="create" tenant_handle="42" repository_handle="120"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="set" self_handle="42" repository_handle="120"
+ <repository action="set" tenant_handle="42" repository_handle="120"
peer_contact_uri="https://re.bar.example/bandicoot/"
bsc_handle="17">
<bpki_cert>
@@ -782,17 +922,17 @@
</bpki_glue>
</repository>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="set" self_handle="42" repository_handle="120"/>
+ <repository action="set" tenant_handle="42" repository_handle="120"/>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="get" self_handle="42" repository_handle="120"/>
+ <repository action="get" tenant_handle="42" repository_handle="120"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="get" self_handle="42" repository_handle="120"
+ <repository action="get" tenant_handle="42" repository_handle="120"
peer_contact_uri="https://re.bar.example/bandicoot/"
bsc_handle="17">
<bpki_cert>
@@ -835,13 +975,13 @@
</bpki_glue>
</repository>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="list" self_handle="42"/>
+ <repository action="list" tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="list" self_handle="42" repository_handle="120"
+ <repository action="list" tenant_handle="42" repository_handle="120"
peer_contact_uri="https://re.bar.example/bandicoot/"
bsc_handle="17">
<bpki_cert>
@@ -884,56 +1024,56 @@
</bpki_glue>
</repository>
</msg>
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="destroy" self_handle="42" repository_handle="120"/>
+ <repository action="destroy" tenant_handle="42" repository_handle="120"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <repository action="destroy" self_handle="42" repository_handle="120"/>
+ <repository action="destroy" tenant_handle="42" repository_handle="120"/>
</msg>
<!-- ==== -->
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <list_resources self_handle="42" child_handle="289"/>
+ <list_resources tenant_handle="42" child_handle="289"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <list_resources self_handle="42" child_handle="289"
+ <list_resources tenant_handle="42" child_handle="289"
valid_until="2008-04-01T00:00:00Z"
ipv4="10.0.0.44/32,10.3.0.44/32"
ipv6="fe80:deed:f00d::/48,fe80:dead:beef:2::-fe80:dead:beef:2::49"
asn="666"/>
</msg>
-
+
<!-- === -->
-
+
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <list_roa_requests self_handle="42"/>
+ <list_roa_requests tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <list_roa_requests self_handle="42"
+ <list_roa_requests tenant_handle="42"
asn="666"
ipv4="10.0.0.44/32,10.3.0.44/32"
ipv6="fe80:deed:f00d::/48,fe80:dead:beef::/48-56"
/>
- <list_roa_requests self_handle="42"
+ <list_roa_requests tenant_handle="42"
asn="12345"
ipv4="10.0.0.44/32"
ipv6="2002:a00::/48-56"
/>
</msg>
-
+
<!-- === -->
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <list_received_resources self_handle="42"/>
+ <list_received_resources tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <list_received_resources self_handle="42"
+ <list_received_resources tenant_handle="42"
parent_handle="Alice"
notBefore="2010-02-22T03:44:23Z"
notAfter="2011-02-21T11:03:49Z"
@@ -942,7 +1082,7 @@
aia_uri="rsync://arin.rpki.net/arin/arin.cer"
asn="1280,3557"
ipv4="149.20.0.0/16,192.5.4.0/23,204.152.184.0/21"/>
- <list_received_resources self_handle="42"
+ <list_received_resources tenant_handle="42"
parent_handle="Bob"
uri="rsync://arin.rpki.net/arin/1/uWqpa8GkcEDBZkEsmOEofeDKk9s.cer"
notBefore="2010-02-22T03:44:20Z"
@@ -954,15 +1094,15 @@
ipv6="2001:4f8::/32,2001:500::/48,2001:500:2e::/47,2001:500:60::-2001:500:7c:ffff:ffff:ffff:ffff:ffff,2001:500:85::/48"/>
</msg>
-
+
<!-- === -->
<msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <list_published_objects self_handle="42"/>
+ <list_published_objects tenant_handle="42"/>
</msg>
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <list_published_objects self_handle="42" uri="rsync://rpki.example.org/rpki/DEMEtlxZrZes7TNGbe7XwVSMgW0.crl">
+ <list_published_objects tenant_handle="42" uri="rsync://rpki.example.org/rpki/DEMEtlxZrZes7TNGbe7XwVSMgW0.crl">
MIIBrjCBlwIBATANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEygwQzQzMDRCNjVDNTlBRDk3
QUNFRDMzNDY2REVFRDdDMTU0OEM4MTZEFw0wOTA5MjgyMDUxNDlaFw0wOTA5MjgyMTUxNDla
oDAwLjAfBgNVHSMEGDAWgBQMQwS2XFmtl6ztM0Zt7tfBVIyBbTALBgNVHRQEBAICAWkwDQYJ
@@ -973,7 +1113,7 @@
fBk4i7H945v/zs7bLLMJxTs8+ao4iCDuknjbGhjWmi9xrTXDtcCXx607rPDkJQcJE2WnRS/U
HIA=
</list_published_objects>
- <list_published_objects self_handle="42" uri="rsync://rpki.example.org/rpki/DEMEtlxZrZes7TNGbe7XwVSMgW0.mft">
+ <list_published_objects tenant_handle="42" uri="rsync://rpki.example.org/rpki/DEMEtlxZrZes7TNGbe7XwVSMgW0.mft">
MIIHBQYJKoZIhvcNAQcCoIIG9jCCBvICAQMxDTALBglghkgBZQMEAgEwggEfBgsqhkiG9w0B
CRABGqCCAQ4EggEKMIIBBgICAWoYDzIwMDkwOTI4MjA1MTQ5WhgPMjAwOTA5MjgyMTUxNDla
BglghkgBZQMEAgEwgdIwRBYfREVNRXRseFpyWmVzN1ROR2JlN1h3VlNNZ1cwLmNybAMhAPgd
@@ -1009,7 +1149,7 @@
yML8lQJAFAyjnXJ+doGbqfTUpVH4q4drqRb73WbL0zf/Z2HGwhDlTmsAdjparWdQcfXIVrJF
ynS1fab9XZfj+VtBFKjooDjaLw==
</list_published_objects>
- <list_published_objects self_handle="42" uri="rsync://rpki.example.org/rpki/ldvxcHGdr3oKHcPj-gukmetNRZ0.roa">
+ <list_published_objects tenant_handle="42" uri="rsync://rpki.example.org/rpki/ldvxcHGdr3oKHcPj-gukmetNRZ0.roa">
MIIGnQYJKoZIhvcNAQcCoIIGjjCCBooCAQMxDTALBglghkgBZQMEAgEwMQYLKoZIhvcNAQkQ
ARigIgQgMB4CAg3lMBgwFgQCAAEwEDAGAwQAwAUEMAYDBADABQWgggSTMIIEjzCCA3egAwIB
AgIBAjANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEygwQzQzMDRCNjVDNTlBRDk3QUNFRDMz
@@ -1043,7 +1183,7 @@
+N931gu2r5I/XB/MGgGvXNWozK7RuMn55i5hMqI2NQs+/b7/AQU0+/i3g7SlLA8iZwHq49U2
ZXRCjLXcy0tQOWVsMnGfReN8oNDhHbc=
</list_published_objects>
- <list_published_objects self_handle="42" uri="rsync://rpki.example.org/rpki/xopNGcsB_p7eafYqXatmVV8HZd0.roa">
+ <list_published_objects tenant_handle="42" uri="rsync://rpki.example.org/rpki/xopNGcsB_p7eafYqXatmVV8HZd0.roa">
MIIGoQYJKoZIhvcNAQcCoIIGkjCCBo4CAQMxDTALBglghkgBZQMEAgEwMAYLKoZIhvcNAQkQ
ARigIQQfMB0CAgUAMBcwFQQCAAEwDzAFAwMAlRQwBgMEA8yYuKCCBJgwggSUMIIDfKADAgEC
AgEDMA0GCSqGSIb3DQEBCwUAMDMxMTAvBgNVBAMTKDBDNDMwNEI2NUM1OUFEOTdBQ0VEMzM0
@@ -1081,13 +1221,13 @@
</msg>
<!-- === -->
-
+
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <report_error self_handle="42" error_code="your_hair_is_on_fire">text string</report_error>
+ <report_error tenant_handle="42" error_code="your_hair_is_on_fire">text string</report_error>
</msg>
<msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/left-right-spec/">
- <report_error self_handle="42" error_code="your_hair_is_on_fire"/>
+ <report_error tenant_handle="42" error_code="your_hair_is_on_fire"/>
</msg>
</completely_gratuitous_wrapper_element_to_let_me_run_this_through_xmllint>
diff --git a/ca/tests/myrpki-xml-parse-test.py b/ca/tests/myrpki-xml-parse-test.py
index 9db7ec57..d915ea5b 100644
--- a/ca/tests/myrpki-xml-parse-test.py
+++ b/ca/tests/myrpki-xml-parse-test.py
@@ -25,77 +25,77 @@ relaxng = lxml.etree.RelaxNG(file = "myrpki.rng")
tree = lxml.etree.parse("myrpki.xml").getroot()
if False:
- print lxml.etree.tostring(tree, pretty_print = True, encoding = "us-ascii", xml_declaration = True)
+ print lxml.etree.tostring(tree, pretty_print = True, encoding = "us-ascii", xml_declaration = True)
relaxng.assertValid(tree)
def showitems(y):
- if False:
- for k, v in y.items():
- if v:
- print " ", k, v
+ if False:
+ for k, v in y.items():
+ if v:
+ print " ", k, v
def tag(t):
- return "{http://www.hactrn.net/uris/rpki/myrpki/}" + t
+ return "{http://www.hactrn.net/uris/rpki/myrpki/}" + t
print "My handle:", tree.get("handle")
print "Children:"
for x in tree.getiterator(tag("child")):
- print " ", x
- print " Handle:", x.get("handle")
- print " ASNS: ", rpki.resource_set.resource_set_as(x.get("asns"))
- print " IPv4: ", rpki.resource_set.resource_set_ipv4(x.get("v4"))
- print " Valid: ", x.get("valid_until")
- showitems(x)
+ print " ", x
+ print " Handle:", x.get("handle")
+ print " ASNS: ", rpki.resource_set.resource_set_as(x.get("asns"))
+ print " IPv4: ", rpki.resource_set.resource_set_ipv4(x.get("v4"))
+ print " Valid: ", x.get("valid_until")
+ showitems(x)
print
print "ROA requests:"
for x in tree.getiterator(tag("roa_request")):
- print " ", x
- print " ASN: ", x.get("asn")
- print " IPv4:", rpki.resource_set.roa_prefix_set_ipv4(x.get("v4"))
- print " IPv6:", rpki.resource_set.roa_prefix_set_ipv6(x.get("v6"))
- showitems(x)
+ print " ", x
+ print " ASN: ", x.get("asn")
+ print " IPv4:", rpki.resource_set.roa_prefix_set_ipv4(x.get("v4"))
+ print " IPv6:", rpki.resource_set.roa_prefix_set_ipv6(x.get("v6"))
+ showitems(x)
print
def showpem(label, b64, kind):
- cmd = ("openssl", kind, "-noout", "-text", "-inform", "DER")
- if kind == "x509":
- cmd += ("-certopt", "no_pubkey,no_sigdump")
- p = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
- text = p.communicate(input = base64.b64decode(b64))[0]
- if p.returncode != 0:
- raise subprocess.CalledProcessError(returncode = p.returncode, cmd = cmd)
- print label, text
+ cmd = ("openssl", kind, "-noout", "-text", "-inform", "DER")
+ if kind == "x509":
+ cmd += ("-certopt", "no_pubkey,no_sigdump")
+ p = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
+ text = p.communicate(input = base64.b64decode(b64))[0]
+ if p.returncode != 0:
+ raise subprocess.CalledProcessError(returncode = p.returncode, cmd = cmd)
+ print label, text
for x in tree.getiterator(tag("child")):
- cert = x.findtext(tag("bpki_certificate"))
- if cert:
- showpem("Child", cert, "x509")
+ cert = x.findtext(tag("bpki_certificate"))
+ if cert:
+ showpem("Child", cert, "x509")
for x in tree.getiterator(tag("parent")):
- print "Parent URI:", x.get("service_uri")
- cert = x.findtext(tag("bpki_certificate"))
- if cert:
- showpem("Parent", cert, "x509")
+ print "Parent URI:", x.get("service_uri")
+ cert = x.findtext(tag("bpki_certificate"))
+ if cert:
+ showpem("Parent", cert, "x509")
ca = tree.findtext(tag("bpki_ca_certificate"))
if ca:
- showpem("CA", ca, "x509")
+ showpem("CA", ca, "x509")
bsc = tree.findtext(tag("bpki_bsc_certificate"))
if bsc:
- showpem("BSC EE", bsc, "x509")
+ showpem("BSC EE", bsc, "x509")
repo = tree.findtext(tag("bpki_repository_certificate"))
if repo:
- showpem("Repository", repo, "x509")
+ showpem("Repository", repo, "x509")
req = tree.findtext(tag("bpki_bsc_pkcs10"))
if req:
- showpem("BSC EE", req, "req")
+ showpem("BSC EE", req, "req")
crl = tree.findtext(tag("bpki_crl"))
if crl:
- showpem("CA", crl, "crl")
+ showpem("CA", crl, "crl")
diff --git a/ca/tests/old_irdbd.py b/ca/tests/old_irdbd.py
index d66e683e..d26c3476 100644
--- a/ca/tests/old_irdbd.py
+++ b/ca/tests/old_irdbd.py
@@ -15,5 +15,5 @@
# PERFORMANCE OF THIS SOFTWARE.
if __name__ == "__main__":
- import rpki.old_irdbd
- rpki.old_irdbd.main()
+ import rpki.old_irdbd
+ rpki.old_irdbd.main()
diff --git a/ca/tests/publication-control-protocol-samples.xml b/ca/tests/publication-control-protocol-samples.xml
new file mode 100644
index 00000000..e094f3f6
--- /dev/null
+++ b/ca/tests/publication-control-protocol-samples.xml
@@ -0,0 +1,155 @@
+<!-- -*- SGML -*-
+ - $Id$
+ -
+ - Copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
+ -
+ - Permission to use, copy, modify, and distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+ -
+ -
+ - This is a collection of sample publication protocol PDU samples
+ - to use as test cases for the publication protocol RelaxNG schema.
+ -->
+
+<completely_gratuitous_wrapper_element_to_let_me_run_this_through_xmllint>
+
+ <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="create" client_handle="3" base_uri="rsync://wombat.invalid/">
+ <bpki_cert>
+ MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
+ BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
+ MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
+ b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArKYUtJaM5PH5917S
+ G2ACc7iBYdQO2HYyu8Gb6i9Q2Gxc3cWEX7RTBvgOL79pWf3GIdnoupzMnoZVtY3G
+ Ux2G/0WkmLui2TCeDhcfXdQ4rcp8J3V/6ESj+yuEPPOG8UN17mUKKgujrch6ZvgC
+ DO9AyOK/uXu+ABQXTPsn2pVe2EVh3V004ShLi8GKgVdqb/rW/6GTg0Xb/zLT6WWM
+ uT++6sXTlztJdQYkRamJvKfQDU1naC8mAkGf79Tba0xyBGAUII0GfREY6t4/+NAP
+ 2Yyb3xNlBqcJoTov0JfNKHZcCZePr79j7LK/hkZxxip+Na9xDpE+oQRV+DRukCRJ
+ diqg+wIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTDEsXJe6pjAQD4
+ ULlB7+GMDBlimTAfBgNVHSMEGDAWgBTDEsXJe6pjAQD4ULlB7+GMDBlimTANBgkq
+ hkiG9w0BAQUFAAOCAQEAWWkNcW6S1tKKqtzJsdfhjJiAAPQmOXJskv0ta/8f6Acg
+ cum1YieNdtT0n96P7CUHOWP8QBb91JzeewR7b6WJLwb1Offs3wNq3kk75pJe89r4
+ XY39EZHhMW+Dv0PhIKu2CgD4LeyH1FVTQkF/QObGEmkn+s+HTsuzd1l2VLwcP1Sm
+ sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
+ YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
+ 3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
+ </bpki_cert>
+ </client>
+ </msg>
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="create" client_handle="3"/>
+ </msg>
+
+ <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="set" client_handle="3">
+ <bpki_glue>
+ MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
+ BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
+ MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
+ b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArKYUtJaM5PH5917S
+ G2ACc7iBYdQO2HYyu8Gb6i9Q2Gxc3cWEX7RTBvgOL79pWf3GIdnoupzMnoZVtY3G
+ Ux2G/0WkmLui2TCeDhcfXdQ4rcp8J3V/6ESj+yuEPPOG8UN17mUKKgujrch6ZvgC
+ DO9AyOK/uXu+ABQXTPsn2pVe2EVh3V004ShLi8GKgVdqb/rW/6GTg0Xb/zLT6WWM
+ uT++6sXTlztJdQYkRamJvKfQDU1naC8mAkGf79Tba0xyBGAUII0GfREY6t4/+NAP
+ 2Yyb3xNlBqcJoTov0JfNKHZcCZePr79j7LK/hkZxxip+Na9xDpE+oQRV+DRukCRJ
+ diqg+wIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTDEsXJe6pjAQD4
+ ULlB7+GMDBlimTAfBgNVHSMEGDAWgBTDEsXJe6pjAQD4ULlB7+GMDBlimTANBgkq
+ hkiG9w0BAQUFAAOCAQEAWWkNcW6S1tKKqtzJsdfhjJiAAPQmOXJskv0ta/8f6Acg
+ cum1YieNdtT0n96P7CUHOWP8QBb91JzeewR7b6WJLwb1Offs3wNq3kk75pJe89r4
+ XY39EZHhMW+Dv0PhIKu2CgD4LeyH1FVTQkF/QObGEmkn+s+HTsuzd1l2VLwcP1Sm
+ sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
+ YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
+ 3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
+ </bpki_glue>
+ </client>
+ </msg>
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="set" client_handle="3"/>
+ </msg>
+
+ <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="get" client_handle="3"/>
+ </msg>
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="get" client_handle="3" base_uri="rsync://wombat.invalid/">
+ <bpki_cert>
+ MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
+ BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
+ MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
+ b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArKYUtJaM5PH5917S
+ G2ACc7iBYdQO2HYyu8Gb6i9Q2Gxc3cWEX7RTBvgOL79pWf3GIdnoupzMnoZVtY3G
+ Ux2G/0WkmLui2TCeDhcfXdQ4rcp8J3V/6ESj+yuEPPOG8UN17mUKKgujrch6ZvgC
+ DO9AyOK/uXu+ABQXTPsn2pVe2EVh3V004ShLi8GKgVdqb/rW/6GTg0Xb/zLT6WWM
+ uT++6sXTlztJdQYkRamJvKfQDU1naC8mAkGf79Tba0xyBGAUII0GfREY6t4/+NAP
+ 2Yyb3xNlBqcJoTov0JfNKHZcCZePr79j7LK/hkZxxip+Na9xDpE+oQRV+DRukCRJ
+ diqg+wIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTDEsXJe6pjAQD4
+ ULlB7+GMDBlimTAfBgNVHSMEGDAWgBTDEsXJe6pjAQD4ULlB7+GMDBlimTANBgkq
+ hkiG9w0BAQUFAAOCAQEAWWkNcW6S1tKKqtzJsdfhjJiAAPQmOXJskv0ta/8f6Acg
+ cum1YieNdtT0n96P7CUHOWP8QBb91JzeewR7b6WJLwb1Offs3wNq3kk75pJe89r4
+ XY39EZHhMW+Dv0PhIKu2CgD4LeyH1FVTQkF/QObGEmkn+s+HTsuzd1l2VLwcP1Sm
+ sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
+ YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
+ 3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
+ </bpki_cert>
+ </client>
+ </msg>
+
+ <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="list"/>
+ </msg>
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="list" client_handle="3">
+ <bpki_cert>
+ MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
+ BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
+ MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
+ b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArKYUtJaM5PH5917S
+ G2ACc7iBYdQO2HYyu8Gb6i9Q2Gxc3cWEX7RTBvgOL79pWf3GIdnoupzMnoZVtY3G
+ Ux2G/0WkmLui2TCeDhcfXdQ4rcp8J3V/6ESj+yuEPPOG8UN17mUKKgujrch6ZvgC
+ DO9AyOK/uXu+ABQXTPsn2pVe2EVh3V004ShLi8GKgVdqb/rW/6GTg0Xb/zLT6WWM
+ uT++6sXTlztJdQYkRamJvKfQDU1naC8mAkGf79Tba0xyBGAUII0GfREY6t4/+NAP
+ 2Yyb3xNlBqcJoTov0JfNKHZcCZePr79j7LK/hkZxxip+Na9xDpE+oQRV+DRukCRJ
+ diqg+wIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTDEsXJe6pjAQD4
+ ULlB7+GMDBlimTAfBgNVHSMEGDAWgBTDEsXJe6pjAQD4ULlB7+GMDBlimTANBgkq
+ hkiG9w0BAQUFAAOCAQEAWWkNcW6S1tKKqtzJsdfhjJiAAPQmOXJskv0ta/8f6Acg
+ cum1YieNdtT0n96P7CUHOWP8QBb91JzeewR7b6WJLwb1Offs3wNq3kk75pJe89r4
+ XY39EZHhMW+Dv0PhIKu2CgD4LeyH1FVTQkF/QObGEmkn+s+HTsuzd1l2VLwcP1Sm
+ sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
+ YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
+ 3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
+ </bpki_cert>
+ </client>
+ </msg>
+
+ <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="destroy" client_handle="3"/>
+ </msg>
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <client action="destroy" client_handle="3"/>
+ </msg>
+
+ <!-- === -->
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <report_error error_code="your_hair_is_on_fire">text string</report_error>
+ </msg>
+
+ <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-control/">
+ <report_error error_code="your_hair_is_on_fire"/>
+ </msg>
+
+</completely_gratuitous_wrapper_element_to_let_me_run_this_through_xmllint>
diff --git a/ca/tests/publication-protocol-samples.xml b/ca/tests/publication-protocol-samples.xml
index 96b095a7..6d0a99a9 100644
--- a/ca/tests/publication-protocol-samples.xml
+++ b/ca/tests/publication-protocol-samples.xml
@@ -1,370 +1,107 @@
<!-- -*- SGML -*-
- - $Id$
+ - $Id$
-
- - Copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
+ - Sample PDUs for RPKI publication protocol, from current I-D.
-
- - Permission to use, copy, modify, and distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
+ - Copyright (c) 2014 IETF Trust and the persons identified as authors
+ - of the code. All rights reserved.
-
- - THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
+ - Redistribution and use in source and binary forms, with or without
+ - modification, are permitted provided that the following conditions
+ - are met:
-
+ - * Redistributions of source code must retain the above copyright
+ - notice, this list of conditions and the following disclaimer.
-
- - This is a collection of sample publication protocol PDU samples
- - to use as test cases for the publication protocol RelaxNG schema.
+ - * Redistributions in binary form must reproduce the above copyright
+ - notice, this list of conditions and the following disclaimer in
+ - the documentation and/or other materials provided with the
+ - distribution.
+ -
+ - * Neither the name of Internet Society, IETF or IETF Trust, nor the
+ - names of specific contributors, may be used to endorse or promote
+ - products derived from this software without specific prior written
+ - permission.
+ -
+ - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ - FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ - COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ - INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ - BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ - LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ - ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ - POSSIBILITY OF SUCH DAMAGE.
-->
<completely_gratuitous_wrapper_element_to_let_me_run_this_through_xmllint>
- <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="query" version="1">
- <config action="set">
- <bpki_crl>
- MIIBezBlAgEBMA0GCSqGSIb3DQEBCwUAMCMxITAfBgNVBAMTGFRlc3QgQ2VydGlm
- aWNhdGUgcHViZCBUQRcNMDgwNjAyMjE0OTQ1WhcNMDgwNzAyMjE0OTQ1WqAOMAww
- CgYDVR0UBAMCAQEwDQYJKoZIhvcNAQELBQADggEBAFWCWgBl4ljVqX/CHo+RpqYt
- vmKMnjPVflMXUB7i28RGP4DAq4l7deDU7Q82xEJyE4TXMWDWAV6UG6uUGum0VHWO
- cj9ohqyiZUGfOsKg2hbwkETm8sAENOsi1yNdyKGk6jZ16aF5fubxQqZa1pdGCSac
- 1/ZYC5sLLhEz3kmz+B9z9mXFVc5TgAh4dN3Gy5ftF8zZAFpDGnS4biCnRVqhGv6R
- 0Lh/5xmii+ZU6kNDhbeMsjJg+ZOmtN+wMeHSIbjiy0WuuaZ3k2xSh0C94anrHBZA
- vvCRhbazjR0Ef5OMZ5lcllw3uO8IHuoisHKkehy4Y0GySdj98fV+OuiRTH9vt/M=
- </bpki_crl>
- </config>
- </msg>
-
- <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="reply" version="1">
- <config action="set"/>
- </msg>
-
- <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="query" version="1">
- <config action="get"/>
- </msg>
-
- <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="reply" version="1">
- <config action="get">
- <bpki_crl>
- MIIBezBlAgEBMA0GCSqGSIb3DQEBCwUAMCMxITAfBgNVBAMTGFRlc3QgQ2VydGlm
- aWNhdGUgcHViZCBUQRcNMDgwNjAyMjE0OTQ1WhcNMDgwNzAyMjE0OTQ1WqAOMAww
- CgYDVR0UBAMCAQEwDQYJKoZIhvcNAQELBQADggEBAFWCWgBl4ljVqX/CHo+RpqYt
- vmKMnjPVflMXUB7i28RGP4DAq4l7deDU7Q82xEJyE4TXMWDWAV6UG6uUGum0VHWO
- cj9ohqyiZUGfOsKg2hbwkETm8sAENOsi1yNdyKGk6jZ16aF5fubxQqZa1pdGCSac
- 1/ZYC5sLLhEz3kmz+B9z9mXFVc5TgAh4dN3Gy5ftF8zZAFpDGnS4biCnRVqhGv6R
- 0Lh/5xmii+ZU6kNDhbeMsjJg+ZOmtN+wMeHSIbjiy0WuuaZ3k2xSh0C94anrHBZA
- vvCRhbazjR0Ef5OMZ5lcllw3uO8IHuoisHKkehy4Y0GySdj98fV+OuiRTH9vt/M=
- </bpki_crl>
- </config>
- </msg>
-
- <!-- === -->
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="create" client_handle="3" base_uri="rsync://wombat.invalid/">
- <bpki_cert>
- MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
- BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
- MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
- b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArKYUtJaM5PH5917S
- G2ACc7iBYdQO2HYyu8Gb6i9Q2Gxc3cWEX7RTBvgOL79pWf3GIdnoupzMnoZVtY3G
- Ux2G/0WkmLui2TCeDhcfXdQ4rcp8J3V/6ESj+yuEPPOG8UN17mUKKgujrch6ZvgC
- DO9AyOK/uXu+ABQXTPsn2pVe2EVh3V004ShLi8GKgVdqb/rW/6GTg0Xb/zLT6WWM
- uT++6sXTlztJdQYkRamJvKfQDU1naC8mAkGf79Tba0xyBGAUII0GfREY6t4/+NAP
- 2Yyb3xNlBqcJoTov0JfNKHZcCZePr79j7LK/hkZxxip+Na9xDpE+oQRV+DRukCRJ
- diqg+wIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTDEsXJe6pjAQD4
- ULlB7+GMDBlimTAfBgNVHSMEGDAWgBTDEsXJe6pjAQD4ULlB7+GMDBlimTANBgkq
- hkiG9w0BAQUFAAOCAQEAWWkNcW6S1tKKqtzJsdfhjJiAAPQmOXJskv0ta/8f6Acg
- cum1YieNdtT0n96P7CUHOWP8QBb91JzeewR7b6WJLwb1Offs3wNq3kk75pJe89r4
- XY39EZHhMW+Dv0PhIKu2CgD4LeyH1FVTQkF/QObGEmkn+s+HTsuzd1l2VLwcP1Sm
- sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
- YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
- 3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cert>
- </client>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="create" client_handle="3"/>
- </msg>
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="set" client_handle="3">
- <bpki_glue>
- MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
- BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
- MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
- b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArKYUtJaM5PH5917S
- G2ACc7iBYdQO2HYyu8Gb6i9Q2Gxc3cWEX7RTBvgOL79pWf3GIdnoupzMnoZVtY3G
- Ux2G/0WkmLui2TCeDhcfXdQ4rcp8J3V/6ESj+yuEPPOG8UN17mUKKgujrch6ZvgC
- DO9AyOK/uXu+ABQXTPsn2pVe2EVh3V004ShLi8GKgVdqb/rW/6GTg0Xb/zLT6WWM
- uT++6sXTlztJdQYkRamJvKfQDU1naC8mAkGf79Tba0xyBGAUII0GfREY6t4/+NAP
- 2Yyb3xNlBqcJoTov0JfNKHZcCZePr79j7LK/hkZxxip+Na9xDpE+oQRV+DRukCRJ
- diqg+wIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTDEsXJe6pjAQD4
- ULlB7+GMDBlimTAfBgNVHSMEGDAWgBTDEsXJe6pjAQD4ULlB7+GMDBlimTANBgkq
- hkiG9w0BAQUFAAOCAQEAWWkNcW6S1tKKqtzJsdfhjJiAAPQmOXJskv0ta/8f6Acg
- cum1YieNdtT0n96P7CUHOWP8QBb91JzeewR7b6WJLwb1Offs3wNq3kk75pJe89r4
- XY39EZHhMW+Dv0PhIKu2CgD4LeyH1FVTQkF/QObGEmkn+s+HTsuzd1l2VLwcP1Sm
- sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
- YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
- 3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_glue>
- </client>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="set" client_handle="3"/>
- </msg>
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="get" client_handle="3"/>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="get" client_handle="3" base_uri="rsync://wombat.invalid/">
- <bpki_cert>
- MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
- BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
- MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
- b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArKYUtJaM5PH5917S
- G2ACc7iBYdQO2HYyu8Gb6i9Q2Gxc3cWEX7RTBvgOL79pWf3GIdnoupzMnoZVtY3G
- Ux2G/0WkmLui2TCeDhcfXdQ4rcp8J3V/6ESj+yuEPPOG8UN17mUKKgujrch6ZvgC
- DO9AyOK/uXu+ABQXTPsn2pVe2EVh3V004ShLi8GKgVdqb/rW/6GTg0Xb/zLT6WWM
- uT++6sXTlztJdQYkRamJvKfQDU1naC8mAkGf79Tba0xyBGAUII0GfREY6t4/+NAP
- 2Yyb3xNlBqcJoTov0JfNKHZcCZePr79j7LK/hkZxxip+Na9xDpE+oQRV+DRukCRJ
- diqg+wIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTDEsXJe6pjAQD4
- ULlB7+GMDBlimTAfBgNVHSMEGDAWgBTDEsXJe6pjAQD4ULlB7+GMDBlimTANBgkq
- hkiG9w0BAQUFAAOCAQEAWWkNcW6S1tKKqtzJsdfhjJiAAPQmOXJskv0ta/8f6Acg
- cum1YieNdtT0n96P7CUHOWP8QBb91JzeewR7b6WJLwb1Offs3wNq3kk75pJe89r4
- XY39EZHhMW+Dv0PhIKu2CgD4LeyH1FVTQkF/QObGEmkn+s+HTsuzd1l2VLwcP1Sm
- sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
- YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
- 3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cert>
- </client>
- </msg>
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="list"/>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="list" client_handle="3">
- <bpki_cert>
- MIIDGzCCAgOgAwIBAgIJAKi+/+wUhQlxMA0GCSqGSIb3DQEBBQUAMCQxIjAgBgNV
- BAMTGVRlc3QgQ2VydGlmaWNhdGUgQm9iIFJvb3QwHhcNMDcwODAxMTk1MzEwWhcN
- MDcwODMxMTk1MzEwWjAkMSIwIAYDVQQDExlUZXN0IENlcnRpZmljYXRlIEJvYiBS
- b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArKYUtJaM5PH5917S
- G2ACc7iBYdQO2HYyu8Gb6i9Q2Gxc3cWEX7RTBvgOL79pWf3GIdnoupzMnoZVtY3G
- Ux2G/0WkmLui2TCeDhcfXdQ4rcp8J3V/6ESj+yuEPPOG8UN17mUKKgujrch6ZvgC
- DO9AyOK/uXu+ABQXTPsn2pVe2EVh3V004ShLi8GKgVdqb/rW/6GTg0Xb/zLT6WWM
- uT++6sXTlztJdQYkRamJvKfQDU1naC8mAkGf79Tba0xyBGAUII0GfREY6t4/+NAP
- 2Yyb3xNlBqcJoTov0JfNKHZcCZePr79j7LK/hkZxxip+Na9xDpE+oQRV+DRukCRJ
- diqg+wIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTDEsXJe6pjAQD4
- ULlB7+GMDBlimTAfBgNVHSMEGDAWgBTDEsXJe6pjAQD4ULlB7+GMDBlimTANBgkq
- hkiG9w0BAQUFAAOCAQEAWWkNcW6S1tKKqtzJsdfhjJiAAPQmOXJskv0ta/8f6Acg
- cum1YieNdtT0n96P7CUHOWP8QBb91JzeewR7b6WJLwb1Offs3wNq3kk75pJe89r4
- XY39EZHhMW+Dv0PhIKu2CgD4LeyH1FVTQkF/QObGEmkn+s+HTsuzd1l2VLwcP1Sm
- sqep6LAlFj62qqaIJzNeQ9NVkBqtkygnYlBOkaBTHfQTux3jYNpEo8JJB5e/WFdH
- YyMNrG2xMOtIC7T4+IOHgT8PgrNhaeDg9ctewj0X8Qi9nI9nXeinicLX8vj6hdEq
- 3ORv7RZMJNYqv1HQ3wUE2B7fCPFv7EUwzaCds1kgRQ==
- </bpki_cert>
- </client>
- </msg>
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="destroy" client_handle="3"/>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <client action="destroy" client_handle="3"/>
- </msg>
-
- <!-- === -->
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <certificate action="publish" uri="rsync://wombat.invalid/testbed/RIR/1/j7ghjwblCrcCp9ltyPDNzYKPfxc.cer">
- MIIE+jCCA+KgAwIBAgIBDTANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyhERjRBODAxN0U2
- NkE5RTkxNzJFNDYxMkQ4Q0Y0QzgzRjIzOERFMkEzMB4XDTA4MDUyMjE4MDUxMloXDTA4MDUy
- NDE3NTQ1M1owMzExMC8GA1UEAxMoOEZCODIxOEYwNkU1MEFCNzAyQTdEOTZEQzhGMENEQ0Q4
- MjhGN0YxNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMeziKp0k5nP7v6SZoNs
- XIMQYRgNtC6Fr/9Xm/1yQHomiPqHUk47rHhGojYiK5AhkrwoYhkH4UjJl2iwklDYczXuaBU3
- F5qrKlZ4aZnjIxdlP7+hktVpeApL6yuJTUAYeC3UIxnLDVdD6phydZ/FOQluffiNDjzteCCv
- oyOUatqt8WB+oND6LToHp028g1YUYLHG6mur0dPdcHOVXLSmUDuZ1HDz1nDuYvIVKjB/MpH9
- aW9XeaQ6ZFIlZVPwuuvI2brR+ThH7Gv27GL/o8qFdC300VQfoTZ+rKPGDE8K1cI906BL4kiw
- x9z0oiDcE96QCz+B0vsjc9mGaA1jgAxlXWsCAwEAAaOCAhcwggITMB0GA1UdDgQWBBSPuCGP
- BuUKtwKn2W3I8M3Ngo9/FzAfBgNVHSMEGDAWgBTfSoAX5mqekXLkYS2M9Mg/I43iozBVBgNV
- HR8ETjBMMEqgSKBGhkRyc3luYzovL2xvY2FsaG9zdDo0NDAwL3Rlc3RiZWQvUklSLzEvMzBx
- QUYtWnFucEZ5NUdFdGpQVElQeU9ONHFNLmNybDBFBggrBgEFBQcBAQQ5MDcwNQYIKwYBBQUH
- MAKGKXJzeW5jOi8vbG9jYWxob3N0OjQ0MDAvdGVzdGJlZC9XT01CQVQuY2VyMBgGA1UdIAEB
- /wQOMAwwCgYIKwYBBQUHDgIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwgZsG
- CCsGAQUFBwELBIGOMIGLMDQGCCsGAQUFBzAFhihyc3luYzovL2xvY2FsaG9zdDo0NDAwL3Rl
- c3RiZWQvUklSL1IwLzEvMFMGCCsGAQUFBzAKhkdyc3luYzovL2xvY2FsaG9zdDo0NDAwL3Rl
- c3RiZWQvUklSL1IwLzEvajdnaGp3YmxDcmNDcDlsdHlQRE56WUtQZnhjLm1uZjAaBggrBgEF
- BQcBCAEB/wQLMAmgBzAFAgMA/BUwPgYIKwYBBQUHAQcBAf8ELzAtMCsEAgABMCUDAwAKAzAO
- AwUAwAACAQMFAcAAAiAwDgMFAsAAAiwDBQDAAAJkMA0GCSqGSIb3DQEBCwUAA4IBAQCEhuH7
- jtI2PJY6+zwv306vmCuXhtu9Lr2mmRw2ZErB8EMcb5xypMrNqMoKeu14K2x4a4RPJkK4yATh
- M81FPNRsU5mM0acIRnAPtxjHvPME7PHN2w2nGLASRsZmaa+b8A7SSOxVcFURazENztppsolH
- eTpm0cpLItK7mNpudUg1JGuFo94VLf1MnE2EqARG1vTsNhel/SM/UvOArCCOBvf0Gz7kSuup
- DSZ7qx+LiDmtEsLdbGNQBiYPbLrDk41PHrxdx28qIj7ejZkRzNFw/3pi8/XK281h8zeHoFVu
- 6ghRPy5dbOA4akX/KG6b8XIx0iwPYdLiDbdWFbtTdPcXBauY
- </certificate>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <certificate action="publish" uri="rsync://wombat.invalid/testbed/RIR/1/j7ghjwblCrcCp9ltyPDNzYKPfxc.cer"/>
- </msg>
+ <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="query" version="3">
+ <!-- Zero or more PDUs -->
+ </msg>
+
+ <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="reply" version="3">
+ <!-- Zero or more PDUs -->
+ </msg>
+
+ <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="query" version="3">
+ <publish uri="rsync://wombat.example/Alice/blCrcCp9ltyPDNzYKPfxc.cer">
+ MIIE+jCCA+KgAwIBAgIBDTANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyhE
+ RjRBODAxN0U2NkE5RTkxNzJFNDYxMkQ4Q0Y0QzgzRjIzOERFMkEzMB4XDTA4
+ MDUyMjE4MDUxMloXDTA4MDUyNDE3NTQ1M1owMzExMC8GA1UEAxMoOEZCODIx
+ OEYwNkU1MEFCNzAyQTdEOTZEQzhGMENEQ0Q4MjhGN0YxNzCCASIwDQYJKoZI
+ hvcNAQEBBQADggEPADCCAQoCggEBAMeziKp0k5nP7v6SZoNsXIMQYRgNtC6F
+ r/9Xm/1yQHomiPqHUk47rHhGojYiK5AhkrwoYhkH4UjJl2iwklDYczXuaBU3
+ F5qrKlZ4aZnjIxdlP7+hktVpeApL6yuJTUAYeC3UIxnLDVdD6phydZ/FOQlu
+ ffiNDjzteCCvoyOUatqt8WB+oND6LToHp028g1YUYLHG6mur0dPdcHOVXLSm
+ UDuZ1HDz1nDuYvIVKjB/MpH9aW9XeaQ6ZFIlZVPwuuvI2brR+ThH7Gv27GL/
+ o8qFdC300VQfoTZ+rKPGDE8K1cI906BL4kiwx9z0oiDcE96QCz+B0vsjc9mG
+ aA1jgAxlXWsCAwEAAaOCAhcwggITMB0GA1UdDgQWBBSPuCGPBuUKtwKn2W3I
+ 8M3Ngo9/FzAfBgNVHSMEGDAWgBTfSoAX5mqekXLkYS2M9Mg/I43iozBVBgNV
+ HR8ETjBMMEqgSKBGhkRyc3luYzovL2xvY2FsaG9zdDo0NDAwL3Rlc3RiZWQv
+ UklSLzEvMzBxQUYtWnFucEZ5NUdFdGpQVElQeU9ONHFNLmNybDBFBggrBgEF
+ BQcBAQQ5MDcwNQYIKwYBBQUHMAKGKXJzeW5jOi8vbG9jYWxob3N0OjQ0MDAv
+ dGVzdGJlZC9XT01CQVQuY2VyMBgGA1UdIAEB/wQOMAwwCgYIKwYBBQUHDgIw
+ DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwgZsGCCsGAQUFBwEL
+ BIGOMIGLMDQGCCsGAQUFBzAFhihyc3luYzovL2xvY2FsaG9zdDo0NDAwL3Rl
+ c3RiZWQvUklSL1IwLzEvMFMGCCsGAQUFBzAKhkdyc3luYzovL2xvY2FsaG9z
+ dDo0NDAwL3Rlc3RiZWQvUklSL1IwLzEvajdnaGp3YmxDcmNDcDlsdHlQRE56
+ WUtQZnhjLm1uZjAaBggrBgEFBQcBCAEB/wQLMAmgBzAFAgMA/BUwPgYIKwYB
+ BQUHAQcBAf8ELzAtMCsEAgABMCUDAwAKAzAOAwUAwAACAQMFAcAAAiAwDgMF
+ AsAAAiwDBQDAAAJkMA0GCSqGSIb3DQEBCwUAA4IBAQCEhuH7jtI2PJY6+zwv
+ 306vmCuXhtu9Lr2mmRw2ZErB8EMcb5xypMrNqMoKeu14K2x4a4RPJkK4yATh
+ M81FPNRsU5mM0acIRnAPtxjHvPME7PHN2w2nGLASRsZmaa+b8A7SSOxVcFUR
+ azENztppsolHeTpm0cpLItK7mNpudUg1JGuFo94VLf1MnE2EqARG1vTsNhel
+ /SM/UvOArCCOBvf0Gz7kSuupDSZ7qx+LiDmtEsLdbGNQBiYPbLrDk41PHrxd
+ x28qIj7ejZkRzNFw/3pi8/XK281h8zeHoFVu6ghRPy5dbOA4akX/KG6b8XIx
+ 0iwPYdLiDbdWFbtTdPcXBauY
+ </publish>
+ </msg>
+
+ <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="reply" version="3">
+ <publish uri="rsync://wombat.example/Alice/blCrcCp9ltyPDNzYKPfxc.cer"/>
+ </msg>
+
+ <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="reply" version="3">
+ <report_error error_code="your_hair_is_on_fire">
+ Shampooing with sterno again, are we?
+ </report_error>
+ </msg>
+
+ <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="reply" version="3">
+ <report_error error_code="your_hair_is_on_fire"/>
+ </msg>
+
+ <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="query" version="3">
+ <withdraw uri="rsync://wombat.example/Alice/blCrcCp9ltyPDNzYKPfxc.cer" hash="deadf00d"/>
+ </msg>
+
+ <msg xmlns="http://www.hactrn.net/uris/rpki/publication-spec/" type="reply" version="3">
+ <withdraw uri="rsync://wombat.example/Alice/blCrcCp9ltyPDNzYKPfxc.cer"/>
+ </msg>
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <certificate action="withdraw" uri="rsync://wombat.invalid/testbed/RIR/1/j7ghjwblCrcCp9ltyPDNzYKPfxc.cer"/>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <certificate action="withdraw" uri="rsync://wombat.invalid/testbed/RIR/1/j7ghjwblCrcCp9ltyPDNzYKPfxc.cer"/>
- </msg>
-
- <!-- === -->
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <crl action="publish" uri="rsync://wombat.invalid/testbed/RIR/1/30qAF-ZqnpFy5GEtjPTIPyON4qM.crl">
- MIIBwzCBrAIBATANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQDEyhERjRBODAxN0U2NkE5RTkx
- NzJFNDYxMkQ4Q0Y0QzgzRjIzOERFMkEzFw0wODA1MjIxODA0MTZaFw0wODA1MjIxODA1MTZa
- MBQwEgIBAhcNMDgwNTIyMTc1ODQwWqAvMC0wHwYDVR0jBBgwFoAU30qAF+ZqnpFy5GEtjPTI
- PyON4qMwCgYDVR0UBAMCAQYwDQYJKoZIhvcNAQELBQADggEBAKkM0Fb/pJpHVHWZyjp4wojH
- W2KkvA/DFtBiz3moxocSnkDVP3QI19uVvqdC6nH3hJyFmsAMwULR0f1XU/V4j+X+FqYEl6Nv
- p8zAEPIB4r8xbEFs7udRwXRAjkJmOQbv9aomF2i+d7jpTFVJxShZWOgsoGEhIy/aktKQrOIR
- c4ZDrXpQwXVj2Y7+cGVfQ4gvnPOdlyLcnNovoegazATvA3EcidBNPWRg7XTCz0LVBEB7JgPd
- nNyXRg35HdMEHBl7U9uUQJXP7S02oaQ1ehNDMfaJPgBBpQtAnM1lIzJfevd9+e4ywGsRpxAV
- 8wxTXSPd1jwuKtS0kwrgsrQ8Ya85xUE=
- </crl>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <crl action="publish" uri="rsync://wombat.invalid/testbed/RIR/1/30qAF-ZqnpFy5GEtjPTIPyON4qM.crl"/>
- </msg>
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <crl action="withdraw" uri="rsync://wombat.invalid/testbed/RIR/1/30qAF-ZqnpFy5GEtjPTIPyON4qM.crl"/>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <crl action="withdraw" uri="rsync://wombat.invalid/testbed/RIR/1/30qAF-ZqnpFy5GEtjPTIPyON4qM.crl"/>
- </msg>
-
- <!-- === -->
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <manifest action="publish" uri="rsync://wombat.invalid/testbed/RIR/R0/1/j7ghjwblCrcCp9ltyPDNzYKPfxc.mft">
- MIIHCgYJKoZIhvcNAQcCoIIG+zCCBvcCAQMxDTALBglghkgBZQMEAgEwggEeBgsqhkiG9w0B
- CRABGqCCAQ0EggEJMIIBBQIBEhgPMjAwODA1MjIxODA1MTVaGA8yMDA4MDUyMjE4MDYxNVoG
- CWCGSAFlAwQCATCB0jBEFh9ZbTVUTzRJYnlDb0pNZ3E2R2o4dG41Mng5U0UuY2VyAyEA4L8Z
- WMyuhOx+o6kUfsRR++QjSaRaATy4UOeVtjvZVqYwRBYfWnRxbjB3NEVFbU9hclAzQmd1SUY3
- MDhhNTM4LmNlcgMhAGQI1gYJotxWmwzcmpLNFZJ656uWOjcPYANlbNz80xm8MEQWH2xxa1Vx
- RHEwMDBESW9ZVjlybXdLTGdrN2F6by5jZXIDIQB7jRAEpkPvc4s4PX9vDvnTifj3BIE145FO
- 1ne2kEejVqCCBBEwggQNMIIC9aADAgECAgEFMA0GCSqGSIb3DQEBCwUAMDMxMTAvBgNVBAMT
- KDhGQjgyMThGMDZFNTBBQjcwMkE3RDk2REM4RjBDRENEODI4RjdGMTcwHhcNMDgwNTIyMTc1
- NzQ5WhcNMDgwNTI0MTc1NDUzWjAzMTEwLwYDVQQDEyhERkRBMjMyMUJENEVCMDNFQTE1RkUy
- N0NGRkRGMEFGRkU1QjBFNjY4MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2/Gk
- AHW5pDqye0+TvUp7sl0rVgmTfeHpVp18ypxvuovogVJgkjEtBEikfaFU0646wYD6JM6IJFJX
- lWLWd7bVmhkWViKuZL0VmT2wpUToNHCLUGUQUVVX8R7oSHFdTArv2AqH+6yt0LmczDH1y2M6
- 2Tgkz9wZ9ryyuPx3VX4PkHzUMlkGFICj1fvyXkcAu8jBaxR9UME1c413TPaMi6lMh1HUmtVN
- LJMP5+/SnwEAW/Z3dPClCFIgQXK3nAKPVzAIwADEiqhK7cSchhO7ikI1CVt0XzG4n7oaILc3
- Hq/DAxyiutw5GlkUlKPri2YJzJ3+H4P+TveSa/b02fVA5csm/QIDAQABo4IBKjCCASYwHQYD
- VR0OBBYEFN/aIyG9TrA+oV/ifP/fCv/lsOZoMB8GA1UdIwQYMBaAFI+4IY8G5Qq3AqfZbcjw
- zc2Cj38XMFgGA1UdHwRRME8wTaBLoEmGR3JzeW5jOi8vbG9jYWxob3N0OjQ0MDAvdGVzdGJl
- ZC9SSVIvUjAvMS9qN2doandibENyY0NwOWx0eVBETnpZS1BmeGMuY3JsMGAGCCsGAQUFBwEB
- BFQwUjBQBggrBgEFBQcwAoZEcnN5bmM6Ly9sb2NhbGhvc3Q6NDQwMC90ZXN0YmVkL1JJUi8x
- L2o3Z2hqd2JsQ3JjQ3A5bHR5UEROellLUGZ4Yy5jZXIwGAYDVR0gAQH/BA4wDDAKBggrBgEF
- BQcOAjAOBgNVHQ8BAf8EBAMCB4AwDQYJKoZIhvcNAQELBQADggEBADpsE9HfgVTgmX1WeJTE
- fm87CXuOoGH85RFiAngSt5kR4gYCyadklOZ7Eta+ERUZVu4tcKO6sJOTuHPfVrAvR0VpgH+j
- PvXboYWSfwJdi00BC28ScrVM2zarA7B10+J6Oq8tbFlAyVBkrbuPet/axmndBtGWhrBTynGl
- nc/5L371Lxy6CrOYqXO0Qx3SrOKaailAe3zTIpHQeACqnPdL00zIBw/hVy/VNaH1wy+FmhAz
- TsmsQUrMyovJcu/ry5w0KHlP8BTnqfykikCWR+Lw0VQHmpJGAbtrmsOeIbfLY1zl7A81lDAl
- AG/ZH1DUdDOUIXMLHWur+D2rwjp7RL16LHYxggGqMIIBpgIBA4AU39ojIb1OsD6hX+J8/98K
- /+Ww5mgwCwYJYIZIAWUDBAIBoGswGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEaMBwGCSqG
- SIb3DQEJBTEPFw0wODA1MjIxODA1MTVaMC8GCSqGSIb3DQEJBDEiBCBj/GjEQw3LgKPf5DTz
- 8eu1fcp6/cQjqqne6ZqFkF42azANBgkqhkiG9w0BAQEFAASCAQBOY0uHNMwy/o1nFANSgha5
- PZxt8fz+wTrbeomCb+lxqQKq1clcSiQORVGc8NmqC8sS5OR3eTw/3qnK9yPHxz2UQ4hn1pBa
- +Zy5veM61qMaXCw6w98EyNcvUfA1AkezAjkabfHQDs3o4Ezh49thXXyRcBoF+O6Lmi+LZbT2
- 4jvfFbaXW9zsb6/DaoDkeHnlk+YYgfSP4wOnkK5uqxtDW8QpMPq3GGdIp0oJDkzEdj7VsWIL
- 9JP2mxxL8fTPVUyAPOmURYwYDXqhke2O9eVDiCYhrEfB8/84Rint4Cj8n5aCujnAtqtwxHpD
- 0NRYO/V1MjhG+ARy1vRH1Dm0r92RBam3
- </manifest>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <manifest action="publish" uri="rsync://wombat.invalid/testbed/RIR/R0/1/j7ghjwblCrcCp9ltyPDNzYKPfxc.mft"/>
- </msg>
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <manifest action="withdraw" uri="rsync://wombat.invalid/testbed/RIR/R0/1/j7ghjwblCrcCp9ltyPDNzYKPfxc.mft"/>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <manifest action="withdraw" uri="rsync://wombat.invalid/testbed/RIR/R0/1/j7ghjwblCrcCp9ltyPDNzYKPfxc.mft"/>
- </msg>
-
- <!-- === -->
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <roa action="publish" uri="rsync://wombat.invalid/testbed/RIR/R0/1/lqkUqDq000DIoYV9rmwKLgk7azo.roa">
- MIIGmwYJKoZIhvcNAQcCoIIGjDCCBogCAQMxDTALBglghkgBZQMEAgEwKgYLKoZIhvcNAQkQ
- ARigGwQZMBcCAgKaMBEwDwQCAAEwCTAHAwUACgMALKCCBJgwggSUMIIDfKADAgECAgEJMA0G
- CSqGSIb3DQEBCwUAMDMxMTAvBgNVBAMTKDhGQjgyMThGMDZFNTBBQjcwMkE3RDk2REM4RjBD
- RENEODI4RjdGMTcwHhcNMDgwNTIyMTc1ODI0WhcNMDgwNTI0MTc1NDUzWjAzMTEwLwYDVQQD
- Eyg5NkE5MTRBODNBQjREMzQwQzhBMTg1N0RBRTZDMEEyRTA5M0I2QjNBMIIBIjANBgkqhkiG
- 9w0BAQEFAAOCAQ8AMIIBCgKCAQEApoK50BjW5bcF4gsdaYhndtVADZvQk3RCsvuqDElF6uLi
- 9BYQq/NHyDOIMyJtvCmzjdv3Y135n1sNO7YvssqHlt7dMfCQTD5ND1GpFnQLdWP7stWM5AbO
- nJV6+PtDITUA/QHOli7Do0YCUgR6G+1QJsMu0DK+TRSzBJ6WP7WIYOBOOg3y/NKc1rkWhS1Q
- dcQepbHgQYZHzzpjNDR6+oYVuhuUEWx1P6O4pv/p+tpE0SDua7jBjMywIYHkPQBecf2IX1RU
- WNojB9dJlnRx5YUUneP2SvF2MrmdDbclgzwhf6alqD2OjiMuoBOG8yeTKcuhzCMnrFAklbst
- 6x3Rnq9BswIDAQABo4IBsTCCAa0wHQYDVR0OBBYEFJapFKg6tNNAyKGFfa5sCi4JO2s6MB8G
- A1UdIwQYMBaAFI+4IY8G5Qq3AqfZbcjwzc2Cj38XMFgGA1UdHwRRME8wTaBLoEmGR3JzeW5j
- Oi8vbG9jYWxob3N0OjQ0MDAvdGVzdGJlZC9SSVIvUjAvMS9qN2doandibENyY0NwOWx0eVBE
- TnpZS1BmeGMuY3JsMGAGCCsGAQUFBwEBBFQwUjBQBggrBgEFBQcwAoZEcnN5bmM6Ly9sb2Nh
- bGhvc3Q6NDQwMC90ZXN0YmVkL1JJUi8xL2o3Z2hqd2JsQ3JjQ3A5bHR5UEROellLUGZ4Yy5j
- ZXIwGAYDVR0gAQH/BA4wDDAKBggrBgEFBQcOAjAOBgNVHQ8BAf8EBAMCB4AwYwYIKwYBBQUH
- AQsEVzBVMFMGCCsGAQUFBzALhkdyc3luYzovL2xvY2FsaG9zdDo0NDAwL3Rlc3RiZWQvUklS
- L1IwLzEvbHFrVXFEcTAwMERJb1lWOXJtd0tMZ2s3YXpvLnJvYTAgBggrBgEFBQcBBwEB/wQR
- MA8wDQQCAAEwBwMFAAoDACwwDQYJKoZIhvcNAQELBQADggEBAL8iHwsyGOYhhIf3nVuL361y
- TOJSP8SR0mtQLHULPl+GkYk+5MRNWtL8ucTXFvniYJtOCXEGGEIO9eDXvkQIXQSz/qbF9URQ
- fuf38ghRza257syVhal6UHTgCFYuRIO9CUjcU1vkWUxH05BBIHlYdtlIQbAG/mRsCPCEgSmG
- bbQaomGlUOqmJMlKxLLcoAtz2vDrwVotgHyfS5h2mgINFjnlLcNLTci+sfs7/aQAkDYx7K98
- se/ZlMorvGkFNhHoOTcGIrWkYsfkbTygVwWRm278PaB3o4449Kvsg/gb8BZeHXRs68cr5Mcf
- jP7Q6jeypjTgDBnwb1yzoJIKWszFuSgxggGqMIIBpgIBA4AUlqkUqDq000DIoYV9rmwKLgk7
- azowCwYJYIZIAWUDBAIBoGswGgYJKoZIhvcNAQkDMQ0GCyqGSIb3DQEJEAEYMBwGCSqGSIb3
- DQEJBTEPFw0wODA1MjIxNzU4MjRaMC8GCSqGSIb3DQEJBDEiBCDCyf9v9Wed515TRp2WwnyM
- 1rk6dB///X+aqIym2e9jdTANBgkqhkiG9w0BAQEFAASCAQAFvzrHeRPW+wn4WSyoyBEq0zKS
- Cyh5tu1qTR0NHs6Rr/p8Pk81P1HQLND/U+znJZKLWlO2niEHUXPIicPDYchbj8ApH9VxKA+1
- lCWllOzFAsYyZFr3/VNs9pVp2eT4F9eEYBrBVDSNrD72MMTlWm1T5MEXqltTJJOCKzUEX96x
- 91iW6A+4erop7S8hpCnxqkTin4bFVreqYcGc4CC4bh+L9pPqJnURcEk7Qeu/WEHQBm38voB4
- S11qRZNrJMQ99oiJR7hXDIBm66HjGqoUL2gPCfpgJEVVnM9pVv2k889z4eTTck2Qj54gga2W
- Xkvw4Je420aDx88s9T2+PqXcbZ4g
- </roa>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <roa action="publish" uri="rsync://wombat.invalid/testbed/RIR/R0/1/lqkUqDq000DIoYV9rmwKLgk7azo.roa"/>
- </msg>
-
- <msg version="1" type="query" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <roa action="withdraw" uri="rsync://wombat.invalid/testbed/RIR/R0/1/lqkUqDq000DIoYV9rmwKLgk7azo.roa"/>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <roa action="withdraw" uri="rsync://wombat.invalid/testbed/RIR/R0/1/lqkUqDq000DIoYV9rmwKLgk7azo.roa"/>
- </msg>
-
- <!-- === -->
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <report_error error_code="your_hair_is_on_fire">text string</report_error>
- </msg>
-
- <msg version="1" type="reply" xmlns="http://www.hactrn.net/uris/rpki/publication-spec/">
- <report_error error_code="your_hair_is_on_fire"/>
- </msg>
</completely_gratuitous_wrapper_element_to_let_me_run_this_through_xmllint>
diff --git a/ca/tests/rcynic.conf b/ca/tests/rcynic.conf
index ea31fe58..4d5fd619 100644
--- a/ca/tests/rcynic.conf
+++ b/ca/tests/rcynic.conf
@@ -3,12 +3,15 @@
# rcynic configuration for looking at yamltest results.
[rcynic]
-xml-summary = rcynic.xml
+sql-engine = sqlite3
+sql-database = rcynic.db
+xml-file = rcynic.xml
+trust-anchor-locators = yamltest.dir
+log-destination = stderr
+log-level = debug
jitter = 0
-use-links = yes
-use-syslog = no
-use-stderr = yes
-log-level = log_debug
-max-parallel-fetches = 32
+workers = 20
-trust-anchor-locator = yamltest.dir/root.tal
+[rpki-rtr]
+log-destination = stderr
+log-level = debug
diff --git a/ca/tests/rrdp-samples.xml b/ca/tests/rrdp-samples.xml
new file mode 100644
index 00000000..966d9887
--- /dev/null
+++ b/ca/tests/rrdp-samples.xml
@@ -0,0 +1,81 @@
+<!-- -*- SGML -*-
+ - $Id$
+ -
+ - This is a collection of sample RRDP PDU samples to use as test
+ - cases for the RRDP RelaxNG schema.
+ -
+ - Need to figure out whose copyright should be on these examples.
+ - BSD in any case so makes little practical difference, just need to
+ - be sure we give proper credit. Might be RIPE, might be IETF
+ - Trust, might be us for derivative work. Slap ours on for the
+ - moment, fix when we figure this out.
+ -
+ - Copyright (C) 2014 Dragon Research Labs ("DRL")
+ -
+ - Permission to use, copy, modify, and distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+ -->
+
+<completely_gratuitous_wrapper_element_to_let_me_run_this_through_xmllint>
+
+ <!-- Notification file: lists current snapshots and deltas -->
+
+ <notification xmlns="http://www.ripe.net/rpki/rrdp" version="1" session_id="d9f6dc91-0394-40b9-9663-66aef4bb623a" serial="185">
+ <snapshot uri="http://host.example/d9f6dc91-0394-40b9-9663-66aeb623a/snapshot/202.xml" hash="279b79fd8389e20585f26735ee70e0e4d4b8af23bb2e2e611c70e92d2433edea"/>
+ <delta serial="183" uri="http://host.example/d9f6c91-0394-40b9-9663-66aeb623a/deltas/183.xml" hash="a2d56ec180f2dde2a46bf90565932e25829b852a0b43107d5de6e41394c29100"/>
+ <delta serial="184" uri="http://host.example/d9f6c91-0394-40b9-9663-66aeb623a/deltas/184.xml" hash="a2d56ec180f2dde2a46b2e0565932e25829b852a0b43107d5de6e41394c29200"/>
+ <delta serial="185" uri="http://host.example/d9f6c91-0394-40b9-9663-66aeb623a/deltas/185.xml" hash="a2d56ec180f2dde2a46b2e0565932e25829b852a0b43107d5de6e41394c29201"/>
+ </notification>
+
+ <!-- Snapshot segment: think DNS AXFR -->
+
+ <snapshot version="1" xmlns="http://www.ripe.net/rpki/rrdp" session_id="d9f6dc91-0394-40b9-9663-66aef4bb623a" serial="1">
+ <publish uri="http://host.example/foo/bar/cer1.cer">
+ MIIE+jCCA+KgAwIBAgIBDTANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQD
+ jRBODAxN0U2NkE5RTkxNzJFNDYxMkQ4Q0Y0QzgzRjIzOERFMkEzMB4XE
+ h8zeHoFVu6ghRPy5dbOA4akX/KG6b8XIx0iwPYdLiDbdWFbtTdPcXBau
+ </publish>
+ <publish uri="http://host.example/foo/bar/cer2.cer">
+ MIIE+jCCA+KgAwIBAgIBDTANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQD
+ h8zeHoFVu6ghRPy5dbOA4akX/KG6b8XIx0iwPYdLiDbdWFbtTdPcXBau
+ jRBODAxN0U2NkE5RTkxNzJFNDYxMkQ4Q0Y0QzgzRjIzOERFMkEzMB4XD
+ </publish>
+ <publish uri="http://host.example/foo/bar/cer3.cer">
+ MIIE+jCCA+KgAwIBAgIBDTANBgkqhkiG9w0BAQsFADAzMTEwLwYDVQQD
+ h8zeHoFVu6ghRPy5dbOA4akX/KG6b8XIx0iwPYdLiDbdWFbtTdPcXBau
+ jRBODAxN0U2NkE5RTkxNzJFNDYxMkQ4Q0Y0QzgzRjIzOERFMkEzMB4XD
+ </publish>
+ </snapshot>
+
+ <!-- Delta segment: think DNS IXFR -->
+
+ <delta version="1" xmlns="http://www.ripe.net/rpki/rrdp" session_id="d9f6dc91-0394-40b9-9663-66aef4bb623a" serial="3">
+ <publish uri="http://host.example/foo/bar/cer1.cer">
+ MIIE+jCCA+KgAwIBAgIBDTANBgkqhkiG9w0BAQsFADAzMTEw
+ jRBODAxN0U2NkE5RTkxNzJFNDYxMkQ4Q0Y0QzgzRjIzOERFM
+ h8zeHoFVu6ghRPy5dbOA4akX/KG6b8XIx0iwPYdLiDbdWFbt
+ </publish>
+ <withdraw uri="http://host.example/foo/bar/cer1.cer" hash="deadf00d"/>
+ <publish uri="http://host.example/foo/bar/cer2.cer">
+ MIIE+jCCA+KgAwIBAgIBDTANBgkqhkiG9w0BAQsFADAzMTEw
+ h8zeHoFVu6ghRPy5dbOA4akX/KG6b8XIx0iwPYdLiDbdWFbt
+ jRBODAxN0U2NkE5RTkxNzJFNDYxMkQ4Q0Y0QzgzRjIzOERFM
+ </publish>
+ <publish uri="http://host.example/foo/bar/cer3.cer" hash="deadf00d">
+ MIIE+jCCA+KgAwIBAgIBDTANBgkqhkiG9w0BAQsFADAzMTEw
+ h8zeHoFVu6ghRPy5dbOA4akX/KG6b8XIx0iwPYdLiDbdWFbt
+ jRBODAxN0U2NkE5RTkxNzJFNDYxMkQ4Q0Y0QzgzRjIzOERFM
+ </publish>
+ <withdraw uri="http://host.example/foo/bar/cer4.cer" hash="deadf00d"/>
+ </delta>
+
+</completely_gratuitous_wrapper_element_to_let_me_run_this_through_xmllint>
diff --git a/ca/tests/smoketest.6.yaml b/ca/tests/smoketest.6.yaml
index e8d65433..e33e75c2 100644
--- a/ca/tests/smoketest.6.yaml
+++ b/ca/tests/smoketest.6.yaml
@@ -54,6 +54,9 @@ kids:
roa_request:
- asn: 64533
ipv6: 2001:db8::80/121
+ router_cert:
+ - router_id: 666
+ asn: 64533
ghostbusters:
- |
BEGIN:VCARD
diff --git a/ca/tests/smoketest.py b/ca/tests/smoketest.py
index 32f11cc3..2bce936b 100644
--- a/ca/tests/smoketest.py
+++ b/ca/tests/smoketest.py
@@ -47,8 +47,7 @@ import rpki.http
import rpki.log
import rpki.left_right
import rpki.config
-import rpki.publication
-import rpki.async
+import rpki.publication_control
from rpki.mysql_import import MySQLdb
@@ -68,7 +67,7 @@ parser.add_argument("yaml_file", type = argparse.FileType("r"),
help = "YAML description of test network")
args = parser.parse_args()
-cfg = rpki.config.parser(args.config, "smoketest", allow_missing = True)
+cfg = rpki.config.parser(set_filename = args.config, section = "smoketest", allow_missing = True)
# Load the YAML script early, so we can report errors ASAP
@@ -77,13 +76,14 @@ yaml_script = [y for y in yaml.safe_load_all(args.yaml_file)]
# Define port allocator early, so we can use it while reading config
def allocate_port():
- """
- Allocate a TCP port number.
- """
- global base_port
- p = base_port
- base_port += 1
- return p
+ """
+ Allocate a TCP port number.
+ """
+
+ global base_port
+ p = base_port
+ base_port += 1
+ return p
# Most filenames in the following are relative to the working directory.
@@ -139,14 +139,14 @@ pubd_last_cms_time = None
ecdsa_params = None
class CantRekeyYAMLLeaf(Exception):
- """
- Can't rekey YAML leaf.
- """
+ """
+ Can't rekey YAML leaf.
+ """
class CouldntIssueBSCEECertificate(Exception):
- """
- Couldn't issue BSC EE certificate
- """
+ """
+ Couldn't issue BSC EE certificate
+ """
sql_conversions = MySQLdb.converters.conversions.copy()
sql_conversions.update({
@@ -154,195 +154,205 @@ sql_conversions.update({
MySQLdb.converters.FIELD_TYPE.DATETIME : rpki.sundial.datetime.DateTime_or_None })
def main():
- """
- Main program.
- """
-
- rpki.log.init(smoketest_name, argparse.Namespace(log_level = logging.DEBUG,
- log_handler = lambda: logging.StreamHandler(sys.stdout)))
- logger.info("Starting")
-
- rpki.http.http_client.timeout = rpki.sundial.timedelta(hours = 1)
-
- pubd_process = None
- rootd_process = None
- rsyncd_process = None
-
- rpki_sql = mangle_sql(rpki_sql_file)
- irdb_sql = mangle_sql(irdb_sql_file)
- pubd_sql = mangle_sql(pub_sql_file)
-
- logger.info("Initializing test directory")
-
- # Connect to test directory, creating it if necessary
- try:
- os.chdir(smoketest_dir)
- except OSError:
- os.makedirs(smoketest_dir)
- os.chdir(smoketest_dir)
-
- # Now that we're in the right directory, we can figure out whether
- # we have a private openssl executable to use
- global prog_openssl
- if not os.path.exists(prog_openssl):
- prog_openssl = "openssl"
-
- # Discard everything but keys, which take a while to generate.
- # Apparently os.walk() can't tell the difference between directories
- # and symlinks to directories, so we have to handle both.
- for root, dirs, files in os.walk(".", topdown = False):
- for fn in files:
- if not fn.endswith(".key"):
- os.remove(os.path.join(root, fn))
- for d in dirs:
- try:
- os.rmdir(os.path.join(root, d))
- except OSError, e:
- if e.errno == errno.ENOTDIR:
- os.remove(os.path.join(root, d))
- else:
- raise
-
- logger.info("Reading master YAML configuration")
- y = yaml_script.pop(0)
+ """
+ Main program.
+ """
- logger.info("Constructing internal allocation database")
- db = allocation_db(y)
+ log_handler = logging.StreamHandler(sys.stdout)
+ log_handler.setFormatter(rpki.config.Formatter("smoketest", log_handler, logging.DEBUG))
+ logging.getLogger().addHandler(log_handler)
+ logging.getLogger().setLevel(logging.DEBUG)
- logger.info("Constructing BPKI keys and certs for rootd")
- setup_bpki_cert_chain(rootd_name, ee = ("RPKI",))
+ logger.info("Starting")
- logger.info("Constructing BPKI keys and certs for pubd")
- setup_bpki_cert_chain(pubd_name, ee = ("PUBD", "IRBE"))
+ rpki.http.http_client.timeout = rpki.sundial.timedelta(hours = 1)
+ pubd_process = None
+ rootd_process = None
+ rsyncd_process = None
- for a in db:
- a.setup_bpki_certs()
+ rpki_sql = mangle_sql(rpki_sql_file)
+ irdb_sql = mangle_sql(irdb_sql_file)
+ pubd_sql = mangle_sql(pub_sql_file)
- setup_publication(pubd_sql)
- setup_rootd(db.root, y.get("rootd", {}))
- setup_rsyncd()
- setup_rcynic()
+ logger.info("Initializing test directory")
- for a in db.engines:
- a.setup_conf_file()
- a.setup_sql(rpki_sql, irdb_sql)
- a.sync_sql()
+ # Connect to test directory, creating it if necessary
+ try:
+ os.chdir(smoketest_dir)
+ except OSError:
+ os.makedirs(smoketest_dir)
+ os.chdir(smoketest_dir)
+
+ # Now that we're in the right directory, we can figure out whether
+ # we have a private openssl executable to use
+ global prog_openssl
+ if not os.path.exists(prog_openssl):
+ prog_openssl = "openssl"
+
+ # Discard everything but keys, which take a while to generate.
+ # Apparently os.walk() can't tell the difference between directories
+ # and symlinks to directories, so we have to handle both.
+ for root, dirs, files in os.walk(".", topdown = False):
+ for fn in files:
+ if not fn.endswith(".key"):
+ os.remove(os.path.join(root, fn))
+ for d in dirs:
+ try:
+ os.rmdir(os.path.join(root, d))
+ except OSError, e:
+ if e.errno == errno.ENOTDIR:
+ os.remove(os.path.join(root, d))
+ else:
+ raise
+
+ logger.info("Reading master YAML configuration")
+ y = yaml_script.pop(0)
+
+ logger.info("Constructing internal allocation database")
+ db = allocation_db(y)
+
+ logger.info("Constructing BPKI keys and certs for rootd")
+ setup_bpki_cert_chain(rootd_name, ee = ("RPKI",))
+
+ logger.info("Constructing BPKI keys and certs for pubd")
+ setup_bpki_cert_chain(pubd_name, ee = ("PUBD", "IRBE"))
+
+
+ for a in db:
+ a.setup_bpki_certs()
+
+ setup_publication(pubd_sql, db.root.irdb_db_name)
+ setup_rootd(db.root, y.get("rootd", {}), db)
+ setup_rsyncd()
+ setup_rcynic()
- try:
+ for a in db.engines:
+ a.setup_conf_file()
+ a.setup_sql(rpki_sql, irdb_sql)
+ a.sync_sql()
- logger.info("Starting rootd")
- rootd_process = subprocess.Popen((prog_python, prog_rootd, "--foreground", "--log-stdout", "--log-level", "debug", "--config", rootd_name + ".conf"))
+ try:
- logger.info("Starting pubd")
- pubd_process = subprocess.Popen((prog_python, prog_pubd, "--foreground", "--log-stdout", "--log-level", "debug", "--config", pubd_name + ".conf") +
- (("-p", pubd_name + ".prof") if args.profile else ()))
+ logger.info("Starting rootd")
+ rootd_process = subprocess.Popen((prog_python, prog_rootd, "--foreground", "--log-stdout", "--log-level", "debug"),
+ env = dict(os.environ, RPKI_CONF = rootd_name + ".conf"))
- logger.info("Starting rsyncd")
- rsyncd_process = subprocess.Popen((prog_rsyncd, "--daemon", "--no-detach", "--config", rsyncd_name + ".conf"))
+ logger.info("Starting pubd")
+ pubd_process = subprocess.Popen((prog_python, prog_pubd, "--foreground", "--log-stdout", "--log-level", "debug") +
+ (("-p", pubd_name + ".prof") if args.profile else ()),
+ env = dict(os.environ, RPKI_CONF = pubd_name + ".conf"))
- # Start rpkid and irdbd instances
- for a in db.engines:
- a.run_daemons()
+ logger.info("Starting rsyncd")
+ rsyncd_process = subprocess.Popen((prog_rsyncd, "--daemon", "--no-detach", "--config", rsyncd_name + ".conf"))
- # From this point on we'll be running event-driven, so the rest of
- # the code until final exit is all closures.
+ # Start rpkid and irdbd instances
+ for a in db.engines:
+ a.run_daemons()
- def start():
- rpki.async.iterator(db.engines, create_rpki_objects, created_rpki_objects)
+ # From this point on we'll be running event-driven, so the rest of
+ # the code until final exit is all closures.
- def create_rpki_objects(iterator, a):
- a.create_rpki_objects(iterator)
+ def start():
+ rpki.async.iterator(db.engines, create_rpki_objects, create_pubd_objects)
- def created_rpki_objects():
+ def create_rpki_objects(iterator, a):
+ a.create_rpki_objects(iterator)
- # Set pubd's BPKI CRL
- set_pubd_crl(yaml_loop)
+ def create_pubd_objects():
+ call_pubd([rpki.publication_control.client_elt.make_pdu(action = "create",
+ client_handle = db.root.client_handle + "-" + rootd_name,
+ base_uri = rootd_sia,
+ bpki_cert = cross_certify(rootd_name + "-TA", pubd_name + "-TA"))],
+ cb = lambda ignored: yaml_loop())
- def yaml_loop():
+ def yaml_loop():
- # This is probably where we should be updating expired BPKI
- # objects, particular CRLs
+ # This is probably where we should be updating expired BPKI
+ # objects, particular CRLs
- logger.info("Running cron for all RPKI engines")
- rpki.async.iterator(db.engines, run_cron, run_yaml)
+ logger.info("Running cron for all RPKI engines")
+ rpki.async.iterator(db.engines, run_cron, run_yaml)
- def run_cron(iterator, a):
- a.run_cron(iterator)
+ def run_cron(iterator, a):
+ a.run_cron(iterator)
- def run_yaml():
+ def run_yaml():
- # Run rcynic to check results
- run_rcynic()
+ # Run rcynic to check results
+ run_rcynic()
- # Apply next delta if we have one; otherwise, we're done.
- if yaml_script:
- logger.info("Applying deltas")
- db.apply_delta(yaml_script.pop(0), apply_delta_done)
- else:
- logger.info("No more deltas to apply, done")
- rpki.async.exit_event_loop()
+ # Apply next delta if we have one; otherwise, we're done.
+ if yaml_script:
+ logger.info("Applying deltas")
+ db.apply_delta(yaml_script.pop(0), apply_delta_done)
+ else:
+ logger.info("No more deltas to apply, done")
+ rpki.async.exit_event_loop()
- def apply_delta_done():
+ def apply_delta_done():
- # Resync IRDBs
- for a in db.engines:
- a.sync_sql()
+ # Resync IRDBs
+ for a in db.engines:
+ a.sync_sql()
- # Loop until we run out of control YAML
- yaml_loop()
+ # Loop until we run out of control YAML
+ yaml_loop()
- logger.info("Sleeping %d seconds while daemons start up", startup_delay)
- rpki.async.timer(start).set(rpki.sundial.timedelta(seconds = startup_delay))
- rpki.async.event_loop()
+ logger.info("Sleeping %d seconds while daemons start up", startup_delay)
+ rpki.async.timer(start).set(rpki.sundial.timedelta(seconds = startup_delay))
+ rpki.async.event_loop()
- # At this point we have gone into event-driven code.
+ # At this point we have gone into event-driven code.
- logger.info("Event loop exited normally")
+ logger.info("Event loop exited normally")
- except Exception, e:
- logger.exception("Event loop exited with an exception")
+ except Exception, e:
+ logger.exception("Event loop exited with an exception")
- finally:
- logger.info("Cleaning up")
- for a in db.engines:
- a.kill_daemons()
- for proc, name in ((rootd_process, "rootd"),
- (pubd_process, "pubd"),
- (rsyncd_process, "rsyncd")):
- # pylint: disable=E1103
- if proc is not None and proc.poll() is None:
- logger.info("Killing %s, pid %s", name, proc.pid)
- try:
- proc.terminate()
- except OSError:
- pass
- if proc is not None:
- logger.info("Daemon %s, pid %s exited with code %s", name, proc.pid, proc.wait())
+ finally:
+ logger.info("Cleaning up")
+ for a in db.engines:
+ a.kill_daemons()
+ for proc, name in ((rootd_process, "rootd"),
+ (pubd_process, "pubd"),
+ (rsyncd_process, "rsyncd")):
+ # pylint: disable=E1103
+ if proc is not None and proc.poll() is None:
+ logger.info("Killing %s, pid %s", name, proc.pid)
+ try:
+ proc.terminate()
+ except OSError:
+ pass
+ if proc is not None:
+ logger.info("Daemon %s, pid %s exited with code %s", name, proc.pid, proc.wait())
def cmd_sleep(cb, interval):
- """
- Set an alarm, then wait for it to go off.
- """
- howlong = rpki.sundial.timedelta.parse(interval)
- logger.info("Sleeping %r", howlong)
- rpki.async.timer(cb).set(howlong)
+ """
+ Set an alarm, then wait for it to go off.
+ """
+
+ howlong = rpki.sundial.timedelta.parse(interval)
+ logger.info("Sleeping %r", howlong)
+ rpki.async.timer(cb).set(howlong)
def cmd_shell(cb, *cmd):
- """
- Run a shell command.
- """
- cmd = " ".join(cmd)
- status = subprocess.call(cmd, shell = True)
- logger.info("Shell command returned status %d", status)
- cb()
+ """
+ Run a shell command.
+ """
+
+ cmd = " ".join(cmd)
+ status = subprocess.call(cmd, shell = True)
+ logger.info("Shell command returned status %d", status)
+ cb()
def cmd_echo(cb, *words):
- """
- Echo some text to the log.
- """
- logger.info(" ".join(words))
- cb()
+ """
+ Echo some text to the log.
+ """
+
+ logger.info(" ".join(words))
+ cb()
## @var cmds
# Dispatch table for commands embedded in delta sections
@@ -352,579 +362,954 @@ cmds = { "sleep" : cmd_sleep,
"echo" : cmd_echo }
class roa_request(object):
- """
- Representation for a roa_request object.
- """
-
- def __init__(self, asn, ipv4, ipv6):
- self.asn = asn
- self.v4 = rpki.resource_set.roa_prefix_set_ipv4("".join(ipv4.split())) if ipv4 else None
- self.v6 = rpki.resource_set.roa_prefix_set_ipv6("".join(ipv6.split())) if ipv6 else None
-
- def __eq__(self, other):
- return self.asn == other.asn and self.v4 == other.v4 and self.v6 == other.v6
-
- def __hash__(self):
- v4 = tuple(self.v4) if self.v4 is not None else None
- v6 = tuple(self.v6) if self.v6 is not None else None
- return self.asn.__hash__() + v4.__hash__() + v6.__hash__()
-
- def __str__(self):
- if self.v4 and self.v6: s = str(self.v4) + "," + str(self.v6)
- elif self.v4: s = str(self.v4)
- else: s = str(self.v6)
- return "%s: %s" % (self.asn, s)
-
- @classmethod
- def parse(cls, yaml):
- return cls(yaml.get("asn"), yaml.get("ipv4"), yaml.get("ipv6"))
-
-class router_cert(object):
- """
- Representation for a router_cert object.
- """
-
- _ecparams = None
- _keypair = None
- _pkcs10 = None
- _gski = None
-
- @classmethod
- def ecparams(cls):
- if cls._ecparams is None:
- cls._ecparams = rpki.x509.KeyParams.generateEC()
- return cls._ecparams
-
- def __init__(self, asn, router_id):
- self.asn = rpki.resource_set.resource_set_as("".join(str(asn).split()))
- self.router_id = router_id
- self.cn = "ROUTER-%08x" % self.asn[0].min
- self.sn = "%08x" % self.router_id
- self.eku = rpki.oids.id_kp_bgpsec_router
-
- @property
- def keypair(self):
- if self._keypair is None:
- self._keypair = rpki.x509.ECDSA.generate(self.ecparams())
- return self._keypair
-
- @property
- def pkcs10(self):
- if self._pkcs10 is None:
- self._pkcs10 = rpki.x509.PKCS10.create(keypair = self.keypair)
- return self._pkcs10
-
- @property
- def gski(self):
- if self._gski is None:
- self._gski = self.pkcs10.gSKI()
- return self._gski
-
- def __eq__(self, other):
- return self.asn == other.asn and self.sn == other.sn
-
- def __hash__(self):
- return tuple(self.asn).__hash__() + self.cn.__hash__() + self.sn.__hash__()
-
- def __str__(self):
- return "%s: %s,%s: %s" % (self.asn, self.cn, self.sn, self.gski)
-
- @classmethod
- def parse(cls, yaml):
- return cls(yaml.get("asn"), yaml.get("router_id"))
-
-class allocation_db(list):
- """
- Representation of all the entities and allocations in the test
- system. Almost everything is generated out of this database.
- """
-
- def __init__(self, yaml):
"""
- Initialize database from the (first) YAML document.
+ Representation for a roa_request object.
"""
- list.__init__(self)
- self.root = allocation(yaml, self)
- assert self.root.is_root
- if self.root.crl_interval is None:
- self.root.crl_interval = rpki.sundial.timedelta.parse(cfg.get("crl_interval", "1d")).convert_to_seconds()
- if self.root.regen_margin is None:
- self.root.regen_margin = rpki.sundial.timedelta.parse(cfg.get("regen_margin", "1d")).convert_to_seconds()
- for a in self:
- if a.sia_base is None:
- a.sia_base = (rootd_sia + "root/trunk/" if a.is_root else a.parent.sia_base) + a.name + "/"
- if a.base.valid_until is None:
- a.base.valid_until = a.parent.base.valid_until
- if a.crl_interval is None:
- a.crl_interval = a.parent.crl_interval
- if a.regen_margin is None:
- a.regen_margin = a.parent.regen_margin
- a.client_handle = "/".join(a.sia_base.split("/")[4:]).rstrip("/")
- self.root.closure()
- self.map = dict((a.name, a) for a in self)
- self.engines = [a for a in self if a.is_engine]
- for i, a in enumerate(self.engines):
- a.set_engine_number(i)
- for a in self:
- if a.is_hosted:
- a.hosted_by = self.map[a.hosted_by]
- a.hosted_by.hosts.append(a)
- assert a.is_twig, "%s is not twig" % a.name
- assert not a.hosted_by.is_hosted, "%s is hosted by a hosted entity" % a.name
-
- def apply_delta(self, delta, cb):
- """
- Apply a delta or run a command.
- """
-
- def loop(iterator, d):
- if isinstance(d, str):
- c = d.split()
- cmds[c[0]](iterator, *c[1:])
- else:
- self.map[d["name"]].apply_delta(d, iterator)
+ def __init__(self, asn, ipv4, ipv6):
+ self.asn = asn
+ self.v4 = rpki.resource_set.roa_prefix_set_ipv4("".join(ipv4.split())) if ipv4 else None
+ self.v6 = rpki.resource_set.roa_prefix_set_ipv6("".join(ipv6.split())) if ipv6 else None
- def done():
- self.root.closure()
- cb()
+ def __eq__(self, other):
+ return self.asn == other.asn and self.v4 == other.v4 and self.v6 == other.v6
- if delta is None:
- cb()
- else:
- rpki.async.iterator(delta, loop, done)
+ def __hash__(self):
+ v4 = tuple(self.v4) if self.v4 is not None else None
+ v6 = tuple(self.v6) if self.v6 is not None else None
+ return self.asn.__hash__() + v4.__hash__() + v6.__hash__()
- def dump(self):
- """
- Print content of the database.
- """
- for a in self:
- print a
+ def __str__(self):
+ if self.v4 and self.v6: s = str(self.v4) + "," + str(self.v6)
+ elif self.v4: s = str(self.v4)
+ else: s = str(self.v6)
+ return "%s: %s" % (self.asn, s)
-class allocation(object):
+ @classmethod
+ def parse(cls, yaml):
+ return cls(yaml.get("asn"), yaml.get("ipv4"), yaml.get("ipv6"))
- parent = None
- irdb_db_name = None
- irdb_port = None
- rpki_db_name = None
- rpki_port = None
- crl_interval = None
- regen_margin = None
- last_cms_time = None
- rpkid_process = None
- irdbd_process = None
-
- def __init__(self, yaml, db, parent = None):
- """
- Initialize one entity and insert it into the database.
- """
- db.append(self)
- self.name = yaml["name"]
- self.parent = parent
- self.kids = [allocation(k, db, self) for k in yaml.get("kids", ())]
- valid_until = None
- if "valid_until" in yaml:
- valid_until = rpki.sundial.datetime.from_datetime(yaml.get("valid_until"))
- if valid_until is None and "valid_for" in yaml:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta.parse(yaml["valid_for"])
- self.base = rpki.resource_set.resource_bag(
- asn = rpki.resource_set.resource_set_as(yaml.get("asn")),
- v4 = rpki.resource_set.resource_set_ipv4(yaml.get("ipv4")),
- v6 = rpki.resource_set.resource_set_ipv6(yaml.get("ipv6")),
- valid_until = valid_until)
- self.sia_base = yaml.get("sia_base")
- if "crl_interval" in yaml:
- self.crl_interval = rpki.sundial.timedelta.parse(yaml["crl_interval"]).convert_to_seconds()
- if "regen_margin" in yaml:
- self.regen_margin = rpki.sundial.timedelta.parse(yaml["regen_margin"]).convert_to_seconds()
- self.roa_requests = [roa_request.parse(y) for y in yaml.get("roa_request", yaml.get("route_origin", ()))]
- for r in self.roa_requests:
- if r.v4:
- self.base.v4 |= r.v4.to_resource_set()
- if r.v6:
- self.base.v6 |= r.v6.to_resource_set()
- self.router_certs = [router_cert.parse(y) for y in yaml.get("router_cert", ())]
- for r in self.router_certs:
- self.base.asn |= r.asn
- self.hosted_by = yaml.get("hosted_by")
- self.extra_conf = yaml.get("extra_conf", [])
- self.hosts = []
-
- def closure(self):
+class router_cert(object):
"""
- Compute the transitive resource closure.
+ Representation for a router_cert object.
"""
- resources = self.base
- for kid in self.kids:
- resources |= kid.closure()
- self.resources = resources
- return resources
- def apply_delta(self, yaml, cb):
+ _ecparams = None
+ _keypair = None
+ _pkcs10 = None
+ _gski = None
+
+ @classmethod
+ def ecparams(cls):
+ if cls._ecparams is None:
+ cls._ecparams = rpki.x509.KeyParams.generateEC()
+ return cls._ecparams
+
+ def __init__(self, asn, router_id):
+ self.asn = rpki.resource_set.resource_set_as("".join(str(asn).split()))
+ self.router_id = router_id
+ self.cn = "ROUTER-%08x" % self.asn[0].min
+ self.sn = "%08x" % self.router_id
+ self.eku = rpki.oids.id_kp_bgpsec_router
+
+ @property
+ def keypair(self):
+ if self._keypair is None:
+ self._keypair = rpki.x509.ECDSA.generate(self.ecparams())
+ return self._keypair
+
+ @property
+ def pkcs10(self):
+ if self._pkcs10 is None:
+ self._pkcs10 = rpki.x509.PKCS10.create(keypair = self.keypair)
+ return self._pkcs10
+
+ @property
+ def gski(self):
+ if self._gski is None:
+ self._gski = self.pkcs10.gSKI()
+ return self._gski
+
+ def __eq__(self, other):
+ return self.asn == other.asn and self.sn == other.sn
+
+ def __hash__(self):
+ return tuple(self.asn).__hash__() + self.cn.__hash__() + self.sn.__hash__()
+
+ def __str__(self):
+ return "%s: %s,%s: %s" % (self.asn, self.cn, self.sn, self.gski)
+
+ @classmethod
+ def parse(cls, yaml):
+ return cls(yaml.get("asn"), yaml.get("router_id"))
+
+class allocation_db(list):
"""
- Apply deltas to this entity.
+ Representation of all the entities and allocations in the test
+ system. Almost everything is generated out of this database.
"""
- logger.info("Applying delta: %s", yaml)
-
- def loop(iterator, kv):
- if kv[0] == "name":
- iterator()
- else:
- getattr(self, "apply_" + kv[0])(kv[1], iterator)
-
- rpki.async.iterator(yaml.items(), loop, cb)
-
- def apply_add_as(self, text, cb):
- self.base.asn |= rpki.resource_set.resource_set_as(text)
- cb()
-
- def apply_add_v4(self, text, cb):
- self.base.v4 |= rpki.resource_set.resource_set_ipv4(text)
- cb()
+ def __init__(self, yaml):
+ """
+ Initialize database from the (first) YAML document.
+ """
+
+ list.__init__(self)
+ self.root = allocation(yaml, self)
+ assert self.root.is_root
+ if self.root.crl_interval is None:
+ self.root.crl_interval = rpki.sundial.timedelta.parse(cfg.get("crl_interval", "1d")).convert_to_seconds()
+ if self.root.regen_margin is None:
+ self.root.regen_margin = rpki.sundial.timedelta.parse(cfg.get("regen_margin", "1d")).convert_to_seconds()
+ for a in self:
+ if a.sia_base is None:
+ a.sia_base = (rootd_sia + "root/trunk/" if a.is_root else a.parent.sia_base) + a.name + "/"
+ if a.base.valid_until is None:
+ a.base.valid_until = a.parent.base.valid_until
+ if a.crl_interval is None:
+ a.crl_interval = a.parent.crl_interval
+ if a.regen_margin is None:
+ a.regen_margin = a.parent.regen_margin
+ a.client_handle = "/".join(a.sia_base.split("/")[4:]).rstrip("/")
+ self.root.closure()
+ self.map = dict((a.name, a) for a in self)
+ self.engines = [a for a in self if a.is_engine]
+ for i, a in enumerate(self.engines):
+ a.set_engine_number(i)
+ for a in self:
+ if a.is_hosted:
+ a.hosted_by = self.map[a.hosted_by]
+ a.hosted_by.hosts.append(a)
+ assert a.is_twig, "%s is not twig" % a.name
+ assert not a.hosted_by.is_hosted, "%s is hosted by a hosted entity" % a.name
+
+ def apply_delta(self, delta, cb):
+ """
+ Apply a delta or run a command.
+ """
+
+ def loop(iterator, d):
+ if isinstance(d, str):
+ c = d.split()
+ cmds[c[0]](iterator, *c[1:])
+ else:
+ self.map[d["name"]].apply_delta(d, iterator)
+
+ def done():
+ self.root.closure()
+ cb()
+
+ if delta is None:
+ cb()
+ else:
+ rpki.async.iterator(delta, loop, done)
- def apply_add_v6(self, text, cb):
- self.base.v6 |= rpki.resource_set.resource_set_ipv6(text)
- cb()
+ def dump(self):
+ """
+ Print content of the database.
+ """
- def apply_sub_as(self, text, cb):
- self.base.asn |= rpki.resource_set.resource_set_as(text)
- cb()
+ for a in self:
+ print a
- def apply_sub_v4(self, text, cb):
- self.base.v4 |= rpki.resource_set.resource_set_ipv4(text)
- cb()
+class allocation(object):
- def apply_sub_v6(self, text, cb):
- self.base.v6 |= rpki.resource_set.resource_set_ipv6(text)
- cb()
+ parent = None
+ irdb_db_name = None
+ irdb_port = None
+ rpki_db_name = None
+ rpki_port = None
+ crl_interval = None
+ regen_margin = None
+ last_cms_time = None
+ rpkid_process = None
+ irdbd_process = None
+
+ def __init__(self, yaml, db, parent = None):
+ """
+ Initialize one entity and insert it into the database.
+ """
+
+ db.append(self)
+ self.name = yaml["name"]
+ self.parent = parent
+ self.kids = [allocation(k, db, self) for k in yaml.get("kids", ())]
+ valid_until = None
+ if "valid_until" in yaml:
+ valid_until = rpki.sundial.datetime.from_datetime(yaml.get("valid_until"))
+ if valid_until is None and "valid_for" in yaml:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta.parse(yaml["valid_for"])
+ self.base = rpki.resource_set.resource_bag(
+ asn = str(yaml.get("asn", "")),
+ v4 = yaml.get("ipv4"),
+ v6 = yaml.get("ipv6"),
+ valid_until = valid_until)
+ self.sia_base = yaml.get("sia_base")
+ if "crl_interval" in yaml:
+ self.crl_interval = rpki.sundial.timedelta.parse(yaml["crl_interval"]).convert_to_seconds()
+ if "regen_margin" in yaml:
+ self.regen_margin = rpki.sundial.timedelta.parse(yaml["regen_margin"]).convert_to_seconds()
+ self.roa_requests = [roa_request.parse(y) for y in yaml.get("roa_request", yaml.get("route_origin", ()))]
+ for r in self.roa_requests:
+ if r.v4:
+ self.base.v4 |= r.v4.to_resource_set()
+ if r.v6:
+ self.base.v6 |= r.v6.to_resource_set()
+ self.router_certs = [router_cert.parse(y) for y in yaml.get("router_cert", ())]
+ for r in self.router_certs:
+ self.base.asn |= r.asn
+ self.hosted_by = yaml.get("hosted_by")
+ self.extra_conf = yaml.get("extra_conf", [])
+ self.hosts = []
+
+ def closure(self):
+ """
+ Compute the transitive resource closure.
+ """
+
+ resources = self.base
+ for kid in self.kids:
+ resources |= kid.closure()
+ self.resources = resources
+ return resources
+
+ def apply_delta(self, yaml, cb):
+ """
+ Apply deltas to this entity.
+ """
+
+ logger.info("Applying delta: %s", yaml)
+
+ def loop(iterator, kv):
+ if kv[0] == "name":
+ iterator()
+ else:
+ getattr(self, "apply_" + kv[0])(kv[1], iterator)
+
+ rpki.async.iterator(yaml.items(), loop, cb)
+
+ def apply_add_as(self, text, cb):
+ self.base.asn |= rpki.resource_set.resource_set_as(text)
+ cb()
+
+ def apply_add_v4(self, text, cb):
+ self.base.v4 |= rpki.resource_set.resource_set_ipv4(text)
+ cb()
+
+ def apply_add_v6(self, text, cb):
+ self.base.v6 |= rpki.resource_set.resource_set_ipv6(text)
+ cb()
+
+ def apply_sub_as(self, text, cb):
+ self.base.asn |= rpki.resource_set.resource_set_as(text)
+ cb()
+
+ def apply_sub_v4(self, text, cb):
+ self.base.v4 |= rpki.resource_set.resource_set_ipv4(text)
+ cb()
+
+ def apply_sub_v6(self, text, cb):
+ self.base.v6 |= rpki.resource_set.resource_set_ipv6(text)
+ cb()
+
+ def apply_valid_until(self, stamp, cb):
+ self.base.valid_until = rpki.sundial.datetime.from_datetime(stamp)
+ cb()
+
+ def apply_valid_for(self, text, cb):
+ self.base.valid_until = rpki.sundial.now() + rpki.sundial.timedelta.parse(text)
+ cb()
+
+ def apply_valid_add(self, text, cb):
+ self.base.valid_until += rpki.sundial.timedelta.parse(text)
+ cb()
+
+ def apply_valid_sub(self, text, cb):
+ self.base.valid_until -= rpki.sundial.timedelta.parse(text)
+ cb()
+
+ def apply_roa_request_add(self, yaml, cb):
+ for y in yaml:
+ r = roa_request.parse(y)
+ if r not in self.roa_requests:
+ self.roa_requests.append(r)
+ cb()
+
+ def apply_roa_request_del(self, yaml, cb):
+ for y in yaml:
+ r = roa_request.parse(y)
+ if r in self.roa_requests:
+ self.roa_requests.remove(r)
+ cb()
+
+ def apply_router_cert_add(self, yaml, cb):
+ for y in yaml:
+ r = router_cert.parse(y)
+ if r not in self.router_certs:
+ self.router_certs.append(r)
+ cb()
+
+ def apply_router_cert_del(self, yaml, cb):
+ for y in yaml:
+ r = router_cert.parse(y)
+ if r in self.router_certs:
+ self.router_certs.remove(r)
+ cb()
+
+ def apply_rekey(self, target, cb):
+
+ def done(e):
+ if isinstance(e, Exception):
+ logger.exception("Exception while rekeying %s", self.name)
+ raise e
+ cb()
+
+ if target is None:
+ logger.info("Rekeying <tenant/> %s", self.name)
+ self.call_rpkid([rpki.left_right.self_elt.make_pdu(
+ action = "set", self_handle = self.name, rekey = "yes")], cb = done)
+ else:
+ logger.info("Rekeying <parent/> %s %s", self.name, target)
+ self.call_rpkid([rpki.left_right.parent_elt.make_pdu(
+ action = "set", self_handle = self.name, parent_handle = target, rekey = "yes")], cb = done)
+
+ def apply_revoke(self, target, cb):
+
+ def done(e):
+ if isinstance(e, Exception):
+ logger.exception("Exception while revoking %s", self.name)
+ raise e
+ cb()
+
+ if target is None:
+ logger.info("Revoking <tenant/> %s", self.name)
+ self.call_rpkid([rpki.left_right.self_elt.make_pdu(
+ action = "set", self_handle = self.name, revoke = "yes")], cb = done)
+ else:
+ logger.info("Revoking <parent/> %s %s", self.name, target)
+ self.call_rpkid([rpki.left_right.parent_elt.make_pdu(
+ action = "set", self_handle = self.name, parent_handle = target, revoke = "yes")], cb = done)
+
+ def __str__(self):
+ s = self.name + "\n"
+ if self.resources.asn: s += " ASN: %s\n" % self.resources.asn
+ if self.resources.v4: s += " IPv4: %s\n" % self.resources.v4
+ if self.resources.v6: s += " IPv6: %s\n" % self.resources.v6
+ if self.kids: s += " Kids: %s\n" % ", ".join(k.name for k in self.kids)
+ if self.parent: s += " Up: %s\n" % self.parent.name
+ if self.sia_base: s += " SIA: %s\n" % self.sia_base
+ return s + "Until: %s\n" % self.resources.valid_until
+
+
+ @property
+ def is_root(self):
+ return self.parent is None
+
+ @property
+ def is_twig(self):
+ return not self.is_root
+
+ @property
+ def is_hosted(self):
+ return self.hosted_by is not None
+
+ @property
+ def is_engine(self):
+ return not self.is_hosted
+
+ def set_engine_number(self, n):
+ """
+ Set the engine number for this entity.
+ """
+
+ self.irdb_db_name = "irdb%d" % n
+ self.irdb_port = allocate_port()
+ self.rpki_db_name = "rpki%d" % n
+ self.rpki_port = allocate_port()
+
+ def get_rpki_port(self):
+ """
+ Get rpki port to use for this entity.
+ """
+
+ if self.is_hosted:
+ assert self.hosted_by.rpki_port is not None
+ return self.hosted_by.rpki_port
+ else:
+ assert self.rpki_port is not None
+ return self.rpki_port
+
+ def setup_bpki_certs(self):
+ """
+ Create BPKI certificates for this entity.
+ """
+
+ logger.info("Constructing BPKI keys and certs for %s", self.name)
+ setup_bpki_cert_chain(name = self.name,
+ ee = ("RPKI", "IRDB", "IRBE"),
+ ca = ("SELF",))
+ self.rpkid_ta = rpki.x509.X509(PEM_file = self.name + "-TA.cer")
+ self.irbe_key = rpki.x509.RSA( PEM_file = self.name + "-IRBE.key")
+ self.irbe_cert = rpki.x509.X509(PEM_file = self.name + "-IRBE.cer")
+ self.rpkid_cert = rpki.x509.X509(PEM_file = self.name + "-RPKI.cer")
+
+ def setup_conf_file(self):
+ """
+ Write config files for this entity.
+ """
+
+ logger.info("Writing config files for %s", self.name)
+ assert self.rpki_port is not None
+ d = dict(my_name = self.name,
+ irdb_db_name = self.irdb_db_name,
+ irdb_db_pass = irdb_db_pass,
+ irdb_port = self.irdb_port,
+ rpki_db_name = self.rpki_db_name,
+ rpki_db_pass = rpki_db_pass,
+ rpki_port = self.rpki_port)
+ f = open(self.name + ".conf", "w")
+ f.write(conf_fmt_1 % d)
+ for line in self.extra_conf:
+ f.write(line + "\n")
+ f.close()
+
+ def setup_sql(self, rpki_sql, irdb_sql):
+ """
+ Set up this entity's IRDB.
+ """
+
+ logger.info("Setting up MySQL for %s", self.name)
+ db = MySQLdb.connect(user = "rpki", db = self.rpki_db_name, passwd = rpki_db_pass,
+ conv = sql_conversions)
+ cur = db.cursor()
+ db.autocommit(True)
+ for sql in rpki_sql:
+ try:
+ cur.execute(sql)
+ except Exception:
+ if "DROP TABLE IF EXISTS" not in sql.upper():
+ raise
+ db.close()
+ db = MySQLdb.connect(user = "irdb", db = self.irdb_db_name, passwd = irdb_db_pass,
+ conv = sql_conversions)
+ cur = db.cursor()
+ db.autocommit(True)
+ for sql in irdb_sql:
+ try:
+ cur.execute(sql)
+ except Exception:
+ if "DROP TABLE IF EXISTS" not in sql.upper():
+ raise
+ for s in [self] + self.hosts:
+ for kid in s.kids:
+ cur.execute("INSERT registrant (registrant_handle, registry_handle, valid_until) VALUES (%s, %s, %s)",
+ (kid.name, s.name, kid.resources.valid_until))
+ db.close()
+
+ def sync_sql(self):
+ """
+ Whack this entity's IRDB to match our master database. We do this
+ once during setup, then do it again every time we apply a delta to
+ this entity.
+ """
+
+ logger.info("Updating MySQL data for IRDB %s", self.name)
+ db = MySQLdb.connect(user = "irdb", db = self.irdb_db_name, passwd = irdb_db_pass,
+ conv = sql_conversions)
+ cur = db.cursor()
+ db.autocommit(True)
+ cur.execute("DELETE FROM registrant_asn")
+ cur.execute("DELETE FROM registrant_net")
+ cur.execute("DELETE FROM roa_request_prefix")
+ cur.execute("DELETE FROM roa_request")
+ cur.execute("DELETE FROM ee_certificate_asn")
+ cur.execute("DELETE FROM ee_certificate_net")
+ cur.execute("DELETE FROM ee_certificate")
+
+ for s in [self] + self.hosts:
+ for kid in s.kids:
+ cur.execute("SELECT registrant_id FROM registrant WHERE registrant_handle = %s AND registry_handle = %s",
+ (kid.name, s.name))
+ registrant_id = cur.fetchone()[0]
+ for as_range in kid.resources.asn:
+ cur.execute("INSERT registrant_asn (start_as, end_as, registrant_id) VALUES (%s, %s, %s)",
+ (as_range.min, as_range.max, registrant_id))
+ for v4_range in kid.resources.v4:
+ cur.execute("INSERT registrant_net (start_ip, end_ip, version, registrant_id) VALUES (%s, %s, 4, %s)",
+ (v4_range.min, v4_range.max, registrant_id))
+ for v6_range in kid.resources.v6:
+ cur.execute("INSERT registrant_net (start_ip, end_ip, version, registrant_id) VALUES (%s, %s, 6, %s)",
+ (v6_range.min, v6_range.max, registrant_id))
+ cur.execute("UPDATE registrant SET valid_until = %s WHERE registrant_id = %s",
+ (kid.resources.valid_until, registrant_id))
+ for r in s.roa_requests:
+ cur.execute("INSERT roa_request (self_handle, asn) VALUES (%s, %s)",
+ (s.name, r.asn))
+ roa_request_id = cur.lastrowid
+ for version, prefix_set in ((4, r.v4), (6, r.v6)):
+ if prefix_set:
+ cur.executemany("INSERT roa_request_prefix "
+ "(roa_request_id, prefix, prefixlen, max_prefixlen, version) "
+ "VALUES (%s, %s, %s, %s, %s)",
+ ((roa_request_id, x.prefix, x.prefixlen, x.max_prefixlen, version)
+ for x in prefix_set))
+ for r in s.router_certs:
+ cur.execute("INSERT ee_certificate (self_handle, pkcs10, gski, cn, sn, eku, valid_until) "
+ "VALUES (%s, %s, %s, %s, %s, %s, %s)",
+ (s.name, r.pkcs10.get_DER(), r.gski, r.cn, r.sn, r.eku, s.resources.valid_until))
+ ee_certificate_id = cur.lastrowid
+ cur.executemany("INSERT ee_certificate_asn (ee_certificate_id, start_as, end_as) VALUES (%s, %s, %s)",
+ ((ee_certificate_id, a.min, a.max) for a in r.asn))
+ db.close()
+
+ def run_daemons(self):
+ """
+ Run daemons for this entity.
+ """
+
+ logger.info("Running daemons for %s", self.name)
+ env = dict(os.environ, RPKI_CONF = self.name + ".conf")
+ self.rpkid_process = subprocess.Popen((prog_python, prog_rpkid, "--foreground", "--log-stdout", "--log-level", "debug") +
+ (("--profile", self.name + ".prof") if args.profile else ()),
+ env = env)
+ self.irdbd_process = subprocess.Popen((prog_python, prog_irdbd, "--foreground", "--log-stdout", "--log-level", "debug"),
+ env = env)
+
+ def kill_daemons(self):
+ """
+ Kill daemons for this entity.
+ """
+
+ # pylint: disable=E1103
+ for proc, name in ((self.rpkid_process, "rpkid"),
+ (self.irdbd_process, "irdbd")):
+ if proc is not None and proc.poll() is None:
+ logger.info("Killing daemon %s pid %s for %s", name, proc.pid, self.name)
+ try:
+ proc.terminate()
+ except OSError:
+ pass
+ if proc is not None:
+ logger.info("Daemon %s pid %s for %s exited with code %s",
+ name, proc.pid, self.name, proc.wait())
+
+ def call_rpkid(self, pdus, cb):
+ """
+ Send a left-right message to this entity's RPKI daemon and return
+ the response.
+
+ If this entity is hosted (does not run its own RPKI daemon), all
+ of this happens with the hosting RPKI daemon.
+ """
+
+ logger.info("Calling rpkid for %s", self.name)
+
+ if self.is_hosted:
+ logger.info("rpkid %s is hosted by rpkid %s, switching", self.name, self.hosted_by.name)
+ self = self.hosted_by
+ assert not self.is_hosted
+
+ assert isinstance(pdus, (list, tuple))
+ assert self.rpki_port is not None
+
+ q_msg = rpki.left_right.msg.query(*pdus)
+ q_cms = rpki.left_right.cms_msg_saxify()
+ q_der = q_cms.wrap(q_msg, self.irbe_key, self.irbe_cert)
+ q_url = "http://localhost:%d/left-right" % self.rpki_port
+
+ logger.debug(q_cms.pretty_print_content())
+
+ def done(r_der):
+ logger.info("Callback from rpkid %s", self.name)
+ r_cms = rpki.left_right.cms_msg_saxify(DER = r_der)
+ r_msg = r_cms.unwrap((self.rpkid_ta, self.rpkid_cert))
+ self.last_cms_time = r_cms.check_replay(self.last_cms_time, q_url)
+ logger.debug(r_cms.pretty_print_content())
+ assert r_msg.is_reply
+ for r_pdu in r_msg:
+ assert not isinstance(r_pdu, rpki.left_right.report_error_elt)
+ cb(r_msg)
+
+ def lose(e):
+ raise
+
+ rpki.http.client(
+ url = q_url,
+ msg = q_der,
+ callback = done,
+ errback = lose)
+
+ def cross_certify(self, certificant, reverse = False):
+ """
+ Cross-certify and return the resulting certificate.
+ """
+
+ if reverse:
+ certifier = certificant
+ certificant = self.name + "-SELF"
+ else:
+ certifier = self.name + "-SELF"
+ return cross_certify(certificant, certifier)
+
+ def create_rpki_objects(self, cb):
+ """
+ Create RPKI engine objects for this engine.
+
+ Root node of the engine tree is special, it too has a parent but
+ that one is the magic self-signed micro engine.
+
+ The rest of this is straightforward. There are a lot of objects
+ to create, but we can batch them all into one honking PDU, then
+ issue one more PDU to set BSC EE certificates based on the PKCS
+ #10 requests we get back when we tell rpkid to generate BSC keys.
+ """
+
+ assert not self.is_hosted
+
+ selves = [self] + self.hosts
+
+ rpkid_pdus = []
+ pubd_pdus = []
+
+ for i, s in enumerate(selves):
+ logger.info("Creating RPKI objects for [%d] %s", i, s.name)
+
+ rpkid_pdus.append(rpki.left_right.self_elt.make_pdu(
+ action = "create",
+ self_handle = s.name,
+ crl_interval = s.crl_interval,
+ regen_margin = s.regen_margin,
+ bpki_cert = (s.cross_certify(s.hosted_by.name + "-TA", reverse = True)
+ if s.is_hosted else
+ rpki.x509.X509(Auto_file = s.name + "-SELF.cer"))))
+
+ rpkid_pdus.append(rpki.left_right.bsc_elt.make_pdu(
+ action = "create",
+ self_handle = s.name,
+ bsc_handle = "b",
+ generate_keypair = True))
+
+ pubd_pdus.append(rpki.publication_control.client_elt.make_pdu(
+ action = "create",
+ client_handle = s.client_handle,
+ base_uri = s.sia_base,
+ bpki_cert = s.cross_certify(pubd_name + "-TA", reverse = True)))
+
+ rpkid_pdus.append(rpki.left_right.repository_elt.make_pdu(
+ action = "create",
+ self_handle = s.name,
+ bsc_handle = "b",
+ repository_handle = "r",
+ bpki_cert = s.cross_certify(pubd_name + "-TA"),
+ peer_contact_uri = "http://localhost:%d/client/%s" % (pubd_port, s.client_handle)))
+
+ for k in s.kids:
+ rpkid_pdus.append(rpki.left_right.child_elt.make_pdu(
+ action = "create",
+ self_handle = s.name,
+ child_handle = k.name,
+ bsc_handle = "b",
+ bpki_cert = s.cross_certify(k.name + "-SELF")))
+
+ if s.is_root:
+ rootd_cert = s.cross_certify(rootd_name + "-TA")
+ rpkid_pdus.append(rpki.left_right.parent_elt.make_pdu(
+ action = "create",
+ self_handle = s.name,
+ parent_handle = "rootd",
+ bsc_handle = "b",
+ repository_handle = "r",
+ sia_base = s.sia_base,
+ bpki_cert = rootd_cert,
+ sender_name = s.name,
+ recipient_name = "rootd",
+ peer_contact_uri = "http://localhost:%s/" % rootd_port))
+ else:
+ rpkid_pdus.append(rpki.left_right.parent_elt.make_pdu(
+ action = "create",
+ self_handle = s.name,
+ parent_handle = s.parent.name,
+ bsc_handle = "b",
+ repository_handle = "r",
+ sia_base = s.sia_base,
+ bpki_cert = s.cross_certify(s.parent.name + "-SELF"),
+ sender_name = s.name,
+ recipient_name = s.parent.name,
+ peer_contact_uri = "http://localhost:%s/up-down/%s/%s" % (s.parent.get_rpki_port(),
+ s.parent.name, s.name)))
+
+ def one():
+ call_pubd(pubd_pdus, cb = two)
+
+ def two(vals):
+ self.call_rpkid(rpkid_pdus, cb = three)
+
+ def three(vals):
+
+ bsc_dict = dict((b.self_handle, b) for b in vals if isinstance(b, rpki.left_right.bsc_elt))
+
+ bsc_pdus = []
+
+ for s in selves:
+ b = bsc_dict[s.name]
+
+ logger.info("Issuing BSC EE cert for %s", s.name)
+ cmd = (prog_openssl, "x509", "-req", "-sha256", "-extfile", s.name + "-RPKI.conf",
+ "-extensions", "req_x509_ext", "-days", "30",
+ "-CA", s.name + "-SELF.cer", "-CAkey", s.name + "-SELF.key", "-CAcreateserial", "-text")
+ signer = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
+ signed = signer.communicate(input = b.pkcs10_request.get_PEM())
+ if not signed[0]:
+ logger.warning(signed[1])
+ raise CouldntIssueBSCEECertificate("Couldn't issue BSC EE certificate")
+ s.bsc_ee = rpki.x509.X509(PEM = signed[0])
+ s.bsc_crl = rpki.x509.CRL(PEM_file = s.name + "-SELF.crl")
+ logger.info("BSC EE cert for %s SKI %s", s.name, s.bsc_ee.hSKI())
+
+ bsc_pdus.append(rpki.left_right.bsc_elt.make_pdu(
+ action = "set",
+ self_handle = s.name,
+ bsc_handle = "b",
+ signing_cert = s.bsc_ee,
+ signing_cert_crl = s.bsc_crl))
+
+ self.call_rpkid(bsc_pdus, cb = four)
+
+ def four(vals):
+ cb()
+
+ one()
+
+ def setup_yaml_leaf(self):
+ """
+ Generate certificates and write YAML scripts for leaf nodes.
+
+ We're cheating a bit here: properly speaking, we can't generate
+ issue or revoke requests without knowing the class, which is
+ generated on the fly, but at the moment the test case is
+ simplistic enough that the class will always be "1", so we just
+ wire in that value for now.
+
+ Well, ok, we just broke that assumption. Now we do something even
+ nastier, just to eke a bit more life out of this kludge. This
+ really needs to be rewritten, but it may require a different tool
+ than testpoke.
+ """
+
+ if not os.path.exists(self.name + ".key"):
+ logger.info("Generating RPKI key for %s", self.name)
+ subprocess.check_call((prog_openssl, "genrsa", "-out", self.name + ".key", "2048" ),
+ stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+ ski = rpki.x509.RSA(PEM_file = self.name + ".key").gSKI()
+
+ if self.parent.is_hosted:
+ parent_host = self.parent.hosted_by.name
+ else:
+ parent_host = self.parent.name
- def apply_valid_until(self, stamp, cb):
- self.base.valid_until = rpki.sundial.datetime.from_datetime(stamp)
- cb()
+ self.cross_certify(self.parent.name + "-SELF")
+ self.cross_certify(parent_host + "-TA")
- def apply_valid_for(self, text, cb):
- self.base.valid_until = rpki.sundial.now() + rpki.sundial.timedelta.parse(text)
- cb()
+ def run_cron(self, cb):
+ """
+ Trigger cron run for this engine.
+ """
- def apply_valid_add(self, text, cb):
- self.base.valid_until += rpki.sundial.timedelta.parse(text)
- cb()
+ logger.info("Running cron for %s", self.name)
- def apply_valid_sub(self, text, cb):
- self.base.valid_until -= rpki.sundial.timedelta.parse(text)
- cb()
+ assert self.rpki_port is not None
- def apply_roa_request_add(self, yaml, cb):
- for y in yaml:
- r = roa_request.parse(y)
- if r not in self.roa_requests:
- self.roa_requests.append(r)
- cb()
+ def done(result):
+ assert result == "OK", 'Expected "OK" result from cronjob, got %r' % result
+ cb()
- def apply_roa_request_del(self, yaml, cb):
- for y in yaml:
- r = roa_request.parse(y)
- if r in self.roa_requests:
- self.roa_requests.remove(r)
- cb()
+ rpki.http.client(
+ url = "http://localhost:%d/cronjob" % self.rpki_port,
+ msg = "Run cron now, please",
+ callback = done,
+ errback = done)
- def apply_router_cert_add(self, yaml, cb):
- for y in yaml:
- r = router_cert.parse(y)
- if r not in self.router_certs:
- self.router_certs.append(r)
- cb()
+ def run_yaml(self):
+ """
+ Run YAML scripts for this leaf entity. Since we're not bothering
+ to check the class list returned by the list command, the issue
+ command may fail, so we treat failure of the list command as an
+ error, but only issue a warning when issue fails.
+ """
- def apply_router_cert_del(self, yaml, cb):
- for y in yaml:
- r = router_cert.parse(y)
- if r in self.router_certs:
- self.router_certs.remove(r)
- cb()
+ logger.info("Running YAML for %s", self.name)
+ subprocess.check_call((prog_python, prog_poke, "-y", self.name + ".yaml", "-r", "list"))
+ if subprocess.call((prog_python, prog_poke, "-y", self.name + ".yaml", "-r", "issue")) != 0:
+ logger.warning("YAML issue command failed for %s, continuing", self.name)
- def apply_rekey(self, target, cb):
-
- def done(e):
- if isinstance(e, Exception):
- logger.exception("Exception while rekeying %s", self.name)
- raise e
- cb()
-
- if target is None:
- logger.info("Rekeying <self/> %s", self.name)
- self.call_rpkid([rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.name, rekey = "yes")], cb = done)
- else:
- logger.info("Rekeying <parent/> %s %s", self.name, target)
- self.call_rpkid([rpki.left_right.parent_elt.make_pdu(
- action = "set", self_handle = self.name, parent_handle = target, rekey = "yes")], cb = done)
-
- def apply_revoke(self, target, cb):
-
- def done(e):
- if isinstance(e, Exception):
- logger.exception("Exception while revoking %s", self.name)
- raise e
- cb()
-
- if target is None:
- logger.info("Revoking <self/> %s", self.name)
- self.call_rpkid([rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.name, revoke = "yes")], cb = done)
- else:
- logger.info("Revoking <parent/> %s %s", self.name, target)
- self.call_rpkid([rpki.left_right.parent_elt.make_pdu(
- action = "set", self_handle = self.name, parent_handle = target, revoke = "yes")], cb = done)
-
- def __str__(self):
- s = self.name + "\n"
- if self.resources.asn: s += " ASN: %s\n" % self.resources.asn
- if self.resources.v4: s += " IPv4: %s\n" % self.resources.v4
- if self.resources.v6: s += " IPv6: %s\n" % self.resources.v6
- if self.kids: s += " Kids: %s\n" % ", ".join(k.name for k in self.kids)
- if self.parent: s += " Up: %s\n" % self.parent.name
- if self.sia_base: s += " SIA: %s\n" % self.sia_base
- return s + "Until: %s\n" % self.resources.valid_until
-
-
- @property
- def is_root(self):
- return self.parent is None
-
- @property
- def is_twig(self):
- return not self.is_root
-
- @property
- def is_hosted(self):
- return self.hosted_by is not None
-
- @property
- def is_engine(self):
- return not self.is_hosted
-
- def set_engine_number(self, n):
+def setup_bpki_cert_chain(name, ee = (), ca = ()):
"""
- Set the engine number for this entity.
+ Build a set of BPKI certificates.
"""
- self.irdb_db_name = "irdb%d" % n
- self.irdb_port = allocate_port()
- self.rpki_db_name = "rpki%d" % n
- self.rpki_port = allocate_port()
- def get_rpki_port(self):
- """
- Get rpki port to use for this entity.
+ s = "exec >/dev/null 2>&1\n"
+ #s = "set -x\n"
+ for kind in ("TA",) + ee + ca:
+ d = dict(name = name,
+ kind = kind,
+ ca = "false" if kind in ee else "true",
+ openssl = prog_openssl)
+ f = open("%(name)s-%(kind)s.conf" % d, "w")
+ f.write(bpki_cert_fmt_1 % d)
+ f.close()
+ if not os.path.exists("%(name)s-%(kind)s.key" % d):
+ s += bpki_cert_fmt_2 % d
+ s += bpki_cert_fmt_3 % d
+ d = dict(name = name,
+ openssl = prog_openssl)
+ s += bpki_cert_fmt_4 % d
+ for kind in ee + ca:
+ d["kind"] = kind
+ s += bpki_cert_fmt_5 % d
+ for kind in ("TA",) + ca:
+ d["kind"] = kind
+ s += bpki_cert_fmt_6 % d
+ subprocess.check_call(s, shell = True)
+
+def setup_rootd(rpkid, rootd_yaml, db):
"""
- if self.is_hosted:
- assert self.hosted_by.rpki_port is not None
- return self.hosted_by.rpki_port
- else:
- assert self.rpki_port is not None
- return self.rpki_port
-
- def setup_bpki_certs(self):
+ Write the config files for rootd.
"""
- Create BPKI certificates for this entity.
- """
- logger.info("Constructing BPKI keys and certs for %s", self.name)
- setup_bpki_cert_chain(name = self.name,
- ee = ("RPKI", "IRDB", "IRBE"),
- ca = ("SELF",))
- self.rpkid_ta = rpki.x509.X509(PEM_file = self.name + "-TA.cer")
- self.irbe_key = rpki.x509.RSA( PEM_file = self.name + "-IRBE.key")
- self.irbe_cert = rpki.x509.X509(PEM_file = self.name + "-IRBE.cer")
- self.rpkid_cert = rpki.x509.X509(PEM_file = self.name + "-RPKI.cer")
-
- def setup_conf_file(self):
+
+ rpkid.cross_certify(rootd_name + "-TA", reverse = True)
+ cross_certify(pubd_name + "-TA", rootd_name + "-TA")
+ logger.info("Writing config files for %s", rootd_name)
+ d = dict(rootd_name = rootd_name,
+ rootd_port = rootd_port,
+ rpkid_name = rpkid.name,
+ pubd_name = pubd_name,
+ rootd_sia = rootd_sia,
+ rsyncd_dir = rsyncd_dir,
+ openssl = prog_openssl,
+ lifetime = rootd_yaml.get("lifetime", "30d"),
+ pubd_port = pubd_port,
+ rootd_handle = db.root.client_handle + "-" + rootd_name)
+ f = open(rootd_name + ".conf", "w")
+ f.write(rootd_fmt_1 % d)
+ f.close()
+ s = "exec >/dev/null 2>&1\n"
+ #s = "set -x\n"
+ if not os.path.exists("root.key"):
+ s += rootd_fmt_2 % d
+ s += rootd_fmt_3 % d
+ subprocess.check_call(s, shell = True)
+
+def setup_rcynic():
"""
- Write config files for this entity.
+ Write the config file for rcynic.
"""
- logger.info("Writing config files for %s", self.name)
- assert self.rpki_port is not None
- d = { "my_name" : self.name,
- "irdb_db_name" : self.irdb_db_name,
- "irdb_db_pass" : irdb_db_pass,
- "irdb_port" : self.irdb_port,
- "rpki_db_name" : self.rpki_db_name,
- "rpki_db_pass" : rpki_db_pass,
- "rpki_port" : self.rpki_port }
- f = open(self.name + ".conf", "w")
- f.write(conf_fmt_1 % d)
- for line in self.extra_conf:
- f.write(line + "\n")
+
+ logger.info("Config file for rcynic")
+ d = dict(rcynic_name = rcynic_name,
+ rootd_name = rootd_name,
+ rootd_sia = rootd_sia)
+ f = open(rcynic_name + ".conf", "w")
+ f.write(rcynic_fmt_1 % d)
f.close()
- def setup_sql(self, rpki_sql, irdb_sql):
+def setup_rsyncd():
"""
- Set up this entity's IRDB.
+ Write the config file for rsyncd.
"""
- logger.info("Setting up MySQL for %s", self.name)
- db = MySQLdb.connect(user = "rpki", db = self.rpki_db_name, passwd = rpki_db_pass,
- conv = sql_conversions)
- cur = db.cursor()
- db.autocommit(True)
- for sql in rpki_sql:
- try:
- cur.execute(sql)
- except Exception:
- if "DROP TABLE IF EXISTS" not in sql.upper():
- raise
- db.close()
- db = MySQLdb.connect(user = "irdb", db = self.irdb_db_name, passwd = irdb_db_pass,
- conv = sql_conversions)
- cur = db.cursor()
- db.autocommit(True)
- for sql in irdb_sql:
- try:
- cur.execute(sql)
- except Exception:
- if "DROP TABLE IF EXISTS" not in sql.upper():
- raise
- for s in [self] + self.hosts:
- for kid in s.kids:
- cur.execute("INSERT registrant (registrant_handle, registry_handle, valid_until) VALUES (%s, %s, %s)",
- (kid.name, s.name, kid.resources.valid_until))
- db.close()
- def sync_sql(self):
+ logger.info("Config file for rsyncd")
+ d = dict(rsyncd_name = rsyncd_name,
+ rsyncd_port = rsyncd_port,
+ rsyncd_module = rsyncd_module,
+ rsyncd_dir = rsyncd_dir)
+ f = open(rsyncd_name + ".conf", "w")
+ f.write(rsyncd_fmt_1 % d)
+ f.close()
+
+def setup_publication(pubd_sql, irdb_db_name):
"""
- Whack this entity's IRDB to match our master database. We do this
- once during setup, then do it again every time we apply a delta to
- this entity.
+ Set up publication daemon.
"""
- logger.info("Updating MySQL data for IRDB %s", self.name)
- db = MySQLdb.connect(user = "irdb", db = self.irdb_db_name, passwd = irdb_db_pass,
+
+ logger.info("Configure publication daemon")
+ publication_dir = os.getcwd() + "/publication"
+ assert rootd_sia.startswith("rsync://")
+ global rsyncd_dir
+ rsyncd_dir = publication_dir + "/".join(rootd_sia.split("/")[4:])
+ if not rsyncd_dir.endswith("/"):
+ rsyncd_dir += "/"
+ os.makedirs(rsyncd_dir + "root/trunk")
+ db = MySQLdb.connect(db = pubd_db_name, user = pubd_db_user, passwd = pubd_db_pass,
conv = sql_conversions)
cur = db.cursor()
db.autocommit(True)
- cur.execute("DELETE FROM registrant_asn")
- cur.execute("DELETE FROM registrant_net")
- cur.execute("DELETE FROM roa_request_prefix")
- cur.execute("DELETE FROM roa_request")
- cur.execute("DELETE FROM ee_certificate_asn")
- cur.execute("DELETE FROM ee_certificate_net")
- cur.execute("DELETE FROM ee_certificate")
-
- for s in [self] + self.hosts:
- for kid in s.kids:
- cur.execute("SELECT registrant_id FROM registrant WHERE registrant_handle = %s AND registry_handle = %s",
- (kid.name, s.name))
- registrant_id = cur.fetchone()[0]
- for as_range in kid.resources.asn:
- cur.execute("INSERT registrant_asn (start_as, end_as, registrant_id) VALUES (%s, %s, %s)",
- (as_range.min, as_range.max, registrant_id))
- for v4_range in kid.resources.v4:
- cur.execute("INSERT registrant_net (start_ip, end_ip, version, registrant_id) VALUES (%s, %s, 4, %s)",
- (v4_range.min, v4_range.max, registrant_id))
- for v6_range in kid.resources.v6:
- cur.execute("INSERT registrant_net (start_ip, end_ip, version, registrant_id) VALUES (%s, %s, 6, %s)",
- (v6_range.min, v6_range.max, registrant_id))
- cur.execute("UPDATE registrant SET valid_until = %s WHERE registrant_id = %s",
- (kid.resources.valid_until, registrant_id))
- for r in s.roa_requests:
- cur.execute("INSERT roa_request (self_handle, asn) VALUES (%s, %s)",
- (s.name, r.asn))
- roa_request_id = cur.lastrowid
- for version, prefix_set in ((4, r.v4), (6, r.v6)):
- if prefix_set:
- cur.executemany("INSERT roa_request_prefix "
- "(roa_request_id, prefix, prefixlen, max_prefixlen, version) "
- "VALUES (%s, %s, %s, %s, %s)",
- ((roa_request_id, x.prefix, x.prefixlen, x.max_prefixlen, version)
- for x in prefix_set))
- for r in s.router_certs:
- cur.execute("INSERT ee_certificate (self_handle, pkcs10, gski, cn, sn, eku, valid_until) "
- "VALUES (%s, %s, %s, %s, %s, %s, %s)",
- (s.name, r.pkcs10.get_DER(), r.gski, r.cn, r.sn, r.eku, s.resources.valid_until))
- ee_certificate_id = cur.lastrowid
- cur.executemany("INSERT ee_certificate_asn (ee_certificate_id, start_as, end_as) VALUES (%s, %s, %s)",
- ((ee_certificate_id, a.min, a.max) for a in r.asn))
+ for sql in pubd_sql:
+ try:
+ cur.execute(sql)
+ except Exception:
+ if "DROP TABLE IF EXISTS" not in sql.upper():
+ raise
db.close()
+ d = dict(pubd_name = pubd_name,
+ pubd_port = pubd_port,
+ pubd_db_name = pubd_db_name,
+ pubd_db_user = pubd_db_user,
+ pubd_db_pass = pubd_db_pass,
+ pubd_dir = rsyncd_dir,
+ irdb_db_name = irdb_db_name,
+ irdb_db_pass = irdb_db_pass)
+ f = open(pubd_name + ".conf", "w")
+ f.write(pubd_fmt_1 % d)
+ f.close()
+ global pubd_ta
+ global pubd_irbe_key
+ global pubd_irbe_cert
+ global pubd_pubd_cert
+ pubd_ta = rpki.x509.X509(Auto_file = pubd_name + "-TA.cer")
+ pubd_irbe_key = rpki.x509.RSA( Auto_file = pubd_name + "-IRBE.key")
+ pubd_irbe_cert = rpki.x509.X509(Auto_file = pubd_name + "-IRBE.cer")
+ pubd_pubd_cert = rpki.x509.X509(Auto_file = pubd_name + "-PUBD.cer")
- def run_daemons(self):
- """
- Run daemons for this entity.
- """
- logger.info("Running daemons for %s", self.name)
- self.rpkid_process = subprocess.Popen((prog_python, prog_rpkid, "--foreground", "--log-stdout", "--log-level", "debug", "--config", self.name + ".conf") +
- (("--profile", self.name + ".prof") if args.profile else ()))
- self.irdbd_process = subprocess.Popen((prog_python, prog_irdbd, "--foreground", "--log-stdout", "--log-level", "debug", "--config", self.name + ".conf"))
-
- def kill_daemons(self):
- """
- Kill daemons for this entity.
- """
- # pylint: disable=E1103
- for proc, name in ((self.rpkid_process, "rpkid"),
- (self.irdbd_process, "irdbd")):
- if proc is not None and proc.poll() is None:
- logger.info("Killing daemon %s pid %s for %s", name, proc.pid, self.name)
- try:
- proc.terminate()
- except OSError:
- pass
- if proc is not None:
- logger.info("Daemon %s pid %s for %s exited with code %s",
- name, proc.pid, self.name, proc.wait())
-
- def call_rpkid(self, pdus, cb):
+def call_pubd(pdus, cb):
"""
- Send a left-right message to this entity's RPKI daemon and return
+ Send a publication control message to publication daemon and return
the response.
-
- If this entity is hosted (does not run its own RPKI daemon), all
- of this happens with the hosting RPKI daemon.
"""
- logger.info("Calling rpkid for %s", self.name)
-
- if self.is_hosted:
- logger.info("rpkid %s is hosted by rpkid %s, switching", self.name, self.hosted_by.name)
- self = self.hosted_by
- assert not self.is_hosted
-
- assert isinstance(pdus, (list, tuple))
- assert self.rpki_port is not None
-
- q_msg = rpki.left_right.msg.query(*pdus)
- q_cms = rpki.left_right.cms_msg()
- q_der = q_cms.wrap(q_msg, self.irbe_key, self.irbe_cert)
- q_url = "http://localhost:%d/left-right" % self.rpki_port
+ logger.info("Calling pubd")
+ q_msg = rpki.publication_control.msg.query(*pdus)
+ q_cms = rpki.publication_control.cms_msg_saxify()
+ q_der = q_cms.wrap(q_msg, pubd_irbe_key, pubd_irbe_cert)
+ q_url = "http://localhost:%d/control" % pubd_port
logger.debug(q_cms.pretty_print_content())
- def done(r_der):
- logger.info("Callback from rpkid %s", self.name)
- r_cms = rpki.left_right.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap((self.rpkid_ta, self.rpkid_cert))
- self.last_cms_time = r_cms.check_replay(self.last_cms_time, q_url)
- logger.debug(r_cms.pretty_print_content())
- assert r_msg.is_reply
- for r_pdu in r_msg:
- assert not isinstance(r_pdu, rpki.left_right.report_error_elt)
- cb(r_msg)
+ def call_pubd_cb(r_der):
+ global pubd_last_cms_time
+ r_cms = rpki.publication_control.cms_msg_saxify(DER = r_der)
+ r_msg = r_cms.unwrap((pubd_ta, pubd_pubd_cert))
+ pubd_last_cms_time = r_cms.check_replay(pubd_last_cms_time, q_url)
+ logger.debug(r_cms.pretty_print_content())
+ assert r_msg.is_reply
+ for r_pdu in r_msg:
+ r_pdu.raise_if_error()
+ cb(r_msg)
- def lose(e):
- raise
+ def call_pubd_eb(e):
+ logger.exception("Problem calling pubd")
rpki.http.client(
- url = q_url,
- msg = q_der,
- callback = done,
- errback = lose)
+ url = q_url,
+ msg = q_der,
+ callback = call_pubd_cb,
+ errback = call_pubd_eb)
+
- def cross_certify(self, certificant, reverse = False):
+def cross_certify(certificant, certifier):
"""
Cross-certify and return the resulting certificate.
"""
- if reverse:
- certifier = certificant
- certificant = self.name + "-SELF"
- else:
- certifier = self.name + "-SELF"
certfile = certifier + "-" + certificant + ".cer"
logger.info("Cross certifying %s into %s's BPKI (%s)", certificant, certifier, certfile)
@@ -938,424 +1323,58 @@ class allocation(object):
notAfter = now + rpki.sundial.timedelta(days = 30)
try:
- f = open(serial_file, "r")
- serial = f.read()
- f.close()
- serial = int(serial.splitlines()[0], 16)
+ with open(serial_file, "r") as f:
+ serial = int(f.read().splitlines()[0], 16)
except IOError:
- serial = 1
+ serial = 1
x = parent.bpki_cross_certify(
- keypair = keypair,
- source_cert = child,
- serial = serial,
- notAfter = notAfter,
- now = now)
-
- f = open(serial_file, "w")
- f.write("%02x\n" % (serial + 1))
- f.close()
+ keypair = keypair,
+ source_cert = child,
+ serial = serial,
+ notAfter = notAfter,
+ now = now)
- f = open(certfile, "w")
- f.write(x.get_PEM())
- f.close()
+ with open(serial_file, "w") as f:
+ f.write("%02x\n" % (serial + 1))
+
+ with open(certfile, "w") as f:
+ f.write(x.get_PEM())
logger.debug("Cross certified %s:", certfile)
logger.debug(" Issuer %s [%s]", x.getIssuer(), x.hAKI())
logger.debug(" Subject %s [%s]", x.getSubject(), x.hSKI())
return x
- def create_rpki_objects(self, cb):
- """
- Create RPKI engine objects for this engine.
-
- Root node of the engine tree is special, it too has a parent but
- that one is the magic self-signed micro engine.
-
- The rest of this is straightforward. There are a lot of objects
- to create, but we can do batch them all into one honking PDU, then
- issue one more PDU to set BSC EE certificates based on the PKCS
- #10 requests we get back when we tell rpkid to generate BSC keys.
- """
-
- assert not self.is_hosted
-
- selves = [self] + self.hosts
-
- for i, s in enumerate(selves):
- logger.info("Creating RPKI objects for [%d] %s", i, s.name)
-
- rpkid_pdus = []
- pubd_pdus = []
-
- for s in selves:
-
- rpkid_pdus.append(rpki.left_right.self_elt.make_pdu(
- action = "create",
- self_handle = s.name,
- crl_interval = s.crl_interval,
- regen_margin = s.regen_margin,
- bpki_cert = (s.cross_certify(s.hosted_by.name + "-TA", reverse = True)
- if s.is_hosted else
- rpki.x509.X509(Auto_file = s.name + "-SELF.cer"))))
-
- rpkid_pdus.append(rpki.left_right.bsc_elt.make_pdu(
- action = "create",
- self_handle = s.name,
- bsc_handle = "b",
- generate_keypair = True))
-
- pubd_pdus.append(rpki.publication.client_elt.make_pdu(
- action = "create",
- client_handle = s.client_handle,
- base_uri = s.sia_base,
- bpki_cert = s.cross_certify(pubd_name + "-TA", reverse = True)))
-
- rpkid_pdus.append(rpki.left_right.repository_elt.make_pdu(
- action = "create",
- self_handle = s.name,
- bsc_handle = "b",
- repository_handle = "r",
- bpki_cert = s.cross_certify(pubd_name + "-TA"),
- peer_contact_uri = "http://localhost:%d/client/%s" % (pubd_port, s.client_handle)))
-
- for k in s.kids:
- rpkid_pdus.append(rpki.left_right.child_elt.make_pdu(
- action = "create",
- self_handle = s.name,
- child_handle = k.name,
- bsc_handle = "b",
- bpki_cert = s.cross_certify(k.name + "-SELF")))
-
- if s.is_root:
- rootd_cert = s.cross_certify(rootd_name + "-TA")
- rpkid_pdus.append(rpki.left_right.parent_elt.make_pdu(
- action = "create",
- self_handle = s.name,
- parent_handle = "rootd",
- bsc_handle = "b",
- repository_handle = "r",
- sia_base = s.sia_base,
- bpki_cms_cert = rootd_cert,
- sender_name = s.name,
- recipient_name = "rootd",
- peer_contact_uri = "http://localhost:%s/" % rootd_port))
- else:
- rpkid_pdus.append(rpki.left_right.parent_elt.make_pdu(
- action = "create",
- self_handle = s.name,
- parent_handle = s.parent.name,
- bsc_handle = "b",
- repository_handle = "r",
- sia_base = s.sia_base,
- bpki_cms_cert = s.cross_certify(s.parent.name + "-SELF"),
- sender_name = s.name,
- recipient_name = s.parent.name,
- peer_contact_uri = "http://localhost:%s/up-down/%s/%s" % (s.parent.get_rpki_port(),
- s.parent.name, s.name)))
-
- def one():
- call_pubd(pubd_pdus, cb = two)
-
- def two(vals):
- self.call_rpkid(rpkid_pdus, cb = three)
-
- def three(vals):
-
- bsc_dict = dict((b.self_handle, b) for b in vals if isinstance(b, rpki.left_right.bsc_elt))
-
- bsc_pdus = []
-
- for s in selves:
- b = bsc_dict[s.name]
-
- logger.info("Issuing BSC EE cert for %s", s.name)
- cmd = (prog_openssl, "x509", "-req", "-sha256", "-extfile", s.name + "-RPKI.conf",
- "-extensions", "req_x509_ext", "-days", "30",
- "-CA", s.name + "-SELF.cer", "-CAkey", s.name + "-SELF.key", "-CAcreateserial", "-text")
- signer = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
- signed = signer.communicate(input = b.pkcs10_request.get_PEM())
- if not signed[0]:
- logger.warning(signed[1])
- raise CouldntIssueBSCEECertificate("Couldn't issue BSC EE certificate")
- s.bsc_ee = rpki.x509.X509(PEM = signed[0])
- s.bsc_crl = rpki.x509.CRL(PEM_file = s.name + "-SELF.crl")
- logger.info("BSC EE cert for %s SKI %s", s.name, s.bsc_ee.hSKI())
-
- bsc_pdus.append(rpki.left_right.bsc_elt.make_pdu(
- action = "set",
- self_handle = s.name,
- bsc_handle = "b",
- signing_cert = s.bsc_ee,
- signing_cert_crl = s.bsc_crl))
-
- self.call_rpkid(bsc_pdus, cb = four)
-
- def four(vals):
- cb()
-
- one()
-
- def setup_yaml_leaf(self):
- """
- Generate certificates and write YAML scripts for leaf nodes.
-
- We're cheating a bit here: properly speaking, we can't generate
- issue or revoke requests without knowing the class, which is
- generated on the fly, but at the moment the test case is
- simplistic enough that the class will always be "1", so we just
- wire in that value for now.
-
- Well, ok, we just broke that assumption. Now we do something even
- nastier, just to eke a bit more life out of this kludge. This
- really needs to be rewritten, but it may require a different tool
- than testpoke.
- """
-
- if not os.path.exists(self.name + ".key"):
- logger.info("Generating RPKI key for %s", self.name)
- subprocess.check_call((prog_openssl, "genrsa", "-out", self.name + ".key", "2048" ),
- stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
- ski = rpki.x509.RSA(PEM_file = self.name + ".key").gSKI()
-
- if self.parent.is_hosted:
- parent_host = self.parent.hosted_by.name
- else:
- parent_host = self.parent.name
-
- self.cross_certify(self.parent.name + "-SELF")
- self.cross_certify(parent_host + "-TA")
-
- logger.info("Writing leaf YAML for %s", self.name)
- f = open(self.name + ".yaml", "w")
- f.write(yaml_fmt_1 % {
- "parent_name" : self.parent.name,
- "parent_host" : parent_host,
- "my_name" : self.name,
- "http_port" : self.parent.get_rpki_port(),
- "class_name" : 2 if self.parent.is_hosted else 1,
- "sia" : self.sia_base,
- "ski" : ski })
- f.close()
+last_rcynic_run = None
- def run_cron(self, cb):
+def run_rcynic():
"""
- Trigger cron run for this engine.
+ Run rcynic to see whether what was published makes sense.
"""
- logger.info("Running cron for %s", self.name)
-
- assert self.rpki_port is not None
+ logger.info("Running rcynic")
+ env = os.environ.copy()
+ env["TZ"] = ""
+ global last_rcynic_run
+ if int(time.time()) == last_rcynic_run:
+ time.sleep(1)
+ subprocess.check_call((prog_rcynic, "-c", rcynic_name + ".conf"), env = env)
+ subprocess.call(rcynic_stats, shell = True, env = env)
+ last_rcynic_run = int(time.time())
+ os.link("%s.xml" % rcynic_name, "%s.%s.xml" % (rcynic_name, last_rcynic_run))
- def done(result):
- assert result == "OK", 'Expected "OK" result from cronjob, got %r' % result
- cb()
-
- rpki.http.client(
- url = "http://localhost:%d/cronjob" % self.rpki_port,
- msg = "Run cron now, please",
- callback = done,
- errback = done)
-
- def run_yaml(self):
+def mangle_sql(filename):
"""
- Run YAML scripts for this leaf entity. Since we're not bothering
- to check the class list returned by the list command, the issue
- command may fail, so we treat failure of the list command as an
- error, but only issue a warning when issue fails.
+ Mangle an SQL file into a sequence of SQL statements.
"""
- logger.info("Running YAML for %s", self.name)
- subprocess.check_call((prog_python, prog_poke, "-y", self.name + ".yaml", "-r", "list"))
- if subprocess.call((prog_python, prog_poke, "-y", self.name + ".yaml", "-r", "issue")) != 0:
- logger.warning("YAML issue command failed for %s, continuing", self.name)
-
-def setup_bpki_cert_chain(name, ee = (), ca = ()):
- """
- Build a set of BPKI certificates.
- """
- s = "exec >/dev/null 2>&1\n"
- #s = "set -x\n"
- for kind in ("TA",) + ee + ca:
- d = { "name" : name,
- "kind" : kind,
- "ca" : "false" if kind in ee else "true",
- "openssl" : prog_openssl }
- f = open("%(name)s-%(kind)s.conf" % d, "w")
- f.write(bpki_cert_fmt_1 % d)
+ words = []
+ f = open(filename)
+ for line in f:
+ words.extend(line.partition("--")[0].split())
f.close()
- if not os.path.exists("%(name)s-%(kind)s.key" % d):
- s += bpki_cert_fmt_2 % d
- s += bpki_cert_fmt_3 % d
- d = { "name" : name, "openssl" : prog_openssl }
- s += bpki_cert_fmt_4 % d
- for kind in ee + ca:
- d["kind"] = kind
- s += bpki_cert_fmt_5 % d
- for kind in ("TA",) + ca:
- d["kind"] = kind
- s += bpki_cert_fmt_6 % d
- subprocess.check_call(s, shell = True)
-
-def setup_rootd(rpkid, rootd_yaml):
- """
- Write the config files for rootd.
- """
- rpkid.cross_certify(rootd_name + "-TA", reverse = True)
- logger.info("Writing config files for %s", rootd_name)
- d = { "rootd_name" : rootd_name,
- "rootd_port" : rootd_port,
- "rpkid_name" : rpkid.name,
- "rootd_sia" : rootd_sia,
- "rsyncd_dir" : rsyncd_dir,
- "openssl" : prog_openssl,
- "lifetime" : rootd_yaml.get("lifetime", "30d") }
- f = open(rootd_name + ".conf", "w")
- f.write(rootd_fmt_1 % d)
- f.close()
- s = "exec >/dev/null 2>&1\n"
- #s = "set -x\n"
- if not os.path.exists("root.key"):
- s += rootd_fmt_2 % d
- s += rootd_fmt_3 % d
- subprocess.check_call(s, shell = True)
-
-def setup_rcynic():
- """
- Write the config file for rcynic.
- """
- logger.info("Config file for rcynic")
- d = { "rcynic_name" : rcynic_name,
- "rootd_name" : rootd_name,
- "rootd_sia" : rootd_sia }
- f = open(rcynic_name + ".conf", "w")
- f.write(rcynic_fmt_1 % d)
- f.close()
-
-def setup_rsyncd():
- """
- Write the config file for rsyncd.
- """
- logger.info("Config file for rsyncd")
- d = { "rsyncd_name" : rsyncd_name,
- "rsyncd_port" : rsyncd_port,
- "rsyncd_module" : rsyncd_module,
- "rsyncd_dir" : rsyncd_dir }
- f = open(rsyncd_name + ".conf", "w")
- f.write(rsyncd_fmt_1 % d)
- f.close()
-
-def setup_publication(pubd_sql):
- """
- Set up publication daemon.
- """
- logger.info("Configure publication daemon")
- publication_dir = os.getcwd() + "/publication"
- assert rootd_sia.startswith("rsync://")
- global rsyncd_dir
- rsyncd_dir = publication_dir + "/".join(rootd_sia.split("/")[4:])
- if not rsyncd_dir.endswith("/"):
- rsyncd_dir += "/"
- os.makedirs(rsyncd_dir + "root/trunk")
- db = MySQLdb.connect(db = pubd_db_name, user = pubd_db_user, passwd = pubd_db_pass,
- conv = sql_conversions)
- cur = db.cursor()
- db.autocommit(True)
- for sql in pubd_sql:
- try:
- cur.execute(sql)
- except Exception:
- if "DROP TABLE IF EXISTS" not in sql.upper():
- raise
- db.close()
- d = { "pubd_name" : pubd_name,
- "pubd_port" : pubd_port,
- "pubd_db_name" : pubd_db_name,
- "pubd_db_user" : pubd_db_user,
- "pubd_db_pass" : pubd_db_pass,
- "pubd_dir" : rsyncd_dir }
- f = open(pubd_name + ".conf", "w")
- f.write(pubd_fmt_1 % d)
- f.close()
- global pubd_ta
- global pubd_irbe_key
- global pubd_irbe_cert
- global pubd_pubd_cert
- pubd_ta = rpki.x509.X509(Auto_file = pubd_name + "-TA.cer")
- pubd_irbe_key = rpki.x509.RSA( Auto_file = pubd_name + "-IRBE.key")
- pubd_irbe_cert = rpki.x509.X509(Auto_file = pubd_name + "-IRBE.cer")
- pubd_pubd_cert = rpki.x509.X509(Auto_file = pubd_name + "-PUBD.cer")
-
-def call_pubd(pdus, cb):
- """
- Send a publication message to publication daemon and return the
- response.
- """
- logger.info("Calling pubd")
- q_msg = rpki.publication.msg.query(*pdus)
- q_cms = rpki.publication.cms_msg()
- q_der = q_cms.wrap(q_msg, pubd_irbe_key, pubd_irbe_cert)
- q_url = "http://localhost:%d/control" % pubd_port
-
- logger.debug(q_cms.pretty_print_content())
-
- def call_pubd_cb(r_der):
- global pubd_last_cms_time
- r_cms = rpki.publication.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap((pubd_ta, pubd_pubd_cert))
- pubd_last_cms_time = r_cms.check_replay(pubd_last_cms_time, q_url)
- logger.debug(r_cms.pretty_print_content())
- assert r_msg.is_reply
- for r_pdu in r_msg:
- assert not isinstance(r_pdu, rpki.publication.report_error_elt)
- cb(r_msg)
-
- def call_pubd_eb(e):
- logger.exception("Problem calling pubd")
-
- rpki.http.client(
- url = q_url,
- msg = q_der,
- callback = call_pubd_cb,
- errback = call_pubd_eb)
-
-def set_pubd_crl(cb):
- """
- Whack publication daemon's bpki_crl. This must be configured before
- publication daemon starts talking to its clients, and must be
- updated whenever we update the CRL.
- """
- logger.info("Setting pubd's BPKI CRL")
- crl = rpki.x509.CRL(Auto_file = pubd_name + "-TA.crl")
- call_pubd([rpki.publication.config_elt.make_pdu(action = "set", bpki_crl = crl)], cb = lambda ignored: cb())
-
-last_rcynic_run = None
-
-def run_rcynic():
- """
- Run rcynic to see whether what was published makes sense.
- """
- logger.info("Running rcynic")
- env = os.environ.copy()
- env["TZ"] = ""
- global last_rcynic_run
- if int(time.time()) == last_rcynic_run:
- time.sleep(1)
- subprocess.check_call((prog_rcynic, "-c", rcynic_name + ".conf"), env = env)
- subprocess.call(rcynic_stats, shell = True, env = env)
- last_rcynic_run = int(time.time())
- os.link("%s.xml" % rcynic_name, "%s.%s.xml" % (rcynic_name, last_rcynic_run))
-
-def mangle_sql(filename):
- """
- Mangle an SQL file into a sequence of SQL statements.
- """
- words = []
- f = open(filename)
- for line in f:
- words.extend(line.partition("--")[0].split())
- f.close()
- return " ".join(words).strip(";").split(";")
+ return " ".join(words).strip(";").split(";")
bpki_cert_fmt_1 = '''\
[req]
@@ -1432,88 +1451,57 @@ bpki_cert_fmt_6 = ''' && \
-config %(name)s-%(kind)s.conf \
'''
-yaml_fmt_1 = '''---
-version: 1
-posturl: http://localhost:%(http_port)s/up-down/%(parent_name)s/%(my_name)s
-recipient-id: "%(parent_name)s"
-sender-id: "%(my_name)s"
-
-cms-cert-file: %(my_name)s-RPKI.cer
-cms-key-file: %(my_name)s-RPKI.key
-cms-ca-cert-file: %(my_name)s-TA.cer
-cms-crl-file: %(my_name)s-TA.crl
-cms-ca-certs-file:
- - %(my_name)s-TA-%(parent_name)s-SELF.cer
-
-ssl-cert-file: %(my_name)s-RPKI.cer
-ssl-key-file: %(my_name)s-RPKI.key
-ssl-ca-cert-file: %(my_name)s-TA.cer
-ssl-ca-certs-file:
- - %(my_name)s-TA-%(parent_host)s-TA.cer
-
-# We're cheating here by hardwiring the class name
-
-requests:
- list:
- type: list
- issue:
- type: issue
- class: %(class_name)s
- sia:
- - %(sia)s
- cert-request-key-file: %(my_name)s.key
- revoke:
- type: revoke
- class: %(class_name)s
- ski: %(ski)s
-'''
-
conf_fmt_1 = '''\
[irdbd]
-startup-message = This is %(my_name)s irdbd
+startup-message = This is %(my_name)s irdbd
-sql-database = %(irdb_db_name)s
-sql-username = irdb
-sql-password = %(irdb_db_pass)s
-bpki-ta = %(my_name)s-TA.cer
-rpkid-cert = %(my_name)s-RPKI.cer
-irdbd-cert = %(my_name)s-IRDB.cer
-irdbd-key = %(my_name)s-IRDB.key
-http-url = http://localhost:%(irdb_port)d/
-enable_tracebacks = yes
+sql-database = %(irdb_db_name)s
+sql-username = irdb
+sql-password = %(irdb_db_pass)s
+bpki-ta = %(my_name)s-TA.cer
+rpkid-cert = %(my_name)s-RPKI.cer
+irdbd-cert = %(my_name)s-IRDB.cer
+irdbd-key = %(my_name)s-IRDB.key
+http-url = http://localhost:%(irdb_port)d/
+enable_tracebacks = yes
[irbe_cli]
-rpkid-bpki-ta = %(my_name)s-TA.cer
-rpkid-cert = %(my_name)s-RPKI.cer
-rpkid-irbe-cert = %(my_name)s-IRBE.cer
-rpkid-irbe-key = %(my_name)s-IRBE.key
-rpkid-url = http://localhost:%(rpki_port)d/left-right
-enable_tracebacks = yes
+rpkid-bpki-ta = %(my_name)s-TA.cer
+rpkid-cert = %(my_name)s-RPKI.cer
+rpkid-irbe-cert = %(my_name)s-IRBE.cer
+rpkid-irbe-key = %(my_name)s-IRBE.key
+rpkid-url = http://localhost:%(rpki_port)d/left-right
+enable_tracebacks = yes
[rpkid]
-startup-message = This is %(my_name)s rpkid
+startup-message = This is %(my_name)s rpkid
-sql-database = %(rpki_db_name)s
-sql-username = rpki
-sql-password = %(rpki_db_pass)s
+sql-database = %(rpki_db_name)s
+sql-username = rpki
+sql-password = %(rpki_db_pass)s
-bpki-ta = %(my_name)s-TA.cer
-rpkid-key = %(my_name)s-RPKI.key
-rpkid-cert = %(my_name)s-RPKI.cer
-irdb-cert = %(my_name)s-IRDB.cer
-irbe-cert = %(my_name)s-IRBE.cer
+bpki-ta = %(my_name)s-TA.cer
+rpkid-key = %(my_name)s-RPKI.key
+rpkid-cert = %(my_name)s-RPKI.cer
+irdb-cert = %(my_name)s-IRDB.cer
+irbe-cert = %(my_name)s-IRBE.cer
-irdb-url = http://localhost:%(irdb_port)d/
+irdb-url = http://localhost:%(irdb_port)d/
+
+server-host = localhost
+server-port = %(rpki_port)d
-server-host = localhost
-server-port = %(rpki_port)d
+use-internal-cron = false
+enable_tracebacks = yes
-use-internal-cron = false
-enable_tracebacks = yes
+[myrpki]
+start_rpkid = yes
+start_irdbd = yes
+start_pubd = no
'''
rootd_fmt_1 = '''\
@@ -1525,24 +1513,28 @@ rootd-bpki-cert = %(rootd_name)s-RPKI.cer
rootd-bpki-key = %(rootd_name)s-RPKI.key
rootd-bpki-crl = %(rootd_name)s-TA.crl
child-bpki-cert = %(rootd_name)s-TA-%(rpkid_name)s-SELF.cer
+pubd-bpki-cert = %(rootd_name)s-TA-%(pubd_name)s-TA.cer
server-port = %(rootd_port)s
-rpki-root-dir = %(rsyncd_dir)sroot
-rpki-base-uri = %(rootd_sia)sroot/
-rpki-root-cert-uri = %(rootd_sia)sroot.cer
+rpki-class-name = trunk
-rpki-root-key = root.key
-rpki-root-cert = root.cer
+pubd-contact-uri = http://localhost:%(pubd_port)d/client/%(rootd_handle)s
+
+rpki-root-cert-file = root.cer
+rpki-root-cert-uri = %(rootd_sia)sroot.cer
+rpki-root-key-file = root.key
-rpki-subject-pkcs10 = %(rootd_name)s.subject.pkcs10
+rpki-subject-cert-file = trunk.cer
+rpki-subject-cert-uri = %(rootd_sia)sroot/trunk.cer
+rpki-subject-pkcs10-file= trunk.p10
rpki-subject-lifetime = %(lifetime)s
-rpki-root-crl = root.crl
-rpki-root-manifest = root.mft
+rpki-root-crl-file = root.crl
+rpki-root-crl-uri = %(rootd_sia)sroot/root.crl
-rpki-class-name = trunk
-rpki-subject-cert = trunk.cer
+rpki-root-manifest-file = root.mft
+rpki-root-manifest-uri = %(rootd_sia)sroot/root.mft
include-bpki-crl = yes
enable_tracebacks = yes
@@ -1579,7 +1571,7 @@ certificatePolicies = critical, @rpki_certificate_policy
[rpki_certificate_policy]
-policyIdentifier = 1.3.6.1.5.5.7.14.2
+policyIdentifier = 1.3.6.1.5.5.7.14.2
'''
rootd_fmt_2 = '''\
@@ -1602,8 +1594,7 @@ awk '!/-----(BEGIN|END)/' >>%(rootd_name)s.tal &&
-outform DER \
-extfile %(rootd_name)s.conf \
-extensions req_x509_rpki_ext \
- -signkey root.key &&
-ln -f root.cer %(rsyncd_dir)s
+ -signkey root.key
'''
rcynic_fmt_1 = '''\
@@ -1636,6 +1627,7 @@ sql-database = %(pubd_db_name)s
sql-username = %(pubd_db_user)s
sql-password = %(pubd_db_pass)s
bpki-ta = %(pubd_name)s-TA.cer
+pubd-crl = %(pubd_name)s-TA.crl
pubd-cert = %(pubd_name)s-PUBD.cer
pubd-key = %(pubd_name)s-PUBD.key
irbe-cert = %(pubd_name)s-IRBE.cer
@@ -1643,6 +1635,17 @@ server-host = localhost
server-port = %(pubd_port)d
publication-base = %(pubd_dir)s
enable_tracebacks = yes
+
+[irdbd]
+
+sql-database = %(irdb_db_name)s
+sql-username = irdb
+sql-password = %(irdb_db_pass)s
+
+[myrpki]
+start_rpkid = no
+start_irdbd = no
+start_pubd = yes
'''
main()
diff --git a/ca/tests/sql-cleaner.py b/ca/tests/sql-cleaner.py
index ca88d456..c518b77b 100644
--- a/ca/tests/sql-cleaner.py
+++ b/ca/tests/sql-cleaner.py
@@ -19,43 +19,33 @@
"""
import rpki.config
-import rpki.sql_schemas
from rpki.mysql_import import MySQLdb
-cfg = rpki.config.parser(None, "yamltest", allow_missing = True)
+cfg = rpki.config.parser(section = "yamltest", allow_missing = True)
for name in ("rpkid", "irdbd", "pubd"):
- username = cfg.get("%s_sql_username" % name, name[:4])
- password = cfg.get("%s_sql_password" % name, "fnord")
+ username = cfg.get("%s_sql_username" % name, name[:4])
+ password = cfg.get("%s_sql_password" % name, "fnord")
- schema = []
- for line in getattr(rpki.sql_schemas, name, "").splitlines():
- schema.extend(line.partition("--")[0].split())
- schema = " ".join(schema).strip(";").split(";")
- schema = [statement.strip() for statement in schema if statement and "DROP TABLE" not in statement]
+ db = MySQLdb.connect(user = username, passwd = password)
+ cur = db.cursor()
- db = MySQLdb.connect(user = username, passwd = password)
- cur = db.cursor()
+ cur.execute("SHOW DATABASES")
- cur.execute("SHOW DATABASES")
+ databases = [r[0] for r in cur.fetchall() if r[0][:4] == name[:4] and r[0][4:].isdigit()]
- databases = [r[0] for r in cur.fetchall() if r[0][:4] == name[:4] and r[0][4:].isdigit()]
+ for database in databases:
- for database in databases:
+ cur.execute("USE " + database)
- cur.execute("USE " + database)
+ cur.execute("SHOW TABLES")
+ tables = [r[0] for r in cur.fetchall()]
- cur.execute("SHOW TABLES")
- tables = [r[0] for r in cur.fetchall()]
+ cur.execute("SET foreign_key_checks = 0")
+ for table in tables:
+ cur.execute("DROP TABLE %s" % table)
+ cur.execute("SET foreign_key_checks = 1")
- cur.execute("SET foreign_key_checks = 0")
- for table in tables:
- cur.execute("DROP TABLE %s" % table)
- cur.execute("SET foreign_key_checks = 1")
-
- for statement in schema:
- cur.execute(statement)
-
- cur.close()
- db.close()
+ cur.close()
+ db.close()
diff --git a/ca/tests/sql-dumper.py b/ca/tests/sql-dumper.py
index 19cc1b34..af24f2d4 100644
--- a/ca/tests/sql-dumper.py
+++ b/ca/tests/sql-dumper.py
@@ -22,22 +22,22 @@ import subprocess
import rpki.config
from rpki.mysql_import import MySQLdb
-cfg = rpki.config.parser(None, "yamltest", allow_missing = True)
+cfg = rpki.config.parser(section = "yamltest", allow_missing = True)
for name in ("rpkid", "irdbd", "pubd"):
- username = cfg.get("%s_sql_username" % name, name[:4])
- password = cfg.get("%s_sql_password" % name, "fnord")
+ username = cfg.get("%s_sql_username" % name, name[:4])
+ password = cfg.get("%s_sql_password" % name, "fnord")
- cmd = ["mysqldump", "-u", username, "-p" + password, "--databases"]
+ cmd = ["mysqldump", "-u", username, "-p" + password, "--databases"]
- db = MySQLdb.connect(user = username, passwd = password)
- cur = db.cursor()
+ db = MySQLdb.connect(user = username, passwd = password)
+ cur = db.cursor()
- cur.execute("SHOW DATABASES")
- cmd.extend(r[0] for r in cur.fetchall() if r[0][:4] == name[:4] and r[0][4:].isdigit())
+ cur.execute("SHOW DATABASES")
+ cmd.extend(r[0] for r in cur.fetchall() if r[0][:4] == name[:4] and r[0][4:].isdigit())
- cur.close()
- db.close()
+ cur.close()
+ db.close()
- subprocess.check_call(cmd, stdout = open("backup.%s.sql" % name, "w"))
+ subprocess.check_call(cmd, stdout = open("backup.%s.sql" % name, "w"))
diff --git a/ca/tests/test-rrdp.py b/ca/tests/test-rrdp.py
new file mode 100755
index 00000000..97797444
--- /dev/null
+++ b/ca/tests/test-rrdp.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+# $Id$
+#
+# Copyright (C) 2013 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Proof-of-concept test driver for RRDP code. Still fairly kludgy in places.
+"""
+
+import os
+import sys
+import glob
+import time
+import signal
+import textwrap
+import argparse
+import subprocess
+
+parser = argparse.ArgumentParser(description = __doc__)
+parser.add_argument("--use-smoketest", action = "store_true")
+parser.add_argument("--yaml-file", default = "smoketest.2.yaml")
+parser.add_argument("--delay", type = int, default = 30)
+parser.add_argument("--exhaustive", action = "store_true")
+parser.add_argument("--skip-daemons", action = "store_true")
+parser.add_argument("--dry-run", action = "store_true")
+args = parser.parse_args()
+
+def log(msg):
+ sys.stdout.write(msg + "\n")
+ sys.stdout.flush()
+
+def run(*argv):
+ log("Running: " + " ".join(argv))
+ if not args.dry_run:
+ subprocess.check_call(argv)
+
+def dataglob(pattern):
+ return glob.iglob(os.path.join(("smoketest.dir" if args.use_smoketest else "yamltest.dir/RIR"), pattern))
+
+def snapshot_to_serial(fn):
+ return int(os.path.splitext(os.path.basename(fn))[0])
+
+def delta_to_serial(fn):
+ return int(os.path.splitext(os.path.basename(fn))[0])
+
+top = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), "..", ".."))
+
+rrdp_test_tool = os.path.join(top, "potpourri/rrdp-test-tool")
+rcynic = os.path.join(top, "rp/rcynic/rcynic")
+rcynic_text = os.path.join(top, "rp/rcynic/rcynic-text")
+
+with open("rcynic-rrdp.conf", "w") as f:
+ f.write(textwrap.dedent('''# Automatically generated for RRDP tests, do not edit.
+ [rcynic]
+ xml-summary = rcynic.xml
+ jitter = 0
+ use-links = yes
+ use-syslog = no
+ use-stderr = yes
+ log-level = log_debug
+ run-rsync = no
+ '''))
+ if args.use_smoketest:
+ f.write("trust-anchor = smoketest.dir/root.cer\n")
+ else:
+ f.write("trust-anchor = yamltest.dir/RIR/publication/RIR-root/root.cer\n")
+
+if args.skip_daemons:
+ log("--skip-daemons specified, so running neither smoketest nor yamltest")
+elif args.use_smoketest:
+ run("python", "smoketest.py", args.yaml_file)
+else:
+ run("python", "sql-cleaner.py")
+ class GotSIGUSR1(Exception):
+ pass
+ def handle_sigusr1(signum, frame):
+ raise GotSIGUSR1
+ old_sigusr1 = signal.signal(signal.SIGUSR1, handle_sigusr1)
+ cmd = ("python", "yamltest.py", args.yaml_file, "--notify-when-startup-complete", str(os.getpid()))
+ log("Running: " + " ".join(cmd))
+ yamltest = subprocess.Popen(cmd)
+ log("Waiting for SIGUSR1 from yamltest")
+ try:
+ while True:
+ signal.pause()
+ except GotSIGUSR1:
+ signal.signal(signal.SIGUSR1, old_sigusr1)
+ log("Sleeping %s" % args.delay)
+ time.sleep(args.delay)
+ yamltest.terminate()
+
+snapshots = dict((snapshot_to_serial(fn), fn) for fn in dataglob("rrdp-publication/*/snapshot/*.xml"))
+deltas = dict((delta_to_serial(fn), fn) for fn in dataglob("rrdp-publication/*/deltas/*.xml"))
+
+for snapshot in sorted(snapshots):
+
+ time.sleep(1)
+ run("rm", "-rf", "rcynic-data")
+ run(rrdp_test_tool, snapshots[snapshot])
+ run(rcynic, "-c", "rcynic-rrdp.conf")
+ run(rcynic_text, "rcynic.xml")
+
+ for delta in sorted(deltas):
+ if delta > snapshot:
+ time.sleep(1)
+ run(rrdp_test_tool, deltas[delta])
+ run(rcynic, "-c", "rcynic-rrdp.conf")
+ run(rcynic_text, "rcynic.xml")
+
+ if not args.exhaustive:
+ break
diff --git a/ca/tests/testpoke.py b/ca/tests/testpoke.py
index c28ed397..7ebe7d44 100644
--- a/ca/tests/testpoke.py
+++ b/ca/tests/testpoke.py
@@ -51,87 +51,85 @@ parser.add_argument("-d", "--debug",
help = "enable debugging")
args = parser.parse_args()
-rpki.log.init("testpoke")
-
yaml_data = yaml.load(args.yaml)
yaml_cmd = args.request
if yaml_cmd is None and len(yaml_data["requests"]) == 1:
- yaml_cmd = yaml_data["requests"].keys()[0]
+ yaml_cmd = yaml_data["requests"].keys()[0]
yaml_req = yaml_data["requests"][yaml_cmd]
def get_PEM(name, cls, y = yaml_data):
- if name in y:
- return cls(PEM = y[name])
- if name + "-file" in y:
- return cls(PEM_file = y[name + "-file"])
- return None
+ if name in y:
+ return cls(PEM = y[name])
+ if name + "-file" in y:
+ return cls(PEM_file = y[name + "-file"])
+ return None
def get_PEM_chain(name, cert = None):
- chain = []
- if cert is not None:
- chain.append(cert)
- if name in yaml_data:
- chain.extend([rpki.x509.X509(PEM = x) for x in yaml_data[name]])
- elif name + "-file" in yaml_data:
- chain.extend([rpki.x509.X509(PEM_file = x) for x in yaml_data[name + "-file"]])
- return chain
+ chain = []
+ if cert is not None:
+ chain.append(cert)
+ if name in yaml_data:
+ chain.extend(rpki.x509.X509(PEM = x) for x in yaml_data[name])
+ elif name + "-file" in yaml_data:
+ chain.extend(rpki.x509.X509(PEM_file = x) for x in yaml_data[name + "-file"])
+ return chain
def query_up_down(q_pdu):
- q_msg = rpki.up_down.message_pdu.make_query(
- payload = q_pdu,
- sender = yaml_data["sender-id"],
- recipient = yaml_data["recipient-id"])
- q_der = rpki.up_down.cms_msg().wrap(q_msg, cms_key, cms_certs, cms_crl)
-
- def done(r_der):
- global last_cms_timestamp
- r_cms = rpki.up_down.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap([cms_ta] + cms_ca_certs)
- last_cms_timestamp = r_cms.check_replay(last_cms_timestamp)
- print r_cms.pretty_print_content()
- try:
- r_msg.payload.check_response()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- fail(e)
-
- rpki.http.want_persistent_client = False
-
- rpki.http.client(
- msg = q_der,
- url = yaml_data["posturl"],
- callback = done,
- errback = fail,
- content_type = rpki.up_down.content_type)
+ q_msg = rpki.up_down.message_pdu.make_query(
+ payload = q_pdu,
+ sender = yaml_data["sender-id"],
+ recipient = yaml_data["recipient-id"])
+ q_der = rpki.up_down.cms_msg_saxify().wrap(q_msg, cms_key, cms_certs, cms_crl)
+
+ def done(r_der):
+ global last_cms_timestamp
+ r_cms = rpki.up_down.cms_msg_saxify(DER = r_der)
+ r_msg = r_cms.unwrap([cms_ta] + cms_ca_certs)
+ last_cms_timestamp = r_cms.check_replay(last_cms_timestamp)
+ print r_cms.pretty_print_content()
+ try:
+ r_msg.payload.check_response()
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ fail(e)
+
+ rpki.http.want_persistent_client = False
+
+ rpki.http.client(
+ msg = q_der,
+ url = yaml_data["posturl"],
+ callback = done,
+ errback = fail,
+ content_type = rpki.up_down.content_type)
def do_list():
- query_up_down(rpki.up_down.list_pdu())
+ query_up_down(rpki.up_down.list_pdu())
def do_issue():
- q_pdu = rpki.up_down.issue_pdu()
- req_key = get_PEM("cert-request-key", rpki.x509.RSA, yaml_req) or cms_key
- q_pdu.class_name = yaml_req["class"]
- q_pdu.pkcs10 = rpki.x509.PKCS10.create(
- keypair = req_key,
- is_ca = True,
- caRepository = yaml_req["sia"][0],
- rpkiManifest = yaml_req["sia"][0] + req_key.gSKI() + ".mft")
- query_up_down(q_pdu)
+ q_pdu = rpki.up_down.issue_pdu()
+ req_key = get_PEM("cert-request-key", rpki.x509.RSA, yaml_req) or cms_key
+ q_pdu.class_name = yaml_req["class"]
+ q_pdu.pkcs10 = rpki.x509.PKCS10.create(
+ keypair = req_key,
+ is_ca = True,
+ caRepository = yaml_req["sia"][0],
+ rpkiManifest = yaml_req["sia"][0] + req_key.gSKI() + ".mft")
+ query_up_down(q_pdu)
def do_revoke():
- q_pdu = rpki.up_down.revoke_pdu()
- q_pdu.class_name = yaml_req["class"]
- q_pdu.ski = yaml_req["ski"]
- query_up_down(q_pdu)
+ q_pdu = rpki.up_down.revoke_pdu()
+ q_pdu.class_name = yaml_req["class"]
+ q_pdu.ski = yaml_req["ski"]
+ query_up_down(q_pdu)
dispatch = { "list" : do_list, "issue" : do_issue, "revoke" : do_revoke }
def fail(e): # pylint: disable=W0621
- sys.exit("Testpoke failed: %s" % e)
+ sys.exit("Testpoke failed: %s" % e)
cms_ta = get_PEM("cms-ca-cert", rpki.x509.X509)
cms_cert = get_PEM("cms-cert", rpki.x509.X509)
@@ -143,7 +141,7 @@ cms_ca_certs = get_PEM_chain("cms-ca-certs")
last_cms_timestamp = None
try:
- dispatch[yaml_req["type"]]()
- rpki.async.event_loop()
+ dispatch[yaml_req["type"]]()
+ rpki.async.event_loop()
except Exception, e:
- fail(e)
+ fail(e)
diff --git a/ca/tests/xml-parse-test.py b/ca/tests/xml-parse-test.py
index 5ea25492..f24d5683 100644
--- a/ca/tests/xml-parse-test.py
+++ b/ca/tests/xml-parse-test.py
@@ -28,92 +28,102 @@
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
-import glob, lxml.etree, lxml.sax
-import rpki.up_down, rpki.left_right, rpki.publication, rpki.relaxng
+import glob
+import lxml.etree
+import rpki.up_down
+import rpki.left_right
+import rpki.publication
+import rpki.publication_control
+import rpki.relaxng
verbose = False
-def test(fileglob, rng, sax_handler, encoding, tester = None):
- files = glob.glob(fileglob)
- files.sort()
- for f in files:
- print "<!--", f, "-->"
- handler = sax_handler()
- elt_in = lxml.etree.parse(f).getroot()
- if verbose:
- print "<!-- Input -->"
- print lxml.etree.tostring(elt_in, pretty_print = True, encoding = encoding, xml_declaration = True)
- rng.assertValid(elt_in)
- lxml.sax.saxify(elt_in, handler)
- elt_out = handler.result.toXML()
- if verbose:
- print "<!-- Output -->"
- print lxml.etree.tostring(elt_out, pretty_print = True, encoding = encoding, xml_declaration = True)
- rng.assertValid(elt_out)
- if tester:
- tester(elt_in, elt_out, handler.result)
- if verbose:
- print
+def test(fileglob, rng, parser, encoding, tester = None):
+ files = glob.glob(fileglob)
+ files.sort()
+ for f in files:
+ print "<!--", f, "-->"
+ elt_in = lxml.etree.parse(f).getroot()
+ if verbose:
+ print "<!-- Input -->"
+ print lxml.etree.tostring(elt_in, pretty_print = True, encoding = encoding, xml_declaration = True)
+ rng.assertValid(elt_in)
+ parsed = parser.fromXML(elt_in)
+ elt_out = parsed.toXML()
+ if verbose:
+ print "<!-- Output -->"
+ print lxml.etree.tostring(elt_out, pretty_print = True, encoding = encoding, xml_declaration = True)
+ rng.assertValid(elt_out)
+ if tester:
+ tester(elt_in, elt_out, parsed)
+ if verbose:
+ print
def pprint(pairs):
- if verbose:
- for thing, name in pairs:
- if thing is not None:
- print "[%s]" % name
- print thing.get_POW().pprint()
+ if verbose:
+ for thing, name in pairs:
+ if thing is not None:
+ print "[%s]" % name
+ print thing.get_POW().pprint()
def ud_tester(elt_in, elt_out, msg):
- assert isinstance(msg, rpki.up_down.message_pdu)
- if isinstance(msg.payload, rpki.up_down.list_response_pdu):
- for c in msg.payload.classes:
- pprint([(c.certs[i].cert, ("%s certificate #%d" % (c.class_name, i))) for i in xrange(len(c.certs))] + [(c.issuer, ("%s issuer" % c.class_name))])
+ assert isinstance(msg, rpki.up_down.message_pdu)
+ if isinstance(msg.payload, rpki.up_down.list_response_pdu):
+ for c in msg.payload.classes:
+ pprint([(c.certs[i].cert, ("%s certificate #%d" % (c.class_name, i))) for i in xrange(len(c.certs))] + [(c.issuer, ("%s issuer" % c.class_name))])
def lr_tester(elt_in, elt_out, msg):
- assert isinstance(msg, rpki.left_right.msg)
- for obj in msg:
- if isinstance(obj, rpki.left_right.self_elt):
- pprint(((obj.bpki_cert, "BPKI cert"),
- (obj.bpki_glue, "BPKI glue")))
- if isinstance(obj, rpki.left_right.bsc_elt):
- pprint(((obj.signing_cert, "Signing certificate"),
- (obj.signing_cert_crl, "Signing certificate CRL")))
- # (obj.pkcs10_request, "PKCS #10 request")
- if isinstance(obj, rpki.left_right.parent_elt):
- pprint(((obj.bpki_cms_cert, "CMS certificate"),
- (obj.bpki_cms_glue, "CMS glue")))
- if isinstance(obj, (rpki.left_right.child_elt, rpki.left_right.repository_elt)):
- pprint(((obj.bpki_cert, "Certificate"),
- (obj.bpki_glue, "Glue")))
+ assert isinstance(msg, rpki.left_right.msg)
+ for obj in msg:
+ if isinstance(obj, rpki.left_right.self_elt):
+ pprint(((obj.bpki_cert, "BPKI cert"),
+ (obj.bpki_glue, "BPKI glue")))
+ if isinstance(obj, rpki.left_right.bsc_elt):
+ pprint(((obj.signing_cert, "Signing certificate"),
+ (obj.signing_cert_crl, "Signing certificate CRL")))
+ # (obj.pkcs10_request, "PKCS #10 request")
+ if isinstance(obj, rpki.left_right.parent_elt):
+ pprint(((obj.bpki_cert, "BPKI certificate"),
+ (obj.bpki_glue, "BPKI glue")))
+ if isinstance(obj, (rpki.left_right.child_elt, rpki.left_right.repository_elt)):
+ pprint(((obj.bpki_cert, "BPKI certificate"),
+ (obj.bpki_glue, "BPKI glue")))
def pp_tester(elt_in, elt_out, msg):
- assert isinstance(msg, rpki.publication.msg)
- for obj in msg:
- if isinstance(obj, rpki.publication.client_elt):
- pprint(((obj.bpki_cert, "BPKI cert"),
- (obj.bpki_glue, "BPKI glue")))
- if isinstance(obj, rpki.publication.certificate_elt):
- pprint(((obj.payload, "RPKI cert"),))
- if isinstance(obj, rpki.publication.crl_elt):
- pprint(((obj.payload, "RPKI CRL"),))
- if isinstance(obj, rpki.publication.manifest_elt):
- pprint(((obj.payload, "RPKI manifest"),))
- if isinstance(obj, rpki.publication.roa_elt):
- pprint(((obj.payload, "ROA"),))
+ assert isinstance(msg, rpki.publication.msg)
+ for obj in msg:
+ if isinstance(obj, rpki.publication.publish_elt):
+ pprint(((obj.payload, "Publish object"),))
+ if isinstance(obj, rpki.publication.withdraw_elt):
+ pprint(((None, "Withdraw object"),))
+
+def pc_tester(elt_in, elt_out, msg):
+ assert isinstance(msg, rpki.publication_control.msg)
+ for obj in msg:
+ if isinstance(obj, rpki.publication_control.client_elt):
+ pprint(((obj.bpki_cert, "BPKI cert"),
+ (obj.bpki_glue, "BPKI glue")))
test(fileglob = "up-down-protocol-samples/*.xml",
rng = rpki.relaxng.up_down,
- sax_handler = rpki.up_down.sax_handler,
+ parser = rpki.up_down.msg,
encoding = "utf-8",
tester = ud_tester)
test(fileglob = "left-right-protocol-samples/*.xml",
rng = rpki.relaxng.left_right,
- sax_handler = rpki.left_right.sax_handler,
+ parser = rpki.left_right.msg,
encoding = "us-ascii",
tester = lr_tester)
test(fileglob = "publication-protocol-samples/*.xml",
rng = rpki.relaxng.publication,
- sax_handler = rpki.publication.sax_handler,
+ parser = rpki.publication.msg,
encoding = "us-ascii",
tester = pp_tester)
+
+test(fileglob = "publication-control-protocol-samples/*.xml",
+ rng = rpki.relaxng.publication_control,
+ parser = rpki.publication_control.msg,
+ encoding = "us-ascii",
+ tester = pc_tester)
diff --git a/ca/tests/yamlconf.py b/ca/tests/yamlconf.py
index f1073c92..db368320 100644
--- a/ca/tests/yamlconf.py
+++ b/ca/tests/yamlconf.py
@@ -75,786 +75,783 @@ config_overrides = {
"pubd_sql_username" : "pubd", "pubd_sql_password" : "fnord" }
def cleanpath(*names):
- return os.path.normpath(os.path.join(*names))
+ return os.path.normpath(os.path.join(*names))
this_dir = os.getcwd()
test_dir = None
rpki_conf = None
class roa_request(object):
- """
- Representation of a ROA request.
- """
-
- def __init__(self, asn, ipv4, ipv6):
- self.asn = asn
- self.v4 = rpki.resource_set.roa_prefix_set_ipv4("".join(ipv4.split())) if ipv4 else None
- self.v6 = rpki.resource_set.roa_prefix_set_ipv6("".join(ipv6.split())) if ipv6 else None
-
- def __eq__(self, other):
- return self.asn == other.asn and self.v4 == other.v4 and self.v6 == other.v6
-
- def __hash__(self):
- v4 = tuple(self.v4) if self.v4 is not None else None
- v6 = tuple(self.v6) if self.v6 is not None else None
- return self.asn.__hash__() + v4.__hash__() + v6.__hash__()
-
- def __str__(self):
- if self.v4 and self.v6:
- return "%s: %s,%s" % (self.asn, self.v4, self.v6)
- else:
- return "%s: %s" % (self.asn, self.v4 or self.v6)
+ """
+ Representation of a ROA request.
+ """
- @classmethod
- def parse(cls, y):
- return cls(y.get("asn"), y.get("ipv4"), y.get("ipv6"))
+ def __init__(self, asn, ipv4, ipv6):
+ self.asn = asn
+ self.v4 = rpki.resource_set.roa_prefix_set_ipv4("".join(ipv4.split())) if ipv4 else None
+ self.v6 = rpki.resource_set.roa_prefix_set_ipv6("".join(ipv6.split())) if ipv6 else None
-class router_cert(object):
- """
- Representation for a router_cert object.
- """
+ def __eq__(self, other):
+ return self.asn == other.asn and self.v4 == other.v4 and self.v6 == other.v6
- _ecparams = None
+ def __hash__(self):
+ v4 = tuple(self.v4) if self.v4 is not None else None
+ v6 = tuple(self.v6) if self.v6 is not None else None
+ return self.asn.__hash__() + v4.__hash__() + v6.__hash__()
- @classmethod
- def ecparams(cls):
- if cls._ecparams is None:
- cls._ecparams = rpki.x509.KeyParams.generateEC()
- return cls._ecparams
+ def __str__(self):
+ if self.v4 and self.v6:
+ return "%s: %s,%s" % (self.asn, self.v4, self.v6)
+ else:
+ return "%s: %s" % (self.asn, self.v4 or self.v6)
- def __init__(self, asn, router_id):
- self.asn = rpki.resource_set.resource_set_as("".join(str(asn).split()))
- self.router_id = router_id
- self.keypair = rpki.x509.ECDSA.generate(self.ecparams())
- self.pkcs10 = rpki.x509.PKCS10.create(keypair = self.keypair)
- self.gski = self.pkcs10.gSKI()
+ @classmethod
+ def parse(cls, y):
+ return cls(y.get("asn"), y.get("ipv4"), y.get("ipv6"))
- def __eq__(self, other):
- return self.asn == other.asn and self.router_id == other.router_id and self.gski == other.gski
+class router_cert(object):
+ """
+ Representation for a router_cert object.
+ """
- def __hash__(self):
- return tuple(self.asn).__hash__() + self.router_id.__hash__() + self.gski.__hash__()
+ _ecparams = None
- def __str__(self):
- return "%s: %s: %s" % (self.asn, self.router_id, self.gski)
+ @classmethod
+ def ecparams(cls):
+ if cls._ecparams is None:
+ cls._ecparams = rpki.x509.KeyParams.generateEC()
+ return cls._ecparams
- @classmethod
- def parse(cls, yaml):
- return cls(yaml.get("asn"), yaml.get("router_id"))
+ def __init__(self, asn, router_id):
+ self.asn = rpki.resource_set.resource_set_as("".join(str(asn).split()))
+ self.router_id = router_id
+ self.keypair = rpki.x509.ECDSA.generate(params = self.ecparams(), quiet = True)
+ self.pkcs10 = rpki.x509.PKCS10.create(keypair = self.keypair)
+ self.gski = self.pkcs10.gSKI()
+ def __eq__(self, other):
+ return self.asn == other.asn and self.router_id == other.router_id and self.gski == other.gski
-class allocation_db(list):
- """
- Allocation database.
- """
-
- def __init__(self, y):
- list.__init__(self)
- self.root = allocation(y, self)
- assert self.root.is_root
- if self.root.crl_interval is None:
- self.root.crl_interval = 60 * 60
- if self.root.regen_margin is None:
- self.root.regen_margin = 24 * 60 * 60
- if self.root.base.valid_until is None:
- self.root.base.valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 2)
- for a in self:
- if a.base.valid_until is None:
- a.base.valid_until = a.parent.base.valid_until
- if a.crl_interval is None:
- a.crl_interval = a.parent.crl_interval
- if a.regen_margin is None:
- a.regen_margin = a.parent.regen_margin
- self.root.closure()
- self.map = dict((a.name, a) for a in self)
- for a in self:
- if a.is_hosted:
- a.hosted_by = self.map[a.hosted_by]
- a.hosted_by.hosts.append(a)
- assert not a.is_root and not a.hosted_by.is_hosted
-
- def dump(self):
- for a in self:
- a.dump()
+ def __hash__(self):
+ return tuple(self.asn).__hash__() + self.router_id.__hash__() + self.gski.__hash__()
+ def __str__(self):
+ return "%s: %s: %s" % (self.asn, self.router_id, self.gski)
-class allocation(object):
- """
- One entity in our allocation database. Every entity in the database
- is assumed to hold resources. Entities that don't have the
- hosted_by property run their own copies of rpkid, irdbd, and pubd.
- """
-
- base_port = 4400
- base_engine = -1
- parent = None
- crl_interval = None
- regen_margin = None
- engine = -1
- rpkid_port = 4404
- irdbd_port = 4403
- pubd_port = 4402
- rootd_port = 4401
- rsync_port = 873
-
- @classmethod
- def allocate_port(cls):
- cls.base_port += 1
- return cls.base_port
-
- @classmethod
- def allocate_engine(cls):
- cls.base_engine += 1
- return cls.base_engine
-
- def __init__(self, y, db, parent = None):
- db.append(self)
- self.name = y["name"]
- self.parent = parent
- self.kids = [allocation(k, db, self) for k in y.get("kids", ())]
- valid_until = None
- if "valid_until" in y:
- valid_until = rpki.sundial.datetime.from_datetime(y.get("valid_until"))
- if valid_until is None and "valid_for" in y:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta.parse(y["valid_for"])
- self.base = rpki.resource_set.resource_bag(
- asn = rpki.resource_set.resource_set_as(y.get("asn")),
- v4 = rpki.resource_set.resource_set_ipv4(y.get("ipv4")),
- v6 = rpki.resource_set.resource_set_ipv6(y.get("ipv6")),
- valid_until = valid_until)
- if "crl_interval" in y:
- self.crl_interval = rpki.sundial.timedelta.parse(y["crl_interval"]).convert_to_seconds()
- if "regen_margin" in y:
- self.regen_margin = rpki.sundial.timedelta.parse(y["regen_margin"]).convert_to_seconds()
- if "ghostbusters" in y:
- self.ghostbusters = y.get("ghostbusters")
- elif "ghostbuster" in y:
- self.ghostbusters = [y.get("ghostbuster")]
- else:
- self.ghostbusters = []
- self.roa_requests = [roa_request.parse(r) for r in y.get("roa_request", ())]
- self.router_certs = [router_cert.parse(r) for r in y.get("router_cert", ())]
- for r in self.roa_requests:
- if r.v4:
- self.base.v4 |= r.v4.to_resource_set()
- if r.v6:
- self.base.v6 |= r.v6.to_resource_set()
- for r in self.router_certs:
- self.base.asn |= r.asn
- self.hosted_by = y.get("hosted_by")
- self.hosts = []
- if not self.is_hosted:
- self.engine = self.allocate_engine()
- if loopback and not self.is_hosted:
- self.rpkid_port = self.allocate_port()
- self.irdbd_port = self.allocate_port()
- if loopback and self.runs_pubd:
- self.pubd_port = self.allocate_port()
- self.rsync_port = self.allocate_port()
- if loopback and self.is_root:
- self.rootd_port = self.allocate_port()
-
- def closure(self):
- resources = self.base
- for kid in self.kids:
- resources |= kid.closure()
- self.resources = resources
- return resources
-
- @property
- def hostname(self):
- if loopback:
- return "localhost"
- elif dns_suffix:
- return self.name + "." + dns_suffix.lstrip(".")
- else:
- return self.name
+ @classmethod
+ def parse(cls, yaml):
+ return cls(yaml.get("asn"), yaml.get("router_id"))
- @property
- def rsync_server(self):
- if loopback:
- return "%s:%s" % (self.pubd.hostname, self.pubd.rsync_port)
- else:
- return self.pubd.hostname
- def dump(self):
- if not quiet:
- print str(self)
-
- def __str__(self):
- s = self.name + ":\n"
- if self.resources.asn: s += " ASNs: %s\n" % self.resources.asn
- if self.resources.v4: s += " IPv4: %s\n" % self.resources.v4
- if self.resources.v6: s += " IPv6: %s\n" % self.resources.v6
- if self.kids: s += " Kids: %s\n" % ", ".join(k.name for k in self.kids)
- if self.parent: s += " Up: %s\n" % self.parent.name
- if self.is_hosted: s += " Host: %s\n" % self.hosted_by.name
- if self.hosts: s += " Hosts: %s\n" % ", ".join(h.name for h in self.hosts)
- for r in self.roa_requests: s += " ROA: %s\n" % r
- if not self.is_hosted: s += " IPort: %s\n" % self.irdbd_port
- if self.runs_pubd: s += " PPort: %s\n" % self.pubd_port
- if not self.is_hosted: s += " RPort: %s\n" % self.rpkid_port
- if self.runs_pubd: s += " SPort: %s\n" % self.rsync_port
- if self.is_root: s += " TPort: %s\n" % self.rootd_port
- return s + " Until: %s\n" % self.resources.valid_until
-
- @property
- def is_root(self):
- return self.parent is None
-
- @property
- def is_hosted(self):
- return self.hosted_by is not None
-
- @property
- def runs_pubd(self):
- return self.is_root or not (self.is_hosted or only_one_pubd)
-
- def path(self, *names):
- return cleanpath(test_dir, self.host.name, *names)
-
- def csvout(self, fn):
- path = self.path(fn)
- if not quiet:
- print "Writing", path
- return rpki.csv_utils.csv_writer(path)
-
- def up_down_url(self):
- return "http://%s:%d/up-down/%s/%s" % (self.parent.host.hostname,
- self.parent.host.rpkid_port,
- self.parent.name,
- self.name)
-
- def dump_asns(self, fn):
- with self.csvout(fn) as f:
- for k in self.kids:
- f.writerows((k.name, a) for a in k.resources.asn)
-
- def dump_prefixes(self, fn):
- with self.csvout(fn) as f:
- for k in self.kids:
- f.writerows((k.name, p) for p in (k.resources.v4 + k.resources.v6))
-
- def dump_roas(self, fn):
- with self.csvout(fn) as f:
- for g1, r in enumerate(self.roa_requests):
- f.writerows((p, r.asn, "G%08d%08d" % (g1, g2))
- for g2, p in enumerate((r.v4 + r.v6 if r.v4 and r.v6 else r.v4 or r.v6 or ())))
-
- def dump_ghostbusters(self, fn):
- if self.ghostbusters:
- path = self.path(fn)
- if not quiet:
- print "Writing", path
- with open(path, "w") as f:
- for i, g in enumerate(self.ghostbusters):
- if i > 0:
- f.write("\n")
- f.write(g)
-
- def dump_router_certificates(self, fn):
- if self.router_certs:
- path = self.path(fn)
- if not quiet:
- print "Writing", path
- xmlns = rpki.relaxng.router_certificate.xmlns
- xml = lxml.etree.Element(xmlns + "router_certificate_requests",
- version = rpki.relaxng.router_certificate.version,
- nsmap = rpki.relaxng.router_certificate.nsmap)
- for r in self.router_certs:
- x = lxml.etree.SubElement(xml, xmlns + "router_certificate_request",
- router_id = str(r.router_id),
- asn = str(r.asn),
- valid_until = str(self.resources.valid_until))
- x.text = r.pkcs10.get_Base64()
- rpki.relaxng.router_certificate.assertValid(xml)
- lxml.etree.ElementTree(xml).write(path, pretty_print = True)
-
- @property
- def pubd(self):
- s = self
- while not s.runs_pubd:
- s = s.parent
- return s
-
- @property
- def client_handle(self):
- path = []
- s = self
- if not flat_publication:
- while not s.runs_pubd:
- path.append(s)
- s = s.parent
- path.append(s)
- return ".".join(i.name for i in reversed(path))
-
- @property
- def host(self):
- return self.hosted_by or self
-
- @property
- def publication_base_directory(self):
- if not loopback and publication_base is not None:
- return publication_base
- else:
- return self.path("publication")
+class allocation_db(list):
+ """
+ Allocation database.
+ """
+
+ def __init__(self, y):
+ list.__init__(self)
+ self.root = allocation(y, self)
+ assert self.root.is_root
+ if self.root.crl_interval is None:
+ self.root.crl_interval = 60 * 60
+ if self.root.regen_margin is None:
+ self.root.regen_margin = 24 * 60 * 60
+ if self.root.base.valid_until is None:
+ self.root.base.valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 2)
+ for a in self:
+ if a.base.valid_until is None:
+ a.base.valid_until = a.parent.base.valid_until
+ if a.crl_interval is None:
+ a.crl_interval = a.parent.crl_interval
+ if a.regen_margin is None:
+ a.regen_margin = a.parent.regen_margin
+ self.root.closure()
+ self.map = dict((a.name, a) for a in self)
+ for a in self:
+ if a.is_hosted:
+ a.hosted_by = self.map[a.hosted_by]
+ a.hosted_by.hosts.append(a)
+ assert not a.is_root and not a.hosted_by.is_hosted
+
+ def dump(self):
+ for a in self:
+ a.dump()
- @property
- def publication_root_directory(self):
- if not loopback and publication_root is not None:
- return publication_root
- else:
- return self.path("publication.root")
-
- def dump_conf(self):
-
- r = dict(
- handle = self.name,
- run_rpkid = str(not self.is_hosted),
- run_pubd = str(self.runs_pubd),
- run_rootd = str(self.is_root),
- irdbd_sql_username = "irdb",
- rpkid_sql_username = "rpki",
- rpkid_server_host = self.hostname,
- rpkid_server_port = str(self.rpkid_port),
- irdbd_server_host = "localhost",
- irdbd_server_port = str(self.irdbd_port),
- rootd_server_port = str(self.rootd_port),
- pubd_sql_username = "pubd",
- pubd_server_host = self.pubd.hostname,
- pubd_server_port = str(self.pubd.pubd_port),
- publication_rsync_server = self.rsync_server)
-
- if loopback:
- r.update(
- irdbd_sql_database = self.irdb_name,
- rpkid_sql_database = "rpki%d" % self.engine,
- pubd_sql_database = "pubd%d" % self.engine,
- bpki_servers_directory = self.path(),
- publication_base_directory = self.publication_base_directory)
-
- r.update(config_overrides)
-
- with open(self.path("rpki.conf"), "w") as f:
- f.write("# Automatically generated, do not edit\n")
- if not quiet:
- print "Writing", f.name
-
- section = None
- for line in open(rpki_conf):
- m = section_regexp.match(line)
- if m:
- section = m.group(1)
- m = variable_regexp.match(line)
- option = m.group(1) if m and section == "myrpki" else None
- if option and option in r:
- line = "%s = %s\n" % (option, r[option])
- f.write(line)
-
- def dump_rsyncd(self):
- lines = []
- if self.runs_pubd:
- lines.extend((
- "# Automatically generated, do not edit",
- "port = %d" % self.rsync_port,
- "address = %s" % self.hostname,
- "log file = rsyncd.log",
- "read only = yes",
- "use chroot = no",
- "[rpki]",
- "path = %s" % self.publication_base_directory,
- "comment = RPKI test"))
- if self.is_root:
- assert self.runs_pubd
- lines.extend((
- "[root]",
- "path = %s" % self.publication_root_directory,
- "comment = RPKI test root"))
- if lines:
- with open(self.path("rsyncd.conf"), "w") as f:
- if not quiet:
- print "Writing", f.name
- f.writelines(line + "\n" for line in lines)
-
- @property
- def irdb_name(self):
- return "irdb%d" % self.host.engine
-
- @property
- def irdb(self):
- prior_name = self.zoo.handle
- return rpki.irdb.database(
- self.irdb_name,
- on_entry = lambda: self.zoo.reset_identity(self.name),
- on_exit = lambda: self.zoo.reset_identity(prior_name))
-
- def syncdb(self):
- import django.core.management
- assert not self.is_hosted
- django.core.management.call_command("syncdb",
- database = self.irdb_name,
- load_initial_data = False,
- interactive = False,
- verbosity = 0)
-
- def hire_zookeeper(self):
- assert not self.is_hosted
- self._zoo = rpki.irdb.Zookeeper(
- cfg = rpki.config.parser(self.path("rpki.conf")),
- logstream = None if quiet else sys.stdout)
-
- @property
- def zoo(self):
- return self.host._zoo
-
- def dump_root(self):
-
- assert self.is_root and not self.is_hosted
-
- root_resources = rpki.resource_set.resource_bag(
- asn = rpki.resource_set.resource_set_as("0-4294967295"),
- v4 = rpki.resource_set.resource_set_ipv4("0.0.0.0/0"),
- v6 = rpki.resource_set.resource_set_ipv6("::/0"))
-
- root_key = rpki.x509.RSA.generate(quiet = True)
-
- root_uri = "rsync://%s/rpki/" % self.rsync_server
-
- root_sia = (root_uri, root_uri + "root.mft", None)
-
- root_cert = rpki.x509.X509.self_certify(
- keypair = root_key,
- subject_key = root_key.get_public(),
- serial = 1,
- sia = root_sia,
- notAfter = rpki.sundial.now() + rpki.sundial.timedelta(days = 365),
- resources = root_resources)
-
- with open(self.path("publication.root", "root.cer"), "wb") as f:
- f.write(root_cert.get_DER())
-
- with open(self.path("root.key"), "wb") as f:
- f.write(root_key.get_DER())
-
- with open(cleanpath(test_dir, "root.tal"), "w") as f:
- f.write("rsync://%s/root/root.cer\n\n%s" % (
- self.rsync_server, root_key.get_public().get_Base64()))
-
- def mkdir(self, *path):
- path = self.path(*path)
- if not quiet:
- print "Creating directory", path
- os.makedirs(path)
- def dump_sql(self):
- if not self.is_hosted:
- with open(self.path("rpkid.sql"), "w") as f:
+class allocation(object):
+ """
+ One entity in our allocation database. Every entity in the database
+ is assumed to hold resources. Entities that don't have the
+ hosted_by property run their own copies of rpkid, irdbd, and pubd.
+ """
+
+ base_port = 4400
+ base_engine = -1
+ parent = None
+ crl_interval = None
+ regen_margin = None
+ engine = -1
+ rpkid_port = 4404
+ irdbd_port = 4403
+ pubd_port = 4402
+ rsync_port = 873
+
+ @classmethod
+ def allocate_port(cls):
+ cls.base_port += 1
+ return cls.base_port
+
+ @classmethod
+ def allocate_engine(cls):
+ cls.base_engine += 1
+ return cls.base_engine
+
+ def __init__(self, y, db, parent = None):
+ db.append(self)
+ self.name = y["name"]
+ self.parent = parent
+ self.kids = [allocation(k, db, self) for k in y.get("kids", ())]
+ valid_until = None
+ if "valid_until" in y:
+ valid_until = rpki.sundial.datetime.from_datetime(y.get("valid_until"))
+ if valid_until is None and "valid_for" in y:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta.parse(y["valid_for"])
+ self.base = rpki.resource_set.resource_bag(
+ asn = str(y.get("asn", "")),
+ v4 = y.get("ipv4"),
+ v6 = y.get("ipv6"),
+ valid_until = valid_until)
+ if "crl_interval" in y:
+ self.crl_interval = rpki.sundial.timedelta.parse(y["crl_interval"]).convert_to_seconds()
+ if "regen_margin" in y:
+ self.regen_margin = rpki.sundial.timedelta.parse(y["regen_margin"]).convert_to_seconds()
+ if "ghostbusters" in y:
+ self.ghostbusters = y.get("ghostbusters")
+ elif "ghostbuster" in y:
+ self.ghostbusters = [y.get("ghostbuster")]
+ else:
+ self.ghostbusters = []
+ self.roa_requests = [roa_request.parse(r) for r in y.get("roa_request", ())]
+ self.router_certs = [router_cert.parse(r) for r in y.get("router_cert", ())]
+ for r in self.roa_requests:
+ if r.v4:
+ self.base.v4 |= r.v4.to_resource_set()
+ if r.v6:
+ self.base.v6 |= r.v6.to_resource_set()
+ for r in self.router_certs:
+ self.base.asn |= r.asn
+ self.hosted_by = y.get("hosted_by")
+ self.hosts = []
+ if not self.is_hosted:
+ self.engine = self.allocate_engine()
+ if loopback and not self.is_hosted:
+ self.rpkid_port = self.allocate_port()
+ self.irdbd_port = self.allocate_port()
+ if loopback and self.runs_pubd:
+ self.pubd_port = self.allocate_port()
+ self.rsync_port = self.allocate_port()
+
+ def closure(self):
+ resources = self.base
+ for kid in self.kids:
+ resources |= kid.closure()
+ self.resources = resources
+ return resources
+
+ @property
+ def hostname(self):
+ if loopback:
+ return "localhost"
+ elif dns_suffix:
+ return self.name + "." + dns_suffix.lstrip(".")
+ else:
+ return self.name
+
+ @property
+ def rsync_server(self):
+ if loopback:
+ return "%s:%s" % (self.pubd.hostname, self.pubd.rsync_port)
+ else:
+ return self.pubd.hostname
+
+ def dump(self):
if not quiet:
- print "Writing", f.name
- f.write(rpki.sql_schemas.rpkid)
- if self.runs_pubd:
- with open(self.path("pubd.sql"), "w") as f:
+ print str(self)
+
+ def __str__(self):
+ s = self.name + ":\n"
+ if self.resources.asn: s += " ASNs: %s\n" % self.resources.asn
+ if self.resources.v4: s += " IPv4: %s\n" % self.resources.v4
+ if self.resources.v6: s += " IPv6: %s\n" % self.resources.v6
+ if self.kids: s += " Kids: %s\n" % ", ".join(k.name for k in self.kids)
+ if self.parent: s += " Up: %s\n" % self.parent.name
+ if self.is_hosted: s += " Host: %s\n" % self.hosted_by.name
+ if self.hosts: s += " Hosts: %s\n" % ", ".join(h.name for h in self.hosts)
+ for r in self.roa_requests: s += " ROA: %s\n" % r
+ if not self.is_hosted: s += " IPort: %s\n" % self.irdbd_port
+ if self.runs_pubd: s += " PPort: %s\n" % self.pubd_port
+ if not self.is_hosted: s += " RPort: %s\n" % self.rpkid_port
+ if self.runs_pubd: s += " SPort: %s\n" % self.rsync_port
+ return s + " Until: %s\n" % self.resources.valid_until
+
+ @property
+ def is_root(self):
+ return self.parent is None
+
+ @property
+ def is_hosted(self):
+ return self.hosted_by is not None
+
+ @property
+ def runs_pubd(self):
+ return self.is_root or not (self.is_hosted or only_one_pubd)
+
+ def path(self, *names):
+ return cleanpath(test_dir, self.host.name, *names)
+
+ def csvout(self, fn):
+ path = self.path(fn)
if not quiet:
- print "Writing", f.name
- f.write(rpki.sql_schemas.pubd)
- if not self.is_hosted:
- username = config_overrides["irdbd_sql_username"]
- password = config_overrides["irdbd_sql_password"]
- cmd = ("mysqldump", "-u", username, "-p" + password, self.irdb_name)
- with open(self.path("irdbd.sql"), "w") as f:
+ print "Writing", path
+ return rpki.csv_utils.csv_writer(path)
+
+ def up_down_url(self):
+ return "http://%s:%d/up-down/%s/%s" % (self.parent.host.hostname,
+ self.parent.host.rpkid_port,
+ self.parent.name,
+ self.name)
+
+ def dump_asns(self, fn):
+ with self.csvout(fn) as f:
+ for k in self.kids:
+ f.writerows((k.name, a) for a in k.resources.asn)
+
+ def dump_prefixes(self, fn):
+ with self.csvout(fn) as f:
+ for k in self.kids:
+ f.writerows((k.name, p) for p in (k.resources.v4 + k.resources.v6))
+
+ def dump_roas(self, fn):
+ with self.csvout(fn) as f:
+ for g1, r in enumerate(self.roa_requests):
+ f.writerows((p, r.asn, "G%08d%08d" % (g1, g2))
+ for g2, p in enumerate((r.v4 + r.v6 if r.v4 and r.v6 else r.v4 or r.v6 or ())))
+
+ def dump_ghostbusters(self, fn):
+ if self.ghostbusters:
+ path = self.path(fn)
+ if not quiet:
+ print "Writing", path
+ with open(path, "w") as f:
+ for i, g in enumerate(self.ghostbusters):
+ if i > 0:
+ f.write("\n")
+ f.write(g)
+
+ def dump_router_certificates(self, fn):
+ if self.router_certs:
+ path = self.path(fn)
+ if not quiet:
+ print "Writing", path
+ xmlns = rpki.relaxng.router_certificate.xmlns
+ xml = lxml.etree.Element(xmlns + "router_certificate_requests",
+ version = rpki.relaxng.router_certificate.version,
+ nsmap = rpki.relaxng.router_certificate.nsmap)
+ for r in self.router_certs:
+ x = lxml.etree.SubElement(xml, xmlns + "router_certificate_request",
+ router_id = str(r.router_id),
+ asn = str(r.asn),
+ valid_until = str(self.resources.valid_until))
+ x.text = r.pkcs10.get_Base64()
+ rpki.relaxng.router_certificate.assertValid(xml)
+ lxml.etree.ElementTree(xml).write(path, pretty_print = True)
+
+ @property
+ def pubd(self):
+ s = self
+ while not s.runs_pubd:
+ s = s.parent
+ return s
+
+ @property
+ def client_handle(self):
+ path = []
+ s = self
+ if not flat_publication:
+ while not s.runs_pubd:
+ path.append(s)
+ s = s.parent
+ path.append(s)
+ return ".".join(i.name for i in reversed(path))
+
+ @property
+ def host(self):
+ return self.hosted_by or self
+
+ @property
+ def publication_base_directory(self):
+ if not loopback and publication_base is not None:
+ return publication_base
+ else:
+ return self.path("publication")
+
+ @property
+ def publication_root_directory(self):
+ if not loopback and publication_root is not None:
+ return publication_root
+ else:
+ return self.path("publication.root")
+
+ def dump_conf(self):
+
+ r = dict(
+ handle = self.name,
+ run_rpkid = str(not self.is_hosted),
+ run_pubd = str(self.runs_pubd),
+ irdbd_sql_username = "irdb",
+ rpkid_sql_username = "rpki",
+ rpkid_server_host = self.hostname,
+ rpkid_server_port = str(self.rpkid_port),
+ irdbd_server_host = "localhost",
+ irdbd_server_port = str(self.irdbd_port),
+ pubd_sql_username = "pubd",
+ pubd_server_host = self.pubd.hostname,
+ pubd_server_port = str(self.pubd.pubd_port),
+ publication_rsync_server = self.rsync_server)
+
+ if loopback:
+ r.update(
+ irdbd_sql_database = self.irdb_name,
+ rpkid_sql_database = "rpki%d" % self.engine,
+ pubd_sql_database = "pubd%d" % self.engine,
+ bpki_servers_directory = self.path(),
+ publication_base_directory = self.publication_base_directory)
+
+ r.update(config_overrides)
+
+ with open(self.path("rpki.conf"), "w") as f:
+ f.write("# Automatically generated, do not edit\n")
+ if not quiet:
+ print "Writing", f.name
+
+ section = None
+ for line in open(rpki_conf):
+ m = section_regexp.match(line)
+ if m:
+ section = m.group(1)
+ m = variable_regexp.match(line)
+ option = m.group(1) if m and section == "myrpki" else None
+ if option and option in r:
+ line = "%s = %s\n" % (option, r[option])
+ f.write(line)
+
+ def dump_rsyncd(self):
+ lines = []
+ if self.runs_pubd:
+ lines.extend((
+ "# Automatically generated, do not edit",
+ "port = %d" % self.rsync_port,
+ "address = %s" % self.hostname,
+ "log file = rsyncd.log",
+ "read only = yes",
+ "use chroot = no",
+ "[rpki]",
+ "path = %s" % self.publication_base_directory,
+ "comment = RPKI test"))
+ if self.is_root:
+ assert self.runs_pubd
+ lines.extend((
+ "[root]",
+ "path = %s" % self.publication_root_directory,
+ "comment = RPKI test root"))
+ if lines:
+ with open(self.path("rsyncd.conf"), "w") as f:
+ if not quiet:
+ print "Writing", f.name
+ f.writelines(line + "\n" for line in lines)
+
+ @property
+ def irdb_name(self):
+ return "irdb%d" % self.host.engine
+
+ @property
+ def irdb(self):
+ prior_name = self.zoo.handle
+ return rpki.irdb.database(
+ self.irdb_name,
+ on_entry = lambda: self.zoo.reset_identity(self.name),
+ on_exit = lambda: self.zoo.reset_identity(prior_name))
+
+ def syncdb(self):
+ import django.core.management
+ assert not self.is_hosted
+ django.core.management.call_command(
+ "syncdb",
+ verbosity = 0,
+ database = self.irdb_name,
+ migrate = True,
+ load_initial_data = False,
+ interactive = False)
+
+ def hire_zookeeper(self):
+ assert not self.is_hosted
+ self._zoo = rpki.irdb.Zookeeper(
+ cfg = rpki.config.parser(filename = self.path("rpki.conf")),
+ logstream = None if quiet else sys.stdout)
+
+ @property
+ def zoo(self):
+ return self.host._zoo
+
+ def dump_root(self):
+
+ assert self.is_root and not self.is_hosted
+
+ root_resources = rpki.resource_set.resource_bag(
+ asn = "0-4294967295",
+ v4 = "0.0.0.0/0",
+ v6 = "::/0")
+
+ root_key = rpki.x509.RSA.generate(quiet = True)
+
+ root_uri = "rsync://%s/rpki/" % self.rsync_server
+
+ root_sia = (root_uri, root_uri + "root.mft", None, rpki.publication.rrdp_sia_uri_kludge)
+
+ root_cert = rpki.x509.X509.self_certify(
+ keypair = root_key,
+ subject_key = root_key.get_public(),
+ serial = 1,
+ sia = root_sia,
+ notAfter = rpki.sundial.now() + rpki.sundial.timedelta(days = 365),
+ resources = root_resources)
+
+ with open(self.path("root.cer"), "wb") as f:
+ f.write(root_cert.get_DER())
+
+ with open(self.path("root.key"), "wb") as f:
+ f.write(root_key.get_DER())
+
+ with open(cleanpath(test_dir, "root.tal"), "w") as f:
+ f.write("rsync://%s/root/root.cer\n\n" % self.rsync_server)
+ f.write(root_key.get_public().get_Base64())
+
+ def mkdir(self, *path):
+ path = self.path(*path)
if not quiet:
- print "Writing", f.name
- subprocess.check_call(cmd, stdout = f)
+ print "Creating directory", path
+ os.makedirs(path)
+
+ def dump_sql(self):
+ if not self.is_hosted:
+ with open(self.path("rpkid.sql"), "w") as f:
+ if not quiet:
+ print "Writing", f.name
+ f.write(rpki.sql_schemas.rpkid)
+ if self.runs_pubd:
+ with open(self.path("pubd.sql"), "w") as f:
+ if not quiet:
+ print "Writing", f.name
+ f.write(rpki.sql_schemas.pubd)
+ if not self.is_hosted:
+ username = config_overrides["irdbd_sql_username"]
+ password = config_overrides["irdbd_sql_password"]
+ cmd = ("mysqldump", "-u", username, "-p" + password, self.irdb_name)
+ with open(self.path("irdbd.sql"), "w") as f:
+ if not quiet:
+ print "Writing", f.name
+ subprocess.check_call(cmd, stdout = f)
def pre_django_sql_setup(needed):
- username = config_overrides["irdbd_sql_username"]
- password = config_overrides["irdbd_sql_password"]
-
- # If we have the MySQL root password, just blow away and recreate
- # the required databases. Otherwise, check for missing databases,
- # then blow away all tables in the required databases. In either
- # case, we assume that the Django syncdb code will populate
- # databases as necessary, all we need to do here is provide empty
- # databases for the Django code to fill in.
+ username = config_overrides["irdbd_sql_username"]
+ password = config_overrides["irdbd_sql_password"]
+
+ # If we have the MySQL root password, just blow away and recreate
+ # the required databases. Otherwise, check for missing databases,
+ # then blow away all tables in the required databases. In either
+ # case, we assume that the Django syncdb code will populate
+ # databases as necessary, all we need to do here is provide empty
+ # databases for the Django code to fill in.
+
+ if mysql_rootpass is not None:
+ if mysql_rootpass:
+ db = MySQLdb.connect(user = mysql_rootuser, passwd = mysql_rootpass)
+ else:
+ db = MySQLdb.connect(user = mysql_rootuser)
+ cur = db.cursor()
+ for database in needed:
+ try:
+ cur.execute("DROP DATABASE IF EXISTS %s" % database)
+ except:
+ pass
+ cur.execute("CREATE DATABASE %s" % database)
+ cur.execute("GRANT ALL ON %s.* TO %s@localhost IDENTIFIED BY %%s" % (
+ database, username), (password,))
- if mysql_rootpass is not None:
- if mysql_rootpass:
- db = MySQLdb.connect(user = mysql_rootuser, passwd = mysql_rootpass)
else:
- db = MySQLdb.connect(user = mysql_rootuser)
- cur = db.cursor()
- for database in needed:
- try:
- cur.execute("DROP DATABASE IF EXISTS %s" % database)
- except:
- pass
- cur.execute("CREATE DATABASE %s" % database)
- cur.execute("GRANT ALL ON %s.* TO %s@localhost IDENTIFIED BY %%s" % (
- database, username), (password,))
-
- else:
- db = MySQLdb.connect(user = username, passwd = password)
- cur = db.cursor()
- cur.execute("SHOW DATABASES")
- existing = set(r[0] for r in cur.fetchall())
- if needed - existing:
- sys.stderr.write("The following databases are missing:\n")
- for database in sorted(needed - existing):
- sys.stderr.write(" %s\n" % database)
- sys.stderr.write("Please create them manually or put MySQL root password in my config file\n")
- sys.exit("Missing databases and MySQL root password not known, can't continue")
- for database in needed:
- db.select_db(database)
- cur.execute("SHOW TABLES")
- tables = [r[0] for r in cur.fetchall()]
- cur.execute("SET foreign_key_checks = 0")
- for table in tables:
- cur.execute("DROP TABLE %s" % table)
- cur.execute("SET foreign_key_checks = 1")
-
- cur.close()
- db.commit()
- db.close()
+ db = MySQLdb.connect(user = username, passwd = password)
+ cur = db.cursor()
+ cur.execute("SHOW DATABASES")
+ existing = set(r[0] for r in cur.fetchall())
+ if needed - existing:
+ sys.stderr.write("The following databases are missing:\n")
+ for database in sorted(needed - existing):
+ sys.stderr.write(" %s\n" % database)
+ sys.stderr.write("Please create them manually or put MySQL root password in my config file\n")
+ sys.exit("Missing databases and MySQL root password not known, can't continue")
+ for database in needed:
+ db.select_db(database)
+ cur.execute("SHOW TABLES")
+ tables = [r[0] for r in cur.fetchall()]
+ cur.execute("SET foreign_key_checks = 0")
+ for table in tables:
+ cur.execute("DROP TABLE %s" % table)
+ cur.execute("SET foreign_key_checks = 1")
+
+ cur.close()
+ db.commit()
+ db.close()
class timestamp(object):
- def __init__(self, *args):
- self.count = 0
- self.start = self.tick = rpki.sundial.now()
+ def __init__(self, *args):
+ self.count = 0
+ self.start = self.tick = rpki.sundial.now()
- def __call__(self, *args):
- now = rpki.sundial.now()
- if not quiet:
- print "[Count %s last %s total %s now %s]" % (
- self.count, now - self.tick, now - self.start, now)
- self.tick = now
- self.count += 1
+ def __call__(self, *args):
+ now = rpki.sundial.now()
+ if not quiet:
+ print "[Count %s last %s total %s now %s]" % (
+ self.count, now - self.tick, now - self.start, now)
+ self.tick = now
+ self.count += 1
def main():
- global flat_publication
- global config_overrides
- global only_one_pubd
- global loopback
- global dns_suffix
- global mysql_rootuser
- global mysql_rootpass
- global yaml_file
- global test_dir
- global rpki_conf
- global publication_base
- global publication_root
- global quiet
-
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- parser = argparse.ArgumentParser(description = "yamlconf")
- parser.add_argument("-c", "--config", help = "configuration file")
- parser.add_argument("--dns_suffix",
- help = "DNS suffix to add to hostnames")
- parser.add_argument("-l", "--loopback", action = "store_true",
- help = "Configure for use with yamltest on localhost")
- parser.add_argument("-f", "--flat_publication", action = "store_true",
- help = "Use flat publication model")
- parser.add_argument("-q", "--quiet", action = "store_true",
- help = "Work more quietly")
- parser.add_argument("--profile",
- help = "Filename for profile output")
- parser.add_argument("yaml_file", type = argparse.FileType("r"),
- help = "YAML file describing network to build")
- args = parser.parse_args()
-
- dns_suffix = args.dns_suffix
- loopback = args.loopback
- flat_publication = args.flat_publication
- quiet = args.quiet
- yaml_file = args.yaml_file
-
- rpki.log.init("yamlconf", argparse.Namespace(log_level = logging.DEBUG,
- log_handler = lambda: logging.StreamHandler(sys.stdout)))
-
- # Allow optional config file for this tool to override default
- # passwords: this is mostly so that I can show a complete working
- # example without publishing my own server's passwords.
-
- cfg = rpki.config.parser(args.config, "yamlconf", allow_missing = True)
- try:
- cfg.set_global_flags()
- except:
- pass
-
- # Use of "yamltest.dir" is deliberate: intent is for what we write to
- # be usable with "yamltest --skip_config".
-
- only_one_pubd = cfg.getboolean("only_one_pubd", True)
- test_dir = cfg.get("test_directory", cleanpath(this_dir, "yamltest.dir"))
- rpki_conf = cfg.get("rpki_conf", cleanpath(this_dir, "..", "examples/rpki.conf"))
- mysql_rootuser = cfg.get("mysql_rootuser", "root")
-
- try:
- mysql_rootpass = cfg.get("mysql_rootpass")
- except:
- pass
-
- try:
- publication_base = cfg.get("publication_base")
- except:
- pass
-
- try:
- publication_root = cfg.get("publication_root")
- except:
- pass
-
- for k in ("rpkid_sql_password", "irdbd_sql_password", "pubd_sql_password",
- "rpkid_sql_username", "irdbd_sql_username", "pubd_sql_username"):
- if cfg.has_option(k):
- config_overrides[k] = cfg.get(k)
-
- if args.profile:
- import cProfile
- prof = cProfile.Profile()
+ global flat_publication
+ global config_overrides
+ global only_one_pubd
+ global loopback
+ global dns_suffix
+ global mysql_rootuser
+ global mysql_rootpass
+ global yaml_file
+ global test_dir
+ global rpki_conf
+ global publication_base
+ global publication_root
+ global quiet
+
+ os.environ["TZ"] = "UTC"
+ time.tzset()
+
+ parser = argparse.ArgumentParser(description = "yamlconf")
+ parser.add_argument("-c", "--config", help = "configuration file")
+ parser.add_argument("--dns_suffix",
+ help = "DNS suffix to add to hostnames")
+ parser.add_argument("-l", "--loopback", action = "store_true",
+ help = "Configure for use with yamltest on localhost")
+ parser.add_argument("-f", "--flat_publication", action = "store_true",
+ help = "Use flat publication model")
+ parser.add_argument("-q", "--quiet", action = "store_true",
+ help = "Work more quietly")
+ parser.add_argument("--profile",
+ help = "Filename for profile output")
+ parser.add_argument("yaml_file", type = argparse.FileType("r"),
+ help = "YAML file describing network to build")
+ args = parser.parse_args()
+
+ dns_suffix = args.dns_suffix
+ loopback = args.loopback
+ flat_publication = args.flat_publication
+ quiet = args.quiet
+ yaml_file = args.yaml_file
+
+ log_handler = logging.StreamHandler(sys.stdout)
+ log_handler.setFormatter(rpki.config.Formatter("yamlconf", log_handler, logging.DEBUG))
+ logging.getLogger().addHandler(log_handler)
+ logging.getLogger().setLevel(logging.DEBUG)
+
+ # Allow optional config file for this tool to override default
+ # passwords: this is mostly so that I can show a complete working
+ # example without publishing my own server's passwords.
+
+ cfg = rpki.config.parser(set_filename = args.config, section = "yamlconf", allow_missing = True)
try:
- prof.runcall(body)
- finally:
- prof.dump_stats(args.profile)
- if not quiet:
- print
- print "Dumped profile data to %s" % args.profile
- else:
- body()
-
-def body():
+ cfg.set_global_flags()
+ except:
+ pass
- global rpki
+ # Use of "yamltest.dir" is deliberate: intent is for what we write to
+ # be usable with "yamltest --skip_config".
- ts = timestamp()
+ only_one_pubd = cfg.getboolean("only_one_pubd", True)
+ test_dir = cfg.get("test_directory", cleanpath(this_dir, "yamltest.dir"))
+ rpki_conf = cfg.get("rpki_conf", cleanpath(this_dir, "..", "examples/rpki.conf"))
+ mysql_rootuser = cfg.get("mysql_rootuser", "root")
- for root, dirs, files in os.walk(test_dir, topdown = False):
- for fn in files:
- os.unlink(os.path.join(root, fn))
- for d in dirs:
- os.rmdir(os.path.join(root, d))
+ try:
+ mysql_rootpass = cfg.get("mysql_rootpass")
+ except:
+ pass
- if not quiet:
- print
- print "Reading YAML", yaml_file.name
+ try:
+ publication_base = cfg.get("publication_base")
+ except:
+ pass
- db = allocation_db(yaml.safe_load_all(yaml_file).next())
+ try:
+ publication_root = cfg.get("publication_root")
+ except:
+ pass
- # Show what we loaded
+ for k in ("rpkid_sql_password", "irdbd_sql_password", "pubd_sql_password",
+ "rpkid_sql_username", "irdbd_sql_username", "pubd_sql_username"):
+ if cfg.has_option(k):
+ config_overrides[k] = cfg.get(k)
+
+ if args.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(body)
+ finally:
+ prof.dump_stats(args.profile)
+ if not quiet:
+ print
+ print "Dumped profile data to %s" % args.profile
+ else:
+ body()
- #db.dump()
+def body():
- # Do pre-Django SQL setup
+ global rpki
- pre_django_sql_setup(set(d.irdb_name for d in db if not d.is_hosted))
+ ts = timestamp()
- # Now ready for fun with multiple databases in Django!
+ for root, dirs, files in os.walk(test_dir, topdown = False):
+ for fn in files:
+ os.unlink(os.path.join(root, fn))
+ for d in dirs:
+ os.rmdir(os.path.join(root, d))
- # https://docs.djangoproject.com/en/1.4/topics/db/multi-db/
- # https://docs.djangoproject.com/en/1.4/topics/db/sql/
+ if not quiet:
+ print
+ print "Reading YAML", yaml_file.name
- database_template = {
- "ENGINE" : "django.db.backends.mysql",
- "USER" : config_overrides["irdbd_sql_username"],
- "PASSWORD" : config_overrides["irdbd_sql_password"],
- "HOST" : "",
- "PORT" : "",
- "OPTIONS" : { "init_command": "SET storage_engine=INNODB" }}
+ db = allocation_db(yaml.safe_load_all(yaml_file).next())
- databases = dict((d.irdb_name,
- dict(database_template, NAME = d.irdb_name))
- for d in db if not d.is_hosted)
+ # Show what we loaded
- databases["default"] = databases[db.root.irdb_name]
+ #db.dump()
- import django
+ # Do pre-Django SQL setup
- from django.conf import settings
+ pre_django_sql_setup(set(d.irdb_name for d in db if not d.is_hosted))
- settings.configure(
- DATABASES = databases,
- DATABASE_ROUTERS = ["rpki.irdb.router.DBContextRouter"],
- MIDDLEWARE_CLASSES = (),
- INSTALLED_APPS = ("rpki.irdb",))
+ # Now ready for fun with multiple databases in Django!
+ #
+ # https://docs.djangoproject.com/en/1.4/topics/db/multi-db/
+ # https://docs.djangoproject.com/en/1.4/topics/db/sql/
+ #
+ # This program's use of the ORM is sufficiently different that it's
+ # not worth straining to use rpki.django_settings, so we just use
+ # Django's settings API directly.
- if django.VERSION >= (1, 7): # API change, feh
- from django.apps import apps
- apps.populate(settings.INSTALLED_APPS)
+ database_template = {
+ "ENGINE" : "django.db.backends.mysql",
+ "USER" : config_overrides["irdbd_sql_username"],
+ "PASSWORD" : config_overrides["irdbd_sql_password"],
+ "HOST" : "",
+ "PORT" : "",
+ "OPTIONS" : { "init_command": "SET storage_engine=INNODB" }}
- import rpki.irdb
+ databases = dict((d.irdb_name, dict(database_template, NAME = d.irdb_name))
+ for d in db if not d.is_hosted)
- rpki.irdb.models.ca_certificate_lifetime = rpki.sundial.timedelta(days = 3652 * 2)
- rpki.irdb.models.ee_certificate_lifetime = rpki.sundial.timedelta(days = 3652)
+ databases["default"] = databases[db.root.irdb_name]
- ts()
+ import django
+ django.setup()
- for d in db:
- if not quiet:
- print
- print "Configuring", d.name
-
- if not d.is_hosted:
- d.mkdir()
- if d.runs_pubd:
- d.mkdir("publication")
- if d.is_root:
- d.mkdir("publication.root")
-
- if not d.is_hosted:
- d.dump_conf()
- d.dump_rsyncd()
-
- d.dump_asns("%s.asns.csv" % d.name)
- d.dump_prefixes("%s.prefixes.csv" % d.name)
- d.dump_roas("%s.roas.csv" % d.name)
- d.dump_ghostbusters("%s.ghostbusters.vcard" % d.name)
- d.dump_router_certificates("%s.routercerts.xml" % d.name)
-
- if not d.is_hosted:
- if not quiet:
- print "Initializing SQL"
- d.syncdb()
- if not quiet:
- print "Hiring zookeeper"
- d.hire_zookeeper()
-
- with d.irdb:
- if not quiet:
- print "Creating identity"
- x = d.zoo.initialize()
-
- if d.is_root:
- if not quiet:
- print "Creating RPKI root certificate and TAL"
- d.dump_root()
- x = d.zoo.configure_rootd()
+ from django.conf import settings
- else:
- with d.parent.irdb:
- x = d.parent.zoo.configure_child(x.file)[0]
- x = d.zoo.configure_parent(x.file)[0]
+ settings.configure(
+ DATABASES = databases,
+ DATABASE_ROUTERS = ["rpki.irdb.router.DBContextRouter"],
+ INSTALLED_APPS = ["rpki.irdb"])
- with d.pubd.irdb:
- x = d.pubd.zoo.configure_publication_client(x.file, flat = flat_publication)[0]
- d.zoo.configure_repository(x.file)
+ import rpki.irdb
- if loopback and not d.is_hosted:
- with d.irdb:
- d.zoo.write_bpki_files()
+ rpki.irdb.models.ca_certificate_lifetime = rpki.sundial.timedelta(days = 3652 * 2)
+ rpki.irdb.models.ee_certificate_lifetime = rpki.sundial.timedelta(days = 3652)
ts()
- if not loopback:
- if not quiet:
- print
for d in db:
- d.dump_sql()
+ if not quiet:
+ print
+ print "Configuring", d.name
+
+ if not d.is_hosted:
+ d.mkdir()
+ if d.runs_pubd:
+ d.mkdir("publication")
+ if d.is_root:
+ d.mkdir("publication.root")
+
+ if not d.is_hosted:
+ d.dump_conf()
+ d.dump_rsyncd()
+
+ d.dump_asns("%s.asns.csv" % d.name)
+ d.dump_prefixes("%s.prefixes.csv" % d.name)
+ d.dump_roas("%s.roas.csv" % d.name)
+ d.dump_ghostbusters("%s.ghostbusters.vcard" % d.name)
+ d.dump_router_certificates("%s.routercerts.xml" % d.name)
+
+ if not d.is_hosted:
+ if not quiet:
+ print "Initializing SQL"
+ d.syncdb()
+ if not quiet:
+ print "Hiring zookeeper"
+ d.hire_zookeeper()
+
+ with d.irdb:
+ if not quiet:
+ print "Creating identity"
+ x = d.zoo.initialize()
+
+ if d.is_root:
+ if not quiet:
+ print "Creating RPKI root certificate and TAL"
+ d.dump_root()
+ x = d.zoo.configure_root()
+
+ else:
+ with d.parent.irdb:
+ x = d.parent.zoo.configure_child(x.file)[0]
+ x = d.zoo.configure_parent(x.file)[0]
+
+ with d.pubd.irdb:
+ x = d.pubd.zoo.configure_publication_client(x.file, flat = flat_publication)[0]
+ d.zoo.configure_repository(x.file)
+
+ if loopback and not d.is_hosted:
+ with d.irdb:
+ d.zoo.write_bpki_files()
+
+ ts()
+
+ if not loopback:
+ if not quiet:
+ print
+ for d in db:
+ d.dump_sql()
if __name__ == "__main__":
- main()
+ main()
diff --git a/ca/tests/yamltest-test-all.sh b/ca/tests/yamltest-test-all.sh
index 4bd5c560..54224d5e 100644
--- a/ca/tests/yamltest-test-all.sh
+++ b/ca/tests/yamltest-test-all.sh
@@ -24,24 +24,28 @@ test -z "$STY" && exec screen -L sh $0
screen -X split
screen -X focus
-: ${runtime=900}
+# Timers
+: ${startup=600} ${runtime=900} ${poll=30} ${shutdown=30}
+
+# Once upon a time we had a settitle program. Noop for now.
+: ${settitle=":"}
for yaml in smoketest.*.yaml
do
- settitle "$yaml: Starting"
+ $settitle "$yaml: Starting"
rm -rf test rcynic-data
python sql-cleaner.py
now=$(date +%s)
- finish=$(($now + $runtime))
+ finish=$(($now + $startup + $runtime))
title="$yaml: will finish at $(date -r $finish)"
- settitle "$title"
- screen sh -c "settitle '$title'; exec python yamltest.py -p yamltest.pid $yaml"
+ $settitle "$title"
+ screen sh -c "$settitle '$title'; exec python yamltest.py -p yamltest.pid $yaml"
date
- sleep 180
+ sleep $startup
date
while test $(date +%s) -lt $finish
do
- sleep 30
+ sleep $poll
date
../../rp/rcynic/rcynic
../../rp/rcynic/rcynic-text rcynic.xml
@@ -49,10 +53,8 @@ do
date
echo "$title"
done
- if test -r yamltest.pid
- then
- kill -INT $(cat yamltest.pid)
- sleep 30
- fi
+ if test -r yamltest.pid; then kill -INT $(cat yamltest.pid); sleep ${shutdown}; fi
+ if test -r yamltest.pid; then kill -INT $(cat yamltest.pid); sleep ${shutdown}; fi
+ if test -r yamltest.pid; then kill -KILL $(cat yamltest.pid); sleep ${shutdown}; fi
make backup
done
diff --git a/ca/tests/yamltest.py b/ca/tests/yamltest.py
index 7de7b675..c2959dc9 100644..100755
--- a/ca/tests/yamltest.py
+++ b/ca/tests/yamltest.py
@@ -36,25 +36,27 @@ and waits for one of them to exit.
# running daemons.
#
-# pylint: disable=W0702,W0621
-
import subprocess
import re
import os
import logging
import argparse
+import webbrowser
import sys
import yaml
import signal
import time
+import textwrap
import lxml.etree
import rpki.resource_set
import rpki.sundial
-import rpki.config
import rpki.log
import rpki.csv_utils
import rpki.x509
import rpki.relaxng
+import rpki.config
+
+# pylint: disable=W0621
# Nasty regular expressions for parsing config files. Sadly, while
# the Python ConfigParser supports writing config files, it does so in
@@ -64,821 +66,1000 @@ section_regexp = re.compile(r"\s*\[\s*(.+?)\s*\]\s*$")
variable_regexp = re.compile(r"\s*([-a-zA-Z0-9_]+)\s*=\s*(.+?)\s*$")
def cleanpath(*names):
- """
- Construct normalized pathnames.
- """
- return os.path.normpath(os.path.join(*names))
-
-# Pathnames for various things we need
-
-this_dir = os.getcwd()
-test_dir = cleanpath(this_dir, "yamltest.dir")
-rpkid_dir = cleanpath(this_dir, "..")
-
-prog_rpkic = cleanpath(rpkid_dir, "rpkic")
-prog_rpkid = cleanpath(rpkid_dir, "rpkid")
-prog_irdbd = cleanpath(rpkid_dir, "irdbd")
-prog_pubd = cleanpath(rpkid_dir, "pubd")
-prog_rootd = cleanpath(rpkid_dir, "rootd")
-
-class roa_request(object):
- """
- Representation of a ROA request.
- """
-
- def __init__(self, asn, ipv4, ipv6):
- self.asn = asn
- self.v4 = rpki.resource_set.roa_prefix_set_ipv4("".join(ipv4.split())) if ipv4 else None
- self.v6 = rpki.resource_set.roa_prefix_set_ipv6("".join(ipv6.split())) if ipv6 else None
-
- def __eq__(self, other):
- return self.asn == other.asn and self.v4 == other.v4 and self.v6 == other.v6
-
- def __hash__(self):
- v4 = tuple(self.v4) if self.v4 is not None else None
- v6 = tuple(self.v6) if self.v6 is not None else None
- return self.asn.__hash__() + v4.__hash__() + v6.__hash__()
-
- def __str__(self):
- if self.v4 and self.v6:
- return "%s: %s,%s" % (self.asn, self.v4, self.v6)
- else:
- return "%s: %s" % (self.asn, self.v4 or self.v6)
-
- @classmethod
- def parse(cls, y):
"""
- Parse a ROA request from YAML format.
+ Construct normalized pathnames.
"""
- return cls(y.get("asn"), y.get("ipv4"), y.get("ipv6"))
-
-
-class router_cert(object):
- """
- Representation for a router_cert object.
- """
- _ecparams = None
+ return os.path.normpath(os.path.join(*names))
- @classmethod
- def ecparams(cls):
- if cls._ecparams is None:
- cls._ecparams = rpki.x509.KeyParams.generateEC()
- return cls._ecparams
-
- def __init__(self, asn, router_id):
- self.asn = rpki.resource_set.resource_set_as("".join(str(asn).split()))
- self.router_id = router_id
- self.keypair = rpki.x509.ECDSA.generate(self.ecparams())
- self.pkcs10 = rpki.x509.PKCS10.create(keypair = self.keypair)
- self.gski = self.pkcs10.gSKI()
-
- def __eq__(self, other):
- return self.asn == other.asn and self.router_id == other.router_id and self.gski == other.gski
-
- def __hash__(self):
- return tuple(self.asn).__hash__() + self.router_id.__hash__() + self.gski.__hash__()
+# Pathnames for various things we need
- def __str__(self):
- return "%s: %s: %s" % (self.asn, self.router_id, self.gski)
+this_dir = os.getcwd()
+test_dir = cleanpath(this_dir, "yamltest.dir")
+ca_dir = cleanpath(this_dir, "..")
+rp_conf_dir = cleanpath(this_dir, "..", "..", "rp", "config")
+rpki_dir = cleanpath(this_dir, "..", "..")
- @classmethod
- def parse(cls, yaml):
- return cls(yaml.get("asn"), yaml.get("router_id"))
+prog_rpkid = cleanpath(ca_dir, "rpkid")
+prog_irdbd = cleanpath(ca_dir, "irdbd")
+prog_pubd = cleanpath(ca_dir, "pubd")
+prog_rpki_confgen = cleanpath(rp_conf_dir, "rpki-confgen")
-class allocation_db(list):
- """
- Our allocation database.
- """
-
- def __init__(self, yaml):
- list.__init__(self)
- self.root = allocation(yaml, self)
- assert self.root.is_root
- if self.root.crl_interval is None:
- self.root.crl_interval = 60 * 60
- if self.root.regen_margin is None:
- self.root.regen_margin = 24 * 60 * 60
- if self.root.base.valid_until is None:
- self.root.base.valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 2)
- for a in self:
- if a.base.valid_until is None:
- a.base.valid_until = a.parent.base.valid_until
- if a.crl_interval is None:
- a.crl_interval = a.parent.crl_interval
- if a.regen_margin is None:
- a.regen_margin = a.parent.regen_margin
- self.root.closure()
- self.map = dict((a.name, a) for a in self)
- for a in self:
- if a.is_hosted:
- a.hosted_by = self.map[a.hosted_by]
- a.hosted_by.hosts.append(a)
- assert not a.is_root and not a.hosted_by.is_hosted
-
- def dump(self):
+class roa_request(object):
"""
- Show contents of allocation database.
+ Representation of a ROA request.
"""
- for a in self:
- a.dump()
+ def __init__(self, asn, ipv4, ipv6):
+ self.asn = asn
+ self.v4 = rpki.resource_set.roa_prefix_set_ipv4("".join(ipv4.split())) if ipv4 else None
+ self.v6 = rpki.resource_set.roa_prefix_set_ipv6("".join(ipv6.split())) if ipv6 else None
-class allocation(object):
- """
- One entity in our allocation database. Every entity in the database
- is assumed to hold resources, so needs at least rpkic services.
- Entities that don't have the hosted_by property run their own copies
- of rpkid, irdbd, and pubd, so they also need myirbe services.
- """
-
- base_port = None
- parent = None
- crl_interval = None
- regen_margin = None
- rootd_port = None
- engine = -1
- rpkid_port = -1
- irdbd_port = -1
- pubd_port = -1
- rsync_port = -1
- rootd_port = -1
- rpkic_counter = 0L
-
- @classmethod
- def allocate_port(cls):
- """
- Allocate a TCP port.
- """
- cls.base_port += 1
- return cls.base_port
+ def __eq__(self, other):
+ return self.asn == other.asn and self.v4 == other.v4 and self.v6 == other.v6
- base_engine = -1
+ def __hash__(self):
+ v4 = tuple(self.v4) if self.v4 is not None else None
+ v6 = tuple(self.v6) if self.v6 is not None else None
+ return self.asn.__hash__() + v4.__hash__() + v6.__hash__()
- @classmethod
- def allocate_engine(cls):
- """
- Allocate an engine number, mostly used to construct MySQL database
- names.
- """
- cls.base_engine += 1
- return cls.base_engine
-
- def __init__(self, yaml, db, parent = None):
- db.append(self)
- self.name = yaml["name"]
- self.parent = parent
- self.kids = [allocation(k, db, self) for k in yaml.get("kids", ())]
- valid_until = None
- if "valid_until" in yaml:
- valid_until = rpki.sundial.datetime.from_datetime(yaml.get("valid_until"))
- if valid_until is None and "valid_for" in yaml:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta.parse(yaml["valid_for"])
- self.base = rpki.resource_set.resource_bag(
- asn = rpki.resource_set.resource_set_as(yaml.get("asn")),
- v4 = rpki.resource_set.resource_set_ipv4(yaml.get("ipv4")),
- v6 = rpki.resource_set.resource_set_ipv6(yaml.get("ipv6")),
- valid_until = valid_until)
- if "crl_interval" in yaml:
- self.crl_interval = rpki.sundial.timedelta.parse(yaml["crl_interval"]).convert_to_seconds()
- if "regen_margin" in yaml:
- self.regen_margin = rpki.sundial.timedelta.parse(yaml["regen_margin"]).convert_to_seconds()
- self.roa_requests = [roa_request.parse(y) for y in yaml.get("roa_request", yaml.get("route_origin", ()))]
- self.router_certs = [router_cert.parse(y) for y in yaml.get("router_cert", ())]
- if "ghostbusters" in yaml:
- self.ghostbusters = yaml.get("ghostbusters")
- elif "ghostbuster" in yaml:
- self.ghostbusters = [yaml.get("ghostbuster")]
- else:
- self.ghostbusters = []
- for r in self.roa_requests:
- if r.v4:
- self.base.v4 |= r.v4.to_resource_set()
- if r.v6:
- self.base.v6 |= r.v6.to_resource_set()
- for r in self.router_certs:
- self.base.asn |= r.asn
- self.hosted_by = yaml.get("hosted_by")
- self.hosts = []
- if not self.is_hosted:
- self.engine = self.allocate_engine()
- self.rpkid_port = self.allocate_port()
- self.irdbd_port = self.allocate_port()
- if self.runs_pubd:
- self.pubd_port = self.allocate_port()
- self.rsync_port = self.allocate_port()
- if self.is_root:
- self.rootd_port = self.allocate_port()
-
- def closure(self):
- """
- Compute resource closure of this node and its children, to avoid a
- lot of tedious (and error-prone) duplication in the YAML file.
- """
- resources = self.base
- for kid in self.kids:
- resources |= kid.closure()
- self.resources = resources
- return resources
-
- def dump(self):
- """
- Show content of this allocation node.
- """
- print str(self)
-
- def __str__(self):
- s = self.name + ":\n"
- if self.resources.asn: s += " ASNs: %s\n" % self.resources.asn
- if self.resources.v4: s += " IPv4: %s\n" % self.resources.v4
- if self.resources.v6: s += " IPv6: %s\n" % self.resources.v6
- if self.kids: s += " Kids: %s\n" % ", ".join(k.name for k in self.kids)
- if self.parent: s += " Up: %s\n" % self.parent.name
- if self.is_hosted: s += " Host: %s\n" % self.hosted_by.name
- if self.hosts: s += " Hosts: %s\n" % ", ".join(h.name for h in self.hosts)
- for r in self.roa_requests: s += " ROA: %s\n" % r
- if not self.is_hosted: s += " IPort: %s\n" % self.irdbd_port
- if self.runs_pubd: s += " PPort: %s\n" % self.pubd_port
- if not self.is_hosted: s += " RPort: %s\n" % self.rpkid_port
- if self.runs_pubd: s += " SPort: %s\n" % self.rsync_port
- if self.is_root: s += " TPort: %s\n" % self.rootd_port
- return s + " Until: %s\n" % self.resources.valid_until
-
- @property
- def is_root(self):
- """
- Is this the root node?
- """
- return self.parent is None
-
- @property
- def is_hosted(self):
- """
- Is this entity hosted?
- """
- return self.hosted_by is not None
+ def __str__(self):
+ if self.v4 and self.v6:
+ return "%s: %s,%s" % (self.asn, self.v4, self.v6)
+ else:
+ return "%s: %s" % (self.asn, self.v4 or self.v6)
- @property
- def runs_pubd(self):
- """
- Does this entity run a pubd?
- """
- return self.is_root or not (self.is_hosted or only_one_pubd)
+ @classmethod
+ def parse(cls, y):
+ """
+ Parse a ROA request from YAML format.
+ """
- def path(self, *names):
- """
- Construct pathnames in this entity's test directory.
- """
- return cleanpath(test_dir, self.host.name, *names)
+ return cls(y.get("asn"), y.get("ipv4"), y.get("ipv6"))
- def csvout(self, fn):
- """
- Open and log a CSV output file.
- """
- path = self.path(fn)
- print "Writing", path
- return rpki.csv_utils.csv_writer(path)
- def up_down_url(self):
+class router_cert(object):
"""
- Construct service URL for this node's parent.
+ Representation for a router_cert object.
"""
- return "http://localhost:%d/up-down/%s/%s" % (self.parent.host.rpkid_port,
- self.parent.name,
- self.name)
- def dump_asns(self):
- """
- Write Autonomous System Numbers CSV file.
- """
- fn = "%s.asns.csv" % d.name
- if not args.skip_config:
- f = self.csvout(fn)
- for k in self.kids:
- f.writerows((k.name, a) for a in k.resources.asn)
- f.close()
- if not args.stop_after_config:
- self.run_rpkic("load_asns", fn)
-
- def dump_prefixes(self):
- """
- Write prefixes CSV file.
- """
- fn = "%s.prefixes.csv" % d.name
- if not args.skip_config:
- f = self.csvout(fn)
- for k in self.kids:
- f.writerows((k.name, p) for p in (k.resources.v4 + k.resources.v6))
- f.close()
- if not args.stop_after_config:
- self.run_rpkic("load_prefixes", fn)
-
- def dump_roas(self):
- """
- Write ROA CSV file.
- """
- fn = "%s.roas.csv" % d.name
- if not args.skip_config:
- f = self.csvout(fn)
- for r in self.roa_requests:
- f.writerows((p, r.asn)
- for p in (r.v4 + r.v6 if r.v4 and r.v6 else r.v4 or r.v6 or ()))
- f.close()
- if not args.stop_after_config:
- self.run_rpkic("load_roa_requests", fn)
-
- def dump_ghostbusters(self):
- """
- Write Ghostbusters vCard file.
- """
- if self.ghostbusters:
- fn = "%s.ghostbusters.vcard" % d.name
- if not args.skip_config:
- path = self.path(fn)
- print "Writing", path
- f = open(path, "w")
- for i, g in enumerate(self.ghostbusters):
- if i:
- f.write("\n")
- f.write(g)
- f.close()
- if not args.stop_after_config:
- self.run_rpkic("load_ghostbuster_requests", fn)
-
- def dump_router_certificates(self):
- """
- Write EE certificates (router certificates, etc).
- """
- if self.router_certs:
- fn = "%s.routercerts.xml" % d.name
- if not args.skip_config:
- path = self.path(fn)
- print "Writing", path
- xmlns = rpki.relaxng.router_certificate.xmlns
- xml = lxml.etree.Element(xmlns + "router_certificate_requests",
- version = rpki.relaxng.router_certificate.version,
- nsmap = rpki.relaxng.router_certificate.nsmap)
- for r in self.router_certs:
- x = lxml.etree.SubElement(xml, xmlns + "router_certificate_request",
- router_id = str(r.router_id),
- asn = str(r.asn),
- valid_until = str(self.resources.valid_until))
- x.text = r.pkcs10.get_Base64()
- rpki.relaxng.router_certificate.assertValid(xml)
- lxml.etree.ElementTree(xml).write(path, pretty_print = True)
- if not args.stop_after_config:
- self.run_rpkic("add_router_certificate_request", fn)
- if not args.skip_config and args.store_router_private_keys:
- path = self.path("%s.routercerts.keys" % d.name)
- print "Writing", path
- with open(path, "w") as f:
- for r in self.router_certs:
- f.write(r.keypair.get_PEM())
+ _ecparams = None
- @property
- def pubd(self):
- """
- Walk up tree until we find somebody who runs pubd.
- """
- s = self
- while not s.runs_pubd:
- s = s.parent
- return s
+ @classmethod
+ def ecparams(cls):
+ if cls._ecparams is None:
+ cls._ecparams = rpki.x509.KeyParams.generateEC()
+ return cls._ecparams
- @property
- def client_handle(self):
- """
- Work out what pubd configure_publication_client will call us.
- """
- path = []
- s = self
- if not args.flat_publication:
- while not s.runs_pubd:
- path.append(s)
- s = s.parent
- path.append(s)
- return ".".join(i.name for i in reversed(path))
+ def __init__(self, asn, router_id):
+ self.asn = rpki.resource_set.resource_set_as("".join(str(asn).split()))
+ self.router_id = router_id
+ self.keypair = rpki.x509.ECDSA.generate(params = self.ecparams(), quiet = True)
+ self.pkcs10 = rpki.x509.PKCS10.create(keypair = self.keypair)
+ self.gski = self.pkcs10.gSKI()
- @property
- def host(self):
- return self.hosted_by or self
+ def __eq__(self, other):
+ return self.asn == other.asn and self.router_id == other.router_id and self.gski == other.gski
- def dump_conf(self):
- """
- Write configuration file for OpenSSL and RPKI tools.
- """
+ def __hash__(self):
+ return tuple(self.asn).__hash__() + self.router_id.__hash__() + self.gski.__hash__()
- r = dict(
- handle = self.name,
- run_rpkid = str(not self.is_hosted),
- run_pubd = str(self.runs_pubd),
- run_rootd = str(self.is_root),
- irdbd_sql_database = "irdb%d" % self.engine,
- irdbd_sql_username = "irdb",
- rpkid_sql_database = "rpki%d" % self.engine,
- rpkid_sql_username = "rpki",
- rpkid_server_host = "localhost",
- rpkid_server_port = str(self.rpkid_port),
- irdbd_server_host = "localhost",
- irdbd_server_port = str(self.irdbd_port),
- rootd_server_port = str(self.rootd_port),
- pubd_sql_database = "pubd%d" % self.engine,
- pubd_sql_username = "pubd",
- pubd_server_host = "localhost",
- pubd_server_port = str(self.pubd.pubd_port),
- publication_rsync_server = "localhost:%s" % self.pubd.rsync_port,
- bpki_servers_directory = self.path(),
- publication_base_directory = self.path("publication"),
- shared_sql_password = "fnord")
-
- r.update(config_overrides)
-
- f = open(self.path("rpki.conf"), "w")
- f.write("# Automatically generated, do not edit\n")
- print "Writing", f.name
-
- section = None
- for line in open(cleanpath(rpkid_dir, "examples/rpki.conf")):
- m = section_regexp.match(line)
- if m:
- section = m.group(1)
- m = variable_regexp.match(line)
- option = m.group(1) if m and section == "myrpki" else None
- if option and option in r:
- line = "%s = %s\n" % (option, r[option])
- f.write(line)
-
- f.close()
-
- def dump_rsyncd(self):
- """
- Write rsyncd configuration file.
- """
+ def __str__(self):
+ return "%s: %s: %s" % (self.asn, self.router_id, self.gski)
- if self.runs_pubd:
- f = open(self.path("rsyncd.conf"), "w")
- print "Writing", f.name
- f.writelines(s + "\n" for s in
- ("# Automatically generated, do not edit",
- "port = %d" % self.rsync_port,
- "address = localhost",
- "[rpki]",
- "log file = rsyncd.log",
- "read only = yes",
- "use chroot = no",
- "path = %s" % self.path("publication"),
- "comment = RPKI test",
- "[root]",
- "log file = rsyncd_root.log",
- "read only = yes",
- "use chroot = no",
- "path = %s" % self.path("publication.root"),
- "comment = RPKI test root"))
- f.close()
-
- @classmethod
- def next_rpkic_counter(cls):
- cls.rpkic_counter += 10000
- return str(cls.rpkic_counter)
-
- def run_rpkic(self, *argv):
- """
- Run rpkic for this entity.
- """
- cmd = [prog_rpkic, "-i", self.name, "-c", self.path("rpki.conf")]
- if args.profile:
- cmd.append("--profile")
- cmd.append(self.path("rpkic.%s.prof" % rpki.sundial.now()))
- cmd.extend(str(a) for a in argv if a is not None)
- print 'Running "%s"' % " ".join(cmd)
- env = os.environ.copy()
- env["YAMLTEST_RPKIC_COUNTER"] = self.next_rpkic_counter()
- subprocess.check_call(cmd, cwd = self.host.path(), env = env)
-
- def run_python_daemon(self, prog):
- """
- Start a Python daemon and return a subprocess.Popen object
- representing the running daemon.
- """
- basename = os.path.splitext(os.path.basename(prog))[0]
- cmd = [prog, "--foreground", "--log-level", "debug",
- "--log-file", self.path(basename + ".log"),
- "--config", self.path("rpki.conf")]
- if args.profile and basename != "rootd":
- cmd.extend((
- "--profile", self.path(basename + ".prof")))
- p = subprocess.Popen(cmd, cwd = self.path())
- print 'Running %s for %s: pid %d process %r' % (" ".join(cmd), self.name, p.pid, p)
- return p
-
- def run_rpkid(self):
- """
- Run rpkid.
- """
- return self.run_python_daemon(prog_rpkid)
+ @classmethod
+ def parse(cls, yaml):
+ return cls(yaml.get("asn"), yaml.get("router_id"))
- def run_irdbd(self):
class allocation_db(list):
    """
    Our allocation database: a flat list of every allocation object,
    rooted at self.root, with name lookup via self.map.
    """

    def __init__(self, yaml):
        list.__init__(self)
        self.root = allocation(yaml, self)
        assert self.root.is_root and not any(a.is_root for a in self if a is not self.root) and self[0] is self.root
        # Defaults for the root, then inherit missing values downwards.
        if self.root.crl_interval is None:
            self.root.crl_interval = 60 * 60
        if self.root.regen_margin is None:
            self.root.regen_margin = 24 * 60 * 60
        if self.root.base.valid_until is None:
            self.root.base.valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 2)
        for entry in self:
            if entry.base.valid_until is None:
                entry.base.valid_until = entry.parent.base.valid_until
            if entry.crl_interval is None:
                entry.crl_interval = entry.parent.crl_interval
            if entry.regen_margin is None:
                entry.regen_margin = entry.parent.regen_margin
        self.root.closure()
        self.map = dict((entry.name, entry) for entry in self)
        # Resolve hosted_by names into object references.
        for entry in self:
            if entry.is_hosted:
                entry.hosted_by = self.map[entry.hosted_by]
                entry.hosted_by.hosts.append(entry)
                assert not entry.is_root and not entry.hosted_by.is_hosted

    def dump(self):
        """
        Show contents of allocation database.
        """

        for entry in self:
            entry.dump()
- def run_rootd(self):
- """
- Run rootd.
- """
- return self.run_python_daemon(prog_rootd)
- def run_rsyncd(self):
+class allocation(object):
"""
- Run rsyncd.
+ One entity in our allocation database. Every entity in the database
+ is assumed to hold resources, so needs at least rpkic services.
+ Entities that don't have the hosted_by property run their own copies
+ of rpkid, irdbd, and pubd, so they also need myirbe services.
"""
- p = subprocess.Popen(("rsync", "--daemon", "--no-detach", "--config", "rsyncd.conf"),
- cwd = self.path())
- print "Running rsyncd for %s: pid %d process %r" % (self.name, p.pid, p)
- return p
-
-def create_root_certificate(db_root):
- print "Creating rootd RPKI root certificate"
-
- root_resources = rpki.resource_set.resource_bag(
- asn = rpki.resource_set.resource_set_as("0-4294967295"),
- v4 = rpki.resource_set.resource_set_ipv4("0.0.0.0/0"),
- v6 = rpki.resource_set.resource_set_ipv6("::/0"))
-
- root_key = rpki.x509.RSA.generate(quiet = True)
-
- root_uri = "rsync://localhost:%d/rpki/" % db_root.pubd.rsync_port
+ base_port = None
+ parent = None
+ crl_interval = None
+ regen_margin = None
+ engine = -1
+ rpkid_port = -1
+ irdbd_port = -1
+ pubd_port = -1
+ rsync_port = -1
+ rrdp_port = -1
+ rpkic_counter = 0L
+
+ @classmethod
+ def allocate_port(cls):
+ """
+ Allocate a TCP port.
+ """
+
+ cls.base_port += 1
+ return cls.base_port
+
+ base_engine = -1
+
+ @classmethod
+ def allocate_engine(cls):
+ """
+ Allocate an engine number, mostly used to construct SQL database
+ names.
+ """
+
+ cls.base_engine += 1
+ return cls.base_engine
+
+ def __init__(self, yaml, db, parent = None):
+ db.append(self)
+ self.name = yaml["name"]
+ self.parent = parent
+ self.kids = [allocation(k, db, self) for k in yaml.get("kids", ())]
+ valid_until = None
+ if "valid_until" in yaml:
+ valid_until = rpki.sundial.datetime.from_datetime(yaml.get("valid_until"))
+ if valid_until is None and "valid_for" in yaml:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta.parse(yaml["valid_for"])
+ self.base = rpki.resource_set.resource_bag(
+ asn = str(yaml.get("asn", "")),
+ v4 = yaml.get("ipv4"),
+ v6 = yaml.get("ipv6"),
+ valid_until = valid_until)
+ if "crl_interval" in yaml:
+ self.crl_interval = rpki.sundial.timedelta.parse(yaml["crl_interval"]).convert_to_seconds()
+ if "regen_margin" in yaml:
+ self.regen_margin = rpki.sundial.timedelta.parse(yaml["regen_margin"]).convert_to_seconds()
+ self.roa_requests = [roa_request.parse(y) for y in yaml.get("roa_request", yaml.get("route_origin", ()))]
+ self.router_certs = [router_cert.parse(y) for y in yaml.get("router_cert", ())]
+ if "ghostbusters" in yaml:
+ self.ghostbusters = yaml.get("ghostbusters")
+ elif "ghostbuster" in yaml:
+ self.ghostbusters = [yaml.get("ghostbuster")]
+ else:
+ self.ghostbusters = []
+ for r in self.roa_requests:
+ if r.v4:
+ self.base.v4 |= r.v4.to_resource_set()
+ if r.v6:
+ self.base.v6 |= r.v6.to_resource_set()
+ for r in self.router_certs:
+ self.base.asn |= r.asn
+ self.hosted_by = yaml.get("hosted_by")
+ self.hosts = []
+ if not self.is_hosted:
+ self.engine = self.allocate_engine()
+ self.rpkid_port = self.allocate_port()
+ self.irdbd_port = self.allocate_port()
+ if self.runs_pubd:
+ self.pubd_port = self.allocate_port()
+ self.rsync_port = self.allocate_port()
+ self.rrdp_port = self.allocate_port()
+
+ def closure(self):
+ """
+ Compute resource closure of this node and its children, to avoid a
+ lot of tedious (and error-prone) duplication in the YAML file.
+ """
+
+ resources = self.base
+ for kid in self.kids:
+ resources |= kid.closure()
+ self.resources = resources # pylint: disable=W0201
+ return resources
+
+ def dump(self):
+ """
+ Show content of this allocation node.
+ """
+
+ print str(self)
+
+ def __str__(self):
+ # pylint: disable=C0321
+ s = self.name + ":\n"
+ if self.resources.asn: s += " ASNs: %s\n" % self.resources.asn
+ if self.resources.v4: s += " IPv4: %s\n" % self.resources.v4
+ if self.resources.v6: s += " IPv6: %s\n" % self.resources.v6
+ if self.kids: s += " Kids: %s\n" % ", ".join(k.name for k in self.kids)
+ if self.parent: s += " Up: %s\n" % self.parent.name
+ if self.is_hosted: s += " Host: %s\n" % self.hosted_by.name
+ if self.hosts: s += " Hosts: %s\n" % ", ".join(h.name for h in self.hosts)
+ for r in self.roa_requests: s += " ROA: %s\n" % r
+ if not self.is_hosted: s += " IPort: %s\n" % self.irdbd_port
+ if self.runs_pubd: s += " PPort: %s\n" % self.pubd_port
+ if not self.is_hosted: s += " RPort: %s\n" % self.rpkid_port
+ if self.runs_pubd: s += " SPort: %s\n" % self.rsync_port
+ return s + " Until: %s\n" % self.resources.valid_until
+
+ @property
+ def is_root(self):
+ """
+ Is this the root node?
+ """
+
+ return self.parent is None
+
+ @property
+ def is_hosted(self):
+ """
+ Is this entity hosted?
+ """
+
+ return self.hosted_by is not None
+
+ @property
+ def runs_pubd(self):
+ """
+ Does this entity run a pubd?
+ """
+
+ return self.is_root or (args.one_pubd_per_rpkid and not self.is_hosted)
+
+ def path(self, *names):
+ """
+ Construct pathnames in this entity's test directory.
+ """
+
+ return cleanpath(test_dir, self.host.name, *names)
+
+ def csvout(self, fn):
+ """
+ Open and log a CSV output file.
+ """
- root_sia = (root_uri, root_uri + "root.mft", None)
+ path = self.path(fn)
+ print "Writing", path
+ return rpki.csv_utils.csv_writer(path)
+
+ def up_down_url(self):
+ """
+ Construct service URL for this node's parent.
+ """
+
+ return "http://localhost:%d/up-down/%s/%s" % (self.parent.host.rpkid_port,
+ self.parent.name,
+ self.name)
+
+ def dump_asns(self):
+ """
+ Write Autonomous System Numbers CSV file.
+ """
+
+ fn = "%s.asns.csv" % d.name
+ if not args.skip_config:
+ with self.csvout(fn) as f:
+ for k in self.kids:
+ f.writerows((k.name, a) for a in k.resources.asn)
+ if not args.stop_after_config:
+ self.run_rpkic("load_asns", fn)
+
+ def dump_prefixes(self):
+ """
+ Write prefixes CSV file.
+ """
+
+ fn = "%s.prefixes.csv" % d.name
+ if not args.skip_config:
+ with self.csvout(fn) as f:
+ for k in self.kids:
+ f.writerows((k.name, p) for p in (k.resources.v4 + k.resources.v6))
+ if not args.stop_after_config:
+ self.run_rpkic("load_prefixes", fn)
+
+ def dump_roas(self):
+ """
+ Write ROA CSV file.
+ """
+
+ fn = "%s.roas.csv" % d.name
+ if not args.skip_config:
+ with self.csvout(fn) as f:
+ for r in self.roa_requests:
+ f.writerows((p, r.asn)
+ for p in (r.v4 + r.v6 if r.v4 and r.v6 else r.v4 or r.v6 or ()))
+ if not args.stop_after_config:
+ self.run_rpkic("load_roa_requests", fn)
+
+ def dump_ghostbusters(self):
+ """
+ Write Ghostbusters vCard file.
+ """
+
+ if self.ghostbusters:
+ fn = "%s.ghostbusters.vcard" % d.name
+ if not args.skip_config:
+ path = self.path(fn)
+ print "Writing", path
+ with open(path, "w") as f:
+ f.write("\n".join(self.ghostbusters))
+ if not args.stop_after_config:
+ self.run_rpkic("load_ghostbuster_requests", fn)
+
+ def dump_router_certificates(self):
+ """
+ Write EE certificates (router certificates, etc).
+ """
+
+ if self.router_certs:
+ fn = "%s.routercerts.xml" % d.name
+ if not args.skip_config:
+ path = self.path(fn)
+ print "Writing", path
+ xmlns = rpki.relaxng.router_certificate.xmlns
+ xml = lxml.etree.Element(xmlns + "router_certificate_requests",
+ version = rpki.relaxng.router_certificate.version,
+ nsmap = rpki.relaxng.router_certificate.nsmap)
+ for r in self.router_certs:
+ x = lxml.etree.SubElement(xml, xmlns + "router_certificate_request",
+ router_id = str(r.router_id),
+ asn = str(r.asn),
+ valid_until = str(self.resources.valid_until))
+ x.text = r.pkcs10.get_Base64()
+ rpki.relaxng.router_certificate.assertValid(xml)
+ lxml.etree.ElementTree(xml).write(path, pretty_print = True)
+ if not args.stop_after_config:
+ self.run_rpkic("add_router_certificate_request", fn)
+ if not args.skip_config and args.store_router_private_keys:
+ path = self.path("%s.routercerts.keys" % d.name)
+ print "Writing", path
+ with open(path, "w") as f:
+ for r in self.router_certs:
+ f.write(r.keypair.get_PEM())
+
+ @property
+ def pubd(self):
+ """
+ Walk up tree until we find somebody who runs pubd.
+ """
+
+ s = self
+ while not s.runs_pubd:
+ s = s.parent
+ return s
+
+ @property
+ def client_handle(self):
+ """
+ Work out what pubd configure_publication_client will call us.
+ """
+
+ path = []
+ s = self
+ if not args.flat_publication:
+ while not s.runs_pubd:
+ path.append(s)
+ s = s.parent
+ path.append(s)
+ return ".".join(i.name for i in reversed(path))
+
+ @property
+ def host(self):
+ return self.hosted_by or self
+
+ def dump_conf(self):
+ """
+ Write configuration file for OpenSSL and RPKI tools.
+ """
+
+ r = dict(
+ handle = self.name,
+ run_rpkid = str(not self.is_hosted),
+ run_pubd = str(self.runs_pubd),
+ rpkid_server_host = "localhost",
+ rpkid_server_port = str(self.rpkid_port),
+ irdbd_server_host = "localhost",
+ irdbd_server_port = str(self.irdbd_port),
+ pubd_server_host = "localhost",
+ pubd_server_port = str(self.pubd.pubd_port),
+ publication_rsync_server = "localhost:%s" % self.pubd.rsync_port,
+ publication_rrdp_base_uri = "https://localhost:%s/" % self.pubd.rrdp_port,
+ bpki_servers_directory = self.path(),
+ publication_base_directory = self.path("publication.rsync"),
+ rrdp_publication_base_directory = self.path("publication.rrdp"),
+ shared_sql_engine = args.sql_engine,
+ shared_sql_password = "fnord",
+ irdbd_sql_username = "irdb",
+ rpkid_sql_username = "rpki",
+ pubd_sql_username = "pubd")
+
+ if args.sql_engine == "sqlite3":
+ r.update(
+ irdbd_sql_database = self.path("irdb.sqlite3"),
+ rpkid_sql_database = self.path("rpkidb.sqlite3"),
+ pubd_sql_database = self.path("pubdb.sqlite3"))
+ else:
+ r.update(
+ irdbd_sql_database = "irdb%d" % self.engine,
+ rpkid_sql_database = "rpki%d" % self.engine,
+ pubd_sql_database = "pubd%d" % self.engine)
+
+ fn = self.path("rpki.conf")
+
+ cmd = [sys.executable, prog_rpki_confgen,
+ "--read-xml", prog_rpki_confgen + ".xml",
+ "--autoconf"]
+ for k, v in r.iteritems():
+ cmd.extend(("--set", "myrpki::{}={}".format(k, v)))
+ cmd.extend(("--write-conf", fn))
+
+ print "Writing", fn
+ subprocess.check_call(cmd)
+
+ def dump_rsyncd(self):
+ """
+ Write rsyncd configuration file.
+ """
+
+ if self.runs_pubd:
+ with open(self.path("rsyncd.conf"), "w") as f:
+ print "Writing", f.name
+ f.writelines(s + "\n" for s in
+ ("# Automatically generated, do not edit",
+ "port = %d" % self.rsync_port,
+ "address = localhost",
+ "[rpki]",
+ "log file = rsyncd.log",
+ "read only = yes",
+ "use chroot = no",
+ "path = %s" % self.path("publication.rsync"),
+ "comment = RPKI test",
+ "[root]",
+ "log file = rsyncd_root.log",
+ "read only = yes",
+ "use chroot = no",
+ "path = %s" % self.path("publication.root"),
+ "comment = RPKI test root"))
+
+ def dump_httpsd(self):
+ """
+ Write certificates for internal RRDP httpsd.
+ """
+
+ # For the moment we create a new TA for each httpsd server
+ # instance, which will be a mess if the RRDP client wants to
+ # verify them. At the moment, running RRDP over HTTPS is more
+ # of a political statement than a technical requirement
+ # derived from the underlying security model, so we defer
+ # shaving that yak for another day. Likewise, we defer
+ # deciding whether we really only wanted one TA/EE pair for an
+ # entire yamltest run, or perhaps a single TA and multiple EEs
+ # (all with the same name!), or ....
+ #
+ # If and when we ever deal with this, we might also see about
+ # getting the Django test GUI server to run over TLS. Then
+ # again, since we have no sane way of telling the user's web
+ # browser about our TA, this pretty much guarantees a lot of
+ # tedious browser exception pop-ups every time. Feh.
+
+ if self.runs_pubd:
+ print "Creating certificates for %s RRDP HTTPS server" % self.name
+
+ ca_key = rpki.x509.RSA.generate(quiet = True)
+ ee_key = rpki.x509.RSA.generate(quiet = True)
+ ca_dn = rpki.x509.X501DN.from_cn("%s RRDP HTTPS trust anchor" % self.name)
+ ee_dn = rpki.x509.X501DN.from_cn("localhost")
+ notAfter = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
+
+ ca_cer = rpki.x509.X509.bpki_self_certify(
+ keypair = ca_key,
+ subject_name = ca_dn,
+ serial = 1,
+ notAfter = notAfter)
+
+ ee_cer = ca_cer.bpki_certify(
+ keypair = ca_key,
+ subject_name = ee_dn,
+ subject_key = ee_key.get_public(),
+ serial = 2,
+ notAfter = notAfter,
+ is_ca = False)
+
+ with open(self.path("httpsd.client.pem"), "w") as f:
+ f.write(ca_cer.get_PEM())
+
+ with open(self.path("httpsd.server.pem"), "w") as f:
+ f.write(ee_key.get_PEM())
+ f.write(ee_cer.get_PEM())
+ f.write(ca_cer.get_PEM())
+
+ @classmethod
+ def next_rpkic_counter(cls):
+ cls.rpkic_counter += 10000
+ return str(cls.rpkic_counter)
+
+ def run_rpkic(self, *argv):
+ """
+ Run rpkic for this entity.
+ """
+
+ cmd = [sys.executable, "-c", "import rpki.rpkic; rpki.rpkic.main()", "-i", self.name]
+ if args.profile:
+ cmd.append("--profile")
+ cmd.append(self.path("rpkic.{!s}.prof".format(rpki.sundial.now())))
+ cmd.extend(str(a) for a in argv if a is not None)
+ print 'Running "rpkic {}"'.format(" ".join(cmd[3:]))
+ env = dict(os.environ,
+ YAMLTEST_RPKIC_COUNTER = self.next_rpkic_counter(),
+ RPKI_CONF = self.path("rpki.conf"),
+ PYTHONPATH = rpki_dir)
+ subprocess.check_call(cmd, cwd = self.host.path(), env = env)
+
+ def syncdb(self):
+ """
+ Run whatever Django ORM commands are necessary to set up the
+ database this week.
+ """
+
+ # Fork a sub-process for each syncdb/migrate run, because it's
+ # easier than figuring out how to change Django settings after
+ # initialization.
+
+ def sync_settings(settings, verbosity = 1):
+
+ if verbosity > 0:
+ print "Running Django setup for", self.name
+
+ pid = os.fork()
+
+ if pid == 0:
+ logging.getLogger().setLevel(logging.WARNING)
+
+ os.environ.update(RPKI_CONF = self.path("rpki.conf"),
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings." + settings)
+
+ import django
+ django.setup()
+
+ import django.core.management
+ django.core.management.call_command("migrate", verbosity = verbosity, no_color = True,
+ load_initial_data = False, interactive = False)
+
+ if settings in ("gui", "irdb"):
+ from django.contrib.auth.models import User
+ User.objects.create_superuser("root", "root@example.org", "fnord")
+
+ sys.exit(0)
+
+ elif os.waitpid(pid, 0)[1]:
+ raise RuntimeError("Django setup failed for %s %s" % (self.name, settings))
+
+ for settings in ("rpkid", "pubd", "gui"):
+ sync_settings(settings)
+
+ def run_python_daemon(self, prog):
+ """
+ Start a Python daemon and return a subprocess.Popen object
+ representing the running daemon.
+ """
+
+ basename = os.path.splitext(os.path.basename(prog))[0]
+ cmd = [prog, "--foreground",
+ "--log-level", "debug",
+ "--log-destination", "file",
+ "--log-filename", self.path(basename + ".log")]
+ if args.profile:
+ cmd.extend((
+ "--profile", self.path(basename + ".prof")))
+ env = dict(os.environ, RPKI_CONF = self.path("rpki.conf"))
+ p = subprocess.Popen(cmd, cwd = self.path(), env = env)
+ print "Running %s for %s: pid %d process %r" % (" ".join(cmd), self.name, p.pid, p)
+ return p
+
+ def run_rpkid(self):
+ """
+ Run rpkid.
+ """
+
+ return self.run_python_daemon(prog_rpkid)
+
+ def run_irdbd(self):
+ """
+ Run irdbd.
+ """
+
+ return self.run_python_daemon(prog_irdbd)
+
+ def run_pubd(self):
+ """
+ Run pubd.
+ """
+
+ return self.run_python_daemon(prog_pubd)
+
+ def run_rsyncd(self):
+ """
+ Run rsyncd.
+ """
+
+ p = subprocess.Popen(("rsync", "--daemon", "--no-detach", "--config", "rsyncd.conf"),
+ cwd = self.path())
+ print "Running rsyncd for %s: pid %d process %r" % (self.name, p.pid, p)
+ return p
+
+ def run_httpsd(self):
+ """
+ Run httpsd (minimal HTTPS server, for RRDP).
+ """
+
+ # Minimal HTTPS server hack from:
+ # https://www.piware.de/2011/01/creating-an-https-server-in-python/
+ # coded as a script so that we can run this using the
+ # subprocess API used by all our other daemon processes.
+
+ if self.runs_pubd:
+ script = textwrap.dedent('''\
+ import BaseHTTPServer, SimpleHTTPServer, ssl
+ httpd = BaseHTTPServer.HTTPServer(("localhost", {port}), SimpleHTTPServer.SimpleHTTPRequestHandler)
+ httpd.socket = ssl.wrap_socket(httpd.socket, server_side = True, certfile = "{pem}")
+ httpd.serve_forever()
+ '''.format(port = self.rrdp_port, pem = self.path("httpsd.server.pem")))
+ p = subprocess.Popen((sys.executable, "-c", script),
+ stdout = open(self.path("httpsd.log"), "w"), stderr = subprocess.STDOUT,
+ cwd = self.path("publication.rrdp"))
+ print "Running httpsd for %s: pid %d process %r" % (self.name, p.pid, p)
+ return p
+
+ def run_gui(self):
+ """
+ Start an instance of the RPKI GUI under the Django test server and
+ return a subprocess.Popen object representing the running daemon.
+ """
+
+ env = dict(os.environ,
+ RPKI_CONF = self.path("rpki.conf"),
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.gui",
+ RPKI_DJANGO_DEBUG = "yes",
+ LANG = "en_US.UTF-8",
+ ALLOW_PLAIN_HTTP_FOR_TESTING = "I solemnly swear that I am not running this in production")
+
+ if False:
+ # This ought to work, doesn't. Looks like some kind of Django argv hairball.
+ cmd = (sys.executable, "-c", textwrap.dedent('''\
+ import django
+ django.setup()
+ import django.core.management
+ django.core.management.call_command("runserver", "{port}")
+ '''.format(port = 8000 + self.engine)))
+ else:
+ cmd = ("django-admin", "runserver", str(8000 + self.engine))
- root_cert = rpki.x509.X509.self_certify(
- keypair = root_key,
- subject_key = root_key.get_public(),
- serial = 1,
- sia = root_sia,
- notAfter = rpki.sundial.now() + rpki.sundial.timedelta(days = 365),
- resources = root_resources)
+ p = subprocess.Popen(cmd, cwd = self.path(), env = env,
+ stdout = open(self.path("gui.log"), "w"), stderr = subprocess.STDOUT)
+ print "Running GUI for %s: pid %d process %r" % (self.name, p.pid, p)
+ return p
- f = open(db_root.path("publication.root/root.cer"), "wb")
- f.write(root_cert.get_DER())
- f.close()
+ def extract_root_cert_and_tal(self):
+ """
+ Use rpkic to extract the root certficate and TAL and place them
+ where we can use them to check the published result using rcynic.
+ """
- f = open(db_root.path("root.key"), "wb")
- f.write(root_key.get_DER())
- f.close()
+ print
+ self.run_rpkic("extract_root_tal", "--output",
+ os.path.join(test_dir, "root.tal"))
- f = open(os.path.join(test_dir, "root.tal"), "w")
- f.write("rsync://localhost:%d/root/root.cer\n\n" % db_root.pubd.rsync_port)
- f.write(root_key.get_public().get_Base64())
- f.close()
+ root_cer = self.path("root.cer")
+ self.run_rpkic("extract_root_certificate", "--output", root_cer)
+ gski = rpki.x509.X509(DER_file = root_cer).gSKI()
+ fn = self.path("publication.rrdp", gski + ".cer")
+ print "Linking", root_cer
+ print "to ", fn
+ os.link(root_cer, fn)
logger = logging.getLogger(__name__)

# Django settings module must be set before anything imports Django;
# force UTC so certificate validity arithmetic is deterministic.
os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb",
                  TZ = "UTC")
time.tzset()

parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument("-f", "--flat-publication", action = "store_true",
                    help = "disable hierarchical publication")
parser.add_argument("-k", "--keep-going", action = "store_true",
                    help = "keep going until all subprocesses exit")
parser.add_argument("-p", "--pidfile",
                    help = "save pid to this file")
parser.add_argument("--skip-config", action = "store_true",
                    help = "skip over configuration phase")
parser.add_argument("--stop-after-config", action = "store_true",
                    help = "stop after configuration phase")
parser.add_argument("--synchronize", action = "store_true",
                    help = "synchronize IRDB with daemons")
parser.add_argument("--profile", action = "store_true",
                    help = "enable profiling")
parser.add_argument("-g", "--run-gui", "--gui", action = "store_true",
                    help = "enable GUI using django-admin runserver")
parser.add_argument("--no-browser", action = "store_true",
                    help = "don't create web browser tabs for GUI")
parser.add_argument("--notify-when-startup-complete", type = int,
                    help = "send SIGUSR1 to this process when startup is complete")
# Help-string typo fix: was "write generate router private keys to disk".
parser.add_argument("--store-router-private-keys", action = "store_true",
                    help = "write generated router private keys to disk")
parser.add_argument("--sql-engine", choices = ("mysql", "sqlite3", "postgresql"), default = "sqlite3",
                    help = "select SQL engine to use")
# Help-string typo fix: was "enable separate a pubd process ...".
parser.add_argument("--one-pubd-per-rpkid", action = "store_true",
                    help = "enable a separate pubd process for each rpkid process")
parser.add_argument("--base-port", type = int, default = 4400,
                    help = "base port number for allocated TCP ports")
parser.add_argument("yaml_file", type = argparse.FileType("r"),
                    help = "YAML description of test network")
args = parser.parse_args()
try:
- if args.pidfile is not None:
- open(args.pidfile, "w").write("%s\n" % os.getpid())
-
- rpki.log.init("yamltest", argparse.Namespace(log_level = logging.DEBUG,
- log_handler = lambda: logging.StreamHandler(sys.stdout)))
-
- # Allow optional config file for this tool to override default
- # passwords: this is mostly so that I can show a complete working
- # example without publishing my own server's passwords.
-
- cfg = rpki.config.parser(args.config, "yamltest", allow_missing = True)
+ if args.pidfile is not None:
+ with open(args.pidfile, "w") as f:
+ print "Writing pidfile", f.name
+ f.write("%s\n" % os.getpid())
- only_one_pubd = cfg.getboolean("only_one_pubd", True)
- allocation.base_port = cfg.getint("base_port", 4400)
+ log_handler = logging.StreamHandler(sys.stdout)
+ log_handler.setFormatter(rpki.config.Formatter("yamltest", log_handler, logging.DEBUG))
+ logging.getLogger().addHandler(log_handler)
+ logging.getLogger().setLevel(logging.DEBUG)
- config_overrides = dict(
- (k, cfg.get(k))
- for k in ("rpkid_sql_password", "irdbd_sql_password", "pubd_sql_password",
- "rpkid_sql_username", "irdbd_sql_username", "pubd_sql_username")
- if cfg.has_option(k))
+ allocation.base_port = args.base_port
- # Start clean, maybe
+ # Start clean, maybe
- if not args.skip_config:
- for root, dirs, files in os.walk(test_dir, topdown = False):
- for fn in files:
- os.unlink(os.path.join(root, fn))
- for d in dirs:
- os.rmdir(os.path.join(root, d))
+ if not args.skip_config:
+ for root, dirs, files in os.walk(test_dir, topdown = False):
+ for fn in files:
+ os.unlink(os.path.join(root, fn))
+ for d in dirs:
+ os.rmdir(os.path.join(root, d))
- # Read first YAML doc in file and process as compact description of
- # test layout and resource allocations. Ignore subsequent YAML docs,
- # they're for smoketest.py, not this script.
+ # Read first YAML doc in file and process as compact description of
+ # test layout and resource allocations. Ignore subsequent YAML docs,
+ # they're for smoketest.py, not this script.
- db = allocation_db(yaml.safe_load_all(args.yaml_file).next())
+ db = allocation_db(yaml.safe_load_all(args.yaml_file).next())
- # Show what we loaded
+ # Show what we loaded
- #db.dump()
+ #db.dump()
- if args.skip_config:
+ if args.skip_config:
- print "Skipping pre-daemon configuration, assuming you already did that"
+ print "Skipping pre-daemon configuration, assuming you already did that"
- else:
+ else:
- # Set up each entity in our test, create publication directories,
- # and initialize server BPKI.
+ # Set up each entity in our test, create publication directories,
+ # and initialize server BPKI.
- for d in db:
- if not d.is_hosted:
- os.makedirs(d.path())
- d.dump_conf()
- if d.runs_pubd:
- os.makedirs(d.path("publication"))
- d.dump_rsyncd()
- if d.is_root:
- os.makedirs(d.path("publication.root"))
- d.run_rpkic("initialize_server_bpki")
+ for d in db:
+ if not d.is_hosted:
+ print "Initializing", d.name
+ os.makedirs(d.path())
+ d.dump_conf()
+ if d.runs_pubd:
+ os.makedirs(d.path("publication.rsync"))
+ os.makedirs(d.path("publication.rrdp"))
+ d.dump_rsyncd()
+ d.dump_httpsd()
+ d.syncdb()
+ d.run_rpkic("initialize_server_bpki")
+ print
- # Initialize resource holding BPKI and generate self-descriptor
- # for each entity.
+ # Initialize resource holding BPKI and generate self-descriptor
+ # for each entity.
- for d in db:
- d.run_rpkic("create_identity", d.name)
+ for d in db:
+ d.run_rpkic("create_identity", d.name)
- # Create RPKI root certificate.
+ # Set up root
- create_root_certificate(db.root)
+ db.root.run_rpkic("configure_root")
- # Set up rootd.
+ # From here on we need to pay attention to initialization order. We
+ # used to do all the pre-configure_daemons stuff before running any
+ # of the daemons, but that doesn't work right in hosted cases, so we
+ # have to interleave configuration with starting daemons, just as
+ # one would in the real world for this sort of thing.
- db.root.run_rpkic("configure_root")
+ progs = []
- # From here on we need to pay attention to initialization order. We
- # used to do all the pre-configure_daemons stuff before running any
- # of the daemons, but that doesn't work right in hosted cases, so we
- # have to interleave configuration with starting daemons, just as
- # one would in the real world for this sort of thing.
+ try:
- progs = []
+ for d in db:
- try:
+ if not d.is_hosted:
+ print
+ print "Running daemons for", d.name
+ progs.append(d.run_irdbd())
+ progs.append(d.run_rpkid())
+ if d.runs_pubd:
+ progs.append(d.run_pubd())
+ progs.append(d.run_rsyncd())
+ progs.append(d.run_httpsd())
+ if args.run_gui:
+ progs.append(d.run_gui())
- for d in db:
+ if args.synchronize or not args.skip_config:
- if not d.is_hosted:
- print
- print "Running daemons for", d.name
- if d.is_root:
- progs.append(d.run_rootd())
- progs.append(d.run_irdbd())
- progs.append(d.run_rpkid())
- if d.runs_pubd:
- progs.append(d.run_pubd())
- progs.append(d.run_rsyncd())
-
- if args.synchronize or not args.skip_config:
-
- print
- print "Giving daemons time to start up"
- time.sleep(20)
- assert all(p.poll() is None for p in progs)
+ print
+ print "Giving daemons time to start up"
+ time.sleep(20)
+ assert all(p.poll() is None for p in progs)
- if args.skip_config:
+ if args.skip_config:
- print
- print "Skipping configure_*, you'll have to do that yourself if needed"
+ print
+ print "Skipping configure_*, you'll have to do that yourself if needed"
- else:
+ else:
- for d in db:
+ for d in db:
+ print
+ print "Configuring", d.name
+ print
+ if d.is_root:
+ assert not d.is_hosted
+ d.run_rpkic("configure_publication_client",
+ "--flat" if args.flat_publication else None,
+ d.path("%s.%s.repository-request.xml" % (d.name, d.name)))
+ print
+ d.run_rpkic("configure_repository",
+ d.path("%s.repository-response.xml" % d.client_handle))
+ print
+ else:
+ d.parent.run_rpkic("configure_child",
+ "--valid_until", d.resources.valid_until,
+ d.path("%s.identity.xml" % d.name))
+ print
+ d.run_rpkic("configure_parent",
+ d.parent.path("%s.%s.parent-response.xml" % (d.parent.name, d.name)))
+ print
+ d.pubd.run_rpkic("configure_publication_client",
+ "--flat" if args.flat_publication else None,
+ d.path("%s.%s.repository-request.xml" % (d.name, d.parent.name)))
+ print
+ d.run_rpkic("configure_repository",
+ d.pubd.path("%s.repository-response.xml" % d.client_handle))
+ print
+
+ print
+ print "Done with initial configuration"
+ print
+
+ if args.synchronize:
+ print
+ print "Synchronizing"
+ print
+ for d in db:
+ if not d.is_hosted:
+ d.run_rpkic("synchronize")
+
+ if args.synchronize or not args.skip_config:
+ print
+ print "Loading CSV files"
+ print
+ for d in db:
+ d.dump_asns()
+ d.dump_prefixes()
+ d.dump_roas()
+ d.dump_ghostbusters()
+ d.dump_router_certificates()
+
+ db.root.extract_root_cert_and_tal()
+
+ if args.run_gui:
+ print
+ print 'GUI user "root", password "fnord"'
+ gui_count = 0
+ for d in db:
+ if not d.is_hosted:
+ url = "http://127.0.0.1:%d/rpki/" % (8000 + d.engine)
+ print "GUI URL", url, "for", d.name
+ if not args.no_browser:
+ gui_count += 1
+ if d is db.root:
+ webbrowser.open_new(url)
+ else:
+ webbrowser.open_new_tab(url)
+ time.sleep(2)
+ if gui_count > 1:
+ print "Warning: Logging into more than one GUI instance at once will probably fail due to CSRF protection"
+
+ # Wait until something terminates.
+
+ if not args.stop_after_config or args.keep_going:
+ if args.notify_when_startup_complete:
+ print
+ print "Sending SIGUSR1 to process", args.notify_when_startup_complete
+ os.kill(args.notify_when_startup_complete, signal.SIGUSR1)
+ print
+ print "Waiting for daemons to exit"
+ signal.signal(signal.SIGCHLD, lambda *dont_care: None)
+ while (any(p.poll() is None for p in progs)
+ if args.keep_going else
+ all(p.poll() is None for p in progs)):
+ signal.pause()
+
+ finally:
print
- print "Configuring", d.name
+ print "Shutting down"
print
- if d.is_root:
- assert not d.is_hosted
- d.run_rpkic("configure_publication_client",
- "--flat" if args.flat_publication else None,
- d.path("%s.%s.repository-request.xml" % (d.name, d.name)))
- print
- d.run_rpkic("configure_repository",
- d.path("%s.repository-response.xml" % d.client_handle))
- print
+
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+
+ if args.profile:
+ how_long = 300
else:
- d.parent.run_rpkic("configure_child",
- "--valid_until", d.resources.valid_until,
- d.path("%s.identity.xml" % d.name))
- print
- d.run_rpkic("configure_parent",
- d.parent.path("%s.%s.parent-response.xml" % (d.parent.name, d.name)))
- print
- d.pubd.run_rpkic("configure_publication_client",
- "--flat" if args.flat_publication else None,
- d.path("%s.%s.repository-request.xml" % (d.name, d.parent.name)))
- print
- d.run_rpkic("configure_repository",
- d.pubd.path("%s.repository-response.xml" % d.client_handle))
- print
-
- print
- print "Done with initial configuration"
- print
-
- if args.synchronize:
- print
- print "Synchronizing"
- print
- for d in db:
- if not d.is_hosted:
- d.run_rpkic("synchronize")
-
- if args.synchronize or not args.skip_config:
- print
- print "Loading CSV files"
- print
- for d in db:
- d.dump_asns()
- d.dump_prefixes()
- d.dump_roas()
- d.dump_ghostbusters()
- d.dump_router_certificates()
-
- # Wait until something terminates.
-
- if not args.stop_after_config or args.keep_going:
- print
- print "Waiting for daemons to exit"
- signal.signal(signal.SIGCHLD, lambda *dont_care: None)
- while (any(p.poll() is None for p in progs)
- if args.keep_going else
- all(p.poll() is None for p in progs)):
- signal.pause()
-
- finally:
-
- print
- print "Shutting down"
- print
-
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-
- if args.profile:
- how_long = 300
- else:
- how_long = 30
+ how_long = 30
+
+ how_often = how_long / 2
- how_often = how_long / 2
+ for i in xrange(how_long):
+ if i % how_often == 0:
+ for p in progs:
+ if p.poll() is None:
+ print "Politely nudging pid %d" % p.pid
+ p.terminate()
+ print
+ if all(p.poll() is not None for p in progs):
+ break
+ time.sleep(1)
- for i in xrange(how_long):
- if i % how_often == 0:
for p in progs:
- if p.poll() is None:
- print "Politely nudging pid %d" % p.pid
- p.terminate()
- print
- if all(p.poll() is not None for p in progs):
- break
- time.sleep(1)
+ if p.poll() is None:
+ print "Pulling the plug on pid %d" % p.pid
+ p.kill()
- for p in progs:
- if p.poll() is None:
- print "Pulling the plug on pid %d" % p.pid
- p.kill()
+ for p in progs:
+ print "Program pid %d %r returned %d" % (p.pid, p, p.wait())
- for p in progs:
- print "Program pid %d %r returned %d" % (p.pid, p, p.wait())
+except Exception, e:
+ print "Blowing out on exception", str(e)
+ raise
finally:
- if args.pidfile is not None:
- os.unlink(args.pidfile)
+ if args.pidfile is not None and os.path.exists(args.pidfile):
+ os.unlink(args.pidfile)
+
+# Local Variables:
+# indent-tabs-mode: nil
+# End:
diff --git a/ca/upgrade-scripts/upgrade-rpkid-to-0.5709.py b/ca/upgrade-scripts/upgrade-rpkid-to-0.5709.py
deleted file mode 100644
index 0cea5671..00000000
--- a/ca/upgrade-scripts/upgrade-rpkid-to-0.5709.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2014 Dragon Research Labs ("DRL")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# pylint: disable=E0602
-
-"""
-Upgrade RPKI SQL databases to schema expected by 0.5709.
-
-This code is evaluated in the context of rpki-sql-setup's
-do_apply_upgrades() function and has access to its variables.
-"""
-
-db.cur.execute("""
- CREATE TABLE ee_cert (
- ee_cert_id SERIAL NOT NULL,
- ski BINARY(20) NOT NULL,
- cert LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ee_cert_id),
- CONSTRAINT ee_cert_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT ee_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
- ) ENGINE=InnoDB
-""")
diff --git a/configure b/configure
index 18e99f95..b85b9a73 100755
--- a/configure
+++ b/configure
@@ -631,21 +631,15 @@ OPENSSL_TARGET
WSGI_PYTHON_EGG_CACHE_USER
WSGI_PYTHON_EGG_CACHE_DIR
SETUP_PY_INSTALL_LAYOUT
+CFG_INSTALL_TARGETS
CA_INSTALL_TARGETS
RTR_ORIGIN_INSTALL_TARGETS
RCYNIC_INSTALL_TARGETS
RCYNIC_HTML_DIR
-RCYNIC_STATIC_RSYNC
-RCYNIC_GROUP
-RCYNIC_USER
-RCYNIC_CRON_USER
-RCYNIC_CONF_TA_DIR
RCYNIC_CONF_DATA
-RCYNIC_CONF_RSYNC
-RCYNIC_BIN_RCYNIC
-RCYNIC_TA_DIR
-RCYNIC_CONF_FILE
-RCYNIC_JAIL_DIRS
+RPKI_GROUP
+RPKI_USER
+SUDO
RSYNC
TRANG
RRDTOOL
@@ -654,7 +648,6 @@ AWK
XSLTPROC
PYTHON
POW_LDFLAGS
-LD_STATIC_FLAG
EGREP
GREP
CPP
@@ -722,7 +715,6 @@ CA_MAKE_RULES'
ac_user_opts='
enable_option_checking
with_system_openssl
-enable_rcynic_jail
enable_openssl_asm
enable_ca_tools
enable_rp_tools
@@ -734,6 +726,7 @@ enable_runtime_dependencies
enable_python
enable_django
enable_rpki_rtr
+enable_rcynic_jail
'
ac_precious_vars='build_alias
host_alias
@@ -1359,7 +1352,6 @@ Optional Features:
--disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
- --enable-rcynic-jail Run rcynic in chroot jail
--disable-openssl-asm Don't let OpenSSL build assembler code
--disable-ca-tools Don't build any of the CA tools
--disable-rp-tools Don't build any of the relying party tools
@@ -1376,6 +1368,7 @@ Optional Features:
--disable-python (Obsolete, do not use)
--disable-django (Obsolete, do not use)
--disable-rpki-rtr (Obsolete, do not use)
+ --enable-rcynic-jail (Obsolete, do not use)
Optional Packages:
--with-PACKAGE[=ARG] use PACKAGE [ARG=yes]
@@ -1383,8 +1376,7 @@ Optional Packages:
--with-system-openssl Link against system copy of OpenSSL
Some influential environment variables:
- RCYNIC_DIR Where to put output files from rcynic and rpki-rtr; also
- controls jail location for --enable-rcynic-jail
+ RCYNIC_DIR Where to put output files from rcynic and rpki-rtr
APACHE_VERSION
Version of Apache httpd, mostly used on FreeBSD where it
determines some of the directory names
@@ -2425,13 +2417,6 @@ else
with_system_openssl=auto
fi
-# Check whether --enable-rcynic_jail was given.
-if test "${enable_rcynic_jail+set}" = set; then :
- enableval=$enable_rcynic_jail;
-else
- enable_rcynic_jail=no
-fi
-
# Check whether --enable-openssl_asm was given.
if test "${enable_openssl_asm+set}" = set; then :
enableval=$enable_openssl_asm;
@@ -2509,6 +2494,11 @@ if test "${enable_rpki_rtr+set}" = set; then :
enableval=$enable_rpki_rtr; as_fn_error $? "--disable-rpki-rtr is obsolete. Please see the --disable-rp-tools option" "$LINENO" 5
fi
+# Check whether --enable-rcynic_jail was given.
+if test "${enable_rcynic_jail+set}" = set; then :
+ enableval=$enable_rcynic_jail; as_fn_error $? "--enable-rcynic-jail is obsolete." "$LINENO" 5
+fi
+
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
@@ -3825,33 +3815,6 @@ _ACEOF
-# We'd like to build rcynic as a static binary if we can, because that
-# makes it much simpler to run rcynic in a chroot jail, but we don't
-# know how to do it on all platforms, so we try the hack we know, and
-# if that doesn't work, oh well.
-#
-# Sadly, it's even worse than this, because there are platforms like
-# Fedora where the compiler and linker support -static just fine, but
-# the default libraries do not, and if you start down the primrose
-# path of installing the necessary libraries, you eventually hit a
-# wall where one of the static libraries you downloaded depends on
-# something that's not available as a static library, ie, you lose.
-#
-# So for now I'm just going to make this a FreeBSD-only option.
-# Feh. Those of you who choose to use other platforms are welcome to
-# fix this and send me the patch, if you care.
-
-
-case $host_os in
- freebsd*)
- LD_STATIC_FLAG='-static'
- ;;
- *)
- LD_STATIC_FLAG=''
- ;;
-esac
-
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler and linker support -Wl,-Bsymbolic" >&5
$as_echo_n "checking whether compiler and linker support -Wl,-Bsymbolic... " >&6; }
old_LDFLAGS="$LDFLAGS"
@@ -4246,6 +4209,46 @@ $as_echo "no" >&6; }
fi
+# Extract the first word of "sudo", so it can be a program name with args.
+set dummy sudo; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_SUDO+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ case $SUDO in
+ [\\/]* | ?:[\\/]*)
+ ac_cv_path_SUDO="$SUDO" # Let the user override the test with a path.
+ ;;
+ *)
+ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+ IFS=$as_save_IFS
+ test -z "$as_dir" && as_dir=.
+ for ac_exec_ext in '' $ac_executable_extensions; do
+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+ ac_cv_path_SUDO="$as_dir/$ac_word$ac_exec_ext"
+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+ break 2
+ fi
+done
+ done
+IFS=$as_save_IFS
+
+ ;;
+esac
+fi
+SUDO=$ac_cv_path_SUDO
+if test -n "$SUDO"; then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SUDO" >&5
+$as_echo "$SUDO" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
# See whether we need to check for dependencies that we only need at
# runtime. We do this by default when compiling from source to avoid
@@ -4509,18 +4512,14 @@ fi
if test "X$RCYNIC_DIR" = "X"
then
- rcynic_base_dir='/var/rcynic'
-else
- rcynic_base_dir="${RCYNIC_DIR}"
+ RCYNIC_DIR='/var/rcynic'
fi
-RCYNIC_DIR='${DESTDIR}'"${rcynic_base_dir}"
-
# APACHE_VERSION is another "precious" argument to this script. It
# mostly matters on FreeBSD, where most things involving Apache encode
# the Apache version number into their filenames.
#
-# If we can't figure out the version number, we assume 2.2 and hope
+# If we can't figure out the version number, we assume 2.4 and hope
# for the best; at some point we may need to do better than this.
#
# apachectl sometimes whines about ulimits, so we discard its stderr.
@@ -4544,8 +4543,8 @@ fi
if test "X$APACHE_VERSION" = "X"
then
- APACHE_VERSION="22"
- msg='not found, defaulting to 2.2'
+ APACHE_VERSION="24"
+ msg='not found, defaulting to 2.4'
else
msg=`echo $APACHE_VERSION | sed 's=.=&.='`
fi
@@ -4553,70 +4552,15 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $msg" >&5
$as_echo "$msg" >&6; }
-# Figure out whether to run rcynic in a chroot jail, which determines
-# a bunch of other settings.
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build chroot jail for rcynic" >&5
-$as_echo_n "checking whether to build chroot jail for rcynic... " >&6; }
-
-case $enable_rcynic_jail in
- yes)
- use_rcynic_jail=yes
- RCYNIC_CONF_FILE='${RCYNIC_DIR}/etc/rcynic.conf'
- RCYNIC_TA_DIR='${RCYNIC_DIR}/etc/trust-anchors'
- RCYNIC_BIN_RCYNIC='${RCYNIC_DIR}/bin/rcynic'
- RCYNIC_CONF_RSYNC='/bin/rsync'
- RCYNIC_CONF_DATA='/data'
- RCYNIC_CONF_TA_DIR='/etc/trust-anchors'
- RCYNIC_CRON_USER='root'
- RCYNIC_JAIL_DIRS='${RCYNIC_DIR}/bin ${RCYNIC_DIR}/dev ${RCYNIC_DIR}/etc'
- if test "X$host_os" = "Xlinux"
- then
- RCYNIC_JAIL_DIRS="$RCYNIC_JAIL_DIRS "'${RCYNIC_DIR}/lib ${RCYNIC_DIR}/lib64 ${RCYNIC_DIR}/usr/lib'
- fi
- ;;
- no)
- use_rcynic_jail=no
- RCYNIC_CONF_FILE='${DESTDIR}${sysconfdir}/rcynic.conf'
- RCYNIC_TA_DIR='${DESTDIR}${sysconfdir}/rpki/trust-anchors'
- RCYNIC_BIN_RCYNIC='${DESTDIR}${bindir}/rcynic'
- RCYNIC_CONF_RSYNC="${RSYNC}"
- RCYNIC_CONF_DATA="${rcynic_base_dir}/data"
- RCYNIC_CONF_TA_DIR='${sysconfdir}/rpki/trust-anchors'
- RCYNIC_CRON_USER='${RCYNIC_USER}'
- RCYNIC_JAIL_DIRS=''
- ;;
- *)
- as_fn_error $? "Unrecognized value for --enable-rcynic-jail: $enable_rcynic_jail" "$LINENO" 5
- ;;
-esac
-
-
-
-
-
-
-
-
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $use_rcynic_jail" >&5
-$as_echo "$use_rcynic_jail" >&6; }
-
# Perhaps there should be a way to set these, but for now just
# wire them in here so at least they're consistent in all Makefiles.
-RCYNIC_USER=rcynic
+RPKI_USER=rpki
-RCYNIC_GROUP=rcynic
+RPKI_GROUP=rpki
-if test $use_rcynic_jail = yes && test "X$LD_STATIC_FLAG" != "X"
-then
- RCYNIC_STATIC_RSYNC='static-rsync/rsync'
-else
- RCYNIC_STATIC_RSYNC=''
-fi
+RCYNIC_CONF_DATA="${RCYNIC_DIR}/data"
@@ -4638,10 +4582,8 @@ esac
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_target_installation" >&5
$as_echo "$enable_target_installation" >&6; }
-# rcynic jail setup is complicated enough that it's simplest to have
-# different rule sets for different platforms. Icky, but....
-# rpki-rtr isn't as complicated, but has similar issues, same hack.
-# ca isn't as complicated either, but same hack.
+# Some bits of post-installation setup are complicated enough that
+# it's simplest to have different rule sets for different platforms.
case $host_os in
darwin*)
@@ -4691,29 +4633,29 @@ $as_echo "$RCYNIC_HTML_DIR" >&6; }
-# Sort out which things to install, depending on rcynic jail status and whether
+# Sort out which things to install, depending on whether
# we're doing final target installation.
+#
+# As things have evolved, this a bit repetitive. Simplify someday, maybe.
RCYNIC_INSTALL_TARGETS='install-always'
RTR_ORIGIN_INSTALL_TARGETS='install-always'
CA_INSTALL_TARGETS='install-always'
-
-if test $use_rcynic_jail = yes
-then
- RCYNIC_INSTALL_TARGETS="$RCYNIC_INSTALL_TARGETS install-jailed"
-fi
+CFG_INSTALL_TARGETS='install-always'
if test $enable_target_installation = yes
then
RCYNIC_INSTALL_TARGETS="$RCYNIC_INSTALL_TARGETS install-postconf"
RTR_ORIGIN_INSTALL_TARGETS="$RTR_ORIGIN_INSTALL_TARGETS install-postconf"
CA_INSTALL_TARGETS="$CA_INSTALL_TARGETS install-postconf"
+ CFG_INSTALL_TARGETS="$CFG_INSTALL_TARGETS install-postconf"
fi
+
# Now a bunch of checks to figure out what we can do with Python. If
# we don't have Python at all, none of the rest of this matters. If
# we do have Python, we need to check for required packages and
@@ -4724,9 +4666,9 @@ then
as_fn_error $? "I can't find a Python binary at all, this isn't going to work. Perhaps you need to set PATH?" "$LINENO" 5
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Python version 2.6 or higher" >&5
-$as_echo_n "checking for Python version 2.6 or higher... " >&6; }
-have_acceptable_python=`$PYTHON -c 'import sys; print "yes" if sys.version_info[0] == 2 and sys.version_info[1] >= 6 else "no"'`
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Python version 2.7 or higher" >&5
+$as_echo_n "checking for Python version 2.7 or higher... " >&6; }
+have_acceptable_python=`$PYTHON -c 'import sys; print "yes" if sys.version_info[0] == 2 and sys.version_info[1] >= 7 else "no"'`
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_acceptable_python" >&5
$as_echo "$have_acceptable_python" >&6; }
@@ -4752,11 +4694,22 @@ if $PYTHON -c 'import lxml.etree' 2>/dev/null; then have_lxml=yes; else have_lxm
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_lxml" >&5
$as_echo "$have_lxml" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for MySQLdb" >&5
-$as_echo_n "checking for MySQLdb... " >&6; }
-if $PYTHON -c 'import MySQLdb' 2>/dev/null; then have_mysqldb=yes; else have_mysqldb=no; fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_mysqldb" >&5
-$as_echo "$have_mysqldb" >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Tornado" >&5
+$as_echo_n "checking for Tornado... " >&6; }
+if $PYTHON -c 'import tornado' 2>/dev/null; then have_tornado=yes; else have_tornado=no; fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_tornado" >&5
+$as_echo "$have_tornado" >&6; }
+
+if test $have_tornado = yes
+then
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Tornado 4.2 or higher" >&5
+$as_echo_n "checking for Tornado 4.2 or higher... " >&6; }
+ have_acceptable_tornado=`$PYTHON -c "import tornado; print 'no' if tornado.version_info < (4, 2) else 'yes'"`
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_acceptable_tornado" >&5
+$as_echo "$have_acceptable_tornado" >&6; }
+else
+ have_acceptable_tornado=no
+fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Django" >&5
$as_echo_n "checking for Django... " >&6; }
@@ -4766,9 +4719,9 @@ $as_echo "$have_django" >&6; }
if test $have_django = yes
then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Django 1.3.7 or higher" >&5
-$as_echo_n "checking for Django 1.3.7 or higher... " >&6; }
- have_acceptable_django=`$PYTHON -c "import django; print 'no' if django.VERSION < (1, 3, 7) else 'yes'"`
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Django 1.8 or higher" >&5
+$as_echo_n "checking for Django 1.8 or higher... " >&6; }
+ have_acceptable_django=`$PYTHON -c "import django; print 'no' if django.VERSION < (1, 8) else 'yes'"`
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_acceptable_django" >&5
$as_echo "$have_acceptable_django" >&6; }
else
@@ -4787,23 +4740,6 @@ if $PYTHON -c 'import vobject' 2>/dev/null; then have_vobject=yes; else have_vob
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_vobject" >&5
$as_echo "$have_vobject" >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Django South" >&5
-$as_echo_n "checking for Django South... " >&6; }
-if $PYTHON -c 'import south' 2>/dev/null; then have_django_south=yes; else have_django_south=no; fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_django_south" >&5
-$as_echo "$have_django_south" >&6; }
-
-if test $have_django_south = yes
-then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Django South 0.7.5 or later" >&5
-$as_echo_n "checking for Django South 0.7.5 or later... " >&6; }
- have_acceptable_django_south=`$PYTHON -c "import south; print 'no' if map(int,south.__version__.split('.')) < [0, 7, 5] else 'yes'"`
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_acceptable_django_south" >&5
-$as_echo "$have_acceptable_django_south" >&6; }
-else
- have_acceptable_django_south=no
-fi
-
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for argparse" >&5
$as_echo_n "checking for argparse... " >&6; }
if $PYTHON -c 'import argparse' 2>/dev/null; then have_argparse=yes; else have_argparse=no; fi
@@ -4823,8 +4759,8 @@ runtime_ok=yes
if test $have_acceptable_python = no
then
runtime_ok=no
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI code requires Python version 2.x, for x = 6 or higher." >&5
-$as_echo "$as_me: WARNING: The RPKI code requires Python version 2.x, for x = 6 or higher." >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI code requires Python version 2.x, for x = 7 or higher." >&5
+$as_echo "$as_me: WARNING: The RPKI code requires Python version 2.x, for x = 7 or higher." >&2;}
fi
if test $build_rp_tools = yes
@@ -4832,8 +4768,8 @@ then
if test $have_python_h = no
then
build_ok=no
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: I can't find Python.h. Python sources are required to build the RP tools." >&5
-$as_echo "$as_me: WARNING: I can't find Python.h. Python sources are required to build the RP tools." >&2;}
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: I can't find Python.h. Python sources are required to build the rpki.POW module." >&5
+$as_echo "$as_me: WARNING: I can't find Python.h. Python sources are required to build the rpki.POW module." >&2;}
fi
if test "x$RRDTOOL" = "x"
then
@@ -4841,7 +4777,7 @@ $as_echo "$as_me: WARNING: I can't find Python.h. Python sources are required t
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: rcynic-html requires rrdtool." >&5
$as_echo "$as_me: WARNING: rcynic-html requires rrdtool." >&2;}
fi
- if test $use_rcynic_jail = no && test "X$RSYNC" = "X"
+ if test "X$RSYNC" = "X"
then
runtime_ok=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI relying party tools require rsync." >&5
@@ -4849,38 +4785,42 @@ $as_echo "$as_me: WARNING: The RPKI relying party tools require rsync." >&2;}
fi
fi
+if test $have_lxml = no
+then
+ runtime_ok=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI tools require the Python LXML package." >&5
+$as_echo "$as_me: WARNING: The RPKI tools require the Python LXML package." >&2;}
+fi
+
+if test $have_acceptable_django = no
+then
+ runtime_ok=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI tools require Django 1.8 or higher." >&5
+$as_echo "$as_me: WARNING: The RPKI tools require Django 1.8 or higher." >&2;}
+fi
+
+if test $have_argparse = no
+then
+ runtime_ok=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI tools require the Python argparse module." >&5
+$as_echo "$as_me: WARNING: The RPKI tools require the Python argparse module." >&2;}
+fi
+
+if test $have_acceptable_tornado = no
+then
+ runtime_ok=no
+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI tools require Tornado 4.2 or higher." >&5
+$as_echo "$as_me: WARNING: The RPKI tools require Tornado 4.2 or higher." >&2;}
+fi
+
if test $build_ca_tools = yes
then
- if test $have_lxml = no
- then
- runtime_ok=no
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI tools require the Python LXML module." >&5
-$as_echo "$as_me: WARNING: The RPKI tools require the Python LXML module." >&2;}
- fi
- if test $have_acceptable_django = no
- then
- runtime_ok=no
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI CA tools require Django 1.3.7 or higher." >&5
-$as_echo "$as_me: WARNING: The RPKI CA tools require Django 1.3.7 or higher." >&2;}
- fi
if test $have_vobject = no
then
runtime_ok=no
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI CA GUI requires the Python vobject module." >&5
$as_echo "$as_me: WARNING: The RPKI CA GUI requires the Python vobject module." >&2;}
fi
- if test $have_acceptable_django_south = no
- then
- runtime_ok=no
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI CA tools require Django South 0.7.5 or higher." >&5
-$as_echo "$as_me: WARNING: The RPKI CA tools require Django South 0.7.5 or higher." >&2;}
- fi
- if test $have_argparse = no
- then
- runtime_ok=no
- { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: The RPKI CA tools require the Python argparse module." >&5
-$as_echo "$as_me: WARNING: The RPKI CA tools require the Python argparse module." >&2;}
- fi
if test $have_pyyaml = no
then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: PyYAML missing, so \"make test\" will not work properly." >&5
@@ -5084,7 +5024,7 @@ fi
if test $build_rp_tools = yes
then
- ac_config_files="$ac_config_files rp/Makefile rp/rcynic/Makefile rp/rcynic/static-rsync/Makefile rp/utils/Makefile rp/rpki-rtr/Makefile"
+ ac_config_files="$ac_config_files rp/Makefile rp/config/Makefile rp/rcynic/Makefile rp/utils/Makefile rp/rpki-rtr/Makefile"
fi
@@ -5838,8 +5778,8 @@ do
"openssl/Makefile") CONFIG_FILES="$CONFIG_FILES openssl/Makefile" ;;
"openssl/tests/Makefile") CONFIG_FILES="$CONFIG_FILES openssl/tests/Makefile" ;;
"rp/Makefile") CONFIG_FILES="$CONFIG_FILES rp/Makefile" ;;
+ "rp/config/Makefile") CONFIG_FILES="$CONFIG_FILES rp/config/Makefile" ;;
"rp/rcynic/Makefile") CONFIG_FILES="$CONFIG_FILES rp/rcynic/Makefile" ;;
- "rp/rcynic/static-rsync/Makefile") CONFIG_FILES="$CONFIG_FILES rp/rcynic/static-rsync/Makefile" ;;
"rp/utils/Makefile") CONFIG_FILES="$CONFIG_FILES rp/utils/Makefile" ;;
"rp/rpki-rtr/Makefile") CONFIG_FILES="$CONFIG_FILES rp/rpki-rtr/Makefile" ;;
"ca/Makefile") CONFIG_FILES="$CONFIG_FILES ca/Makefile" ;;
diff --git a/configure.ac b/configure.ac
index f85fb909..d701981b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -33,7 +33,6 @@ fi
# Put the user option stuff up front.
AC_ARG_WITH([system_openssl], [AS_HELP_STRING([--with-system-openssl], [Link against system copy of OpenSSL])], [], [with_system_openssl=auto])
-AC_ARG_ENABLE([rcynic_jail], [AS_HELP_STRING([--enable-rcynic-jail], [Run rcynic in chroot jail])], [], [enable_rcynic_jail=no])
AC_ARG_ENABLE([openssl_asm], [AS_HELP_STRING([--disable-openssl-asm], [Don't let OpenSSL build assembler code])], [], [enable_openssl_asm=auto])
AC_ARG_ENABLE([ca_tools], [AS_HELP_STRING([--disable-ca-tools], [Don't build any of the CA tools])], [], [enable_ca_tools=yes])
AC_ARG_ENABLE([rp_tools], [AS_HELP_STRING([--disable-rp-tools], [Don't build any of the relying party tools])], [], [enable_rp_tools=yes])
@@ -43,63 +42,21 @@ AC_ARG_ENABLE([python_install_layout], [AS_HELP_STRING([--enable-python-install-
AC_ARG_ENABLE([wsgi_python_egg_cache], [AS_HELP_STRING([--enable-wsgi-python-egg-cache=dir[[:user]]], [Set up PYTHON_EGG_CACHE in wsgi wrapper])], [], [enable_wsgi_python_egg_cache=auto])
AC_ARG_ENABLE([runtime_dependencies], [AS_HELP_STRING([--disable-runtime-dependencies], [Don't enforce runtime dependencies])], [], [enable_runtime_dependencies=yes])
-AC_ARG_VAR([RCYNIC_DIR], [Where to put output files from rcynic and rpki-rtr; also controls jail location for --enable-rcynic-jail])
+AC_ARG_VAR([RCYNIC_DIR], [Where to put output files from rcynic and rpki-rtr])
AC_ARG_VAR([APACHE_VERSION], [Version of Apache httpd, mostly used on FreeBSD where it determines some of the directory names])
# Obsolete options. If you know of a better way to handle this, tell me.
-AC_ARG_ENABLE([python], [AS_HELP_STRING([--disable-python], [(Obsolete, do not use)])], [AC_MSG_ERROR([--disable-python is obsolete. Please see the --disable-ca-tools option])], [])
-AC_ARG_ENABLE([django], [AS_HELP_STRING([--disable-django], [(Obsolete, do not use)])], [AC_MSG_ERROR([--disable-django is obsolete. Please see the --disable-ca-tools option])], [])
-AC_ARG_ENABLE([rpki_rtr], [AS_HELP_STRING([--disable-rpki-rtr], [(Obsolete, do not use)])], [AC_MSG_ERROR([--disable-rpki-rtr is obsolete. Please see the --disable-rp-tools option])], [])
+AC_ARG_ENABLE([python], [AS_HELP_STRING([--disable-python], [(Obsolete, do not use)])], [AC_MSG_ERROR([--disable-python is obsolete. Please see the --disable-ca-tools option])], [])
+AC_ARG_ENABLE([django], [AS_HELP_STRING([--disable-django], [(Obsolete, do not use)])], [AC_MSG_ERROR([--disable-django is obsolete. Please see the --disable-ca-tools option])], [])
+AC_ARG_ENABLE([rpki_rtr], [AS_HELP_STRING([--disable-rpki-rtr], [(Obsolete, do not use)])], [AC_MSG_ERROR([--disable-rpki-rtr is obsolete. Please see the --disable-rp-tools option])], [])
+AC_ARG_ENABLE([rcynic_jail], [AS_HELP_STRING([--enable-rcynic-jail], [(Obsolete, do not use)])], [AC_MSG_ERROR([--enable-rcynic-jail is obsolete.])], [])
AC_PROG_CC
AC_PROG_INSTALL
AC_CHECK_SIZEOF([long])
-# We'd like to build rcynic as a static binary if we can, because that
-# makes it much simpler to run rcynic in a chroot jail, but we don't
-# know how to do it on all platforms, so we try the hack we know, and
-# if that doesn't work, oh well.
-#
-# Sadly, it's even worse than this, because there are platforms like
-# Fedora where the compiler and linker support -static just fine, but
-# the default libraries do not, and if you start down the primrose
-# path of installing the necessary libraries, you eventually hit a
-# wall where one of the static libraries you downloaded depends on
-# something that's not available as a static library, ie, you lose.
-#
-# So for now I'm just going to make this a FreeBSD-only option.
-# Feh. Those of you who choose to use other platforms are welcome to
-# fix this and send me the patch, if you care.
-
-dnl AC_MSG_CHECKING([whether linker supports -static])
-dnl old_LDFLAGS="$LDFLAGS"
-dnl LDFLAGS="$LDFLAGS -static"
-dnl AC_LINK_IFELSE(
-dnl [AC_LANG_SOURCE([[int main (int argc, char *argv[]) { return 0; }]])],
-dnl [
-dnl AC_MSG_RESULT(yes)
-dnl LD_STATIC_FLAG='-static'
-dnl ],
-dnl [
-dnl AC_MSG_RESULT(no)
-dnl LD_STATIC_FLAG=''
-dnl ]
-dnl )
-dnl LDFLAGS="$old_LDFLAGS"
-dnl unset old_LDFLAGS
-
-case $host_os in
- freebsd*)
- LD_STATIC_FLAG='-static'
- ;;
- *)
- LD_STATIC_FLAG=''
- ;;
-esac
-AC_SUBST(LD_STATIC_FLAG)
-
AC_MSG_CHECKING([whether compiler and linker support -Wl,-Bsymbolic])
old_LDFLAGS="$LDFLAGS"
LDFLAGS="$LDFLAGS -Wl,-Bsymbolic"
@@ -138,6 +95,7 @@ AC_PATH_PROG([SORT], [sort])
AC_PATH_PROG([RRDTOOL], [rrdtool])
AC_PATH_PROG([TRANG], [trang], [\${abs_top_srcdir}/buildtools/trang-not-found])
AC_PATH_PROG([RSYNC], [rsync])
+AC_PATH_PROG([SUDO], [sudo])
# See whether we need to check for dependencies that we only need at
# runtime. We do this by default when compiling from source to avoid
@@ -313,18 +271,14 @@ fi
if test "X$RCYNIC_DIR" = "X"
then
- rcynic_base_dir='/var/rcynic'
-else
- rcynic_base_dir="${RCYNIC_DIR}"
+ RCYNIC_DIR='/var/rcynic'
fi
-RCYNIC_DIR='${DESTDIR}'"${rcynic_base_dir}"
-
# APACHE_VERSION is another "precious" argument to this script. It
# mostly matters on FreeBSD, where most things involving Apache encode
# the Apache version number into their filenames.
#
-# If we can't figure out the version number, we assume 2.2 and hope
+# If we can't figure out the version number, we assume 2.4 and hope
# for the best; at some point we may need to do better than this.
#
# apachectl sometimes whines about ulimits, so we discard its stderr.
@@ -347,76 +301,23 @@ fi
if test "X$APACHE_VERSION" = "X"
then
- APACHE_VERSION="22"
- msg='not found, defaulting to 2.2'
+ APACHE_VERSION="24"
+ msg='not found, defaulting to 2.4'
else
msg=`echo $APACHE_VERSION | sed 's=.=&.='`
fi
AC_MSG_RESULT([$msg])
-# Figure out whether to run rcynic in a chroot jail, which determines
-# a bunch of other settings.
-
-AC_MSG_CHECKING([whether to build chroot jail for rcynic])
-
-case $enable_rcynic_jail in
- yes)
- use_rcynic_jail=yes
- RCYNIC_CONF_FILE='${RCYNIC_DIR}/etc/rcynic.conf'
- RCYNIC_TA_DIR='${RCYNIC_DIR}/etc/trust-anchors'
- RCYNIC_BIN_RCYNIC='${RCYNIC_DIR}/bin/rcynic'
- RCYNIC_CONF_RSYNC='/bin/rsync'
- RCYNIC_CONF_DATA='/data'
- RCYNIC_CONF_TA_DIR='/etc/trust-anchors'
- RCYNIC_CRON_USER='root'
- RCYNIC_JAIL_DIRS='${RCYNIC_DIR}/bin ${RCYNIC_DIR}/dev ${RCYNIC_DIR}/etc'
- if test "X$host_os" = "Xlinux"
- then
- RCYNIC_JAIL_DIRS="$RCYNIC_JAIL_DIRS "'${RCYNIC_DIR}/lib ${RCYNIC_DIR}/lib64 ${RCYNIC_DIR}/usr/lib'
- fi
- ;;
- no)
- use_rcynic_jail=no
- RCYNIC_CONF_FILE='${DESTDIR}${sysconfdir}/rcynic.conf'
- RCYNIC_TA_DIR='${DESTDIR}${sysconfdir}/rpki/trust-anchors'
- RCYNIC_BIN_RCYNIC='${DESTDIR}${bindir}/rcynic'
- RCYNIC_CONF_RSYNC="${RSYNC}"
- RCYNIC_CONF_DATA="${rcynic_base_dir}/data"
- RCYNIC_CONF_TA_DIR='${sysconfdir}/rpki/trust-anchors'
- RCYNIC_CRON_USER='${RCYNIC_USER}'
- RCYNIC_JAIL_DIRS=''
- ;;
- *)
- AC_MSG_ERROR([Unrecognized value for --enable-rcynic-jail: $enable_rcynic_jail])
- ;;
-esac
-
-AC_SUBST(RCYNIC_JAIL_DIRS)
-AC_SUBST(RCYNIC_CONF_FILE)
-AC_SUBST(RCYNIC_TA_DIR)
-AC_SUBST(RCYNIC_BIN_RCYNIC)
-AC_SUBST(RCYNIC_CONF_RSYNC)
-AC_SUBST(RCYNIC_CONF_DATA)
-AC_SUBST(RCYNIC_CONF_TA_DIR)
-AC_SUBST(RCYNIC_CRON_USER)
-
-AC_MSG_RESULT([$use_rcynic_jail])
-
# Perhaps there should be a way to set these, but for now just
# wire them in here so at least they're consistent in all Makefiles.
-AC_SUBST(RCYNIC_USER, [rcynic])
-AC_SUBST(RCYNIC_GROUP, [rcynic])
+AC_SUBST(RPKI_USER, [rpki])
+AC_SUBST(RPKI_GROUP, [rpki])
-if test $use_rcynic_jail = yes && test "X$LD_STATIC_FLAG" != "X"
-then
- RCYNIC_STATIC_RSYNC='static-rsync/rsync'
-else
- RCYNIC_STATIC_RSYNC=''
-fi
+RCYNIC_CONF_DATA="${RCYNIC_DIR}/data"
-AC_SUBST(RCYNIC_STATIC_RSYNC)
+AC_SUBST(RCYNIC_CONF_DATA)
# Check whether to do "final target installation". This means actions
# that can only be done when installing a package, as opposed to when
@@ -434,10 +335,8 @@ esac
AC_MSG_RESULT([$enable_target_installation])
-# rcynic jail setup is complicated enough that it's simplest to have
-# different rule sets for different platforms. Icky, but....
-# rpki-rtr isn't as complicated, but has similar issues, same hack.
-# ca isn't as complicated either, but same hack.
+# Some bits of post-installation setup are complicated enough that
+# it's simplest to have different rule sets for different platforms.
case $host_os in
darwin*)
@@ -485,28 +384,28 @@ AC_MSG_RESULT([$RCYNIC_HTML_DIR])
AC_SUBST(RCYNIC_HTML_DIR)
-# Sort out which things to install, depending on rcynic jail status and whether
+# Sort out which things to install, depending on whether
# we're doing final target installation.
+#
+# As things have evolved, this a bit repetitive. Simplify someday, maybe.
RCYNIC_INSTALL_TARGETS='install-always'
RTR_ORIGIN_INSTALL_TARGETS='install-always'
CA_INSTALL_TARGETS='install-always'
-
-if test $use_rcynic_jail = yes
-then
- RCYNIC_INSTALL_TARGETS="$RCYNIC_INSTALL_TARGETS install-jailed"
-fi
+CFG_INSTALL_TARGETS='install-always'
if test $enable_target_installation = yes
then
RCYNIC_INSTALL_TARGETS="$RCYNIC_INSTALL_TARGETS install-postconf"
RTR_ORIGIN_INSTALL_TARGETS="$RTR_ORIGIN_INSTALL_TARGETS install-postconf"
CA_INSTALL_TARGETS="$CA_INSTALL_TARGETS install-postconf"
+ CFG_INSTALL_TARGETS="$CFG_INSTALL_TARGETS install-postconf"
fi
AC_SUBST(RCYNIC_INSTALL_TARGETS)
AC_SUBST(RTR_ORIGIN_INSTALL_TARGETS)
AC_SUBST(CA_INSTALL_TARGETS)
+AC_SUBST(CFG_INSTALL_TARGETS)
# Now a bunch of checks to figure out what we can do with Python. If
# we don't have Python at all, none of the rest of this matters. If
@@ -518,8 +417,8 @@ then
AC_MSG_ERROR([I can't find a Python binary at all, this isn't going to work. Perhaps you need to set PATH?])
fi
-AC_MSG_CHECKING([for Python version 2.6 or higher])
-have_acceptable_python=`$PYTHON -c 'import sys; print "yes" if sys.version_info[[0]] == 2 and sys.version_info[[1]] >= 6 else "no"'`
+AC_MSG_CHECKING([for Python version 2.7 or higher])
+have_acceptable_python=`$PYTHON -c 'import sys; print "yes" if sys.version_info[[0]] == 2 and sys.version_info[[1]] >= 7 else "no"'`
AC_MSG_RESULT([$have_acceptable_python])
AC_MSG_CHECKING([distutils to find out where Python.h should be])
@@ -532,9 +431,18 @@ AC_MSG_CHECKING([for lxml.etree])
if $PYTHON -c 'import lxml.etree' 2>/dev/null; then have_lxml=yes; else have_lxml=no; fi
AC_MSG_RESULT([$have_lxml])
-AC_MSG_CHECKING([for MySQLdb])
-if $PYTHON -c 'import MySQLdb' 2>/dev/null; then have_mysqldb=yes; else have_mysqldb=no; fi
-AC_MSG_RESULT([$have_mysqldb])
+AC_MSG_CHECKING([for Tornado])
+if $PYTHON -c 'import tornado' 2>/dev/null; then have_tornado=yes; else have_tornado=no; fi
+AC_MSG_RESULT([$have_tornado])
+
+if test $have_tornado = yes
+then
+ AC_MSG_CHECKING([for Tornado 4.2 or higher])
+ have_acceptable_tornado=`$PYTHON -c "import tornado; print 'no' if tornado.version_info < (4, 2) else 'yes'"`
+ AC_MSG_RESULT([$have_acceptable_tornado])
+else
+ have_acceptable_tornado=no
+fi
AC_MSG_CHECKING([for Django])
if $PYTHON -c 'import django' 2>/dev/null; then have_django=yes; else have_django=no; fi
@@ -542,8 +450,8 @@ AC_MSG_RESULT([$have_django])
if test $have_django = yes
then
- AC_MSG_CHECKING([for Django 1.3.7 or higher])
- have_acceptable_django=`$PYTHON -c "import django; print 'no' if django.VERSION < (1, 3, 7) else 'yes'"`
+ AC_MSG_CHECKING([for Django 1.8 or higher])
+ have_acceptable_django=`$PYTHON -c "import django; print 'no' if django.VERSION < (1, 8) else 'yes'"`
AC_MSG_RESULT([$have_acceptable_django])
else
have_acceptable_django=no
@@ -557,19 +465,6 @@ AC_MSG_CHECKING([for vobject])
if $PYTHON -c 'import vobject' 2>/dev/null; then have_vobject=yes; else have_vobject=no; fi
AC_MSG_RESULT([$have_vobject])
-AC_MSG_CHECKING([for Django South])
-if $PYTHON -c 'import south' 2>/dev/null; then have_django_south=yes; else have_django_south=no; fi
-AC_MSG_RESULT([$have_django_south])
-
-if test $have_django_south = yes
-then
- AC_MSG_CHECKING([for Django South 0.7.5 or later])
- have_acceptable_django_south=`$PYTHON -c "import south; print 'no' if map(int,south.__version__.split('.')) < [[0, 7, 5]] else 'yes'"`
- AC_MSG_RESULT([$have_acceptable_django_south])
-else
- have_acceptable_django_south=no
-fi
-
AC_MSG_CHECKING([for argparse])
if $PYTHON -c 'import argparse' 2>/dev/null; then have_argparse=yes; else have_argparse=no; fi
AC_MSG_RESULT([$have_argparse])
@@ -587,7 +482,7 @@ runtime_ok=yes
if test $have_acceptable_python = no
then
runtime_ok=no
- AC_MSG_WARN([The RPKI code requires Python version 2.x, for x = 6 or higher.])
+ AC_MSG_WARN([The RPKI code requires Python version 2.x, for x = 7 or higher.])
fi
if test $build_rp_tools = yes
@@ -595,47 +490,51 @@ then
if test $have_python_h = no
then
build_ok=no
- AC_MSG_WARN([I can't find Python.h. Python sources are required to build the RP tools.])
+ AC_MSG_WARN([I can't find Python.h. Python sources are required to build the rpki.POW module.])
fi
if test "x$RRDTOOL" = "x"
then
runtime_ok=no
AC_MSG_WARN([rcynic-html requires rrdtool.])
fi
- if test $use_rcynic_jail = no && test "X$RSYNC" = "X"
+ if test "X$RSYNC" = "X"
then
runtime_ok=no
AC_MSG_WARN([The RPKI relying party tools require rsync.])
fi
fi
+if test $have_lxml = no
+then
+ runtime_ok=no
+ AC_MSG_WARN([The RPKI tools require the Python LXML package.])
+fi
+
+if test $have_acceptable_django = no
+then
+ runtime_ok=no
+ AC_MSG_WARN([The RPKI tools require Django 1.8 or higher.])
+fi
+
+if test $have_argparse = no
+then
+ runtime_ok=no
+ AC_MSG_WARN([The RPKI tools require the Python argparse module.])
+fi
+
+if test $have_acceptable_tornado = no
+then
+ runtime_ok=no
+ AC_MSG_WARN([The RPKI tools require Tornado 4.2 or higher.])
+fi
+
if test $build_ca_tools = yes
then
- if test $have_lxml = no
- then
- runtime_ok=no
- AC_MSG_WARN([The RPKI tools require the Python LXML module.])
- fi
- if test $have_acceptable_django = no
- then
- runtime_ok=no
- AC_MSG_WARN([The RPKI CA tools require Django 1.3.7 or higher.])
- fi
if test $have_vobject = no
then
runtime_ok=no
AC_MSG_WARN([The RPKI CA GUI requires the Python vobject module.])
fi
- if test $have_acceptable_django_south = no
- then
- runtime_ok=no
- AC_MSG_WARN([The RPKI CA tools require Django South 0.7.5 or higher.])
- fi
- if test $have_argparse = no
- then
- runtime_ok=no
- AC_MSG_WARN([The RPKI CA tools require the Python argparse module.])
- fi
if test $have_pyyaml = no
then
AC_MSG_WARN([PyYAML missing, so "make test" will not work properly.])
@@ -824,8 +723,8 @@ fi
if test $build_rp_tools = yes
then
AC_CONFIG_FILES([rp/Makefile
+ rp/config/Makefile
rp/rcynic/Makefile
- rp/rcynic/static-rsync/Makefile
rp/utils/Makefile
rp/rpki-rtr/Makefile])
fi
diff --git a/doc/doc.RPKI.CA.Configuration.web_portal b/doc/doc.RPKI.CA.Configuration.web_portal
index 3c3ddb09..096f5101 100644
--- a/doc/doc.RPKI.CA.Configuration.web_portal
+++ b/doc/doc.RPKI.CA.Configuration.web_portal
@@ -38,10 +38,3 @@ If you get an error like "Invalid HTTP_HOST header (you may need to set
ALLOWED_HOSTS)", you will need to set this option.
No default value.
-
-***** download-directory *****
-
-A directory large enough to hold the RouteViews?.org routing table dump fetched
-by the rpkigui-import-routes script.
-
- download-directory = /var/tmp
diff --git a/doc/doc.RPKI.Installation b/doc/doc.RPKI.Installation
index 0acd33c1..67746d55 100644
--- a/doc/doc.RPKI.Installation
+++ b/doc/doc.RPKI.Installation
@@ -6,14 +6,8 @@ platform on which you're trying to install.
* On Ubuntu 12.04 LTS ("Precise Pangolin"), Ubuntu 14.04 ("Trusty Tahir"), or
Debian 7 ("Wheezy"), you can use Debian binary packages.
-* if you want to install a simple RPKI cache to feed routers from a Ubuntu
- 14.04 system, here is a one page ten minute recipe.
-
-* If you want to install a CA and a cache on a Ubuntu 14.04 with a rootd CA,
- here is a one page hack. It will take less than an hour.
-
-* If you are feeling dangerous and want to try the rrdp testbed CA and RP, here
- is a one page hack. It does not support rootd.
+ At present we only generate binary packages for Precise Pangolin and
+ Wheezy. This may change in the future.
* On FreeBSD, you can use FreeBSD ports.
diff --git a/doc/doc.RPKI.RP.rcynic b/doc/doc.RPKI.RP.rcynic
index f3ba5e4c..4bd95ae1 100644
--- a/doc/doc.RPKI.RP.rcynic
+++ b/doc/doc.RPKI.RP.rcynic
@@ -447,7 +447,7 @@ except when building complex topologies where rcynic running on one set of
machines acts as aggregators for another set of validators. A large ISP might
want to build such a topology so that they could have a local validation cache
in each POP while minimizing load on the global repository system and
-maintaining some degree of internal consistency between POPs. In such cases,
+maintaining some degree of internal consistancy between POPs. In such cases,
one might want the rcynic instances in the POPs to validate data fetched from
the aggregators via an external process, without the POP rcynic instances
attempting to fetch anything themselves.
diff --git a/doc/manual.pdf b/doc/manual.pdf
index 0b480c9c..bd98c08b 100644
--- a/doc/manual.pdf
+++ b/doc/manual.pdf
Binary files differ
diff --git a/ext/POW.c b/ext/POW.c
index 990d344d..4519da5f 100644
--- a/ext/POW.c
+++ b/ext/POW.c
@@ -80,7 +80,6 @@
#include <openssl/pem.h>
#include <openssl/evp.h>
#include <openssl/err.h>
-#include <openssl/md5.h>
#include <openssl/sha.h>
#include <openssl/cms.h>
@@ -112,9 +111,12 @@ define GCC_UNUSED
*/
#define MAX_ASN1_INTEGER_LEN 20
+/*
+ * How many bytes is a SHA256 digest?
+ */
+#define HASH_SHA256_LEN 32
+
/* Digests */
-#define MD5_DIGEST 2
-#define SHA_DIGEST 3
#define SHA1_DIGEST 4
#define SHA256_DIGEST 6
#define SHA384_DIGEST 7
@@ -130,7 +132,6 @@ define GCC_UNUSED
/* Object check functions */
#define POW_X509_Check(op) PyObject_TypeCheck(op, &POW_X509_Type)
-#define POW_X509Store_Check(op) PyObject_TypeCheck(op, &POW_X509Store_Type)
#define POW_X509StoreCTX_Check(op) PyObject_TypeCheck(op, &POW_X509StoreCTX_Type)
#define POW_CRL_Check(op) PyObject_TypeCheck(op, &POW_CRL_Type)
#define POW_Asymmetric_Check(op) PyObject_TypeCheck(op, &POW_Asymmetric_Type)
@@ -162,12 +163,40 @@ static char pow_module__doc__ [] =
* but we try to put all the magic associated with this in one place.
*/
-#ifndef NID_rpkiManifest
-static int NID_rpkiManifest;
+#ifndef NID_ad_rpkiManifest
+static int NID_ad_rpkiManifest;
+#endif
+
+#ifndef NID_ad_signedObject
+static int NID_ad_signedObject;
+#endif
+
+#ifndef NID_ad_rpkiNotify
+static int NID_ad_rpkiNotify;
+#endif
+
+#ifndef NID_ct_ROA
+static int NID_ct_ROA;
+#endif
+
+#ifndef NID_ct_rpkiManifest
+static int NID_ct_rpkiManifest;
+#endif
+
+#ifndef NID_ct_rpkiGhostbusters
+static int NID_ct_rpkiGhostbusters;
+#endif
+
+#ifndef NID_cp_ipAddr_asNumber
+static int NID_cp_ipAddr_asNumber;
#endif
-#ifndef NID_signedObject
-static int NID_signedObject;
+#ifndef NID_id_kp_bgpsec_router
+static int NID_id_kp_bgpsec_router;
+#endif
+
+#ifndef NID_binary_signing_time
+static int NID_binary_signing_time;
#endif
static const struct {
@@ -177,12 +206,40 @@ static const struct {
const char *ln;
} missing_nids[] = {
-#ifndef NID_rpkiManifest
- {&NID_rpkiManifest, "1.3.6.1.5.5.7.48.10", "id-ad-rpkiManifest", "RPKI Manifest"},
+#ifndef NID_ad_rpkiManifest
+ {&NID_ad_rpkiManifest, "1.3.6.1.5.5.7.48.10", "id-ad-rpkiManifest", "RPKI Manifest"},
+#endif
+
+#ifndef NID_ad_signedObject
+ {&NID_ad_signedObject, "1.3.6.1.5.5.7.48.11", "id-ad-signedObject", "Signed Object"},
+#endif
+
+#ifndef NID_ad_rpkiNotify
+ {&NID_ad_rpkiNotify, "1.3.6.1.5.5.7.48.13", "id-ad-rpkiNotify", "RPKI RRDP Notification"},
+#endif
+
+#ifndef NID_ct_ROA
+ {&NID_ct_ROA, "1.2.840.113549.1.9.16.1.24", "id-ct-routeOriginAttestation", "ROA eContent"},
+#endif
+
+#ifndef NID_ct_rpkiManifest
+ {&NID_ct_rpkiManifest, "1.2.840.113549.1.9.16.1.26", "id-ct-rpkiManifest", "RPKI Manifest eContent"},
+#endif
+
+#ifndef NID_ct_rpkiGhostbusters
+ {&NID_ct_rpkiGhostbusters, "1.2.840.113549.1.9.16.1.35", "id-ct-rpkiGhostbusters", "RPKI Ghostbusters eContent"},
+#endif
+
+#ifndef NID_cp_ipAddr_asNumber
+ {&NID_cp_ipAddr_asNumber, "1.3.6.1.5.5.7.14.2", "id-cp-ipAddr-asNumber", "RPKI Certificate Policy"},
+#endif
+
+#ifndef NID_id_kp_bgpsec_router
+ {&NID_id_kp_bgpsec_router, "1.3.6.1.5.5.7.3.30", "id-kp-bgpsec-router", "BGPSEC Router Certificate"},
#endif
-#ifndef NID_signedObject
- {&NID_signedObject, "1.3.6.1.5.5.7.48.11", "id-ad-signedObject", "Signed Object"}
+#ifndef NID_binary_signing_time
+ {&NID_binary_signing_time, "1.2.840.113549.1.9.16.2.46", "id-aa-binarySigningTime", "CMS Binary Signing Time"},
#endif
};
@@ -235,7 +292,8 @@ static PyObject
*ErrorObject,
*OpenSSLErrorObject,
*POWErrorObject,
- *NotVerifiedErrorObject;
+ *NotVerifiedErrorObject,
+ *ValidationErrorObject;
/*
* Constructor for customized datetime class.
@@ -251,12 +309,17 @@ static PyObject *custom_datetime;
static int x509_store_ctx_ex_data_idx = -1;
/*
+ * ASN.1 "constants" constructed at runtime.
+ */
+
+static const ASN1_INTEGER *asn1_zero, *asn1_four_octets, *asn1_twenty_octets;
+
+/*
* Declarations of type objects (definitions come later).
*/
static PyTypeObject
POW_X509_Type,
- POW_X509Store_Type,
POW_X509StoreCTX_Type,
POW_CRL_Type,
POW_Asymmetric_Type,
@@ -286,14 +349,8 @@ typedef struct {
typedef struct {
PyObject_HEAD
- X509_STORE *store;
- PyObject *ctxclass;
-} x509_store_object;
-
-typedef struct {
- PyObject_HEAD
X509_STORE_CTX *ctx;
- x509_store_object *store;
+ X509_STORE *store;
} x509_store_ctx_object;
typedef struct {
@@ -338,6 +395,17 @@ typedef struct {
X509_EXTENSIONS *exts;
} pkcs10_object;
+/*
+ * Container for a generic extension, including a destructor.
+ */
+
+typedef struct {
+ void (*destructor)(void *);
+ void *value;
+ int nid;
+ int critical;
+} extension_wrapper;
+
/*
@@ -401,6 +469,12 @@ typedef struct {
goto error; \
} while (0)
+#define lose_validation_error(_msg_) \
+ do { \
+ PyErr_SetString(ValidationErrorObject, (_msg_)); \
+ goto error; \
+ } while (0)
+
#define assert_no_unhandled_openssl_errors() \
do { \
if (ERR_peek_error()) { \
@@ -427,8 +501,6 @@ static const EVP_MD *
evp_digest_factory(int digest_type)
{
switch (digest_type) {
- case MD5_DIGEST: return EVP_md5();
- case SHA_DIGEST: return EVP_sha();
case SHA1_DIGEST: return EVP_sha1();
case SHA256_DIGEST: return EVP_sha256();
case SHA384_DIGEST: return EVP_sha384();
@@ -524,7 +596,7 @@ x509_object_helper_set_name(PyObject *dn_obj)
goto error;
if (!PySequence_Check(rdn_obj) || PySequence_Size(rdn_obj) == 0)
- lose_type_error("each RDN must be a sequence with at least one element");
+ lose_type_error("Each RDN must be a sequence with at least one element");
for (j = 0; j < PySequence_Size(rdn_obj); j++) {
@@ -532,7 +604,7 @@ x509_object_helper_set_name(PyObject *dn_obj)
goto error;
if (!PySequence_Check(pair_obj) || PySequence_Size(pair_obj) != 2)
- lose_type_error("each name entry must be a two-element sequence");
+ lose_type_error("Each name entry must be a two-element sequence");
if ((type_obj = PySequence_GetItem(pair_obj, 0)) == NULL ||
(type_str = PyString_AsString(type_obj)) == NULL ||
@@ -573,7 +645,7 @@ x509_object_helper_set_name(PyObject *dn_obj)
static PyObject *
x509_object_helper_get_name(X509_NAME *name, int format)
{
- X509_NAME_ENTRY *entry = NULL;
+ X509_NAME_ENTRY *ne = NULL;
PyObject *result = NULL;
PyObject *rdn = NULL;
PyObject *item = NULL;
@@ -593,18 +665,18 @@ x509_object_helper_get_name(X509_NAME *name, int format)
for (i = 0; i < X509_NAME_entry_count(name); i++) {
- if ((entry = X509_NAME_get_entry(name, i)) == NULL)
+ if ((ne = X509_NAME_get_entry(name, i)) == NULL)
lose("Couldn't get certificate name");
- if (entry->set < 0 || entry->set < set || entry->set > set + 1)
+ if (ne->set < 0 || ne->set < set || ne->set > set + 1)
lose("X509_NAME->set value out of expected range");
switch (format) {
case SHORTNAME_FORMAT:
- oid = OBJ_nid2sn(OBJ_obj2nid(entry->object));
+ oid = OBJ_nid2sn(OBJ_obj2nid(X509_NAME_ENTRY_get_object(ne)));
break;
case LONGNAME_FORMAT:
- oid = OBJ_nid2ln(OBJ_obj2nid(entry->object));
+ oid = OBJ_nid2ln(OBJ_obj2nid(X509_NAME_ENTRY_get_object(ne)));
break;
case OIDNAME_FORMAT:
oid = NULL;
@@ -614,16 +686,16 @@ x509_object_helper_get_name(X509_NAME *name, int format)
}
if (oid == NULL) {
- if (OBJ_obj2txt(oidbuf, sizeof(oidbuf), entry->object, 1) <= 0)
+ if (OBJ_obj2txt(oidbuf, sizeof(oidbuf), X509_NAME_ENTRY_get_object(ne), 1) <= 0)
lose_openssl_error("Couldn't translate OID");
oid = oidbuf;
}
- if (entry->set > set) {
+ if (ne->set > set) {
set++;
- if ((item = Py_BuildValue("((ss#))", oid, ASN1_STRING_data(entry->value),
- (Py_ssize_t) ASN1_STRING_length(entry->value))) == NULL)
+ if ((item = Py_BuildValue("((ss#))", oid, ASN1_STRING_data(X509_NAME_ENTRY_get_data(ne)),
+ (Py_ssize_t) ASN1_STRING_length(X509_NAME_ENTRY_get_data(ne)))) == NULL)
goto error;
PyTuple_SET_ITEM(result, set, item);
item = NULL;
@@ -636,8 +708,8 @@ x509_object_helper_get_name(X509_NAME *name, int format)
PyTuple_SET_ITEM(result, set, rdn);
if (rdn == NULL)
goto error;
- if ((item = Py_BuildValue("(ss#)", oid, ASN1_STRING_data(entry->value),
- (Py_ssize_t) ASN1_STRING_length(entry->value))) == NULL)
+ if ((item = Py_BuildValue("(ss#)", oid, ASN1_STRING_data(X509_NAME_ENTRY_get_data(ne)),
+ (Py_ssize_t) ASN1_STRING_length(X509_NAME_ENTRY_get_data(ne)))) == NULL)
goto error;
PyTuple_SetItem(rdn, PyTuple_Size(rdn) - 1, item);
rdn = item = NULL;
@@ -677,7 +749,7 @@ x509_helper_iterable_to_stack(PyObject *iterable)
while ((item = PyIter_Next(iterator)) != NULL) {
if (!POW_X509_Check(item))
- lose_type_error("Inapropriate type");
+ lose_type_error("Expected an X509 object");
if (!sk_X509_push(stack, ((x509_object *) item)->x509))
lose("Couldn't add X509 object to stack");
@@ -1084,10 +1156,504 @@ whack_ec_key_to_namedCurve(EVP_PKEY *pkey)
/*
- * Extension functions. Calling sequence here is a little weird,
- * because it turns out that the simplest way to avoid massive
- * duplication of code between classes is to work directly with
- * X509_EXTENSIONS objects.
+ * Validation status codes. Still under construction. Conceptually
+ * modeled after rcynic's validation status database, implementation
+ * somewhat different due to language issues and desire to keep the C
+ * side of this as simple as possible. Depends on support from the
+ * Python side (see rpki/POW/__init__.py).
+ */
+
+/*
+ * Add code to status object, throwing an error if something goes
+ * horribly wrong.
+ */
+
+#define record_validation_status(_status_, _code_) \
+ do { \
+ if (!_record_validation_status(_status_, #_code_)) \
+ goto error; \
+ } while (0)
+
+static int
+_record_validation_status(PyObject *status, const char *code)
+{
+ if (status == Py_None)
+ return 1;
+ PyObject *value = PyString_FromString(code);
+ if (value == NULL)
+ return 0;
+ int result = PySet_Add(status, value);
+ Py_XDECREF(value);
+ return result == 0;
+}
+
+
+
+/*
+ * Detail checking functions. These are only used by the relying
+ * party code, and only when the caller of one of the verification
+ * functions has requested detailed checking by passing in a result
+ * status set object.
+ */
+
+/*
+ * Check whether a Distinguished Name conforms to the rescert profile.
+ * The profile is very restrictive: it only allows one mandatory
+ * CommonName field and one optional SerialNumber field, both of which
+ * must be of type PrintableString.
+ */
+
+static int check_allowed_dn(X509_NAME *dn)
+{
+ X509_NAME_ENTRY *ne;
+ ASN1_STRING *s;
+ int loc;
+
+ if (dn == NULL)
+ return 0;
+
+ switch (X509_NAME_entry_count(dn)) {
+
+ case 2:
+ if ((loc = X509_NAME_get_index_by_NID(dn, NID_serialNumber, -1)) < 0 ||
+ (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
+ (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
+ ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
+ return 0;
+
+ /* Fall through */
+
+ case 1:
+ if ((loc = X509_NAME_get_index_by_NID(dn, NID_commonName, -1)) < 0 ||
+ (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
+ (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
+ ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
+ return 0;
+
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Check whether an ASN.1 TIME value conforms to RFC 5280 4.1.2.5.
+ */
+
+static int check_allowed_time_encoding(ASN1_TIME *t)
+{
+ switch (t->type) {
+
+ case V_ASN1_UTCTIME:
+ return t->length == sizeof("yymmddHHMMSSZ") - 1;
+
+ case V_ASN1_GENERALIZEDTIME:
+ return (t->length == sizeof("yyyymmddHHMMSSZ") - 1 &&
+ strcmp("205", (char *) t->data) <= 0);
+
+ }
+ return 0;
+}
+
+/*
+ * Compare filename fields of two FileAndHash structures.
+ */
+
+static int check_manifest_FileAndHash_name_cmp(const FileAndHash * const *a, const FileAndHash * const *b)
+{
+ return strcmp((char *) (*a)->file->data, (char *) (*b)->file->data);
+}
+
+/*
+ * Check a lot of pesky low-level things about RPKI CRLs.
+ */
+
+static int check_crl(X509_CRL *crl,
+ X509 *issuer,
+ PyObject *status)
+{
+ STACK_OF(X509_REVOKED) *revoked;
+ AUTHORITY_KEYID *aki = NULL;
+ EVP_PKEY *pkey;
+ int i, ret = 0;
+
+ if (crl->crl == NULL ||
+ crl->crl->sig_alg == NULL || crl->crl->sig_alg->algorithm == NULL ||
+ OBJ_obj2nid(crl->crl->sig_alg->algorithm) != NID_sha256WithRSAEncryption)
+ record_validation_status(status, NONCONFORMANT_SIGNATURE_ALGORITHM);
+
+ if (!check_allowed_time_encoding(X509_CRL_get_lastUpdate(crl)) ||
+ !check_allowed_time_encoding(X509_CRL_get_nextUpdate(crl)))
+ record_validation_status(status, NONCONFORMANT_ASN1_TIME_VALUE);
+
+ if ((aki = X509_CRL_get_ext_d2i(crl, NID_authority_key_identifier, NULL, NULL)) == NULL)
+ record_validation_status(status, AKI_EXTENSION_MISSING);
+ else if (aki->keyid == NULL || aki->serial != NULL || aki->issuer != NULL)
+ record_validation_status(status, AKI_EXTENSION_WRONG_FORMAT);
+
+ if (X509_CRL_get_ext_count(crl) > 2)
+ record_validation_status(status, DISALLOWED_X509V3_EXTENSION);
+
+ if (!check_allowed_dn(X509_CRL_get_issuer(crl)))
+ record_validation_status(status, NONCONFORMANT_ISSUER_NAME);
+
+ if ((revoked = X509_CRL_get_REVOKED(crl)) != NULL)
+ for (i = sk_X509_REVOKED_num(revoked) - 1; i >= 0; --i)
+ if (X509_REVOKED_get_ext_count(sk_X509_REVOKED_value(revoked, i)) > 0)
+ record_validation_status(status, DISALLOWED_X509V3_EXTENSION);
+
+ if ((pkey = X509_get_pubkey(issuer)) != NULL) {
+ ret = X509_CRL_verify(crl, pkey) > 0;
+ EVP_PKEY_free(pkey);
+ }
+
+ error:
+ AUTHORITY_KEYID_free(aki);
+ return ret;
+}
+
+/*
+ * Extract one datum from a CMS_SignerInfo.
+ */
+
+static void *extract_si_datum(CMS_SignerInfo *si,
+ int *n,
+ const int optional,
+ const int nid,
+ const int asn1_type)
+{
+ int i = CMS_signed_get_attr_by_NID(si, nid, -1);
+ void *result = NULL;
+ X509_ATTRIBUTE *a;
+
+ if (i < 0 && optional)
+ return NULL;
+
+ if (i >= 0 &&
+ CMS_signed_get_attr_by_NID(si, nid, i) < 0 &&
+ (a = CMS_signed_get_attr(si, i)) != NULL &&
+ X509_ATTRIBUTE_count(a) == 1 &&
+ (result = X509_ATTRIBUTE_get0_data(a, 0, asn1_type, NULL)) != NULL)
+ --*n;
+ else
+ *n = -1;
+
+ return result;
+}
+
+/*
+ * Check a lot of pesky low-level things about RPKI CMS objects.
+ *
+ * We already have code elsewhere for checking X.509 certificates, so
+ * we assume that the caller has already used use that code to check
+ * the embedded EE certificate.
+ */
+
+static int check_cms(CMS_ContentInfo *cms,
+ PyObject *status)
+{
+ STACK_OF(CMS_SignerInfo) *signer_infos = NULL;
+ CMS_SignerInfo *si = NULL;
+ ASN1_OCTET_STRING *sid = NULL;
+ X509_NAME *si_issuer = NULL;
+ ASN1_INTEGER *si_serial = NULL;
+ STACK_OF(X509_CRL) *crls = NULL;
+ STACK_OF(X509) *certs = NULL;
+ X509_ALGOR *signature_alg = NULL, *digest_alg = NULL;
+ ASN1_OBJECT *oid = NULL;
+ X509 *x = NULL;
+ int i, ret = 0;
+
+ if ((crls = CMS_get1_crls(cms)) != NULL)
+ record_validation_status(status, CMS_INCLUDES_CRLS);
+
+ if ((signer_infos = CMS_get0_SignerInfos(cms)) == NULL ||
+ sk_CMS_SignerInfo_num(signer_infos) != 1 ||
+ (si = sk_CMS_SignerInfo_value(signer_infos, 0)) == NULL ||
+ !CMS_SignerInfo_get0_signer_id(si, &sid, &si_issuer, &si_serial) ||
+ sid == NULL || si_issuer != NULL || si_serial != NULL ||
+ CMS_unsigned_get_attr_count(si) != -1)
+ record_validation_status(status, BAD_CMS_SIGNER_INFOS);
+
+ if (si != NULL)
+ CMS_SignerInfo_get0_algs(si, NULL, &x, &digest_alg, &signature_alg);
+
+ if (x == NULL)
+ record_validation_status(status, CMS_SIGNER_MISSING);
+ else if ((certs = CMS_get1_certs(cms)) == NULL ||
+ sk_X509_num(certs) != 1 ||
+ X509_cmp(x, sk_X509_value(certs, 0)))
+ record_validation_status(status, BAD_CMS_SIGNER);
+
+ X509_ALGOR_get0(&oid, NULL, NULL, signature_alg);
+ i = OBJ_obj2nid(oid);
+ if (i != NID_sha256WithRSAEncryption && i != NID_rsaEncryption)
+ record_validation_status(status, WRONG_CMS_SI_SIGNATURE_ALGORITHM);
+
+ X509_ALGOR_get0(&oid, NULL, NULL, digest_alg);
+ if (OBJ_obj2nid(oid) != NID_sha256)
+ record_validation_status(status, WRONG_CMS_SI_DIGEST_ALGORITHM);
+
+ i = CMS_signed_get_attr_count(si);
+
+ (void) extract_si_datum(si, &i, 1, NID_pkcs9_signingTime, V_ASN1_UTCTIME);
+ (void) extract_si_datum(si, &i, 1, NID_binary_signing_time, V_ASN1_INTEGER);
+ oid = extract_si_datum(si, &i, 0, NID_pkcs9_contentType, V_ASN1_OBJECT);
+ (void) extract_si_datum(si, &i, 0, NID_pkcs9_messageDigest, V_ASN1_OCTET_STRING);
+
+ if (i != 0)
+ record_validation_status(status, BAD_CMS_SI_SIGNED_ATTRIBUTES);
+
+ if (OBJ_cmp(oid, CMS_get0_eContentType(cms)) != 0)
+ record_validation_status(status, BAD_CMS_SI_CONTENTTYPE);
+
+ if (si != NULL && x != NULL && CMS_SignerInfo_cert_cmp(si, x))
+ record_validation_status(status, CMS_SKI_MISMATCH);
+
+ ret = 1;
+
+ error:
+ sk_X509_CRL_pop_free(crls, X509_CRL_free);
+ sk_X509_pop_free(certs, X509_free);
+
+ return ret;
+}
+
+/*
+ * Check a lot of pesky low-level things about RPKI manifests.
+ */
+
+#warning Almost everything in this function could be done in Python
+
+static int check_manifest(CMS_ContentInfo *cms,
+ Manifest *manifest,
+ PyObject *status)
+{
+ STACK_OF(FileAndHash) *sorted_fileList = NULL;
+ FileAndHash *fah1 = NULL, *fah2 = NULL;
+ STACK_OF(X509) *certs = NULL;
+ int i, ret = 0;
+
+ if (manifest == NULL)
+ lose_not_verified("Can't check an unverified manifest");
+
+ if (OBJ_obj2nid(CMS_get0_eContentType(cms)) != NID_ct_rpkiManifest)
+ record_validation_status(status, BAD_CMS_ECONTENTTYPE);
+
+#warning Can check value in Python, but not whether encoding was defaulted
+ if (manifest->version)
+ record_validation_status(status, WRONG_OBJECT_VERSION);
+
+ if ((certs = CMS_get1_certs(cms)) == NULL || sk_X509_num(certs) != 1)
+ record_validation_status(status, BAD_CMS_SIGNER);
+
+ if (ASN1_INTEGER_cmp(manifest->manifestNumber, asn1_zero) < 0 ||
+ ASN1_INTEGER_cmp(manifest->manifestNumber, asn1_twenty_octets) > 0)
+ record_validation_status(status, BAD_MANIFEST_NUMBER);
+
+ if (OBJ_obj2nid(manifest->fileHashAlg) != NID_sha256)
+ record_validation_status(status, NONCONFORMANT_DIGEST_ALGORITHM);
+
+ if ((sorted_fileList = sk_FileAndHash_dup(manifest->fileList)) == NULL)
+ lose_no_memory();
+
+ (void) sk_FileAndHash_set_cmp_func(sorted_fileList, check_manifest_FileAndHash_name_cmp);
+ sk_FileAndHash_sort(sorted_fileList);
+
+ for (i = 0; ((fah1 = sk_FileAndHash_value(sorted_fileList, i + 0)) != NULL &&
+ (fah2 = sk_FileAndHash_value(sorted_fileList, i + 1)) != NULL); i++)
+ if (!strcmp((char *) fah1->file->data, (char *) fah2->file->data))
+ record_validation_status(status, DUPLICATE_NAME_IN_MANIFEST);
+
+ for (i = 0; (fah1 = sk_FileAndHash_value(manifest->fileList, i)) != NULL; i++)
+ if (fah1->hash->length != HASH_SHA256_LEN ||
+ (fah1->hash->flags & (ASN1_STRING_FLAG_BITS_LEFT | 7)) > ASN1_STRING_FLAG_BITS_LEFT)
+ record_validation_status(status, BAD_MANIFEST_DIGEST_LENGTH);
+
+ ret = 1;
+
+ error:
+ sk_FileAndHash_free(sorted_fileList);
+ sk_X509_pop_free(certs, X509_free);
+
+ return ret;
+}
+
+/*
+ * Extract a ROA prefix from the ASN.1 bitstring encoding.
+ */
+static int check_roa_extract_roa_prefix(const ROAIPAddress *ra,
+ const unsigned afi,
+ unsigned char *addr,
+ unsigned *prefixlen,
+ unsigned *max_prefixlen)
+{
+ unsigned length;
+ long maxlen;
+
+ assert(ra && addr && prefixlen && max_prefixlen);
+
+ maxlen = ASN1_INTEGER_get(ra->maxLength);
+
+ switch (afi) {
+ case IANA_AFI_IPV4: length = 4; break;
+ case IANA_AFI_IPV6: length = 16; break;
+ default: return 0;
+ }
+
+ if (ra->IPAddress->length < 0 || ra->IPAddress->length > length ||
+ maxlen < 0 || maxlen > (long) length * 8)
+ return 0;
+
+ if (ra->IPAddress->length > 0) {
+ memcpy(addr, ra->IPAddress->data, ra->IPAddress->length);
+ if ((ra->IPAddress->flags & 7) != 0) {
+ unsigned char mask = 0xFF >> (8 - (ra->IPAddress->flags & 7));
+ addr[ra->IPAddress->length - 1] &= ~mask;
+ }
+ }
+
+ memset(addr + ra->IPAddress->length, 0, length - ra->IPAddress->length);
+ *prefixlen = (ra->IPAddress->length * 8) - (ra->IPAddress->flags & 7);
+ *max_prefixlen = ra->maxLength ? (unsigned) maxlen : *prefixlen;
+
+ return 1;
+}
+
+/*
+ * Check a lot of pesky low-level things about RPKI ROAs.
+ */
+
+static int check_roa(CMS_ContentInfo *cms,
+ ROA *roa,
+ PyObject *status)
+{
+ STACK_OF(IPAddressFamily) *roa_resources = NULL, *ee_resources = NULL;
+ unsigned afi, *safi = NULL, safi_, prefixlen, max_prefixlen;
+ unsigned char addrbuf[RAW_IPADDR_BUFLEN];
+ STACK_OF(X509) *certs = NULL;
+ ROAIPAddressFamily *rf;
+ ROAIPAddress *ra;
+ int i, j, result = 0;
+
+ if (roa == NULL)
+ lose_not_verified("Can't check an unverified ROA");
+
+#warning Could be done in Python
+ if (OBJ_obj2nid(CMS_get0_eContentType(cms)) != NID_ct_ROA)
+ record_validation_status(status, BAD_CMS_ECONTENTTYPE);
+
+ if (roa->version)
+ record_validation_status(status, WRONG_OBJECT_VERSION);
+
+#warning Could be done in Python
+ if (ASN1_INTEGER_cmp(roa->asID, asn1_zero) < 0 ||
+ ASN1_INTEGER_cmp(roa->asID, asn1_four_octets) > 0)
+ record_validation_status(status, BAD_ROA_ASID);
+
+#warning Could be done in Python
+ if ((certs = CMS_get1_certs(cms)) == NULL || sk_X509_num(certs) != 1)
+ record_validation_status(status, BAD_CMS_SIGNER);
+
+ if ((ee_resources = X509_get_ext_d2i(sk_X509_value(certs, 0), NID_sbgp_ipAddrBlock, NULL, NULL)) == NULL)
+ record_validation_status(status, BAD_IPADDRBLOCKS);
+
+ /*
+ * Convert ROA prefixes to resource set. This goes on a bit.
+ */
+
+ if ((roa_resources = sk_IPAddressFamily_new_null()) == NULL)
+ lose_no_memory();
+
+ for (i = 0; i < sk_ROAIPAddressFamily_num(roa->ipAddrBlocks); i++) {
+ rf = sk_ROAIPAddressFamily_value(roa->ipAddrBlocks, i);
+
+ if (rf == NULL || rf->addressFamily == NULL)
+ lose_no_memory();
+
+ if (rf->addressFamily->length < 2 || rf->addressFamily->length > 3)
+ record_validation_status(status, MALFORMED_ROA_ADDRESSFAMILY);
+
+ afi = (rf->addressFamily->data[0] << 8) | (rf->addressFamily->data[1]);
+ if (rf->addressFamily->length == 3)
+ *(safi = &safi_) = rf->addressFamily->data[2];
+
+ for (j = 0; j < sk_ROAIPAddress_num(rf->addresses); j++) {
+ ra = sk_ROAIPAddress_value(rf->addresses, j);
+
+ if (ra == NULL ||
+ !check_roa_extract_roa_prefix(ra, afi, addrbuf, &prefixlen, &max_prefixlen) ||
+ !v3_addr_add_prefix(roa_resources, afi, safi, addrbuf, prefixlen))
+ record_validation_status(status, ROA_RESOURCES_MALFORMED);
+
+ else if (max_prefixlen < prefixlen)
+ record_validation_status(status, ROA_MAX_PREFIXLEN_TOO_SHORT);
+ }
+ }
+
+ /*
+ * ROAs can include nested prefixes, so direct translation to
+ * resource sets could include overlapping ranges, which is illegal.
+ * So we have to remove nested stuff before whacking into canonical
+ * form. Fortunately, this is relatively easy, since we know these
+ * are just prefixes, not ranges: in a list of prefixes sorted by
+ * the RFC 3779 rules, the first element of a set of nested prefixes
+ * will always be the least specific.
+ */
+
+ for (i = 0; i < sk_IPAddressFamily_num(roa_resources); i++) {
+ IPAddressFamily *f = sk_IPAddressFamily_value(roa_resources, i);
+
+ if ((afi = v3_addr_get_afi(f)) == 0)
+ record_validation_status(status, ROA_CONTAINS_BAD_AFI_VALUE);
+
+ if (f->ipAddressChoice->type == IPAddressChoice_addressesOrRanges) {
+ IPAddressOrRanges *aors = f->ipAddressChoice->u.addressesOrRanges;
+
+ sk_IPAddressOrRange_sort(aors);
+
+ for (j = 0; j < sk_IPAddressOrRange_num(aors) - 1; j++) {
+ IPAddressOrRange *a = sk_IPAddressOrRange_value(aors, j);
+ IPAddressOrRange *b = sk_IPAddressOrRange_value(aors, j + 1);
+ unsigned char a_min[RAW_IPADDR_BUFLEN], a_max[RAW_IPADDR_BUFLEN];
+ unsigned char b_min[RAW_IPADDR_BUFLEN], b_max[RAW_IPADDR_BUFLEN];
+ int a_len, b_len;
+
+ if ((a_len = v3_addr_get_range(a, afi, a_min, a_max, RAW_IPADDR_BUFLEN)) == 0 ||
+ (b_len = v3_addr_get_range(b, afi, b_min, b_max, RAW_IPADDR_BUFLEN)) == 0 ||
+ a_len != b_len)
+ record_validation_status(status, ROA_RESOURCES_MALFORMED);
+
+ if (memcmp(a_max, b_max, a_len) >= 0) {
+ (void) sk_IPAddressOrRange_delete(aors, j + 1);
+ IPAddressOrRange_free(b);
+ --j;
+ }
+ }
+ }
+ }
+
+ if (!v3_addr_canonize(roa_resources))
+ record_validation_status(status, ROA_RESOURCES_MALFORMED);
+
+ if (ee_resources == NULL || !v3_addr_subset(roa_resources, ee_resources))
+ record_validation_status(status, ROA_RESOURCE_NOT_IN_EE);
+
+ result = 1;
+
+ error:
+ sk_IPAddressFamily_pop_free(roa_resources, IPAddressFamily_free);
+ sk_IPAddressFamily_pop_free(ee_resources, IPAddressFamily_free);
+ sk_X509_pop_free(certs, X509_free);
+
+ return result;
+}
+
+
+
+/*
+ * Extension functions.
*/
#define EXTENSION_GET_KEY_USAGE__DOC__ \
@@ -1096,7 +1662,7 @@ whack_ec_key_to_namedCurve(EVP_PKEY *pkey)
"extension. The bits have the same names as in RFC 5280.\n"
static PyObject *
-extension_get_key_usage(X509_EXTENSIONS **exts)
+extension_get_key_usage(X509_EXTENSION *ext_)
{
ASN1_BIT_STRING *ext = NULL;
PyObject *result = NULL;
@@ -1105,12 +1671,12 @@ extension_get_key_usage(X509_EXTENSIONS **exts)
ENTERING(extension_get_key_usage);
- if (!exts)
- goto error;
-
- if ((ext = X509V3_get_d2i(*exts, NID_key_usage, NULL, NULL)) == NULL)
+ if (!ext_)
Py_RETURN_NONE;
+ if ((ext = X509V3_EXT_d2i(ext_)) == NULL)
+ lose_openssl_error("Couldn't parse KeyUsage extension");
+
if ((result = PyFrozenSet_New(NULL)) == NULL)
goto error;
@@ -1141,8 +1707,14 @@ extension_get_key_usage(X509_EXTENSIONS **exts)
"should be marked as critical or not. RFC 5280 4.2.1.3 says this extension SHOULD\n" \
"be marked as critical when used, so the default is True.\n"
-static PyObject *
-extension_set_key_usage(X509_EXTENSIONS **exts, PyObject *args)
+static void
+extension_set_key_usage_destructor(void *value)
+{
+ ASN1_BIT_STRING_free(value);
+}
+
+static extension_wrapper
+extension_set_key_usage(PyObject *args)
{
ASN1_BIT_STRING *ext = NULL;
PyObject *iterable = NULL;
@@ -1151,12 +1723,10 @@ extension_set_key_usage(X509_EXTENSIONS **exts, PyObject *args)
PyObject *item = NULL;
const char *token;
int bit = -1;
- int ok = 0;
- ENTERING(extension_set_key_usage);
+ extension_wrapper result = {extension_set_key_usage_destructor};
- if (!exts)
- goto error;
+ ENTERING(extension_set_key_usage);
if ((ext = ASN1_BIT_STRING_new()) == NULL)
lose_no_memory();
@@ -1184,22 +1754,16 @@ extension_set_key_usage(X509_EXTENSIONS **exts, PyObject *args)
item = NULL;
}
- if (!X509V3_add1_i2d(exts, NID_key_usage, ext,
- PyObject_IsTrue(critical),
- X509V3_ADD_REPLACE))
- lose_openssl_error("Couldn't add KeyUsage extension to OpenSSL object");
-
- ok = 1;
+ result.value = ext;
+ result.nid = NID_key_usage;
+ result.critical = PyObject_IsTrue(critical);
+ ext = NULL;
error: /* Fall through */
ASN1_BIT_STRING_free(ext);
Py_XDECREF(iterator);
Py_XDECREF(item);
-
- if (ok)
- Py_RETURN_NONE;
- else
- return NULL;
+ return result;
}
#define EXTENSION_GET_BASIC_CONSTRAINTS__DOC__ \
@@ -1211,19 +1775,19 @@ extension_set_key_usage(X509_EXTENSIONS **exts, PyObject *args)
"pathLenConstraint value or None if there is no pathLenConstraint.\n"
static PyObject *
-extension_get_basic_constraints(X509_EXTENSIONS **exts)
+extension_get_basic_constraints(X509_EXTENSION *ext_)
{
BASIC_CONSTRAINTS *ext = NULL;
PyObject *result = NULL;
ENTERING(extension_get_basic_constraints);
- if (!exts)
- goto error;
-
- if ((ext = X509V3_get_d2i(*exts, NID_basic_constraints, NULL, NULL)) == NULL)
+ if (!ext_)
Py_RETURN_NONE;
+ if ((ext = X509V3_EXT_d2i(ext_)) == NULL)
+ lose_openssl_error("Couldn't parse BasicConstraints extension");
+
if (ext->pathlen == NULL)
result = Py_BuildValue("(NO)", PyBool_FromLong(ext->ca), Py_None);
else
@@ -1246,26 +1810,30 @@ extension_get_basic_constraints(X509_EXTENSIONS **exts)
"should be marked as critical. RFC 5280 4.2.1.9 requires that CA\n" \
"certificates mark this extension as critical, so the default is True.\n"
-static PyObject *
-extension_set_basic_constraints(X509_EXTENSIONS **exts, PyObject *args)
+static void
+extension_set_basic_constraints_destructor(void *value)
+{
+ BASIC_CONSTRAINTS_free(value);
+}
+
+static extension_wrapper
+extension_set_basic_constraints(PyObject *args)
{
BASIC_CONSTRAINTS *ext = NULL;
PyObject *is_ca = NULL;
PyObject *pathlen_obj = Py_None;
PyObject *critical = Py_True;
long pathlen = -1;
- int ok = 0;
- ENTERING(extension_set_basic_constraints);
+ extension_wrapper result = {extension_set_basic_constraints_destructor};
- if (!exts)
- goto error;
+ ENTERING(extension_set_basic_constraints);
if (!PyArg_ParseTuple(args, "O|OO", &is_ca, &pathlen_obj, &critical))
goto error;
if (pathlen_obj != Py_None && (pathlen = PyInt_AsLong(pathlen_obj)) < 0)
- lose_type_error("Bad pathLenConstraint value");
+ lose_value_error("Bad pathLenConstraint value");
if ((ext = BASIC_CONSTRAINTS_new()) == NULL)
lose_no_memory();
@@ -1277,54 +1845,51 @@ extension_set_basic_constraints(X509_EXTENSIONS **exts, PyObject *args)
!ASN1_INTEGER_set(ext->pathlen, pathlen)))
lose_no_memory();
- if (!X509V3_add1_i2d(exts, NID_basic_constraints, ext,
- PyObject_IsTrue(critical), X509V3_ADD_REPLACE))
- lose_openssl_error("Couldn't add BasicConstraints extension to OpenSSL object");
+ result.value = ext;
+ result.nid = NID_basic_constraints;
+ result.critical = PyObject_IsTrue(critical);
+ ext = NULL;
- ok = 1;
-
- error:
+ error: /* Fall through */
BASIC_CONSTRAINTS_free(ext);
-
- if (ok)
- Py_RETURN_NONE;
- else
- return NULL;
+ return result;
}
#define EXTENSION_GET_SIA__DOC__ \
"If there is no SIA extension, this method returns None.\n" \
"\n" \
- "Otherwise, it returns a tuple containing three values:\n" \
- "caRepository URIs, rpkiManifest URIs, and signedObject URIs.\n" \
+ "Otherwise, it returns a tuple containing four values:\n" \
+ "caRepository URIs, rpkiManifest URIs, signedObject, and rpkiNotify URIs.\n" \
"Each of these values is a tuple of strings, representing an ordered\n" \
"sequence of URIs. Any or all of these sequences may be empty.\n" \
"\n" \
"Any other accessMethods are ignored, as are any non-URI accessLocations.\n"
static PyObject *
-extension_get_sia(X509_EXTENSIONS **exts)
+extension_get_sia(X509_EXTENSION *ext_)
{
AUTHORITY_INFO_ACCESS *ext = NULL;
PyObject *result = NULL;
PyObject *result_caRepository = NULL;
PyObject *result_rpkiManifest = NULL;
PyObject *result_signedObject = NULL;
+ PyObject *result_rpkiNotify = NULL;
int n_caRepository = 0;
int n_rpkiManifest = 0;
int n_signedObject = 0;
+ int n_rpkiNotify = 0;
const char *uri;
PyObject *obj;
int i, nid;
- ENTERING(pkcs10_object_get_sia);
-
- if (!exts)
- goto error;
+ ENTERING(extension_get_sia);
- if ((ext = X509V3_get_d2i(*exts, NID_sinfo_access, NULL, NULL)) == NULL)
+ if (!ext_)
Py_RETURN_NONE;
+ if ((ext = X509V3_EXT_d2i(ext_)) == NULL)
+ lose_openssl_error("Couldn't parse SubjectInformationAccess extension");
+
/*
* Easiest to do this in two passes, first pass just counts URIs.
*/
@@ -1334,26 +1899,23 @@ extension_get_sia(X509_EXTENSIONS **exts)
if (a->location->type != GEN_URI)
continue;
nid = OBJ_obj2nid(a->method);
- if (nid == NID_caRepository) {
+ if (nid == NID_caRepository)
n_caRepository++;
- continue;
- }
- if (nid == NID_rpkiManifest) {
+ else if (nid == NID_ad_rpkiManifest)
n_rpkiManifest++;
- continue;
- }
- if (nid == NID_signedObject) {
+ else if (nid == NID_ad_signedObject)
n_signedObject++;
- continue;
- }
+ else if (nid == NID_ad_rpkiNotify)
+ n_rpkiNotify++;
}
if (((result_caRepository = PyTuple_New(n_caRepository)) == NULL) ||
((result_rpkiManifest = PyTuple_New(n_rpkiManifest)) == NULL) ||
- ((result_signedObject = PyTuple_New(n_signedObject)) == NULL))
+ ((result_signedObject = PyTuple_New(n_signedObject)) == NULL) ||
+ ((result_rpkiNotify = PyTuple_New(n_rpkiNotify)) == NULL))
goto error;
- n_caRepository = n_rpkiManifest = n_signedObject = 0;
+ n_caRepository = n_rpkiManifest = n_signedObject = n_rpkiNotify = 0;
for (i = 0; i < sk_ACCESS_DESCRIPTION_num(ext); i++) {
ACCESS_DESCRIPTION *a = sk_ACCESS_DESCRIPTION_value(ext, i);
@@ -1367,65 +1929,79 @@ extension_get_sia(X509_EXTENSIONS **exts)
PyTuple_SET_ITEM(result_caRepository, n_caRepository++, obj);
continue;
}
- if (nid == NID_rpkiManifest) {
+ if (nid == NID_ad_rpkiManifest) {
if ((obj = PyString_FromString(uri)) == NULL)
goto error;
PyTuple_SET_ITEM(result_rpkiManifest, n_rpkiManifest++, obj);
continue;
}
- if (nid == NID_signedObject) {
+ if (nid == NID_ad_signedObject) {
if ((obj = PyString_FromString(uri)) == NULL)
goto error;
PyTuple_SET_ITEM(result_signedObject, n_signedObject++, obj);
continue;
}
+ if (nid == NID_ad_rpkiNotify) {
+ if ((obj = PyString_FromString(uri)) == NULL)
+ goto error;
+ PyTuple_SET_ITEM(result_rpkiNotify, n_rpkiNotify++, obj);
+ continue;
+ }
}
- result = Py_BuildValue("(OOO)",
+ result = Py_BuildValue("(OOOO)",
result_caRepository,
result_rpkiManifest,
- result_signedObject);
+ result_signedObject,
+ result_rpkiNotify);
error:
AUTHORITY_INFO_ACCESS_free(ext);
Py_XDECREF(result_caRepository);
Py_XDECREF(result_rpkiManifest);
Py_XDECREF(result_signedObject);
+ Py_XDECREF(result_rpkiNotify);
return result;
}
#define EXTENSION_SET_SIA__DOC__ \
- "This method Takes three arguments:\n" \
- "\"caRepository\", \"rpkiManifest\", and \"signedObject\".\n" \
+ "This method takes four arguments: \"caRepository\",\n" \
+ "\"rpkiManifest\", \"signedObject\", and \"rpkiNotify\".\n" \
"Each of these should be an iterable which returns URIs.\n" \
"\n" \
"None is acceptable as an alternate way of specifying an empty\n" \
"collection of URIs for a particular argument.\n"
-static PyObject *
-extension_set_sia(X509_EXTENSIONS **exts, PyObject *args, PyObject *kwds)
+static void
+extension_set_sia_destructor(void *value)
+{
+ AUTHORITY_INFO_ACCESS_free(value);
+}
+
+static extension_wrapper
+extension_set_sia(PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"caRepository", "rpkiManifest", "signedObject", NULL};
+ static char *kwlist[] = {"caRepository", "rpkiManifest", "signedObject", "rpkiNotify", NULL};
AUTHORITY_INFO_ACCESS *ext = NULL;
PyObject *caRepository = Py_None;
PyObject *rpkiManifest = Py_None;
PyObject *signedObject = Py_None;
+ PyObject *rpkiNotify = Py_None;
PyObject *iterator = NULL;
ASN1_OBJECT *oid = NULL;
PyObject **pobj = NULL;
PyObject *item = NULL;
ACCESS_DESCRIPTION *a = NULL;
- int i, nid = NID_undef, ok = 0;
+ int i, nid = NID_undef;
Py_ssize_t urilen;
char *uri;
- ENTERING(extension_set_sia);
+ extension_wrapper result = {extension_set_sia_destructor};
- if (!exts)
- goto error;
+ ENTERING(extension_set_sia);
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", kwlist,
- &caRepository, &rpkiManifest, &signedObject))
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOOO", kwlist,
+ &caRepository, &rpkiManifest, &signedObject, &rpkiNotify))
goto error;
if ((ext = AUTHORITY_INFO_ACCESS_new()) == NULL)
@@ -1437,11 +2013,12 @@ extension_set_sia(X509_EXTENSIONS **exts, PyObject *args, PyObject *kwds)
* single URI as an abbreviation for a collection containing one URI.
*/
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < 4; i++) {
switch (i) {
- case 0: pobj = &caRepository; nid = NID_caRepository; break;
- case 1: pobj = &rpkiManifest; nid = NID_rpkiManifest; break;
- case 2: pobj = &signedObject; nid = NID_signedObject; break;
+ case 0: pobj = &caRepository; nid = NID_caRepository; break;
+ case 1: pobj = &rpkiManifest; nid = NID_ad_rpkiManifest; break;
+ case 2: pobj = &signedObject; nid = NID_ad_signedObject; break;
+ case 3: pobj = &rpkiNotify; nid = NID_ad_rpkiNotify; break;
}
if (*pobj == Py_None)
@@ -1461,7 +2038,8 @@ extension_set_sia(X509_EXTENSIONS **exts, PyObject *args, PyObject *kwds)
if ((a = ACCESS_DESCRIPTION_new()) == NULL ||
(a->method = OBJ_dup(oid)) == NULL ||
(a->location->d.uniformResourceIdentifier = ASN1_IA5STRING_new()) == NULL ||
- !ASN1_OCTET_STRING_set(a->location->d.uniformResourceIdentifier, (unsigned char *) uri, urilen))
+ !ASN1_OCTET_STRING_set(a->location->d.uniformResourceIdentifier,
+ (unsigned char *) uri, urilen))
lose_no_memory();
a->location->type = GEN_URI;
@@ -1478,21 +2056,17 @@ extension_set_sia(X509_EXTENSIONS **exts, PyObject *args, PyObject *kwds)
iterator = NULL;
}
- if (!X509V3_add1_i2d(exts, NID_sinfo_access, ext, 0, X509V3_ADD_REPLACE))
- lose_openssl_error("Couldn't add SIA extension to OpenSSL object");
-
- ok = 1;
+ result.value = ext;
+ result.nid = NID_sinfo_access;
+ result.critical = 0;
+ ext = NULL;
- error:
+ error: /* Fall through */
AUTHORITY_INFO_ACCESS_free(ext);
ACCESS_DESCRIPTION_free(a);
Py_XDECREF(item);
Py_XDECREF(iterator);
-
- if (ok)
- Py_RETURN_NONE;
- else
- return NULL;
+ return result;
}
#define EXTENSION_GET_EKU__DOC__ \
@@ -1501,7 +2075,7 @@ extension_set_sia(X509_EXTENSIONS **exts, PyObject *args, PyObject *kwds)
"has no ExtendedKeyUsage extension.\n"
static PyObject *
-extension_get_eku(X509_EXTENSIONS **exts)
+extension_get_eku(X509_EXTENSION *ext_)
{
EXTENDED_KEY_USAGE *ext = NULL;
PyObject *result = NULL;
@@ -1510,12 +2084,12 @@ extension_get_eku(X509_EXTENSIONS **exts)
ENTERING(extension_get_eku);
- if (!exts)
- goto error;
-
- if ((ext = X509V3_get_d2i(*exts, NID_ext_key_usage, NULL, NULL)) == NULL)
+ if (!ext_)
Py_RETURN_NONE;
+ if ((ext = X509V3_EXT_d2i(ext_)) == NULL)
+ lose_openssl_error("Couldn't parse ExtendedKeyUsage extension");
+
if ((result = PyFrozenSet_New(NULL)) == NULL)
goto error;
@@ -1545,8 +2119,14 @@ extension_get_eku(X509_EXTENSIONS **exts)
"should be marked as critical or not. RFC 6487 4.8.5 says this extension\n" \
"MUST NOT be marked as non-critical when used, so the default is False.\n"
-static PyObject *
-extension_set_eku(X509_EXTENSIONS **exts, PyObject *args)
+static void
+extension_set_eku_destructor(void *value)
+{
+ sk_ASN1_OBJECT_pop_free(value, ASN1_OBJECT_free);
+}
+
+static extension_wrapper
+extension_set_eku(PyObject *args)
{
EXTENDED_KEY_USAGE *ext = NULL;
PyObject *iterable = NULL;
@@ -1555,12 +2135,10 @@ extension_set_eku(X509_EXTENSIONS **exts, PyObject *args)
PyObject *item = NULL;
ASN1_OBJECT *obj = NULL;
const char *txt;
- int ok = 0;
- ENTERING(extension_set_eku);
+ extension_wrapper result = {extension_set_eku_destructor};
- if (!exts)
- goto error;
+ ENTERING(extension_set_eku);
if ((ext = sk_ASN1_OBJECT_new_null()) == NULL)
lose_no_memory();
@@ -1588,22 +2166,16 @@ extension_set_eku(X509_EXTENSIONS **exts, PyObject *args)
if (sk_ASN1_OBJECT_num(ext) < 1)
lose("Empty ExtendedKeyUsage extension");
- if (!X509V3_add1_i2d(exts, NID_ext_key_usage, ext,
- PyObject_IsTrue(critical),
- X509V3_ADD_REPLACE))
- lose_openssl_error("Couldn't add ExtendedKeyUsage extension to OpenSSL object");
-
- ok = 1;
+ result.value = ext;
+ result.nid = NID_ext_key_usage;
+ result.critical = PyObject_IsTrue(critical);
+ ext = NULL;
error: /* Fall through */
sk_ASN1_OBJECT_pop_free(ext, ASN1_OBJECT_free);
Py_XDECREF(item);
Py_XDECREF(iterator);
-
- if (ok)
- Py_RETURN_NONE;
- else
- return NULL;
+ return result;
}
#define EXTENSION_GET_SKI__DOC__ \
@@ -1611,19 +2183,19 @@ extension_set_eku(X509_EXTENSIONS **exts, PyObject *args)
"or None if the object has no SKI extension.\n"
static PyObject *
-extension_get_ski(X509_EXTENSIONS **exts)
+extension_get_ski(X509_EXTENSION *ext_)
{
ASN1_OCTET_STRING *ext = NULL;
PyObject *result = NULL;
ENTERING(extension_get_ski);
- if (!exts)
- goto error;
-
- if ((ext = X509V3_get_d2i(*exts, NID_subject_key_identifier, NULL, NULL)) == NULL)
+ if (!ext_)
Py_RETURN_NONE;
+ if ((ext = X509V3_EXT_d2i(ext_)) == NULL)
+ lose_openssl_error("Couldn't parse SubjectKeyIdentifier extension");
+
result = Py_BuildValue("s#", ASN1_STRING_data(ext),
(Py_ssize_t) ASN1_STRING_length(ext));
@@ -1635,18 +2207,22 @@ extension_get_ski(X509_EXTENSIONS **exts)
#define EXTENSION_SET_SKI__DOC__ \
"Set the Subject Key Identifier (SKI) value for this object.\n"
-static PyObject *
-extension_set_ski(X509_EXTENSIONS **exts, PyObject *args)
+static void
+extension_set_ski_destructor(void *value)
+{
+ ASN1_OCTET_STRING_free(value);
+}
+
+static extension_wrapper
+extension_set_ski(PyObject *args)
{
ASN1_OCTET_STRING *ext = NULL;
const unsigned char *buf = NULL;
Py_ssize_t len;
- int ok = 0;
- ENTERING(extension_set_ski);
+ extension_wrapper result = {extension_set_ski_destructor};
- if (!exts)
- goto error;
+ ENTERING(extension_set_ski);
if (!PyArg_ParseTuple(args, "s#", &buf, &len))
goto error;
@@ -1659,19 +2235,14 @@ extension_set_ski(X509_EXTENSIONS **exts, PyObject *args)
* RFC 5280 says this MUST be non-critical.
*/
- if (!X509V3_add1_i2d(exts, NID_subject_key_identifier,
- ext, 0, X509V3_ADD_REPLACE))
- lose_openssl_error("Couldn't add SKI extension to OpenSSL object");
-
- ok = 1;
+ result.value = ext;
+ result.nid = NID_subject_key_identifier;
+ result.critical = 0;
+ ext = NULL;
error:
ASN1_OCTET_STRING_free(ext);
-
- if (ok)
- Py_RETURN_NONE;
- else
- return NULL;
+ return result;
}
#define EXTENSION_GET_AKI__DOC__ \
@@ -1680,19 +2251,19 @@ extension_set_ski(X509_EXTENSIONS **exts, PyObject *args)
"no keyIdentifier value.\n"
static PyObject *
-extension_get_aki(X509_EXTENSIONS **exts)
+extension_get_aki(X509_EXTENSION *ext_)
{
AUTHORITY_KEYID *ext = NULL;
PyObject *result = NULL;
ENTERING(extension_get_aki);
- if (!exts)
- goto error;
-
- if ((ext = X509V3_get_d2i(*exts, NID_authority_key_identifier, NULL, NULL)) == NULL)
+ if (!ext_)
Py_RETURN_NONE;
+ if ((ext = X509V3_EXT_d2i(ext_)) == NULL)
+ lose_openssl_error("Couldn't parse AuthorityKeyIdentifier extension");
+
result = Py_BuildValue("s#", ASN1_STRING_data(ext->keyid),
(Py_ssize_t) ASN1_STRING_length(ext->keyid));
@@ -1707,13 +2278,20 @@ extension_get_aki(X509_EXTENSIONS **exts)
"We only support the keyIdentifier method, as that's the only form\n" \
"which is legal for RPKI certificates.\n"
-static PyObject *
-extension_set_aki(X509_EXTENSIONS **exts, PyObject *args)
+static void
+extension_set_aki_destructor(void *value)
+{
+ AUTHORITY_KEYID_free(value);
+}
+
+static extension_wrapper
+extension_set_aki(PyObject *args)
{
AUTHORITY_KEYID *ext = NULL;
const unsigned char *buf = NULL;
Py_ssize_t len;
- int ok = 0;
+
+ extension_wrapper result = {extension_set_aki_destructor};
ENTERING(extension_set_aki);
@@ -1731,19 +2309,14 @@ extension_set_aki(X509_EXTENSIONS **exts, PyObject *args)
* RFC 5280 says this MUST be non-critical.
*/
- if (!X509V3_add1_i2d(exts, NID_authority_key_identifier,
- ext, 0, X509V3_ADD_REPLACE))
- lose_openssl_error("Couldn't add AKI extension to OpenSSL object");
-
- ok = 1;
+ result.value = ext;
+ result.nid = NID_authority_key_identifier;
+ result.critical = 0;
+ ext = NULL;
error:
AUTHORITY_KEYID_free(ext);
-
- if (ok)
- Py_RETURN_NONE;
- else
- return NULL;
+ return result;
}
@@ -2417,13 +2990,34 @@ x509_object_der_write(x509_object *self)
return result;
}
-static X509_EXTENSIONS **
-x509_object_extension_helper(x509_object *self)
+static X509_EXTENSION *
+x509_object_extension_get_helper(x509_object *self, int nid)
{
- if (self && self->x509 && self->x509->cert_info)
- return &self->x509->cert_info->extensions;
- PyErr_SetString(PyExc_ValueError, "Can't find X509_EXTENSIONS in X509 object");
- return NULL;
+ if (self != NULL && self->x509 != NULL)
+ return X509_get_ext(self->x509, X509_get_ext_by_NID(self->x509, nid, -1));
+ else
+ return NULL;
+}
+
+static PyObject *
+x509_object_extension_set_helper(x509_object *self, extension_wrapper ext)
+{
+ int ok = 0;
+
+ if (ext.value == NULL)
+ goto error;
+
+ if (!X509_add1_ext_i2d(self->x509, ext.nid, ext.value, ext.critical, X509V3_ADD_REPLACE))
+ lose_openssl_error("Couldn't add extension to certificate");
+
+ ok = 1;
+
+ error:
+ ext.destructor(ext.value);
+ if (ok)
+ Py_RETURN_NONE;
+ else
+ return NULL;
}
static char x509_object_get_public_key__doc__[] =
@@ -2489,8 +3083,6 @@ static char x509_object_sign__doc__[] =
"The optional \"digest\" parameter indicates which digest to compute and\n"
"sign, and should be one of the following:\n"
"\n"
- "* MD5_DIGEST\n"
- "* SHA_DIGEST\n"
"* SHA1_DIGEST\n"
"* SHA256_DIGEST\n"
"* SHA384_DIGEST\n"
@@ -2523,6 +3115,407 @@ x509_object_sign(x509_object *self, PyObject *args)
return NULL;
}
+static int x509_store_ctx_object_verify_cb(int ok, X509_STORE_CTX *ctx);
+
+static char x509_object_verify__doc__[] =
+ "Verify a certificate.\n"
+ ;
+
+#warning Write real x509_object_verify__doc__[] once API is stable.
+
+static PyObject *
+x509_object_verify(x509_object *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"trusted", "untrusted", "crl", "policy", "context_class", NULL};
+ PyObject *ctxclass = (PyObject *) &POW_X509StoreCTX_Type;
+ STACK_OF(X509) *trusted_stack = NULL;
+ STACK_OF(X509) *untrusted_stack = NULL;
+ STACK_OF(X509_CRL) *crl_stack = NULL;
+ x509_store_ctx_object *ctx = NULL;
+ PyObject *trusted = Py_None;
+ PyObject *untrusted = Py_None;
+ PyObject *crl = Py_None;
+ PyObject *policy = Py_None;
+ int ok = 0;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOOOO", kwlist, &trusted, &untrusted, &crl, &policy, &ctxclass))
+ goto error;
+
+ if ((trusted_stack = x509_helper_iterable_to_stack(trusted)) == NULL)
+ goto error;
+
+ if ((untrusted_stack = x509_helper_iterable_to_stack(untrusted)) == NULL)
+ goto error;
+
+ if (crl != Py_None && !POW_CRL_Check(crl))
+ lose_type_error("Not a CRL");
+
+ if (crl != Py_None && ((crl_stack = sk_X509_CRL_new_null()) == NULL ||
+ !sk_X509_CRL_push(crl_stack, ((crl_object *) crl)->crl)))
+ lose_no_memory();
+
+ if (!PyCallable_Check(ctxclass))
+ lose_type_error("Context class must be callable");
+
+ if ((ctx = (x509_store_ctx_object *) PyObject_CallFunctionObjArgs(ctxclass, NULL)) == NULL)
+ goto error;
+
+ if (!POW_X509StoreCTX_Check(ctx))
+ lose_type_error("Returned context is not a X509StoreCTX");
+
+ if (ctx->ctx == NULL)
+ lose("Uninitialized X509StoreCTX");
+
+ if (crl != Py_None)
+ X509_VERIFY_PARAM_set_flags(ctx->ctx->param, X509_V_FLAG_CRL_CHECK);
+
+ if (policy != Py_None) {
+ const char *oid_txt = NULL;
+ ASN1_OBJECT *oid_obj = NULL;
+
+ if ((oid_txt = PyString_AsString(policy)) == NULL)
+ goto error;
+
+ if ((oid_obj = OBJ_txt2obj(oid_txt, 1)) == NULL)
+ lose("Couldn't parse policy OID");
+
+ X509_VERIFY_PARAM_set_flags(ctx->ctx->param, X509_V_FLAG_POLICY_CHECK | X509_V_FLAG_EXPLICIT_POLICY);
+ X509_VERIFY_PARAM_add0_policy(ctx->ctx->param, oid_obj);
+ }
+
+ Py_XINCREF(trusted);
+ Py_XINCREF(untrusted);
+ Py_XINCREF(crl);
+ X509_STORE_CTX_set_cert(ctx->ctx, self->x509);
+ X509_STORE_CTX_trusted_stack(ctx->ctx, trusted_stack);
+ X509_STORE_CTX_set_chain(ctx->ctx, untrusted_stack);
+ X509_STORE_CTX_set0_crls(ctx->ctx, crl_stack);
+
+ X509_STORE_CTX_set_verify_cb(ctx->ctx, x509_store_ctx_object_verify_cb);
+ X509_VERIFY_PARAM_set_flags(ctx->ctx->param, X509_V_FLAG_X509_STRICT);
+
+ ok = X509_verify_cert(ctx->ctx) >= 0;
+
+ X509_STORE_CTX_set0_crls(ctx->ctx, NULL);
+ X509_STORE_CTX_set_chain(ctx->ctx, NULL);
+ X509_STORE_CTX_trusted_stack(ctx->ctx, NULL);
+ X509_STORE_CTX_set_cert(ctx->ctx, NULL);
+ Py_XDECREF(crl);
+ Py_XDECREF(untrusted);
+ Py_XDECREF(trusted);
+
+ if (PyErr_Occurred())
+ goto error;
+
+ if (!ok)
+ lose_validation_error("X509_verify_cert() raised an exception");
+
+ error:
+ sk_X509_free(trusted_stack);
+ sk_X509_free(untrusted_stack);
+ sk_X509_CRL_free(crl_stack);
+
+ if (ok)
+ return (PyObject *) ctx;
+
+ Py_XDECREF(ctx);
+ return NULL;
+}
+
+
+static char x509_object_check_rpki_conformance__doc__[] =
+ "Check a certificate for conformance to the RPKI profile.\n"
+ ;
+
+#warning Write real x509_object_check_rpki_conformance__doc__[] once API is stable.
+
+static PyObject *
+x509_object_check_rpki_conformance(x509_object *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"status", "eku", NULL};
+ PyObject *status = Py_None;
+ PyObject *ekuarg = Py_None;
+ EVP_PKEY *issuer_pkey = NULL, *subject_pkey = NULL;
+ AUTHORITY_INFO_ACCESS *sia = NULL, *aia = NULL;
+ STACK_OF(POLICYINFO) *policies = NULL;
+ ASN1_BIT_STRING *ski_pubkey = NULL;
+ STACK_OF(DIST_POINT) *crldp = NULL;
+ EXTENDED_KEY_USAGE *eku = NULL;
+ BASIC_CONSTRAINTS *bc = NULL;
+ ASN1_OCTET_STRING *ski = NULL;
+ AUTHORITY_KEYID *aki = NULL;
+ ASIdentifiers *asid = NULL;
+ IPAddrBlocks *addr = NULL;
+ unsigned char ski_hashbuf[EVP_MAX_MD_SIZE];
+ unsigned ski_hashlen, afi;
+ int i, ok, crit, ex_count, is_ca = 0, ekunid = NID_undef, ret = 0;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O", kwlist, &PySet_Type, &status, &ekuarg))
+ goto error;
+
+ if (ekuarg != Py_None) {
+ const char *ekutxt = PyString_AsString(ekuarg);
+ if (ekutxt == NULL)
+ goto error;
+ ekunid = OBJ_txt2nid(ekutxt);
+ }
+
+ /*
+ * We don't use X509_check_ca() to check whether the certificate is
+ * a CA, because it's not paranoid enough to enforce the RPKI
+ * certificate profile, but we still call it because we need it (or
+ * something) to invoke x509v3_cache_extensions() for us.
+ */
+
+ (void) X509_check_ca(self->x509);
+
+ if (!check_allowed_time_encoding(X509_get_notBefore(self->x509)) ||
+ !check_allowed_time_encoding(X509_get_notAfter(self->x509)))
+ record_validation_status(status, NONCONFORMANT_ASN1_TIME_VALUE);
+
+ if (X509_get_signature_nid(self->x509) != NID_sha256WithRSAEncryption)
+ record_validation_status(status, NONCONFORMANT_SIGNATURE_ALGORITHM);
+
+ if (!check_allowed_dn(X509_get_subject_name(self->x509)))
+ record_validation_status(status, NONCONFORMANT_SUBJECT_NAME);
+
+ if (!check_allowed_dn(X509_get_issuer_name(self->x509)))
+ record_validation_status(status, NONCONFORMANT_ISSUER_NAME);
+
+ /*
+ * Apparently nothing ever looks at these fields. We wouldn't
+ * bother either if they weren't forbidden by the RPKI certificate
+ * profile.
+ */
+
+ if (!self->x509->cert_info || self->x509->cert_info->issuerUID || self->x509->cert_info->subjectUID)
+ record_validation_status(status, NONCONFORMANT_CERTIFICATE_UID);
+
+ /*
+ * Public key checks postponed until we've checked extensions (in
+ * particular, until we've checked Basic Constraints and know
+ * whether to apply the CA or EE rules).
+ */
+
+ /*
+ * Keep track of allowed extensions we've seen. Once we've
+ * processed all the ones we expect, anything left is an error.
+ */
+
+ ex_count = X509_get_ext_count(self->x509);
+
+ /* Critical */
+ if ((bc = X509_get_ext_d2i(self->x509, NID_basic_constraints, &crit, NULL)) != NULL) {
+ ex_count--;
+ if (!crit || bc->ca <= 0 || bc->pathlen != NULL)
+ record_validation_status(status, MALFORMED_BASIC_CONSTRAINTS);
+ }
+
+ is_ca = bc != NULL;
+
+ /*
+ * Check for presence of AIA, SIA, and CRLDP, and make sure that
+ * they're in the correct format, but leave checking of the URIs
+ * themselves for Python code to handle.
+ */
+
+ /* Non-critical */
+ if ((aia = X509_get_ext_d2i(self->x509, NID_info_access, &crit, NULL)) != NULL) {
+ ex_count--;
+ if (crit)
+ record_validation_status(status, GRATUITOUSLY_CRITICAL_EXTENSION);
+ ok = sk_ACCESS_DESCRIPTION_num(aia) > 0;
+ for (i = 0; ok && i < sk_ACCESS_DESCRIPTION_num(aia); i++) {
+ ACCESS_DESCRIPTION *a = sk_ACCESS_DESCRIPTION_value(aia, i);
+ ok = (a != NULL && a->location->type == GEN_URI &&
+ OBJ_obj2nid(a->method) == NID_ad_ca_issuers);
+ }
+ if (!ok)
+ record_validation_status(status, MALFORMED_AIA_EXTENSION);
+ }
+
+ /* Non-critical */
+ if ((sia = X509_get_ext_d2i(self->x509, NID_sinfo_access, &crit, NULL)) != NULL) {
+ ex_count--;
+ if (crit)
+ record_validation_status(status, GRATUITOUSLY_CRITICAL_EXTENSION);
+ ok = sk_ACCESS_DESCRIPTION_num(sia) > 0;
+ for (i = 0; ok && i < sk_ACCESS_DESCRIPTION_num(sia); i++) {
+ ACCESS_DESCRIPTION *a = sk_ACCESS_DESCRIPTION_value(sia, i);
+ int nid = a == NULL ? NID_undef : OBJ_obj2nid(a->method);
+ ok = (a != NULL && a->location->type == GEN_URI &&
+ (nid == NID_caRepository || nid == NID_ad_rpkiManifest ||
+ nid == NID_ad_signedObject || nid == NID_ad_rpkiNotify));
+ }
+ if (!ok)
+ record_validation_status(status, MALFORMED_SIA_EXTENSION);
+ }
+
+ /* Non-critical */
+ if ((crldp = X509_get_ext_d2i(self->x509, NID_crl_distribution_points, &crit, NULL)) != NULL) {
+ DIST_POINT *dp = sk_DIST_POINT_value(crldp, 0);
+ ex_count--;
+ if (crit)
+ record_validation_status(status, GRATUITOUSLY_CRITICAL_EXTENSION);
+ ok = (sk_DIST_POINT_num(crldp) == 1 &&
+ dp->reasons == NULL && dp->CRLissuer == NULL &&
+ dp->distpoint != NULL && dp->distpoint->type == 0);
+ for (i = 0; ok && i < sk_GENERAL_NAME_num(dp->distpoint->name.fullname); i++) {
+ GENERAL_NAME *gn = sk_GENERAL_NAME_value(dp->distpoint->name.fullname, i);
+ ok = gn != NULL && gn->type == GEN_URI;
+ }
+ if (!ok)
+ record_validation_status(status, MALFORMED_CRLDP_EXTENSION);
+ }
+
+ /* Non-critical */
+ if ((eku = X509_get_ext_d2i(self->x509, NID_ext_key_usage, &crit, NULL)) != NULL) {
+ ex_count--;
+ ok = 0;
+ if (!crit && !is_ca && sk_ASN1_OBJECT_num(eku) > 0 && ekunid != NID_undef)
+ for (i = 0; !ok && i < sk_ASN1_OBJECT_num(eku); i++)
+ ok = OBJ_obj2nid(sk_ASN1_OBJECT_value(eku, i)) == ekunid;
+ if (!ok)
+ record_validation_status(status, INAPPROPRIATE_EKU_EXTENSION);
+ }
+
+ /* Critical */
+ if ((policies = X509_get_ext_d2i(self->x509, NID_certificate_policies, &crit, NULL)) != NULL) {
+ POLICYQUALINFO *qualifier = NULL;
+ POLICYINFO *policy = NULL;
+ ex_count--;
+ if (!crit || sk_POLICYINFO_num(policies) != 1 ||
+ (policy = sk_POLICYINFO_value(policies, 0)) == NULL ||
+ OBJ_obj2nid(policy->policyid) != NID_cp_ipAddr_asNumber ||
+ sk_POLICYQUALINFO_num(policy->qualifiers) > 1 ||
+ (sk_POLICYQUALINFO_num(policy->qualifiers) == 1 &&
+ ((qualifier = sk_POLICYQUALINFO_value(policy->qualifiers, 0)) == NULL ||
+ OBJ_obj2nid(qualifier->pqualid) != NID_id_qt_cps)))
+ record_validation_status(status, BAD_CERTIFICATE_POLICY);
+ else if (qualifier != NULL)
+ record_validation_status(status, POLICY_QUALIFIER_CPS);
+ }
+
+ /* Critical */
+ if ((self->x509->ex_flags & EXFLAG_KUSAGE) == 0)
+ record_validation_status(status, KEY_USAGE_MISSING);
+ else {
+ ex_count--;
+ if (!X509_EXTENSION_get_critical(X509_get_ext(self->x509, X509_get_ext_by_NID(self->x509, NID_key_usage, -1))) ||
+ self->x509->ex_kusage != (is_ca ? KU_KEY_CERT_SIGN | KU_CRL_SIGN : KU_DIGITAL_SIGNATURE))
+ record_validation_status(status, BAD_KEY_USAGE);
+ }
+
+ /* Critical */
+ if ((addr = X509_get_ext_d2i(self->x509, NID_sbgp_ipAddrBlock, &crit, NULL)) != NULL) {
+ ex_count--;
+ if (!crit || ekunid == NID_id_kp_bgpsec_router ||
+ !v3_addr_is_canonical(addr) || sk_IPAddressFamily_num(addr) == 0)
+ record_validation_status(status, BAD_IPADDRBLOCKS);
+ else
+ for (i = 0; i < sk_IPAddressFamily_num(addr); i++) {
+ IPAddressFamily *f = sk_IPAddressFamily_value(addr, i);
+ afi = v3_addr_get_afi(f);
+ if (afi != IANA_AFI_IPV4 && afi != IANA_AFI_IPV6)
+ record_validation_status(status, UNKNOWN_AFI);
+ else if (f->addressFamily->length != 2)
+ record_validation_status(status, SAFI_NOT_ALLOWED);
+ }
+ }
+
+ /* Critical */
+ if ((asid = X509_get_ext_d2i(self->x509, NID_sbgp_autonomousSysNum, &crit, NULL)) != NULL) {
+ ex_count--;
+ if (!crit || asid->asnum == NULL || asid->rdi != NULL || !v3_asid_is_canonical(asid) ||
+ (ekunid == NID_id_kp_bgpsec_router && asid->asnum->type == ASIdentifierChoice_inherit))
+ record_validation_status(status, BAD_ASIDENTIFIERS);
+ }
+
+ if (addr == NULL && asid == NULL)
+ record_validation_status(status, MISSING_RESOURCES);
+
+ /* Non-critical */
+ if ((ski = X509_get_ext_d2i(self->x509, NID_subject_key_identifier, &crit, NULL)) == NULL)
+ record_validation_status(status, SKI_EXTENSION_MISSING);
+ else {
+ ex_count--;
+ if (crit)
+ record_validation_status(status, GRATUITOUSLY_CRITICAL_EXTENSION);
+ if ((ski_pubkey = X509_get0_pubkey_bitstr(self->x509)) == NULL ||
+ !EVP_Digest(ski_pubkey->data, ski_pubkey->length,
+ ski_hashbuf, &ski_hashlen, EVP_sha1(), NULL) ||
+ ski_hashlen != 20 ||
+ ski_hashlen != ASN1_STRING_length(ski) ||
+ memcmp(ski_hashbuf, ASN1_STRING_data(ski), ski_hashlen))
+ record_validation_status(status, SKI_PUBLIC_KEY_MISMATCH);
+ }
+
+ /* Non-critical */
+ if ((aki = X509_get_ext_d2i(self->x509, NID_authority_key_identifier, &crit, NULL)) != NULL) {
+ ex_count--;
+ if (crit)
+ record_validation_status(status, GRATUITOUSLY_CRITICAL_EXTENSION);
+ if (aki->keyid == NULL || aki->serial != NULL || aki->issuer != NULL)
+ record_validation_status(status, AKI_EXTENSION_WRONG_FORMAT);
+ }
+
+ if (ex_count > 0)
+ record_validation_status(status, DISALLOWED_X509V3_EXTENSION);
+
+ /*
+ * Public key checks.
+ */
+
+ subject_pkey = X509_get_pubkey(self->x509);
+ ok = subject_pkey != NULL;
+ if (ok) {
+ ASN1_OBJECT *algorithm;
+
+ (void) X509_PUBKEY_get0_param(&algorithm, NULL, NULL, NULL, X509_get_X509_PUBKEY(self->x509));
+
+ switch (OBJ_obj2nid(algorithm)) {
+
+ case NID_rsaEncryption:
+ ok = (EVP_PKEY_base_id(subject_pkey) == EVP_PKEY_RSA &&
+ EVP_PKEY_bits(subject_pkey) == 2048 &&
+ BN_get_word(subject_pkey->pkey.rsa->e) == 65537);
+ break;
+
+ case NID_X9_62_id_ecPublicKey:
+ ok = (EVP_PKEY_base_id(subject_pkey) == EVP_PKEY_EC &&
+ ekunid == NID_id_kp_bgpsec_router &&
+ EC_GROUP_get_curve_name(EC_KEY_get0_group(subject_pkey->pkey.ec)) == NID_X9_62_prime256v1);
+ break;
+
+ default:
+ ok = 0;
+ }
+ }
+ if (!ok)
+ record_validation_status(status, BAD_PUBLIC_KEY);
+
+ ret = 1;
+
+ error:
+ EVP_PKEY_free(issuer_pkey);
+ EVP_PKEY_free(subject_pkey);
+ BASIC_CONSTRAINTS_free(bc);
+ sk_ACCESS_DESCRIPTION_pop_free(sia, ACCESS_DESCRIPTION_free);
+ sk_ACCESS_DESCRIPTION_pop_free(aia, ACCESS_DESCRIPTION_free);
+ sk_DIST_POINT_pop_free(crldp, DIST_POINT_free);
+ sk_POLICYINFO_pop_free(policies, POLICYINFO_free);
+ sk_ASN1_OBJECT_pop_free(eku, ASN1_OBJECT_free);
+ ASN1_OCTET_STRING_free(ski);
+ AUTHORITY_KEYID_free(aki);
+ ASIdentifiers_free(asid);
+ sk_IPAddressFamily_pop_free(addr, IPAddressFamily_free);
+
+ if (ret)
+ Py_RETURN_NONE;
+ else
+ return NULL;
+}
+
static char x509_object_get_version__doc__[] =
"Return version number of this certificate.\n"
;
@@ -2694,7 +3687,7 @@ x509_object_set_subject(x509_object *self, PyObject *args)
goto error;
if (!PySequence_Check(name_sequence))
- lose_type_error("Inapropriate type");
+ lose_type_error("Expected a sequence object");
if ((name = x509_object_helper_set_name(name_sequence)) == NULL)
goto error;
@@ -2730,7 +3723,7 @@ x509_object_set_issuer(x509_object *self, PyObject *args)
goto error;
if (!PySequence_Check(name_sequence))
- lose_type_error("Inapropriate type");
+ lose_type_error("Expected a sequence object");
if ((name = x509_object_helper_set_name(name_sequence)) == NULL)
goto error;
@@ -2877,7 +3870,7 @@ static char x509_object_get_ski__doc__[] =
static PyObject *
x509_object_get_ski(x509_object *self)
{
- return extension_get_ski(x509_object_extension_helper(self));
+ return extension_get_ski(x509_object_extension_get_helper(self, NID_subject_key_identifier));
}
static char x509_object_set_ski__doc__[] =
@@ -2887,7 +3880,7 @@ static char x509_object_set_ski__doc__[] =
static PyObject *
x509_object_set_ski(x509_object *self, PyObject *args)
{
- return extension_set_ski(x509_object_extension_helper(self), args);
+ return x509_object_extension_set_helper(self, extension_set_ski(args));
}
static char x509_object_get_aki__doc__[] =
@@ -2897,7 +3890,7 @@ static char x509_object_get_aki__doc__[] =
static PyObject *
x509_object_get_aki(x509_object *self)
{
- return extension_get_aki(x509_object_extension_helper(self));
+ return extension_get_aki(x509_object_extension_get_helper(self, NID_authority_key_identifier));
}
static char x509_object_set_aki__doc__[] =
@@ -2907,7 +3900,7 @@ static char x509_object_set_aki__doc__[] =
static PyObject *
x509_object_set_aki(x509_object *self, PyObject *args)
{
- return extension_set_aki(x509_object_extension_helper(self), args);
+ return x509_object_extension_set_helper(self, extension_set_aki(args));
}
static char x509_object_get_key_usage__doc__[] =
@@ -2917,7 +3910,7 @@ static char x509_object_get_key_usage__doc__[] =
static PyObject *
x509_object_get_key_usage(x509_object *self)
{
- return extension_get_key_usage(x509_object_extension_helper(self));
+ return extension_get_key_usage(x509_object_extension_get_helper(self, NID_key_usage));
}
static char x509_object_set_key_usage__doc__[] =
@@ -2929,7 +3922,7 @@ static char x509_object_set_key_usage__doc__[] =
static PyObject *
x509_object_set_key_usage(x509_object *self, PyObject *args)
{
- return extension_set_key_usage(x509_object_extension_helper(self), args);
+ return x509_object_extension_set_helper(self, extension_set_key_usage(args));
}
static char x509_object_get_eku__doc__[] =
@@ -2939,7 +3932,7 @@ static char x509_object_get_eku__doc__[] =
static PyObject *
x509_object_get_eku(x509_object *self)
{
- return extension_get_eku(x509_object_extension_helper(self));
+ return extension_get_eku(x509_object_extension_get_helper(self, NID_ext_key_usage));
}
static char x509_object_set_eku__doc__[] =
@@ -2951,7 +3944,7 @@ static char x509_object_set_eku__doc__[] =
static PyObject *
x509_object_set_eku(x509_object *self, PyObject *args)
{
- return extension_set_eku(x509_object_extension_helper(self), args);
+ return x509_object_extension_set_helper(self, extension_set_eku(args));
}
static char x509_object_get_rfc3779__doc__[] =
@@ -3021,12 +4014,12 @@ x509_object_get_rfc3779(x509_object *self)
break;
default:
- lose_type_error("Unexpected asIdsOrRanges type");
+ lose_value_error("Unexpected asIdsOrRanges type");
}
if (ASN1_STRING_type(b) == V_ASN1_NEG_INTEGER ||
ASN1_STRING_type(e) == V_ASN1_NEG_INTEGER)
- lose_type_error("I don't believe in negative ASNs");
+ lose_value_error("I don't believe in negative ASNs");
if ((range_b = ASN1_INTEGER_to_PyLong(b)) == NULL ||
(range_e = ASN1_INTEGER_to_PyLong(e)) == NULL ||
@@ -3040,7 +4033,7 @@ x509_object_get_rfc3779(x509_object *self)
break;
default:
- lose_type_error("Unexpected ASIdentifierChoice type");
+ lose_value_error("Unexpected ASIdentifierChoice type");
}
}
@@ -3055,14 +4048,14 @@ x509_object_get_rfc3779(x509_object *self)
switch (afi) {
case IANA_AFI_IPV4: result_obj = &ipv4_result; ip_type = &ipaddress_version_4; break;
case IANA_AFI_IPV6: result_obj = &ipv6_result; ip_type = &ipaddress_version_6; break;
- default: lose_type_error("Unknown AFI");
+ default: lose_value_error("Unknown AFI");
}
if (*result_obj != NULL)
- lose_type_error("Duplicate IPAddressFamily");
+ lose_value_error("Duplicate IPAddressFamily");
if (f->addressFamily->length > 2)
- lose_type_error("Unsupported SAFI");
+ lose_value_error("Unsupported SAFI");
switch (f->ipAddressChoice->type) {
@@ -3075,7 +4068,7 @@ x509_object_get_rfc3779(x509_object *self)
break;
default:
- lose_type_error("Unexpected IPAddressChoice type");
+ lose_value_error("Unexpected IPAddressChoice type");
}
if ((*result_obj = PyTuple_New(sk_IPAddressOrRange_num(f->ipAddressChoice->u.addressesOrRanges))) == NULL)
@@ -3095,7 +4088,7 @@ x509_object_get_rfc3779(x509_object *self)
if ((addr_len = v3_addr_get_range(aor, afi, addr_b->address, addr_e->address,
sizeof(addr_b->address))) == 0)
- lose_type_error("Couldn't unpack IP addresses from BIT STRINGs");
+ lose_value_error("Couldn't unpack IP addresses from BIT STRINGs");
addr_b->type = addr_e->type = ip_type;
@@ -3329,7 +4322,7 @@ static char x509_object_get_basic_constraints__doc__[] =
static PyObject *
x509_object_get_basic_constraints(x509_object *self)
{
- return extension_get_basic_constraints(x509_object_extension_helper(self));
+ return extension_get_basic_constraints(x509_object_extension_get_helper(self, NID_basic_constraints));
}
static char x509_object_set_basic_constraints__doc__[] =
@@ -3341,7 +4334,7 @@ static char x509_object_set_basic_constraints__doc__[] =
static PyObject *
x509_object_set_basic_constraints(x509_object *self, PyObject *args)
{
- return extension_set_basic_constraints(x509_object_extension_helper(self), args);
+ return x509_object_extension_set_helper(self, extension_set_basic_constraints(args));
}
static char x509_object_get_sia__doc__[] =
@@ -3353,7 +4346,7 @@ static char x509_object_get_sia__doc__[] =
static PyObject *
x509_object_get_sia(x509_object *self)
{
- return extension_get_sia(x509_object_extension_helper(self));
+ return extension_get_sia(x509_object_extension_get_helper(self, NID_sinfo_access));
}
static char x509_object_set_sia__doc__[] =
@@ -3365,7 +4358,7 @@ static char x509_object_set_sia__doc__[] =
static PyObject *
x509_object_set_sia(x509_object *self, PyObject *args, PyObject *kwds)
{
- return extension_set_sia(x509_object_extension_helper(self), args, kwds);
+ return x509_object_extension_set_helper(self, extension_set_sia(args, kwds));
}
static char x509_object_get_aia__doc__[] =
@@ -3783,6 +4776,8 @@ static struct PyMethodDef x509_object_methods[] = {
Define_Method(pemWrite, x509_object_pem_write, METH_NOARGS),
Define_Method(derWrite, x509_object_der_write, METH_NOARGS),
Define_Method(sign, x509_object_sign, METH_VARARGS),
+ Define_Method(verify, x509_object_verify, METH_KEYWORDS),
+ Define_Method(checkRPKIConformance, x509_object_check_rpki_conformance, METH_KEYWORDS),
Define_Method(getPublicKey, x509_object_get_public_key, METH_NOARGS),
Define_Method(setPublicKey, x509_object_set_public_key, METH_VARARGS),
Define_Method(getVersion, x509_object_get_version, METH_NOARGS),
@@ -3877,286 +4872,6 @@ static PyTypeObject POW_X509_Type = {
/*
- * X509Store object.
- */
-
-static PyObject *
-x509_store_object_new(PyTypeObject *type, GCC_UNUSED PyObject *args, GCC_UNUSED PyObject *kwds)
-{
- x509_store_object *self = NULL;
-
- ENTERING(x509_store_object_new);
-
- if ((self = (x509_store_object *) type->tp_alloc(type, 0)) == NULL)
- goto error;
-
- if ((self->store = X509_STORE_new()) == NULL)
- lose_no_memory();
-
- self->ctxclass = (PyObject *) &POW_X509StoreCTX_Type;
- Py_XINCREF(self->ctxclass);
- return (PyObject *) self;
-
- error:
- Py_XDECREF(self);
- return NULL;
-}
-
-static void
-x509_store_object_dealloc(x509_store_object *self)
-{
- ENTERING(x509_store_object_dealloc);
- X509_STORE_free(self->store);
- Py_XDECREF(self->ctxclass);
- self->ob_type->tp_free((PyObject*) self);
-}
-
-static char x509_store_object_add_trust__doc__[] =
- "Add a trusted certificate to this certificate store object.\n"
- "\n"
- "The \"certificate\" parameter should be an instance of the X509 class.\n"
- ;
-
-static PyObject *
-x509_store_object_add_trust(x509_store_object *self, PyObject *args)
-{
- x509_object *x509 = NULL;
-
- ENTERING(x509_store_object_add_trust);
-
- if (!PyArg_ParseTuple(args, "O!", &POW_X509_Type, &x509))
- goto error;
-
- X509_STORE_add_cert(self->store, x509->x509);
-
- Py_RETURN_NONE;
-
- error:
-
- return NULL;
-}
-
-static char x509_store_object_add_crl__doc__[] =
- "Add a CRL to this certificate store object.\n"
- "\n"
- "The \"crl\" parameter should be an instance of the CRL class.\n"
- ;
-
-static PyObject *
-x509_store_object_add_crl(x509_store_object *self, PyObject *args)
-{
- crl_object *crl = NULL;
-
- ENTERING(x509_store_object_add_crl);
-
- if (!PyArg_ParseTuple(args, "O!", &POW_CRL_Type, &crl))
- goto error;
-
- X509_STORE_add_crl(self->store, crl->crl);
-
- Py_RETURN_NONE;
-
- error:
-
- return NULL;
-}
-
-static char x509_store_object_set_flags__doc__[] =
- "Set validation flags for this X509Store.\n"
- "\n"
- "Argument is an integer containing bit flags to set.\n"
- ;
-
-static PyObject *
-x509_store_object_set_flags (x509_store_object *self, PyObject *args)
-{
- unsigned long flags;
-
- if (!PyArg_ParseTuple(args, "k", &flags))
- goto error;
-
- if (!X509_VERIFY_PARAM_set_flags(self->store->param, flags))
- lose_openssl_error("X509_VERIFY_PARAM_set_flags() failed");
-
- Py_RETURN_NONE;
-
- error:
- return NULL;
-}
-
-static char x509_store_object_clear_flags__doc__[] =
- "Clear validation flags for this X509Store.\n"
- "\n"
- "Argument is an integer containing bit flags to clear.\n"
- ;
-
-static PyObject *
-x509_store_object_clear_flags (x509_store_object *self, PyObject *args)
-{
- unsigned long flags;
-
- if (!PyArg_ParseTuple(args, "k", &flags))
- goto error;
-
- if (!X509_VERIFY_PARAM_clear_flags(self->store->param, flags))
- lose_openssl_error("X509_VERIFY_PARAM_clear_flags() failed");
-
- Py_RETURN_NONE;
-
- error:
- return NULL;
-}
-
-static char x509_store_object_set_context_class__doc__[] =
- "Set validation context class factory for this X509Store.\n"
- "\n"
- "This must be a callable object which takes one argument, an X509Store,\n"
- "and returns a subclass of X509StoreCTX. The callable can be a class\n"
- "object but need not be, so long as calling it returns an instance of an\n"
- "appropriate class. The default is X509StoreCTX.\n"
- ;
-
-static PyObject *
-x509_store_object_set_context_class (x509_store_object *self, PyObject *args)
-{
- PyObject *ctxclass = (PyObject *) &POW_X509StoreCTX_Type;
-
- if (!PyArg_ParseTuple(args, "|O", &ctxclass))
- goto error;
-
- if (!PyCallable_Check(ctxclass))
- lose("Context class must be callable");
-
- Py_XDECREF(self->ctxclass);
- self->ctxclass = ctxclass;
- Py_XINCREF(self->ctxclass);
-
- Py_RETURN_NONE;
-
- error:
- return NULL;
-}
-
-static char x509_store_object_verify__doc__[] =
- "Verify an X509 certificate object using this certificate store.\n"
- "\n"
- "Optional second argument is an iterable that supplies untrusted certificates\n"
- "to be considered when building a chain to the trust anchor.\n"
- "\n"
- "This method returns an instance of the store's verification context class.\n"
- ;
-
-static PyObject *
-x509_store_object_verify(x509_store_object *self, PyObject *args)
-{
- x509_store_ctx_object *ctx = NULL;
- STACK_OF(X509) *stack = NULL;
- x509_object *x509 = NULL;
- PyObject *chain = Py_None;
- int ok;
-
- if (!PyArg_ParseTuple(args, "O!|O", &POW_X509_Type, &x509, &chain))
- goto error;
-
- if ((ctx = (x509_store_ctx_object *) PyObject_CallFunctionObjArgs(self->ctxclass, self, NULL)) == NULL)
- goto error;
-
- if (!POW_X509StoreCTX_Check(ctx))
- lose_type_error("Returned context is not a X509StoreCTX");
-
- if (ctx->ctx == NULL)
- lose("Uninitialized X509StoreCTX");
-
- if (chain != Py_None && (stack = x509_helper_iterable_to_stack(chain)) == NULL)
- goto error;
-
- Py_XINCREF(x509);
- Py_XINCREF(chain);
- X509_STORE_CTX_set_cert(ctx->ctx, x509->x509);
- X509_STORE_CTX_set_chain(ctx->ctx, stack);
-
- ok = X509_verify_cert(ctx->ctx);
-
- X509_STORE_CTX_set_chain(ctx->ctx, NULL);
- X509_STORE_CTX_set_cert(ctx->ctx, NULL);
- Py_XDECREF(chain);
- Py_XDECREF(x509);
-
- sk_X509_free(stack);
-
- if (PyErr_Occurred())
- goto error;
-
- if (ok < 0)
- lose_openssl_error("X509_verify_cert() raised an exception");
-
- return (PyObject *) ctx;
-
- error: /* fall through */
- Py_XDECREF(ctx);
- return NULL;
-}
-
-static struct PyMethodDef x509_store_object_methods[] = {
- Define_Method(addTrust, x509_store_object_add_trust, METH_VARARGS),
- Define_Method(addCrl, x509_store_object_add_crl, METH_VARARGS),
- Define_Method(setContextClass,x509_store_object_set_context_class, METH_VARARGS),
- Define_Method(setFlags, x509_store_object_set_flags, METH_VARARGS),
- Define_Method(clearFlags, x509_store_object_clear_flags, METH_VARARGS),
- Define_Method(verify, x509_store_object_verify, METH_VARARGS),
- {NULL}
-};
-
-static char POW_X509Store_Type__doc__[] =
- "This class holds the OpenSSL certificate store objects used in CMS\n"
- "and certificate verification.\n"
- ;
-
-static PyTypeObject POW_X509Store_Type = {
- PyObject_HEAD_INIT(0)
- 0, /* ob_size */
- "rpki.POW.X509Store", /* tp_name */
- sizeof(x509_store_object), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)x509_store_object_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- POW_X509Store_Type__doc__, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- x509_store_object_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- x509_store_object_new, /* tp_new */
-};
-
-
-
-/*
* X509StoreCTX object.
*/
@@ -4164,13 +4879,16 @@ static int
x509_store_ctx_object_verify_cb(int ok, X509_STORE_CTX *ctx)
{
static char method_name[] = "verify_callback";
- PyObject *self = X509_STORE_CTX_get_ex_data(ctx, x509_store_ctx_ex_data_idx);
+ x509_store_ctx_object *self = (x509_store_ctx_object *) X509_STORE_CTX_get_ex_data(ctx, x509_store_ctx_ex_data_idx);
PyObject *result = NULL;
- if (self == NULL || !PyObject_HasAttrString(self, method_name))
+ if (self == NULL)
+ return ok;
+
+ if (!PyObject_HasAttrString((PyObject *) self, method_name))
return ok;
- if ((result = PyObject_CallMethod(self, method_name, "i", ok)) == NULL)
+ if ((result = PyObject_CallMethod((PyObject *) self, method_name, "i", ok)) == NULL)
return -1;
ok = PyObject_IsTrue(result);
@@ -4200,26 +4918,19 @@ x509_store_ctx_object_new(PyTypeObject *type, GCC_UNUSED PyObject *args, GCC_UNU
static int
x509_store_ctx_object_init(x509_store_ctx_object *self, PyObject *args, GCC_UNUSED PyObject *kwds)
{
- x509_store_object *store = NULL;
-
- if (!PyArg_ParseTuple(args, "|O!", &POW_X509Store_Type, &store))
- goto error;
+ ENTERING(x509_store_ctx_object_init);
- if ((self->ctx = X509_STORE_CTX_new()) == NULL)
+ if ((self->store = X509_STORE_new()) == NULL ||
+ (self->ctx = X509_STORE_CTX_new()) == NULL)
lose_no_memory();
- if (!X509_STORE_CTX_init(self->ctx, store ? store->store : NULL, NULL, NULL))
+ if (!X509_STORE_CTX_init(self->ctx, self->store, NULL, NULL))
lose_openssl_error("Couldn't initialize X509_STORE_CTX");
if (!X509_STORE_CTX_set_ex_data(self->ctx, x509_store_ctx_ex_data_idx, self))
lose_openssl_error("Couldn't set X509_STORE_CTX ex_data");
- Py_XDECREF(self->store);
- self->store = store;
- Py_XINCREF(self->store);
-
X509_VERIFY_PARAM_set_flags(self->ctx->param, X509_V_FLAG_X509_STRICT);
- X509_STORE_CTX_set_verify_cb(self->ctx, x509_store_ctx_object_verify_cb);
return 0;
error:
@@ -4231,16 +4942,10 @@ x509_store_ctx_object_dealloc(x509_store_ctx_object *self)
{
ENTERING(x509_store_ctx_object_dealloc);
X509_STORE_CTX_free(self->ctx);
- Py_XDECREF(self->store);
+ X509_STORE_free(self->store);
self->ob_type->tp_free((PyObject*) self);
}
-static PyObject *
-x509_store_ctx_object_get_store (x509_store_ctx_object *self, GCC_UNUSED void *closure)
-{
- return Py_BuildValue("O", self->store == NULL ? Py_None : (PyObject *) self->store);
-}
-
static char x509_store_ctx_object_get_error__doc__[] =
"Extract verification error code from this X509StoreCTX.\n"
;
@@ -4323,52 +5028,6 @@ x509_store_ctx_object_get_chain (x509_store_ctx_object *self)
}
/*
- * For some reason, there are no methods for the policy mechanism for
- * X509_STORE, only for X509_STORE_CTX. Presumably we can whack these
- * anyway using the X509_VERIFY_PARAM_*() calls, the question is
- * whether there's a good reason for this omission.
- *
- * For the moment, I'm just going to leave the policy stuff
- * unimplemented, until we figure out whether it belongs in X509Store
- * or X509StoreCTX.
- */
-
-#define IMPLEMENT_X509StoreCTX_POLICY 0
-
-#if IMPLEMENT_X509StoreCTX_POLICY
-
-static char x509_store_ctx_object_set_policy__doc__[] =
- "Set this X509StoreCTX to require a specified certificate policy.\n"
- ;
-
-static PyObject*
-x509_store_ctx_object_set_policy (x509_store_ctx_object *self, PyObject *args)
-{
- ASN1_OBJECT *policy = NULL;
- char *oid = NULL;
-
- if (!PyArg_ParseTuple(args, "s", &oid))
- goto error;
-
- if ((policy = OBJ_txt2obj(oid, 1)) == NULL)
- lose_openssl_error("Couldn't parse OID");
-
- if (!X509_VERIFY_PARAM_set_flags(self->ctx->param, X509_V_FLAG_POLICY_CHECK | X509_V_FLAG_EXPLICIT_POLICY))
- lose_openssl_error("Couldn't set policy flags");
-
- if (!X509_VERIFY_PARAM_add0_policy(self->ctx->param, policy))
- lose_openssl_error("Couldn't set policy");
-
- Py_RETURN_NONE;
-
- error:
- ASN1_OBJECT_free(policy);
- return NULL;
-}
-
-#endif /* IMPLEMENT_X509StoreCTX_POLICY */
-
-/*
* See (omnibus) man page for X509_STORE_CTX_get_error() for other
* query methods we might want to expose. Someday we might want to
* support X509_V_FLAG_USE_CHECK_TIME too.
@@ -4380,29 +5039,20 @@ static struct PyMethodDef x509_store_ctx_object_methods[] = {
Define_Method(getErrorDepth, x509_store_ctx_object_get_error_depth, METH_NOARGS),
Define_Method(getCurrentCertificate, x509_store_ctx_object_get_current_certificate, METH_NOARGS),
Define_Method(getChain, x509_store_ctx_object_get_chain, METH_NOARGS),
-
-#if IMPLEMENT_X509StoreCTX_POLICY
- Define_Method(setPolicy, x509_store_ctx_object_set_policy, METH_VARARGS),
-#endif
{NULL}
};
-static PyGetSetDef x509_store_ctx_object_getsetters[] = {
- {"store", (getter) x509_store_ctx_object_get_store},
- {NULL}
-};
-
static char POW_X509StoreCTX_Type__doc__[] =
"This class holds the state of an OpenSSL certificate verification\n"
"operation. Ordinarily, the user will never have cause to instantiate\n"
"this class directly, instead, an object of this class will be returned\n"
- "by X509Store.verify().\n"
+ "by X509.verify().\n"
"\n"
"If you need to see OpenSSL's verification callbacks, you can do so\n"
- "by subclassing X509StoreCTX and attaching your subclass to an X509Store\n"
- "object using X509Store.setContextClass(). Your subclass should provide\n"
- "a .verify_callback() method, which should expect to receive one argument:\n"
- "the integer \"ok\" value passed by OpenSSL's verification callbacks.\n"
+ "by subclassing X509StoreCTX and passing your subclass as an argument\n"
+ "to X509.verify. Your subclass should provide a .verify_callback()\n"
+ "method, which should expect to receive one argument: the integer \"ok\"\n"
+ "value passed by OpenSSL's verification callbacks.\n"
"\n"
"The return value from your .verify_callback() method will be is interpreted\n"
"as a boolean value: anything which evaluates to True will be result in a\n"
@@ -4441,7 +5091,7 @@ static PyTypeObject POW_X509StoreCTX_Type = {
0, /* tp_iternext */
x509_store_ctx_object_methods, /* tp_methods */
0, /* tp_members */
- x509_store_ctx_object_getsetters, /* tp_getset */
+ 0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
@@ -4586,15 +5236,38 @@ crl_object_der_read_file(PyTypeObject *type, PyObject *args)
return read_from_file_helper(crl_object_der_read_helper, type, args);
}
-static X509_EXTENSIONS **
-crl_object_extension_helper(crl_object *self)
+static X509_EXTENSION *
+crl_object_extension_get_helper(crl_object *self, int nid)
{
- if (self && self->crl && self->crl->crl)
- return &self->crl->crl->extensions;
- PyErr_SetString(PyExc_ValueError, "Can't find X509_EXTENSIONS in CRL object");
- return NULL;
+ if (self != NULL && self->crl != NULL)
+ return X509_CRL_get_ext(self->crl, X509_CRL_get_ext_by_NID(self->crl, nid, -1));
+ else
+ return NULL;
}
+static PyObject *
+crl_object_extension_set_helper(crl_object *self, extension_wrapper ext)
+{
+ int ok = 0;
+
+ if (ext.value == NULL)
+ goto error;
+
+ if (!X509_CRL_add1_ext_i2d(self->crl, ext.nid, ext.value, ext.critical, X509V3_ADD_REPLACE))
+ lose_openssl_error("Couldn't add extension to CRL");
+
+ ok = 1;
+
+ error:
+ ext.destructor(ext.value);
+ if (ok)
+ Py_RETURN_NONE;
+ else
+ return NULL;
+}
+
+
+
static char crl_object_get_version__doc__[] =
"Return the version number of this CRL.\n"
;
@@ -4672,7 +5345,7 @@ crl_object_set_issuer(crl_object *self, PyObject *args)
goto error;
if (!PySequence_Check(name_sequence))
- lose_type_error("Inapropriate type");
+ lose_type_error("Expected a sequence object");
if ((name = x509_object_helper_set_name(name_sequence)) == NULL)
goto error;
@@ -4917,6 +5590,21 @@ crl_object_get_revoked(crl_object *self)
return NULL;
}
+static char crl_object_is_revoked__doc__[] =
+ "Check whether a particular certificate has been revoked.\n"
+ ;
+
+static PyObject *
+crl_object_is_revoked(crl_object *self, PyObject *args)
+{
+ x509_object *x = NULL;
+
+ if (!PyArg_ParseTuple(args, "O!", &POW_X509_Type, &x))
+ return NULL;
+
+ return PyBool_FromLong(X509_CRL_get0_by_cert(self->crl, NULL, x->x509));
+}
+
static char crl_object_clear_extensions__doc__[] =
"Clear all extensions attached to this CRL.\n"
;
@@ -4943,8 +5631,6 @@ static char crl_object_sign__doc__[] =
"The optional \"digest\" parameter indicates which digest to compute and\n"
"sign, and should be one of the following:\n"
"\n"
- "* MD5_DIGEST\n"
- "* SHA_DIGEST\n"
"* SHA1_DIGEST\n"
"* SHA256_DIGEST\n"
"* SHA384_DIGEST\n"
@@ -4978,25 +5664,48 @@ crl_object_sign(crl_object *self, PyObject *args)
}
static char crl_object_verify__doc__[] =
- "Verify this CRL's signature.\n"
- "\n"
- "The check is performed using OpenSSL's X509_CRL_verify() function.\n"
- "\n"
- "The \"key\" parameter should be an instance of the Asymmetric class\n"
- "containing the public key of the purported signer.\n"
+ "Verify this CRL's signature against its issuer.\n"
;
static PyObject *
crl_object_verify(crl_object *self, PyObject *args)
{
- asymmetric_object *asym;
+ x509_object *issuer;
ENTERING(crl_object_verify);
- if (!PyArg_ParseTuple(args, "O!", &POW_Asymmetric_Type, &asym))
+ if (!PyArg_ParseTuple(args, "O!", &POW_X509_Type, &issuer))
goto error;
- return PyBool_FromLong(X509_CRL_verify(self->crl, asym->pkey));
+ if (!X509_CRL_verify(self->crl, X509_get_pubkey(issuer->x509)))
+ lose_validation_error("X509_CRL_verify() raised an exception");
+
+ Py_RETURN_NONE;
+
+ error:
+ return NULL;
+}
+
+static char crl_object_check_rpki_conformance__doc__[] =
+ "Check this CRL for conformance to the RPKI profile.\n"
+ ;
+
+static PyObject *
+crl_object_check_rpki_conformance(crl_object *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"issuer", "status", NULL};
+ x509_object *issuer = NULL;
+ PyObject *status = Py_None;
+
+ ENTERING(crl_object_check_rpki_conformance);
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!", kwlist, &POW_X509_Type, &issuer, &PySet_Type, &status))
+ goto error;
+
+ if (!check_crl(self->crl, issuer->x509, status))
+ goto error;
+
+ Py_RETURN_NONE;
error:
return NULL;
@@ -5059,7 +5768,7 @@ static char crl_object_get_aki__doc__[] =
static PyObject *
crl_object_get_aki(crl_object *self)
{
- return extension_get_aki(crl_object_extension_helper(self));
+ return extension_get_aki(crl_object_extension_get_helper(self, NID_authority_key_identifier));
}
static char crl_object_set_aki__doc__[] =
@@ -5069,7 +5778,7 @@ static char crl_object_set_aki__doc__[] =
static PyObject *
crl_object_set_aki(crl_object *self, PyObject *args)
{
- return extension_set_aki(crl_object_extension_helper(self), args);
+ return crl_object_extension_set_helper(self, extension_set_aki(args));
}
static char crl_object_get_crl_number__doc__[] =
@@ -5147,31 +5856,33 @@ crl_object_pprint(crl_object *self)
}
static struct PyMethodDef crl_object_methods[] = {
- Define_Method(sign, crl_object_sign, METH_VARARGS),
- Define_Method(verify, crl_object_verify, METH_VARARGS),
- Define_Method(getVersion, crl_object_get_version, METH_NOARGS),
- Define_Method(setVersion, crl_object_set_version, METH_VARARGS),
- Define_Method(getIssuer, crl_object_get_issuer, METH_VARARGS),
- Define_Method(setIssuer, crl_object_set_issuer, METH_VARARGS),
- Define_Method(getThisUpdate, crl_object_get_this_update, METH_NOARGS),
- Define_Method(setThisUpdate, crl_object_set_this_update, METH_VARARGS),
- Define_Method(getNextUpdate, crl_object_get_next_update, METH_NOARGS),
- Define_Method(setNextUpdate, crl_object_set_next_update, METH_VARARGS),
- Define_Method(getRevoked, crl_object_get_revoked, METH_NOARGS),
- Define_Method(addRevocations, crl_object_add_revocations, METH_VARARGS),
- Define_Method(clearExtensions, crl_object_clear_extensions, METH_NOARGS),
- Define_Method(pemWrite, crl_object_pem_write, METH_NOARGS),
- Define_Method(derWrite, crl_object_der_write, METH_NOARGS),
- Define_Method(pprint, crl_object_pprint, METH_NOARGS),
- Define_Method(getAKI, crl_object_get_aki, METH_NOARGS),
- Define_Method(setAKI, crl_object_set_aki, METH_VARARGS),
- Define_Method(getCRLNumber, crl_object_get_crl_number, METH_NOARGS),
- Define_Method(setCRLNumber, crl_object_set_crl_number, METH_VARARGS),
- Define_Method(getIssuerHash, crl_object_get_issuer_hash, METH_NOARGS),
- Define_Class_Method(pemRead, crl_object_pem_read, METH_VARARGS),
- Define_Class_Method(pemReadFile, crl_object_pem_read_file, METH_VARARGS),
- Define_Class_Method(derRead, crl_object_der_read, METH_VARARGS),
- Define_Class_Method(derReadFile, crl_object_der_read_file, METH_VARARGS),
+ Define_Method(sign, crl_object_sign, METH_VARARGS),
+ Define_Method(verify, crl_object_verify, METH_VARARGS),
+ Define_Method(checkRPKIConformance, crl_object_check_rpki_conformance, METH_KEYWORDS),
+ Define_Method(getVersion, crl_object_get_version, METH_NOARGS),
+ Define_Method(setVersion, crl_object_set_version, METH_VARARGS),
+ Define_Method(getIssuer, crl_object_get_issuer, METH_VARARGS),
+ Define_Method(setIssuer, crl_object_set_issuer, METH_VARARGS),
+ Define_Method(getThisUpdate, crl_object_get_this_update, METH_NOARGS),
+ Define_Method(setThisUpdate, crl_object_set_this_update, METH_VARARGS),
+ Define_Method(getNextUpdate, crl_object_get_next_update, METH_NOARGS),
+ Define_Method(setNextUpdate, crl_object_set_next_update, METH_VARARGS),
+ Define_Method(getRevoked, crl_object_get_revoked, METH_NOARGS),
+ Define_Method(isRevoked, crl_object_is_revoked, METH_VARARGS),
+ Define_Method(addRevocations, crl_object_add_revocations, METH_VARARGS),
+ Define_Method(clearExtensions, crl_object_clear_extensions, METH_NOARGS),
+ Define_Method(pemWrite, crl_object_pem_write, METH_NOARGS),
+ Define_Method(derWrite, crl_object_der_write, METH_NOARGS),
+ Define_Method(pprint, crl_object_pprint, METH_NOARGS),
+ Define_Method(getAKI, crl_object_get_aki, METH_NOARGS),
+ Define_Method(setAKI, crl_object_set_aki, METH_VARARGS),
+ Define_Method(getCRLNumber, crl_object_get_crl_number, METH_NOARGS),
+ Define_Method(setCRLNumber, crl_object_set_crl_number, METH_VARARGS),
+ Define_Method(getIssuerHash, crl_object_get_issuer_hash, METH_NOARGS),
+ Define_Class_Method(pemRead, crl_object_pem_read, METH_VARARGS),
+ Define_Class_Method(pemReadFile, crl_object_pem_read_file, METH_VARARGS),
+ Define_Class_Method(derRead, crl_object_der_read, METH_VARARGS),
+ Define_Class_Method(derReadFile, crl_object_der_read_file, METH_VARARGS),
{NULL}
};
@@ -5730,14 +6441,16 @@ asymmetric_object_calculate_ski(asymmetric_object *self)
X509_PUBKEY *pubkey = NULL;
unsigned char digest[EVP_MAX_MD_SIZE];
unsigned digest_length;
+ const unsigned char *key_data = NULL;
+ int key_length;
ENTERING(asymmetric_object_calculate_ski);
- if (!X509_PUBKEY_set(&pubkey, self->pkey))
+ if (!X509_PUBKEY_set(&pubkey, self->pkey) ||
+ !X509_PUBKEY_get0_param(NULL, &key_data, &key_length, NULL, pubkey))
lose_openssl_error("Couldn't extract public key");
- if (!EVP_Digest(pubkey->public_key->data, pubkey->public_key->length,
- digest, &digest_length, EVP_sha1(), NULL))
+ if (!EVP_Digest(key_data, key_length, digest, &digest_length, EVP_sha1(), NULL))
lose_openssl_error("Couldn't calculate SHA-1 digest of public key");
result = PyString_FromStringAndSize((char *) digest, digest_length);
@@ -6240,8 +6953,9 @@ static PyObject *
digest_object_digest(digest_object *self)
{
unsigned char digest_text[EVP_MAX_MD_SIZE];
- EVP_MD_CTX ctx;
unsigned digest_len = 0;
+ PyObject *result = NULL;
+ EVP_MD_CTX ctx;
ENTERING(digest_object_digest);
@@ -6250,12 +6964,11 @@ digest_object_digest(digest_object *self)
EVP_DigestFinal(&ctx, digest_text, &digest_len);
- EVP_MD_CTX_cleanup(&ctx);
-
- return Py_BuildValue("s#", digest_text, (Py_ssize_t) digest_len);
+ result = Py_BuildValue("s#", digest_text, (Py_ssize_t) digest_len);
error:
- return NULL;
+ EVP_MD_CTX_cleanup(&ctx);
+ return result;
}
static struct PyMethodDef digest_object_methods[] = {
@@ -6273,8 +6986,6 @@ static char POW_Digest_Type__doc__[] =
"The constructor takes one parameter, the kind of Digest object to create.\n"
"This should be one of the following:\n"
"\n"
- " * MD5_DIGEST\n"
- " * SHA_DIGEST\n"
" * SHA1_DIGEST\n"
" * SHA256_DIGEST\n"
" * SHA384_DIGEST\n"
@@ -6542,7 +7253,7 @@ cms_object_sign_helper(cms_object *self,
while ((item = PyIter_Next(iterator)) != NULL) {
if (!POW_CRL_Check(item))
- lose_type_error("Inappropriate type");
+ lose_type_error("Expected a CRL object");
if (!CMS_add1_crl(cms, ((crl_object *) item)->crl))
lose_openssl_error("Couldn't add CRL to CMS");
@@ -6651,15 +7362,12 @@ cms_object_sign(cms_object *self, PyObject *args)
return NULL;
}
-#define DONT_VERIFY_ANYTHING \
- (CMS_NOCRL | \
- CMS_NO_SIGNER_CERT_VERIFY | \
- CMS_NO_ATTR_VERIFY | \
- CMS_NO_CONTENT_VERIFY)
-
static BIO *
cms_object_extract_without_verifying_helper(cms_object *self)
{
+ const unsigned flags =
+ CMS_NOCRL | CMS_NO_SIGNER_CERT_VERIFY | CMS_NO_ATTR_VERIFY | CMS_NO_CONTENT_VERIFY;
+
BIO *bio = NULL;
ENTERING(cms_object_extract_without_verifying_helper);
@@ -6667,7 +7375,7 @@ cms_object_extract_without_verifying_helper(cms_object *self)
if ((bio = BIO_new(BIO_s_mem())) == NULL)
lose_no_memory();
- if (CMS_verify(self->cms, NULL, NULL, NULL, bio, DONT_VERIFY_ANYTHING) <= 0)
+ if (CMS_verify(self->cms, NULL, NULL, NULL, bio, flags) <= 0)
lose_openssl_error("Couldn't parse CMS message");
return bio;
@@ -6677,13 +7385,9 @@ cms_object_extract_without_verifying_helper(cms_object *self)
return NULL;
}
-#undef DONT_VERIFY_ANYTHING
#define CMS_OBJECT_VERIFY_HELPER__DOC__ \
"\n" \
- "The \"store\" parameter is an X509Store object, the trusted certificate\n" \
- "store to use in verification.\n" \
- "\n" \
"The optional \"certs\" parameter is a set of certificates to search\n" \
"for the signer's certificate.\n" \
"\n" \
@@ -6694,39 +7398,49 @@ cms_object_extract_without_verifying_helper(cms_object *self)
" * CMS_NOCRL\n" \
" * CMS_NO_SIGNER_CERT_VERIFY\n" \
" * CMS_NO_ATTR_VERIFY\n" \
- " * CMS_NO_CONTENT_VERIFY\n"
+ " * CMS_NO_CONTENT_VERIFY\n" \
+ "\n" \
+ "Note that this method does NOT verify X.509 certificates, it just\n" \
+ "verifies the CMS signature. Use certificate verification functions\n" \
+ "to verify certificates."
+
+#warning Should we really allow the full range of flags here, or constrain to just the useful cases?
static BIO *
cms_object_verify_helper(cms_object *self, PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"store", "certs", "flags", NULL};
- x509_store_object *store = NULL;
+ static char *kwlist[] = {"certs", "flags", NULL};
PyObject *certs_iterable = Py_None;
STACK_OF(X509) *certs_stack = NULL;
unsigned flags = 0, ok = 0;
BIO *bio = NULL;
+ const unsigned flag_mask =
+ CMS_NOINTERN | CMS_NOCRL | CMS_NO_SIGNER_CERT_VERIFY |
+ CMS_NO_ATTR_VERIFY | CMS_NO_CONTENT_VERIFY;
+
ENTERING(cms_object_verify_helper);
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|OI", kwlist,
- &POW_X509Store_Type, &store, &certs_iterable, &flags))
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OI", kwlist, &certs_iterable, &flags))
goto error;
+ if ((flags & ~flag_mask) != 0)
+ lose_value_error("Bad CMS_verify() flags");
+
+ flags |= CMS_NO_SIGNER_CERT_VERIFY;
+
if ((bio = BIO_new(BIO_s_mem())) == NULL)
lose_no_memory();
assert_no_unhandled_openssl_errors();
- flags &= (CMS_NOINTERN | CMS_NOCRL | CMS_NO_SIGNER_CERT_VERIFY |
- CMS_NO_ATTR_VERIFY | CMS_NO_CONTENT_VERIFY);
-
if (certs_iterable != Py_None &&
(certs_stack = x509_helper_iterable_to_stack(certs_iterable)) == NULL)
goto error;
assert_no_unhandled_openssl_errors();
- if (CMS_verify(self->cms, certs_stack, store->store, NULL, bio, flags) <= 0)
+ if (CMS_verify(self->cms, certs_stack, NULL, NULL, bio, flags) <= 0)
lose_openssl_error("Couldn't verify CMS message");
assert_no_unhandled_openssl_errors();
@@ -6787,6 +7501,30 @@ cms_object_extract_without_verifying(cms_object *self)
return result;
}
+static char cms_object_check_rpki_conformance__doc__[] =
+ "Check this CMS message for conformance to the RPKI profile.\n"
+ ;
+
+static PyObject *
+cms_object_check_rpki_conformance(cms_object *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"status", NULL};
+ PyObject *status = Py_None;
+
+ ENTERING(cms_object_check_rpki_conformance);
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!", kwlist, &PySet_Type, &status))
+ goto error;
+
+ if (!check_cms(self->cms, status))
+ goto error;
+
+ Py_RETURN_NONE;
+
+ error:
+ return NULL;
+}
+
static char cms_object_eContentType__doc__[] =
"Return the eContentType OID of this CMS message.\n"
;
@@ -6843,10 +7581,10 @@ cms_object_signingTime(cms_object *self)
if (xa->single)
lose("Couldn't extract signerInfos from CMS message[5]");
- if (sk_ASN1_TYPE_num(xa->value.set) != 1)
+ if (X509_ATTRIBUTE_count(xa) != 1)
lose("Couldn't extract signerInfos from CMS message[6]");
- if ((so = sk_ASN1_TYPE_value(xa->value.set, 0)) == NULL)
+ if ((so = X509_ATTRIBUTE_get0_type(xa, 0)) == NULL)
lose("Couldn't extract signerInfos from CMS message[7]");
switch (so->type) {
@@ -6895,6 +7633,10 @@ static char cms_object_certs__doc__[] =
"contains no certificates.\n"
;
+/*
+ * Might want to accept an optional subclass argument.
+ */
+
static PyObject *
cms_object_certs(cms_object *self)
{
@@ -6921,6 +7663,10 @@ static char cms_object_crls__doc__[] =
"This sequence will be empty if the message contains no CRLs.\n"
;
+/*
+ * Might want to accept an optional subclass argument.
+ */
+
static PyObject *
cms_object_crls(cms_object *self)
{
@@ -6948,6 +7694,7 @@ static struct PyMethodDef cms_object_methods[] = {
Define_Method(sign, cms_object_sign, METH_VARARGS),
Define_Method(verify, cms_object_verify, METH_KEYWORDS),
Define_Method(extractWithoutVerifying, cms_object_extract_without_verifying, METH_NOARGS),
+ Define_Method(checkRPKIConformance, cms_object_check_rpki_conformance, METH_KEYWORDS),
Define_Method(eContentType, cms_object_eContentType, METH_NOARGS),
Define_Method(signingTime, cms_object_signingTime, METH_NOARGS),
Define_Method(pprint, cms_object_pprint, METH_NOARGS),
@@ -7076,14 +7823,13 @@ static char manifest_object_extract_without_verifying__doc__[] =
static PyObject *
manifest_object_extract_without_verifying(manifest_object *self)
{
- PyObject *result = NULL;
BIO *bio = NULL;
int ok = 0;
ENTERING(manifest_object_extract_without_verifying);
- if ((bio = cms_object_extract_without_verifying_helper(&self->cms)) != NULL)
- result = BIO_to_PyString_helper(bio);
+ if ((bio = cms_object_extract_without_verifying_helper(&self->cms)) == NULL)
+ goto error;
if (!ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, &self->manifest))
lose_openssl_error("Couldn't decode manifest");
@@ -7099,6 +7845,31 @@ manifest_object_extract_without_verifying(manifest_object *self)
return NULL;
}
+static char manifest_object_check_rpki_conformance__doc__[] =
+ "Check this manifest for conformance to the RPKI profile.\n"
+ ;
+
+static PyObject *
+manifest_object_check_rpki_conformance(manifest_object *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"status", NULL};
+ PyObject *status = Py_None;
+
+ ENTERING(manifest_object_check_rpki_conformance);
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!", kwlist, &PySet_Type, &status))
+ goto error;
+
+ if (!check_cms(self->cms.cms, status) || !check_manifest(self->cms.cms, self->manifest, status))
+ goto error;
+
+ Py_RETURN_NONE;
+
+ error:
+ return NULL;
+}
+
+
static PyObject *
manifest_object_der_read_helper(PyTypeObject *type, BIO *bio)
{
@@ -7627,6 +8398,7 @@ static struct PyMethodDef manifest_object_methods[] = {
Define_Method(addFiles, manifest_object_add_files, METH_VARARGS),
Define_Method(sign, manifest_object_sign, METH_VARARGS),
Define_Method(verify, manifest_object_verify, METH_KEYWORDS),
+ Define_Method(checkRPKIConformance, manifest_object_check_rpki_conformance, METH_KEYWORDS),
Define_Method(extractWithoutVerifying, manifest_object_extract_without_verifying, METH_NOARGS),
Define_Class_Method(pemRead, manifest_object_pem_read, METH_VARARGS),
Define_Class_Method(pemReadFile, manifest_object_pem_read_file, METH_VARARGS),
@@ -7752,14 +8524,13 @@ static char roa_object_extract_without_verifying__doc__[] =
static PyObject *
roa_object_extract_without_verifying(roa_object *self)
{
- PyObject *result = NULL;
BIO *bio = NULL;
int ok = 0;
ENTERING(roa_object_extract_without_verifying);
- if ((bio = cms_object_extract_without_verifying_helper(&self->cms)) != NULL)
- result = BIO_to_PyString_helper(bio);
+ if ((bio = cms_object_extract_without_verifying_helper(&self->cms)) == NULL)
+ goto error;
if (!ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), bio, &self->roa))
lose_openssl_error("Couldn't decode ROA");
@@ -7775,6 +8546,30 @@ roa_object_extract_without_verifying(roa_object *self)
return NULL;
}
+static char roa_object_check_rpki_conformance__doc__[] =
+ "Check this ROA for conformance to the RPKI profile.\n"
+ ;
+
+static PyObject *
+roa_object_check_rpki_conformance(roa_object *self, PyObject *args, PyObject *kwds)
+{
+ static char *kwlist[] = {"status", NULL};
+ PyObject *status = Py_None;
+
+ ENTERING(roa_object_check_rpki_conformance);
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!", kwlist, &PySet_Type, &status))
+ goto error;
+
+ if (!check_cms(self->cms.cms, status) || !check_roa(self->cms.cms, self->roa, status))
+ goto error;
+
+ Py_RETURN_NONE;
+
+ error:
+ return NULL;
+}
+
static PyObject *
roa_object_pem_read_helper(PyTypeObject *type, BIO *bio)
{
@@ -8005,14 +8800,14 @@ roa_object_get_prefixes(roa_object *self)
switch (afi) {
case IANA_AFI_IPV4: resultp = &ipv4_result; ip_type = &ipaddress_version_4; break;
case IANA_AFI_IPV6: resultp = &ipv6_result; ip_type = &ipaddress_version_6; break;
- default: lose_type_error("Unknown AFI");
+ default: lose_value_error("Unknown AFI");
}
if (fam->addressFamily->length > 2)
- lose_type_error("Unsupported SAFI");
+ lose_value_error("Unsupported SAFI");
if (*resultp != NULL)
- lose_type_error("Duplicate ROAIPAddressFamily");
+ lose_value_error("Duplicate ROAIPAddressFamily");
if ((*resultp = PyTuple_New(sk_ROAIPAddress_num(fam->addresses))) == NULL)
goto error;
@@ -8158,7 +8953,7 @@ roa_object_set_prefixes(roa_object *self, PyObject *args, PyObject *kwds)
}
if (addr->type != ip_type)
- lose_type_error("Bad ROA prefix");
+ lose_value_error("Bad ROA prefix");
if (prefixlen > addr->type->length * 8)
lose("Bad prefix length");
@@ -8290,6 +9085,7 @@ static struct PyMethodDef roa_object_methods[] = {
Define_Method(sign, roa_object_sign, METH_VARARGS),
Define_Method(verify, roa_object_verify, METH_KEYWORDS),
Define_Method(extractWithoutVerifying, roa_object_extract_without_verifying, METH_NOARGS),
+ Define_Method(checkRPKIConformance, roa_object_check_rpki_conformance, METH_KEYWORDS),
Define_Class_Method(pemRead, roa_object_pem_read, METH_VARARGS),
Define_Class_Method(pemReadFile, roa_object_pem_read_file, METH_VARARGS),
Define_Class_Method(derRead, roa_object_der_read, METH_VARARGS),
@@ -8528,10 +9324,34 @@ pkcs10_object_der_write(pkcs10_object *self)
return result;
}
-static X509_EXTENSIONS **
-pkcs10_object_extension_helper(pkcs10_object *self)
+static X509_EXTENSION *
+pkcs10_object_extension_get_helper(pkcs10_object *self, int nid)
{
- return &self->exts;
+ if (self != NULL && self->exts != NULL)
+ return X509v3_get_ext(self->exts, X509v3_get_ext_by_NID(self->exts, nid, -1));
+ else
+ return NULL;
+}
+
+static PyObject *
+pkcs10_object_extension_set_helper(pkcs10_object *self, extension_wrapper ext)
+{
+ int ok = 0;
+
+ if (ext.value == NULL)
+ goto error;
+
+ if (!X509V3_add1_i2d(&self->exts, ext.nid, ext.value, ext.critical, X509V3_ADD_REPLACE))
+ lose_openssl_error("Couldn't add extension to PKCS #10 object");
+
+ ok = 1;
+
+ error:
+ ext.destructor(ext.value);
+ if (ok)
+ Py_RETURN_NONE;
+ else
+ return NULL;
}
static char pkcs10_object_get_public_key__doc__[] =
@@ -8597,8 +9417,6 @@ static char pkcs10_object_sign__doc__[] =
"The optional \"digest\" parameter indicates which digest to compute and\n"
"sign, and should be one of the following:\n"
"\n"
- "* MD5_DIGEST\n"
- "* SHA_DIGEST\n"
"* SHA1_DIGEST\n"
"* SHA256_DIGEST\n"
"* SHA384_DIGEST\n"
@@ -8752,7 +9570,7 @@ pkcs10_object_set_subject(pkcs10_object *self, PyObject *args)
goto error;
if (!PySequence_Check(name_sequence))
- lose_type_error("Inapropriate type");
+ lose_type_error("Expected a sequence object");
if ((name = x509_object_helper_set_name(name_sequence)) == NULL)
goto error;
@@ -8776,7 +9594,7 @@ static char pkcs10_object_get_key_usage__doc__[] =
static PyObject *
pkcs10_object_get_key_usage(pkcs10_object *self)
{
- return extension_get_key_usage(pkcs10_object_extension_helper(self));
+ return extension_get_key_usage(pkcs10_object_extension_get_helper(self, NID_key_usage));
}
static char pkcs10_object_set_key_usage__doc__[] =
@@ -8788,7 +9606,7 @@ static char pkcs10_object_set_key_usage__doc__[] =
static PyObject *
pkcs10_object_set_key_usage(pkcs10_object *self, PyObject *args)
{
- return extension_set_key_usage(pkcs10_object_extension_helper(self), args);
+ return pkcs10_object_extension_set_helper(self, extension_set_key_usage(args));
}
static char pkcs10_object_get_eku__doc__[] =
@@ -8798,7 +9616,7 @@ static char pkcs10_object_get_eku__doc__[] =
static PyObject *
pkcs10_object_get_eku(pkcs10_object *self)
{
- return extension_get_eku(pkcs10_object_extension_helper(self));
+ return extension_get_eku(pkcs10_object_extension_get_helper(self, NID_ext_key_usage));
}
static char pkcs10_object_set_eku__doc__[] =
@@ -8810,7 +9628,7 @@ static char pkcs10_object_set_eku__doc__[] =
static PyObject *
pkcs10_object_set_eku(pkcs10_object *self, PyObject *args)
{
- return extension_set_eku(pkcs10_object_extension_helper(self), args);
+ return pkcs10_object_extension_set_helper(self, extension_set_eku(args));
}
static char pkcs10_object_get_basic_constraints__doc__[] =
@@ -8822,7 +9640,7 @@ static char pkcs10_object_get_basic_constraints__doc__[] =
static PyObject *
pkcs10_object_get_basic_constraints(pkcs10_object *self)
{
- return extension_get_basic_constraints(pkcs10_object_extension_helper(self));
+ return extension_get_basic_constraints(pkcs10_object_extension_get_helper(self, NID_basic_constraints));
}
static char pkcs10_object_set_basic_constraints__doc__[] =
@@ -8834,7 +9652,7 @@ static char pkcs10_object_set_basic_constraints__doc__[] =
static PyObject *
pkcs10_object_set_basic_constraints(pkcs10_object *self, PyObject *args)
{
- return extension_set_basic_constraints(pkcs10_object_extension_helper(self), args);
+ return pkcs10_object_extension_set_helper(self, extension_set_basic_constraints(args));
}
static char pkcs10_object_get_sia__doc__[] =
@@ -8846,7 +9664,7 @@ static char pkcs10_object_get_sia__doc__[] =
static PyObject *
pkcs10_object_get_sia(pkcs10_object *self)
{
- return extension_get_sia(pkcs10_object_extension_helper(self));
+ return extension_get_sia(pkcs10_object_extension_get_helper(self, NID_sinfo_access));
}
static char pkcs10_object_set_sia__doc__[] =
@@ -8858,7 +9676,7 @@ static char pkcs10_object_set_sia__doc__[] =
static PyObject *
pkcs10_object_set_sia(pkcs10_object *self, PyObject *args, PyObject *kwds)
{
- return extension_set_sia(pkcs10_object_extension_helper(self), args, kwds);
+ return pkcs10_object_extension_set_helper(self, extension_set_sia(args, kwds));
}
static char pkcs10_object_get_signature_algorithm__doc__[] =
@@ -8897,7 +9715,7 @@ pkcs10_object_get_extension_oids(pkcs10_object *self)
for (i = 0; i < sk_X509_EXTENSION_num(self->exts); i++) {
X509_EXTENSION *ext = sk_X509_EXTENSION_value(self->exts, i);
- if ((oid = ASN1_OBJECT_to_PyString(ext->object)) == NULL ||
+ if ((oid = ASN1_OBJECT_to_PyString(X509_EXTENSION_get_object(ext))) == NULL ||
PySet_Add(result, oid) < 0)
goto error;
Py_XDECREF(oid);
@@ -9082,6 +9900,128 @@ pow_module_clear_error(GCC_UNUSED PyObject *self)
Py_RETURN_NONE;
}
+static char pow_module_get_verification_errors__doc__[] =
+ "Return strings for known OpenSSL certificate verification errors.\n"
+ "Returns a list of (number, symbol, text) tuples.\n"
+ ;
+
+static PyObject *
+pow_module_get_verification_errors(GCC_UNUSED PyObject *self)
+{
+ PyObject *result = NULL, *item = NULL;
+
+ ENTERING(pow_module_get_verification_errors);
+
+ /*
+ * This function is only called once, and doesn't need to be
+ * particularly efficient, so we use a list to keep the code simple.
+ */
+
+ if ((result = PyList_New(0)) == NULL)
+ goto error;
+
+#define Verification_Error(_v_) \
+ do { \
+ const char *msg = X509_verify_cert_error_string(_v_); \
+ if ((item = Py_BuildValue("(iss)", _v_, #_v_, msg)) == NULL || \
+ PyList_Append(result, item) < 0) \
+ goto error; \
+ Py_XDECREF(item); \
+ item = NULL; \
+ } while (0)
+
+ Verification_Error( X509_V_OK );
+ Verification_Error( X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT );
+ Verification_Error( X509_V_ERR_UNABLE_TO_GET_CRL );
+ Verification_Error( X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE );
+ Verification_Error( X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE );
+ Verification_Error( X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY );
+ Verification_Error( X509_V_ERR_CERT_SIGNATURE_FAILURE );
+ Verification_Error( X509_V_ERR_CRL_SIGNATURE_FAILURE );
+ Verification_Error( X509_V_ERR_CERT_NOT_YET_VALID );
+ Verification_Error( X509_V_ERR_CERT_HAS_EXPIRED );
+ Verification_Error( X509_V_ERR_CRL_NOT_YET_VALID );
+ Verification_Error( X509_V_ERR_CRL_HAS_EXPIRED );
+ Verification_Error( X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD );
+ Verification_Error( X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD );
+ Verification_Error( X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD );
+ Verification_Error( X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD );
+ Verification_Error( X509_V_ERR_OUT_OF_MEM );
+ Verification_Error( X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT );
+ Verification_Error( X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN );
+ Verification_Error( X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY );
+ Verification_Error( X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE );
+ Verification_Error( X509_V_ERR_CERT_CHAIN_TOO_LONG );
+ Verification_Error( X509_V_ERR_CERT_REVOKED );
+ Verification_Error( X509_V_ERR_INVALID_CA );
+ Verification_Error( X509_V_ERR_PATH_LENGTH_EXCEEDED );
+ Verification_Error( X509_V_ERR_INVALID_PURPOSE );
+ Verification_Error( X509_V_ERR_CERT_UNTRUSTED );
+ Verification_Error( X509_V_ERR_CERT_REJECTED );
+ Verification_Error( X509_V_ERR_SUBJECT_ISSUER_MISMATCH );
+ Verification_Error( X509_V_ERR_AKID_SKID_MISMATCH );
+ Verification_Error( X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH );
+ Verification_Error( X509_V_ERR_KEYUSAGE_NO_CERTSIGN );
+ Verification_Error( X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER );
+ Verification_Error( X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION );
+ Verification_Error( X509_V_ERR_KEYUSAGE_NO_CRL_SIGN );
+ Verification_Error( X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION );
+ Verification_Error( X509_V_ERR_INVALID_NON_CA );
+ Verification_Error( X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED );
+ Verification_Error( X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE );
+ Verification_Error( X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED );
+ Verification_Error( X509_V_ERR_INVALID_EXTENSION );
+ Verification_Error( X509_V_ERR_INVALID_POLICY_EXTENSION );
+ Verification_Error( X509_V_ERR_NO_EXPLICIT_POLICY );
+ Verification_Error( X509_V_ERR_DIFFERENT_CRL_SCOPE );
+ Verification_Error( X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE );
+ Verification_Error( X509_V_ERR_UNNESTED_RESOURCE );
+ Verification_Error( X509_V_ERR_PERMITTED_VIOLATION );
+ Verification_Error( X509_V_ERR_EXCLUDED_VIOLATION );
+ Verification_Error( X509_V_ERR_SUBTREE_MINMAX );
+ Verification_Error( X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE );
+ Verification_Error( X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX );
+ Verification_Error( X509_V_ERR_UNSUPPORTED_NAME_SYNTAX );
+ Verification_Error( X509_V_ERR_CRL_PATH_VALIDATION_ERROR );
+#ifdef X509_V_ERR_SUITE_B_INVALID_VERSION
+ Verification_Error( X509_V_ERR_SUITE_B_INVALID_VERSION );
+#endif
+#ifdef X509_V_ERR_SUITE_B_INVALID_ALGORITHM
+ Verification_Error( X509_V_ERR_SUITE_B_INVALID_ALGORITHM );
+#endif
+#ifdef X509_V_ERR_SUITE_B_INVALID_CURVE
+ Verification_Error( X509_V_ERR_SUITE_B_INVALID_CURVE );
+#endif
+#ifdef X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM
+ Verification_Error( X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM );
+#endif
+#ifdef X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED
+ Verification_Error( X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED );
+#endif
+#ifdef X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256
+ Verification_Error( X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256 );
+#endif
+#ifdef X509_V_ERR_HOSTNAME_MISMATCH
+ Verification_Error( X509_V_ERR_HOSTNAME_MISMATCH );
+#endif
+#ifdef X509_V_ERR_EMAIL_MISMATCH
+ Verification_Error( X509_V_ERR_EMAIL_MISMATCH );
+#endif
+#ifdef X509_V_ERR_IP_ADDRESS_MISMATCH
+ Verification_Error( X509_V_ERR_IP_ADDRESS_MISMATCH );
+#endif
+ Verification_Error( X509_V_ERR_APPLICATION_VERIFICATION );
+
+#undef Verification_Error
+
+ return result;
+
+ error:
+ Py_XDECREF(result);
+ Py_XDECREF(item);
+ return NULL;
+}
+
static char pow_module_seed__doc__[] =
"Add data to OpenSSL's pseudo-random number generator state.\n"
"\n"
@@ -9217,14 +10157,15 @@ pow_module_custom_datetime(GCC_UNUSED PyObject *self, PyObject *args)
static struct PyMethodDef pow_module_methods[] = {
- Define_Method(getError, pow_module_get_error, METH_NOARGS),
- Define_Method(clearError, pow_module_clear_error, METH_NOARGS),
- Define_Method(seed, pow_module_seed, METH_VARARGS),
- Define_Method(add, pow_module_add, METH_VARARGS),
- Define_Method(readRandomFile, pow_module_read_random_file, METH_VARARGS),
- Define_Method(writeRandomFile, pow_module_write_random_file, METH_VARARGS),
- Define_Method(addObject, pow_module_add_object, METH_VARARGS),
- Define_Method(customDatetime, pow_module_custom_datetime, METH_VARARGS),
+ Define_Method(getError, pow_module_get_error, METH_NOARGS),
+ Define_Method(clearError, pow_module_clear_error, METH_NOARGS),
+ Define_Method(getVerificationErrors, pow_module_get_verification_errors, METH_NOARGS),
+ Define_Method(seed, pow_module_seed, METH_VARARGS),
+ Define_Method(add, pow_module_add, METH_VARARGS),
+ Define_Method(readRandomFile, pow_module_read_random_file, METH_VARARGS),
+ Define_Method(writeRandomFile, pow_module_write_random_file, METH_VARARGS),
+ Define_Method(addObject, pow_module_add_object, METH_VARARGS),
+ Define_Method(customDatetime, pow_module_custom_datetime, METH_VARARGS),
{NULL}
};
@@ -9274,7 +10215,6 @@ init_POW(void)
} while (0)
Define_Class(POW_X509_Type);
- Define_Class(POW_X509Store_Type);
Define_Class(POW_X509StoreCTX_Type);
Define_Class(POW_CRL_Type);
Define_Class(POW_Asymmetric_Type);
@@ -9296,6 +10236,7 @@ init_POW(void)
Define_Exception(OpenSSLError, ErrorObject);
Define_Exception(POWError, ErrorObject);
Define_Exception(NotVerifiedError, ErrorObject);
+ Define_Exception(ValidationError, ErrorObject);
#undef Define_Exception
@@ -9308,8 +10249,6 @@ init_POW(void)
Define_Integer_Constant(OIDNAME_FORMAT);
/* Message digests */
- Define_Integer_Constant(MD5_DIGEST);
- Define_Integer_Constant(SHA_DIGEST);
Define_Integer_Constant(SHA1_DIGEST);
Define_Integer_Constant(SHA256_DIGEST);
Define_Integer_Constant(SHA384_DIGEST);
@@ -9411,6 +10350,10 @@ init_POW(void)
x509_store_ctx_ex_data_idx = X509_STORE_CTX_get_ex_new_index(0, "x590_store_ctx_object for verify callback",
NULL, NULL, NULL);
+ asn1_zero = s2i_ASN1_INTEGER(NULL, "0x0");
+ asn1_four_octets = s2i_ASN1_INTEGER(NULL, "0xFFFFFFFF");
+ asn1_twenty_octets = s2i_ASN1_INTEGER(NULL, "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF");
+
if (PyErr_Occurred() || !OpenSSL_ok)
Py_FatalError("Can't initialize module POW");
}
diff --git a/h/rpki/manifest.h b/h/rpki/manifest.h
index 0e6cd531..5e5ddf67 100644
--- a/h/rpki/manifest.h
+++ b/h/rpki/manifest.h
@@ -44,8 +44,6 @@
#include <openssl/asn1t.h>
#include <openssl/cms.h>
-#include <rpki/sk_manifest.h>
-
/*
* ASN.1 templates. Not sure that ASN1_EXP_OPT() is the right macro
* for these defaulted "version" fields, but it's what the examples
@@ -66,7 +64,12 @@ typedef struct FileAndHash_st {
ASN1_BIT_STRING *hash;
} FileAndHash;
+#ifdef DEFINE_STACK_OF
+DEFINE_STACK_OF(FileAndHash)
+#else
DECLARE_STACK_OF(FileAndHash)
+#include <rpki/sk_manifest.h>
+#endif
ASN1_SEQUENCE(FileAndHash) = {
ASN1_SIMPLE(FileAndHash, file, ASN1_IA5STRING),
diff --git a/h/rpki/roa.h b/h/rpki/roa.h
index a63f726b..36f1ec5c 100644
--- a/h/rpki/roa.h
+++ b/h/rpki/roa.h
@@ -44,8 +44,6 @@
#include <openssl/asn1t.h>
#include <openssl/cms.h>
-#include <rpki/sk_roa.h>
-
/*
* ASN.1 templates. Not sure that ASN1_EXP_OPT() is the right macro
* for these defaulted "version" fields, but it's what the examples
@@ -66,7 +64,11 @@ typedef struct ROAIPAddress_st {
ASN1_INTEGER *maxLength;
} ROAIPAddress;
+#ifdef DEFINE_STACK_OF
+DEFINE_STACK_OF(ROAIPAddress)
+#else
DECLARE_STACK_OF(ROAIPAddress)
+#endif
ASN1_SEQUENCE(ROAIPAddress) = {
ASN1_SIMPLE(ROAIPAddress, IPAddress, ASN1_BIT_STRING),
@@ -78,7 +80,11 @@ typedef struct ROAIPAddressFamily_st {
STACK_OF(ROAIPAddress) *addresses;
} ROAIPAddressFamily;
+#ifdef DEFINE_STACK_OF
+DEFINE_STACK_OF(ROAIPAddressFamily)
+#else
DECLARE_STACK_OF(ROAIPAddressFamily)
+#endif
ASN1_SEQUENCE(ROAIPAddressFamily) = {
ASN1_SIMPLE(ROAIPAddressFamily, addressFamily, ASN1_OCTET_STRING),
@@ -104,6 +110,10 @@ IMPLEMENT_ASN1_FUNCTIONS(ROAIPAddress)
IMPLEMENT_ASN1_FUNCTIONS(ROAIPAddressFamily)
IMPLEMENT_ASN1_FUNCTIONS(ROA)
+#ifndef DEFINE_STACK_OF
+#include <rpki/sk_roa.h>
+#endif
+
#endif /* DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION */
#endif /* __ROA_H__ */
diff --git a/h/rpki/sk_manifest.h b/h/rpki/sk_manifest.h
index 59ff80a5..48ad6317 100644
--- a/h/rpki/sk_manifest.h
+++ b/h/rpki/sk_manifest.h
@@ -1,6 +1,6 @@
/*
* Automatically generated, do not edit.
- * Generator $Id: defstack.py 4878 2012-11-15 22:13:53Z sra $
+ * Generator $Id: defstack.py 6152 2015-10-26 06:29:00Z sra $
*/
#ifndef __RPKI_MANIFEST_H__DEFSTACK_H__
diff --git a/h/rpki/sk_roa.h b/h/rpki/sk_roa.h
index 13036955..7952e3c2 100644
--- a/h/rpki/sk_roa.h
+++ b/h/rpki/sk_roa.h
@@ -1,6 +1,6 @@
/*
* Automatically generated, do not edit.
- * Generator $Id: defstack.py 4878 2012-11-15 22:13:53Z sra $
+ * Generator $Id: defstack.py 6152 2015-10-26 06:29:00Z sra $
*/
#ifndef __RPKI_ROA_H__DEFSTACK_H__
diff --git a/potpourri/analyze-rcynic-history.py b/potpourri/analyze-rcynic-history.py
index 648538cc..c0836ab2 100644
--- a/potpourri/analyze-rcynic-history.py
+++ b/potpourri/analyze-rcynic-history.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2011-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -35,201 +35,201 @@ from xml.etree.cElementTree import (ElementTree as ElementTree,
fromstring as ElementTreeFromString)
def parse_utc(s):
- return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
+ return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ")
class Rsync_History(object):
- """
- An Rsync_History object represents one rsync connection.
- """
+ """
+ An Rsync_History object represents one rsync connection.
+ """
- def __init__(self, elt):
- self.error = elt.get("error")
- self.uri = elt.text.strip()
- self.hostname = urlparse.urlparse(self.uri).hostname or None
- self.elapsed = parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
+ def __init__(self, elt):
+ self.error = elt.get("error")
+ self.uri = elt.text.strip()
+ self.hostname = urlparse.urlparse(self.uri).hostname or None
+ self.elapsed = parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
class Host(object):
- """
- A host object represents all the data collected for one host. Note
- that it (usually) contains a list of all the sessions in which this
- host appears.
- """
-
- def __init__(self, hostname, session_id):
- self.hostname = hostname
- self.session_id = session_id
- self.elapsed = datetime.timedelta(0)
- self.connection_count = 0
- self.dead_connections = 0
- self.uris = set()
- self.total_connection_time = datetime.timedelta(0)
-
- def add_rsync_history(self, h):
- self.connection_count += 1
- self.elapsed += h.elapsed
- self.dead_connections += int(h.error is not None)
- self.total_connection_time += h.elapsed
-
- def add_uri(self, u):
- self.uris.add(u)
-
- def finalize(self):
- self.object_count = len(self.uris)
- del self.uris
-
- @property
- def failed(self):
- return 1 if self.dead_connections else 0
-
- @property
- def seconds_per_object(self):
- if self.failed:
- return None
- else:
- return float(self.elapsed.days * 24 * 60 * 60 +
- self.elapsed.seconds +
- self.elapsed.microseconds / 10**6) / float(self.object_count)
-
- @property
- def objects_per_connection(self):
- if self.failed:
- return None
- else:
- return float(self.object_count) / float(self.connection_count)
-
- @property
- def average_connection_time(self):
- return float(self.total_connection_time.days * 24 * 60 * 60 +
- self.total_connection_time.seconds +
- self.total_connection_time.microseconds / 10**6) / float(self.connection_count)
-
- class Format(object):
-
- def __init__(self, attr, title, fmt, ylabel = ""):
- self.attr = attr
- self.title = title
- self.width = len(title) - int("%" in fmt)
- self.fmt = "%%%d%s" % (self.width, fmt)
- self.oops = "*" * self.width
- self.ylabel = ylabel
-
- def __call__(self, obj):
- try:
- value = getattr(obj, self.attr)
- return None if value is None else self.fmt % value
- except ZeroDivisionError:
- return self.oops
-
- format = (Format("connection_count", "Connections", "d", "Connections To Repository (Per Session)"),
- Format("object_count", "Objects", "d", "Objects In Repository (Distinct URIs Per Session)"),
- Format("objects_per_connection", "Objects/Connection", ".3f", "Objects In Repository / Connections To Repository"),
- Format("seconds_per_object", "Seconds/Object", ".3f", "Seconds To Transfer / Object (Average Per Session)"),
- Format("failure_rate_running", "Failure Rate", ".3f%%", "Sessions With Failed Connections Within Last %d Hours" % window_hours),
- Format("average_connection_time", "Average Connection", ".3f", "Seconds / Connection (Average Per Session)"),
- Format("hostname", "Hostname", "s"))
-
- format_dict = dict((fmt.attr, fmt) for fmt in format)
-
- def format_field(self, name):
- result = self.format_dict[name](self)
- return None if result is None else result.strip()
+ """
+ A host object represents all the data collected for one host. Note
+ that it (usually) contains a list of all the sessions in which this
+ host appears.
+ """
+
+ def __init__(self, hostname, session_id):
+ self.hostname = hostname
+ self.session_id = session_id
+ self.elapsed = datetime.timedelta(0)
+ self.connection_count = 0
+ self.dead_connections = 0
+ self.uris = set()
+ self.total_connection_time = datetime.timedelta(0)
+
+ def add_rsync_history(self, h):
+ self.connection_count += 1
+ self.elapsed += h.elapsed
+ self.dead_connections += int(h.error is not None)
+ self.total_connection_time += h.elapsed
+
+ def add_uri(self, u):
+ self.uris.add(u)
+
+ def finalize(self):
+ self.object_count = len(self.uris)
+ del self.uris
+
+ @property
+ def failed(self):
+ return 1 if self.dead_connections else 0
+
+ @property
+ def seconds_per_object(self):
+ if self.failed:
+ return None
+ else:
+ return float(self.elapsed.days * 24 * 60 * 60 +
+ self.elapsed.seconds +
+ self.elapsed.microseconds / 10**6) / float(self.object_count)
+
+ @property
+ def objects_per_connection(self):
+ if self.failed:
+ return None
+ else:
+ return float(self.object_count) / float(self.connection_count)
+
+ @property
+ def average_connection_time(self):
+ return float(self.total_connection_time.days * 24 * 60 * 60 +
+ self.total_connection_time.seconds +
+ self.total_connection_time.microseconds / 10**6) / float(self.connection_count)
+
+ class Format(object):
+
+ def __init__(self, attr, title, fmt, ylabel = ""):
+ self.attr = attr
+ self.title = title
+ self.width = len(title) - int("%" in fmt)
+ self.fmt = "%%%d%s" % (self.width, fmt)
+ self.oops = "*" * self.width
+ self.ylabel = ylabel
+
+ def __call__(self, obj):
+ try:
+ value = getattr(obj, self.attr)
+ return None if value is None else self.fmt % value
+ except ZeroDivisionError:
+ return self.oops
+
+ format = (Format("connection_count", "Connections", "d", "Connections To Repository (Per Session)"),
+ Format("object_count", "Objects", "d", "Objects In Repository (Distinct URIs Per Session)"),
+ Format("objects_per_connection", "Objects/Connection", ".3f", "Objects In Repository / Connections To Repository"),
+ Format("seconds_per_object", "Seconds/Object", ".3f", "Seconds To Transfer / Object (Average Per Session)"),
+ Format("failure_rate_running", "Failure Rate", ".3f%%", "Sessions With Failed Connections Within Last %d Hours" % window_hours),
+ Format("average_connection_time", "Average Connection", ".3f", "Seconds / Connection (Average Per Session)"),
+ Format("hostname", "Hostname", "s"))
+
+ format_dict = dict((fmt.attr, fmt) for fmt in format)
+
+ def format_field(self, name):
+ result = self.format_dict[name](self)
+ return None if result is None else result.strip()
class Session(dict):
- """
- A session corresponds to one XML file. This is a dictionary of Host
- objects, keyed by hostname.
- """
-
- def __init__(self, session_id, msg_key):
- self.session_id = session_id
- self.msg_key = msg_key
- self.date = parse_utc(session_id)
- self.calculated_failure_history = False
-
- @property
- def hostnames(self):
- return set(self.iterkeys())
-
- def get_plot_row(self, name, hostnames):
- return (self.session_id,) + tuple(self[h].format_field(name) if h in self else "" for h in hostnames)
-
- def add_rsync_history(self, h):
- if h.hostname not in self:
- self[h.hostname] = Host(h.hostname, self.session_id)
- self[h.hostname].add_rsync_history(h)
-
- def add_uri(self, u):
- h = urlparse.urlparse(u).hostname
- if h and h in self:
- self[h].add_uri(u)
-
- def finalize(self):
- for h in self.itervalues():
- h.finalize()
-
- def calculate_failure_history(self, sessions):
- start = self.date - datetime.timedelta(hours = window_hours)
- sessions = tuple(s for s in sessions if s.date <= self.date and s.date > start)
- for hostname, h in self.iteritems():
- i = n = 0
- for s in sessions:
- if hostname in s:
- i += s[hostname].failed
- n += 1
- h.failure_rate_running = float(100 * i) / n
- self.calculated_failure_history = True
+ """
+ A session corresponds to one XML file. This is a dictionary of Host
+ objects, keyed by hostname.
+ """
+
+ def __init__(self, session_id, msg_key):
+ self.session_id = session_id
+ self.msg_key = msg_key
+ self.date = parse_utc(session_id)
+ self.calculated_failure_history = False
+
+ @property
+ def hostnames(self):
+ return set(self.iterkeys())
+
+ def get_plot_row(self, name, hostnames):
+ return (self.session_id,) + tuple(self[h].format_field(name) if h in self else "" for h in hostnames)
+
+ def add_rsync_history(self, h):
+ if h.hostname not in self:
+ self[h.hostname] = Host(h.hostname, self.session_id)
+ self[h.hostname].add_rsync_history(h)
+
+ def add_uri(self, u):
+ h = urlparse.urlparse(u).hostname
+ if h and h in self:
+ self[h].add_uri(u)
+
+ def finalize(self):
+ for h in self.itervalues():
+ h.finalize()
+
+ def calculate_failure_history(self, sessions):
+ start = self.date - datetime.timedelta(hours = window_hours)
+ sessions = tuple(s for s in sessions if s.date <= self.date and s.date > start)
+ for hostname, h in self.iteritems():
+ i = n = 0
+ for s in sessions:
+ if hostname in s:
+ i += s[hostname].failed
+ n += 1
+ h.failure_rate_running = float(100 * i) / n
+ self.calculated_failure_history = True
def plotter(f, hostnames, field, logscale = False):
- plotlines = sorted(session.get_plot_row(field, hostnames) for session in sessions)
- title = Host.format_dict[field].title
- ylabel = Host.format_dict[field].ylabel
- n = len(hostnames) + 1
- assert all(n == len(plotline) for plotline in plotlines)
- if "%%" in Host.format_dict[field].fmt:
- f.write('set format y "%.0f%%"\n')
- else:
- f.write('set format y\n')
- if logscale:
- f.write("set logscale y\n")
- else:
- f.write("unset logscale y\n")
- f.write("""
- set xdata time
- set timefmt '%Y-%m-%dT%H:%M:%SZ'
- #set format x '%m/%d'
- #set format x '%b%d'
- #set format x '%Y-%m-%d'
- set format x '%Y-%m'
- #set title '""" + title + """'
- set ylabel '""" + ylabel + """'
- plot""" + ",".join(" '-' using 1:2 with linespoints pointinterval 500 title '%s'" % h for h in hostnames) + "\n")
- for i in xrange(1, n):
- for plotline in plotlines:
- if plotline[i] is not None:
- f.write("%s %s\n" % (plotline[0], plotline[i].rstrip("%")))
- f.write("e\n")
+ plotlines = sorted(session.get_plot_row(field, hostnames) for session in sessions)
+ title = Host.format_dict[field].title
+ ylabel = Host.format_dict[field].ylabel
+ n = len(hostnames) + 1
+ assert all(n == len(plotline) for plotline in plotlines)
+ if "%%" in Host.format_dict[field].fmt:
+ f.write('set format y "%.0f%%"\n')
+ else:
+ f.write('set format y\n')
+ if logscale:
+ f.write("set logscale y\n")
+ else:
+ f.write("unset logscale y\n")
+ f.write("""
+ set xdata time
+ set timefmt '%Y-%m-%dT%H:%M:%SZ'
+ #set format x '%m/%d'
+ #set format x '%b%d'
+ #set format x '%Y-%m-%d'
+ set format x '%Y-%m'
+ #set title '""" + title + """'
+ set ylabel '""" + ylabel + """'
+ plot""" + ",".join(" '-' using 1:2 with linespoints pointinterval 500 title '%s'" % h for h in hostnames) + "\n")
+ for i in xrange(1, n):
+ for plotline in plotlines:
+ if plotline[i] is not None:
+ f.write("%s %s\n" % (plotline[0], plotline[i].rstrip("%")))
+ f.write("e\n")
def plot_hosts(hostnames, fields):
- for field in fields:
- for logscale in (False, True):
- gnuplot = subprocess.Popen(("gnuplot",), stdin = subprocess.PIPE)
- gnuplot.stdin.write("set terminal pdf\n")
- gnuplot.stdin.write("set output '%s/%s-%s.pdf'\n" % (outdir, field, "log" if logscale else "linear"))
- plotter(gnuplot.stdin, hostnames, field, logscale = logscale)
- gnuplot.stdin.close()
- gnuplot.wait()
+ for field in fields:
+ for logscale in (False, True):
+ gnuplot = subprocess.Popen(("gnuplot",), stdin = subprocess.PIPE)
+ gnuplot.stdin.write("set terminal pdf\n")
+ gnuplot.stdin.write("set output '%s/%s-%s.pdf'\n" % (outdir, field, "log" if logscale else "linear"))
+ plotter(gnuplot.stdin, hostnames, field, logscale = logscale)
+ gnuplot.stdin.close()
+ gnuplot.wait()
outdir = "images"
if not os.path.exists(outdir):
- os.makedirs(outdir)
+ os.makedirs(outdir)
mb = mailbox.Maildir("/u/sra/rpki/rcynic-xml", factory = None, create = False)
if sys.platform == "darwin": # Sigh
- shelf = shelve.open("rcynic-xml", "c")
+ shelf = shelve.open("rcynic-xml", "c")
else:
- shelf = shelve.open("rcynic-xml.db", "c")
+ shelf = shelve.open("rcynic-xml.db", "c")
sessions = []
@@ -237,55 +237,55 @@ latest = None
parsed = 0
for i, key in enumerate(mb.iterkeys(), 1):
- sys.stderr.write("\r%s %d/%d/%d..." % ("|\\-/"[i & 3], parsed, i, len(mb)))
-
- if key in shelf:
- session = shelf[key]
-
- else:
- sys.stderr.write("%s..." % key)
- assert not mb[key].is_multipart()
- input = ElementTreeFromString(mb[key].get_payload())
- date = input.get("date")
- sys.stderr.write("%s..." % date)
- session = Session(date, key)
- for elt in input.findall("rsync_history"):
- session.add_rsync_history(Rsync_History(elt))
- for elt in input.findall("validation_status"):
- if elt.get("generation") == "current":
- session.add_uri(elt.text.strip())
- session.finalize()
- shelf[key] = session
- parsed += 1
-
- sessions.append(session)
- if latest is None or session.session_id > latest.session_id:
- latest = session
+ sys.stderr.write("\r%s %d/%d/%d..." % ("|\\-/"[i & 3], parsed, i, len(mb)))
+
+ if key in shelf:
+ session = shelf[key]
+
+ else:
+ sys.stderr.write("%s..." % key)
+ assert not mb[key].is_multipart()
+ input = ElementTreeFromString(mb[key].get_payload())
+ date = input.get("date")
+ sys.stderr.write("%s..." % date)
+ session = Session(date, key)
+ for elt in input.findall("rsync_history"):
+ session.add_rsync_history(Rsync_History(elt))
+ for elt in input.findall("validation_status"):
+ if elt.get("generation") == "current":
+ session.add_uri(elt.text.strip())
+ session.finalize()
+ shelf[key] = session
+ parsed += 1
+
+ sessions.append(session)
+ if latest is None or session.session_id > latest.session_id:
+ latest = session
sys.stderr.write("\n")
shelf.sync()
for session in sessions:
- if not getattr(session, "calculated_failure_history", False):
- session.calculate_failure_history(sessions)
- shelf[session.msg_key] = session
+ if not getattr(session, "calculated_failure_history", False):
+ session.calculate_failure_history(sessions)
+ shelf[session.msg_key] = session
if plot_all_hosts:
- hostnames = sorted(reduce(lambda x, y: x | y,
- (s.hostnames for s in sessions),
- set()))
+ hostnames = sorted(reduce(lambda x, y: x | y,
+ (s.hostnames for s in sessions),
+ set()))
else:
- hostnames = ("rpki.apnic.net", "rpki.ripe.net", "repository.lacnic.net", "rpki.afrinic.net", "rpki.arin.net",
- #"localcert.ripe.net", "arin.rpki.net", "repo0.rpki.net", "rgnet.rpki.net",
- "ca0.rpki.net")
+ hostnames = ("rpki.apnic.net", "rpki.ripe.net", "repository.lacnic.net", "rpki.afrinic.net", "rpki.arin.net",
+ #"localcert.ripe.net", "arin.rpki.net", "repo0.rpki.net", "rgnet.rpki.net",
+ "ca0.rpki.net")
plot_hosts(hostnames, [fmt.attr for fmt in Host.format if fmt.attr != "hostname"])
if latest is not None:
- f = open("rcynic.xml", "wb")
- f.write(mb[latest.msg_key].get_payload())
- f.close()
+ f = open("rcynic.xml", "wb")
+ f.write(mb[latest.msg_key].get_payload())
+ f.close()
shelf.close()
diff --git a/potpourri/analyze-transition.py b/potpourri/analyze-transition.py
index e2125dfb..9f7928dc 100644
--- a/potpourri/analyze-transition.py
+++ b/potpourri/analyze-transition.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
-#
+#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -21,44 +21,44 @@ Compare rcynic.xml files, tell the user what became invalid, and why.
import sys
try:
- from lxml.etree import ElementTree
+ from lxml.etree import ElementTree
except ImportError:
- from xml.etree.ElementTree import ElementTree
+ from xml.etree.ElementTree import ElementTree
class Object(object):
- def __init__(self, session, uri):
- self.session = session
- self.uri = uri
- self.labels = []
+ def __init__(self, session, uri):
+ self.session = session
+ self.uri = uri
+ self.labels = []
- def add(self, label):
- self.labels.append(label)
+ def add(self, label):
+ self.labels.append(label)
- def __cmp__(self, other):
- return cmp(self.labels, other.labels)
+ def __cmp__(self, other):
+ return cmp(self.labels, other.labels)
- @property
- def accepted(self):
- return "object_accepted" in self.labels
+ @property
+ def accepted(self):
+ return "object_accepted" in self.labels
class Session(dict):
- def __init__(self, name):
- self.name = name
- tree = ElementTree(file = name)
- labels = tuple((elt.tag.strip(), elt.text.strip()) for elt in tree.find("labels"))
- self.labels = tuple(pair[0] for pair in labels)
- self.descrs = dict(labels)
- self.date = tree.getroot().get("date")
- for elt in tree.findall("validation_status"):
- status = elt.get("status")
- uri = elt.text.strip()
- if status.startswith("rsync_transfer_") or elt.get("generation") != "current":
- continue
- if uri not in self:
- self[uri] = Object(self, uri)
- self[uri].add(status)
+ def __init__(self, name):
+ self.name = name
+ tree = ElementTree(file = name)
+ labels = tuple((elt.tag.strip(), elt.text.strip()) for elt in tree.find("labels"))
+ self.labels = tuple(pair[0] for pair in labels)
+ self.descrs = dict(labels)
+ self.date = tree.getroot().get("date")
+ for elt in tree.findall("validation_status"):
+ status = elt.get("status")
+ uri = elt.text.strip()
+ if status.startswith("rsync_transfer_") or elt.get("generation") != "current":
+ continue
+ if uri not in self:
+ self[uri] = Object(self, uri)
+ self[uri].add(status)
skip_labels = frozenset(("object_accepted", "object_rejected"))
@@ -66,23 +66,23 @@ old_db = new_db = None
for arg in sys.argv[1:]:
- old_db = new_db
- new_db = Session(arg)
- if old_db is None:
- continue
-
- old_uris = frozenset(old_db)
- new_uris = frozenset(new_db)
-
- for uri in sorted(old_uris - new_uris):
- print new_db.date, uri, "dropped"
-
- for uri in sorted(old_uris & new_uris):
- old = old_db[uri]
- new = new_db[uri]
- if old.accepted and not new.accepted:
- print new_db.date, uri, "invalid"
- labels = frozenset(new.labels) - frozenset(old.labels) - skip_labels
- for label in new.labels:
- if label in labels:
- print " ", new_db.descrs[label]
+ old_db = new_db
+ new_db = Session(arg)
+ if old_db is None:
+ continue
+
+ old_uris = frozenset(old_db)
+ new_uris = frozenset(new_db)
+
+ for uri in sorted(old_uris - new_uris):
+ print new_db.date, uri, "dropped"
+
+ for uri in sorted(old_uris & new_uris):
+ old = old_db[uri]
+ new = new_db[uri]
+ if old.accepted and not new.accepted:
+ print new_db.date, uri, "invalid"
+ labels = frozenset(new.labels) - frozenset(old.labels) - skip_labels
+ for label in new.labels:
+ if label in labels:
+ print " ", new_db.descrs[label]
diff --git a/potpourri/apnic-to-csv.py b/potpourri/apnic-to-csv.py
index 62293a51..83f5388b 100644
--- a/potpourri/apnic-to-csv.py
+++ b/potpourri/apnic-to-csv.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2010-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -27,29 +27,29 @@ prefixes = csv_writer("prefixes.csv")
for line in open("delegated-apnic-extended-latest"):
- line = line.rstrip()
+ line = line.rstrip()
- if not line.startswith("apnic|") or line.endswith("|summary"):
- continue
+ if not line.startswith("apnic|") or line.endswith("|summary"):
+ continue
- try:
- registry, cc, rectype, start, value, date, status, opaque_id = line.split("|")
- except ValueError:
- continue
+ try:
+ registry, cc, rectype, start, value, date, status, opaque_id = line.split("|")
+ except ValueError:
+ continue
- if not opaque_id:
- continue
+ if not opaque_id:
+ continue
- assert registry == "apnic"
+ assert registry == "apnic"
- if rectype == "asn":
- asns.writerow((opaque_id, "%s-%s" % (start, int(start) + int(value) - 1)))
+ if rectype == "asn":
+ asns.writerow((opaque_id, "%s-%s" % (start, int(start) + int(value) - 1)))
- elif rectype == "ipv4":
- prefixes.writerow((opaque_id, "%s-%s" % (start, v4addr(v4addr(start) + long(value) - 1))))
+ elif rectype == "ipv4":
+ prefixes.writerow((opaque_id, "%s-%s" % (start, v4addr(v4addr(start) + long(value) - 1))))
- elif rectype == "ipv6":
- prefixes.writerow((opaque_id, "%s/%s" % (start, value)))
+ elif rectype == "ipv6":
+ prefixes.writerow((opaque_id, "%s/%s" % (start, value)))
asns.close()
prefixes.close()
diff --git a/potpourri/arin-to-csv.py b/potpourri/arin-to-csv.py
index a4e7ffc3..a4b7f285 100644
--- a/potpourri/arin-to-csv.py
+++ b/potpourri/arin-to-csv.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2009-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -34,9 +34,9 @@ import lxml.etree
from rpki.csv_utils import csv_writer
def ns(tag):
- return "{http://www.arin.net/bulkwhois/core/v1}" + tag
+ return "{http://www.arin.net/bulkwhois/core/v1}" + tag
-tag_asn = ns("asn")
+tag_asn = ns("asn")
tag_net = ns("net")
tag_org = ns("org")
tag_poc = ns("poc")
@@ -49,12 +49,12 @@ tag_startAsNumber = ns("startAsNumber")
tag_endAsNumber = ns("endAsNumber")
def find(node, tag):
- return node.findtext(tag).strip()
+ return node.findtext(tag).strip()
def do_asn(node):
- asns.writerow((find(node, tag_orgHandle),
- "%s-%s" % (find(node, tag_startAsNumber),
- find(node, tag_endAsNumber))))
+ asns.writerow((find(node, tag_orgHandle),
+ "%s-%s" % (find(node, tag_startAsNumber),
+ find(node, tag_endAsNumber))))
erx_table = {
"AF" : "afrinic",
@@ -71,19 +71,19 @@ erx_table = {
"RX" : "ripe" }
def do_net(node):
- handle = find(node, tag_orgHandle)
- for netblock in node.iter(tag_netBlock):
- tag = find(netblock, tag_type)
- startAddress = find(netblock, tag_startAddress)
- endAddress = find(netblock, tag_endAddress)
- if not startAddress.endswith(".000") and not startAddress.endswith(":0000"):
- continue
- if not endAddress.endswith(".255") and not endAddress.endswith(":FFFF"):
- continue
- if tag in ("DS", "DA", "IU"):
- prefixes.writerow((handle, "%s-%s" % (startAddress, endAddress)))
- elif tag in erx_table:
- erx.writerow((erx_table[tag], "%s-%s" % (startAddress, endAddress)))
+ handle = find(node, tag_orgHandle)
+ for netblock in node.iter(tag_netBlock):
+ tag = find(netblock, tag_type)
+ startAddress = find(netblock, tag_startAddress)
+ endAddress = find(netblock, tag_endAddress)
+ if not startAddress.endswith(".000") and not startAddress.endswith(":0000"):
+ continue
+ if not endAddress.endswith(".255") and not endAddress.endswith(":FFFF"):
+ continue
+ if tag in ("DS", "DA", "IU"):
+ prefixes.writerow((handle, "%s-%s" % (startAddress, endAddress)))
+ elif tag in erx_table:
+ erx.writerow((erx_table[tag], "%s-%s" % (startAddress, endAddress)))
dispatch = { tag_asn : do_asn, tag_net : do_net }
@@ -95,19 +95,19 @@ root = None
for event, node in lxml.etree.iterparse(sys.stdin):
- if root is None:
- root = node
- while root.getparent() is not None:
- root = root.getparent()
+ if root is None:
+ root = node
+ while root.getparent() is not None:
+ root = root.getparent()
- if node.getparent() is root:
+ if node.getparent() is root:
- if node.tag in dispatch:
- dispatch[node.tag](node)
+ if node.tag in dispatch:
+ dispatch[node.tag](node)
- node.clear()
- while node.getprevious() is not None:
- del node.getparent()[0]
+ node.clear()
+ while node.getprevious() is not None:
+ del node.getparent()[0]
asns.close()
prefixes.close()
diff --git a/potpourri/ca-unpickle.py b/potpourri/ca-unpickle.py
index 5f255d8f..3ddee10b 100755
--- a/potpourri/ca-unpickle.py
+++ b/potpourri/ca-unpickle.py
@@ -3,24 +3,1283 @@
# $Id$
"""
-Unpickle CA state packaged by ca-pickle.
-
-This version is a stub, and exists only to test ca-pickle.
+Unpickle trunk/ CA state packaged by ca-pickle and attempt to whack a
+tk705/ rpki-ca instance into an equivalent state.
"""
+# We need to fork separate processes to handle different databases
+# (well, OK, there are other ways we could to this, but forks are
+# by far the simplest). So we organize the database-specific bits
+# as separate functions, one per database to be whacked, and handle
+# the fork management in a common loop.
+
+import os
import sys
+import uuid
+import time
import cPickle
+import tempfile
+import datetime
import argparse
+import urlparse
import subprocess
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("input", help = "input file")
-args = parser.parse_args()
+import rpki.config
+import rpki.x509
+import rpki.POW
+import rpki.resource_set
+
+from base64 import urlsafe_b64encode
+
+class LazyDict(object):
+ """
+ Convenience wrapper to allow attribute notation for brevity
+ when diving into deeply nested mappings created by ca-pickle.
+ """
+
+ def __init__(self, *args, **kwargs):
+ #self._d = dict(*args, **kwargs)
+ self.__dict__["_d"] = dict(*args, **kwargs)
+ for k, v in self._d.iteritems():
+ self._d[k] = self._insinuate(v)
+
+ def __getattr__(self, name):
+ if name in self._d:
+ return self._d[name]
+ name = name.replace("_", "-")
+ if name in self._d:
+ return self._d[name]
+ raise AttributeError
+
+ def __setattr__(self, name, value):
+ if name in self._d:
+ self._d[name] = value
+ else:
+ raise AttributeError
+
+ def __getitem__(self, name):
+ return self._d[name]
+
+ def __iter__(self):
+ return self._d.iterkeys()
+
+ def __len__(self):
+ return len(self._d)
+
+ def __repr__(self):
+ return repr(self._d)
+
+ @classmethod
+ def _insinuate(cls, thing):
+ if isinstance(thing, dict):
+ return cls(thing)
+ if isinstance(thing, list):
+ return list(cls._insinuate(v) for v in thing)
+ if isinstance(thing, tuple):
+ return tuple(cls._insinuate(v) for v in thing)
+ return thing
+
+
+class FixURI(object):
+ """
+ Clean up URIs. Mostly this means adjusting port numbers as necessary
+ to accommodate differences between pickled and current rpki.conf.
+ """
+
+ def __init__(self, cfg, args, world):
+ fmt = "{host}:{port}".format
+ self.old_rpkid = fmt(host = world.cfg.rpkid.server_host,
+ port = world.cfg.rpkid.server_port)
+ self.new_rpkid = fmt(host = cfg.get(section = "rpkid", option = "server-host"),
+ port = cfg.get(section = "rpkid", option = "server-port"))
+ self.old_pubd = fmt(host = world.cfg.pubd.server_host,
+ port = world.cfg.pubd.server_port)
+ self.new_pubd = fmt(host = cfg.get(section = "pubd", option = "server-host"),
+ port = cfg.get(section = "pubd", option = "server-port"))
+ self.old_irdbd = fmt(host = world.cfg.irdbd.server_host,
+ port = world.cfg.irdbd.server_port)
+ self.new_irdbd = fmt(host = cfg.get(section = "irdbd", option = "server-host"),
+ port = cfg.get(section = "irdbd", option = "server-port"))
+ self.old_rsyncd = world.cfg.myrpki.publication_rsync_server
+ self.new_rsyncd = cfg.get(section = "myrpki",
+ option = "publication_rsync_server")
+
+ def _fix(self, uri, scheme, old_netloc, new_netloc):
+ u = urlparse.urlparse(uri)
+ uri = urlparse.urlunparse(u)
+ old = urlparse.urlunparse((scheme, old_netloc) + u[2:])
+ new = urlparse.urlunparse((scheme, new_netloc) + u[2:])
+ return new if uri == old or not u.netloc else uri
+
+ def rpkid(self, uri): return self._fix(uri, "http", self.old_rpkid, self.new_rpkid)
+ def pubd(self, uri): return self._fix(uri, "http", self.old_pubd, self.new_pubd)
+ def irdbd(self, uri): return self._fix(uri, "http", self.old_irdbd, self.new_irdbd)
+ def rsyncd(self, uri): return self._fix(uri, "rsync", self.old_rsyncd, self.new_rsyncd)
+
+
+# None-safe wrappers for ASN.1 constructors.
+def NoneSafe(obj, cls):
+ if obj is None:
+ return None
+ elif "-----BEGIN" in obj:
+ return cls(PEM = obj)
+ else:
+ return cls(DER = obj)
+
+def X509(obj): return NoneSafe(obj, rpki.x509.X509)
+def CRL(obj): return NoneSafe(obj, rpki.x509.CRL)
+def RSA(obj): return NoneSafe(obj, rpki.x509.RSA)
+def PKCS10(obj): return NoneSafe(obj, rpki.x509.PKCS10)
+def MFT(obj): return NoneSafe(obj, rpki.x509.SignedManifest)
+def ROA(obj): return NoneSafe(obj, rpki.x509.ROA)
+def GBR(obj): return NoneSafe(obj, rpki.x509.Ghostbuster)
+def REF(obj): return NoneSafe(obj, rpki.x509.SignedReferral)
+
+# Other conversions
+
+def SKI_to_gSKI(ski):
+ return None if ski is None else urlsafe_b64encode(ski).rstrip("=")
+
+def cfg_to_Bool(v):
+ from ConfigParser import RawConfigParser
+ states = RawConfigParser._boolean_states
+ return states[v.lower()]
+
+# Silly formatting
+
+def show_model(db, model):
+ print db, model
+
+def show_instance(id, handle = None):
+ if handle:
+ print " ", id, handle
+ else:
+ print " ", id
+
+# Smoke 'em if you got 'em
+
+def main():
+
+ os.environ.update(TZ = "UTC")
+ time.tzset()
+
+ cfg = rpki.config.argparser(doc = __doc__)
+ cfg.argparser.add_argument("--rootd", action = "store_true",
+ help = "enable extra processing for rootd transitions")
+ cfg.add_logging_arguments()
+ cfg.argparser.add_argument("input_file", help = "input file")
+ args = cfg.argparser.parse_args()
+ cfg.configure_logging(args = args)
+
+ xzcat = subprocess.Popen(("xzcat", args.input_file), stdout = subprocess.PIPE)
+ world = LazyDict(cPickle.load(xzcat.stdout))
+ if xzcat.wait() != 0:
+ sys.exit("XZ unpickling failed with code {}".format(xzcat.returncode))
+
+ fixuri = FixURI(cfg, args, world)
+
+ root = Root(cfg, args, world, fixuri)
+
+ if root.enabled:
+ print "Pickled configuration included rootd"
+ else:
+ print "Pickled configuration did not include rootd"
+
+ for enabled, handler in ((world.cfg.myrpki.run_rpkid, rpkid_handler),
+ (world.cfg.myrpki.run_rpkid, irdb_handler),
+ (world.cfg.myrpki.run_pubd, pubd_handler)):
+ if not cfg_to_Bool(enabled):
+ continue
+ if os.fork() == 0:
+ handler(cfg, args, world, root, fixuri)
+ sys.exit()
+ else:
+ pid, status = os.wait()
+ if status and os.WIFEXITED(status):
+ sys.exit("Internal process exited with status {}".format(os.WEXITSTATUS(status)))
+ if status and os.WIFSIGNALED(status):
+ sys.exit("Internal process exited on signal {}".format(os.WTERMSIG(status)))
+
+
+class Root(object):
+
+ @staticmethod
+ def iter_get(iterable):
+ result = tuple(iterable)
+ if len(result) == 1:
+ return result[0]
+ else:
+ raise RuntimeError("Iterable returned {} results, expected one".format(len(result)))
+
+ def __init__(self, cfg, args, world, fixuri):
+
+ self.enabled = cfg_to_Bool(world.cfg.myrpki.run_rootd) and args.rootd
+
+ if not self.enabled:
+ return
+
+ r = world.cfg.rootd
+ d = os.path.join(r.rpki_root_dir, "")
+
+ rpki_root_cer = X509(world.file[ r.rpki_root_cert ])
+ rpki_root_key = RSA( world.file[ r.rpki_root_key ])
+ rpki_root_crl = CRL( world.file[d + r.rpki_root_crl ])
+ rpki_root_mft = MFT( world.file[d + r.rpki_root_manifest])
+ rpki_work_cer = X509(world.file[d + r.rpki_subject_cert ])
+
+ rootd_bpki_ta = X509(world.file[ r.bpki_ta ])
+ rootd_bpki_cer = X509(world.file[ r.rootd_bpki_cert ])
+ rootd_bpki_key = RSA( world.file[ r.rootd_bpki_key ])
+ child_bpki_cer = X509(world.file[ r.child_bpki_cert ])
+
+ rpki_root_resources = rpki_root_cer.get_3779resources()
+ rpki_root_class_name = r.rpki_class_name
+
+ rpki_root_mft_key = rpki.x509.RSA.generate()
+
+ # Maybe we'll figure out a prettier handle to use later
+ root_handle = str(uuid.uuid4())
+
+ rpki_root_last_serial = max(
+ rpki_root_cer.getSerial(),
+ rpki_work_cer.getSerial(),
+ self.iter_get(rpki_root_mft.get_POW().certs()).getSerial())
+
+ rpki_root_mft.extract()
+
+ rpki_root_last_crl_manifest_number = max(
+ rpki_root_mft.get_POW().getManifestNumber(),
+ rpki_root_crl.getCRLNumber())
+
+ rootd_turtle = self.iter_get(row for row in world.db.irdbd.irdb_turtle
+ if row.id not in
+ frozenset(p.turtle_ptr_id for p in world.db.irdbd.irdb_parent))
+ self.rootd_turtle_id = rootd_turtle.id
+
+ serverca = self.iter_get(world.db.irdbd.irdb_serverca)
+ serverca_cer = X509(serverca.certificate)
+ serverca_key = RSA(serverca.private_key)
+
+ rootd = self.iter_get(world.db.irdbd.irdb_rootd)
+
+ work_resourceholderca = self.iter_get(row for row in world.db.irdbd.irdb_resourceholderca
+ if row.id == rootd.issuer_id)
+ work_resourceholderca_cer = X509(work_resourceholderca.certificate)
+
+ self.work_resourceholderca_id = work_resourceholderca.id
+
+ work_irdb_repository = self.iter_get(row for row in world.db.irdbd.irdb_repository
+ if row.turtle_id == rootd.turtle_ptr_id)
+
+ work_tenant = self.iter_get(row for row in world.db.rpkid.self
+ if row.self_handle == work_resourceholderca.handle)
+
+ work_rpkid_parent = self.iter_get(row for row in world.db.rpkid.parent
+ if row.parent_handle == work_resourceholderca.handle
+ and row.self_id == work_tenant.self_id)
+
+ now = rpki.sundial.now()
+
+ crl_interval = cfg.getint(section = "myrpki",
+ option = "tenant_crl_interval",
+ default = 6 * 60 * 60)
+
+ regen_margin = cfg.getint(section = "myrpki",
+ option = "tenant_regen_margin",
+ default = 14 * 24 * 60 * 60 + 2 * 60)
+
+ # RPKI root CA validity interval, in case we still need it.
+ #rpki.sundial.timedelta(days = 3653)
+
+ # Whole lotta new BPKI glorp.
+
+ root_resourceholderca_serial = 1
+ root_resourceholderca_key = rpki.x509.RSA.generate()
+ root_resourceholderca_cer = rpki.x509.X509.bpki_self_certify(
+ keypair = root_resourceholderca_key,
+ subject_name = rpki.x509.X501DN.from_cn("{} BPKI resource CA".format(root_handle)),
+ serial = root_resourceholderca_serial,
+ now = now,
+ notAfter = now + rpki.sundial.timedelta(days = 60))
+ root_resourceholderca_serial += 1
+ root_resourceholderca_crl = rpki.x509.CRL.generate(
+ keypair = root_resourceholderca_key,
+ issuer = root_resourceholderca_cer,
+ serial = 1,
+ thisUpdate = now,
+ nextUpdate = now + rpki.sundial.timedelta(hours = 25),
+ revokedCertificates = ())
+
+ root_bsc_key = rpki.x509.RSA.generate()
+ root_bsc_pkcs10 = rpki.x509.PKCS10.create(keypair = root_bsc_key)
+ root_bsc_cer = root_resourceholderca_cer.bpki_certify(
+ keypair = root_resourceholderca_key,
+ subject_name = root_bsc_pkcs10.getSubject(),
+ subject_key = root_bsc_pkcs10.getPublicKey(),
+ serial = root_resourceholderca_serial,
+ now = now,
+ notAfter = now + rpki.sundial.timedelta(days = 60),
+ is_ca = False,
+ pathLenConstraint = None)
+ root_resourceholderca_serial += 1
+
+ root_repository_bpki_cer = root_resourceholderca_cer.bpki_certify(
+ keypair = root_resourceholderca_key,
+ subject_name = serverca_cer.getSubject(),
+ subject_key = serverca_cer.getPublicKey(),
+ serial = root_resourceholderca_serial,
+ now = now,
+ notAfter = now + rpki.sundial.timedelta(days = 60),
+ is_ca = True,
+ pathLenConstraint = 0)
+ root_resourceholderca_serial += 1
+
+ root_parent_bpki_cer = root_resourceholderca_cer.bpki_certify(
+ keypair = root_resourceholderca_key,
+ subject_name = root_resourceholderca_cer.getSubject(),
+ subject_key = root_resourceholderca_cer.getPublicKey(),
+ serial = root_resourceholderca_serial,
+ now = now,
+ notAfter = now + rpki.sundial.timedelta(days = 60),
+ is_ca = True,
+ pathLenConstraint = 0)
+ root_resourceholderca_serial += 1
+
+ root_child_bpki_cer = root_resourceholderca_cer.bpki_certify(
+ keypair = root_resourceholderca_key,
+ subject_name = work_resourceholderca_cer.getSubject(),
+ subject_key = work_resourceholderca_cer.getPublicKey(),
+ serial = root_resourceholderca_serial,
+ now = now,
+ notAfter = now + rpki.sundial.timedelta(days = 60),
+ is_ca = True,
+ pathLenConstraint = 0)
+ root_resourceholderca_serial += 1
+
+ root_hostedca_cer = serverca_cer.bpki_certify(
+ keypair = serverca_key,
+ subject_name = root_resourceholderca_cer.getSubject(),
+ subject_key = root_resourceholderca_cer.getPublicKey(),
+ serial = serverca.next_serial,
+ now = now,
+ notAfter = now + rpki.sundial.timedelta(days = 60),
+ is_ca = True,
+ pathLenConstraint = 1)
+ serverca.next_serial += 1
+
+ root_client_cer = serverca_cer.bpki_certify(
+ keypair = serverca_key,
+ subject_name = root_resourceholderca_cer.getSubject(),
+ subject_key = root_resourceholderca_cer.getPublicKey(),
+ serial = serverca.next_serial,
+ now = now,
+ notAfter = now + rpki.sundial.timedelta(days = 60),
+ is_ca = True,
+ pathLenConstraint = 0)
+ serverca.next_serial += 1
+
+ # Various contact URIs.
+
+ root_up_down_path = "/up-down/{root}/{work}".format(
+ root = root_handle,
+ work = work_resourceholderca.handle)
+
+ root_loopback_uri = fixuri.rpkid("/up-down/{root}/{root}".format(
+ root = root_handle))
+
+ root_publication_control_uri = fixuri.pubd("/client/{root}".format(
+ root = root_handle))
+
+ root_rsync_uri = fixuri.rsyncd("/{module}/{handle}/".format(
+ module = cfg.get(section = "myrpki", option = "publication_rsync_module"),
+ handle = root_handle))
+
+ rpki_root_cer_uri = fixuri.rsyncd("/{module}/{gski}.cer".format(
+ module = cfg.get(section = "myrpki", option = "publication_rsync_module"),
+ gski = rpki_root_key.gSKI()))
+
+ rpki_root_crl_uri = root_rsync_uri + rpki_root_key.gSKI() + ".crl"
+
+ rpki_root_mft_uri = root_rsync_uri + rpki_root_key.gSKI() + ".mft"
+
+ rrdp_notification_uri = cfg.get(section = "myrpki",
+ option = "publication_rrdp_notification_uri")
+
+ # Some sanity checks
+
+ if len(world.db.irdbd.irdb_rootd) != 1:
+ raise RuntimeError("Unexpected length for pickled rpki.irdb.models.Rootd")
+
+ if rootd.turtle_ptr_id != self.rootd_turtle_id:
+ raise RuntimeError("Pickled rpki.irdb.models.Rootd does not match Turtle ID")
+
+ if rootd.certificate != rootd_bpki_cer.get_DER():
+ raise RuntimeError("Pickled rootd BPKI certificate does not match pickled SQL")
+
+ if rootd.private_key != rootd_bpki_key.get_DER():
+ raise RuntimeError("Pickled rootd BPKI key does not match pickled SQL")
+
+ if rootd_turtle.service_uri != work_rpkid_parent.peer_contact_uri:
+ raise RuntimeError("Inconsistent pickled Rootd configuration")
+
+ if serverca_cer != rootd_bpki_ta:
+ raise RuntimeError("Pickled rootd BPKI TA does not match pickled SQL ServerCA")
+
+ if work_resourceholderca_cer != child_bpki_cer:
+ raise RuntimeError("Pickled rootd BPKI child CA does not match pickled SQL")
+
+ if rootd_turtle.service_uri != "http://{host}:{port}/".format(
+ host = world.cfg.rootd.server_host,
+ port = world.cfg.rootd.server_port):
+ raise RuntimeError("Pickled Rootd service_uri does not match pickled configuration")
+
+ # Updated RPKI root certificate, CRL and manifest.
+ # The root certificate URI here isn't really right, but it's (probably) harmless.
+
+ rpki_root_last_serial += 1
+ rpki_root_cer = rpki.x509.X509.self_certify(
+ keypair = rpki_root_key,
+ subject_key = rpki_root_key.get_public(),
+ serial = rpki_root_last_serial,
+ sia = (root_rsync_uri, rpki_root_mft_uri, None, rrdp_notification_uri),
+ notAfter = rpki_root_resources.valid_until,
+ resources = rpki_root_resources)
+
+ rpki_root_last_crl_manifest_number += 1
+
+ root_rpki_crl = rpki.x509.CRL.generate(
+ keypair = rpki_root_key,
+ issuer = rpki_root_cer,
+ serial = rpki_root_last_crl_manifest_number,
+ thisUpdate = now,
+ nextUpdate = now + rpki.sundial.timedelta(seconds = crl_interval),
+ revokedCertificates = ())
+
+ rpki_root_last_serial += 1
+ mft_cer = rpki_root_cer.issue(
+ keypair = rpki_root_key,
+ subject_key = rpki_root_mft_key.get_public(),
+ serial = rpki_root_last_serial,
+ sia = (None, None, rpki_root_mft_uri, rrdp_notification_uri),
+ resources = rpki.resource_set.resource_bag.from_inheritance(),
+ aia = rpki_root_cer_uri,
+ crldp = rpki_root_crl_uri,
+ notBefore = now,
+ notAfter = rpki_root_cer.getNotAfter(),
+ is_ca = False)
+
+ rpki_root_mft_objs = [
+ (rpki_root_key.gSKI() + ".crl", root_rpki_crl),
+ (work_resourceholderca_cer.gSKI() + ".cer", work_resourceholderca_cer)]
+
+ rpki_root_mft = rpki.x509.SignedManifest.build(
+ keypair = rpki_root_mft_key,
+ certs = mft_cer,
+ serial = rpki_root_last_crl_manifest_number,
+ thisUpdate = now,
+ nextUpdate = now + rpki.sundial.timedelta(seconds = crl_interval),
+ names_and_objs = rpki_root_mft_objs)
+
+ # Adjust saved working CA's parent object to point at new root.
+ # We supply just the path portion of the URI here, to avoid confusing fixuri.rpkid() later.
+ #
+ # NB: This is the rpkid Parent object. We'd perform the same updates for the irdb Parent
+ # object, but it doesn't exist under the old schema, instead we had the Rootd object which
+ # doesn't contain the fields we need to set here. So we'll need to create a new irdb Parent
+            # object for the working CA, corresponding to the rpkid Parent object we're updating here.
+
+ work_rpkid_parent.recipient_name = root_handle
+ work_rpkid_parent.peer_contact_uri = root_up_down_path
+ work_rpkid_parent.bpki_cms_cert = root_hostedca_cer.get_DER()
+
+ # Templates we'll pass to ORM .objects.create() calls in handlers,
+ # after filling in foreign key fields as needed.
+
+ self.irdb_work_Parent = dict(
+ certificate = root_hostedca_cer,
+ handle = work_rpkid_parent.parent_handle,
+ ta = root_resourceholderca_cer,
+ service_uri = fixuri.rpkid(root_up_down_path),
+ parent_handle = root_handle,
+ child_handle = work_rpkid_parent.sender_name,
+ repository_type = "none",
+ referrer = None,
+ referral_authorization = None,
+ asn_resources = "",
+ ipv4_resources = "",
+ ipv6_resources = "",
+ # Foreign keys: issuer
+ )
+
+ self.irdb_work_Repository = dict(
+ certificate = X509(work_irdb_repository.certificate),
+ handle = work_irdb_repository.handle,
+ ta = X509(work_irdb_repository.ta),
+ client_handle = work_irdb_repository.client_handle,
+ service_uri = fixuri.pubd(work_irdb_repository.service_uri),
+ sia_base = fixuri.pubd(work_irdb_repository.sia_base),
+ rrdp_notification_uri = rrdp_notification_uri,
+ # Foreign keys: issuer, parent
+ )
+
+ self.irdb_root_ResourceHolderCA = dict(
+ certificate = root_resourceholderca_cer,
+ private_key = root_resourceholderca_key,
+ latest_crl = root_resourceholderca_crl,
+ next_serial = root_resourceholderca_serial,
+ next_crl_number = 2,
+ last_crl_update = root_resourceholderca_crl.getThisUpdate(),
+ next_crl_update = root_resourceholderca_crl.getNextUpdate(),
+ handle = root_handle,
+ )
+
+ self.irdb_root_HostedCA = dict(
+ certificate = root_hostedca_cer,
+ # Foreign keys: issuer, hosted
+ )
+
+ self.irdb_root_Parent = dict(
+ certificate = root_parent_bpki_cer,
+ handle = root_handle,
+ ta = root_resourceholderca_cer,
+ service_uri = root_loopback_uri,
+ parent_handle = root_handle,
+ child_handle = root_handle,
+ repository_type = "none",
+ referrer = None,
+ referral_authorization = None,
+ asn_resources = "0-4294967295",
+ ipv4_resources = "0.0.0.0/0",
+ ipv6_resources = "::/0",
+ # Foreign keys: issuer
+ )
+
+ self.irdb_root_BSC = dict(
+ certificate = root_bsc_cer,
+ handle = "bsc",
+ pkcs10 = root_bsc_pkcs10,
+ # Foreign keys: issuer
+ )
+
+ self.irdb_root_Child = dict(
+ certificate = root_child_bpki_cer,
+ handle = work_resourceholderca.handle,
+ ta = work_resourceholderca_cer,
+ valid_until = work_resourceholderca_cer.getNotAfter(),
+ # Foreign keys: issuer
+ )
+
+ self.irdb_root_ChildASN = dict(
+ start_as = 0,
+ end_as = 4294967295,
+ # Foreign keys: child
+ )
+
+ self.irdb_root_ChildNet = dict(
+ start_ip = "0.0.0.0",
+ end_ip = "255.255.255.255",
+ version = 4,
+ # Foreign keys: child
+ )
+
+ self.irdb_root_ChildNet = dict(
+ start_ip = "::",
+ end_ip = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff",
+ version = 6,
+ # Foreign keys: child
+ )
+
+ self.irdb_root_Repository = dict(
+ certificate = root_repository_bpki_cer,
+ handle = root_handle,
+ ta = serverca_cer,
+ client_handle = root_handle,
+ service_uri = root_publication_control_uri,
+ sia_base = root_rsync_uri,
+ rrdp_notification_uri = rrdp_notification_uri,
+ # Foreign keys: issuer, parent
+ )
+
+ self.irdb_root_Client = dict(
+ certificate = root_client_cer,
+ handle = root_handle,
+ ta = root_resourceholderca_cer,
+ sia_base = root_rsync_uri,
+ # Foreign keys: issuer
+ )
+
+ self.pubd_root_Client = dict(
+ client_handle = root_handle,
+ base_uri = root_rsync_uri,
+ bpki_cert = root_client_cer,
+ bpki_glue = None,
+ last_cms_timestamp = None,
+ )
+
+ self.rpkid_root_Tenant = dict(
+ tenant_handle = root_handle,
+ use_hsm = False,
+ crl_interval = crl_interval,
+ regen_margin = regen_margin,
+ bpki_cert = root_hostedca_cer,
+ bpki_glue = None,
+ )
+
+ self.rpkid_root_BSC = dict(
+ bsc_handle = "bsc",
+ private_key_id = root_bsc_key,
+ pkcs10_request = root_bsc_pkcs10,
+ signing_cert = root_bsc_cer,
+ signing_cert_crl = root_resourceholderca_crl,
+ # Foreign keys: tenant
+ )
+
+ self.rpkid_root_Repository = dict(
+ repository_handle = root_handle,
+ peer_contact_uri = root_publication_control_uri,
+ rrdp_notification_uri = rrdp_notification_uri,
+ bpki_cert = root_repository_bpki_cer,
+ bpki_glue = None,
+ last_cms_timestamp = None,
+ # Foreign keys: tenant, bsc
+ )
+
+ self.rpkid_root_Parent = dict(
+ parent_handle = root_handle,
+ bpki_cert = root_parent_bpki_cer,
+ bpki_glue = None,
+ peer_contact_uri = root_loopback_uri,
+ sia_base = root_rsync_uri,
+ sender_name = root_handle,
+ recipient_name = root_handle,
+ last_cms_timestamp = None,
+ root_asn_resources = "0-4294967295",
+ root_ipv4_resources = "0.0.0.0/0",
+ root_ipv6_resources = "::/0",
+ # Foreign keys: tenant, bsc, repository
+ )
+
+ self.rpkid_root_CA = dict(
+ last_crl_manifest_number = rpki_root_last_crl_manifest_number,
+ last_issued_sn = rpki_root_last_serial,
+ sia_uri = root_rsync_uri,
+ parent_resource_class = world.cfg.rootd.rpki_class_name,
+ # Foreign keys: parent
+ )
+
+ self.rpkid_root_CADetail = dict(
+ public_key = rpki_root_key.get_public(),
+ private_key_id = rpki_root_key,
+ latest_crl = rpki_root_crl,
+ crl_published = None,
+ latest_ca_cert = rpki_root_cer,
+ manifest_private_key_id = rpki_root_mft_key,
+ manifest_public_key = rpki_root_mft_key.get_public(),
+ latest_manifest = rpki_root_mft,
+ manifest_published = None,
+ state = "active",
+ ca_cert_uri = rpki_root_cer_uri,
+ # Foreign keys: ca
+ )
+
+ self.rpkid_root_Child = dict(
+ child_handle = work_resourceholderca.handle,
+ bpki_cert = root_child_bpki_cer,
+ bpki_glue = None,
+ last_cms_timestamp = None,
+ # Foreign keys: tenant, bsc
+ )
+
+ self.rpkid_root_ChildCert = dict(
+ cert = rpki_work_cer,
+ published = None,
+ gski = rpki_work_cer.gSKI(),
+ # Foreign keys: child, ca_detail
+ )
+
+
+def reset_sequence(*app_labels):
+ # Apparently this is the approved way of telling the database to reset its
+ # idea of what sequence numbers to use in AutoField columns we've touched.
+ #
+ # The need to specify "--no-color" here is a particularly cute touch.
+
+ from django.core import management
+ from django.db import connection
+
+ with tempfile.TemporaryFile() as f:
+ management.call_command("sqlsequencereset", *app_labels, no_color = True, stdout = f)
+ f.seek(0)
+ cmds = f.read().split(";")
+
+ with connection.cursor() as cur:
+ for cmd in cmds:
+ cmd = cmd.strip()
+ if cmd:
+ cur.execute(cmd)
+
+
+def rpkid_handler(cfg, args, world, root, fixuri):
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.rpkid")
+ import django
+ django.setup()
+ import rpki.rpkidb
+
+ show_model("rpkid", "self")
+ for row in world.db.rpkid.self:
+ show_instance(row.self_id, row.self_handle)
+ rpki.rpkidb.models.Tenant.objects.create(
+ pk = row.self_id,
+ tenant_handle = row.self_handle,
+ use_hsm = row.use_hsm,
+ crl_interval = row.crl_interval,
+ regen_margin = row.regen_margin,
+ bpki_cert = X509(row.bpki_cert),
+ bpki_glue = X509(row.bpki_glue))
+
+ show_model("rpkid", "bsc")
+ for row in world.db.rpkid.bsc:
+ show_instance(row.bsc_id, row.bsc_handle)
+ tenant = rpki.rpkidb.models.Tenant.objects.get(pk = row.self_id)
+ rpki.rpkidb.models.BSC.objects.create(
+ pk = row.bsc_id,
+ bsc_handle = row.bsc_handle,
+ private_key_id = RSA(row.private_key_id),
+ pkcs10_request = PKCS10(row.pkcs10_request),
+ hash_alg = row.hash_alg or "sha256",
+ signing_cert = X509(row.signing_cert),
+ signing_cert_crl = CRL(row.signing_cert_crl),
+ tenant = tenant)
+
+ rrdp_notification_uri = cfg.get(section = "myrpki", option = "publication_rrdp_notification_uri")
+
+ show_model("rpkid", "repository")
+ for row in world.db.rpkid.repository:
+ show_instance(row.repository_id, row.repository_handle)
+ tenant = rpki.rpkidb.models.Tenant.objects.get(pk = row.self_id)
+ bsc = rpki.rpkidb.models.BSC.objects.get( pk = row.bsc_id,
+ tenant = row.self_id)
+ rpki.rpkidb.models.Repository.objects.create(
+ pk = row.repository_id,
+ repository_handle = row.repository_handle,
+ peer_contact_uri = fixuri.pubd(row.peer_contact_uri),
+ rrdp_notification_uri = rrdp_notification_uri,
+ bpki_cert = X509(row.bpki_cert),
+ bpki_glue = X509(row.bpki_glue),
+ last_cms_timestamp = row.last_cms_timestamp,
+ bsc = bsc,
+ tenant = tenant)
+
+ show_model("rpkid", "parent")
+ for row in world.db.rpkid.parent:
+ show_instance(row.parent_id, row.parent_handle)
+ tenant = rpki.rpkidb.models.Tenant.objects.get( pk = row.self_id)
+ bsc = rpki.rpkidb.models.BSC.objects.get( pk = row.bsc_id,
+ tenant = row.self_id)
+ repository = rpki.rpkidb.models.Repository.objects.get(pk = row.repository_id,
+ tenant = row.self_id)
+ rpki.rpkidb.models.Parent.objects.create(
+ pk = row.parent_id,
+ parent_handle = row.parent_handle,
+ bpki_cert = X509(row.bpki_cms_cert),
+ bpki_glue = X509(row.bpki_cms_glue),
+ peer_contact_uri = fixuri.rpkid(row.peer_contact_uri),
+ sia_base = fixuri.rsyncd(row.sia_base),
+ sender_name = row.sender_name,
+ recipient_name = row.recipient_name,
+ last_cms_timestamp = row.last_cms_timestamp,
+ bsc = bsc,
+ repository = repository,
+ tenant = tenant)
+
+ show_model("rpkid", "ca")
+ for row in world.db.rpkid.ca:
+ show_instance(row.ca_id)
+ parent = rpki.rpkidb.models.Parent.objects.get(pk = row.parent_id)
+ last_crl_mft_number = max(row.last_crl_sn,
+ row.last_manifest_sn)
+ rpki.rpkidb.models.CA.objects.create(
+ pk = row.ca_id,
+ last_crl_manifest_number = last_crl_mft_number,
+ last_issued_sn = row.last_issued_sn,
+ sia_uri = fixuri.rsyncd(row.sia_uri),
+ parent_resource_class = row.parent_resource_class,
+ parent = parent)
+
+ show_model("rpkid", "ca_detail")
+ for row in world.db.rpkid.ca_detail:
+ show_instance(row.ca_detail_id)
+ ca = rpki.rpkidb.models.CA.objects.get(pk = row.ca_id)
+ rpki.rpkidb.models.CADetail.objects.create(
+ pk = row.ca_detail_id,
+ public_key = RSA(row.public_key),
+ private_key_id = RSA(row.private_key_id),
+ latest_crl = CRL(row.latest_crl),
+ crl_published = row.crl_published,
+ latest_ca_cert = X509(row.latest_ca_cert),
+ manifest_private_key_id = RSA(row.manifest_private_key_id),
+ manifest_public_key = RSA(row.manifest_public_key),
+ latest_manifest = MFT(row.latest_manifest),
+ manifest_published = row.manifest_published,
+ state = row.state,
+ ca_cert_uri = fixuri.rsyncd(row.ca_cert_uri),
+ ca = ca)
+
+ show_model("rpkid", "child")
+ for row in world.db.rpkid.child:
+ show_instance(row.child_id, row.child_handle)
+ tenant = rpki.rpkidb.models.Tenant.objects.get(pk = row.self_id)
+ bsc = rpki.rpkidb.models.BSC.objects.get( pk = row.bsc_id,
+ tenant = row.self_id)
+ rpki.rpkidb.models.Child.objects.create(
+ pk = row.child_id,
+ child_handle = row.child_handle,
+ bpki_cert = X509(row.bpki_cert),
+ bpki_glue = X509(row.bpki_glue),
+ last_cms_timestamp = row.last_cms_timestamp,
+ tenant = tenant,
+ bsc = bsc)
+
+ show_model("rpkid", "child_cert")
+ for row in world.db.rpkid.child_cert:
+ show_instance(row.child_cert_id)
+ child = rpki.rpkidb.models.Child.objects.get( pk = row.child_id)
+ ca_detail = rpki.rpkidb.models.CADetail.objects.get(pk = row.ca_detail_id)
+ rpki.rpkidb.models.ChildCert.objects.create(
+ pk = row.child_cert_id,
+ cert = X509(row.cert),
+ published = row.published,
+ gski = SKI_to_gSKI(row.ski),
+ child = child,
+ ca_detail = ca_detail)
+
+ show_model("rpkid", "revoked_cert")
+ for row in world.db.rpkid.revoked_cert:
+ show_instance(row.revoked_cert_id)
+ ca_detail = rpki.rpkidb.models.CADetail.objects.get(pk = row.ca_detail_id)
+ rpki.rpkidb.models.RevokedCert.objects.create(
+ pk = row.revoked_cert_id,
+ serial = row.serial,
+ revoked = row.revoked,
+ expires = row.expires,
+ ca_detail = ca_detail)
+
+ show_model("rpkid", "roa")
+ for row in world.db.rpkid.roa:
+ show_instance(row.roa_id)
+ tenant = rpki.rpkidb.models.Tenant.objects.get( pk = row.self_id)
+ ca_detail = rpki.rpkidb.models.CADetail.objects.get(pk = row.ca_detail_id)
+ prefixes = tuple(
+ (p.version, "{0.prefix}/{0.prefixlen}-{0.max_prefixlen}".format(p))
+ for p in world.db.rpkid.roa_prefix
+ if p.roa_id == row.roa_id)
+ ipv4 = ",".join(p for v, p in prefixes if v == 4) or None
+ ipv6 = ",".join(p for v, p in prefixes if v == 6) or None
+ rpki.rpkidb.models.ROA.objects.create(
+ pk = row.roa_id,
+ asn = row.asn,
+ ipv4 = ipv4,
+ ipv6 = ipv6,
+ cert = X509(row.cert),
+ roa = ROA(row.roa),
+ published = row.published,
+ tenant = tenant,
+ ca_detail = ca_detail)
+
+ show_model("rpkid", "ghostbuster")
+ for row in world.db.rpkid.ghostbuster:
+ show_instance(row.ghostbuster_id)
+ tenant = rpki.rpkidb.models.Tenant.objects.get( pk = row.self_id)
+ ca_detail = rpki.rpkidb.models.CADetail.objects.get(pk = row.ca_detail_id)
+ rpki.rpkidb.models.Ghostbuster.objects.create(
+ pk = row.ghostbuster_id,
+ vcard = row.vcard,
+ cert = X509(row.cert),
+ ghostbuster = GBR(row.ghostbuster),
+ published = row.published,
+ tenant = tenant,
+ ca_detail = ca_detail)
+
+ show_model("rpkid", "ee_cert")
+ for row in world.db.rpkid.ee_cert:
+ show_instance(row.ee_cert_id)
+ tenant = rpki.rpkidb.models.Tenant.objects.get( pk = row.self_id)
+ ca_detail = rpki.rpkidb.models.CADetail.objects.get(pk = row.ca_detail_id)
+ rpki.rpkidb.models.EECertificate.objects.create(
+ pk = row.ee_cert_id,
+ gski = SKI_to_gSKI(row.ski),
+ cert = X509(row.cert),
+ published = row.published,
+ tenant = tenant,
+ ca_detail = ca_detail)
+
+ reset_sequence("rpkidb")
+
+ if root.enabled:
+ tenant = rpki.rpkidb.models.Tenant.objects.create(**dict(
+ root.rpkid_root_Tenant))
+ bsc = rpki.rpkidb.models.BSC.objects.create(**dict(
+ root.rpkid_root_BSC,
+ tenant = tenant))
+ repository = rpki.rpkidb.models.Repository.objects.create(**dict(
+ root.rpkid_root_Repository,
+ tenant = tenant,
+ bsc = bsc))
+ parent = rpki.rpkidb.models.Parent.objects.create(**dict(
+ root.rpkid_root_Parent,
+ tenant = tenant,
+ bsc = bsc,
+ repository = repository))
+ ca = rpki.rpkidb.models.CA.objects.create(**dict(
+ root.rpkid_root_CA,
+ parent = parent))
+ ca_detail = rpki.rpkidb.models.CADetail.objects.create(**dict(
+ root.rpkid_root_CADetail,
+ ca = ca))
+ child = rpki.rpkidb.models.Child.objects.create(**dict(
+ root.rpkid_root_Child,
+ tenant = tenant,
+ bsc = bsc))
+ child_cert = rpki.rpkidb.models.ChildCert.objects.create(**dict(
+ root.rpkid_root_ChildCert,
+ child = child,
+ ca_detail = ca_detail))
+
+
+def pubd_handler(cfg, args, world, root, fixuri):
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.pubd")
+ import django
+ django.setup()
+ import rpki.pubdb
+
+ show_model("pubd", "client")
+ for row in world.db.pubd.client:
+ show_instance(row.client_id, row.client_handle)
+ rpki.pubdb.models.Client.objects.create(
+ pk = row.client_id,
+ client_handle = row.client_handle,
+ base_uri = fixuri.rsyncd(row.base_uri),
+ bpki_cert = X509(row.bpki_cert),
+ bpki_glue = X509(row.bpki_glue),
+ last_cms_timestamp = row.last_cms_timestamp)
+
+ reset_sequence("pubdb")
+
+ if root.enabled:
+ rpki.pubdb.models.Client.objects.create(**dict(
+ root.pubd_root_Client))
+
+
+def irdb_handler(cfg, args, world, root, fixuri):
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb")
+ import django
+ django.setup()
+ import rpki.irdb
+
+ # Most pk fields are just id. The one exception is Parent, whose pk
+ # is turtle_ptr_id because it's (also) a foreign key pointing at Turtle.id.
+
+ show_model("irdb", "ServerCA")
+ for row in world.db.irdbd.irdb_serverca:
+ show_instance(row.id)
+ rpki.irdb.models.ServerCA.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ private_key = RSA(row.private_key),
+ latest_crl = CRL(row.latest_crl),
+ next_serial = row.next_serial,
+ next_crl_number = row.next_crl_number,
+ last_crl_update = row.last_crl_update,
+ next_crl_update = row.next_crl_update)
+
+ show_model("irdb", "ResourceHolderCA")
+ for row in world.db.irdbd.irdb_resourceholderca:
+ show_instance(row.id, row.handle)
+ rpki.irdb.models.ResourceHolderCA.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ private_key = RSA(row.private_key),
+ latest_crl = CRL(row.latest_crl),
+ next_serial = row.next_serial,
+ next_crl_number = row.next_crl_number,
+ last_crl_update = row.last_crl_update,
+ next_crl_update = row.next_crl_update,
+ handle = row.handle)
+
+ show_model("irdb", "HostedCA")
+ for row in world.db.irdbd.irdb_hostedca:
+ show_instance(row.id)
+ issuer = rpki.irdb.models.ServerCA.objects.get( pk = row.issuer_id)
+ hosted = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.hosted_id)
+ rpki.irdb.models.HostedCA.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ issuer = issuer,
+ hosted = hosted)
+
+ show_model("irdb", "ServerRevocation")
+ for row in world.db.irdbd.irdb_serverrevocation:
+ show_instance(row.id)
+ issuer = rpki.irdb.models.ServerCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.ServerRevocation.objects.create(
+ pk = row.id,
+ serial = row.serial,
+ revoked = row.revoked,
+ expires = row.expires,
+ issuer = issuer)
+
+ show_model("irdb", "ResourceHolderRevocation")
+ for row in world.db.irdbd.irdb_resourceholderrevocation:
+ show_instance(row.id)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.ResourceHolderRevocation.objects.create(
+ pk = row.id,
+ serial = row.serial,
+ revoked = row.revoked,
+ expires = row.expires,
+ issuer = issuer)
+
+ show_model("irdb", "ServerEE")
+ for row in world.db.irdbd.irdb_serveree:
+ show_instance(row.id)
+ issuer = rpki.irdb.models.ServerCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.ServerEE.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ private_key = RSA(row.private_key),
+ purpose = row.purpose,
+ issuer = issuer)
+
+ show_model("irdb", "Referral")
+ for row in world.db.irdbd.irdb_referral:
+ show_instance(row.id)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.Referral.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ private_key = RSA(row.private_key),
+ issuer = issuer)
+
+ show_model("irdb", "BSC")
+ for row in world.db.irdbd.irdb_bsc:
+ show_instance(row.id, row.handle)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.BSC.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ handle = row.handle,
+ pkcs10 = PKCS10(row.pkcs10),
+ issuer = issuer)
+
+ show_model("irdb", "Child")
+ for row in world.db.irdbd.irdb_child:
+ show_instance(row.id, row.handle)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.Child.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ handle = row.handle,
+ ta = X509(row.ta),
+ valid_until = row.valid_until,
+ name = row.name,
+ issuer = issuer)
+
+ show_model("irdb", "ChildASN")
+ for row in world.db.irdbd.irdb_childasn:
+ show_instance(row.id)
+ child = rpki.irdb.models.Child.objects.get(pk = row.child_id)
+ rpki.irdb.models.ChildASN.objects.create(
+ pk = row.id,
+ start_as = row.start_as,
+ end_as = row.end_as,
+ child = child)
+
+ show_model("irdb", "ChildNet")
+ for row in world.db.irdbd.irdb_childnet:
+ show_instance(row.id)
+ child = rpki.irdb.models.Child.objects.get(pk = row.child_id)
+ rpki.irdb.models.ChildNet.objects.create(
+ pk = row.id,
+ start_ip = row.start_ip,
+ end_ip = row.end_ip,
+ version = row.version,
+ child = child)
+
+ turtle_map = dict((row.id, row) for row in world.db.irdbd.irdb_turtle)
+
+ show_model("irdb", "Parent")
+ for row in world.db.irdbd.irdb_parent:
+ show_instance(row.turtle_ptr_id, row.handle)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.Parent.objects.create(
+ pk = row.turtle_ptr_id,
+ service_uri = fixuri.rpkid(turtle_map[row.turtle_ptr_id].service_uri),
+ certificate = X509(row.certificate),
+ handle = row.handle,
+ ta = X509(row.ta),
+ parent_handle = row.parent_handle,
+ child_handle = row.child_handle,
+ repository_type = row.repository_type,
+ referrer = row.referrer,
+ referral_authorization = REF(row.referral_authorization),
+ issuer = issuer)
+
+ show_model("irdb", "ROARequest")
+ for row in world.db.irdbd.irdb_roarequest:
+ show_instance(row.id)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.ROARequest.objects.create(
+ pk = row.id,
+ asn = row.asn,
+ issuer = issuer)
+
+ show_model("irdb", "ROARequestPrefix")
+ for row in world.db.irdbd.irdb_roarequestprefix:
+ show_instance(row.id)
+ roa_request = rpki.irdb.models.ROARequest.objects.get(pk = row.roa_request_id)
+ rpki.irdb.models.ROARequestPrefix.objects.create(
+ pk = row.id,
+ version = row.version,
+ prefix = row.prefix,
+ prefixlen = row.prefixlen,
+ max_prefixlen = row.max_prefixlen,
+ roa_request = roa_request)
+
+ show_model("irdb", "Ghostbuster")
+ for row in world.db.irdbd.irdb_ghostbusterrequest:
+ show_instance(row.id)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ try:
+ parent = rpki.irdb.models.Parent.objects.get(pk = row.parent_id)
+ except rpki.irdb.models.Parent.DoesNotExist:
+ parent = None
+ rpki.irdb.models.GhostbusterRequest.objects.create(
+ pk = row.id,
+ vcard = row.vcard,
+ parent = parent,
+ issuer = issuer)
+
+ show_model("irdb", "EECertificateRequest")
+ for row in world.db.irdbd.irdb_eecertificaterequest:
+ show_instance(row.id)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.EECertificateRequest.objects.create(
+ pk = row.id,
+ valid_until = row.valid_until,
+ pkcs10 = PKCS10(row.pkcs10),
+ gski = row.gski,
+ cn = row.cn,
+ sn = row.sn,
+ eku = row.eku,
+ issuer = issuer)
+
+ show_model("irdb", "EECertificateRequestASN")
+ for row in world.db.irdbd.irdb_eecertificaterequestasn:
+ show_instance(row.id)
+ ee_certificate_request = rpki.irdb.models.EECertificateRequest.objects.get(
+ pk = row.ee_certificate_request_id)
+ rpki.irdb.models.EECertificateRequestASN.objects.create(
+ pk = row.id,
+ start_as = row.start_as,
+ end_as = row.end_as,
+ ee_certificate_request = ee_certificate_request)
+
+ show_model("irdb", "EECertificateRequestNet")
+ for row in world.db.irdbd.irdb_eecertificaterequestnet:
+ show_instance(row.id)
+ ee_certificate_request = rpki.irdb.models.EECertificateRequest.objects.get(
+ pk = row.ee_certificate_request_id)
+ rpki.irdb.models.EECertificateRequestNet.objects.create(
+ pk = row.id,
+ start_ip = row.start_ip,
+ end_ip = row.end_ip,
+ version = row.version,
+ ee_certificate_request = ee_certificate_request)
+
+    # Turtle without a Parent can happen where the old database had a Rootd.
+    # We can create an irdb parent, but only rpkid_handler() (or rpkid itself)
+    # can create an rpkidb Parent object, so we need to coordinate with rpkid_handler().
+
+ rrdp_notification_uri = cfg.get(section = "myrpki", option = "publication_rrdp_notification_uri")
+
+ show_model("irdb", "Repository")
+ for row in world.db.irdbd.irdb_repository:
+ show_instance(row.turtle_id, row.handle)
+ issuer = rpki.irdb.models.ResourceHolderCA.objects.get(pk = row.issuer_id)
+ try:
+ parent = rpki.irdb.models.Parent.objects.get(pk = row.turtle_id)
+ except rpki.irdb.models.Parent.DoesNotExist:
+ if row.turtle_id in set(r.turtle_ptr_id for r in world.db.irdbd.irdb_rootd):
+ print " Skipping repository for old rootd instance"
+ continue
+ else:
+ raise
+ rpki.irdb.models.Repository.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ handle = row.handle,
+ ta = X509(row.ta),
+ client_handle = row.client_handle,
+ service_uri = fixuri.pubd(row.service_uri),
+ sia_base = fixuri.rsyncd(row.sia_base),
+ rrdp_notification_uri = rrdp_notification_uri,
+ parent = parent,
+ issuer = issuer)
+
+ show_model("irdb", "Client")
+ for row in world.db.irdbd.irdb_client:
+ show_instance(row.id, row.handle)
+ issuer = rpki.irdb.models.ServerCA.objects.get(pk = row.issuer_id)
+ rpki.irdb.models.Client.objects.create(
+ pk = row.id,
+ certificate = X509(row.certificate),
+ handle = row.handle,
+ ta = X509(row.ta),
+ sia_base = fixuri.rsyncd(row.sia_base),
+ issuer = issuer)
+
+ reset_sequence("irdb")
+
+ if root.enabled:
+ irdb_issuer = rpki.irdb.models.ResourceHolderCA.objects.get(
+ pk = root.work_resourceholderca_id)
+ irdb_parent = rpki.irdb.models.Parent.objects.create(**dict(
+ root.irdb_work_Parent,
+ issuer = irdb_issuer))
+ irdb_repository = rpki.irdb.models.Repository.objects.create(**dict(
+ root.irdb_work_Repository,
+ issuer = irdb_issuer,
+ parent = irdb_parent))
+ serverca = rpki.irdb.models.ServerCA.objects.get()
+ resourceholderca = rpki.irdb.models.ResourceHolderCA.objects.create(**dict(
+ root.irdb_root_ResourceHolderCA))
+ hostedca = rpki.irdb.models.HostedCA(**dict(
+ root.irdb_root_HostedCA,
+ issuer = serverca,
+ hosted = resourceholderca))
+ parent = rpki.irdb.models.Parent.objects.create(**dict(
+ root.irdb_root_Parent,
+ issuer = resourceholderca))
+ bsc = rpki.irdb.models.BSC.objects.create(**dict(
+ root.irdb_root_BSC,
+ issuer = resourceholderca))
+ child = rpki.irdb.models.Child.objects.create(**dict(
+ root.irdb_root_Child,
+ issuer = resourceholderca))
+ childasn = rpki.irdb.models.ChildASN.objects.create(**dict(
+ root.irdb_root_ChildASN,
+ child = child))
+ childnet = rpki.irdb.models.ChildNet.objects.create(**dict(
+ root.irdb_root_ChildNet,
+ child = child))
+ repository = rpki.irdb.models.Repository.objects.create(**dict(
+ root.irdb_root_Repository,
+ parent = parent,
+ issuer = resourceholderca))
+ client = rpki.irdb.models.Client.objects.create(**dict(
+ root.irdb_root_Client,
+ issuer = serverca))
-xzcat = subprocess.Popen(("xzcat", args.input), stdout = subprocess.PIPE)
-world = cPickle.load(xzcat.stdout)
-if xzcat.wait() != 0:
- sys.exit("XZ unpickling failed with code {}".format(xzcat.returncode))
-print "import datetime"
-print "world =", repr(world)
+if __name__ == "__main__":
+ main()
diff --git a/potpourri/cross_certify.py b/potpourri/cross_certify.py
index fab7743b..4e6485b7 100644
--- a/potpourri/cross_certify.py
+++ b/potpourri/cross_certify.py
@@ -1,13 +1,13 @@
# $Id$
-#
+#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
@@ -57,18 +57,18 @@ now = rpki.sundial.now()
notAfter = now + args.lifetime
try:
- with open(args.serial, "r") as f:
- serial = int(f.read().splitlines()[0], 16)
+ with open(args.serial, "r") as f:
+ serial = int(f.read().splitlines()[0], 16)
except IOError:
- serial = 1
+ serial = 1
cert = args.ca.cross_certify(args.key, args.input, serial, notAfter, now)
with open(args.serial, "w") as f:
- f.write("%02x\n" % (serial + 1))
+ f.write("%02x\n" % (serial + 1))
if args.out is None:
- sys.stdout.write(cert.get_PEM())
+ sys.stdout.write(cert.get_PEM())
else:
- with open(args.out, "w") as f:
- f.write(cert.get_PEM())
+ with open(args.out, "w") as f:
+ f.write(cert.get_PEM())
diff --git a/potpourri/csvgrep.py b/potpourri/csvgrep.py
index 68bdd259..3d558245 100644
--- a/potpourri/csvgrep.py
+++ b/potpourri/csvgrep.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2010-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -37,36 +37,36 @@ ipv4 = resource_set_ipv4()
ipv6 = resource_set_ipv6()
for datum in sys.argv[1:]:
- if datum.replace("-", "").isdigit():
- t = asn
- else:
- t = ipv6 if ":" in datum else ipv4
- if "-" not in datum and "/" not in datum:
- datum = datum + "-" + datum
- try:
- t.append(t.parse_str(datum))
- except:
- print "Error attempting to parse", datum
- raise
+ if datum.replace("-", "").isdigit():
+ t = asn
+ else:
+ t = ipv6 if ":" in datum else ipv4
+ if "-" not in datum and "/" not in datum:
+ datum = datum + "-" + datum
+ try:
+ t.append(t.parse_str(datum))
+ except:
+ print "Error attempting to parse", datum
+ raise
#print "Looking for: ASNs %s IPv4 %s IPv6 %s" % (asn, ipv4, ipv6)
def matches(set1, datum):
- set2 = set1.__class__(datum)
- if set1.intersection(set2):
- return set2
- else:
- return False
+ set2 = set1.__class__(datum)
+ if set1.intersection(set2):
+ return set2
+ else:
+ return False
if asn:
- for h, a in csv_reader("asns.csv", columns = 2):
- m = matches(asn, a)
- if m:
- print h, m
+ for h, a in csv_reader("asns.csv", columns = 2):
+ m = matches(asn, a)
+ if m:
+ print h, m
if ipv4 or ipv6:
- for h, a in csv_reader("prefixes.csv", columns = 2):
- t = ipv6 if ":" in a else ipv4
- m = t and matches(t, a)
- if m:
- print h, m
+ for h, a in csv_reader("prefixes.csv", columns = 2):
+ t = ipv6 if ":" in a else ipv4
+ m = t and matches(t, a)
+ if m:
+ print h, m
diff --git a/potpourri/django-legacy-database.README b/potpourri/django-legacy-database.README
new file mode 100644
index 00000000..41a3b911
--- /dev/null
+++ b/potpourri/django-legacy-database.README
@@ -0,0 +1,4 @@
+Snapshot of work in progress on converting our existing databases into
+Django using South 1.0 migrations. This will probably need rewriting
+to address changes in how we deal with Django settings and multiple
+databases; this snapshot is just to get it into the subversion archive.
diff --git a/potpourri/django-legacy-database.tar.xz b/potpourri/django-legacy-database.tar.xz
new file mode 100644
index 00000000..762dde7d
--- /dev/null
+++ b/potpourri/django-legacy-database.tar.xz
Binary files differ
diff --git a/potpourri/expand-roa-prefixes.py b/potpourri/expand-roa-prefixes.py
index ae34ea0a..c08f8abf 100644
--- a/potpourri/expand-roa-prefixes.py
+++ b/potpourri/expand-roa-prefixes.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2011 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -27,53 +27,53 @@ import rpki.resource_set
import rpki.ipaddrs
class NotAPrefix(Exception):
- """
- Address is not a proper prefix.
- """
+ """
+ Address is not a proper prefix.
+ """
class address_range(object):
- """
- Iterator for rpki.ipaddrs address objects.
- """
+ """
+ Iterator for rpki.ipaddrs address objects.
+ """
- def __init__(self, start, stop, step):
- self.addr = start
- self.stop = stop
- self.step = step
- self.type = type(start)
+ def __init__(self, start, stop, step):
+ self.addr = start
+ self.stop = stop
+ self.step = step
+ self.type = type(start)
- def __iter__(self):
- while self.addr < self.stop:
- yield self.addr
- self.addr = self.type(self.addr + self.step)
+ def __iter__(self):
+ while self.addr < self.stop:
+ yield self.addr
+ self.addr = self.type(self.addr + self.step)
def main(argv):
- prefix_sets = []
- for arg in argv:
- if ":" in arg:
- prefix_sets.extend(rpki.resource_set.roa_prefix_set_ipv6(arg))
- else:
- prefix_sets.extend(rpki.resource_set.roa_prefix_set_ipv4(arg))
+ prefix_sets = []
+ for arg in argv:
+ if ":" in arg:
+ prefix_sets.extend(rpki.resource_set.roa_prefix_set_ipv6(arg))
+ else:
+ prefix_sets.extend(rpki.resource_set.roa_prefix_set_ipv4(arg))
- for prefix_set in prefix_sets:
- sys.stdout.write("%s expands to:\n" % prefix_set)
+ for prefix_set in prefix_sets:
+ sys.stdout.write("%s expands to:\n" % prefix_set)
- prefix_type = prefix_set.range_type.datum_type
- prefix_min = prefix_set.prefix
- prefix_max = prefix_set.prefix + (1L << (prefix_type.bits - prefix_set.prefixlen))
+ prefix_type = prefix_set.range_type.datum_type
+ prefix_min = prefix_set.prefix
+ prefix_max = prefix_set.prefix + (1L << (prefix_type.bits - prefix_set.prefixlen))
- for prefixlen in xrange(prefix_set.prefixlen, prefix_set.max_prefixlen + 1):
+ for prefixlen in xrange(prefix_set.prefixlen, prefix_set.max_prefixlen + 1):
- step = (1L << (prefix_type.bits - prefixlen))
- mask = step - 1
+ step = (1L << (prefix_type.bits - prefixlen))
+ mask = step - 1
- for addr in address_range(prefix_min, prefix_max, step):
- if (addr & mask) != 0:
- raise NotAPrefix, "%s is not a /%d prefix" % (addr, prefixlen)
- sys.stdout.write(" %s/%d\n" % (addr, prefixlen))
+ for addr in address_range(prefix_min, prefix_max, step):
+ if (addr & mask) != 0:
+ raise NotAPrefix, "%s is not a /%d prefix" % (addr, prefixlen)
+ sys.stdout.write(" %s/%d\n" % (addr, prefixlen))
- sys.stdout.write("\n")
+ sys.stdout.write("\n")
if __name__ == "__main__":
- main(sys.argv[1:] if len(sys.argv) > 1 else ["18.0.0.0/8-24"])
+ main(sys.argv[1:] if len(sys.argv) > 1 else ["18.0.0.0/8-24"])
diff --git a/potpourri/extract-key.py b/potpourri/extract-key.py
index b85c3d55..e185b255 100644
--- a/potpourri/extract-key.py
+++ b/potpourri/extract-key.py
@@ -2,11 +2,11 @@
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND AND ARIN DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
@@ -49,16 +49,16 @@ args = parser.parse_args()
cur = MySQLdb.connect(user = args.user, db = args.db, passwd = args.password).cursor()
cur.execute(
- """
- SELECT bsc.private_key_id, bsc.signing_cert
- FROM bsc, self
- WHERE self.self_handle = %s AND self.self_id = bsc.self_id AND bsc_handle = %s
- """,
- (args.self, args.bsc))
+ """
+ SELECT bsc.private_key_id, bsc.signing_cert
+ FROM bsc, self
+ WHERE self.self_handle = %s AND self.self_id = bsc.self_id AND bsc_handle = %s
+ """,
+ (args.self, args.bsc))
key, cer = cur.fetchone()
print rpki.x509.RSA(DER = key).get_PEM()
if cer:
- print rpki.x509.X509(DER = cer).get_PEM()
+ print rpki.x509.X509(DER = cer).get_PEM()
diff --git a/potpourri/fakerootd.py b/potpourri/fakerootd.py
index 6275a2a9..22b1c117 100644
--- a/potpourri/fakerootd.py
+++ b/potpourri/fakerootd.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2011 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -44,7 +44,6 @@ s6.listen(limit)
print "Going to sleep at", datetime.datetime.utcnow()
try:
- signal.pause()
+ signal.pause()
except KeyboardInterrupt:
- sys.exit(0)
-
+ sys.exit(0)
diff --git a/potpourri/format-application-x-rpki.py b/potpourri/format-application-x-rpki.py
index 184103f9..44428131 100644
--- a/potpourri/format-application-x-rpki.py
+++ b/potpourri/format-application-x-rpki.py
@@ -1,12 +1,12 @@
# $Id$
-#
+#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2010--2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
@@ -53,80 +53,80 @@ parser.add_argument("-u", "--unseen", action = "store_true",
args = parser.parse_args()
def pprint_cert(b64):
- return rpki.POW.X509.derRead(base64.b64decode(b64)).pprint()
-
+ return rpki.POW.X509.derRead(base64.b64decode(b64)).pprint()
+
def up_down():
- msg["X-RPKI-Up-Down-Type"] = xml.get("type")
- msg["X-RPKI-Up-Down-Sender"] = xml.get("sender")
- msg["X-RPKI-Up-Down-Recipient"] = xml.get("recipient")
- msg["Subject"] = "Up-down %s %s => %s" % (xml.get("type"), xml.get("sender"), xml.get("recipient"))
- for x in xml:
- if x.tag.endswith("class"):
- for y in x:
- if y.tag.endswith("certificate") or y.tag.endswith("issuer"):
- msg.attach(email.mime.text.MIMEText(pprint_cert(y.text)))
+ msg["X-RPKI-Up-Down-Type"] = xml.get("type")
+ msg["X-RPKI-Up-Down-Sender"] = xml.get("sender")
+ msg["X-RPKI-Up-Down-Recipient"] = xml.get("recipient")
+ msg["Subject"] = "Up-down %s %s => %s" % (xml.get("type"), xml.get("sender"), xml.get("recipient"))
+ for x in xml:
+ if x.tag.endswith("class"):
+ for y in x:
+ if y.tag.endswith("certificate") or y.tag.endswith("issuer"):
+ msg.attach(email.mime.text.MIMEText(pprint_cert(y.text)))
def left_right():
- msg["X-RPKI-Left-Right-Type"] = xml.get("type")
- msg["Subject"] = "Left-right %s" % xml.get("type")
+ msg["X-RPKI-Left-Right-Type"] = xml.get("type")
+ msg["Subject"] = "Left-right %s" % xml.get("type")
def publication():
- msg["X-RPKI-Left-Right-Type"] = xml.get("type")
- msg["Subject"] = "Publication %s" % xml.get("type")
+ msg["X-RPKI-Left-Right-Type"] = xml.get("type")
+ msg["Subject"] = "Publication %s" % xml.get("type")
dispatch = { "{http://www.apnic.net/specs/rescerts/up-down/}message" : up_down,
"{http://www.hactrn.net/uris/rpki/left-right-spec/}msg" : left_right,
"{http://www.hactrn.net/uris/rpki/publication-spec/}msg" : publication }
def fix_headers():
- if "X-RPKI-PID" in srcmsg or "X-RPKI-Object" in srcmsg:
- msg["X-RPKI-PID"] = srcmsg["X-RPKI-PID"]
- msg["X-RPKI-Object"] = srcmsg["X-RPKI-Object"]
- else:
- words = srcmsg["Subject"].split()
- msg["X-RPKI-PID"] = words[1]
- msg["X-RPKI-Object"] = " ".join(words[4:])
-
+ if "X-RPKI-PID" in srcmsg or "X-RPKI-Object" in srcmsg:
+ msg["X-RPKI-PID"] = srcmsg["X-RPKI-PID"]
+ msg["X-RPKI-Object"] = srcmsg["X-RPKI-Object"]
+ else:
+ words = srcmsg["Subject"].split()
+ msg["X-RPKI-PID"] = words[1]
+ msg["X-RPKI-Object"] = " ".join(words[4:])
+
destination = None
source = None
try:
- destination = mailbox.MH(args.output, factory = None, create = True)
- source = mailbox.Maildir(args.input, factory = None)
+ destination = mailbox.MH(args.output, factory = None, create = True)
+ source = mailbox.Maildir(args.input, factory = None)
- for srckey, srcmsg in source.iteritems():
- if args.unseen and "S" in srcmsg.get_flags():
- continue
- assert not srcmsg.is_multipart() and srcmsg.get_content_type() == "application/x-rpki"
- payload = srcmsg.get_payload(decode = True)
- cms = rpki.POW.CMS.derRead(payload)
- txt = cms.verify(rpki.POW.X509Store(), None, rpki.POW.CMS_NOCRL | rpki.POW.CMS_NO_SIGNER_CERT_VERIFY | rpki.POW.CMS_NO_ATTR_VERIFY | rpki.POW.CMS_NO_CONTENT_VERIFY)
- xml = lxml.etree.fromstring(txt)
- tag = xml.tag
- if args.tag and tag != args.tag:
- continue
- msg = email.mime.multipart.MIMEMultipart("related")
- msg["X-RPKI-Tag"] = tag
- for i in ("Date", "Message-ID", "X-RPKI-Timestamp"):
- msg[i] = srcmsg[i]
- fix_headers()
- if tag in dispatch:
- dispatch[tag]()
- if "Subject" not in msg:
- msg["Subject"] = srcmsg["Subject"]
- msg.attach(email.mime.text.MIMEText(txt))
- msg.attach(email.mime.application.MIMEApplication(payload, "x-rpki"))
- msg.epilogue = "\n" # Force trailing newline
- key = destination.add(msg)
- print "Added", key
- if args.kill:
- del source[srckey]
- elif args.mark:
- srcmsg.set_subdir("cur")
- srcmsg.add_flag("S")
- source[srckey] = srcmsg
+ for srckey, srcmsg in source.iteritems():
+ if args.unseen and "S" in srcmsg.get_flags():
+ continue
+ assert not srcmsg.is_multipart() and srcmsg.get_content_type() == "application/x-rpki"
+ payload = srcmsg.get_payload(decode = True)
+ cms = rpki.POW.CMS.derRead(payload)
+ txt = cms.verify(rpki.POW.X509Store(), None, rpki.POW.CMS_NOCRL | rpki.POW.CMS_NO_SIGNER_CERT_VERIFY | rpki.POW.CMS_NO_ATTR_VERIFY | rpki.POW.CMS_NO_CONTENT_VERIFY)
+ xml = lxml.etree.fromstring(txt)
+ tag = xml.tag
+ if args.tag and tag != args.tag:
+ continue
+ msg = email.mime.multipart.MIMEMultipart("related")
+ msg["X-RPKI-Tag"] = tag
+ for i in ("Date", "Message-ID", "X-RPKI-Timestamp"):
+ msg[i] = srcmsg[i]
+ fix_headers()
+ if tag in dispatch:
+ dispatch[tag]()
+ if "Subject" not in msg:
+ msg["Subject"] = srcmsg["Subject"]
+ msg.attach(email.mime.text.MIMEText(txt))
+ msg.attach(email.mime.application.MIMEApplication(payload, "x-rpki"))
+ msg.epilogue = "\n" # Force trailing newline
+ key = destination.add(msg)
+ print "Added", key
+ if args.kill:
+ del source[srckey]
+ elif args.mark:
+ srcmsg.set_subdir("cur")
+ srcmsg.add_flag("S")
+ source[srckey] = srcmsg
finally:
- if destination:
- destination.close()
- if source:
- source.close()
+ if destination:
+ destination.close()
+ if source:
+ source.close()
diff --git a/potpourri/gc_summary.py b/potpourri/gc_summary.py
index 1f6987bf..61b21587 100644
--- a/potpourri/gc_summary.py
+++ b/potpourri/gc_summary.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2010 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -20,93 +20,93 @@ import sys, os, time
class datapoint(object):
- outtype = os.getenv("OUTTYPE", "png")
- outname = os.getenv("OUTNAME", "")
- timefmt = os.getenv("TIMEFMT", "%T")
- pretend = os.getenv("PRETEND_EVERYTHING_CHANGED", False)
- threshold = int(os.getenv("THRESHOLD", "100"))
+ outtype = os.getenv("OUTTYPE", "png")
+ outname = os.getenv("OUTNAME", "")
+ timefmt = os.getenv("TIMEFMT", "%T")
+ pretend = os.getenv("PRETEND_EVERYTHING_CHANGED", False)
+ threshold = int(os.getenv("THRESHOLD", "100"))
- raw = []
- filenames = []
+ raw = []
+ filenames = []
- def __init__(self, filename, timestamp, process, count, typesig, line):
- self.filename = filename
- self.timestamp = timestamp
- self.process = process
- self.count = count
- self.typesig = typesig
- self.line = line
- self.key = "%s %s" % (filename, typesig)
- self.raw.append(self)
- if filename not in self.filenames:
- self.filenames.append(filename)
+ def __init__(self, filename, timestamp, process, count, typesig, line):
+ self.filename = filename
+ self.timestamp = timestamp
+ self.process = process
+ self.count = count
+ self.typesig = typesig
+ self.line = line
+ self.key = "%s %s" % (filename, typesig)
+ self.raw.append(self)
+ if filename not in self.filenames:
+ self.filenames.append(filename)
- def __cmp__(self, other):
- c = cmp(self.key, other.key)
- return c if c else cmp(self.timestamp, other.timestamp)
+ def __cmp__(self, other):
+ c = cmp(self.key, other.key)
+ return c if c else cmp(self.timestamp, other.timestamp)
- @classmethod
- def plot(cls):
+ @classmethod
+ def plot(cls):
- print "# [%s] Looking for interesting records" % time.strftime("%T")
- changed = {}
- for i in cls.raw:
- if i.key not in changed:
- changed[i.key] = set()
- changed[i.key].add(i.count)
- if cls.pretend:
- changed = set(changed.iterkeys())
- else:
- changed = set(k for k, v in changed.iteritems() if max(v) - min(v) > cls.threshold)
+ print "# [%s] Looking for interesting records" % time.strftime("%T")
+ changed = {}
+ for i in cls.raw:
+ if i.key not in changed:
+ changed[i.key] = set()
+ changed[i.key].add(i.count)
+ if cls.pretend:
+ changed = set(changed.iterkeys())
+ else:
+ changed = set(k for k, v in changed.iteritems() if max(v) - min(v) > cls.threshold)
- if not changed:
- print "# [%s] Apparently nothing worth reporting" % time.strftime("%T")
- print "print 'Nothing to plot'"
- return
+ if not changed:
+ print "# [%s] Apparently nothing worth reporting" % time.strftime("%T")
+ print "print 'Nothing to plot'"
+ return
- print "# [%s] Header" % time.strftime("%T")
- print "set xdata time"
- print "set timefmt '%Y-%m-%dT%H:%M:%S'"
- print "set format x '%s'" % cls.timefmt
- print "set key right bottom"
- if cls.outname:
- print "set terminal", cls.outtype
- print "set output '%s.%s'" % (cls.outname, cls.outtype)
- print "set term png size 1024,1024"
- print "plot", ", ".join("'-' using 1:2 with linespoints title '%s'" % i for i in changed)
+ print "# [%s] Header" % time.strftime("%T")
+ print "set xdata time"
+ print "set timefmt '%Y-%m-%dT%H:%M:%S'"
+ print "set format x '%s'" % cls.timefmt
+ print "set key right bottom"
+ if cls.outname:
+ print "set terminal", cls.outtype
+ print "set output '%s.%s'" % (cls.outname, cls.outtype)
+ print "set term png size 1024,1024"
+ print "plot", ", ".join("'-' using 1:2 with linespoints title '%s'" % i for i in changed)
- print "# [%s] Sorting" % time.strftime("%T")
- cls.raw.sort()
+ print "# [%s] Sorting" % time.strftime("%T")
+ cls.raw.sort()
- key = None
- proc = None
- for i in cls.raw:
- if i.key not in changed:
- continue
- if key is not None and i.key != key:
+ key = None
+ proc = None
+ for i in cls.raw:
+ if i.key not in changed:
+ continue
+ if key is not None and i.key != key:
+ print "e"
+ elif proc is not None and i.process != proc:
+ print ""
+ key = i.key
+ proc = i.process
+ print "#", i.key, i.line
+ print i.timestamp, i.count
print "e"
- elif proc is not None and i.process != proc:
- print ""
- key = i.key
- proc = i.process
- print "#", i.key, i.line
- print i.timestamp, i.count
- print "e"
- if not cls.outname:
- print "pause mouse any"
+ if not cls.outname:
+ print "pause mouse any"
for filename in sys.argv[1:]:
- print "# [%s] Reading %s" % (time.strftime("%T"), filename)
- for line in open(filename):
- if "gc_summary:" in line:
- word = line.split(None, 6)
- if word[4].isdigit() and word[5].startswith("(") and word[5].endswith(")"):
- datapoint(filename = filename,
- timestamp = word[0] + "T" + word[1],
- process = word[2],
- count = int(word[4]),
- typesig = word[5],
- line = line.strip())
-
+ print "# [%s] Reading %s" % (time.strftime("%T"), filename)
+ for line in open(filename):
+ if "gc_summary:" in line:
+ word = line.split(None, 6)
+ if word[4].isdigit() and word[5].startswith("(") and word[5].endswith(")"):
+ datapoint(filename = filename,
+ timestamp = word[0] + "T" + word[1],
+ process = word[2],
+ count = int(word[4]),
+ typesig = word[5],
+ line = line.strip())
+
print "# [%s] Plotting" % time.strftime("%T")
datapoint.plot()
diff --git a/potpourri/generate-ripe-root-cert.py b/potpourri/generate-ripe-root-cert.py
index 3407bc51..1b891dce 100644
--- a/potpourri/generate-ripe-root-cert.py
+++ b/potpourri/generate-ripe-root-cert.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2010-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -24,10 +24,10 @@ import lxml.etree
from rpki.csv_utils import csv_writer
def iterate_xml(filename, tag):
- return lxml.etree.parse(filename).getroot().getiterator(tag)
+ return lxml.etree.parse(filename).getroot().getiterator(tag)
def ns(tag):
- return "{http://www.iana.org/assignments}" + tag
+ return "{http://www.iana.org/assignments}" + tag
tag_description = ns("description")
tag_designation = ns("designation")
@@ -39,19 +39,19 @@ asns = csv_writer("asns.csv")
prefixes = csv_writer("prefixes.csv")
for record in iterate_xml("as-numbers.xml", tag_record):
- if record.findtext(tag_description) == "Assigned by RIPE NCC":
- asns.writerow(("RIPE", record.findtext(tag_number)))
-
+ if record.findtext(tag_description) == "Assigned by RIPE NCC":
+ asns.writerow(("RIPE", record.findtext(tag_number)))
+
for record in iterate_xml("ipv4-address-space.xml", tag_record):
- if record.findtext(tag_designation) in ("RIPE NCC", "Administered by RIPE NCC"):
- prefix = record.findtext(tag_prefix)
- p, l = prefix.split("/")
- assert l == "8", "Violated /8 assumption: %r" % prefix
- prefixes.writerow(("RIPE", "%d.0.0.0/8" % int(p)))
-
+ if record.findtext(tag_designation) in ("RIPE NCC", "Administered by RIPE NCC"):
+ prefix = record.findtext(tag_prefix)
+ p, l = prefix.split("/")
+ assert l == "8", "Violated /8 assumption: %r" % prefix
+ prefixes.writerow(("RIPE", "%d.0.0.0/8" % int(p)))
+
for record in iterate_xml("ipv6-unicast-address-assignments.xml", tag_record):
- if record.findtext(tag_description) == "RIPE NCC":
- prefixes.writerow(("RIPE", record.findtext(tag_prefix)))
+ if record.findtext(tag_description) == "RIPE NCC":
+ prefixes.writerow(("RIPE", record.findtext(tag_prefix)))
asns.close()
prefixes.close()
diff --git a/potpourri/generate-root-certificate b/potpourri/generate-root-certificate
deleted file mode 100755
index 31647d5f..00000000
--- a/potpourri/generate-root-certificate
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Generate an RPKI root certificate for rootd. In most cases you should
-not need to do this; see caveats in the manual about running rootd if
-you think you need this. This script does nothing that can't also be
-done with the OpenSSL command line tool, but on some platforms the
-installed copy of openssl doesn't understand the RFC 3779 extensions.
-"""
-
-import os
-import sys
-import time
-import argparse
-import rpki.x509
-import rpki.config
-import rpki.sundial
-import rpki.resource_set
-
-os.environ["TZ"] = "UTC"
-time.tzset()
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-c", "--config", help = "configuration file")
-parser.add_argument("-a", "--asns", default = "0-4294967295", help = "ASN resources")
-parser.add_argument("-4", "--ipv4", default = "0.0.0.0/0", help = "IPv4 resources")
-parser.add_argument("-6", "--ipv6", default = "::/0", help = "IPv6 resources")
-parser.add_argument("--certificate", default = "root.cer", help = "certificate file")
-parser.add_argument("--key", default = "root.key", help = "key file")
-parser.add_argument("--tal", default = "root.tal", help = "TAL file")
-args = parser.parse_args()
-
-cfg = rpki.config.parser(args.config, "rootd")
-
-resources = rpki.resource_set.resource_bag(
- asn = rpki.resource_set.resource_set_as(args.asns),
- v4 = rpki.resource_set.resource_set_ipv4(args.ipv4),
- v6 = rpki.resource_set.resource_set_ipv6(args.ipv6))
-
-keypair = rpki.x509.RSA.generate(quiet = True)
-
-sia = cfg.get("rpki-base-uri")
-sia = (sia, sia + "root.mft", None)
-
-uri = cfg.get("rpki-root-cert-uri")
-
-cert = rpki.x509.X509.self_certify(
- keypair = keypair,
- subject_key = keypair.get_public(),
- serial = 1,
- sia = sia,
- notAfter = rpki.sundial.now() + rpki.sundial.timedelta(days = 365),
- resources = resources)
-
-with open(args.certificate, "wb") as f:
- f.write(cert.get_DER())
-
-with open(args.key, "wb") as f:
- f.write(keypair.get_DER())
-
-with open(args.tal, "w") as f:
- f.write(uri + "\n\n" + keypair.get_public().get_Base64())
diff --git a/potpourri/gski.py b/potpourri/gski.py
index 083a59c8..3faf22d6 100644
--- a/potpourri/gski.py
+++ b/potpourri/gski.py
@@ -17,5 +17,5 @@
import rpki.x509, sys
for file in sys.argv[1:]:
- cert = rpki.x509.X509(Auto_file = file)
- print cert.gSKI(), cert.hSKI(), file
+ cert = rpki.x509.X509(Auto_file = file)
+ print cert.gSKI(), cert.hSKI(), file
diff --git a/potpourri/guess-roas-from-routeviews.py b/potpourri/guess-roas-from-routeviews.py
index d8fb9c4c..8e2ed81a 100644
--- a/potpourri/guess-roas-from-routeviews.py
+++ b/potpourri/guess-roas-from-routeviews.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2009 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -39,25 +39,25 @@ from rpki.resource_set import roa_prefix_ipv4, resource_set_ipv4, resource_range
roas = []
for filename in sys.argv[1:]:
- resources = rpki.x509.X509(Auto_file = filename).get_3779resources().v4
+ resources = rpki.x509.X509(Auto_file = filename).get_3779resources().v4
- while resources:
- labels = str(resources[0].min).split(".")
- labels.reverse()
+ while resources:
+ labels = str(resources[0].min).split(".")
+ labels.reverse()
- try:
- for answer in dns.resolver.query(".".join(labels) + ".asn.routeviews.org", "txt"):
- asn, prefix, prefixlen = answer.strings
- roa_prefix = roa_prefix_ipv4(v4addr(prefix), long(prefixlen))
- roa = "%s\t%s\t%s" % (roa_prefix, long(asn), filename)
- if roa not in roas:
- roas.append(roa)
- resources = resources.difference(resource_set_ipv4([roa_prefix.to_resource_range()]))
+ try:
+ for answer in dns.resolver.query(".".join(labels) + ".asn.routeviews.org", "txt"):
+ asn, prefix, prefixlen = answer.strings
+ roa_prefix = roa_prefix_ipv4(v4addr(prefix), long(prefixlen))
+ roa = "%s\t%s\t%s" % (roa_prefix, long(asn), filename)
+ if roa not in roas:
+ roas.append(roa)
+ resources = resources.difference(resource_set_ipv4([roa_prefix.to_resource_range()]))
- except dns.resolver.NXDOMAIN:
- resources = resources.difference(resource_set_ipv4([resource_range_ipv4(resources[0].min, v4addr(resources[0].min + 256))]))
+ except dns.resolver.NXDOMAIN:
+ resources = resources.difference(resource_set_ipv4([resource_range_ipv4(resources[0].min, v4addr(resources[0].min + 256))]))
roas.sort()
for roa in roas:
- print roa
+ print roa
diff --git a/potpourri/iana-to-csv.py b/potpourri/iana-to-csv.py
index f803a21e..cf82c7e9 100644
--- a/potpourri/iana-to-csv.py
+++ b/potpourri/iana-to-csv.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2010-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -24,10 +24,10 @@ from rpki.csv_utils import csv_reader, csv_writer
from rpki.resource_set import resource_bag
def iterate_xml(filename, tag):
- return lxml.etree.parse(filename).getroot().getiterator(tag)
+ return lxml.etree.parse(filename).getroot().getiterator(tag)
def ns(tag):
- return "{http://www.iana.org/assignments}" + tag
+ return "{http://www.iana.org/assignments}" + tag
tag_description = ns("description")
tag_designation = ns("designation")
@@ -40,30 +40,30 @@ handles = {}
rirs = { "legacy" : resource_bag() }
for rir in ("AfriNIC", "APNIC", "ARIN", "LACNIC", "RIPE NCC"):
- handle = rir.split()[0].lower()
- handles[rir] = handles["Assigned by %s" % rir] = handles["Administered by %s" % rir] = handle
- rirs[handle] = resource_bag()
+ handle = rir.split()[0].lower()
+ handles[rir] = handles["Assigned by %s" % rir] = handles["Administered by %s" % rir] = handle
+ rirs[handle] = resource_bag()
asns = csv_writer("asns.csv")
prefixes = csv_writer("prefixes.csv")
for record in iterate_xml("as-numbers.xml", tag_record):
- description = record.findtext(tag_description)
- if description in handles:
- asns.writerow((handles[description], record.findtext(tag_number)))
-
+ description = record.findtext(tag_description)
+ if description in handles:
+ asns.writerow((handles[description], record.findtext(tag_number)))
+
for record in iterate_xml("ipv4-address-space.xml", tag_record):
- designation = record.findtext(tag_designation)
- if record.findtext(tag_status) != "RESERVED":
- prefix, prefixlen = [int(i) for i in record.findtext(tag_prefix).split("/")]
- if prefixlen != 8:
- raise ValueError("%s violated /8 assumption" % record.findtext(tag_prefix))
- rirs[handles.get(designation, "legacy")] |= resource_bag.from_str("%d.0.0.0/8" % prefix)
+ designation = record.findtext(tag_designation)
+ if record.findtext(tag_status) != "RESERVED":
+ prefix, prefixlen = [int(i) for i in record.findtext(tag_prefix).split("/")]
+ if prefixlen != 8:
+ raise ValueError("%s violated /8 assumption" % record.findtext(tag_prefix))
+ rirs[handles.get(designation, "legacy")] |= resource_bag.from_str("%d.0.0.0/8" % prefix)
for record in iterate_xml("ipv6-unicast-address-assignments.xml", tag_record):
- description = record.findtext(tag_description)
- if record.findtext(tag_description) in handles:
- rirs[handles[description]] |= resource_bag.from_str(record.findtext(tag_prefix))
+ description = record.findtext(tag_description)
+ if record.findtext(tag_description) in handles:
+ rirs[handles[description]] |= resource_bag.from_str(record.findtext(tag_prefix))
erx = list(csv_reader("erx.csv"))
assert all(r in rirs for r, p in erx)
@@ -71,15 +71,15 @@ assert all(r in rirs for r, p in erx)
erx_overrides = resource_bag.from_str(",".join(p for r, p in erx), allow_overlap = True)
for rir in rirs:
- if rir != "legacy":
- rirs[rir] -= erx_overrides
- rirs[rir] |= resource_bag.from_str(",".join(p for r, p in erx if r == rir), allow_overlap = True)
+ if rir != "legacy":
+ rirs[rir] -= erx_overrides
+ rirs[rir] |= resource_bag.from_str(",".join(p for r, p in erx if r == rir), allow_overlap = True)
for rir, bag in rirs.iteritems():
- for p in bag.v4:
- prefixes.writerow((rir, p))
- for p in bag.v6:
- prefixes.writerow((rir, p))
+ for p in bag.v4:
+ prefixes.writerow((rir, p))
+ for p in bag.v6:
+ prefixes.writerow((rir, p))
asns.close()
prefixes.close()
diff --git a/potpourri/missing-oids.py b/potpourri/missing-oids.py
index 16316eac..8557e841 100644
--- a/potpourri/missing-oids.py
+++ b/potpourri/missing-oids.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -23,16 +23,16 @@ import rpki.POW.pkix, rpki.oids
need_header = True
for oid, name in rpki.oids.oid2name.items():
- try:
- rpki.POW.pkix.oid2obj(oid)
- except:
- o = rpki.POW.pkix.Oid()
- o.set(oid)
- if need_header:
- print
- print "# Local additions"
- need_header = False
- print
- print "OID =", " ".join(("%02X" % ord(c)) for c in o.toString())
- print "Comment = RPKI project"
- print "Description =", name, "(" + " ".join((str(i) for i in oid)) + ")"
+ try:
+ rpki.POW.pkix.oid2obj(oid)
+ except:
+ o = rpki.POW.pkix.Oid()
+ o.set(oid)
+ if need_header:
+ print
+ print "# Local additions"
+ need_header = False
+ print
+ print "OID =", " ".join(("%02X" % ord(c)) for c in o.toString())
+ print "Comment = RPKI project"
+ print "Description =", name, "(" + " ".join((str(i) for i in oid)) + ")"
diff --git a/potpourri/object-dates.py b/potpourri/object-dates.py
index b99441d6..ea2fd489 100644
--- a/potpourri/object-dates.py
+++ b/potpourri/object-dates.py
@@ -5,11 +5,11 @@
# RPKI objects.
# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
-#
+#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -28,21 +28,21 @@ extract_flags = (rpki.POW.CMS_NOCRL |
rpki.POW.CMS_NO_CONTENT_VERIFY)
def get_mft(fn):
- cms = rpki.POW.Manifest.derReadFile(fn)
- cms.verify(rpki.POW.X509Store(), None, extract_flags)
- return cms, cms.certs()[0]
+ cms = rpki.POW.Manifest.derReadFile(fn)
+ cms.extractWithoutVerifying()
+ return cms, cms.certs()[0]
def get_roa(fn):
- return None, rpki.POW.CMS.derReadFile(fn).certs()[0]
+ return None, rpki.POW.CMS.derReadFile(fn).certs()[0]
def get_gbr(fn):
- return None, rpki.POW.CMS.derReadFile(fn).certs()[0]
+ return None, rpki.POW.CMS.derReadFile(fn).certs()[0]
def get_crl(fn):
- return rpki.POW.CRL.derReadFile(fn), None
+ return rpki.POW.CRL.derReadFile(fn), None
def get_cer(fn):
- return None, rpki.POW.X509.derReadFile(fn)
+ return None, rpki.POW.X509.derReadFile(fn)
dispatch = dict(mft = get_mft,
roa = get_roa,
@@ -51,13 +51,13 @@ dispatch = dict(mft = get_mft,
cer = get_cer)
for fn in sys.argv[1:]:
- obj, cer = dispatch[os.path.splitext(fn)[1][1:]](fn)
- print fn
- if cer is not None:
- print "notBefore: ", cer.getNotBefore()
- if obj is not None:
- print "thisUpdate:", obj.getThisUpdate()
- print "nextUpdate:", obj.getNextUpdate()
- if cer is not None:
- print "notAfter: ", cer.getNotAfter()
- print
+ obj, cer = dispatch[os.path.splitext(fn)[1][1:]](fn)
+ print fn
+ if cer is not None:
+ print "notBefore: ", cer.getNotBefore()
+ if obj is not None:
+ print "thisUpdate:", obj.getThisUpdate()
+ print "nextUpdate:", obj.getNextUpdate()
+ if cer is not None:
+ print "notAfter: ", cer.getNotAfter()
+ print
diff --git a/potpourri/oob-translate.xsl b/potpourri/oob-translate.xsl
new file mode 100644
index 00000000..da71e348
--- /dev/null
+++ b/potpourri/oob-translate.xsl
@@ -0,0 +1,81 @@
+<!-- $Id$ -->
+<!--
+ - Translate between old "myrpki" XML and current IETF standards
+ - track out-of-band-setup protocol XML. Well, partially. Much of
+ - the old protocol is either irrelevant or can't be translated due
+ - to embedded signatures, but the subset that other implementations
+ - support is small enough that we can fake something workable.
+ -->
+
+<xsl:transform xmlns:xsl = "http://www.w3.org/1999/XSL/Transform"
+ version = "1.0"
+ xmlns:myrpki = "http://www.hactrn.net/uris/rpki/myrpki/"
+ xmlns:oob = "http://www.hactrn.net/uris/rpki/rpki-setup/"
+ exclude-result-prefixes = "myrpki oob">
+
+ <xsl:output omit-xml-declaration = "yes"
+ indent = "yes"
+ method = "xml"
+ encoding = "US-ASCII"/>
+
+ <!-- Versions of the respective protocols -->
+
+ <xsl:param name = "myrpki-version" select = "2"/>
+ <xsl:param name = "oob-version" select = "1"/>
+
+ <!-- Old-style identity to new-style child_request -->
+
+ <xsl:template match = "/myrpki:identity">
+ <oob:child_request version = "{$oob-version}"
+ child_handle = "{@handle}">
+ <oob:child_bpki_ta>
+ <xsl:value-of select = "myrpki:bpki_ta"/>
+ </oob:child_bpki_ta>
+ </oob:child_request>
+ </xsl:template>
+
+ <!-- New-style child_request to old style identity -->
+
+ <xsl:template match = "/oob:child_request">
+ <myrpki:identity version = "{$myrpki-version}"
+ handle = "{@child_handle}">
+ <myrpki:bpki_ta>
+ <xsl:value-of select = "oob:child_bpki_ta"/>
+ </myrpki:bpki_ta>
+ </myrpki:identity>
+ </xsl:template>
+
+ <!-- Old-style parent response to new-style parent_response -->
+ <!-- Referrals are not translatable due to embedded signatures -->
+
+ <xsl:template match = "/myrpki:parent">
+ <oob:parent_response version = "{$oob-version}"
+ service_uri = "{@service_uri}"
+ child_handle = "{@child_handle}"
+ parent_handle = "{@parent_handle}">
+ <oob:parent_bpki_ta>
+ <xsl:value-of select = "myrpki:bpki_resource_ta"/>
+ </oob:parent_bpki_ta>
+ <xsl:if test = "repository[@type = 'offer']">
+ <oob:offer/>
+ </xsl:if>
+ </oob:parent_response>
+ </xsl:template>
+
+ <!-- New-style parent_response to old-style parent response -->
+ <!-- Referrals are not translatable due to embedded signatures -->
+
+ <xsl:template match = "/oob:parent_response">
+ <myrpki:parent version = "{$myrpki-version}"
+ service_uri = "{@service_uri}"
+ child_handle = "{@child_handle}"
+ parent_handle = "{@parent_handle}">
+ <myrpki:bpki_resource_ta>
+ <xsl:value-of select = "oob:parent_bpki_ta"/>
+ </myrpki:bpki_resource_ta>
+ <myrpki:bpki_child_ta/>
+ <myrpki:repository type = "none"/>
+ </myrpki:parent>
+ </xsl:template>
+
+</xsl:transform>
diff --git a/potpourri/print-profile.py b/potpourri/print-profile.py
index 081d2602..4012fa3f 100644
--- a/potpourri/print-profile.py
+++ b/potpourri/print-profile.py
@@ -17,4 +17,4 @@
import pstats, glob
for f in glob.iglob("*.prof"):
- pstats.Stats(f).sort_stats("time").print_stats(50)
+ pstats.Stats(f).sort_stats("time").print_stats(50)
diff --git a/potpourri/rcynic-diff.py b/potpourri/rcynic-diff.py
index 327a7b71..d5be51e0 100644
--- a/potpourri/rcynic-diff.py
+++ b/potpourri/rcynic-diff.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
-#
+#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -21,94 +21,94 @@ Diff a series of rcynic.xml files, sort of.
import sys
try:
- from lxml.etree import ElementTree
+ from lxml.etree import ElementTree
except ImportError:
- from xml.etree.ElementTree import ElementTree
+ from xml.etree.ElementTree import ElementTree
show_backup_generation = False
show_rsync_transfer = False
class Object(object):
- def __init__(self, session, uri, generation):
- self.session = session
- self.uri = uri
- self.generation = generation
- self.labels = []
+ def __init__(self, session, uri, generation):
+ self.session = session
+ self.uri = uri
+ self.generation = generation
+ self.labels = []
- def add(self, label):
- self.labels.append(label)
+ def add(self, label):
+ self.labels.append(label)
- def __cmp__(self, other):
- return cmp(self.labels, other.labels)
+ def __cmp__(self, other):
+ return cmp(self.labels, other.labels)
def show(old = None, new = None):
- assert old is not None or new is not None
- assert old is None or new is None or old.uri == new.uri
- if old is None:
- obj = new
- labels = ["+" + label for label in new.labels]
- elif new is None:
- obj = old
- labels = ["-" + label for label in old.labels]
- else:
- obj = new
- labels = []
- for label in new.session.labels:
- if label in new.labels and label in old.labels:
- labels.append(label)
- elif label in new.labels:
- labels.append("+" + label)
- elif label in old.labels:
- labels.append("-" + label)
- labels = " ".join(labels)
- if show_backup_generation:
- print " ", obj.uri, obj.generation, labels
- else:
- print " ", obj.uri, labels
+ assert old is not None or new is not None
+ assert old is None or new is None or old.uri == new.uri
+ if old is None:
+ obj = new
+ labels = ["+" + label for label in new.labels]
+ elif new is None:
+ obj = old
+ labels = ["-" + label for label in old.labels]
+ else:
+ obj = new
+ labels = []
+ for label in new.session.labels:
+ if label in new.labels and label in old.labels:
+ labels.append(label)
+ elif label in new.labels:
+ labels.append("+" + label)
+ elif label in old.labels:
+ labels.append("-" + label)
+ labels = " ".join(labels)
+ if show_backup_generation:
+ print " ", obj.uri, obj.generation, labels
+ else:
+ print " ", obj.uri, labels
class Session(dict):
- def __init__(self, name):
- self.name = name
- tree = ElementTree(file = name)
- self.labels = [elt.tag.strip() for elt in tree.find("labels")]
- for elt in tree.findall("validation_status"):
- generation = elt.get("generation")
- status = elt.get("status")
- uri = elt.text.strip()
- if not show_rsync_transfer and status.startswith("rsync_transfer_"):
- continue
- if show_backup_generation:
- key = (uri, generation)
- elif generation == "backup":
- continue
- else:
- key = uri
- if key not in self:
- self[key] = Object(self, uri, generation)
- self[key].add(status)
+ def __init__(self, name):
+ self.name = name
+ tree = ElementTree(file = name)
+ self.labels = [elt.tag.strip() for elt in tree.find("labels")]
+ for elt in tree.findall("validation_status"):
+ generation = elt.get("generation")
+ status = elt.get("status")
+ uri = elt.text.strip()
+ if not show_rsync_transfer and status.startswith("rsync_transfer_"):
+ continue
+ if show_backup_generation:
+ key = (uri, generation)
+ elif generation == "backup":
+ continue
+ else:
+ key = uri
+ if key not in self:
+ self[key] = Object(self, uri, generation)
+ self[key].add(status)
old_db = new_db = None
for arg in sys.argv[1:]:
- old_db = new_db
- new_db = Session(arg)
-
- if old_db is None:
- continue
-
- only_old = set(old_db) - set(new_db)
- only_new = set(new_db) - set(old_db)
- changed = set(key for key in (set(old_db) & set(new_db)) if old_db[key] != new_db[key])
-
- if only_old or changed or only_new:
- print "Comparing", old_db.name, "with", new_db.name
- for key in sorted(only_old):
- show(old = old_db[key])
- for key in sorted(changed):
- show(old = old_db[key], new = new_db[key])
- for key in sorted(only_new):
- show(new = new_db[key])
- print
+ old_db = new_db
+ new_db = Session(arg)
+
+ if old_db is None:
+ continue
+
+ only_old = set(old_db) - set(new_db)
+ only_new = set(new_db) - set(old_db)
+ changed = set(key for key in (set(old_db) & set(new_db)) if old_db[key] != new_db[key])
+
+ if only_old or changed or only_new:
+ print "Comparing", old_db.name, "with", new_db.name
+ for key in sorted(only_old):
+ show(old = old_db[key])
+ for key in sorted(changed):
+ show(old = old_db[key], new = new_db[key])
+ for key in sorted(only_new):
+ show(new = new_db[key])
+ print
diff --git a/potpourri/ripe-asns-to-csv.py b/potpourri/ripe-asns-to-csv.py
index 50251ce8..0c85b901 100644
--- a/potpourri/ripe-asns-to-csv.py
+++ b/potpourri/ripe-asns-to-csv.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2009-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -28,81 +28,81 @@ from rpki.csv_utils import csv_writer
class Handle(dict):
- want_tags = ()
+ want_tags = ()
- debug = False
+ debug = False
- def set(self, tag, val):
- if tag in self.want_tags:
- self[tag] = "".join(val.split(" "))
+ def set(self, tag, val):
+ if tag in self.want_tags:
+ self[tag] = "".join(val.split(" "))
- def check(self):
- for tag in self.want_tags:
- if not tag in self:
- return False
- if self.debug:
- self.log()
- return True
+ def check(self):
+ for tag in self.want_tags:
+ if not tag in self:
+ return False
+ if self.debug:
+ self.log()
+ return True
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__,
- " ".join("%s:%s" % (tag, self.get(tag, "?"))
- for tag in self.want_tags))
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__,
+ " ".join("%s:%s" % (tag, self.get(tag, "?"))
+ for tag in self.want_tags))
- def log(self):
- print repr(self)
+ def log(self):
+ print repr(self)
- def finish(self, ctx):
- self.check()
+ def finish(self, ctx):
+ self.check()
class aut_num(Handle):
- want_tags = ("aut-num", "mnt-by", "as-name")
+ want_tags = ("aut-num", "mnt-by", "as-name")
- def set(self, tag, val):
- if tag == "aut-num" and val.startswith("AS"):
- val = val[2:]
- Handle.set(self, tag, val)
+ def set(self, tag, val):
+ if tag == "aut-num" and val.startswith("AS"):
+ val = val[2:]
+ Handle.set(self, tag, val)
- def finish(self, ctx):
- if self.check():
- ctx.asns.writerow((self["mnt-by"], self["aut-num"]))
+ def finish(self, ctx):
+ if self.check():
+ ctx.asns.writerow((self["mnt-by"], self["aut-num"]))
class main(object):
- types = dict((x.want_tags[0], x) for x in (aut_num,))
-
-
- def finish_statement(self, done):
- if self.statement:
- tag, sep, val = self.statement.partition(":")
- assert sep, "Couldn't find separator in %r" % self.statement
- tag = tag.strip().lower()
- val = val.strip().upper()
- if self.cur is None:
- self.cur = self.types[tag]() if tag in self.types else False
- if self.cur is not False:
- self.cur.set(tag, val)
- if done and self.cur:
- self.cur.finish(self)
- self.cur = None
-
- filenames = ("ripe.db.aut-num.gz",)
-
- def __init__(self):
- self.asns = csv_writer("asns.csv")
- for fn in self.filenames:
- f = gzip.open(fn)
- self.statement = ""
- self.cur = None
- for line in f:
- line = line.expandtabs().partition("#")[0].rstrip("\n")
- if line and not line[0].isalpha():
- self.statement += line[1:] if line[0] == "+" else line
- else:
- self.finish_statement(not line)
- self.statement = line
- self.finish_statement(True)
- f.close()
- self.asns.close()
+ types = dict((x.want_tags[0], x) for x in (aut_num,))
+
+
+ def finish_statement(self, done):
+ if self.statement:
+ tag, sep, val = self.statement.partition(":")
+ assert sep, "Couldn't find separator in %r" % self.statement
+ tag = tag.strip().lower()
+ val = val.strip().upper()
+ if self.cur is None:
+ self.cur = self.types[tag]() if tag in self.types else False
+ if self.cur is not False:
+ self.cur.set(tag, val)
+ if done and self.cur:
+ self.cur.finish(self)
+ self.cur = None
+
+ filenames = ("ripe.db.aut-num.gz",)
+
+ def __init__(self):
+ self.asns = csv_writer("asns.csv")
+ for fn in self.filenames:
+ f = gzip.open(fn)
+ self.statement = ""
+ self.cur = None
+ for line in f:
+ line = line.expandtabs().partition("#")[0].rstrip("\n")
+ if line and not line[0].isalpha():
+ self.statement += line[1:] if line[0] == "+" else line
+ else:
+ self.finish_statement(not line)
+ self.statement = line
+ self.finish_statement(True)
+ f.close()
+ self.asns.close()
main()
diff --git a/potpourri/ripe-to-csv.py b/potpourri/ripe-to-csv.py
index b864345b..308917ce 100644
--- a/potpourri/ripe-to-csv.py
+++ b/potpourri/ripe-to-csv.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2009-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -41,98 +41,98 @@ from rpki.csv_utils import csv_writer
class Handle(dict):
- want_tags = ()
+ want_tags = ()
- want_status = ("ASSIGNED", "ASSIGNEDPA", "ASSIGNEDPI")
+ want_status = ("ASSIGNED", "ASSIGNEDPA", "ASSIGNEDPI")
- debug = False
+ debug = False
- def set(self, tag, val):
- if tag in self.want_tags:
- self[tag] = "".join(val.split(" "))
+ def set(self, tag, val):
+ if tag in self.want_tags:
+ self[tag] = "".join(val.split(" "))
- def check(self):
- for tag in self.want_tags:
- if not tag in self:
- return False
- if self.debug:
- self.log()
- return True
+ def check(self):
+ for tag in self.want_tags:
+ if not tag in self:
+ return False
+ if self.debug:
+ self.log()
+ return True
- def __repr__(self):
- return "<%s %s>" % (self.__class__.__name__,
- " ".join("%s:%s" % (tag, self.get(tag, "?"))
- for tag in self.want_tags))
+ def __repr__(self):
+ return "<%s %s>" % (self.__class__.__name__,
+ " ".join("%s:%s" % (tag, self.get(tag, "?"))
+ for tag in self.want_tags))
- def log(self):
- print repr(self)
+ def log(self):
+ print repr(self)
- def finish(self, ctx):
- self.check()
+ def finish(self, ctx):
+ self.check()
class aut_num(Handle):
- want_tags = ("aut-num", "mnt-by") # "as-name"
+ want_tags = ("aut-num", "mnt-by") # "as-name"
- def set(self, tag, val):
- if tag == "aut-num" and val.startswith("AS"):
- val = val[2:]
- Handle.set(self, tag, val)
+ def set(self, tag, val):
+ if tag == "aut-num" and val.startswith("AS"):
+ val = val[2:]
+ Handle.set(self, tag, val)
- def finish(self, ctx):
- if self.check():
- ctx.asns.writerow((self["mnt-by"], self["aut-num"]))
+ def finish(self, ctx):
+ if self.check():
+ ctx.asns.writerow((self["mnt-by"], self["aut-num"]))
class inetnum(Handle):
- want_tags = ("inetnum", "netname", "status") # "mnt-by"
-
- def finish(self, ctx):
- if self.check() and self["status"] in self.want_status:
- ctx.prefixes.writerow((self["netname"], self["inetnum"]))
+ want_tags = ("inetnum", "netname", "status") # "mnt-by"
+
+ def finish(self, ctx):
+ if self.check() and self["status"] in self.want_status:
+ ctx.prefixes.writerow((self["netname"], self["inetnum"]))
class inet6num(Handle):
- want_tags = ("inet6num", "netname", "status") # "mnt-by"
+ want_tags = ("inet6num", "netname", "status") # "mnt-by"
- def finish(self, ctx):
- if self.check() and self["status"] in self.want_status:
- ctx.prefixes.writerow((self["netname"], self["inet6num"]))
+ def finish(self, ctx):
+ if self.check() and self["status"] in self.want_status:
+ ctx.prefixes.writerow((self["netname"], self["inet6num"]))
class main(object):
- types = dict((x.want_tags[0], x) for x in (aut_num, inetnum, inet6num))
-
- def finish_statement(self, done):
- if self.statement:
- tag, sep, val = self.statement.partition(":")
- assert sep, "Couldn't find separator in %r" % self.statement
- tag = tag.strip().lower()
- val = val.strip().upper()
- if self.cur is None:
- self.cur = self.types[tag]() if tag in self.types else False
- if self.cur is not False:
- self.cur.set(tag, val)
- if done and self.cur:
- self.cur.finish(self)
- self.cur = None
-
- filenames = ("ripe.db.aut-num.gz", "ripe.db.inet6num.gz", "ripe.db.inetnum.gz")
-
- def __init__(self):
- self.asns = csv_writer("asns.csv")
- self.prefixes = csv_writer("prefixes.csv")
- for fn in self.filenames:
- f = gzip.open(fn)
- self.statement = ""
- self.cur = None
- for line in f:
- line = line.expandtabs().partition("#")[0].rstrip("\n")
- if line and not line[0].isalpha():
- self.statement += line[1:] if line[0] == "+" else line
- else:
- self.finish_statement(not line)
- self.statement = line
- self.finish_statement(True)
- f.close()
- self.asns.close()
- self.prefixes.close()
+ types = dict((x.want_tags[0], x) for x in (aut_num, inetnum, inet6num))
+
+ def finish_statement(self, done):
+ if self.statement:
+ tag, sep, val = self.statement.partition(":")
+ assert sep, "Couldn't find separator in %r" % self.statement
+ tag = tag.strip().lower()
+ val = val.strip().upper()
+ if self.cur is None:
+ self.cur = self.types[tag]() if tag in self.types else False
+ if self.cur is not False:
+ self.cur.set(tag, val)
+ if done and self.cur:
+ self.cur.finish(self)
+ self.cur = None
+
+ filenames = ("ripe.db.aut-num.gz", "ripe.db.inet6num.gz", "ripe.db.inetnum.gz")
+
+ def __init__(self):
+ self.asns = csv_writer("asns.csv")
+ self.prefixes = csv_writer("prefixes.csv")
+ for fn in self.filenames:
+ f = gzip.open(fn)
+ self.statement = ""
+ self.cur = None
+ for line in f:
+ line = line.expandtabs().partition("#")[0].rstrip("\n")
+ if line and not line[0].isalpha():
+ self.statement += line[1:] if line[0] == "+" else line
+ else:
+ self.finish_statement(not line)
+ self.statement = line
+ self.finish_statement(True)
+ f.close()
+ self.asns.close()
+ self.prefixes.close()
main()
diff --git a/potpourri/roa-to-irr.py b/potpourri/roa-to-irr.py
index 500596f8..748f37fa 100644
--- a/potpourri/roa-to-irr.py
+++ b/potpourri/roa-to-irr.py
@@ -1,12 +1,12 @@
# $Id$
-#
+#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2010--2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
@@ -34,128 +34,128 @@ from time import time, strftime, gmtime, asctime
args = None
class route(object):
- """
- Interesting parts of a route object.
- """
-
- def __init__(self, label, uri, asnum, date, prefix, prefixlen, max_prefixlen):
- self.label = label
- self.uri = uri
- self.asn = asnum
- self.date = date
- self.prefix = prefix
- self.prefixlen = prefixlen
- self.max_prefixlen = self.prefixlen if max_prefixlen is None else max_prefixlen
-
- def __cmp__(self, other):
- result = cmp(self.asn, other.asn)
- if result == 0:
- result = cmp(self.prefix, other.prefix)
- if result == 0:
- result = cmp(self.prefixlen, other.prefixlen)
- if result == 0:
- result = cmp(self.max_prefixlen, other.max_prefixlen)
- if result == 0:
- result = cmp(self.date, other.date)
- return result
-
- def __str__(self):
- lines = "\n" if args.email else ""
- lines += dedent('''\
- {self.label:<14s}{self.prefix}/{self.prefixlen}
- descr: {self.prefix}/{self.prefixlen}-{self.max_prefixlen}
- origin: AS{self.asn:d}
- notify: {args.notify}
- mnt-by: {args.mnt_by}
- changed: {args.changed_by} {self.date}
- source: {args.source}
- ''').format(self = self, args = args)
- if args.password is not None:
- lines += "override: {}\n".format(args.password)
- return lines
-
- def write(self, output_directory):
- name = "{0.prefix}-{0.prefixlen}-{0.max_prefixlen}-AS{0.asn:d}-{0.date}".format(self)
- with open(os.path.join(output_directory, name), "w") as f:
- f.write(str(self))
+ """
+ Interesting parts of a route object.
+ """
+
+ def __init__(self, label, uri, asnum, date, prefix, prefixlen, max_prefixlen):
+ self.label = label
+ self.uri = uri
+ self.asn = asnum
+ self.date = date
+ self.prefix = prefix
+ self.prefixlen = prefixlen
+ self.max_prefixlen = self.prefixlen if max_prefixlen is None else max_prefixlen
+
+ def __cmp__(self, other):
+ result = cmp(self.asn, other.asn)
+ if result == 0:
+ result = cmp(self.prefix, other.prefix)
+ if result == 0:
+ result = cmp(self.prefixlen, other.prefixlen)
+ if result == 0:
+ result = cmp(self.max_prefixlen, other.max_prefixlen)
+ if result == 0:
+ result = cmp(self.date, other.date)
+ return result
+
+ def __str__(self):
+ lines = "\n" if args.email else ""
+ lines += dedent('''\
+ {self.label:<14s}{self.prefix}/{self.prefixlen}
+ descr: {self.prefix}/{self.prefixlen}-{self.max_prefixlen}
+ origin: AS{self.asn:d}
+ notify: {args.notify}
+ mnt-by: {args.mnt_by}
+ changed: {args.changed_by} {self.date}
+ source: {args.source}
+ ''').format(self = self, args = args)
+ if args.password is not None:
+ lines += "override: {}\n".format(args.password)
+ return lines
+
+ def write(self, output_directory):
+ name = "{0.prefix}-{0.prefixlen}-{0.max_prefixlen}-AS{0.asn:d}-{0.date}".format(self)
+ with open(os.path.join(output_directory, name), "w") as f:
+ f.write(str(self))
class route_list(list):
- """
- A list of route objects.
- """
-
- def __init__(self, rcynic_dir):
- for root, dirs, files in os.walk(rcynic_dir):
- for f in files:
- if f.endswith(".roa"):
- path = os.path.join(root, f)
- uri = "rsync://" + path[len(rcynic_dir):].lstrip("/")
- roa = rpki.x509.ROA(DER_file = path)
- roa.extract()
- assert roa.get_POW().getVersion() == 0, "ROA version is {:d}, expected 0".format(roa.get_POW().getVersion())
- asnum = roa.get_POW().getASID()
- notBefore = roa.get_POW().certs()[0].getNotBefore().strftime("%Y%m%d")
- v4, v6 = roa.get_POW().getPrefixes()
- if v4 is not None:
- for prefix, prefixlen, max_prefixlen in v4:
- self.append(route("route:", uri, asnum, notBefore, prefix, prefixlen, max_prefixlen))
- if v6 is not None:
- for prefix, prefixlen, max_prefixlen in v6:
- self.append(route("route6:", uri, asnum, notBefore, prefix, prefixlen, max_prefixlen))
- self.sort()
- for i in xrange(len(self) - 2, -1, -1):
- if self[i] == self[i + 1]:
- del self[i + 1]
+ """
+ A list of route objects.
+ """
+
+ def __init__(self, rcynic_dir):
+ for root, dirs, files in os.walk(rcynic_dir):
+ for f in files:
+ if f.endswith(".roa"):
+ path = os.path.join(root, f)
+ uri = "rsync://" + path[len(rcynic_dir):].lstrip("/")
+ roa = rpki.x509.ROA(DER_file = path)
+ roa.extract()
+ assert roa.get_POW().getVersion() == 0, "ROA version is {:d}, expected 0".format(roa.get_POW().getVersion())
+ asnum = roa.get_POW().getASID()
+ notBefore = roa.get_POW().certs()[0].getNotBefore().strftime("%Y%m%d")
+ v4, v6 = roa.get_POW().getPrefixes()
+ if v4 is not None:
+ for prefix, prefixlen, max_prefixlen in v4:
+ self.append(route("route:", uri, asnum, notBefore, prefix, prefixlen, max_prefixlen))
+ if v6 is not None:
+ for prefix, prefixlen, max_prefixlen in v6:
+ self.append(route("route6:", uri, asnum, notBefore, prefix, prefixlen, max_prefixlen))
+ self.sort()
+ for i in xrange(len(self) - 2, -1, -1):
+ if self[i] == self[i + 1]:
+ del self[i + 1]
def email_header(f):
- if args.email:
- now = time()
- f.write(dedent('''\
- From {from_} {ctime}
- Date: {date}
- From: {from_}
- Subject: Fake email header to make irr_rpsl_submit happy
- Message-Id: <{pid}.{seconds}@{hostname}>
- ''').format(from_ = args.from_,
- ctime = asctime(gmtime(now)),
- date = strftime("%d %b %Y %T %z", gmtime(now)),
- pid = os.getpid(),
- seconds = now,
- hostname = gethostname()))
+ if args.email:
+ now = time()
+ f.write(dedent('''\
+ From {from_} {ctime}
+ Date: {date}
+ From: {from_}
+ Subject: Fake email header to make irr_rpsl_submit happy
+ Message-Id: <{pid}.{seconds}@{hostname}>
+ ''').format(from_ = args.from_,
+ ctime = asctime(gmtime(now)),
+ date = strftime("%d %b %Y %T %z", gmtime(now)),
+ pid = os.getpid(),
+ seconds = now,
+ hostname = gethostname()))
def main():
- global args
- whoami = "{}@{}".format(os.getlogin(), gethostname())
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--changed_by", default = whoami, help = "override \"changed:\" value")
- parser.add_argument("-f", "--from", dest="from_", default = whoami, help = "override \"from:\" header when using --email")
- parser.add_argument("-m", "--mnt_by", default = "MAINT-RPKI", help = "override \"mnt-by:\" value")
- parser.add_argument("-n", "--notify", default = whoami, help = "override \"notify:\" value")
- parser.add_argument("-p", "--password", help = "specify \"override:\" password")
- parser.add_argument("-s", "--source", default = "RPKI", help = "override \"source:\" value")
- group = parser.add_mutually_exclusive_group()
- group.add_argument("-e", "--email", action = "store_true", help = "generate fake RFC 822 header suitable for piping to irr_rpsl_submit")
- group.add_argument("-d", "--output-directory", help = "write route and route6 objects to directory OUTPUT, one object per file")
- parser.add_argument("authenticated_directory", help = "directory tree containing authenticated rcynic output")
- args = parser.parse_args()
-
- if not os.path.isdir(args.authenticated_directory):
- sys.exit('"{}" is not a directory'.format(args.authenticated_directory))
-
- routes = route_list(args.authenticated_directory)
-
- if args.output_directory:
- if not os.path.isdir(args.output_directory):
- os.makedirs(args.output_directory)
- for r in routes:
- r.write(args.output_directory)
- else:
- email_header(sys.stdout)
- for r in routes:
- sys.stdout.write(str(r))
+ global args
+ whoami = "{}@{}".format(os.getlogin(), gethostname())
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("-c", "--changed_by", default = whoami, help = "override \"changed:\" value")
+ parser.add_argument("-f", "--from", dest="from_", default = whoami, help = "override \"from:\" header when using --email")
+ parser.add_argument("-m", "--mnt_by", default = "MAINT-RPKI", help = "override \"mnt-by:\" value")
+ parser.add_argument("-n", "--notify", default = whoami, help = "override \"notify:\" value")
+ parser.add_argument("-p", "--password", help = "specify \"override:\" password")
+ parser.add_argument("-s", "--source", default = "RPKI", help = "override \"source:\" value")
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument("-e", "--email", action = "store_true", help = "generate fake RFC 822 header suitable for piping to irr_rpsl_submit")
+ group.add_argument("-d", "--output-directory", help = "write route and route6 objects to directory OUTPUT, one object per file")
+ parser.add_argument("authenticated_directory", help = "directory tree containing authenticated rcynic output")
+ args = parser.parse_args()
+
+ if not os.path.isdir(args.authenticated_directory):
+ sys.exit('"{}" is not a directory'.format(args.authenticated_directory))
+
+ routes = route_list(args.authenticated_directory)
+
+ if args.output_directory:
+ if not os.path.isdir(args.output_directory):
+ os.makedirs(args.output_directory)
+ for r in routes:
+ r.write(args.output_directory)
+ else:
+ email_header(sys.stdout)
+ for r in routes:
+ sys.stdout.write(str(r))
if __name__ == "__main__":
- main()
+ main()
diff --git a/potpourri/rrd-rcynic-history.py b/potpourri/rrd-rcynic-history.py
index 8a0d50a8..45aec6c5 100644
--- a/potpourri/rrd-rcynic-history.py
+++ b/potpourri/rrd-rcynic-history.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2011-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -39,159 +39,159 @@ os.putenv("TZ", "UTC")
time.tzset()
def parse_utc(s):
- return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ")))
+ return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ")))
class Host(object):
- """
- A host object represents all the data collected for one host for a given session.
- """
-
- def __init__(self, hostname, timestamp):
- self.hostname = hostname
- self.timestamp = timestamp
- self.elapsed = 0
- self.connections = 0
- self.failures = 0
- self.uris = set()
-
- def add_connection(self, elt):
- self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
- self.connections += 1
- if elt.get("error") is not None:
- self.failures += 1
-
- def add_object_uri(self, u):
- self.uris.add(u)
-
- @property
- def failed(self):
- return 1 if self.failures > 0 else 0
-
- @property
- def objects(self):
- return len(self.uris)
-
- field_table = (("timestamp", None, None, None),
- ("connections", "GAUGE", "Connections", "FF0000"),
- ("objects", "GAUGE", "Objects", "00FF00"),
- ("elapsed", "GAUGE", "Fetch Time", "0000FF"),
- ("failed", "ABSOLUTE", "Failed", "00FFFF"))
-
- @property
- def field_values(self):
- return tuple(str(getattr(self, field[0])) for field in self.field_table)
-
- @classmethod
- def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"):
- return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum)
- for field in cls.field_table if field[1] is not None]
-
- @classmethod
- def field_graph_specifiers(cls, hostname):
- result = []
- for field in cls.field_table:
- if field[1] is not None:
- result.append("DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], hostname, field[0]))
- result.append("'LINE1:%s#%s:%s'" % (field[0], field[3], field[2]))
- return result
-
- def save(self, rrdtable):
- rrdtable.add(self.hostname, self.field_values)
+ """
+ A host object represents all the data collected for one host for a given session.
+ """
+
+ def __init__(self, hostname, timestamp):
+ self.hostname = hostname
+ self.timestamp = timestamp
+ self.elapsed = 0
+ self.connections = 0
+ self.failures = 0
+ self.uris = set()
+
+ def add_connection(self, elt):
+ self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
+ self.connections += 1
+ if elt.get("error") is not None:
+ self.failures += 1
+
+ def add_object_uri(self, u):
+ self.uris.add(u)
+
+ @property
+ def failed(self):
+ return 1 if self.failures > 0 else 0
+
+ @property
+ def objects(self):
+ return len(self.uris)
+
+ field_table = (("timestamp", None, None, None),
+ ("connections", "GAUGE", "Connections", "FF0000"),
+ ("objects", "GAUGE", "Objects", "00FF00"),
+ ("elapsed", "GAUGE", "Fetch Time", "0000FF"),
+ ("failed", "ABSOLUTE", "Failed", "00FFFF"))
+
+ @property
+ def field_values(self):
+ return tuple(str(getattr(self, field[0])) for field in self.field_table)
+
+ @classmethod
+ def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"):
+ return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum)
+ for field in cls.field_table if field[1] is not None]
+
+ @classmethod
+ def field_graph_specifiers(cls, hostname):
+ result = []
+ for field in cls.field_table:
+ if field[1] is not None:
+ result.append("DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], hostname, field[0]))
+ result.append("'LINE1:%s#%s:%s'" % (field[0], field[3], field[2]))
+ return result
+
+ def save(self, rrdtable):
+ rrdtable.add(self.hostname, self.field_values)
class Session(dict):
- """
- A session corresponds to one XML file. This is a dictionary of Host
- objects, keyed by hostname.
- """
-
- def __init__(self, timestamp):
- dict.__init__(self)
- self.timestamp = timestamp
-
- @property
- def hostnames(self):
- return set(self.iterkeys())
-
- def add_connection(self, elt):
- hostname = urlparse.urlparse(elt.text.strip()).hostname
- if hostname not in self:
- self[hostname] = Host(hostname, self.timestamp)
- self[hostname].add_connection(elt)
-
- def add_object_uri(self, u):
- h = urlparse.urlparse(u).hostname
- if h and h in self:
- self[h].add_object_uri(u)
-
- def save(self, rrdtable):
- for h in self.itervalues():
- h.save(rrdtable)
+ """
+ A session corresponds to one XML file. This is a dictionary of Host
+ objects, keyed by hostname.
+ """
+
+ def __init__(self, timestamp):
+ dict.__init__(self)
+ self.timestamp = timestamp
+
+ @property
+ def hostnames(self):
+ return set(self.iterkeys())
+
+ def add_connection(self, elt):
+ hostname = urlparse.urlparse(elt.text.strip()).hostname
+ if hostname not in self:
+ self[hostname] = Host(hostname, self.timestamp)
+ self[hostname].add_connection(elt)
+
+ def add_object_uri(self, u):
+ h = urlparse.urlparse(u).hostname
+ if h and h in self:
+ self[h].add_object_uri(u)
+
+ def save(self, rrdtable):
+ for h in self.itervalues():
+ h.save(rrdtable)
class RRDTable(dict):
- """
- Final data we're going to be sending to rrdtool. We need to buffer
- it until we're done because we have to sort it. Might be easier
- just to sort the maildir, then again it might be easier to get rid
- of the maildir too once we're dealing with current data. We'll see.
- """
-
- def __init__(self, rrdtool = sys.stdout):
- dict.__init__(self)
- self.rrdtool = rrdtool
-
- def add(self, hostname, data):
- if hostname not in self:
- self[hostname] = []
- self[hostname].append(data)
-
- def sort(self):
- for data in self.itervalues():
- data.sort()
-
- @property
- def oldest(self):
- return min(min(datum[0] for datum in data) for data in self.itervalues())
-
- rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps for steps in (1, 4, 24))
-
- def create(self):
- start = self.oldest
- ds_list = Host.field_ds_specifiers()
- ds_list.extend(self.rras)
- for hostname in self:
- if not os.path.exists("%s.rrd" % hostname):
- self.rrdtool("create %s.rrd --start %s --step 3600 %s\n" % (hostname, start, " ".join(ds_list)))
-
- def update(self):
- for hostname, data in self.iteritems():
- for datum in data:
- self.rrdtool("update %s.rrd %s\n" % (hostname, ":".join(str(d) for d in datum)))
-
- def graph(self):
- for hostname in self:
- self.rrdtool("graph %s.png --start -90d %s\n" % (hostname, " ".join(Host.field_graph_specifiers(hostname))))
+ """
+ Final data we're going to be sending to rrdtool. We need to buffer
+ it until we're done because we have to sort it. Might be easier
+ just to sort the maildir, then again it might be easier to get rid
+ of the maildir too once we're dealing with current data. We'll see.
+ """
+
+ def __init__(self, rrdtool = sys.stdout):
+ dict.__init__(self)
+ self.rrdtool = rrdtool
+
+ def add(self, hostname, data):
+ if hostname not in self:
+ self[hostname] = []
+ self[hostname].append(data)
+
+ def sort(self):
+ for data in self.itervalues():
+ data.sort()
+
+ @property
+ def oldest(self):
+ return min(min(datum[0] for datum in data) for data in self.itervalues())
+
+ rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps for steps in (1, 4, 24))
+
+ def create(self):
+ start = self.oldest
+ ds_list = Host.field_ds_specifiers()
+ ds_list.extend(self.rras)
+ for hostname in self:
+ if not os.path.exists("%s.rrd" % hostname):
+ self.rrdtool("create %s.rrd --start %s --step 3600 %s\n" % (hostname, start, " ".join(ds_list)))
+
+ def update(self):
+ for hostname, data in self.iteritems():
+ for datum in data:
+ self.rrdtool("update %s.rrd %s\n" % (hostname, ":".join(str(d) for d in datum)))
+
+ def graph(self):
+ for hostname in self:
+ self.rrdtool("graph %s.png --start -90d %s\n" % (hostname, " ".join(Host.field_graph_specifiers(hostname))))
mb = mailbox.Maildir("/u/sra/rpki/rcynic-xml", factory = None, create = False)
rrdtable = RRDTable()
for i, key in enumerate(mb.iterkeys(), 1):
- sys.stderr.write("\r%s %d/%d..." % ("|\\-/"[i & 3], i, len(mb)))
-
- assert not mb[key].is_multipart()
- input = ElementTreeFromString(mb[key].get_payload())
- date = input.get("date")
- sys.stderr.write("%s..." % date)
- session = Session(parse_utc(date))
- for elt in input.findall("rsync_history"):
- session.add_connection(elt)
- for elt in input.findall("validation_status"):
- if elt.get("generation") == "current":
- session.add_object_uri(elt.text.strip())
- session.save(rrdtable)
-
- # XXX
- #if i > 4: break
+ sys.stderr.write("\r%s %d/%d..." % ("|\\-/"[i & 3], i, len(mb)))
+
+ assert not mb[key].is_multipart()
+ input = ElementTreeFromString(mb[key].get_payload())
+ date = input.get("date")
+ sys.stderr.write("%s..." % date)
+ session = Session(parse_utc(date))
+ for elt in input.findall("rsync_history"):
+ session.add_connection(elt)
+ for elt in input.findall("validation_status"):
+ if elt.get("generation") == "current":
+ session.add_object_uri(elt.text.strip())
+ session.save(rrdtable)
+
+ # XXX
+ #if i > 4: break
sys.stderr.write("\n")
diff --git a/potpourri/rrdp-fetch-from-tal b/potpourri/rrdp-fetch-from-tal
new file mode 100755
index 00000000..08d245dd
--- /dev/null
+++ b/potpourri/rrdp-fetch-from-tal
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Fetch RPKI data using RRDP starting from a TAL.
+
+Work in progress, don't be too surprised by anything this does or
+doesn't do.
+"""
+
+import rpki.relaxng
+import rpki.x509
+import lxml.etree
+import argparse
+import urlparse
+import urllib2
+import sys
+import os
+
+
+class Tags(object):
+ def __init__(self, *tags):
+ for tag in tags:
+ setattr(self, tag, rpki.relaxng.rrdp.xmlns + tag)
+
+tags = Tags("notification", "delta", "snapshot", "publish", "withdraw")
+
+
+class RSyncHandler(urllib2.BaseHandler):
+ """
+ Jam support for rsync:// URIs into urllib2 framework.
+ Very basic, probably not paranoid enough.
+ """
+
+ _n = 0
+
+ def rsync_open(self, req):
+ import subprocess, mimetools
+ u = req.get_full_url()
+ if u.endswith("/"):
+ raise urllib2.URLError("rsync directory URI not allowed")
+ t = "/tmp/rrdp-fetch-from-tal.%d.%d" % (os.getpid(), self._n)
+ self._n += 1
+ subprocess.check_call(("rsync", u, t))
+ h = mimetools.Message(open("/dev/null"))
+ h["Content-type"] = "text/plain"
+ h["Content-length"] = str(os.stat(t).st_size)
+ f = open(t, "rb")
+ os.unlink(t)
+ return urllib2.addinfourl(f, h, u)
+
+urllib2.install_opener(urllib2.build_opener(RSyncHandler))
+
+
+class main(object):
+
+ def __init__(self):
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("--rcynic-tree", default = "rcynic-data/unauthenticated",
+ help = "directory tree in which to write extracted RPKI objects")
+ parser.add_argument("--serial-filename", # default handled later
+ help = "file name in which to store RRDP serial number")
+ parser.add_argument("tal", help = "trust anchor locator")
+ self.args = parser.parse_args()
+ if not os.path.isdir(self.args.rcynic_tree):
+ os.makedirs(self.args.rcynic_tree)
+ self.urls = set()
+ self.ta = self.ta_fetch()
+ url = self.ta.get_sia_rrdp_notify()
+ if url is None:
+ sys.exit("Couldn't get RRDP URI from trust anchor")
+ self.rrdp_fetch(url)
+ self.write_ta()
+
+ def rrdp_fetch(self, url):
+ if url in self.urls:
+ print "Already fetched %s, skipping" % url
+ return
+ self.urls.add(url)
+ xml = lxml.etree.ElementTree(file = urllib2.urlopen(url)).getroot()
+ rpki.relaxng.rrdp.assertValid(xml)
+ if xml.tag[len(rpki.relaxng.rrdp.xmlns):] != "notification":
+ sys.exit("Expected notification at %s, found %s" % (url, xml.tag))
+ self.prettyprint_notification(xml)
+
+ # We should be checking session_id here, but we're not storing it yet
+
+ old_serial = self.get_serial()
+ new_serial = int(xml.get("serial"))
+ deltas = dict((int(elt.get("serial")), elt)
+ for elt in xml.iterchildren(tags.delta))
+ if old_serial == 0 or not all(serial + 1 in deltas
+ for serial in xrange(old_serial, new_serial)):
+ return self.snapshot_fetch(xml.iterchildren(tags.snapshot).next())
+ for serial in sorted(deltas):
+ if serial > old_serial:
+ self.delta_fetch(deltas[serial])
+
+ def prettyprint_notification(self, xml):
+ print "Notification version %s session %s serial %s" % (
+ xml.get("version"), xml.get("session_id"), xml.get("serial"))
+ elt = xml.iterchildren(tags.snapshot).next()
+ print " Snapshot URI %s hash %s" % (
+ elt.get("uri"), elt.get("hash"))
+ for elt in xml.iterchildren(tags.delta):
+ print " Delta %6s URI %s hash %s" % (
+ elt.get("serial"), elt.get("uri"), elt.get("hash"))
+
+ def ta_fetch(self):
+ with open(self.args.tal, "r") as f:
+ tal = f.read()
+ uris, key = tal.split("\n\n", 2)
+ key = rpki.x509.PublicKey(Base64 = key)
+ for uri in uris.split():
+ ta = rpki.x509.X509(DER = urllib2.urlopen(uri).read())
+ if ta.getPublicKey() == key:
+ return ta
+ print "TAL key mismatch for certificate", url
+ sys.exit("Could not fetch trust anchor")
+
+ @property
+ def serial_filename(self):
+ return self.args.serial_filename or os.path.join(self.args.rcynic_tree, "serial")
+
+ def get_serial(self):
+ try:
+ with open(self.serial_filename, "r") as f:
+ return int(f.read().strip())
+ except:
+ return 0
+
+ def set_serial(self, value):
+ with open(self.serial_filename, "w") as f:
+ f.write("%s\n" % value)
+
+ def uri_to_filename(self, uri):
+ assert uri.startswith("rsync://")
+ return os.path.join(self.args.rcynic_tree, uri[len("rsync://"):])
+
+ def add_obj(self, uri, obj):
+ fn = self.uri_to_filename(uri)
+ dn = os.path.dirname(fn)
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ with open(fn, "wb") as f:
+ f.write(obj)
+
+ def del_obj(self, uri, hash):
+ fn = self.uri_to_filename(uri)
+ with open(fn, "rb") as f:
+ if hash.lower() != rpki.x509.sha256(f.read()).encode("hex"):
+ raise RuntimeError("Hash mismatch for URI %s" % uri)
+ os.unlink(fn)
+ dn = os.path.dirname(fn)
+ while True:
+ try:
+ os.rmdir(dn)
+ except OSError:
+ break
+ else:
+ dn = os.path.dirname(dn)
+
+ def xml_fetch(self, elt):
+ url = elt.get("uri")
+ hash = elt.get("hash").lower()
+ print "Fetching", url
+ text = urllib2.urlopen(url).read()
+ h = rpki.x509.sha256(text).encode("hex")
+ if h != hash:
+ sys.exit("Bad hash for %s: expected %s got %s" % (url, hash, h))
+ xml = lxml.etree.XML(text)
+ rpki.relaxng.rrdp.schema.assertValid(xml)
+ return xml
+
+ def snapshot_fetch(self, xml):
+ xml = self.xml_fetch(xml)
+ print "Unpacking snapshot version %s session %s serial %6s" % (
+ xml.get("version"), xml.get("session_id"), xml.get("serial"))
+ for elt in xml.iterchildren(tags.publish):
+ print " ", elt.get("uri")
+ self.add_obj(elt.get("uri"), elt.text.decode("base64"))
+ self.set_serial(xml.get("serial"))
+
+ def delta_fetch(self, xml):
+ xml = self.xml_fetch(xml)
+ old_serial = int(self.get_serial())
+ new_serial = int(xml.get("serial"))
+ print "Unpacking deltas version %s session %s serial %s" % (
+ xml.get("version"), xml.get("session_id"), new_serial)
+ if old_serial != new_serial - 1:
+ raise RuntimeError("Can't apply deltas: old serial %s new serial %s" % (old_serial, new_serial))
+ for i, elt in enumerate(xml.iterchildren(tags.withdraw)):
+ uri = elt.get("uri")
+ hash = elt.get("hash")
+ print " %3d withdraw URI %s hash %s" % (i, uri, hash)
+ self.del_obj(uri, hash)
+ for i, elt in enumerate(xml.iterchildren(tags.publish)):
+ uri = elt.get("uri")
+ hash = elt.get("hash", None)
+ print " %3d publish URI %s hash %s" % (i, uri, hash)
+ if hash is not None:
+ self.del_obj(uri, hash)
+ self.add_obj(elt.get("uri"), elt.text.decode("base64"))
+ self.set_serial(new_serial)
+
+ def write_ta(self):
+ der = self.ta.get_DER()
+ fn = rpki.x509.sha256(der).encode("hex") + ".cer"
+ if not os.path.exists(fn):
+ print "Writing", fn
+ with open(fn, "wb") as f:
+ f.write(der)
+
+if __name__ == "__main__":
+ main()
diff --git a/potpourri/rrdp-fetch.py b/potpourri/rrdp-fetch.py
new file mode 100755
index 00000000..b8d927ee
--- /dev/null
+++ b/potpourri/rrdp-fetch.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Fetch an RRDP notification file and follow all the links. Should be
+merged into rrdp-test-tool eventually, but one thing at a time.
+"""
+
+from urllib2 import urlopen
+from lxml.etree import ElementTree, XML
+from socket import getfqdn
+from rpki.x509 import sha256
+from rpki.relaxng import rrdp
+from urlparse import urlparse
+from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
+
+class BadHash(Exception):
+ "Calculated hash value doesn't match expected hash value."
+
+def fetch(elt):
+ uri = elt.get("uri")
+ hash = elt.get("hash").lower()
+ print "Fetching", uri
+
+ text = urlopen(uri).read()
+ h = sha256(text).encode("hex")
+ if h != hash:
+ raise BadHash("Bad hash for %s: expected %s got %s" % (uri, hash, h))
+
+ xml = XML(text)
+ rrdp.schema.assertValid(xml)
+
+ u = urlparse(uri)
+ fn = u.netloc + u.path
+
+ return elt, xml, fn
+
+parser = ArgumentParser(description = __doc__, formatter_class = ArgumentDefaultsHelpFormatter)
+parser.add_argument("uri", nargs = "?",
+ default = "http://" + getfqdn() + "/rrdp/updates.xml",
+ help = "RRDP notification file to fetch")
+args = parser.parse_args()
+
+updates = ElementTree(file = urlopen(args.uri))
+rrdp.schema.assertValid(updates)
+
+snapshot = fetch(updates.find(rrdp.xmlns + "snapshot"))
+
+deltas = [fetch(elt) for elt in updates.findall(rrdp.xmlns + "delta")]
+
+print updates
+print snapshot
+for delta in deltas:
+ print delta
diff --git a/potpourri/rrdp-test-tool b/potpourri/rrdp-test-tool
new file mode 100755
index 00000000..ccf17960
--- /dev/null
+++ b/potpourri/rrdp-test-tool
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Test tool for prototype RRDP implementation. Eventually some of this
+code will likely be refactored into more user-friendly form, but for
+the moment this just does whatever insane thing I need to do this week
+for testing.
+"""
+
+import rpki.relaxng
+import rpki.x509
+import lxml.etree
+import argparse
+import os
+
+class Tags(object):
+ def __init__(self, *tags):
+ for tag in tags:
+ setattr(self, tag, rpki.relaxng.rrdp.xmlns + tag)
+
+tags = Tags("notification", "delta", "snapshot", "publish", "withdraw")
+
+class main(object):
+
+ def __init__(self):
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("--rcynic-tree", default = "rcynic-data/unauthenticated",
+ help = "directory tree in which to write extracted RPKI objects")
+ parser.add_argument("--serial-filename",
+ help = "file name in which to store RRDP serial number")
+ parser.add_argument("rrdp_file", nargs = "+",
+ help = "RRDP snapshot or deltas file")
+ self.args = parser.parse_args()
+ if not os.path.isdir(self.args.rcynic_tree):
+ os.makedirs(self.args.rcynic_tree)
+ for rrdp_file in self.args.rrdp_file:
+ xml = lxml.etree.ElementTree(file = rrdp_file).getroot()
+ rpki.relaxng.rrdp.assertValid(xml)
+ getattr(self, "handle_" + xml.tag[len(rpki.relaxng.rrdp.xmlns):])(xml)
+
+ @property
+ def serial_filename(self):
+ return self.args.serial_filename or os.path.join(self.args.rcynic_tree, "serial")
+
+ def get_serial(self):
+ with open(self.serial_filename, "r") as f:
+ return f.read().strip()
+
+ def set_serial(self, value):
+ with open(self.serial_filename, "w") as f:
+ f.write("%s\n" % value)
+
+ def handle_notification(self, xml):
+ print "Notification version %s session %s serial %s" % (
+ xml.get("version"), xml.get("session_id"), xml.get("serial"))
+ assert xml[0].tag == tags.snapshot
+ print " Snapshot URI %s hash %s" % (
+ xml[0].get("uri"), xml[0].get("hash"))
+ for i, elt in enumerate(xml.iterchildren(tags.delta)):
+ print " Delta %3d serial %6s URI %s hash %s" % (
+ i, elt.get("serial"), elt.get("uri"), elt.get("hash"))
+
+ def uri_to_filename(self, uri):
+ assert uri.startswith("rsync://")
+ return os.path.join(self.args.rcynic_tree, uri[len("rsync://"):])
+
+ def add_obj(self, uri, obj):
+ fn = self.uri_to_filename(uri)
+ dn = os.path.dirname(fn)
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ with open(fn, "wb") as f:
+ f.write(obj)
+
+ def del_obj(self, uri, hash):
+ fn = self.uri_to_filename(uri)
+ with open(fn, "rb") as f:
+ if hash.lower() != rpki.x509.sha256(f.read()).encode("hex"):
+ raise RuntimeError("Hash mismatch for URI %s" % uri)
+ os.unlink(fn)
+ dn = os.path.dirname(fn)
+ while True:
+ try:
+ os.rmdir(dn)
+ except OSError:
+ break
+ else:
+ dn = os.path.dirname(dn)
+
+ def handle_snapshot(self, xml):
+ print "Unpacking snapshot version %s session %s serial %6s" % (
+ xml.get("version"), xml.get("session_id"), xml.get("serial"))
+ for elt in xml.iterchildren(tags.publish):
+ print " ", elt.get("uri")
+ self.add_obj(elt.get("uri"), elt.text.decode("base64"))
+ self.set_serial(xml.get("serial"))
+
+ def handle_delta(self, xml):
+ old_serial = int(self.get_serial())
+ new_serial = int(xml.get("serial"))
+ print "Unpacking deltas version %s session %s serial %s" % (
+ xml.get("version"), xml.get("session_id"), new_serial)
+ if old_serial != new_serial - 1:
+ raise RuntimeError("Can't apply deltas: old serial %s new serial %s" % (old_serial, new_serial))
+ for i, elt in enumerate(xml.iterchildren(tags.withdraw)):
+ uri = elt.get("uri")
+ hash = elt.get("hash")
+ print " %3d withdraw URI %s hash %s" % (i, uri, hash)
+ self.del_obj(uri, hash)
+ for i, elt in enumerate(xml.iterchildren(tags.publish)):
+ uri = elt.get("uri")
+ hash = elt.get("hash", None)
+ print " %3d publish URI %s hash %s" % (i, uri, hash)
+ if hash is not None:
+ self.del_obj(uri, hash)
+ self.add_obj(elt.get("uri"), elt.text.decode("base64"))
+ self.set_serial(new_serial)
+
+if __name__ == "__main__":
+ main()
diff --git a/potpourri/show-key-identifiers.py b/potpourri/show-key-identifiers.py
index fa2bae8b..4ba6219a 100644
--- a/potpourri/show-key-identifiers.py
+++ b/potpourri/show-key-identifiers.py
@@ -29,26 +29,26 @@ import rpki.oids
def check_dir(s):
- if os.path.isdir(s):
- return os.path.abspath(s)
- else:
- raise argparse.ArgumentTypeError("%r is not a directory" % s)
+ if os.path.isdir(s):
+ return os.path.abspath(s)
+ else:
+ raise argparse.ArgumentTypeError("%r is not a directory" % s)
def filename_to_uri(filename):
- if not filename.startswith(args.rcynic_dir):
- raise ValueError
- return "rsync://" + filename[len(args.rcynic_dir):].lstrip("/")
+ if not filename.startswith(args.rcynic_dir):
+ raise ValueError
+ return "rsync://" + filename[len(args.rcynic_dir):].lstrip("/")
def get_roa(fn):
- return rpki.POW.CMS.derReadFile(fn).certs()[0]
+ return rpki.POW.CMS.derReadFile(fn).certs()[0]
def get_crl(fn):
- return rpki.POW.CRL.derReadFile(fn)
+ return rpki.POW.CRL.derReadFile(fn)
def get_cer(fn):
- return rpki.POW.X509.derReadFile(fn)
+ return rpki.POW.X509.derReadFile(fn)
dispatch = dict(roa = get_roa,
crl = get_crl,
@@ -59,23 +59,23 @@ parser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated
args = parser.parse_args()
for root, dirs, files in os.walk(args.rcynic_dir):
- for fn in files:
- fn = os.path.join(root, fn)
- fn2 = os.path.splitext(fn)[1][1:]
- if fn2 not in dispatch:
- continue
- obj = dispatch[fn2](fn)
- uri = filename_to_uri(fn)
- try:
- ski = obj.getSKI().encode("hex")
- except:
- ski = ""
- try:
- aki = obj.getAKI().encode("hex")
- except:
- aki = ""
- try:
- res = ",".join(",".join("%s-%s" % r2 for r2 in r1) for r1 in obj.getRFC3779() if r1 is not None)
- except:
- res = ""
- print "\t".join((uri, ski, aki, res))
+ for fn in files:
+ fn = os.path.join(root, fn)
+ fn2 = os.path.splitext(fn)[1][1:]
+ if fn2 not in dispatch:
+ continue
+ obj = dispatch[fn2](fn)
+ uri = filename_to_uri(fn)
+ try:
+ ski = obj.getSKI().encode("hex")
+ except:
+ ski = ""
+ try:
+ aki = obj.getAKI().encode("hex")
+ except:
+ aki = ""
+ try:
+ res = ",".join(",".join("%s-%s" % r2 for r2 in r1) for r1 in obj.getRFC3779() if r1 is not None)
+ except:
+ res = ""
+ print "\t".join((uri, ski, aki, res))
diff --git a/potpourri/show-tracking-data.py b/potpourri/show-tracking-data.py
index 07e0a144..0fbb26c1 100644
--- a/potpourri/show-tracking-data.py
+++ b/potpourri/show-tracking-data.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -31,9 +31,9 @@ import rpki.sundial
rcynic_dir = sys.argv[1]
for root, dirs, files in os.walk(rcynic_dir):
- for f in files:
- path = os.path.join(root, f)
- date = rpki.sundial.datetime.utcfromtimestamp(os.stat(path).st_mtime)
- uri = "rsync://" + path[len(rcynic_dir):].lstrip("/")
- obj = rpki.x509.uri_dispatch(uri)(DER_file = path)
- print date, obj.tracking_data(uri)
+ for f in files:
+ path = os.path.join(root, f)
+ date = rpki.sundial.datetime.utcfromtimestamp(os.stat(path).st_mtime)
+ uri = "rsync://" + path[len(rcynic_dir):].lstrip("/")
+ obj = rpki.x509.uri_dispatch(uri)(DER_file = path)
+ print date, obj.tracking_data(uri)
diff --git a/potpourri/signed-object-dates.py b/potpourri/signed-object-dates.py
index fefd9448..d5699252 100644
--- a/potpourri/signed-object-dates.py
+++ b/potpourri/signed-object-dates.py
@@ -5,11 +5,11 @@
# if the object is a manifest, also extract thisUpdate and nextUpdate.
# Copyright (C) 2013 Dragon Research Labs ("DRL")
-#
+#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -27,14 +27,14 @@ extract_flags = (rpki.POW.CMS_NOCRL |
rpki.POW.CMS_NO_CONTENT_VERIFY)
for fn in sys.argv[1:]:
- cls = rpki.POW.Manifest if fn.endswith(".mft") else rpki.POW.CMS
- cms = cls.derReadFile(fn)
- cer = cms.certs()[0]
- print fn
- print " notBefore: ", cer.getNotBefore()
- if fn.endswith(".mft"):
- cms.verify(rpki.POW.X509Store(), None, extract_flags)
- print " thisUpdate:", cms.getThisUpdate()
- print " nextUpdate:", cms.getNextUpdate()
- print " notAfter: ", cer.getNotAfter()
- print
+ cls = rpki.POW.Manifest if fn.endswith(".mft") else rpki.POW.CMS
+ cms = cls.derReadFile(fn)
+ cer = cms.certs()[0]
+ print fn
+ print " notBefore: ", cer.getNotBefore()
+ if fn.endswith(".mft"):
+ cms.verify(rpki.POW.X509Store(), None, extract_flags)
+ print " thisUpdate:", cms.getThisUpdate()
+ print " nextUpdate:", cms.getNextUpdate()
+ print " notAfter: ", cer.getNotAfter()
+ print
diff --git a/potpourri/testbed-rootcert.py b/potpourri/testbed-rootcert.py
index 0716be2f..5e2e97c5 100644
--- a/potpourri/testbed-rootcert.py
+++ b/potpourri/testbed-rootcert.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2009-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -26,7 +26,7 @@ import sys
from rpki.csv_utils import csv_reader
if len(sys.argv) not in (2, 4):
- sys.exit("Usage: %s holder [asns.csv prefixes.csv]" % sys.argv[0])
+ sys.exit("Usage: %s holder [asns.csv prefixes.csv]" % sys.argv[0])
print '''\
[req]
@@ -53,7 +53,7 @@ sbgp-ipAddrBlock = critical,@rfc3997_addrs
"HOLDER" : sys.argv[1].upper() }
for i, asn in enumerate(asn for handle, asn in csv_reader(sys.argv[2] if len(sys.argv) > 2 else "asns.csv", columns = 2)):
- print "AS.%d = %s" % (i, asn)
+ print "AS.%d = %s" % (i, asn)
print '''\
@@ -62,5 +62,5 @@ print '''\
'''
for i, prefix in enumerate(prefix for handle, prefix in csv_reader(sys.argv[3] if len(sys.argv) > 2 else "prefixes.csv", columns = 2)):
- v = 6 if ":" in prefix else 4
- print "IPv%d.%d = %s" % (v, i, prefix)
+ v = 6 if ":" in prefix else 4
+ print "IPv%d.%d = %s" % (v, i, prefix)
diff --git a/potpourri/translate-handles.py b/potpourri/translate-handles.py
index 49848277..124604e6 100644
--- a/potpourri/translate-handles.py
+++ b/potpourri/translate-handles.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2010-2012 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -39,11 +39,11 @@ translations = dict((src, dst) for src, dst in csv_reader("translations.csv", co
for filename in sys.argv[1:]:
- f = csv_writer(filename)
+ f = csv_writer(filename)
- for cols in csv_reader(filename):
- if cols[0] in translations:
- cols[0] = translations[cols[0]]
- f.writerow(cols)
+ for cols in csv_reader(filename):
+ if cols[0] in translations:
+ cols[0] = translations[cols[0]]
+ f.writerow(cols)
- f.close()
+ f.close()
diff --git a/potpourri/upgrade-add-ghostbusters.py b/potpourri/upgrade-add-ghostbusters.py
index a8c8a92b..2370e959 100644
--- a/potpourri/upgrade-add-ghostbusters.py
+++ b/potpourri/upgrade-add-ghostbusters.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -24,26 +24,26 @@ import getopt, sys, rpki.config, warnings
from rpki.mysql_import import MySQLdb
def fix(name, *statements):
- db = MySQLdb.connect(db = cfg.get("sql-database", section = name),
- user = cfg.get("sql-username", section = name),
- passwd = cfg.get("sql-password", section = name))
- cur = db.cursor()
- for statement in statements:
- cur.execute(statement)
- db.commit()
- db.close()
+ db = MySQLdb.connect(db = cfg.get("sql-database", section = name),
+ user = cfg.get("sql-username", section = name),
+ passwd = cfg.get("sql-password", section = name))
+ cur = db.cursor()
+ for statement in statements:
+ cur.execute(statement)
+ db.commit()
+ db.close()
cfg_file = None
opts, argv = getopt.getopt(sys.argv[1:], "c:h?", ["config=", "help"])
for o, a in opts:
- if o in ("-h", "--help", "-?"):
- print __doc__
- sys.exit(0)
- if o in ("-c", "--config"):
- cfg_file = a
+ if o in ("-h", "--help", "-?"):
+ print __doc__
+ sys.exit(0)
+ if o in ("-c", "--config"):
+ cfg_file = a
-cfg = rpki.config.parser(cfg_file, "myrpki")
+cfg = rpki.config.parser(filename = cfg_file, section = "myrpki")
fix("irdbd", """
CREATE TABLE ghostbuster_request (
diff --git a/potpourri/validation-status-sql.py b/potpourri/validation-status-sql.py
index fc52e64b..646d7d9b 100755
--- a/potpourri/validation-status-sql.py
+++ b/potpourri/validation-status-sql.py
@@ -34,186 +34,185 @@ import subprocess
class Parser(object):
- @staticmethod
- def main():
- parser = argparse.ArgumentParser(
- description = __doc__,
- formatter_class = argparse.ArgumentDefaultsHelpFormatter)
- group = parser.add_mutually_exclusive_group(required = True)
- group.add_argument("--mailbox", "--mb",
- help = "Maildir mailbox containing rcynic XML output")
- group.add_argument("--tarballs",
- help = "directory tree of tar files containing containing rcynic XML output")
- parser.add_argument("--database", "--db",
- default = "validation-status-sql.db",
- help = "SQLite3 database")
- parser.add_argument("--path-within-tarball",
- default = "var/rcynic/data/rcynic.xml",
- help = "rcynic.xml path name within tarball(s)")
- parser.add_argument("--tar-extensions", nargs = "+",
- default = ".tar .tar.gz .tgz .tar.bz2 .tbz .tar.xz .txz".split(),
- help = "extensions to recognize as indicating tar files")
- args = parser.parse_args()
- if args.mailbox:
- ParserMailbox(args)
- else:
- ParserTarball(args)
-
- def __init__(self, args):
- self.args = args
- self.init_sql()
- self.init_hook()
- self.index1()
- self.parsed = 1
- for self.current, self.iterval in enumerate(self.iterator, 1):
- self.parse_xml()
- if self.parsed > 1:
- sys.stderr.write("\n")
- self.index2()
- self.db.close()
-
-
- def init_sql(self):
- creating = not os.path.exists(self.args.database)
- self.db = sqlite3.connect(self.args.database)
- self.db.text_factory = str
- self.db.executescript('''
- PRAGMA foreign_keys = off;
- PRAGMA synchronous = off;
- PRAGMA count_changes = off;
- ''')
-
- if creating:
- self.db.executescript('''
- CREATE TABLE sessions (
- session_id INTEGER PRIMARY KEY NOT NULL,
- session DATETIME NOT NULL,
- handle TEXT NOT NULL
- );
-
- CREATE TABLE uris (
- uri_id INTEGER PRIMARY KEY NOT NULL,
- uri TEXT NOT NULL
- );
-
- CREATE TABLE codes (
- code_id INTEGER PRIMARY KEY NOT NULL,
- code TEXT NOT NULL
- );
-
- CREATE TABLE generations (
- generation_id INTEGER PRIMARY KEY NOT NULL,
- generation TEXT NOT NULL
- );
-
- CREATE TABLE events (
- id INTEGER PRIMARY KEY NOT NULL,
- timestamp DATETIME NOT NULL,
- session_id INTEGER NOT NULL REFERENCES sessions (session_id) ON DELETE RESTRICT ON UPDATE RESTRICT,
- generation_id INTEGER NOT NULL REFERENCES generations (generation_id) ON DELETE RESTRICT ON UPDATE RESTRICT,
- code_id INTEGER NOT NULL REFERENCES codes (code_id) ON DELETE RESTRICT ON UPDATE RESTRICT,
- uri_id INTEGER NOT NULL REFERENCES uris (uri_id) ON DELETE RESTRICT ON UPDATE RESTRICT
- );
-
- CREATE VIEW status AS
- SELECT id, handle, session, timestamp, generation, code, uri
- FROM events
- NATURAL JOIN sessions
- NATURAL JOIN uris
- NATURAL JOIN codes
- NATURAL JOIN generations;
- ''')
-
-
- def index1(self):
- self.db.executescript('''
- CREATE UNIQUE INDEX IF NOT EXISTS sessions_index ON sessions (session);
- CREATE UNIQUE INDEX IF NOT EXISTS handles_index ON sessions (handle);
- CREATE UNIQUE INDEX IF NOT EXISTS uris_index ON uris (uri);
- CREATE UNIQUE INDEX IF NOT EXISTS codes_index ON codes (code);
- CREATE UNIQUE INDEX IF NOT EXISTS generations_index ON generations (generation);
- ''')
-
-
- def index2(self):
- self.db.executescript('''
- CREATE UNIQUE INDEX IF NOT EXISTS events_index ON events (uri_id, timestamp, code_id, generation_id);
- ''')
-
-
- def string_id(self, table, value):
- field = table.rstrip("s")
- try:
- return self.db.execute("SELECT %s_id FROM %s WHERE %s = ?" % (field, table, field), (value,)).fetchone()[0]
- except:
- return self.db.execute("INSERT INTO %s (%s) VALUES (?)" % (table, field), (value,)).lastrowid
-
-
- def parse_xml(self):
- sys.stderr.write("\r%s %d/%d/%d...%s " % ("|\\-/"[self.current & 3],
- self.current, self.parsed, self.total, self.handle))
- if self.db.execute("SELECT handle FROM sessions WHERE handle = ?", (self.handle,)).fetchone():
- return
- xml = self.read_xml()
- with self.db:
- session_id = self.db.execute("INSERT INTO sessions (session, handle) VALUES (strftime('%s', ?), ?)",
- (xml.get("date"), self.handle)).lastrowid
- self.db.executemany("INSERT INTO events (session_id, timestamp, generation_id, code_id, uri_id) "
- "VALUES (?, strftime('%s', ?), ?, ?, ?)",
- ((session_id,
- x.get("timestamp"),
- self.string_id("generations", x.get("generation", "none")),
- self.string_id("codes", x.get("status")),
- self.string_id("uris", x.text.strip()))
- for x in xml.findall("validation_status")))
- self.parsed += 1
+ @staticmethod
+ def main():
+ parser = argparse.ArgumentParser(
+ description = __doc__,
+ formatter_class = argparse.ArgumentDefaultsHelpFormatter)
+ group = parser.add_mutually_exclusive_group(required = True)
+ group.add_argument("--mailbox", "--mb",
+ help = "Maildir mailbox containing rcynic XML output")
+ group.add_argument("--tarballs",
+                       help = "directory tree of tar files containing rcynic XML output")
+ parser.add_argument("--database", "--db",
+ default = "validation-status-sql.db",
+ help = "SQLite3 database")
+ parser.add_argument("--path-within-tarball",
+ default = "var/rcynic/data/rcynic.xml",
+ help = "rcynic.xml path name within tarball(s)")
+ parser.add_argument("--tar-extensions", nargs = "+",
+ default = ".tar .tar.gz .tgz .tar.bz2 .tbz .tar.xz .txz".split(),
+ help = "extensions to recognize as indicating tar files")
+ args = parser.parse_args()
+ if args.mailbox:
+ ParserMailbox(args)
+ else:
+ ParserTarball(args)
+
+ def __init__(self, args):
+ self.args = args
+ self.init_sql()
+ self.init_hook()
+ self.index1()
+ self.parsed = 1
+ for self.current, self.iterval in enumerate(self.iterator, 1):
+ self.parse_xml()
+ if self.parsed > 1:
+ sys.stderr.write("\n")
+ self.index2()
+ self.db.close()
+
+
+ def init_sql(self):
+ creating = not os.path.exists(self.args.database)
+ self.db = sqlite3.connect(self.args.database)
+ self.db.text_factory = str
+ self.db.executescript('''
+ PRAGMA foreign_keys = off;
+ PRAGMA synchronous = off;
+ PRAGMA count_changes = off;
+ ''')
+
+ if creating:
+ self.db.executescript('''
+ CREATE TABLE sessions (
+ session_id INTEGER PRIMARY KEY NOT NULL,
+ session DATETIME NOT NULL,
+ handle TEXT NOT NULL
+ );
+
+ CREATE TABLE uris (
+ uri_id INTEGER PRIMARY KEY NOT NULL,
+ uri TEXT NOT NULL
+ );
+
+ CREATE TABLE codes (
+ code_id INTEGER PRIMARY KEY NOT NULL,
+ code TEXT NOT NULL
+ );
+
+ CREATE TABLE generations (
+ generation_id INTEGER PRIMARY KEY NOT NULL,
+ generation TEXT NOT NULL
+ );
+
+ CREATE TABLE events (
+ id INTEGER PRIMARY KEY NOT NULL,
+ timestamp DATETIME NOT NULL,
+ session_id INTEGER NOT NULL REFERENCES sessions (session_id) ON DELETE RESTRICT ON UPDATE RESTRICT,
+ generation_id INTEGER NOT NULL REFERENCES generations (generation_id) ON DELETE RESTRICT ON UPDATE RESTRICT,
+ code_id INTEGER NOT NULL REFERENCES codes (code_id) ON DELETE RESTRICT ON UPDATE RESTRICT,
+ uri_id INTEGER NOT NULL REFERENCES uris (uri_id) ON DELETE RESTRICT ON UPDATE RESTRICT
+ );
+
+ CREATE VIEW status AS
+ SELECT id, handle, session, timestamp, generation, code, uri
+ FROM events
+ NATURAL JOIN sessions
+ NATURAL JOIN uris
+ NATURAL JOIN codes
+ NATURAL JOIN generations;
+ ''')
+
+
+ def index1(self):
+ self.db.executescript('''
+ CREATE UNIQUE INDEX IF NOT EXISTS sessions_index ON sessions (session);
+ CREATE UNIQUE INDEX IF NOT EXISTS handles_index ON sessions (handle);
+ CREATE UNIQUE INDEX IF NOT EXISTS uris_index ON uris (uri);
+ CREATE UNIQUE INDEX IF NOT EXISTS codes_index ON codes (code);
+ CREATE UNIQUE INDEX IF NOT EXISTS generations_index ON generations (generation);
+ ''')
+
+
+ def index2(self):
+ self.db.executescript('''
+ CREATE UNIQUE INDEX IF NOT EXISTS events_index ON events (uri_id, timestamp, code_id, generation_id);
+ ''')
+
+
+ def string_id(self, table, value):
+ field = table.rstrip("s")
+ try:
+ return self.db.execute("SELECT %s_id FROM %s WHERE %s = ?" % (field, table, field), (value,)).fetchone()[0]
+ except:
+ return self.db.execute("INSERT INTO %s (%s) VALUES (?)" % (table, field), (value,)).lastrowid
+
+
+ def parse_xml(self):
+ sys.stderr.write("\r%s %d/%d/%d...%s " % ("|\\-/"[self.current & 3],
+ self.current, self.parsed, self.total, self.handle))
+ if self.db.execute("SELECT handle FROM sessions WHERE handle = ?", (self.handle,)).fetchone():
+ return
+ xml = self.read_xml()
+ with self.db:
+ session_id = self.db.execute("INSERT INTO sessions (session, handle) VALUES (strftime('%s', ?), ?)",
+ (xml.get("date"), self.handle)).lastrowid
+ self.db.executemany("INSERT INTO events (session_id, timestamp, generation_id, code_id, uri_id) "
+ "VALUES (?, strftime('%s', ?), ?, ?, ?)",
+ ((session_id,
+ x.get("timestamp"),
+ self.string_id("generations", x.get("generation", "none")),
+ self.string_id("codes", x.get("status")),
+ self.string_id("uris", x.text.strip()))
+ for x in xml.findall("validation_status")))
+ self.parsed += 1
class ParserTarball(Parser):
- def init_hook(self):
- self.total = 0
- for fn in self.iter_tarball_names():
- self.total += 1
- self.iterator = self.iter_tarball_names()
+ def init_hook(self):
+ self.total = 0
+ for fn in self.iter_tarball_names():
+ self.total += 1
+ self.iterator = self.iter_tarball_names()
- @property
- def handle(self):
- return self.iterval
+ @property
+ def handle(self):
+ return self.iterval
- def read_xml(self):
- return lxml.etree.ElementTree(
- file = subprocess.Popen(("tar", "Oxf", self.iterval, self.args.path_within_tarball),
- stdout = subprocess.PIPE).stdout).getroot()
+ def read_xml(self):
+ return lxml.etree.ElementTree(
+ file = subprocess.Popen(("tar", "Oxf", self.iterval, self.args.path_within_tarball),
+ stdout = subprocess.PIPE).stdout).getroot()
- def iter_tarball_names(self):
- if os.path.isdir(self.args.tarballs):
- for root, dirs, files in os.walk(self.args.tarballs):
- for fn in files:
- if any(fn.endswith(ext) for ext in self.args.tar_extensions):
- yield os.path.join(root, fn)
- else:
- yield self.args.tarballs
+ def iter_tarball_names(self):
+ if os.path.isdir(self.args.tarballs):
+ for root, dirs, files in os.walk(self.args.tarballs):
+ for fn in files:
+ if any(fn.endswith(ext) for ext in self.args.tar_extensions):
+ yield os.path.join(root, fn)
+ else:
+ yield self.args.tarballs
class ParserMailbox(Parser):
- def init_hook(self):
- self.mb = mailbox.Maildir(self.args.mailbox, factory = None, create = False)
- self.total = len(self.mb)
- self.iterator = self.mb.iterkeys()
+ def init_hook(self):
+ self.mb = mailbox.Maildir(self.args.mailbox, factory = None, create = False)
+ self.total = len(self.mb)
+ self.iterator = self.mb.iterkeys()
- @property
- def handle(self):
- return self.mb[self.iterval].get("Message-ID")
+ @property
+ def handle(self):
+ return self.mb[self.iterval].get("Message-ID")
- def read_xml(self):
- return lxml.etree.XML(self.mb[self.iterval].get_payload())
+ def read_xml(self):
+ return lxml.etree.XML(self.mb[self.iterval].get_payload())
if __name__ == "__main__":
- try:
- Parser.main()
- except KeyboardInterrupt:
- pass
-
+ try:
+ Parser.main()
+ except KeyboardInterrupt:
+ pass
diff --git a/potpourri/whack-ripe-asns.py b/potpourri/whack-ripe-asns.py
index 9c702271..ed4a6451 100644
--- a/potpourri/whack-ripe-asns.py
+++ b/potpourri/whack-ripe-asns.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2010 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -44,14 +44,14 @@ sorter = subprocess.Popen(("sort", "-T.", "-n"),
stdout = subprocess.PIPE)
for line in sys.stdin:
- handle, asn = line.split()
+ handle, asn = line.split()
- if "-" in asn:
- range_min, range_max = asn.split("-")
- else:
- range_min, range_max = asn, asn
+ if "-" in asn:
+ range_min, range_max = asn.split("-")
+ else:
+ range_min, range_max = asn, asn
- sorter.stdin.write("%d %d\n" % (long(range_min), long(range_max)))
+ sorter.stdin.write("%d %d\n" % (long(range_min), long(range_max)))
sorter.stdin.close()
@@ -59,22 +59,22 @@ prev_min = None
prev_max = None
def show():
- if prev_min and prev_max:
- sys.stdout.write("x\t%s-%s\n" % (prev_min, prev_max))
+ if prev_min and prev_max:
+ sys.stdout.write("x\t%s-%s\n" % (prev_min, prev_max))
for line in sorter.stdout:
- this_min, this_max = line.split()
- this_min = long(this_min)
- this_max = long(this_max)
-
- if prev_min and prev_max and prev_max + 1 >= this_min:
- prev_min = min(prev_min, this_min)
- prev_max = max(prev_max, this_max)
-
- else:
- show()
- prev_min = this_min
- prev_max = this_max
+ this_min, this_max = line.split()
+ this_min = long(this_min)
+ this_max = long(this_max)
+
+ if prev_min and prev_max and prev_max + 1 >= this_min:
+ prev_min = min(prev_min, this_min)
+ prev_max = max(prev_max, this_max)
+
+ else:
+ show()
+ prev_min = this_min
+ prev_max = this_max
show()
diff --git a/potpourri/whack-ripe-prefixes.py b/potpourri/whack-ripe-prefixes.py
index 52ea3f18..b3d9c39d 100644
--- a/potpourri/whack-ripe-prefixes.py
+++ b/potpourri/whack-ripe-prefixes.py
@@ -1,11 +1,11 @@
# $Id$
-#
+#
# Copyright (C) 2010 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
@@ -49,21 +49,21 @@ sorter = subprocess.Popen(("sort", "-T.", "-n"),
stdout = subprocess.PIPE)
for line in sys.stdin:
- handle, prefix = line.split()
+ handle, prefix = line.split()
- if "-" in prefix:
- range_min, range_max = prefix.split("-")
- range_min = rpki.ipaddrs.parse(range_min)
- range_max = rpki.ipaddrs.parse(range_max)
+ if "-" in prefix:
+ range_min, range_max = prefix.split("-")
+ range_min = rpki.ipaddrs.parse(range_min)
+ range_max = rpki.ipaddrs.parse(range_max)
- else:
- address, length = prefix.split("/")
- address = rpki.ipaddrs.parse(address)
- mask = (1L << (address.bits - int(length))) - 1
- range_min = address & ~mask
- range_max = address | mask
+ else:
+ address, length = prefix.split("/")
+ address = rpki.ipaddrs.parse(address)
+ mask = (1L << (address.bits - int(length))) - 1
+ range_min = address & ~mask
+ range_max = address | mask
- sorter.stdin.write("%d %d\n" % (long(range_min), long(range_max)))
+ sorter.stdin.write("%d %d\n" % (long(range_min), long(range_max)))
sorter.stdin.close()
@@ -71,28 +71,28 @@ prev_min = None
prev_max = None
def address(number):
- if number > 0xffffffff:
- return rpki.ipaddrs.v6addr(number)
- else:
- return rpki.ipaddrs.v4addr(number)
+ if number > 0xffffffff:
+ return rpki.ipaddrs.v6addr(number)
+ else:
+ return rpki.ipaddrs.v4addr(number)
def show():
- if prev_min and prev_max:
- sys.stdout.write("x\t%s-%s\n" % (address(prev_min), address(prev_max)))
+ if prev_min and prev_max:
+ sys.stdout.write("x\t%s-%s\n" % (address(prev_min), address(prev_max)))
for line in sorter.stdout:
- this_min, this_max = line.split()
- this_min = long(this_min)
- this_max = long(this_max)
-
- if prev_min and prev_max and prev_max + 1 >= this_min:
- prev_min = min(prev_min, this_min)
- prev_max = max(prev_max, this_max)
-
- else:
- show()
- prev_min = this_min
- prev_max = this_max
+ this_min, this_max = line.split()
+ this_min = long(this_min)
+ this_max = long(this_max)
+
+ if prev_min and prev_max and prev_max + 1 >= this_min:
+ prev_min = min(prev_min, this_min)
+ prev_max = max(prev_max, this_max)
+
+ else:
+ show()
+ prev_min = this_min
+ prev_max = this_max
show()
diff --git a/potpourri/x509-dot.py b/potpourri/x509-dot.py
index 42e1543a..493199fd 100644
--- a/potpourri/x509-dot.py
+++ b/potpourri/x509-dot.py
@@ -36,123 +36,123 @@ import rpki.POW, sys, glob, os
class x509(object):
- ski = None
- aki = None
+ ski = None
+ aki = None
- show_file = False
- show_ski = False
- show_aki = False
- show_issuer = True
- show_subject = True
+ show_file = False
+ show_ski = False
+ show_aki = False
+ show_issuer = True
+ show_subject = True
- cn_only = True
+ cn_only = True
- subjects = {}
+ subjects = {}
- def __init__(self, filename):
+ def __init__(self, filename):
- while filename.startswith("./"):
- filename = filename[2:]
+ while filename.startswith("./"):
+ filename = filename[2:]
- self.filename = filename
+ self.filename = filename
- f = open(filename, "rb")
- text = f.read()
- f.close()
+ f = open(filename, "rb")
+ text = f.read()
+ f.close()
- if "-----BEGIN" in text:
- self.pow = rpki.POW.X509.pemRead(text)
- else:
- self.pow = rpki.POW.X509.derRead(text)
+ if "-----BEGIN" in text:
+ self.pow = rpki.POW.X509.pemRead(text)
+ else:
+ self.pow = rpki.POW.X509.derRead(text)
- try:
- self.ski = ":".join(["%02X" % ord(i) for i in self.pow.getSKI()])
- except:
- pass
+ try:
+ self.ski = ":".join(["%02X" % ord(i) for i in self.pow.getSKI()])
+ except:
+ pass
- try:
- self.aki = ":".join(["%02X" % ord(i) for i in self.pow.getAKI()])
- except:
- pass
+ try:
+ self.aki = ":".join(["%02X" % ord(i) for i in self.pow.getAKI()])
+ except:
+ pass
- self.subject = self.canonize(self.pow.getSubject())
- self.issuer = self.canonize(self.pow.getIssuer())
+ self.subject = self.canonize(self.pow.getSubject())
+ self.issuer = self.canonize(self.pow.getIssuer())
- if self.subject in self.subjects:
- self.subjects[self.subject].append(self)
- else:
- self.subjects[self.subject] = [self]
+ if self.subject in self.subjects:
+ self.subjects[self.subject].append(self)
+ else:
+ self.subjects[self.subject] = [self]
- def canonize(self, name):
+ def canonize(self, name):
- # Probably should just use rpki.x509.X501DN class here.
+ # Probably should just use rpki.x509.X501DN class here.
- try:
- if self.cn_only and name[0][0][0] == "2.5.4.3":
- return name[0][0][1]
- except:
- pass
+ try:
+ if self.cn_only and name[0][0][0] == "2.5.4.3":
+ return name[0][0][1]
+ except:
+ pass
- return name
+ return name
- def set_node(self, node):
+ def set_node(self, node):
- self.node = node
+ self.node = node
- def dot(self):
+ def dot(self):
- label = []
+ label = []
- if self.show_issuer:
- label.append(("Issuer", self.issuer))
+ if self.show_issuer:
+ label.append(("Issuer", self.issuer))
- if self.show_subject:
- label.append(("Subject", self.subject))
+ if self.show_subject:
+ label.append(("Subject", self.subject))
- if self.show_file:
- label.append(("File", self.filename))
+ if self.show_file:
+ label.append(("File", self.filename))
- if self.show_aki:
- label.append(("AKI", self.aki))
+ if self.show_aki:
+ label.append(("AKI", self.aki))
- if self.show_ski:
- label.append(("SKI", self.ski))
+ if self.show_ski:
+ label.append(("SKI", self.ski))
- print "#", repr(label)
+ print "#", repr(label)
- if len(label) > 1:
- print '%s [shape = record, label = "{%s}"];' % (self.node, "|".join("{%s|%s}" % (x, y) for x, y in label if y is not None))
- else:
- print '%s [label = "%s"];' % (self.node, label[0][1])
+ if len(label) > 1:
+ print '%s [shape = record, label = "{%s}"];' % (self.node, "|".join("{%s|%s}" % (x, y) for x, y in label if y is not None))
+ else:
+ print '%s [label = "%s"];' % (self.node, label[0][1])
- for issuer in self.subjects.get(self.issuer, ()):
+ for issuer in self.subjects.get(self.issuer, ()):
- if issuer is self:
- print "# Issuer is self"
- issuer = None
+ if issuer is self:
+ print "# Issuer is self"
+ issuer = None
- if issuer is not None and self.aki is not None and self.ski is not None and self.aki == self.ski:
- print "# Self-signed"
- issuer = None
+ if issuer is not None and self.aki is not None and self.ski is not None and self.aki == self.ski:
+ print "# Self-signed"
+ issuer = None
- if issuer is not None and self.aki is not None and issuer.ski is not None and self.aki != issuer.ski:
- print "# AKI does not match issuer SKI"
- issuer = None
+ if issuer is not None and self.aki is not None and issuer.ski is not None and self.aki != issuer.ski:
+ print "# AKI does not match issuer SKI"
+ issuer = None
- if issuer is not None:
- print "%s -> %s;" % (issuer.node, self.node)
+ if issuer is not None:
+ print "%s -> %s;" % (issuer.node, self.node)
- print
+ print
certs = []
for topdir in sys.argv[1:] or ["."]:
- for dirpath, dirnames, filenames in os.walk(topdir):
- certs += [x509(dirpath + "/" + filename) for filename in filenames if filename.endswith(".cer")]
+ for dirpath, dirnames, filenames in os.walk(topdir):
+ certs += [x509(dirpath + "/" + filename) for filename in filenames if filename.endswith(".cer")]
for i, cert in enumerate(certs):
- cert.set_node("cert_%d" % i)
+ cert.set_node("cert_%d" % i)
print """\
digraph certificates {
@@ -165,6 +165,6 @@ ratio = fill;
"""
for cert in certs:
- cert.dot()
+ cert.dot()
print "}"
diff --git a/rp/Makefile.in b/rp/Makefile.in
index 2c770a46..d22ddbcb 100644
--- a/rp/Makefile.in
+++ b/rp/Makefile.in
@@ -1,6 +1,6 @@
# $Id$
-SUBDIRS = rcynic rpki-rtr utils
+SUBDIRS = config rcynic rpki-rtr utils
all clean test distclean install deinstall uninstall::
@for i in ${SUBDIRS}; do echo "Making $@ in $$i"; (cd $$i && ${MAKE} $@); done
diff --git a/rp/config/Makefile.in b/rp/config/Makefile.in
new file mode 100644
index 00000000..c6050f3e
--- /dev/null
+++ b/rp/config/Makefile.in
@@ -0,0 +1,88 @@
+# $Id$
+
+PYTHON = @PYTHON@
+
+INSTALL = @INSTALL@ -m 555
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+datarootdir = @datarootdir@
+datadir = @datadir@
+localstatedir = @localstatedir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+sysconfdir = @sysconfdir@
+
+abs_builddir = @abs_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir= @abs_top_builddir@
+srcdir = @srcdir@
+
+CFG_INSTALL_TARGETS = @CFG_INSTALL_TARGETS@
+
+all:: rpki.rp.xml rpki.rp.conf.sample
+
+clean::
+ @true
+
+install:: ${CFG_INSTALL_TARGETS}
+
+install-always:: all
+ @echo
+ @echo "== Default configuration file location is ${sysconfdir}/rpki.conf =="
+ @echo
+ ${INSTALL} -d ${DESTDIR}${sysconfdir}/rpki
+ ${INSTALL} rpki.rp.xml rpki.rp.conf.sample ${DESTDIR}${sysconfdir}/rpki
+
+test uninstall deinstall::
+ @true
+
+distclean:: clean
+ rm -f Makefile
+
+rpki.rp.xml: ${abs_top_srcdir}/rpki/autoconf.py rpki-confgen rpki-confgen.xml
+ ${PYTHON} rpki-confgen \
+ --read-xml rpki-confgen.xml \
+ --autoconf \
+ --set myrpki::handle=`hostname -f | sed 's/[.]/_/g'` \
+ --set myrpki::rpkid_server_host=`hostname -f` \
+ --set myrpki::pubd_server_host=`hostname -f` \
+ --pwgen myrpki::shared_sql_password \
+ --pwgen web_portal::secret-key \
+ --set myrpki::run_rpkid=no \
+ --set myrpki::run_pubd=no \
+ --write-xml $@
+
+rpki.rp.conf.sample: rpki.rp.xml
+ ${PYTHON} rpki-confgen \
+ --read-xml rpki.rp.xml \
+ --write-conf $@
+
+clean::
+ rm -f rpki.rp.xml rpki.rp.conf.sample
+
+install-postconf: \
+ install-user install-conf install-sql install-django
+
+# This should create user "rpki" and group "rpki", but rcynic already
+# does that...but we probably need to do it here instead, bother.
+
+install-user:
+ @true
+
+install-conf:
+ test -f ${DESTDIR}${sysconfdir}/rpki.conf ||\
+ cp -p ${DESTDIR}${sysconfdir}/rpki/rpki.rp.conf.sample ${DESTDIR}${sysconfdir}/rpki.conf
+
+#uninstall deinstall::
+# rm -f ${DESTDIR}${sysconfdir}/rpki/rpki.rp.xml ${DESTDIR}${sysconfdir}/rpki/rpki.rp.conf.sample
+
+install-sql:
+ ${sbindir}/rpki-sql-setup create
+
+install-django:
+ ${sbindir}/rpki-manage syncdb --noinput
+ ${sbindir}/rpki-manage migrate app
diff --git a/rp/config/rpki b/rp/config/rpki
new file mode 120000
index 00000000..d39d05b6
--- /dev/null
+++ b/rp/config/rpki
@@ -0,0 +1 @@
+../../rpki \ No newline at end of file
diff --git a/rp/config/rpki-confgen b/rp/config/rpki-confgen
new file mode 100755
index 00000000..7fac9eab
--- /dev/null
+++ b/rp/config/rpki-confgen
@@ -0,0 +1,281 @@
+#!/usr/bin/env python
+
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2013 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+import os
+import sys
+import argparse
+import base64
+import textwrap
+
+from lxml.etree import Element, SubElement, ElementTree, Comment
+
+space4 = " " * 4
+space6 = " " * 6
+space8 = " " * 8
+star78 = "*" * 78
+
+wiki_wrapper = textwrap.TextWrapper()
+conf_wrapper = textwrap.TextWrapper(initial_indent = "# ", subsequent_indent = "# ")
+xml6_wrapper = textwrap.TextWrapper(initial_indent = space6, subsequent_indent = space6)
+xml8_wrapper = textwrap.TextWrapper(initial_indent = space8, subsequent_indent = space8)
+
+class Option(object):
+
+ def __init__(self, name, value, doc):
+ self.name = name
+ self.value = value
+ self.doc = doc
+
+ @property
+ def width(self):
+ return len(self.name)
+
+ def to_xml(self):
+ x = Element("option", name = self.name)
+ if self.value is not None:
+ x.set("value", self.value)
+ for d in self.doc:
+ SubElement(x, "doc").text = "\n" + xml8_wrapper.fill(d) + "\n" + space6
+ return x
+
+ def to_wiki(self, f):
+ f.write("\n== {0.name} == #{0.name}\n".format(self))
+ for d in self.doc:
+ f.write("\n{0}\n".format(wiki_wrapper.fill(d)))
+ if self.value is None:
+ f.write("\n{0}\n".format(wiki_wrapper.fill("No default value.")))
+ else:
+ f.write("\n{{{{{{\n#!ini\n{0.name} = {0.value}\n}}}}}}\n".format(self))
+
+ def to_conf(self, f, width):
+ for i, d in enumerate(self.doc):
+ f.write("{}\n{}\n".format("" if i == 0 else "#",
+ conf_wrapper.fill(d)))
+ if self.value is None:
+ f.write("\n#{1.name:{0}} = ???\n".format(width - 1, self))
+ else:
+ f.write("\n{1.name:{0}} = {1.value}\n".format(width, self))
+
+class Section(object):
+
+ def __init__(self, name):
+ self.name = name
+ self.doc = []
+ self.options = []
+
+ @property
+ def width(self):
+ return max(o.width for o in self.options)
+
+ @classmethod
+ def from_xml(cls, elt):
+ self = cls(name = elt.get("name"))
+ for x in elt.iterchildren("doc"):
+ self.doc.append(" ".join(x.text.split()))
+ for x in elt.iterchildren("option"):
+ self.options.append(Option(name = x.get("name"), value = x.get("value"),
+ doc = [" ".join(d.text.split())
+ for d in x.iterchildren("doc")]))
+ return self
+
+ def to_xml(self):
+ x = Element("section", name = self.name)
+ for d in self.doc:
+ SubElement(x, "doc").text = "\n" + xml6_wrapper.fill(d) + "\n" + space4
+ x.extend(o.to_xml() for o in self.options)
+ return x
+
+ def to_wiki(self, f):
+ f.write("\n= [{0}] section = #{0}\n".format(self.name))
+ for d in self.doc:
+ f.write("\n{0}\n".format(wiki_wrapper.fill(d)))
+ for o in self.options:
+ o.to_wiki(f)
+
+ def to_conf(self, f, width):
+ f.write("\n" + "#" * 78 + "\n\n[" + self.name + "]\n")
+ if self.doc:
+ f.write("\n##")
+ for i, d in enumerate(self.doc):
+ f.write("{}\n{}\n".format("" if i == 0 else "#",
+ conf_wrapper.fill(d)))
+ f.write("##\n")
+ for o in self.options:
+ o.to_conf(f, width)
+
+def wiki_header(f, ident, toc):
+ f.write(textwrap.dedent('''\
+ {{{{{{
+ #!comment
+
+ {star78}
+ THIS PAGE WAS GENERATED AUTOMATICALLY, DO NOT EDIT.
+
+ Generated from {ident}
+ by $Id$
+ {star78}
+
+ }}}}}}
+ '''.format(star78 = star78,
+ ident = ident)))
+ if toc is not None:
+ f.write("[[TracNav({})]]\n".format(toc))
+ f.write("[[PageOutline]]\n")
+
+def conf_header(f, ident):
+ f.write(textwrap.dedent('''\
+ # Automatically generated. Edit as needed, but be careful of overwriting.
+ #
+ # Generated from {ident}
+ # by $Id$
+
+ '''.format(ident = ident)))
+
+
+# http://stackoverflow.com/questions/9027028/argparse-argument-order
+
+class CustomAction(argparse.Action):
+
+ def __call__(self, parser, namespace, values, option_string = None):
+ if not "ordered_args" in namespace:
+ namespace.ordered_args = []
+ namespace.ordered_args.append((self.dest, values))
+
+class CustomFlagAction(CustomAction):
+
+ def __init__(self, option_strings, dest, default = None,
+ required = False, help = None): # pylint: disable=W0622
+ super(CustomFlagAction, self).__init__(
+ option_strings = option_strings,
+ dest = dest,
+ nargs = 0,
+ const = None,
+ default = default,
+ required = required,
+ help = help)
+
+
+class main(object):
+
+ def __init__(self):
+ self.sections = []
+ self.section_map = None
+ self.option_map = None
+ self.ident = None
+ self.toc = None
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("--read-xml", type = argparse.FileType("r"), metavar = "FILE", action = CustomAction, help = "XML input file defining sections and options", required = True)
+ parser.add_argument("--write-xml", type = argparse.FileType("w"), metavar = "FILE", action = CustomAction, help = "XML output file to snapshot configuration")
+ parser.add_argument("--write-conf", type = argparse.FileType("w"), metavar = "FILE", action = CustomAction, help = "rpki.conf configuration file to write")
+ parser.add_argument("--write-wiki", type = argparse.FileType("w"), metavar = "FILE", action = CustomAction, help = "TracWiki file to write (monolithic)")
+ parser.add_argument("--write-wiki-pages", metavar = "PATTERN", action = CustomAction, help = "TracWiki filenames (pattern) to write (one section per page)")
+ parser.add_argument("--set", metavar = "VARVAL", action = CustomAction, help = "variable setting in form \"VAR=VAL\"")
+ parser.add_argument("--pwgen", metavar = "VAR", action = CustomAction, help = "set variable to generated password")
+ parser.add_argument("--toc", metavar = "TOCVAL", action = CustomAction, help = "set TOC value to use with TracNav plugin")
+ parser.add_argument("--autoconf", action = CustomFlagAction, help = "configure [autoconf] section")
+ args = parser.parse_args()
+
+ for cmd, arg in args.ordered_args:
+ getattr(self, "do_" + cmd)(arg)
+
+ def do_read_xml(self, arg):
+ self.option_map = None
+ root = ElementTree(file = arg).getroot()
+ self.ident = root.get("ident")
+ self.sections.extend(Section.from_xml(x) for x in root.iterchildren("section"))
+ self.option_map = {}
+ self.section_map = {}
+ for section in self.sections:
+ if section.name in self.section_map:
+ sys.exit("Duplicate section {}".format(section.name))
+ self.section_map[section.name] = section
+ for option in section.options:
+ name = (section.name, option.name)
+ if name in self.option_map:
+ sys.exit("Duplicate option {}::{}".format(*name))
+ self.option_map[name] = option
+
+ def do_set(self, arg):
+ try:
+ name, value = arg.split("=", 1)
+ section, option = name.split("::")
+ except ValueError:
+ sys.exit("Couldn't parse --set specification \"{}\"".format(arg))
+ name = (section, option)
+ if name not in self.option_map:
+ sys.exit("Couldn't find option {}::{}".format(*name))
+ self.option_map[name].value = value
+
+ def do_pwgen(self, arg):
+ try:
+ section, option = arg.split("::")
+ except ValueError:
+ sys.exit("Couldn't parse --pwgen specification \"{}\"".format(arg))
+ name = (section, option)
+ if name not in self.option_map:
+ sys.exit("Couldn't find option {}::{}".format(name))
+ self.option_map[name].value = base64.urlsafe_b64encode(os.urandom(66))
+
+ def do_autoconf(self, ignored):
+ try:
+ import rpki.autoconf
+ for option in self.section_map["autoconf"].options:
+ try:
+ option.value = getattr(rpki.autoconf, option.name)
+ except AttributeError:
+ pass
+ except ImportError:
+ sys.exit("rpki.autoconf module is not available")
+ except KeyError:
+ sys.exit("Couldn't find autoconf section")
+
+ def do_write_xml(self, arg):
+ x = Element("configuration", ident = self.ident)
+ x.append(Comment(" Machine-editable configuration snapshot, generated automatically, do not touch "))
+ x.extend(s.to_xml() for s in self.sections)
+ ElementTree(x).write(arg, pretty_print = True, encoding = "us-ascii")
+
+ def do_write_wiki(self, arg):
+ for i, section in enumerate(self.sections):
+ if i == 0:
+ wiki_header(arg, self.ident, self.toc)
+ else:
+ arg.write("\f\n")
+ section.to_wiki(arg)
+
+ def do_write_wiki_pages(self, arg):
+ for section in self.sections:
+ with open(arg % section.name, "w") as f:
+ wiki_header(f, self.ident, self.toc)
+ section.to_wiki(f)
+
+ def do_write_conf(self, arg):
+ conf_header(arg, self.ident)
+ width = max(s.width for s in self.sections)
+ for section in self.sections:
+ section.to_conf(arg, width)
+
+ def do_toc(self, arg):
+ self.toc = arg
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ca/rpki-confgen.xml b/rp/config/rpki-confgen.xml
index ac36d3db..b7bc2f62 100644
--- a/ca/rpki-confgen.xml
+++ b/rp/config/rpki-confgen.xml
@@ -57,7 +57,7 @@
</option>
<option name = "bpki_servers_directory"
- value = "${autoconf::datarootdir}/rpki">
+ value = "${autoconf::datarootdir}/rpki/bpki">
<doc>
Directory for BPKI files generated by rpkic and used by rpkid
and pubd. You will not normally need to change this.
@@ -148,31 +148,6 @@
</doc>
</option>
- <option name = "run_rootd"
- value = "no">
- <doc>
- Whether you want to run your very own copy of rootd. Don't
- enable this unless you really know what you're doing.
- </doc>
- </option>
-
- <option name = "rootd_server_host"
- value = "localhost">
- <doc>
- DNS hostname for rootd, if you're running it. This should be
- localhost unless you really know what you are doing.
- </doc>
- </option>
-
- <option name = "rootd_server_port"
- value = "4401">
- <doc>
- Server port number for rootd, if you're running it. This can
- be any legal TCP port number that you're not using for
- something else.
- </doc>
- </option>
-
<option name = "publication_base_directory"
value = "${autoconf::datarootdir}/rpki/publication">
<doc>
@@ -186,17 +161,17 @@
</doc>
</option>
- <option name = "publication_root_cert_directory"
- value = "${myrpki::publication_base_directory}.root">
+ <option name = "rrdp_publication_base_directory"
+ value = "${autoconf::datarootdir}/rpki/rrdp-publication">
<doc>
- Root of local directory tree where rootd (sigh) should write out
- published data. This is just like publication_base_directory, but
- rootd is too dumb to use pubd and needs its own directory in
- which to write one certificate, one CRL, and one manifest.
- Neither rootd nor rsyncd much cares //where// you tell them to put
- this stuff, the important thing is that the rsync URIs in
- generated certificates match up with the published objects so that
- relying parties can find and verify rootd's published outputs.
+ Root of local directory tree where pubd should write out RRDP
+ files. You need to configure this, and the configuration
+ should match up with the directory where you point the web
+ server (usually Apache) that serves the RRDP files. Neither
+ pubd nor Apache much cares //where// you tell it to put this
+ stuff, the important thing is that all the URIs match up so
+ that relying parties can find and verify rpkid's published
+ outputs.
</doc>
</option>
@@ -209,15 +184,6 @@
</doc>
</option>
- <option name = "publication_root_module"
- value = "root">
- <doc>
- rsyncd module name corresponding to publication_root_cert_directory.
- This has to match the module you configured into `rsyncd.conf`.
- Leave this alone unless you have some need to change it.
- </doc>
- </option>
-
<option name = "publication_rsync_server"
value = "${myrpki::pubd_server_host}">
<doc>
@@ -226,6 +192,22 @@
</doc>
</option>
+ <option name = "publication_rrdp_base_uri"
+ value = "https://${myrpki::pubd_server_host}/rrdp/">
+ <doc>
+ Base URI for RRDP notification, snapshot, and delta files.
+ In most cases this should be a HTTPS URL for the directory
+ on the publication server where the notify.xml lives.
+ </doc>
+ </option>
+
+ <option name = "publication_rrdp_notification_uri"
+ value = "${myrpki::publication_rrdp_base_uri}notify.xml">
+ <doc>
+ URI for RRDP notification file. You shouldn't need to change this.
+ </doc>
+ </option>
+
<option name = "start_rpkid"
value = "${myrpki::run_rpkid}">
<doc>
@@ -268,17 +250,18 @@
</doc>
</option>
- <option name = "start_rootd"
- value = "${myrpki::run_rootd}">
+ <option name = "shared_sql_engine"
+ value = "mysql">
<doc>
- rootd startup control. This should usually have the same value as
- run_rootd: the only case where you would want to change this is
- when you are running the back-end code on a different machine from
- one or more of the daemons, in which case you need finer control
- over which daemons to start on which machines. In such cases,
- run_rootd controls whether the back-end code is doing things to
- manage rootd, while start_rootd controls whether
- rpki-start-servers attempts to start rootd on this machine.
+ Database engine to use. Default is MySQL, because that's what
+ we've been using for years. Now that all runtime database
+ access is via Django ORM, changing to another engine supported
+ by Django is just a configuration issue.
+ </doc>
+ <doc>
+ Current supported values are "mysql" (the default), "sqlite3",
+ and "postgresql". In theory it should be straightforward to
+ add support for any SQL engine Django supports.
</doc>
</option>
@@ -286,7 +269,7 @@
value = "rpki">
<doc>
If you're comfortable with having all of the databases use the
- same MySQL username, set that value here. The default setting
+ same SQL username, set that value here. The default setting
of this variable should be fine.
</doc>
</option>
@@ -294,7 +277,7 @@
<option name = "shared_sql_password">
<doc>
If you're comfortable with having all of the databases use the
- same MySQL password, set that value here. You should use a
+ same SQL password, set that value here. You should use a
locally generated password either here or in the individual
settings below. The installation process generates a random
value for this option, which satisfies this requirement, so
@@ -302,6 +285,46 @@
</doc>
</option>
+ <option name = "rcynic_sql_engine"
+ value = "${myrpki::shared_sql_engine}">
+ <doc>
+ SQL engine to use for rcynic's database. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "rcynic_sql_database"
+ value = "rcynic">
+ <doc>
+ SQL database name for rcynic's database. The default setting of
+ this variable should be fine.
+ </doc>
+ </option>
+
+ <option name = "rcynic_sql_username"
+ value = "${myrpki::shared_sql_username}">
+ <doc>
+ If you want to use a separate SQL username for rcynic's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "rcynic_sql_password"
+ value = "${myrpki::shared_sql_password}">
+ <doc>
+ If you want to use a separate SQL password for rcynic's database,
+ set it here.
+ </doc>
+ </option>
+
+ <option name = "rpkid_sql_engine"
+ value = "${myrpki::shared_sql_engine}">
+ <doc>
+ SQL engine to use for rpkid's database. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
<option name = "rpkid_sql_database"
value = "rpkid">
<doc>
@@ -326,6 +349,14 @@
</doc>
</option>
+ <option name = "irdbd_sql_engine"
+ value = "${myrpki::shared_sql_engine}">
+ <doc>
+ SQL engine to use for irdbd's database. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
<option name = "irdbd_sql_database"
value = "irdbd">
<doc>
@@ -350,6 +381,14 @@
</doc>
</option>
+ <option name = "pubd_sql_engine"
+ value = "${myrpki::shared_sql_engine}">
+ <doc>
+ SQL engine to use for pubd's database. The default setting
+ of this variable should be fine.
+ </doc>
+ </option>
+
<option name = "pubd_sql_database"
value = "pubd">
<doc>
@@ -374,6 +413,118 @@
</doc>
</option>
+ <option name = "log-destination"
+ value = "file">
+ <doc>
+ Default logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-directory"
+ value = "/var/log/rpki">
+ <doc>
+ Where to write log files when logging to files.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "info">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "3">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "56">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
+ </section>
+
+ <section name = "rcynic">
+
+ <doc>
+ rcynicng, unlike its predecessor, uses the same `rpki.conf`
+ file as all the other programs in the RPKI toolkit. Start
+ rcynicng with "`-c filename`" to choose a different
+ configuration file. All options are in the "`[rcynic]`"
+ section.
+ </doc>
+
+ <option name = "sql-engine"
+ value = "${myrpki::rcynic_sql_engine}">
+ <doc>
+ SQL engine for rcynic.
+ </doc>
+ </option>
+
+ <option name = "sql-database"
+ value = "${myrpki::rcynic_sql_database}">
+ <doc>
+ SQL database name for rcynic.
+ </doc>
+ </option>
+
+ <option name = "sql-username"
+ value = "${myrpki::rcynic_sql_username}">
+ <doc>
+ SQL user name for rcynic.
+ </doc>
+ </option>
+
+ <option name = "sql-password"
+ value = "${myrpki::rcynic_sql_password}">
+ <doc>
+ SQL password for rcynic.
+ </doc>
+ </option>
+
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
+ <doc>
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/rcynic.log">
+ <doc>
+ Where to write log file when logging to a file.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
</section>
<section name = "rpkid">
@@ -385,24 +536,31 @@
Certificates and keys may be in either DER or PEM format.
</doc>
+ <option name = "sql-engine"
+ value = "${myrpki::rpkid_sql_engine}">
+ <doc>
+ SQL engine for rpkid.
+ </doc>
+ </option>
+
<option name = "sql-database"
value = "${myrpki::rpkid_sql_database}">
<doc>
- MySQL database name for rpkid.
+ SQL database name for rpkid.
</doc>
</option>
<option name = "sql-username"
value = "${myrpki::rpkid_sql_username}">
<doc>
- MySQL user name for rpkid.
+ SQL user name for rpkid.
</doc>
</option>
<option name = "sql-password"
value = "${myrpki::rpkid_sql_password}">
<doc>
- MySQL password for rpkid.
+ SQL password for rpkid.
</doc>
</option>
@@ -473,6 +631,42 @@
</doc>
</option>
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
+ <doc>
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/rpkid.log">
+ <doc>
+ Where to write log file when logging to a file.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
</section>
<section name = "irdbd">
@@ -490,24 +684,31 @@
configuration than the other daemons.
</doc>
+ <option name = "sql-engine"
+ value = "${myrpki::irdbd_sql_engine}">
+ <doc>
+ SQL engine for irdbd.
+ </doc>
+ </option>
+
<option name = "sql-database"
value = "${myrpki::irdbd_sql_database}">
<doc>
- MySQL database name for irdbd.
+ SQL database name for irdbd.
</doc>
</option>
<option name = "sql-username"
value = "${myrpki::irdbd_sql_username}">
<doc>
- MySQL user name for irdbd.
+ SQL user name for irdbd.
</doc>
</option>
<option name = "sql-password"
value = "${myrpki::irdbd_sql_password}">
<doc>
- MySQL password for irdbd.
+ SQL password for irdbd.
</doc>
</option>
@@ -532,6 +733,42 @@
</doc>
</option>
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
+ <doc>
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
+ </doc>
+ </option>
+
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/irdbd.log">
+ <doc>
+ Where to write log file when logging to a file.
+ </doc>
+ </option>
+
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
+ <doc>
+ Default logging level.
+ </doc>
+ </option>
+
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
+ <doc>
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
+ </doc>
+ </option>
+
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
+ <doc>
+ How many old logs to keep before deleting.
+ </doc>
+ </option>
+
</section>
<section name = "pubd">
@@ -543,24 +780,31 @@
BPKI certificates and keys may be either DER or PEM format.
</doc>
+ <option name = "sql-engine"
+ value = "${myrpki::pubd_sql_engine}">
+ <doc>
+ SQL engine for pubd.
+ </doc>
+ </option>
+
<option name = "sql-database"
value = "${myrpki::pubd_sql_database}">
<doc>
- MySQL database name for pubd.
+ SQL database name for pubd.
</doc>
</option>
<option name = "sql-username"
value = "${myrpki::pubd_sql_username}">
<doc>
- MySQL user name for pubd.
+ SQL user name for pubd.
</doc>
</option>
<option name = "sql-password"
value = "${myrpki::pubd_sql_password}">
<doc>
- MySQL password for pubd.
+ SQL password for pubd.
</doc>
</option>
@@ -577,6 +821,20 @@
</doc>
</option>
+ <option name = "rrdp-publication-base"
+ value = "${myrpki::rrdp_publication_base_directory}">
+ <doc>
+ Root of local directory tree where pubd should write out RRDP
+ files. You need to configure this, and the configuration
+ should match up with the directory where you point the web
+ server (usually Apache) that serves the RRDP files. Neither
+ pubd nor Apache much cares //where// you tell it to put this
+ stuff, the important thing is that all the URIs match up so
+ that relying parties can find and verify rpkid's published
+ outputs.
+ </doc>
+ </option>
+
<option name = "server-host"
value = "${myrpki::pubd_server_host}">
<doc>
@@ -618,196 +876,104 @@
</doc>
</option>
- <option name = "irbe-cert"
- value = "${myrpki::bpki_servers_directory}/irbe.cer">
- <doc>
- Where pubd should look for the back-end control client's BPKI EE
- certificate. Don't change this unless you really know what you
- are doing.
- </doc>
- </option>
-
- </section>
-
- <section name = "rootd">
-
- <doc>
- You don't need to run rootd unless you're IANA, are certifying
- private address space, or are an RIR which refuses to accept IANA as
- the root of the public address hierarchy.
- </doc>
-
- <doc>
- Ok, if that wasn't enough to scare you off: rootd is a mess, and
- needs to be rewritten, or, better, merged into rpkid. It
- doesn't use the publication protocol, and it requires far too
- many configuration parameters.
- </doc>
-
- <doc>
- rootd was originally intended to be a very simple program which
- simplified rpkid enormously by moving one specific task (acting
- as the root CA of an RPKI certificate hierarchy) out of rpkid.
- As the specifications and code (mostly the latter) have evolved,
- however, this task has become more complicated, and rootd would
- have to become much more complicated to keep up.
- </doc>
-
- <doc>
- Don't run rootd unless you're sure that you need to do so.
- </doc>
-
- <doc>
- Still think you need to run rootd? OK, but remember, you have
- been warned....
- </doc>
-
- <doc>
- rootd's default configuration file is the system `rpki.conf`
- file. Start rootd with "`-c filename`" to choose a different
- configuration file. All options are in the "`[rootd]`" section.
- Certificates and keys may be in either DER or PEM format.
- </doc>
-
- <option name = "bpki-ta"
- value = "${myrpki::bpki_servers_directory}/ca.cer">
- <doc>
- Where rootd should look for the BPKI trust anchor. All BPKI
- certificate verification within rootd traces back to this
- trust anchor. Don't change this unless you really know what
- you are doing.
- </doc>
- </option>
-
- <option name = "rootd-bpki-crl"
+ <option name = "pubd-crl"
value = "${myrpki::bpki_servers_directory}/ca.crl">
<doc>
- BPKI CRL. Don't change this unless you really know what you are
- doing.
- </doc>
- </option>
-
- <option name = "rootd-bpki-cert"
- value = "${myrpki::bpki_servers_directory}/rootd.cer">
- <doc>
- rootd's own BPKI EE certificate. Don't change this unless you
- really know what you are doing.
- </doc>
- </option>
-
- <option name = "rootd-bpki-key"
- value = "${myrpki::bpki_servers_directory}/rootd.key">
- <doc>
- Private key corresponding to rootd's own BPKI EE certificate.
- Don't change this unless you really know what you are doing.
+ Where pubd should look for the CRL covering its own BPKI EE
+ certificate. Don't change this unless you really know what
+ you are doing.
</doc>
</option>
- <option name = "child-bpki-cert"
- value = "${myrpki::bpki_servers_directory}/child.cer">
+ <option name = "irbe-cert"
+ value = "${myrpki::bpki_servers_directory}/irbe.cer">
<doc>
- BPKI certificate for rootd's one and only up-down child (RPKI
- engine to which rootd issues an RPKI certificate). Don't
- change this unless you really know what you are doing.
+ Where pubd should look for the back-end control client's BPKI EE
+ certificate. Don't change this unless you really know what you
+ are doing.
</doc>
</option>
- <option name = "server-host"
- value = "${myrpki::rootd_server_host}">
+ <option name = "rrdp-base-uri"
+ value = "${myrpki::publication_rrdp_base_uri}">
<doc>
- Server host on which rootd should listen.
+ RRDP base URI for naming snapshots and deltas.
</doc>
</option>
- <option name = "server-port"
- value = "${myrpki::rootd_server_port}">
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
<doc>
- Server port on which rootd should listen.
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
</doc>
</option>
- <option name = "rpki-root-dir"
- value = "${myrpki::publication_base_directory}">
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/pubd.log">
<doc>
- Where rootd should write its output. Yes, rootd should be using
- pubd instead of publishing directly, but it doesn't. This
- needs to match pubd's configuration.
+ Where to write log file when logging to a file.
</doc>
</option>
- <option name = "rpki-base-uri"
- value = "rsync://${myrpki::publication_rsync_server}/${myrpki::publication_rsync_module}/">
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
<doc>
- rsync URI corresponding to directory containing rootd's outputs.
+ Default logging level.
</doc>
</option>
- <option name = "rpki-root-cert-uri"
- value = "rsync://${myrpki::publication_rsync_server}/${myrpki::publication_root_module}/root.cer">
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
<doc>
- rsync URI for rootd's root (self-signed) RPKI certificate.
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
</doc>
</option>
- <option name = "rpki-root-key"
- value = "${myrpki::bpki_servers_directory}/root.key">
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
<doc>
- Private key corresponding to rootd's root RPKI certificate.
+ How many old logs to keep before deleting.
</doc>
</option>
- <option name = "rpki-root-cert"
- value = "${myrpki::publication_root_cert_directory}/root.cer">
- <doc>
- Filename (as opposed to rsync URI) of rootd's root RPKI
- certificate.
- </doc>
- </option>
+ </section>
- <option name = "rpki-subject-pkcs10"
- value = "${myrpki::bpki_servers_directory}/rootd.subject.pkcs10">
- <doc>
- Where rootd should stash a copy of the PKCS #10 request it gets
- from its one (and only) child
- </doc>
- </option>
+ <section name = "rpki-nanny">
- <option name = "rpki-subject-lifetime"
- value = "30d">
+ <option name = "log-destination"
+ value = "${myrpki::log-destination}">
<doc>
- Lifetime of the one and only RPKI certificate rootd issues.
+ Logging mechanism, can be "file", "syslog", "stderr", or "stdout".
</doc>
</option>
- <option name = "rpki-root-crl"
- value = "root.crl">
+ <option name = "log-filename"
+ value = "${myrpki::log-directory}/rpki-nanny.log">
<doc>
- Filename (relative to rootd-base-uri and rpki-root-dir) of the CRL
- for rootd's root RPKI certificate.
+ Where to write log file when logging to a file.
</doc>
</option>
- <option name = "rpki-root-manifest"
- value = "root.mft">
+ <option name = "log-level"
+ value = "${myrpki::log-level}">
<doc>
- Filename (relative to rootd-base-uri and rpki-root-dir) of the
- manifest for rootd's root RPKI certificate.
+ Default logging level.
</doc>
</option>
- <option name = "rpki-class-name"
- value = "${myrpki::handle}">
+ <option name = "log-time-limit"
+ value = "${myrpki::log-time-limit}">
<doc>
- Up-down protocol class name for RPKI certificate rootd issues to its
- one (and only) child.
+ Interval between log file rotations, in hours.
+ Set to zero to disable automatic rotations.
</doc>
</option>
- <option name = "rpki-subject-cert"
- value = "${myrpki::handle}.cer">
+ <option name = "log-count"
+ value = "${myrpki::log-count}">
<doc>
- Filename (relative to rootd-base-uri and rpki-root-dir) of the one
- (and only) RPKI certificate rootd issues.
+ How many old logs to keep before deleting.
</doc>
</option>
@@ -816,30 +982,18 @@
<section name = "web_portal">
<doc>
- Glue to allow the Django application to pull user configuration
- from this file rather than directly editing settings.py.
+ Glue to allow Django to pull user configuration from this file
+ rather than requiring the user to edit settings.py.
</doc>
- <option name = "sql-database"
- value = "${myrpki::irdbd_sql_database}">
- <doc>
- SQL database name the web portal should use.
- </doc>
- </option>
-
- <option name = "sql-username"
- value = "${myrpki::irdbd_sql_username}">
- <doc>
- SQL user name the web portal should use.
- </doc>
- </option>
-
- <option name = "sql-password"
- value = "${myrpki::irdbd_sql_password}">
- <doc>
- SQL password the web portal should use.
- </doc>
- </option>
+ <!--
+ We used to have SQL settings for the GUI here, but since
+ they're pretty much required to be identical to the ones for
+ irdbd at this point, the duplicate entries were just another
+ chance to misconfigure something, so I removed them. Not yet
+ sure whether this was the right approach. Too much historical
+ baggage in this file.
+ -->
<option name = "secret-key">
<doc>
diff --git a/rp/config/rpki-generate-root-certificate b/rp/config/rpki-generate-root-certificate
new file mode 100755
index 00000000..10b8b194
--- /dev/null
+++ b/rp/config/rpki-generate-root-certificate
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+"""
+Generate an RPKI root certificate for rootd. In most cases you should
+not need to do this; see caveats in the manual about running rootd if
+you think you need this. This script does nothing that can't also be
+done with the OpenSSL command line tool, but on some platforms the
+installed copy of openssl doesn't understand the RFC 3779 extensions.
+"""
+
+import os
+import sys
+import pwd
+import time
+import rpki.x509
+import rpki.config
+import rpki.sundial
+import rpki.autoconf
+import rpki.resource_set
+
+os.environ["TZ"] = "UTC"
+time.tzset()
+
+cfg = rpki.config.argparser(section = "rootd", doc = __doc__)
+
+default_certfile = cfg.get("rpki-root-cert-file", "root.cer")
+default_keyfile = cfg.get("rpki-root-key-file", "root.key")
+default_talfile = os.path.splitext(default_certfile)[0] + ".tal"
+
+cfg.argparser.add_argument("-a", "--asns", help = "ASN resources", default = "0-4294967295")
+cfg.argparser.add_argument("-4", "--ipv4", help = "IPv4 resources", default = "0.0.0.0/0")
+cfg.argparser.add_argument("-6", "--ipv6", help = "IPv6 resources", default = "::/0")
+cfg.argparser.add_argument("--certificate", help = "certificate file", default = default_certfile)
+cfg.argparser.add_argument("--key", help = "key file", default = default_keyfile)
+cfg.argparser.add_argument("--tal", help = "TAL file", default = default_talfile)
+
+args = cfg.argparser.parse_args()
+
+resources = rpki.resource_set.resource_bag(
+ asn = args.asns,
+ v4 = args.ipv4,
+ v6 = args.ipv6)
+
+keypair = rpki.x509.RSA.generate(quiet = True)
+
+sia = (cfg.get("rpki_base_uri") + "/",
+ cfg.get("rpki-root-manifest-uri"),
+ None,
+ cfg.get("publication_rrdp_notification_uri", section = "myrpki"))
+
+uris = (cfg.get("rpki-root-cert-uri"),
+ cfg.get("publication_rrdp_base_uri", section = "myrpki") + "root.cer")
+
+cert = rpki.x509.X509.self_certify(
+ keypair = keypair,
+ subject_key = keypair.get_public(),
+ serial = 1,
+ sia = sia,
+ notAfter = rpki.sundial.now() + rpki.sundial.timedelta(days = 365),
+ resources = resources)
+
+with open(args.certificate, "wb") as f:
+ f.write(cert.get_DER())
+
+with open(args.tal, "w") as f:
+ for uri in uris:
+ f.write(uri + "\n")
+ f.write(keypair.get_public().get_Base64())
+
+with os.fdopen(os.open(args.key, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0400), "w") as f:
+ f.write(keypair.get_DER())
+
+try:
+ pw = pwd.getpwnam(rpki.autoconf.RPKI_USER)
+ os.chown(args.key, pw.pw_uid, pw.pw_gid)
+except:
+ pass
diff --git a/rp/config/rpki-manage b/rp/config/rpki-manage
new file mode 100755
index 00000000..ac3cc967
--- /dev/null
+++ b/rp/config/rpki-manage
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Using a Python script to run sudo to run a Python script is a bit
+# silly, but it lets us use rpki.autoconf to locate sudo, lets us
+# avoid needing a custom setuid wrapper, lets us avoid another pass
+# through the adventures of shell quoting and tokenization, and
+# generally is just a lot simpler to implement correctly.
+#
+# OK, it's probably a few milliseconds slower. Big deal.
+
+if __name__ == "__main__":
+
+ import os
+ import pwd
+ import sys
+ import rpki.autoconf
+
+ try:
+ uid = pwd.getpwnam(rpki.autoconf.RPKI_USER).pw_uid
+ except:
+ uid = None
+
+ if uid is None or uid == os.geteuid():
+
+ # django-admin seems to have problems creating the superuser account when
+ # $LANG is unset or is set to something totally incompatible with UTF-8.
+
+ if os.environ.get("LANG") in (None, "", "C"):
+ os.environ["LANG"] = "en_US.UTF-8"
+
+ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rpki.django_settings.gui")
+
+ from django.core.management import execute_from_command_line
+
+ execute_from_command_line()
+
+ else:
+
+ try:
+ argv = [rpki.autoconf.SUDO, "-u", rpki.autoconf.RPKI_USER, sys.executable]
+ argv.extend(os.path.abspath(a) if i == 0 else a for i, a in enumerate(sys.argv))
+ os.execv(argv[0], argv)
+ sys.exit("rpki-manage startup failure, no exception so don't know why, sorry")
+
+ except Exception as e:
+ sys.exit("Couldn't exec sudo python rpki-manage: {!s}".format(e))
diff --git a/rp/config/rpki-sql-backup b/rp/config/rpki-sql-backup
new file mode 100755
index 00000000..09e5856e
--- /dev/null
+++ b/rp/config/rpki-sql-backup
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2010-2013 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Back up data from SQL databases, looking at config file to figure out
+which databases and what credentials to use with them, and eliminating
+duplicates in cases where we've configured multiple applications to
+share a single database.
+"""
+
+import os
+import sys
+import time
+import argparse
+import subprocess
+import rpki.config
+
+os.environ["TZ"] = "UTC"
+time.tzset()
+
+cfg = rpki.config.argparser(doc = __doc__, section = "myrpki")
+cfg.argparser.add_argument("-o", "--output", type = argparse.FileType("wb"), default = sys.stdout,
+ help = "destination for SQL dump (default: stdout)")
+cfg.argparser.add_argument("-v", "--verbose", action = "store_true",
+ help = "whistle while you work")
+args = cfg.argparser.parse_args()
+
+templates = dict(mysql = "mysqldump --add-drop-database -u{username} -p{password} -B{database}",
+ sqlite3 = "sqlite3 {database} .dump",
+ postgresql = "sudo -u {username} pg_dump {database}")
+
+cmds = []
+
+for name in ("rpkid", "irdbd", "pubd"):
+ if cfg.getboolean("start_" + name, False):
+ cmd = templates[cfg.get("sql-engine", section = name)]
+ cmd = cmd.format(database = cfg.get("sql-database", section = name),
+ username = cfg.get("sql-username", section = name),
+ password = cfg.get("sql-password", section = name))
+ if cmd not in cmds:
+ cmds.append(cmd)
+
+for cmd in cmds:
+ if args.verbose:
+ sys.stderr.write("[Running \"{}\"]\n".format(cmd))
+ subprocess.check_call(cmd.split(), stdout = args.output)
diff --git a/rp/config/rpki-sql-setup b/rp/config/rpki-sql-setup
new file mode 100755
index 00000000..6fd64588
--- /dev/null
+++ b/rp/config/rpki-sql-setup
@@ -0,0 +1,348 @@
+#!/usr/bin/env python
+
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2009-2013 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Automated setup of SQL stuff used by the RPKI tools. Pulls
+configuration from rpki.conf, prompts for SQL password when needed.
+"""
+
+import os
+import pwd
+import sys
+import getpass
+import textwrap
+import argparse
+import rpki.config
+
+
+class Abstract_Driver(object):
+
+ # Kludge to make classes derived from this into singletons. Net
+ # of a Million Lies says this is Not Pythonic, but it seems to
+ # work, so long as one doesn't attempt to subclass the resulting
+ # driver classes. For our purposes, it will do.
+
+ __instance = None
+
+ def __new__(cls, *args, **kwargs):
+ if cls.__instance is None:
+ cls.__instance = object.__new__(cls, *args, **kwargs)
+ return cls.__instance
+
+ def db_accessible(self, udb):
+ try:
+ self._db_accessible_test(udb)
+ except:
+ return False
+ else:
+ return True
+
+ def fetchone(self):
+ return self._cur.fetchone()
+
+ def fetchall(self):
+ return self._cur.fetchall()
+
+ def close(self):
+ self._cur.close()
+ self._db.close()
+
+ def log(self, msg):
+ if self.args.verbose:
+ sys.stderr.write(msg + "\n")
+
+
+class MySQL_Driver(Abstract_Driver):
+
+ _initialized = False
+
+ def __init__(self, args):
+ try:
+ self.driver
+ except AttributeError:
+ from rpki.mysql_import import MySQLdb
+ self.driver = MySQLdb
+ self.args = args
+
+ def _db_accessible_test(self, udb):
+ self.driver.connect(db = udb.database, user = udb.username, passwd = udb.password).close()
+
+ def db_exists(self, udb):
+ self.execute("SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = '{0.database}'".format(udb))
+ return bool(self.fetchone()[0])
+
+ def execute(*args):
+ try:
+ self._cur
+ except AttributeError:
+ self.log("MySQL driver initializing root connection")
+ if self.args.mysql_defaults:
+ mysql_cfg = rpki.config.parser(set_filename = self.args.mysql_defaults, section = "client")
+ self._db = self.driver.connect(db = "mysql",
+ user = mysql_cfg.get("user"),
+ passwd = mysql_cfg.get("password"))
+ else:
+ self._db = self.driver.connect(db = "mysql",
+ user = "root",
+ passwd = getpass.getpass("Please enter your MySQL root password: "))
+ self._db.autocommit(True)
+ self._cur = self._db.cursor()
+ self.log("MySQL driver executing {}".format(", ".join(args)))
+ return self._cur.execute(*args)
+
+ def create(self, udb):
+ self.execute("CREATE DATABASE IF NOT EXISTS {0.database}".format(udb))
+ self.fix_grants(udb)
+
+ def drop(self, udb):
+ self.execute("DROP DATABASE IF EXISTS {0.database}".format(udb))
+
+ def script_drop(self, udb):
+ self.args.script_output.write("DROP DATABASE IF EXISTS {};\n".format(udb.database))
+
+ def fix_grants(self, udb):
+ self.execute("GRANT ALL ON {0.database}.* TO {0.username}@localhost IDENTIFIED BY %s".format(udb),
+ (udb.password,))
+
+class SQLite3_Driver(Abstract_Driver):
+
+ def __init__(self, args):
+ try:
+ self.driver
+ except AttributeError:
+ import sqlite3
+ self.driver = sqlite3
+ self.args = args
+
+ def _db_accessible_test(self, udb):
+ self.driver.connect(udb.database).close()
+
+ def db_exists(self, udb):
+ return os.path.exists(udb.database)
+
+ def _grant(self, udb):
+ if udb.username and os.geteuid() == 0:
+ pw = pwd.getpwnam(udb.username)
+ os.chown(udb.database, pw.pw_uid, pw.pw_gid)
+
+ def create(self, udb):
+ self._db_accessible_test(udb.database)
+ self._grant(udb)
+
+ def drop(self, udb):
+ os.unlink(udb.database)
+
+ def script_drop(self, udb):
+ self.args.script_output.write("rm {}\n".format(udb.database))
+
+ def fix_grants(self, udb):
+ self._grant(udb)
+
+
+class PostgreSQL_Driver(Abstract_Driver):
+
+ def __init__(self, args):
+ try:
+ self.driver
+ except AttributeError:
+ import psycopg2
+ self.driver = psycopg2
+ self.args = args
+ if args.postgresql_root_username and (os.getuid() == 0 or os.geteuid() == 0):
+ self._pw = pwd.getpwnam(args.postgresql_root_username)
+ else:
+ self._pw = None
+ self.log("Initialized PostgreSQL driver, pw {!r}".format(self._pw))
+
+ def _seteuid(self, new_uid):
+ old_uid = os.geteuid()
+ if new_uid != old_uid:
+ self.log("PostgreSQL driver changing EUID from {} to {}".format(old_uid, new_uid))
+ os.seteuid(new_uid)
+ return old_uid
+
+ def execute(self, *args):
+ try:
+ self._cur
+ except AttributeError:
+ self.log("PostgreSQL driver opening connection to database {}".format(self.args.postgresql_root_database))
+ if self._pw is not None:
+ euid = self._seteuid(self._pw.pw_uid)
+ try:
+ self._db = self.driver.connect(database = self.args.postgresql_root_database)
+ self._db.autocommit = True
+ self._cur = self._db.cursor()
+ finally:
+ if self._pw is not None:
+ self._seteuid(euid)
+ self.log("PostgreSQL driver executing {}".format(", ".join(args)))
+ return self._cur.execute(*args)
+
+ def _db_accessible_test(self, udb):
+ pw = pwd.getpwnam(udb.username)
+ uid = self._seteuid(pw.pw_uid)
+ try:
+ self.driver.connect(database = udb.database, user = udb.username , password = udb.password).close()
+ finally:
+ self._seteuid(uid)
+
+ def db_exists(self, udb):
+ self.execute("SELECT COUNT(*) FROM pg_database WHERE datname = '{0.database}'".format(udb))
+ return bool(self.fetchone()[0])
+
+ def role_in_use(self, udb):
+ self.execute(textwrap.dedent('''\
+ SELECT COUNT(*) FROM pg_database
+ JOIN pg_roles ON pg_database.datdba = pg_roles.oid
+ WHERE pg_roles.rolname = '{0.username}'
+ '''.format(udb)))
+ return bool(self.fetchone()[0])
+
+ def create(self, udb):
+ if not self.role_in_use(udb):
+ self.execute("CREATE ROLE {0.username} LOGIN PASSWORD '{0.password}'".format(udb))
+ if not self.db_exists(udb):
+ self.execute("CREATE DATABASE {0.database} OWNER {0.username}".format(udb))
+
+ def drop(self, udb):
+ self.execute("DROP DATABASE IF EXISTS {0.database}".format(udb))
+ if not self.role_in_use(udb):
+ self.execute("DROP ROLE IF EXISTS {0.username}".format(udb))
+
+ def script_drop(self, udb):
+ self.args.script_output.write(textwrap.dedent('''\
+ DROP DATABASE IF EXISTS {0.database};
+ DO $$ BEGIN
+ IF NOT EXISTS (SELECT * FROM pg_database JOIN pg_roles
+ ON pg_database.datdba = pg_roles.oid
+ WHERE pg_roles.rolname = '{0.username}')
+ THEN
+ DROP ROLE IF EXISTS {0.username};
+ END IF;
+ END $$;
+ '''.format(udb)))
+
+ def fix_grants(self, udb):
+ self.execute("ALTER DATABASE {0.database} OWNER TO {0.username}".format(udb))
+ self.execute("ALTER ROLE {0.username} WITH PASSWORD '{0.password}".format(udb))
+
+
+class UserDB(object):
+    """
+    Class to wrap access parameters for a particular database.
+
+    Each public method below corresponds to one command-line
+    subcommand (with "-" mapped to "_").
+    """
+
+    # Map of sql-engine names (from rpki.conf) to driver classes;
+    # the drivers themselves are singletons.
+    drivers = dict(sqlite3 = SQLite3_Driver,
+                   mysql = MySQL_Driver,
+                   postgresql = PostgreSQL_Driver)
+
+    def __init__(self, args, name):
+        # NOTE(review): "cfg" here is the module-level parser set up in
+        # the __main__ code below, not a parameter -- confirm intended.
+        self.database = cfg.get("sql-database", section = name)
+        self.username = cfg.get("sql-username", section = name)
+        self.password = cfg.get("sql-password", section = name)
+        self.engine = cfg.get("sql-engine", section = name)
+        self.driver = self.drivers[self.engine](args)
+        self.args = args
+
+    def drop(self):
+        # Drop only databases we can actually reach, unless --force.
+        if self.args.force or self.driver.db_accessible(self):
+            self.driver.drop(self)
+
+    def create(self):
+        # Create only databases we can't already reach, unless --force.
+        if self.args.force or not self.driver.db_accessible(self):
+            self.driver.create(self)
+
+    def script_drop(self):
+        self.driver.script_drop(self)
+
+    def drop_and_create(self):
+        if self.args.force or self.driver.db_accessible(self):
+            self.driver.drop(self)
+            self.driver.create(self)
+
+    def fix_grants(self):
+        if self.args.force or not self.driver.db_accessible(self):
+            self.driver.fix_grants(self)
+
+
+# Command-line parsing: generic options first, then engine-specific
+# root-credential options, then one subparser per subcommand.  Each
+# subcommand name maps onto a UserDB method of the same name.
+parser = argparse.ArgumentParser(description = __doc__)
+parser.add_argument("-c", "--config",
+                    help = "specify alternate location for rpki.conf")
+parser.add_argument("-d", "--debug", action = "store_true",
+                    help = "enable debugging (eg, Python backtraces)")
+parser.add_argument("-v", "--verbose", action = "store_true",
+                    help = "whistle while you work")
+parser.add_argument("-f", "--force", action = "store_true",
+                    help = "force database create, drop, or grant regardless of current state")
+
+# MySQL only: where to find root credentials non-interactively.
+parser.add_argument("--mysql-defaults",
+                    help = "specify MySQL root access credentials via a configuration file")
+
+# PostgreSQL only: how to reach the control database.
+parser.add_argument("--postgresql-root-database", default = "postgres",
+                    help = "name of PostgreSQL control database")
+parser.add_argument("--postgresql-root-username",
+                    help = "username of PostgreSQL control role")
+
+subparsers = parser.add_subparsers(title = "Commands", metavar = "", dest = "dispatch")
+
+subparsers.add_parser("create",
+                      help = "create databases and load schemas")
+
+subparsers.add_parser("drop",
+                      help = "drop databases")
+
+subparser = subparsers.add_parser("script-drop",
+                                  help = "show SQL commands to drop databases")
+subparser.add_argument("script_output",
+                       nargs = "?", type = argparse.FileType("w"), default = "-",
+                       help = "destination for drop script")
+
+subparsers.add_parser("drop-and-create",
+                      help = "drop databases then recreate them and load schemas")
+
+subparsers.add_parser("fix-grants",
+                      help = "whack database to match configuration file")
+
+args = parser.parse_args()
+
+try:
+
+    cfg = rpki.config.parser(set_filename = args.config, section = "myrpki")
+
+    # Daemons whose databases we manage; rcynic always gets one
+    # regardless of the start_* settings.
+    names = [name for name in ("irdbd", "rpkid", "pubd")
+             if cfg.getboolean("start_" + name, False)]
+    names.append("rcynic")
+
+    # For now, we quietly ignore missing sections rather than throwing an exception.
+    # I could make a case either way for this, but ignoring missing sections is a
+    # lot easier to clean up while debugging the installation scripts.
+
+    for name in names:
+        if cfg.has_section(name):
+            udb = UserDB(args = args, name = name)
+            # Subcommand name ("drop-and-create") maps directly onto
+            # the corresponding UserDB method ("drop_and_create").
+            method = args.dispatch.replace("-", "_")
+            getattr(udb, method)()
+
+except Exception, e:
+    # Without --debug, turn any failure into a one-line exit message.
+    if args.debug:
+        raise
+    else:
+        sys.exit(str(e))
diff --git a/rp/rcynic/Makefile.in b/rp/rcynic/Makefile.in
index a2d844bd..52c67fde 100644
--- a/rp/rcynic/Makefile.in
+++ b/rp/rcynic/Makefile.in
@@ -1,17 +1,7 @@
# $Id$
-NAME = rcynic
-
-BIN = ${NAME}
-SRC = ${NAME}.c
-OBJ = ${NAME}.o
-
-GEN = defstack.h
-
-OBJS = ${OBJ} bio_f_linebreak.o
-
CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror-implicit-function-declaration
-LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@
+LDFLAGS = @LDFLAGS@
LIBS = @LIBS@
AWK = @AWK@
@@ -40,53 +30,48 @@ abs_top_srcdir = @abs_top_srcdir@
abs_top_builddir = @abs_top_builddir@
srcdir = @srcdir@
-RCYNIC_BIN_RCYNIC = @RCYNIC_BIN_RCYNIC@
+RCYNIC_BIN_RCYNIC = ${DESTDIR}${bindir}/rcynic
RCYNIC_CONF_DATA = @RCYNIC_CONF_DATA@
-RCYNIC_CONF_FILE = @RCYNIC_CONF_FILE@
-RCYNIC_CONF_RSYNC = @RCYNIC_CONF_RSYNC@
-RCYNIC_CONF_TA_DIR = @RCYNIC_CONF_TA_DIR@
-RCYNIC_CRON_USER = @RCYNIC_CRON_USER@
-RCYNIC_DATA_DIR = ${RCYNIC_DIR}/data
+RCYNIC_CONF_FILE = ${DESTDIR}${sysconfdir}/rcynic.conf
+RCYNIC_CONF_RSYNC = @RSYNC@
+RCYNIC_CONF_TA_DIR = ${sysconfdir}/rpki/trust-anchors
+RCYNIC_CRON_USER = ${RPKI_USER}
+RCYNIC_DATA_DIR = ${DESTDIR}${RCYNIC_DIR}/data
RCYNIC_DIR = @RCYNIC_DIR@
-RCYNIC_DIRS = ${RCYNIC_TA_DIR} ${RCYNIC_JAIL_DIRS} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
-RCYNIC_GECOS = RPKI Validation System
-RCYNIC_GROUP = @RCYNIC_GROUP@
+RCYNIC_DIRS = ${RCYNIC_TA_DIR} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
+RPKI_GECOS = RPKI System Software
+RPKI_GROUP = @RPKI_GROUP@
RCYNIC_HTML_DIR = @RCYNIC_HTML_DIR@
RCYNIC_INSTALL_TARGETS = @RCYNIC_INSTALL_TARGETS@
-RCYNIC_JAIL_DIRS = @RCYNIC_JAIL_DIRS@
-RCYNIC_STATIC_RSYNC = @RCYNIC_STATIC_RSYNC@
-RCYNIC_TA_DIR = @RCYNIC_TA_DIR@
-RCYNIC_USER = @RCYNIC_USER@
-RPKIRTR_DIR = ${RCYNIC_DIR}/rpki-rtr
-RPKIRTR_GECOS = RPKI router server
-RPKIRTR_GROUP = rpkirtr
-RPKIRTR_MODE = 775
-RPKIRTR_USER = rpkirtr
-
-all: ${BIN} ${RCYNIC_STATIC_RSYNC}
+RCYNIC_TA_DIR = ${DESTDIR}${sysconfdir}/rpki/trust-anchors
+RPKI_USER = @RPKI_USER@
+RPKIRTR_DIR = ${DESTDIR}${RCYNIC_DIR}/rpki-rtr
+
+OBJS = rcynic.o bio_f_linebreak.o
+
+all: rcynicng
clean:
- if test -r static-rsync/Makefile; then cd static-rsync; ${MAKE} $@; fi
- rm -f ${BIN} ${OBJS}
+ rm -f rcynic ${OBJS}
-${OBJ}: ${SRC} ${GEN}
+rcynic.o: rcynic.c defstack.h
-${BIN}: ${OBJS}
+rcynic: ${OBJS}
${CC} ${CFLAGS} -o $@ ${OBJS} ${LDFLAGS} ${LIBS}
-${GEN}: ${SRC}
- ${PYTHON} ${abs_top_srcdir}/buildtools/defstack.py ${SRC} >$@.tmp
+defstack.h: rcynic.c
+ ${PYTHON} ${abs_top_srcdir}/buildtools/defstack.py rcynic.c >$@.tmp
mv $@.tmp $@
tags: TAGS
-TAGS: ${SRC} ${GEN}
- etags ${SRC} ${GEN}
+TAGS: rcynic.c defstack.h
+ etags rcynic.c defstack.h
-test: ${BIN}
+test: rcynic
if test -r rcynic.conf; \
then \
- ./${BIN} -j 0 && \
+ ./rcynic -j 0 && \
test -r rcynic.xml && \
echo && \
./rcynic-text rcynic.xml; \
@@ -108,33 +93,31 @@ static-rsync/rsync:
install: all ${RCYNIC_INSTALL_TARGETS}
install-always: \
- install-directories install-rcynic install-rcynic-conf
+ install-directories install-rcynic install-tals
install-postconf: \
install-user-and-group install-directory-ownership install-crontab
-install-jailed: \
- install-static-rsync install-shared-libraries install-rc-scripts
-
install-directories: ${RCYNIC_DIRS}
${RCYNIC_DIRS} ${DESTDIR}${bindir} ${DESTDIR}${sysconfdir}:
${INSTALL} -v -d $@
install-directory-ownership: ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
- chown ${RCYNIC_USER}:${RCYNIC_GROUP} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR}
- chown ${RPKIRTR_USER}:${RCYNIC_GROUP} ${RPKIRTR_DIR}/sockets
- chmod ${RPKIRTR_MODE} ${RPKIRTR_DIR}/sockets
+ chown ${RPKI_USER}:${RPKI_GROUP} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
-install-rcynic-conf: ${RCYNIC_CONF_FILE}
+install-tals:
+ ${INSTALL} -v -d ${RCYNIC_TA_DIR}
+ ${INSTALL} -v -p -m 444 sample-trust-anchors/*.tal ${RCYNIC_TA_DIR}
+
+# We don't install rcynic.conf anymore. Keep this for now as internal documentation,
+# clean up later.
${RCYNIC_CONF_FILE}:
@echo
- @echo Found no ${RCYNIC_CONF_FILE}, creating basic config and installing default trust anchor locators.
+ @echo Found no ${RCYNIC_CONF_FILE}, creating basic configuration.
@echo You might want to edit this.
@echo
- ${INSTALL} -v -d ${RCYNIC_TA_DIR}
- ${INSTALL} -v -p -m 444 sample-trust-anchors/*.tal ${RCYNIC_TA_DIR}
@echo > $@.tmp '# Basic rcynic configuration file with default trust anchors.'
@echo >>$@.tmp '# See documentation for details.'
@echo >>$@.tmp ''
@@ -153,13 +136,8 @@ ${RCYNIC_CONF_FILE}:
install-rcynic: ${RCYNIC_BIN_RCYNIC}
-${RCYNIC_BIN_RCYNIC}: ${BIN}
- ${INSTALL} -p -m 555 ${BIN} $@
-
-install-static-rsync: ${RCYNIC_DIR}/bin/rsync
-
-${RCYNIC_DIR}/bin/rsync: static-rsync/rsync
- ${INSTALL} -p -m 555 static-rsync/rsync $@
+${RCYNIC_BIN_RCYNIC}: rcynicng
+ ${INSTALL} -p -m 555 rcynicng $@
.FORCE:
diff --git a/rp/rcynic/rc-scripts/darwin/RCynic b/rp/rcynic/rc-scripts/darwin/RCynic
deleted file mode 100755
index d486a3c3..00000000
--- a/rp/rcynic/rc-scripts/darwin/RCynic
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh -
-#
-# $Id$
-#
-. /etc/rc.common
-
-name="rcynic"
-start_cmd="rcynic_start"
-stop_cmd="rcynic_stop"
-
-: ${rcynic_dir="/var/rcynic"}
-
-StartService()
-{
- /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
-
- if ! /sbin/mount_devfs devfs "${rcynic_dir}/dev"; then
- echo "Mounting devfs on ${rcynic_dir}/dev failed..."
- exit 1
- fi
-
- for i in /etc/localtime /etc/resolv.conf; do
- j="${rcynic_dir}${i}"
- if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- fi
- done
-
- /bin/ln -f /var/run/mDNSResponder "${rcynic_dir}/var/run/mDNSResponder"
-}
-
-StopService()
-{
- /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
-}
-
-RestartService()
-{
- StartService
-}
-
-RunService "$1"
diff --git a/rp/rcynic/rc-scripts/darwin/StartupParameters.plist b/rp/rcynic/rc-scripts/darwin/StartupParameters.plist
deleted file mode 100644
index ca46b676..00000000
--- a/rp/rcynic/rc-scripts/darwin/StartupParameters.plist
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
- <key>Description</key>
- <string>RCynic Setup</string>
- <key>OrderPreference</key>
- <string>None</string>
- <key>Provides</key>
- <array>
- <string>RCynic</string>
- </array>
- <key>Uses</key>
- <array>
- <string>Network</string>
- <string>Resolver</string>
- </array>
- </dict>
-</plist>
diff --git a/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic b/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic
deleted file mode 100755
index 9b7aa545..00000000
--- a/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/sh -
-#
-# $Id$
-#
-# PROVIDE: rcynic
-# REQUIRE: DAEMON
-# KEYWORD: nojail
-
-. /etc/rc.subr
-
-name="rcynic"
-start_cmd="rcynic_start"
-stop_cmd="rcynic_stop"
-
-: ${rcynic_dir="/var/rcynic"}
-
-rcynic_start()
-{
- /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
-
- if ! /sbin/mount -t devfs dev "${rcynic_dir}/dev"; then
- echo "Mounting devfs on ${rcynic_dir}/dev failed..."
- exit 1
- fi
-
- /sbin/devfs -m "${rcynic_dir}/dev" rule apply hide
- /sbin/devfs -m "${rcynic_dir}/dev" rule apply path null unhide
- /sbin/devfs -m "${rcynic_dir}/dev" rule apply path random unhide
-
- for i in /etc/localtime /etc/resolv.conf; do
- j="${rcynic_dir}${i}"
- if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- fi
- done
-}
-
-rcynic_stop()
-{
- /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/rp/rcynic/rcynic-cron b/rp/rcynic/rcynic-cron
index 53bfea9f..e7e564b3 100755
--- a/rp/rcynic/rcynic-cron
+++ b/rp/rcynic/rcynic-cron
@@ -28,83 +28,51 @@ our purposes. In theory this is portable to any Unix-like system.
import os
import sys
-import pwd
import fcntl
import errno
-import argparse
import rpki.autoconf
def run(*cmd, **kwargs):
- chroot_this = kwargs.pop("chroot_this", False)
- cwd = kwargs.pop("cwd", None)
- pid = os.fork()
- if pid == 0:
- if chroot_this:
- os.chdir(rpki.autoconf.RCYNIC_DIR)
- elif cwd is not None:
- os.chdir(cwd)
- if we_are_root:
- os.initgroups(pw.pw_name, pw.pw_gid)
- if chroot_this:
- os.chroot(rpki.autoconf.RCYNIC_DIR)
- if we_are_root:
- os.setgid(pw.pw_gid)
- os.setuid(pw.pw_uid)
- os.closerange(3, os.sysconf("SC_OPEN_MAX"))
- os.execvp(cmd[0], cmd)
- os._exit(1)
- else:
- status = os.waitpid(pid, 0)[1]
- if status == 0:
- return
- elif os.WIFSIGNALED(status):
- sys.exit("Process %s exited with signal %s" % (" ".join(cmd), os.WTERMSIG(status)))
- elif os.WIFEXITED(status):
- sys.exit("Program %s exited with status %s" % (" ".join(cmd), os.WEXITSTATUS(status)))
+ cwd = kwargs.pop("cwd", None)
+ pid = os.fork()
+ if pid == 0:
+ if cwd is not None:
+ os.chdir(cwd)
+ os.closerange(3, os.sysconf("SC_OPEN_MAX"))
+ os.execvp(cmd[0], cmd)
+ os._exit(1) # pylint: disable=W0212
else:
- sys.exit("Program %s exited for unknown reason %s" % (" ".join(cmd), status))
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("--chroot", action = "store_true", help = "run chrooted")
-args = parser.parse_args()
-
-we_are_root = os.getuid() == 0
-
-if args.chroot and not we_are_root:
- sys.exit("Only root can --chroot")
+ status = os.waitpid(pid, 0)[1]
+ if status == 0:
+ return
+ elif os.WIFSIGNALED(status):
+ sys.exit("Process %s exited with signal %s" % (" ".join(cmd), os.WTERMSIG(status)))
+ elif os.WIFEXITED(status):
+ sys.exit("Program %s exited with status %s" % (" ".join(cmd), os.WEXITSTATUS(status)))
+ else:
+ sys.exit("Program %s exited for unknown reason %s" % (" ".join(cmd), status))
try:
- pw = pwd.getpwnam(rpki.autoconf.RCYNIC_USER)
-except KeyError:
- sys.exit("Could not find passwd entry for user %s" % rpki.autoconf.RCYNIC_USER)
-
-try:
- lock = os.open(os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock"),
- os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
- fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
- if we_are_root:
- os.fchown(lock, pw.pw_uid, pw.pw_gid)
+ lock = os.open(os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "lock"),
+ os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
+ fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except (IOError, OSError), e:
- if e.errno == errno.EAGAIN:
- sys.exit(0) # Another instance of this script is already running, exit silently
- else:
- sys.exit("Error %r opening lock %r" % (e.strerror, os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock")))
+ if e.errno == errno.EAGAIN:
+ sys.exit(0) # Another instance of this script is already running, exit silently
+ else:
+ sys.exit("Error %r opening lock %r" % (e.strerror, os.path.join(rpki.autoconf.RCYNIC_DIR, "data/lock")))
-if args.chroot:
- run("/bin/rcynic", "-c", "/etc/rcynic.conf", chroot_this = True)
-else:
- run(os.path.join(rpki.autoconf.bindir, "rcynic"), "-c", os.path.join(rpki.autoconf.sysconfdir, "rcynic.conf"))
+run(os.path.join(rpki.autoconf.bindir, "rcynic"))
run(os.path.join(rpki.autoconf.bindir, "rpki-rtr"),
"cronjob",
- os.path.join(rpki.autoconf.RCYNIC_DIR, "data/authenticated"),
cwd = os.path.join(rpki.autoconf.RCYNIC_DIR, "rpki-rtr"))
prog = os.path.join(rpki.autoconf.libexecdir, "rpkigui-rcynic")
if os.path.exists(prog):
- run(prog)
+ run(prog)
if rpki.autoconf.RCYNIC_HTML_DIR and os.path.exists(os.path.dirname(rpki.autoconf.RCYNIC_HTML_DIR)):
- run(os.path.join(rpki.autoconf.bindir, "rcynic-html"),
- os.path.join(rpki.autoconf.RCYNIC_DIR, "data/rcynic.xml"),
- rpki.autoconf.RCYNIC_HTML_DIR)
+ run(os.path.join(rpki.autoconf.bindir, "rcynic-html"),
+ os.path.join(rpki.autoconf.RCYNIC_DIR, "data/rcynic.xml"),
+ rpki.autoconf.RCYNIC_HTML_DIR)
diff --git a/rp/rcynic/rcynic-dump b/rp/rcynic/rcynic-dump
new file mode 100755
index 00000000..0c7f898f
--- /dev/null
+++ b/rp/rcynic/rcynic-dump
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+
+# $Id$
+
+"""
+Dump rcynicng database to old-style disk files.
+
+This is a slow operation due to blocking operations in the underlying
+filesystem, so in the long run we will almost certainly want to
+rewrite the RP toolkit to use the database directly, but it's (much)
+easier to compare results between the old and new validation engines
+when they use the same data representation.
+"""
+
+import os
+import sys
+import time
+import shutil
+import logging
+import argparse
+
+import rpki.config
+import rpki.autoconf
+
+logger = logging.getLogger("rcynic-dump")
+
+os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.rcynic")
+time.tzset()
+
+logging.basicConfig(level = logging.DEBUG, format = "%(asctime)s %(message)s", datefmt = "%Y-%m-%d %H:%M:%S")
+
+parser = argparse.ArgumentParser(description = __doc__)
+parser.add_argument("-c", "--config")
+parser.add_argument("output_tree", nargs = "?", default = "rcynic-data")
+args = parser.parse_args()
+
+rpki.config.parser(set_filename = args.config, section = "rcynic")
+
+import django
+django.setup()
+
+import rpki.rcynicdb
+
+def uri_to_filename(obj, base):
+    # Map an object URI (e.g. "rsync://host/path") to a path under
+    # args.output_tree/<base>/, dropping the "scheme://" prefix.
+    # Raises ValueError if the URI has no "://" separator.
+    return os.path.join(args.output_tree, base, obj.uri[obj.uri.index("://") + 3:])
+
+def sha256_to_filename(obj):
+    # Hash-addressed path with a two-hex-digit fan-out directory,
+    # reusing the URI's last four characters (e.g. ".cer") as suffix.
+    return os.path.join(args.output_tree, "sha256", obj.sha256[:2], obj.sha256 + obj.uri[-4:])
+
+def authenticated_to_dirname(authenticated):
+    # Directory name for one validation run, timestamped in UTC
+    # (TZ is forced to UTC at the top of this script).
+    return "authenticated-{}".format(authenticated.started.strftime("%Y-%m-%dT%H:%M:%SZ"))
+
+# Paths written or confirmed up-to-date during this dump.
+seen = set()
+
+def check_der(fn, der):
+    # Report whether file fn's content is exactly the DER blob der.
+    # Raises IOError if fn doesn't exist; callers guard with
+    # os.path.exists() first.
+    with open(fn, "rb") as f:
+        return der == f.read()
+
+def mkdir_maybe(fn):
+ dn = os.path.dirname(fn)
+ if not os.path.exists(dn):
+ os.makedirs(dn)
+
+for obj in rpki.rcynicdb.models.RPKIObject.objects.all():
+
+ hfn = sha256_to_filename(obj)
+ ufn = uri_to_filename(obj, "unauthenticated")
+
+ if not os.path.exists(hfn) or not check_der(hfn, obj.der):
+ mkdir_maybe(hfn)
+ with open(hfn, "wb") as f:
+ f.write(obj.der)
+
+ seen.add(hfn)
+ seen.add(ufn)
+
+ for auth in obj.authenticated.all():
+ afn = uri_to_filename(obj, authenticated_to_dirname(auth))
+ mkdir_maybe(afn)
+ if not os.path.exists(afn):
+ os.link(hfn, afn)
+ elif not check_der(afn, obj.der):
+ os.unlink(afn)
+ os.link(hfn, afn)
+ seen.add(afn)
+
+auth = rpki.rcynicdb.models.Authenticated.objects.order_by("-started").first()
+
+if auth is not None:
+ src = authenticated_to_dirname(auth)
+ dst = os.path.join(args.output_tree, "authenticated")
+ if os.path.exists(dst):
+ os.unlink(dst)
+ os.symlink(src, dst)
diff --git a/rp/rcynic/rcynic-html b/rp/rcynic/rcynic-html
index ef566440..154193b2 100755
--- a/rp/rcynic/rcynic-html
+++ b/rp/rcynic/rcynic-html
@@ -32,361 +32,363 @@ import copy
import rpki.autoconf
try:
- from lxml.etree import (ElementTree, Element, SubElement, Comment)
+ from lxml.etree import (ElementTree, Element, SubElement, Comment)
except ImportError:
- from xml.etree.ElementTree import (ElementTree, Element, SubElement, Comment)
+ from xml.etree.ElementTree import (ElementTree, Element, SubElement, Comment)
session = None
args = None
def parse_options():
- global args
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("--refresh", type = int, default = 1800,
- help = "refresh interval for generated HTML")
- parser.add_argument("--hide-problems", action = "store_true",
- help = "don't generate \"problems\" page")
- parser.add_argument("--hide-graphs", action = "store_true",
- help = "don't generate graphs")
- parser.add_argument("--hide-object-counts", action = "store_true",
- help = "don't display object counts")
- parser.add_argument("--dont-update-rrds", action = "store_true",
- help = "don't add new data to RRD databases")
- parser.add_argument("--png-height", type = int, default = 190,
- help = "height of PNG images")
- parser.add_argument("--png-width", type = int, default = 1350,
- help = "width of PNG images")
- parser.add_argument("--svg-height", type = int, default = 600,
- help = "height of SVG images")
- parser.add_argument("--svg-width", type = int, default = 1200,
- help = "width of SVG images")
- parser.add_argument("--eps-height", type = int, default = 0,
- help = "height of EPS images")
- parser.add_argument("--eps-width", type = int, default = 0,
- help = "width of EPS images")
- parser.add_argument("--rrdtool-binary", default = rpki.autoconf.RRDTOOL,
- help = "location of rrdtool binary")
- parser.add_argument("input_file", type = argparse.FileType("r"),
- help = "XML input file")
- parser.add_argument("output_directory",
- help = "output directory")
- args = parser.parse_args()
+ global args # pylint: disable=W0603
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("--refresh", type = int, default = 1800,
+ help = "refresh interval for generated HTML")
+ parser.add_argument("--hide-problems", action = "store_true",
+ help = "don't generate \"problems\" page")
+ parser.add_argument("--hide-graphs", action = "store_true",
+ help = "don't generate graphs")
+ parser.add_argument("--hide-object-counts", action = "store_true",
+ help = "don't display object counts")
+ parser.add_argument("--dont-update-rrds", action = "store_true",
+ help = "don't add new data to RRD databases")
+ parser.add_argument("--png-height", type = int, default = 190,
+ help = "height of PNG images")
+ parser.add_argument("--png-width", type = int, default = 1350,
+ help = "width of PNG images")
+ parser.add_argument("--svg-height", type = int, default = 600,
+ help = "height of SVG images")
+ parser.add_argument("--svg-width", type = int, default = 1200,
+ help = "width of SVG images")
+ parser.add_argument("--eps-height", type = int, default = 0,
+ help = "height of EPS images")
+ parser.add_argument("--eps-width", type = int, default = 0,
+ help = "width of EPS images")
+ parser.add_argument("--rrdtool-binary", default = rpki.autoconf.RRDTOOL,
+ help = "location of rrdtool binary")
+ parser.add_argument("input_file", type = argparse.FileType("r"),
+ help = "XML input file")
+ parser.add_argument("output_directory",
+ help = "output directory")
+ args = parser.parse_args()
def parse_utc(s):
- return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ")))
+ return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ")))
class Label(object):
- moods = ["bad", "warn", "good"]
+ moods = ["bad", "warn", "good"]
- def __init__(self, elt):
- self.code = elt.tag
- self.mood = elt.get("kind")
- self.text = elt.text.strip()
- self.count = 0
+ def __init__(self, elt):
+ self.code = elt.tag
+ self.mood = elt.get("kind")
+ self.text = elt.text.strip()
+ self.count = 0
- def get_count(self):
- return self.count
+ def get_count(self):
+ return self.count
- @property
- def sort_key(self):
- try:
- return self.moods.index(self.mood)
- except ValueError:
- return len(self.moods)
+ @property
+ def sort_key(self):
+ try:
+ return self.moods.index(self.mood)
+ except ValueError:
+ return len(self.moods)
class Validation_Status(object):
- def __init__(self, elt, label_map):
- self.uri = elt.text.strip()
- self.timestamp = elt.get("timestamp")
- self.generation = elt.get("generation")
- self.hostname = urlparse.urlparse(self.uri).hostname or "[None]"
- self.fn2 = os.path.splitext(self.uri)[1] or None if self.generation else None
- self.label = label_map[elt.get("status")]
+ def __init__(self, elt, label_map):
+ self.uri = elt.text.strip()
+ self.timestamp = elt.get("timestamp")
+ self.generation = elt.get("generation")
+ self.hostname = urlparse.urlparse(self.uri).hostname or "[None]"
+ self.fn2 = os.path.splitext(self.uri)[1] or None if self.generation else None
+ self.label = label_map[elt.get("status")]
- def sort_key(self):
- return (self.label.sort_key, self.timestamp, self.hostname, self.fn2, self.generation)
+ def sort_key(self):
+ return (self.label.sort_key, self.timestamp, self.hostname, self.fn2, self.generation)
- @property
- def code(self):
- return self.label.code
+ @property
+ def code(self):
+ return self.label.code
- @property
- def mood(self):
- return self.label.mood
+ @property
+ def mood(self):
+ return self.label.mood
- @property
- def accepted(self):
- return self.label.code == "object_accepted"
+ @property
+ def accepted(self):
+ return self.label.code == "object_accepted"
- @property
- def rejected(self):
- return self.label.code == "object_rejected"
+ @property
+ def rejected(self):
+ return self.label.code == "object_rejected"
- @property
- def is_current(self):
- return self.generation == "current"
+ @property
+ def is_current(self):
+ return self.generation == "current"
- @property
- def is_backup(self):
- return self.generation == "backup"
+ @property
+ def is_backup(self):
+ return self.generation == "backup"
- @property
- def is_problem(self):
- return self.label.mood != "good"
+ @property
+ def is_problem(self):
+ return self.label.mood != "good"
- @property
- def is_connection_problem(self):
- return self.label.mood != "good" and self.label.code.startswith("rsync_transfer_")
+ @property
+ def is_connection_problem(self):
+ return self.label.mood != "good" and self.label.code.startswith("rsync_transfer_")
- @property
- def is_object_problem(self):
- return self.label.mood != "good" and not self.label.code.startswith("rsync_transfer_")
+ @property
+ def is_object_problem(self):
+ return self.label.mood != "good" and not self.label.code.startswith("rsync_transfer_")
- @property
- def is_connection_detail(self):
- return self.label.code.startswith("rsync_transfer_")
+ @property
+ def is_connection_detail(self):
+ return self.label.code.startswith("rsync_transfer_")
- @property
- def is_object_detail(self):
- return not self.label.code.startswith("rsync_transfer_")
+ @property
+ def is_object_detail(self):
+ return not self.label.code.startswith("rsync_transfer_")
class Problem_Mixin(object):
- @property
- def connection_problems(self):
- result = [v for v in self.validation_status if v.is_connection_problem]
- result.sort(key = Validation_Status.sort_key)
- return result
+ # pylint: disable=E1101
- @property
- def object_problems(self):
- result = [v for v in self.validation_status if v.is_object_problem]
- result.sort(key = Validation_Status.sort_key)
- return result
+ @property
+ def connection_problems(self):
+ result = [v for v in self.validation_status if v.is_connection_problem]
+ result.sort(key = Validation_Status.sort_key)
+ return result
+
+ @property
+ def object_problems(self):
+ result = [v for v in self.validation_status if v.is_object_problem]
+ result.sort(key = Validation_Status.sort_key)
+ return result
class Host(Problem_Mixin):
- def __init__(self, hostname, timestamp):
- self.hostname = hostname
- self.timestamp = timestamp
- self.elapsed = 0
- self.connections = 0
- self.failures = 0
- self.uris = set()
- self.graph = None
- self.counters = {}
- self.totals = {}
- self.validation_status = []
-
- def add_connection(self, elt):
- self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
- self.connections += 1
- if elt.get("error") is not None:
- self.failures += 1
-
- def add_validation_status(self, v):
- self.validation_status.append(v)
- if v.generation == "current":
- self.uris.add(v.uri)
- self.counters[(v.fn2, v.generation, v.label)] = self.get_counter(v.fn2, v.generation, v.label) + 1
- self.totals[v.label] = self.get_total(v.label) + 1
- v.label.count += 1
-
- def get_counter(self, fn2, generation, label):
- return self.counters.get((fn2, generation, label), 0)
-
- def get_total(self, label):
- return self.totals.get(label, 0)
-
- @property
- def failed(self):
- return 1 if self.failures > 0 else 0
-
- @property
- def objects(self):
- return len(self.uris)
-
- field_table = (("connections", "GAUGE"),
- ("objects", "GAUGE"),
- ("elapsed", "GAUGE"),
- ("failed", "ABSOLUTE"))
-
- rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps
- for steps in (1, 4, 24))
-
- @classmethod
- def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"):
- return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum)
- for field in cls.field_table]
-
- @property
- def field_values(self):
- return tuple(str(getattr(self, field[0])) for field in self.field_table)
-
- @classmethod
- def field_defs(cls, filebase):
- return ["DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], filebase, field[0])
- for field in cls.field_table]
-
- graph_opts = (
- "--vertical-label", "Sync time (seconds)",
- "--right-axis-label", "Objects (count)",
- "--lower-limit", "0",
- "--right-axis", "1:0",
- "--full-size-mode" )
-
- graph_cmds = (
-
- # Split elapsed into separate data sets, so we can color
- # differently to indicate how succesful transfer was. Intent is
- # that exactly one of these be defined for every value in elapsed.
-
- r"CDEF:success=failed,UNKN,elapsed,IF",
- r"CDEF:failure=connections,1,EQ,failed,*,elapsed,UNKN,IF",
- r"CDEF:partial=connections,1,NE,failed,*,elapsed,UNKN,IF",
-
- # Show connection timing first, as color-coded semi-transparent
- # areas with opaque borders. Intent is to make the colors stand
- # out, since they're a major health indicator. Transparency is
- # handled via an alpha channel (fourth octet of color code). We
- # draw this stuff first so that later lines can overwrite it.
-
- r"AREA:success#00FF0080:Sync time (success)",
- r"AREA:partial#FFA50080:Sync time (partial failure)",
- r"AREA:failure#FF000080:Sync time (total failure)",
-
- r"LINE1:success#00FF00", # Green
- r"LINE1:partial#FFA500", # Orange
- r"LINE1:failure#FF0000", # Red
-
- # Now show object counts, as a simple black line.
-
- r"LINE1:objects#000000:Objects", # Black
-
- # Add averages over period to chart legend.
-
- r"VDEF:avg_elapsed=elapsed,AVERAGE",
- r"VDEF:avg_connections=connections,AVERAGE",
- r"VDEF:avg_objects=objects,AVERAGE",
- r"COMMENT:\j",
- r"GPRINT:avg_elapsed:Average sync time (seconds)\: %5.2lf",
- r"GPRINT:avg_connections:Average connection count\: %5.2lf",
- r"GPRINT:avg_objects:Average object count\: %5.2lf" )
-
- graph_periods = (("week", "-1w"),
- ("month", "-31d"),
- ("year", "-1y"))
-
- def rrd_run(self, cmd):
- try:
- cmd = [str(i) for i in cmd]
- cmd.insert(0, args.rrdtool_binary)
- subprocess.check_call(cmd, stdout = open("/dev/null", "w"))
- except OSError, e:
- sys.exit("Problem running %s, perhaps you need to set --rrdtool-binary? (%s)" % (args.rrdtool_binary, e))
- except subprocess.CalledProcessError, e:
- sys.exit("Failure running %s: %s" % (args.rrdtool_binary, e))
-
- def rrd_update(self):
- filename = os.path.join(args.output_directory, self.hostname) + ".rrd"
- if not os.path.exists(filename):
- cmd = ["create", filename, "--start", self.timestamp - 1, "--step", "3600"]
- cmd.extend(self.field_ds_specifiers())
- cmd.extend(self.rras)
- self.rrd_run(cmd)
- self.rrd_run(["update", filename,
- "%s:%s" % (self.timestamp, ":".join(str(v) for v in self.field_values))])
-
- def rrd_graph(self, html):
- # pylint: disable=W0622
- filebase = os.path.join(args.output_directory, self.hostname)
- formats = [format for format in ("png", "svg", "eps")
- if getattr(args, format + "_width") and getattr(args, format + "_height")]
- for period, start in self.graph_periods:
- for format in formats:
- cmds = [ "graph", "%s_%s.%s" % (filebase, period, format),
- "--title", "%s last %s" % (self.hostname, period),
- "--start", start,
- "--width", getattr(args, format + "_width"),
- "--height", getattr(args, format + "_height"),
- "--imgformat", format.upper() ]
- cmds.extend(self.graph_opts)
- cmds.extend(self.field_defs(filebase))
- cmds.extend(self.graph_cmds)
- self.rrd_run(cmds)
- img = Element("img", src = "%s_%s.png" % (self.hostname, period),
- width = str(args.png_width),
- height = str(args.png_height))
- if self.graph is None:
- self.graph = copy.copy(img)
- html.BodyElement("h2").text = "%s over last %s" % (self.hostname, period)
- html.BodyElement("a", href = "%s_%s_svg.html" % (self.hostname, period)).append(img)
- html.BodyElement("br")
- svg_html = HTML("%s over last %s" % (self.hostname, period),
- "%s_%s_svg" % (self.hostname, period))
- svg_html.BodyElement("img", src = "%s_%s.svg" % (self.hostname, period))
- svg_html.close()
+ def __init__(self, hostname, timestamp):
+ self.hostname = hostname
+ self.timestamp = timestamp
+ self.elapsed = 0
+ self.connections = 0
+ self.failures = 0
+ self.uris = set()
+ self.graph = None
+ self.counters = {}
+ self.totals = {}
+ self.validation_status = []
+
+ def add_connection(self, elt):
+ self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
+ self.connections += 1
+ if elt.get("error") is not None:
+ self.failures += 1
+
+ def add_validation_status(self, v):
+ self.validation_status.append(v)
+ if v.generation == "current":
+ self.uris.add(v.uri)
+ self.counters[(v.fn2, v.generation, v.label)] = self.get_counter(v.fn2, v.generation, v.label) + 1
+ self.totals[v.label] = self.get_total(v.label) + 1
+ v.label.count += 1
+
+ def get_counter(self, fn2, generation, label):
+ return self.counters.get((fn2, generation, label), 0)
+
+ def get_total(self, label):
+ return self.totals.get(label, 0)
+
+ @property
+ def failed(self):
+ return 1 if self.failures > 0 else 0
+
+ @property
+ def objects(self):
+ return len(self.uris)
+
+ field_table = (("connections", "GAUGE"),
+ ("objects", "GAUGE"),
+ ("elapsed", "GAUGE"),
+ ("failed", "ABSOLUTE"))
+
+ rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps
+ for steps in (1, 4, 24))
+
+ @classmethod
+ def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"):
+ return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum)
+ for field in cls.field_table]
+
+ @property
+ def field_values(self):
+ return tuple(str(getattr(self, field[0])) for field in self.field_table)
+
+ @classmethod
+ def field_defs(cls, filebase):
+ return ["DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], filebase, field[0])
+ for field in cls.field_table]
+
+ graph_opts = (
+ "--vertical-label", "Sync time (seconds)",
+ "--right-axis-label", "Objects (count)",
+ "--lower-limit", "0",
+ "--right-axis", "1:0",
+ "--full-size-mode" )
+
+ graph_cmds = (
+
+ # Split elapsed into separate data sets, so we can color
+        # differently to indicate how successful transfer was.  Intent is
+ # that exactly one of these be defined for every value in elapsed.
+
+ r"CDEF:success=failed,UNKN,elapsed,IF",
+ r"CDEF:failure=connections,1,EQ,failed,*,elapsed,UNKN,IF",
+ r"CDEF:partial=connections,1,NE,failed,*,elapsed,UNKN,IF",
+
+ # Show connection timing first, as color-coded semi-transparent
+ # areas with opaque borders. Intent is to make the colors stand
+ # out, since they're a major health indicator. Transparency is
+ # handled via an alpha channel (fourth octet of color code). We
+ # draw this stuff first so that later lines can overwrite it.
+
+ r"AREA:success#00FF0080:Sync time (success)",
+ r"AREA:partial#FFA50080:Sync time (partial failure)",
+ r"AREA:failure#FF000080:Sync time (total failure)",
+
+ r"LINE1:success#00FF00", # Green
+ r"LINE1:partial#FFA500", # Orange
+ r"LINE1:failure#FF0000", # Red
+
+ # Now show object counts, as a simple black line.
+
+ r"LINE1:objects#000000:Objects", # Black
+
+ # Add averages over period to chart legend.
+
+ r"VDEF:avg_elapsed=elapsed,AVERAGE",
+ r"VDEF:avg_connections=connections,AVERAGE",
+ r"VDEF:avg_objects=objects,AVERAGE",
+ r"COMMENT:\j",
+ r"GPRINT:avg_elapsed:Average sync time (seconds)\: %5.2lf",
+ r"GPRINT:avg_connections:Average connection count\: %5.2lf",
+ r"GPRINT:avg_objects:Average object count\: %5.2lf" )
+
+ graph_periods = (("week", "-1w"),
+ ("month", "-31d"),
+ ("year", "-1y"))
+
+ def rrd_run(self, cmd):
+ try:
+ cmd = [str(i) for i in cmd]
+ cmd.insert(0, args.rrdtool_binary)
+ subprocess.check_call(cmd, stdout = open("/dev/null", "w"))
+ except OSError, e:
+ sys.exit("Problem running %s, perhaps you need to set --rrdtool-binary? (%s)" % (args.rrdtool_binary, e))
+ except subprocess.CalledProcessError, e:
+ sys.exit("Failure running %s: %s" % (args.rrdtool_binary, e))
+
+ def rrd_update(self):
+ filename = os.path.join(args.output_directory, self.hostname) + ".rrd"
+ if not os.path.exists(filename):
+ cmd = ["create", filename, "--start", self.timestamp - 1, "--step", "3600"]
+ cmd.extend(self.field_ds_specifiers())
+ cmd.extend(self.rras)
+ self.rrd_run(cmd)
+ self.rrd_run(["update", filename,
+ "%s:%s" % (self.timestamp, ":".join(str(v) for v in self.field_values))])
+
+ def rrd_graph(self, html):
+ # pylint: disable=W0622
+ filebase = os.path.join(args.output_directory, self.hostname)
+ formats = [format for format in ("png", "svg", "eps")
+ if getattr(args, format + "_width") and getattr(args, format + "_height")]
+ for period, start in self.graph_periods:
+ for format in formats:
+ cmds = [ "graph", "%s_%s.%s" % (filebase, period, format),
+ "--title", "%s last %s" % (self.hostname, period),
+ "--start", start,
+ "--width", getattr(args, format + "_width"),
+ "--height", getattr(args, format + "_height"),
+ "--imgformat", format.upper() ]
+ cmds.extend(self.graph_opts)
+ cmds.extend(self.field_defs(filebase))
+ cmds.extend(self.graph_cmds)
+ self.rrd_run(cmds)
+ img = Element("img", src = "%s_%s.png" % (self.hostname, period),
+ width = str(args.png_width),
+ height = str(args.png_height))
+ if self.graph is None:
+ self.graph = copy.copy(img)
+ html.BodyElement("h2").text = "%s over last %s" % (self.hostname, period)
+ html.BodyElement("a", href = "%s_%s_svg.html" % (self.hostname, period)).append(img)
+ html.BodyElement("br")
+ svg_html = HTML("%s over last %s" % (self.hostname, period),
+ "%s_%s_svg" % (self.hostname, period))
+ svg_html.BodyElement("img", src = "%s_%s.svg" % (self.hostname, period))
+ svg_html.close()
class Session(Problem_Mixin):
- def __init__(self):
- self.hosts = {}
+ def __init__(self):
+ self.hosts = {}
- self.root = ElementTree(file = args.input_file).getroot()
+ self.root = ElementTree(file = args.input_file).getroot()
- self.rcynic_version = self.root.get("rcynic-version")
- self.rcynic_date = self.root.get("date")
- self.timestamp = parse_utc(self.rcynic_date)
+ self.rcynic_version = self.root.get("rcynic-version")
+ self.rcynic_date = self.root.get("date")
+ self.timestamp = parse_utc(self.rcynic_date)
- self.labels = [Label(elt) for elt in self.root.find("labels")]
- self.load_validation_status()
+ self.labels = [Label(elt) for elt in self.root.find("labels")]
+ self.load_validation_status()
- for elt in self.root.findall("rsync_history"):
- self.get_host(urlparse.urlparse(elt.text.strip()).hostname).add_connection(elt)
+ for elt in self.root.findall("rsync_history"):
+ self.get_host(urlparse.urlparse(elt.text.strip()).hostname).add_connection(elt)
- generations = set()
- fn2s = set()
+ generations = set()
+ fn2s = set()
- for v in self.validation_status:
- self.get_host(v.hostname).add_validation_status(v)
- generations.add(v.generation)
- fn2s.add(v.fn2)
+ for v in self.validation_status:
+ self.get_host(v.hostname).add_validation_status(v)
+ generations.add(v.generation)
+ fn2s.add(v.fn2)
- self.labels = [l for l in self.labels if l.count > 0]
+ self.labels = [l for l in self.labels if l.count > 0]
- self.hostnames = sorted(self.hosts)
- self.generations = sorted(generations)
- self.fn2s = sorted(fn2s)
+ self.hostnames = sorted(self.hosts)
+ self.generations = sorted(generations)
+ self.fn2s = sorted(fn2s)
- def load_validation_status(self):
- label_map = dict((label.code, label) for label in self.labels)
- full_validation_status = [Validation_Status(elt, label_map)
- for elt in self.root.findall("validation_status")]
- accepted_current = set(v.uri for v in full_validation_status
- if v.is_current and v.accepted)
- self.validation_status = [v for v in full_validation_status
- if not v.is_backup
- or v.uri not in accepted_current]
+ def load_validation_status(self):
+ label_map = dict((label.code, label) for label in self.labels)
+ full_validation_status = [Validation_Status(elt, label_map)
+ for elt in self.root.findall("validation_status")]
+ accepted_current = set(v.uri for v in full_validation_status
+ if v.is_current and v.accepted)
+ self.validation_status = [v for v in full_validation_status
+ if not v.is_backup
+ or v.uri not in accepted_current]
- def get_host(self, hostname):
- if hostname not in self.hosts:
- self.hosts[hostname] = Host(hostname, self.timestamp)
- return self.hosts[hostname]
+ def get_host(self, hostname):
+ if hostname not in self.hosts:
+ self.hosts[hostname] = Host(hostname, self.timestamp)
+ return self.hosts[hostname]
- def get_sum(self, fn2, generation, label):
- return sum(h.get_counter(fn2, generation, label)
- for h in self.hosts.itervalues())
+ def get_sum(self, fn2, generation, label):
+ return sum(h.get_counter(fn2, generation, label)
+ for h in self.hosts.itervalues())
- def rrd_update(self):
- if not args.dont_update_rrds:
- for h in self.hosts.itervalues():
- h.rrd_update()
+ def rrd_update(self):
+ if not args.dont_update_rrds:
+ for h in self.hosts.itervalues():
+ h.rrd_update()
css = '''
th, td {
@@ -475,183 +477,183 @@ css = '''
class HTML(object):
- def __init__(self, title, filebase):
+ def __init__(self, title, filebase):
+
+ self.filename = os.path.join(args.output_directory, filebase + ".html")
+
+ self.html = Element("html")
+ self.html.append(Comment(" Generators:\n" +
+ " " + session.rcynic_version + "\n" +
+ " $Id$\n"))
+ self.head = SubElement(self.html, "head")
+ self.body = SubElement(self.html, "body")
+
+ title += " " + session.rcynic_date
+ SubElement(self.head, "title").text = title
+ SubElement(self.body, "h1").text = title
+ SubElement(self.head, "style", type = "text/css").text = css
+
+ if args.refresh:
+ SubElement(self.head, "meta", { "http-equiv" : "Refresh", "content" : str(args.refresh) })
+
+ hostwidth = max(len(hostname) for hostname in session.hostnames)
+
+ toc = SubElement(self.body, "ul", id = "nav")
+ SubElement(SubElement(toc, "li"), "a", href = "index.html").text = "Overview"
+ li = SubElement(toc, "li")
+ SubElement(li, "span").text = "Repositories"
+ ul = SubElement(li, "ul", style = "width: %sem" % hostwidth)
+ for hostname in session.hostnames:
+ SubElement(SubElement(ul, "li"), "a", href = "%s.html" % hostname).text = hostname
+ SubElement(SubElement(toc, "li"), "a", href = "problems.html").text = "Problems"
+ li = SubElement(toc, "li")
+ SubElement(li, "span").text = "All Details"
+ ul = SubElement(li, "ul", style = "width: 15em")
+ SubElement(SubElement(ul, "li"), "a", href = "connections.html").text = "All Connections"
+ SubElement(SubElement(ul, "li"), "a", href = "objects.html").text = "All Objects"
+ SubElement(self.body, "br")
+
+ def close(self):
+ ElementTree(element = self.html).write(self.filename)
+
+ def BodyElement(self, tag, **attrib):
+ return SubElement(self.body, tag, **attrib)
+
+ def counter_table(self, data_func, total_func):
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tfoot = SubElement(table, "tfoot")
+ tbody = SubElement(table, "tbody")
+ tr = SubElement(thead, "tr")
+ SubElement(tr, "th")
+ for label in session.labels:
+ SubElement(tr, "th").text = label.text
+ for fn2 in session.fn2s:
+ for generation in session.generations:
+ counters = [data_func(fn2, generation, label) for label in session.labels]
+ if sum(counters) > 0:
+ tr = SubElement(tbody, "tr")
+ SubElement(tr, "td").text = ((generation or "") + " " + (fn2 or "")).strip()
+ for label, count in zip(session.labels, counters):
+ td = SubElement(tr, "td")
+ if count > 0:
+ td.set("class", label.mood)
+ td.text = str(count)
+ tr = SubElement(tfoot, "tr")
+ SubElement(tr, "td").text = "Total"
+ counters = [total_func(label) for label in session.labels]
+ for label, count in zip(session.labels, counters):
+ td = SubElement(tr, "td")
+ if count > 0:
+ td.set("class", label.mood)
+ td.text = str(count)
+ return table
+
+ def object_count_table(self, session): # pylint: disable=W0621
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tbody = SubElement(table, "tbody")
+ tfoot = SubElement(table, "tfoot")
+ fn2s = [fn2 for fn2 in session.fn2s if fn2 is not None]
+ total = dict((fn2, 0) for fn2 in fn2s)
+ for hostname in session.hostnames:
+ tr = SubElement(tbody, "tr")
+ SubElement(tr, "td").text = hostname
+ for fn2 in fn2s:
+ td = SubElement(tr, "td")
+ count = sum(uri.endswith(fn2) for uri in session.hosts[hostname].uris)
+ total[fn2] += count
+ if count > 0:
+ td.text = str(count)
+ trhead = SubElement(thead, "tr")
+ trfoot = SubElement(tfoot, "tr")
+ SubElement(trhead, "th").text = "Repository"
+ SubElement(trfoot, "td").text = "Total"
+ for fn2 in fn2s:
+ SubElement(trhead, "th").text = fn2
+ SubElement(trfoot, "td").text = str(total[fn2])
+ return table
+
+ def detail_table(self, records):
+ if records:
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tbody = SubElement(table, "tbody")
+ tr = SubElement(thead, "tr")
+ SubElement(tr, "th").text = "Timestamp"
+ SubElement(tr, "th").text = "Generation"
+ SubElement(tr, "th").text = "Status"
+ SubElement(tr, "th").text = "URI"
+ for v in records:
+ tr = SubElement(tbody, "tr", { "class" : v.mood })
+ SubElement(tr, "td").text = v.timestamp
+ SubElement(tr, "td").text = v.generation
+ SubElement(tr, "td").text = v.label.text
+ SubElement(tr, "td", { "class" : "uri"}).text = v.uri
+ return table
+ else:
+ self.BodyElement("p").text = "None found"
+ return None
- self.filename = os.path.join(args.output_directory, filebase + ".html")
+def main():
- self.html = Element("html")
- self.html.append(Comment(" Generators:\n" +
- " " + session.rcynic_version + "\n" +
- " $Id$\n"))
- self.head = SubElement(self.html, "head")
- self.body = SubElement(self.html, "body")
+ global session # pylint: disable=W0603
- title += " " + session.rcynic_date
- SubElement(self.head, "title").text = title
- SubElement(self.body, "h1").text = title
- SubElement(self.head, "style", type = "text/css").text = css
+ os.putenv("TZ", "UTC")
+ time.tzset()
- if args.refresh:
- SubElement(self.head, "meta", { "http-equiv" : "Refresh", "content" : str(args.refresh) })
+ parse_options()
- hostwidth = max(len(hostname) for hostname in session.hostnames)
+ session = Session()
+ session.rrd_update()
- toc = SubElement(self.body, "ul", id = "nav")
- SubElement(SubElement(toc, "li"), "a", href = "index.html").text = "Overview"
- li = SubElement(toc, "li")
- SubElement(li, "span").text = "Repositories"
- ul = SubElement(li, "ul", style = "width: %sem" % hostwidth)
for hostname in session.hostnames:
- SubElement(SubElement(ul, "li"), "a", href = "%s.html" % hostname).text = hostname
- SubElement(SubElement(toc, "li"), "a", href = "problems.html").text = "Problems"
- li = SubElement(toc, "li")
- SubElement(li, "span").text = "All Details"
- ul = SubElement(li, "ul", style = "width: 15em")
- SubElement(SubElement(ul, "li"), "a", href = "connections.html").text = "All Connections"
- SubElement(SubElement(ul, "li"), "a", href = "objects.html").text = "All Objects"
- SubElement(self.body, "br")
-
- def close(self):
- ElementTree(element = self.html).write(self.filename)
-
- def BodyElement(self, tag, **attrib):
- return SubElement(self.body, tag, **attrib)
-
- def counter_table(self, data_func, total_func):
- table = self.BodyElement("table", rules = "all", border = "1")
- thead = SubElement(table, "thead")
- tfoot = SubElement(table, "tfoot")
- tbody = SubElement(table, "tbody")
- tr = SubElement(thead, "tr")
- SubElement(tr, "th")
- for label in session.labels:
- SubElement(tr, "th").text = label.text
- for fn2 in session.fn2s:
- for generation in session.generations:
- counters = [data_func(fn2, generation, label) for label in session.labels]
- if sum(counters) > 0:
- tr = SubElement(tbody, "tr")
- SubElement(tr, "td").text = ((generation or "") + " " + (fn2 or "")).strip()
- for label, count in zip(session.labels, counters):
- td = SubElement(tr, "td")
- if count > 0:
- td.set("class", label.mood)
- td.text = str(count)
- tr = SubElement(tfoot, "tr")
- SubElement(tr, "td").text = "Total"
- counters = [total_func(label) for label in session.labels]
- for label, count in zip(session.labels, counters):
- td = SubElement(tr, "td")
- if count > 0:
- td.set("class", label.mood)
- td.text = str(count)
- return table
-
- def object_count_table(self, session): # pylint: disable=W0621
- table = self.BodyElement("table", rules = "all", border = "1")
- thead = SubElement(table, "thead")
- tbody = SubElement(table, "tbody")
- tfoot = SubElement(table, "tfoot")
- fn2s = [fn2 for fn2 in session.fn2s if fn2 is not None]
- total = dict((fn2, 0) for fn2 in fn2s)
+ html = HTML("Repository details for %s" % hostname, hostname)
+ html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
+ if not args.hide_graphs:
+ session.hosts[hostname].rrd_graph(html)
+ if not args.hide_problems:
+ html.BodyElement("h2").text = "Connection Problems"
+ html.detail_table(session.hosts[hostname].connection_problems)
+ html.BodyElement("h2").text = "Object Problems"
+ html.detail_table(session.hosts[hostname].object_problems)
+ html.close()
+
+ html = HTML("rcynic summary", "index")
+ html.BodyElement("h2").text = "Grand totals for all repositories"
+ html.counter_table(session.get_sum, Label.get_count)
+ if not args.hide_object_counts:
+ html.BodyElement("br")
+ html.BodyElement("hr")
+ html.BodyElement("br")
+ html.BodyElement("h2").text = "Current total object counts (distinct URIs)"
+ html.object_count_table(session)
for hostname in session.hostnames:
- tr = SubElement(tbody, "tr")
- SubElement(tr, "td").text = hostname
- for fn2 in fn2s:
- td = SubElement(tr, "td")
- count = sum(uri.endswith(fn2) for uri in session.hosts[hostname].uris)
- total[fn2] += count
- if count > 0:
- td.text = str(count)
- trhead = SubElement(thead, "tr")
- trfoot = SubElement(tfoot, "tr")
- SubElement(trhead, "th").text = "Repository"
- SubElement(trfoot, "td").text = "Total"
- for fn2 in fn2s:
- SubElement(trhead, "th").text = fn2
- SubElement(trfoot, "td").text = str(total[fn2])
- return table
-
- def detail_table(self, records):
- if records:
- table = self.BodyElement("table", rules = "all", border = "1")
- thead = SubElement(table, "thead")
- tbody = SubElement(table, "tbody")
- tr = SubElement(thead, "tr")
- SubElement(tr, "th").text = "Timestamp"
- SubElement(tr, "th").text = "Generation"
- SubElement(tr, "th").text = "Status"
- SubElement(tr, "th").text = "URI"
- for v in records:
- tr = SubElement(tbody, "tr", { "class" : v.mood })
- SubElement(tr, "td").text = v.timestamp
- SubElement(tr, "td").text = v.generation
- SubElement(tr, "td").text = v.label.text
- SubElement(tr, "td", { "class" : "uri"}).text = v.uri
- return table
- else:
- self.BodyElement("p").text = "None found"
- return None
-
-def main():
-
- global session
-
- os.putenv("TZ", "UTC")
- time.tzset()
-
- parse_options()
+ html.BodyElement("br")
+ html.BodyElement("hr")
+ html.BodyElement("br")
+ html.BodyElement("h2").text = "Overview for repository %s" % hostname
+ html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
+ if not args.hide_graphs:
+ html.BodyElement("br")
+ html.BodyElement("a", href = "%s.html" % hostname).append(session.hosts[hostname].graph)
+ html.close()
- session = Session()
- session.rrd_update()
+ html = HTML("Problems", "problems")
+ html.BodyElement("h2").text = "Connection Problems"
+ html.detail_table(session.connection_problems)
+ html.BodyElement("h2").text = "Object Problems"
+ html.detail_table(session.object_problems)
+ html.close()
- for hostname in session.hostnames:
- html = HTML("Repository details for %s" % hostname, hostname)
- html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
- if not args.hide_graphs:
- session.hosts[hostname].rrd_graph(html)
- if not args.hide_problems:
- html.BodyElement("h2").text = "Connection Problems"
- html.detail_table(session.hosts[hostname].connection_problems)
- html.BodyElement("h2").text = "Object Problems"
- html.detail_table(session.hosts[hostname].object_problems)
+ html = HTML("All connections", "connections")
+ html.detail_table([v for v in session.validation_status if v.is_connection_detail])
html.close()
- html = HTML("rcynic summary", "index")
- html.BodyElement("h2").text = "Grand totals for all repositories"
- html.counter_table(session.get_sum, Label.get_count)
- if not args.hide_object_counts:
- html.BodyElement("br")
- html.BodyElement("hr")
- html.BodyElement("br")
- html.BodyElement("h2").text = "Current total object counts (distinct URIs)"
- html.object_count_table(session)
- for hostname in session.hostnames:
- html.BodyElement("br")
- html.BodyElement("hr")
- html.BodyElement("br")
- html.BodyElement("h2").text = "Overview for repository %s" % hostname
- html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
- if not args.hide_graphs:
- html.BodyElement("br")
- html.BodyElement("a", href = "%s.html" % hostname).append(session.hosts[hostname].graph)
- html.close()
-
- html = HTML("Problems", "problems")
- html.BodyElement("h2").text = "Connection Problems"
- html.detail_table(session.connection_problems)
- html.BodyElement("h2").text = "Object Problems"
- html.detail_table(session.object_problems)
- html.close()
-
- html = HTML("All connections", "connections")
- html.detail_table([v for v in session.validation_status if v.is_connection_detail])
- html.close()
-
- html = HTML("All objects", "objects")
- html.detail_table([v for v in session.validation_status if v.is_object_detail])
- html.close()
+ html = HTML("All objects", "objects")
+ html.detail_table([v for v in session.validation_status if v.is_object_detail])
+ html.close()
if __name__ == "__main__":
- main()
+ main()
diff --git a/rp/rcynic/rcynic-svn b/rp/rcynic/rcynic-svn
index 28b24672..3c59116a 100755
--- a/rp/rcynic/rcynic-svn
+++ b/rp/rcynic/rcynic-svn
@@ -27,50 +27,50 @@ import fcntl
import os
try:
- from lxml.etree import ElementTree
+ from lxml.etree import ElementTree
except ImportError:
- from xml.etree.ElementTree import ElementTree
+ from xml.etree.ElementTree import ElementTree
mime_types = (
- ("html", "application/xhtml+xml"),
- ("cer", "application/pkix-cert"),
- ("crl", "application/pkix-crl"),
- ("mft", "application/rpki-manifest"),
- ("mnf", "application/rpki-manifest"),
- ("roa", "application/rpki-roa"),
- ("gbr", "application/rpki-ghostbusters"))
+ ("html", "application/xhtml+xml"),
+ ("cer", "application/pkix-cert"),
+ ("crl", "application/pkix-crl"),
+ ("mft", "application/rpki-manifest"),
+ ("mnf", "application/rpki-manifest"),
+ ("roa", "application/rpki-roa"),
+ ("gbr", "application/rpki-ghostbusters"))
def run(*argv, **kwargs):
- """
- Run a program, displaying timing data when appropriate.
- """
+ """
+ Run a program, displaying timing data when appropriate.
+ """
- _t0 = datetime.datetime.utcnow()
- subprocess.check_call(argv, **kwargs)
- if args.show_timing:
- _t1 = datetime.datetime.utcnow()
- print _t1, (_t1 - _t0), " ".join(argv)
+ _t0 = datetime.datetime.utcnow()
+ subprocess.check_call(argv, **kwargs)
+ if args.show_timing:
+ _t1 = datetime.datetime.utcnow()
+ print _t1, (_t1 - _t0), " ".join(argv)
def runxml(*argv):
- """
-
- Run a program which produces XML output, displaying timing data when
- appropriate and returning an ElementTree constructed from the
- program's output.
- """
- _t0 = datetime.datetime.utcnow()
- p = subprocess.Popen(argv, stdout = subprocess.PIPE)
- x = ElementTree(file = p.stdout)
- s = p.wait()
- if s:
- raise subprocess.CalledProcessError(s, argv[0])
- if args.show_timing:
- _t1 = datetime.datetime.utcnow()
- print _t1, (_t1 - _t0), " ".join(argv)
- return x
+ """
+
+ Run a program which produces XML output, displaying timing data when
+ appropriate and returning an ElementTree constructed from the
+ program's output.
+ """
+ _t0 = datetime.datetime.utcnow()
+ p = subprocess.Popen(argv, stdout = subprocess.PIPE)
+ x = ElementTree(file = p.stdout)
+ s = p.wait()
+ if s:
+ raise subprocess.CalledProcessError(s, argv[0])
+ if args.show_timing:
+ _t1 = datetime.datetime.utcnow()
+ print _t1, (_t1 - _t0), " ".join(argv)
+ return x
# Main program.
@@ -120,8 +120,8 @@ parser.add_argument("working_directory", help = \
args = parser.parse_args()
if args.show_timing:
- t0 = datetime.datetime.utcnow()
- print t0, "Starting"
+ t0 = datetime.datetime.utcnow()
+ print t0, "Starting"
# Lock out other instances of this program. We may want some more
# sophsiticated approach when combining this with other programs, but
@@ -141,18 +141,18 @@ run("svn", "update", "--quiet", args.working_directory)
if args.files_to_archive:
- if args.verbatim:
- cmd = ["rsync", "--archive", "--quiet", "--delete"]
- cmd.extend(args.files_to_archive)
- cmd.append(args.working_directory)
- run(*cmd)
+ if args.verbatim:
+ cmd = ["rsync", "--archive", "--quiet", "--delete"]
+ cmd.extend(args.files_to_archive)
+ cmd.append(args.working_directory)
+ run(*cmd)
- else:
- for src in args.files_to_archive:
- cmd = ["rsync", "--archive", "--quiet", "--delete", "--copy-links"]
- cmd.append(src.rstrip("/"))
- cmd.append(args.working_directory.rstrip("/") + "/")
- run(*cmd)
+ else:
+ for src in args.files_to_archive:
+ cmd = ["rsync", "--archive", "--quiet", "--delete", "--copy-links"]
+ cmd.append(src.rstrip("/"))
+ cmd.append(args.working_directory.rstrip("/") + "/")
+ run(*cmd)
# Ask Subversion to add any new files, trying hard to get the MIME
# types right.
@@ -160,8 +160,8 @@ if args.files_to_archive:
cmd = ["svn", "add", "--quiet", "--force", "--auto-props"]
for fn2, mime_type in mime_types:
- cmd.append("--config-option")
- cmd.append("config:auto-props:*.%s=svn:mime-type=%s" % (fn2, mime_type))
+ cmd.append("--config-option")
+ cmd.append("config:auto-props:*.%s=svn:mime-type=%s" % (fn2, mime_type))
cmd.append(".")
@@ -171,15 +171,16 @@ run(*cmd, cwd = args.working_directory)
# files have been deleted, and tell Subversion that we deleted them
# intentionally.
+# pylint: disable=E1101
missing = sorted(entry.get("path")
for entry in runxml("svn", "status", "--xml", args.working_directory).find("target").findall("entry")
if entry.find("wc-status").get("item") == "missing")
deleted = []
for path in missing:
- if not any(path.startswith(r) for r in deleted):
- run("svn", "delete", "--quiet", path)
- deleted.append(path + "/")
+ if not any(path.startswith(r) for r in deleted):
+ run("svn", "delete", "--quiet", path)
+ deleted.append(path + "/")
# Commit our changes and update the working tree.
@@ -187,5 +188,5 @@ run("svn", "commit", "--quiet", "--message", "Auto update.", args.working_direct
run("svn", "update", "--quiet", args.working_directory)
if args.show_timing:
- t1 = datetime.datetime.utcnow()
- print t1, t1 - t0, "total runtime"
+ t1 = datetime.datetime.utcnow()
+ print t1, t1 - t0, "total runtime"
diff --git a/rp/rcynic/rcynic-text b/rp/rcynic/rcynic-text
index db4126ce..d4a5b23e 100755
--- a/rp/rcynic/rcynic-text
+++ b/rp/rcynic/rcynic-text
@@ -25,96 +25,96 @@ import urlparse
import textwrap
try:
- from lxml.etree import ElementTree
+ from lxml.etree import ElementTree
except ImportError:
- from xml.etree.ElementTree import ElementTree
+ from xml.etree.ElementTree import ElementTree
class Label(object):
- def __init__(self, elt):
- self.tag = elt.tag
- self.width = max(len(s) for s in elt.text.split())
- self.lines = textwrap.wrap(elt.text.strip(), width = self.width)
- self.counter = 0
+ def __init__(self, elt):
+ self.tag = elt.tag
+ self.width = max(len(s) for s in elt.text.split())
+ self.lines = textwrap.wrap(elt.text.strip(), width = self.width)
+ self.counter = 0
- def line(self, n):
- try:
- return " " + self.lines[n].center(self.width) + " "
- except IndexError:
- return " " * (self.width + 2)
+ def line(self, n):
+ try:
+ return " " + self.lines[n].center(self.width) + " "
+ except IndexError:
+ return " " * (self.width + 2)
- def add(self):
- self.counter += 1
+ def add(self):
+ self.counter += 1
- @property
- def total(self):
- return " " + str(self.counter).rjust(self.width) + " "
+ @property
+ def total(self):
+ return " " + str(self.counter).rjust(self.width) + " "
- @property
- def visible(self):
- return self.counter > 0
+ @property
+ def visible(self):
+ return self.counter > 0
class Host(object):
- def __init__(self):
- self.counters = {}
+ def __init__(self):
+ self.counters = {}
- def add(self, label):
- self.counters[label] = self.counters.get(label, 0) + 1
- label.add()
+ def add(self, label):
+ self.counters[label] = self.counters.get(label, 0) + 1
+ label.add()
- def total(self, label):
- if label in self.counters:
- return " " + str(self.counters[label]).rjust(label.width) + " "
- else:
- return " " * (label.width + 2)
+ def total(self, label):
+ if label in self.counters:
+ return " " + str(self.counters[label]).rjust(label.width) + " "
+ else:
+ return " " * (label.width + 2)
class Session(object):
- def __init__(self, labels):
- self.hosts = {}
- self.labels = labels
- self.map = dict((label.tag, label) for label in labels)
-
- def add(self, elt):
- label = self.map[elt.get("status")]
- hostname = urlparse.urlparse(elt.text.strip()).hostname
- if hostname not in self.hosts:
- self.hosts[hostname] = Host()
- self.hosts[hostname].add(label)
-
- def show(self):
- visible = [label for label in self.labels if label.visible]
- hostnames = sorted(hostname for hostname in self.hosts if hostname is not None)
- hostwidth = max(len(hostname) for hostname in hostnames + ["Hostname"])
- separator = "+-%s-+-%s-+" % (
- "-" * hostwidth,
- "-+-".join("-" * label.width for label in visible))
- print separator
- for i in xrange(max(len(label.lines) for label in visible)):
- print "| %s |%s|" % (
- ("Hostname" if i == 0 else "").ljust(hostwidth),
- "|".join(label.line(i) for label in visible))
- print separator
- for hostname in hostnames:
- print "| %s |%s|" % (
- hostname.ljust(hostwidth),
- "|".join(self.hosts[hostname].total(label) for label in visible))
- if hostnames:
- print separator
- print "| %s |%s|" % (
- "Total".ljust(hostwidth),
- "|".join(label.total for label in visible))
- print separator
+ def __init__(self, labels):
+ self.hosts = {}
+ self.labels = labels
+ self.map = dict((label.tag, label) for label in labels)
+
+ def add(self, elt):
+ label = self.map[elt.get("status")]
+ hostname = urlparse.urlparse(elt.text.strip()).hostname
+ if hostname not in self.hosts:
+ self.hosts[hostname] = Host()
+ self.hosts[hostname].add(label)
+
+ def show(self):
+ visible = [label for label in self.labels if label.visible]
+ hostnames = sorted(hostname for hostname in self.hosts if hostname is not None)
+ hostwidth = max(len(hostname) for hostname in hostnames + ["Hostname"])
+ separator = "+-%s-+-%s-+" % (
+ "-" * hostwidth,
+ "-+-".join("-" * label.width for label in visible))
+ print separator
+ for i in xrange(max(len(label.lines) for label in visible)):
+ print "| %s |%s|" % (
+ ("Hostname" if i == 0 else "").ljust(hostwidth),
+ "|".join(label.line(i) for label in visible))
+ print separator
+ for hostname in hostnames:
+ print "| %s |%s|" % (
+ hostname.ljust(hostwidth),
+ "|".join(self.hosts[hostname].total(label) for label in visible))
+ if hostnames:
+ print separator
+ print "| %s |%s|" % (
+ "Total".ljust(hostwidth),
+ "|".join(label.total for label in visible))
+ print separator
def main():
- for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
- etree = ElementTree(file = filename)
- session = Session([Label(elt) for elt in etree.find("labels")])
- for elt in etree.findall("validation_status"):
- session.add(elt)
- session.show()
+ for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
+ etree = ElementTree(file = filename)
+ session = Session([Label(elt) for elt in etree.find("labels")])
+ for elt in etree.findall("validation_status"):
+ session.add(elt)
+ session.show()
if __name__ == "__main__":
- main()
+ main()
diff --git a/rp/rcynic/rcynic.c b/rp/rcynic/rcynic.c
index d0da40f5..36c1950f 100644
--- a/rp/rcynic/rcynic.c
+++ b/rp/rcynic/rcynic.c
@@ -3190,7 +3190,7 @@ static int extract_access_uri(rcynic_ctx_t *rc,
if (OBJ_obj2nid(a->method) != nid)
continue;
++*count;
- if (!relevant((char *) a->location->d.uniformResourceIdentifier->data))
+ if (relevant && !relevant((char *) a->location->d.uniformResourceIdentifier->data))
continue;
if (sizeof(result->s) <= a->location->d.uniformResourceIdentifier->length)
log_validation_status(rc, uri, uri_too_long, generation);
@@ -3707,7 +3707,7 @@ static int check_x509(rcynic_ctx_t *rc,
int n_caIssuers = 0;
ex_count--;
if (!extract_access_uri(rc, uri, generation, aia, NID_ad_ca_issuers,
- &certinfo->aia, &n_caIssuers, is_rsync) ||
+ &certinfo->aia, &n_caIssuers, NULL) ||
!certinfo->aia.s[0] ||
sk_ACCESS_DESCRIPTION_num(aia) != n_caIssuers) {
log_validation_status(rc, uri, malformed_aia_extension, generation);
diff --git a/rp/rcynic/rcynicng b/rp/rcynic/rcynicng
new file mode 100755
index 00000000..eccd247f
--- /dev/null
+++ b/rp/rcynic/rcynicng
@@ -0,0 +1,1478 @@
+#!/usr/bin/env python
+
+# $Id$
+
+"""
+Reimplementation of rcynic in Python. Work in progress.
+"""
+
+import os
+import sys
+import ssl
+import time
+import copy
+import errno
+import shutil
+import socket
+import logging
+import argparse
+import tempfile
+import urlparse
+import subprocess
+
+import tornado.gen
+import tornado.locks
+import tornado.ioloop
+import tornado.queues
+import tornado.process
+import tornado.httpclient
+
+import rpki.POW
+import rpki.log
+import rpki.config
+import rpki.sundial
+import rpki.relaxng
+import rpki.autoconf
+
+from rpki.oids import id_kp_bgpsec_router
+
+from lxml.etree import (ElementTree, Element, SubElement, Comment,
+ XML, DocumentInvalid, XMLSyntaxError, iterparse)
+
+logger = logging.getLogger("rcynicng")
+
+xmlns = rpki.relaxng.rrdp.xmlns
+
+tag_delta = xmlns + "delta"
+tag_notification = xmlns + "notification"
+tag_publish = xmlns + "publish"
+tag_snapshot = xmlns + "snapshot"
+tag_withdraw = xmlns + "withdraw"
+
+codes = rpki.POW.validation_status
+
+
+class Status(object):
+ """
+ Validation status database, like validation_status_t in rcynic:tos.
+
+ rcynic:tos version of this data structure is stored as an AVL
+ tree, because the OpenSSL STACK_OF() sort-and-bsearch turned out
+ to be a very poor choice for the input data. Remains to be seen
+ whether we need to do something like that here too.
+ """
+
+ db = dict()
+
+ def __init__(self, uri):
+ self.uri = uri
+ self._timestamp = None
+ self.status = set()
+
+ def __str__(self):
+ return "{my.timestamp} {my.uri} {status}".format(
+ my = self, status = ",".join(str(s) for s in sorted(self.status)))
+
+ @property
+ def timestamp(self):
+ return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self._timestamp))
+
+ @classmethod
+ def get(cls, uri):
+ try:
+ return cls.db[uri].status
+ except KeyError:
+ return None
+
+ @classmethod
+ def update(cls, uri):
+ try:
+ self = cls.db[uri]
+ except KeyError:
+ self = cls.db[uri] = cls(uri)
+ self._timestamp = time.time()
+ return self.status
+
+ @classmethod
+ def add(cls, uri, *codes):
+ status = cls.update(uri)
+ for code in codes:
+ status.add(code)
+
+ @classmethod
+ def remove(cls, uri, *codes):
+ if uri in cls.db:
+ for code in codes:
+ cls.db[uri].status.discard(code)
+
+ @classmethod
+ def test(cls, uri, code):
+ return uri in cls.db and code in cls.db[uri].status
+
+
+def install_object(obj):
+ obj.obj.authenticated.add(authenticated)
+ obj.obj.save()
+
+
+class X509StoreCTX(rpki.POW.X509StoreCTX):
+
+ @classmethod
+ def subclass(cls, **kwargs):
+ return type(cls.__name__, (cls,), kwargs)
+
+ status = None
+
+ def verify_callback(self, ok):
+ err = self.getError()
+ if err in (codes.X509_V_OK.code, codes.X509_V_ERR_SUBJECT_ISSUER_MISMATCH.code):
+ return ok
+ elif err == codes.X509_V_ERR_CRL_HAS_EXPIRED.code:
+ return True
+ elif err == codes.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT.code:
+ self.status.add(codes.TRUST_ANCHOR_NOT_SELF_SIGNED)
+ return ok
+ else:
+ self.status.add(codes.find(err))
+ return ok
+
+
+class POW_Mixin(object):
+
+ @classmethod
+ def store_if_new(cls, der, uri, retrieval):
+ self = cls.derRead(der)
+ ski, aki = self.get_hex_SKI_AKI()
+ return RPKIObject.objects.get_or_create(
+ der = der,
+ defaults = dict(uri = uri,
+ aki = aki,
+ ski = ski,
+ sha256 = sha256hex(der),
+ retrieved = retrieval))
+
+ def get_hex_SKI_AKI(self):
+ cer = self.certs()[0]
+ ski = cer.getSKI()
+ aki = cer.getAKI()
+ return ski.encode("hex") if ski else "", aki.encode("hex") if aki else ""
+
+ @property
+ def uri(self):
+ return self.obj.uri
+
+ @property
+ def aki(self):
+ return self.obj.aki
+
+ @property
+ def ski(self):
+ return self.obj.ski
+
+
+class X509(rpki.POW.X509, POW_Mixin):
+
+ def __repr__(self):
+ try:
+ return "<X509 \"{}\" at 0x{:x}>".format(self.uri, id(self))
+ except:
+ return "<X509 at 0x{:x}>".format(id(self))
+
+ def get_hex_SKI_AKI(self):
+ ski = self.getSKI()
+ aki = self.getAKI()
+ return ski.encode("hex") if ski else "", aki.encode("hex") if aki else ""
+
+ @classmethod
+ def load(cls, obj, cms = None):
+ if cms is not None:
+ # XXX Kludge to work around lack of subclass support in rpki.POW.CMS.certs().
+ der = cms.certs()[0].derWrite()
+ else:
+ der = obj.der
+ self = cls.derRead(der)
+ self.obj = obj
+ self.bc = self.getBasicConstraints()
+ self.eku = self.getEKU()
+ self.aia = self.getAIA()
+ self.sia = self.getSIA()
+ self.crldp = self.getCRLDP()
+ self.is_ca = self.bc is not None and self.bc[0]
+ self.caDirectory, self.rpkiManifest, self.signedObjectRepository, self.rpkiNotify \
+ = self.sia or (None, None, None, None)
+ return self
+
+ @staticmethod
+ def count_uris(uris, scheme = "rsync://"):
+ count = 0
+ if uris is not None:
+ for uri in uris:
+ if uri.startswith(scheme):
+ count += 1
+ return count
+
+ def check(self, trusted, crl):
+ #logger.debug("Starting checks for %r", self)
+ status = Status.update(self.uri)
+ is_ta = trusted is None
+ is_routercert = (self.eku is not None and id_kp_bgpsec_router in self.eku and
+ not self.is_ca and self.uri.endswith(".cer"))
+ if self.eku is not None and (self.is_ca or not self.uri.endswith(".cer")):
+ status.add(codes.INAPPROPRIATE_EKU_EXTENSION)
+ if is_ta and not self.is_ca:
+ status.add(codes.MALFORMED_TRUST_ANCHOR)
+ if is_ta and self.aia is not None:
+ status.add(codes.AIA_EXTENSION_FORBIDDEN)
+ if not is_ta and self.aia is None:
+ status.add(codes.AIA_EXTENSION_MISSING)
+ if is_routercert and self.sia is not None:
+ status.add(codes.SIA_EXTENSION_FORBIDDEN)
+ if not is_routercert and self.sia is None:
+ status.add(codes.SIA_EXTENSION_MISSING)
+ if is_ta and self.crldp is not None:
+ status.add(codes.CRLDP_EXTENSION_FORBIDDEN)
+ if not is_ta and self.crldp is None:
+ status.add(codes.CRLDP_EXTENSION_MISSING)
+ if not is_ta and not self.aki:
+ status.add(codes.AKI_EXTENSION_MISSING)
+ elif not is_ta and self.aki != trusted[0].ski:
+ status.add(codes.AKI_EXTENSION_ISSUER_MISMATCH)
+ serial = self.getSerial()
+ if serial <= 0 or serial > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:
+ status.add(codes.BAD_CERTIFICATE_SERIAL_NUMBER)
+ if self.getVersion() != 2:
+ status.add(codes.WRONG_OBJECT_VERSION)
+ n_rsync_caIssuers = self.count_uris(self.aia)
+ n_rsync_caDirectory = self.count_uris(self.caDirectory)
+ n_rsync_rpkiManifest = self.count_uris(self.rpkiManifest)
+ n_rsync_signedObjectRepository = self.count_uris(self.signedObjectRepository)
+ if n_rsync_caIssuers > 1 or n_rsync_caDirectory > 1 or n_rsync_rpkiManifest > 1 or n_rsync_signedObjectRepository > 1:
+ status.add(codes.MULTIPLE_RSYNC_URIS_IN_EXTENSION)
+ if self.aia is not None and n_rsync_caIssuers == 0:
+ status.add(codes.MALFORMED_AIA_EXTENSION)
+ if self.is_ca:
+ ok = n_rsync_caDirectory != 0 and n_rsync_rpkiManifest != 0 and n_rsync_signedObjectRepository == 0
+ elif not is_routercert:
+ ok = n_rsync_caDirectory == 0 and n_rsync_rpkiManifest == 0 and n_rsync_signedObjectRepository != 0
+ else:
+ ok = self.sia is None
+ if not ok:
+ status.add(codes.MALFORMED_SIA_EXTENSION)
+ if not is_ta and self.count_uris(self.crldp) == 0:
+ status.add(codes.MALFORMED_CRLDP_EXTENSION)
+ self.checkRPKIConformance(status = status, eku = id_kp_bgpsec_router if is_routercert else None)
+ try:
+ self.verify(trusted = [self] if trusted is None else trusted, crl = crl, policy = "1.3.6.1.5.5.7.14.2",
+ context_class = X509StoreCTX.subclass(status = status))
+ except rpki.POW.ValidationError as e:
+ logger.debug("%r rejected: %s", self, e)
+ status.add(codes.OBJECT_REJECTED)
+ codes.normalize(status)
+ #logger.debug("Finished checks for %r", self)
+ return not any(s.kind == "bad" for s in status)
+
+
+class CRL(rpki.POW.CRL, POW_Mixin):
+
+ def __repr__(self):
+ try:
+ return "<CRL \"{}\" at 0x{:x}>".format(self.uri, id(self))
+ except:
+ return "<CRL at 0x{:x}>".format(id(self))
+
+ def get_hex_SKI_AKI(self):
+ aki = self.getAKI()
+ return "", aki.encode("hex") if aki else ""
+
+ @classmethod
+ def load(cls, obj):
+ self = cls.derRead(obj.der)
+ self.obj = obj
+ self.thisUpdate = self.getThisUpdate()
+ self.nextUpdate = self.getNextUpdate()
+ self.number = self.getCRLNumber()
+ return self
+
+ def check(self, issuer):
+ status = Status.update(self.uri)
+ self.checkRPKIConformance(status = status, issuer = issuer)
+ try:
+ self.verify(issuer)
+ except rpki.POW.ValidationError as e:
+ logger.debug("%r rejected: %s", self, e)
+ status.add(codes.OBJECT_REJECTED)
+ codes.normalize(status)
+ if self.getVersion() != 1:
+ status.add(codes.WRONG_OBJECT_VERSION)
+ now = rpki.sundial.now()
+ if self.thisUpdate > now:
+ status.add(codes.CRL_NOT_YET_VALID)
+ if self.nextUpdate < now:
+ status.add(codes.STALE_CRL_OR_MANIFEST)
+ if self.number is None:
+ status.add(codes.CRL_NUMBER_EXTENSION_MISSING)
+ if self.number < 0:
+ status.add(codes.CRL_NUMBER_IS_NEGATIVE)
+ if self.number > 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:
+ status.add(codes.CRL_NUMBER_OUT_OF_RANGE)
+ if self.getIssuer() != issuer.getSubject():
+ status.add(codes.CRL_ISSUER_NAME_MISMATCH)
+ if not self.aki:
+ status.add(codes.AKI_EXTENSION_MISSING)
+ elif self.aki != issuer.ski:
+ status.add(codes.AKI_EXTENSION_ISSUER_MISMATCH)
+
+ return not any(s.kind == "bad" for s in status)
+
+
+class Ghostbuster(rpki.POW.CMS, POW_Mixin):
+
+ def __repr__(self):
+ try:
+ return "<Ghostbuster \"{}\" at 0x{:x}>".format(self.uri, id(self))
+ except:
+ return "<Ghostbuster at 0x{:x}>".format(id(self))
+
+ @classmethod
+ def load(cls, obj):
+ self = cls.derRead(obj.der)
+ self.obj = obj
+ self.ee = X509.load(obj, self)
+ self.vcard = None
+ return self
+
+ def check(self, trusted, crl):
+ status = Status.update(self.uri)
+ self.ee.check(trusted = trusted, crl = crl)
+ try:
+ self.vcard = self.verify()
+ except rpki.POW.ValidationError as e:
+ logger.debug("%r rejected: %s", self, e)
+ status.add(codes.OBJECT_REJECTED)
+ self.checkRPKIConformance(status)
+ codes.normalize(status)
+ return not any(s.kind == "bad" for s in status)
+
+
+class Manifest(rpki.POW.Manifest, POW_Mixin):
+
+ def __repr__(self):
+ try:
+ return "<Manifest \"{}\" at 0x{:x}>".format(self.uri, id(self))
+ except:
+ return "<Manifest at 0x{:x}>".format(id(self))
+
+ @classmethod
+ def load(cls, obj):
+ self = cls.derRead(obj.der)
+ self.obj = obj
+ self.ee = X509.load(obj, self)
+ self.fah = None
+ self.thisUpdate = None
+ self.nextUpdate = None
+ self.number = None
+ return self
+
+ def check(self, trusted, crl):
+ status = Status.update(self.uri)
+ self.ee.check(trusted = trusted, crl = crl)
+ try:
+ self.verify()
+ except rpki.POW.ValidationError as e:
+ logger.debug("%r rejected: %s", self, e)
+ status.add(codes.OBJECT_REJECTED)
+ self.checkRPKIConformance(status)
+ self.thisUpdate = self.getThisUpdate()
+ self.nextUpdate = self.getNextUpdate()
+ self.number = self.getManifestNumber()
+ self.fah = self.getFiles()
+ self.notBefore = self.ee.getNotBefore()
+ self.notAfter = self.ee.getNotAfter()
+ if self.thisUpdate < self.notBefore or self.nextUpdate > self.notAfter:
+ status.add(codes.MANIFEST_INTERVAL_OVERRUNS_CERT)
+ now = rpki.sundial.now()
+ if self.thisUpdate > now:
+ status.add(codes.MANIFEST_NOT_YET_VALID)
+ if self.nextUpdate < now:
+ status.add(codes.STALE_CRL_OR_MANIFEST)
+ codes.normalize(status)
+ return not any(s.kind == "bad" for s in status)
+
+ def find_crl_candidate_hashes(self):
+ for fn, digest in self.fah:
+ if fn.endswith(".crl"):
+ yield digest.encode("hex")
+
+
+class ROA(rpki.POW.ROA, POW_Mixin):
+
+ def __repr__(self):
+ try:
+ return "<ROA \"{}\" at 0x{:x}>".format(self.uri, id(self))
+ except:
+ return "<ROA at 0x{:x}>".format(id(self))
+
+ @classmethod
+ def load(cls, obj):
+ self = cls.derRead(obj.der)
+ self.obj = obj
+ self.ee = X509.load(obj, self)
+ self.asn = None
+ self.prefixes = None
+ return self
+
+ def check(self, trusted, crl):
+ status = Status.update(self.uri)
+ self.ee.check(trusted = trusted, crl = crl)
+ try:
+ vcard = self.verify()
+ except rpki.POW.ValidationError:
+ status.add(codes.OBJECT_REJECTED)
+ self.checkRPKIConformance(status)
+ self.asn = self.getASID()
+ self.prefixes = self.getPrefixes()
+ codes.normalize(status)
+ return not any(s.kind == "bad" for s in status)
+
+
+class_dispatch = dict(cer = X509,
+ crl = CRL,
+ gbr = Ghostbuster,
+ mft = Manifest,
+ roa = ROA)
+
+def uri_to_class(uri):
+ cls = class_dispatch.get(uri[-3:]) if len(uri) > 4 and uri[-4] == "." else None
+ if cls is None:
+ Status.add(uri, None, codes.UNKNOWN_OBJECT_TYPE_SKIPPED)
+ return cls
+
+
+# If we find ourselves using this same ordering for every retrieval from the RPKIObjects model, we
+# can add it as a Meta option for the model and omit it in the query expressions, like this:
+#
+# class RPKIObjects(models.Model):
+# ...
+# class Meta:
+# ordering = ["-retrieved__started"]
+#
+# https://docs.djangoproject.com/en/1.8/ref/models/querysets/#order-by
+# https://docs.djangoproject.com/en/1.8/ref/models/options/#django.db.models.Options.ordering
+
+def fetch_objects(**kwargs):
+ for obj in RPKIObject.objects.filter(**kwargs).order_by("-retrieved__started"):
+ cls = uri_to_class(obj.uri)
+ if cls is not None:
+ yield cls.load(obj)
+
+
+class WalkFrame(object):
+ """
+ Certificate tree walk stack frame. This is basically just a
+ preamble and a loop, broken out into several separate methods so
+ that we can fork new tasks in the middle then resume processing of
+ the current state machine (ie, this frame) when appropriate (eg,
+ after an rsync or RRDP fetch completes).
+ """
+
+ def __init__(self, cer):
+ self.cer = cer
+ self.state = self.initial
+
+ def __repr__(self):
+ try:
+ return "<WalkFrame \"{}\" at 0x{:x}>".format(self.cer.uri, id(self))
+ except:
+ return "<WalkFrame at 0x{:x}>".format(id(self))
+
+ @tornado.gen.coroutine
+ def __call__(self, wsk):
+ yield self.state(wsk)
+
+ @tornado.gen.coroutine
+ def initial(self, wsk):
+
+ rsync_uri = first_rsync_uri(self.cer.caDirectory)
+ rrdp_uri = first_https_uri(self.cer.rpkiNotify)
+
+ if args.prefer_rsync:
+ uri = rsync_uri or rrdp_uri
+ else:
+ uri = rrdp_uri or rsync_uri
+
+ self.fetcher = Fetcher(uri)
+
+ if not self.fetcher.needed():
+ self.state = self.ready
+ elif not args.spawn_on_fetch:
+ self.state = self.fetch
+ else:
+ self.state = self.fetch
+ yield task_queue.put(wsk.clone())
+ wsk.pop()
+
+ @tornado.gen.coroutine
+ def fetch(self, wsk):
+ yield self.fetcher.fetch()
+ self.state = self.ready
+
+ @tornado.gen.coroutine
+ def ready(self, wsk):
+ self.trusted = wsk.trusted()
+
+ logger.debug("%r scanning products", self)
+
+ # NB: CRL checks on manifest EE certificates deferred until we've picked a CRL.
+
+ mft_candidates = []
+ crl_candidates = []
+ crl_candidate_hashes = set()
+
+ for mft in fetch_objects(aki = self.cer.ski, uri__endswith = ".mft"):
+ if mft.check(trusted = self.trusted, crl = None):
+ mft_candidates.append(mft)
+ crl_candidate_hashes.update(mft.find_crl_candidate_hashes())
+
+ if not mft_candidates:
+ wsk.pop()
+ return
+
+ for crl in fetch_objects(aki = self.cer.ski, uri__endswith = ".crl", sha256__in = crl_candidate_hashes):
+ if crl.check(self.trusted[0]):
+ crl_candidates.append(crl)
+
+ mft_candidates.sort(reverse = True, key = lambda x: (x.number, x.thisUpdate, x.obj.retrieved.started))
+ crl_candidates.sort(reverse = True, key = lambda x: (x.number, x.thisUpdate, x.obj.retrieved.started))
+
+ if not crl_candidates:
+ wsk.pop()
+ return
+
+ self.crl = crl_candidates[0]
+
+ install_object(self.crl)
+ Status.add(self.crl.uri, codes.OBJECT_ACCEPTED)
+
+ #logger.debug("Picked CRL %s", self.crl.uri)
+
+ for mft in mft_candidates:
+ if self.crl.isRevoked(mft.ee):
+ Status.add(mft.obj.uri, codes.MANIFEST_EE_REVOKED)
+ continue
+ self.mft = mft
+ break
+ else:
+ wsk.pop()
+ return
+
+ install_object(self.mft)
+ Status.add(self.mft.obj.uri, codes.OBJECT_ACCEPTED)
+
+ self.stale_crl = Status.test(self.crl.uri, codes.STALE_CRL_OR_MANIFEST)
+ self.stale_mft = Status.test(self.mft.uri, codes.STALE_CRL_OR_MANIFEST)
+
+ # Issue warnings on mft and crl URI mismatches?
+
+ # Use an explicit iterator so we can resume it; run loop in separate method, same reason.
+
+ self.mft_iterator = iter(self.mft.getFiles())
+ self.state = self.loop
+
+ @tornado.gen.coroutine
+ def loop(self, wsk):
+
+ #logger.debug("Processing %s", self.mft.uri)
+
+ for fn, digest in self.mft_iterator:
+
+ yield tornado.gen.moment
+
+ uri = self.mft.uri[:self.mft.uri.rindex("/") + 1] + fn
+
+ # Need general URI validator here?
+
+ if uri == self.crl.uri:
+ continue
+
+ cls = uri_to_class(uri)
+
+ if cls is None:
+ continue
+
+ if cls in (Manifest, CRL):
+ Status.add(uri, None, codes.INAPPROPRIATE_OBJECT_TYPE_SKIPPED)
+ continue
+
+ for obj in fetch_objects(sha256 = digest.encode("hex")):
+
+ if self.stale_crl:
+ Status.add(uri, codes.TAINTED_BY_STALE_CRL)
+ if self.stale_mft:
+ Status.add(uri, codes.TAINTED_BY_STALE_MANIFEST)
+
+ if not obj.check(trusted = self.trusted, crl = self.crl):
+ Status.add(uri, codes.OBJECT_REJECTED)
+ continue
+
+ install_object(obj)
+ Status.add(uri, codes.OBJECT_ACCEPTED)
+
+ if cls is not X509 or not obj.is_ca:
+ break
+
+ wsk.push(obj)
+ return
+
+ wsk.pop()
+
+
+class WalkTask(object):
+ """
+ Task corresponding to one walk stack, roughly analgous to
+ STACK_OF(walk_ctx_t) in rcynic:tos.
+ """
+
+ def __init__(self, wsk = None, cer = None):
+ self.wsk = [] if wsk is None else wsk
+ if cer is not None:
+ self.push(cer)
+
+ def __repr__(self):
+ try:
+ return "<WalkTask \"{}\" at 0x{:x}>".format(self.wsk[-1].cer.uri, id(self))
+ except:
+ return "<WalkTask at 0x{:x}>".format(id(self))
+
+ @tornado.gen.coroutine
+ def __call__(self):
+ while self.wsk:
+ yield self.wsk[-1](wsk = self)
+
+ def push(self, cer):
+ self.wsk.append(WalkFrame(cer))
+
+ def pop(self):
+ return self.wsk.pop()
+
+ def clone(self):
+ return WalkTask(wsk = list(self.wsk))
+
+ def trusted(self):
+ stack = [w.cer for w in self.wsk]
+ stack.reverse()
+ return stack
+
+
+def read_tals():
+ for head, dirs, files in os.walk(args.trust_anchor_locators):
+ for fn in files:
+ if fn.endswith(".tal"):
+ furi = "file://" + os.path.abspath(os.path.join(head, fn))
+ try:
+ with open(os.path.join(head, fn), "r") as f:
+ lines = [line.strip() for line in f]
+ blank = lines.index("")
+ uris = lines[:blank]
+ key = rpki.POW.Asymmetric.derReadPublic("".join(lines[blank:]).decode("base64"))
+ if not uris or not all(uri.endswith(".cer") for uri in uris):
+ Status.add(furi, None, codes.MALFORMED_TAL_URI)
+ yield uris, key
+ except:
+ Status.add(furi, None, codes.UNREADABLE_TRUST_ANCHOR_LOCATOR)
+
+
+def uri_to_filename(uri, base = None):
+ fn = uri[uri.index("://")+3:]
+ if base is not None:
+ fn = os.path.join(base, fn)
+ return fn
+
+def first_uri(uris, scheme):
+ if uris is not None:
+ for uri in uris:
+ if uri.startswith(scheme):
+ return uri
+ return None
+
+def first_rsync_uri(uris):
+ return first_uri(uris, "rsync://")
+
+def first_https_uri(uris):
+ return first_uri(uris, "https://")
+
+def sha256hex(bytes):
+ d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
+ d.update(bytes)
+ return d.digest().encode("hex")
+
+
+class RRDP_ParseFailure(Exception):
+ "Failure parsing RRDP message."
+
+class DeadHost(Exception):
+ "Host recently tried and known to be unavailable."
+
+
+class Fetcher(object):
+ """
+ Network transfer methods and history database.
+
+ At the moment this is rsync-only; eventually it will include
+ support for HTTPS and RRDP.
+ """
+
+ # Internal protocol:
+ #
+ # - Instances which have just gotten to the query stage are not registered
+ #
+ # - Instances which are in progress are listed in .history and
+ # have a Condition object in .pending; instances which depend on
+ # this should wait for the condition, then return.
+ #
+ # - Instances which have completed are listed in .history and have
+ # .pending set to None.
+
+ _rsync_deadhosts = set()
+ _rsync_history = dict()
+
+ _https_deadhosts = set()
+ _https_history = dict()
+
+ def __init__(self, uri, ta = False):
+ self.uri = uri
+ self.ta = ta
+ self.pending = None
+ self.status = None
+
+ def _rsync_split_uri(self):
+ return tuple(self.uri.rstrip("/").split("/")[2:])
+
+ def _rsync_find(self, path):
+ for i in xrange(1, len(path)):
+ target = path[:i+1]
+ try:
+ return self._rsync_history[target]
+ except KeyError:
+ continue
+ return None
+
+ def needed(self):
+ if not args.fetch:
+ return False
+ if self.uri.startswith("rsync://"):
+ return self._rsync_needed()
+ if self.uri.startswith("https://"):
+ return self._https_needed()
+ raise ValueError
+
+ def _rsync_needed(self):
+ path = self._rsync_split_uri()
+ if path[0] in self._rsync_deadhosts:
+ return False
+ entry = self._rsync_find(path)
+ return entry is None or entry.pending is not None
+
+ def _https_needed(self):
+ netloc = urlparse.urlparse(self.uri).netloc
+ if netloc in self._https_deadhosts:
+ return False
+ entry = self._https_history.get(self.uri)
+ return entry is None or entry.pending is not None
+
+ def fetch(self):
+ if self.uri.startswith("rsync://"):
+ return self._rsync_fetch()
+ if self.uri.startswith("https://"):
+ return self._https_fetch_ta() if self.ta else self._rrdp_fetch()
+ raise ValueError
+
+ @tornado.gen.coroutine
+ def _rsync_fetch(self):
+ assert self.uri.startswith("rsync://") and (self.uri.endswith(".cer") if self.ta else self.uri.endswith("/"))
+
+ if not args.fetch:
+ return
+ path = self._rsync_split_uri()
+ dead = path[0] in self._rsync_deadhosts
+ other = self._rsync_find(path)
+ if not dead and other is not None and other.pending is not None:
+ yield other.pending.wait()
+ if dead or other is not None:
+ return
+
+ self.pending = tornado.locks.Condition()
+ self._rsync_history[path] = self
+
+ try:
+ path = uri_to_filename(self.uri, args.unauthenticated)
+ cmd = ["rsync", "--update", "--times", "--copy-links", "--itemize-changes"]
+ if self.uri.endswith("/"):
+ cmd.append("--recursive")
+ cmd.append("--delete")
+ cmd.append(self.uri)
+ cmd.append(path)
+
+ dn = os.path.dirname(path)
+ if not os.path.exists(dn):
+ os.makedirs(dn)
+
+ # We use the stdout close from rsync to detect when the subprocess has finished.
+ # There's a lovely tornado.process.Subprocess.wait_for_exit() method which does
+ # exactly what one would think we'd want -- but Unix signal handling still hasn't
+ # caught up to the software interrupt architecture ITS had forty years ago, so
+ # signals still cause random "system call interrupted" failures in other libraries.
+ # Nothing Tornado can do about this, so we avoid signals entirely and collect the
+ # process exit status directly from the operating system. In theory, the WNOHANG
+ # isn't necessary here, we use it anyway to be safe in case theory is wrong.
+
+ # If we need to add a timeout here to guard against rsync processes taking too long
+ # (which has happened in the past with, eg, LACNIC), see tornado.gen.with_timeout()
+ # (documented in the utility functions section of the tornado.gen page), which wraps
+ # any future in a timeout.
+
+ t0 = time.time()
+ rsync = tornado.process.Subprocess(cmd, stdout = tornado.process.Subprocess.STREAM, stderr = subprocess.STDOUT)
+ logger.debug("rsync[%s] started \"%s\"", rsync.pid, " ".join(cmd))
+ output = yield rsync.stdout.read_until_close()
+ pid, self.status = os.waitpid(rsync.pid, os.WNOHANG)
+ t1 = time.time()
+ if (pid, self.status) == (0, 0):
+ logger.warn("rsync[%s] Couldn't get real exit status without blocking, sorry", rsync.pid)
+ for line in output.splitlines():
+ logger.debug("rsync[%s] %s", rsync.pid, line)
+ logger.debug("rsync[%s] finished after %s seconds with status 0x%x", rsync.pid, t1 - t0, self.status)
+
+ # Should do something with rsync result and validation status database here.
+
+ retrieval = Retrieval.objects.create(
+ uri = self.uri,
+ started = rpki.sundial.datetime.fromtimestamp(t0),
+ finished = rpki.sundial.datetime.fromtimestamp(t1),
+ successful = self.status == 0)
+
+ for fn in self._rsync_walk(path):
+ yield tornado.gen.moment
+ uri = "rsync://" + fn[len(args.unauthenticated):].lstrip("/")
+ cls = uri_to_class(uri)
+ if cls is not None:
+ try:
+ with open(fn, "rb") as f:
+ cls.store_if_new(f.read(), uri, retrieval)
+ except:
+ Status.add(uri, codes.UNREADABLE_OBJECT)
+ logger.exception("Couldn't read %s from rsync tree", uri)
+
+ finally:
+ pending = self.pending
+ self.pending = None
+ pending.notify_all()
+
+ def _rsync_walk(self, path):
+ if self.uri.endswith("/"):
+ for head, dirs, files in os.walk(path):
+ for fn in files:
+ yield os.path.join(head, fn)
+ elif os.path.exists(path):
+ yield path
+
    @tornado.gen.coroutine
    def _https_fetch_url(self, url, streaming_callback = None):
        """
        Fetch one URL over HTTPS via tornado's async HTTP client.

        On success, returns (via tornado.gen.Return) a tuple of
        (retrieval, response): the Retrieval database record for this
        fetch and the tornado HTTPResponse.  On failure the original
        exception propagates to the caller (all failure paths re-raise),
        but a Retrieval record with successful = False is still written.

        If streaming_callback is given it is passed to client.fetch(),
        so body chunks go to the callback instead of response.body.

        Raises DeadHost immediately, without a network fetch, when the
        URL's host is already on the dead-host list.
        """

        if urlparse.urlparse(url).netloc in self._https_deadhosts:
            raise DeadHost

        # Should do something with deadhost processing below. Looks
        # like errors such as HTTP timeout show up as
        # tornado.httpclient.HTTPError exceptions (which we could
        # suppress if we wanted to do so, but we probably don't).
        # HTTP timeout shows up in the logs as "HTTP 599". See doc for:
        #
        #   tornado.httpclient.AsyncHTTPClient.fetch()
        #   tornado.httpclient.HTTPError

        # Might need to do something with If-Modified-Since support
        # See if_modified_since argument to
        # http://www.tornadoweb.org/en/stable/httpclient.html#request-objects
        # (which we can pass to client.fetch(), below).  Not sure how
        # "you don't need to retrieve this" result comes back,
        # probably a benign exception we need to catch.  Supporting
        # this means adding another null-able timestamp field to the
        # RRDPSnapshot model (which probably should be named the
        # RRDPZone model instead), and storing a datetime there.
        # Would also need to pull timestamp from the Last-Modified
        # header in the response object.

        try:
            # ok doubles as the "successful" flag recorded in the
            # Retrieval row created in the finally clause below.
            ok = False
            t0 = time.time()
            client = tornado.httpclient.AsyncHTTPClient(max_body_size = args.max_https_body_size)
            response = yield client.fetch(url,
                                          streaming_callback = streaming_callback,
                                          validate_cert      = args.validate_https,
                                          connect_timeout    = args.https_timeout,
                                          request_timeout    = args.https_timeout)
            # Might want to check response Content-Type here
            ok = True

        except tornado.httpclient.HTTPError as e:
            # Might want to check e.response here to figure out whether to add to _https_deadhosts.
            logger.info("HTTP error for %s: %s", url, e)
            raise

        except (socket.error, IOError, ssl.SSLError) as e:
            # Might want to check e.errno here to figure out whether to add to _https_deadhosts.
            logger.info("Network I/O error for %s: %s", url, e)
            raise

        except Exception as e:
            logger.exception("Error (%r) for %s", type(e), url)
            raise

        finally:
            # Record the attempt whether it worked or not.  The Return
            # is raised here (success only); on failure the in-flight
            # exception keeps propagating past this clause.
            t1 = time.time()
            logger.debug("Fetch of %s finished after %s seconds", url, t1 - t0)
            retrieval = Retrieval.objects.create(
                uri        = url,
                started    = rpki.sundial.datetime.fromtimestamp(t0),
                finished   = rpki.sundial.datetime.fromtimestamp(t1),
                successful = ok)
            if ok:
                raise tornado.gen.Return((retrieval, response))
+
+ @tornado.gen.coroutine
+ def _https_fetch_ta(self):
+
+ if not args.fetch:
+ return
+
+ other = self._https_history.get(self.uri)
+ if other is not None and other.pending is not None:
+ yield other.pending.wait()
+ return
+
+ self.pending = tornado.locks.Condition()
+ self._rsync_history[self.uri] = self
+
+ try:
+ retrieval, response = yield self._https_fetch_url(self.uri)
+ X509.store_if_new(response.body, self.uri, retrieval)
+ except:
+ logger.exception("Couldn't load %s", self.uri)
+
+ finally:
+ pending = self.pending
+ self.pending = None
+ pending.notify_all()
+
+ @tornado.gen.coroutine
+ def _rrdp_fetch_notification(self, url):
+
+ retrieval, response = yield self._https_fetch_url(url)
+
+ notification = ElementTree(file = response.buffer).getroot()
+
+ rpki.relaxng.rrdp.schema.assertValid(notification)
+
+ if notification.tag != tag_notification:
+ raise RRDP_ParseFailure("Expected RRDP notification for {}, got {}".format(url, notification.tag))
+
+ raise tornado.gen.Return((retrieval, notification))
+
+ @tornado.gen.coroutine
+ def _rrdp_fetch_data_file(self, url, expected_hash):
+
+ sha256 = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
+ xml_file = tempfile.SpooledTemporaryFile()
+
+ retrieval, response = yield self._https_fetch_url(url, lambda data: (sha256.update(data), xml_file.write(data)))
+
+ received_hash = sha256.digest().encode("hex")
+ xml_file.seek(0)
+
+ if received_hash != expected_hash.lower():
+ raise RRDP_ParseFailure("Expected RRDP hash {} for {}, got {}".format(expected_hash.lower(), url, received_hash))
+
+ raise tornado.gen.Return((retrieval, response, xml_file))
+
    @tornado.gen.coroutine
    def _rrdp_bulk_create(self, new_objs, existing_objs):
        """
        Bulk-insert a batch of RPKIObject instances.

        Fast path: one bulk_create() for the whole batch.  If that hits
        an IntegrityError (at least one object already exists in SQL),
        weed the batch one object at a time: anything already present is
        moved (as its primary key) onto existing_objs, the rest are
        retried as a single bulk_create.  Both lists are mutated in
        place; new_objs is emptied before returning.
        """
        from django.db import IntegrityError

        #logger.debug("Bulk creation of new RPKIObjects")

        try:
            RPKIObject.objects.bulk_create(new_objs)

        except IntegrityError:
            #logger.debug("Some objects already existed, weeding and retrying")
            # NB: i only advances when new_objs[i] is kept; deleting the
            # current element shifts the next candidate into slot i.
            i = 0
            while i < len(new_objs):
                # Let other pseudo-threads run between SQL lookups.
                yield tornado.gen.moment
                try:
                    existing_objs.append(RPKIObject.objects.values_list("pk", flat = True).get(der = new_objs[i].der))
                    logger.debug("Object existed in SQL but, apparently, not in prior copy of snapshot: uri %s sha256 %s",
                                 new_objs[i].uri, new_objs[i].sha256)
                except RPKIObject.DoesNotExist:
                    i += 1
                else:
                    del new_objs[i]
            RPKIObject.objects.bulk_create(new_objs)

        del new_objs[:]
+
    @tornado.gen.coroutine
    def _rrdp_fetch(self):
        """
        Fetch and apply RRDP data for our notification URI.

        Fetches the notification file, then either loads a complete
        snapshot (first contact with this session, or when the available
        deltas don't connect to our current serial) or applies the
        sequence of deltas from our serial up to the publication point's
        serial, fetching ahead up to args.fetch_ahead_goal delta files.
        Each delta is applied inside a database transaction so the local
        serial only advances atomically with its content.

        Concurrent fetches of the same URI are suppressed: a second
        caller waits on the first fetch's Condition instead.  All errors
        are logged rather than propagated.
        """
        from django.db import transaction

        if not args.fetch:
            return

        # Duplicate suppression: wait for an in-flight fetch of the
        # same URI rather than starting another one.
        other = self._https_history.get(self.uri)
        if other is not None and other.pending is not None:
            yield other.pending.wait()
            return

        self.pending = tornado.locks.Condition()
        self._https_history[self.uri] = self

        try:
            retrieval, notification = yield self._rrdp_fetch_notification(url = self.uri)

            session_id = notification.get("session_id")
            serial = long(notification.get("serial"))

            # Most recent snapshot we already hold for this session.
            snapshot = RRDPSnapshot.objects.filter(
                session_id = session_id).order_by("-retrieved__started").first()

            logger.debug("RRDP notification for %s session_id %s serial %s current snapshot %r",
                         self.uri, session_id, serial, snapshot)

            if snapshot is not None and snapshot.serial == serial:
                logger.debug("RRDP data for %s is up-to-date, nothing to do", self.uri)
                return

            # serial -> (uri, hash) for every delta the notification offers.
            deltas = dict((long(delta.get("serial")), (delta.get("uri"), delta.get("hash")))
                          for delta in notification.iterchildren(tag_delta))

            if snapshot is None or snapshot.serial + 1 not in deltas:

                # Snapshot path: no usable delta chain, load everything.
                existing_rpkiobject_map = dict()

                if snapshot is not None:
                    logger.debug("RRDP %s no deltas available for serial %s", self.uri, snapshot.serial)
                    existing_rpkiobject_map.update(snapshot.rpkiobject_set.values_list("sha256", "pk"))

                x = notification.find(tag_snapshot)

                url, hash = x.get("uri"), x.get("hash")

                logger.debug("RRDP %s loading from snapshot %s serial %s", self.uri, url, serial)

                retrieval, response, xml_file = yield self._rrdp_fetch_data_file(url, hash)

                snapshot = RRDPSnapshot.objects.create(session_id = session_id, serial = serial)

                # Value of "chunk" here may need to be configurable.  Larger numbers batch more objects in
                # a single bulk addition, which is faster ... unless one or more of them isn't really new, in
                # which case we have to check everything in that batch when we get the IntegrityError, so
                # the smaller the batch, the faster that check.  No single good answer.

                root = None
                existing_rpkiobjects = []
                new_rpkiobjects = []
                chunk = 2000

                # Incremental parse; the first element tells us whether
                # the document really is a snapshot for our session.
                for event, node in iterparse(xml_file):
                    if node is root:
                        continue

                    if root is None:
                        root = node.getparent()
                        if root is None or root.tag != tag_snapshot \
                           or root.get("version") != "1" \
                           or any(a not in ("version", "session_id", "serial") for a in root.attrib):
                            raise RRDP_ParseFailure("{} doesn't look like an RRDP snapshot file".format(url))
                        if root.get("session_id") != session_id:
                            raise RRDP_ParseFailure("Expected RRDP session_id {} for {}, got {}".format(
                                session_id, url, root.get("session_id")))
                        if long(root.get("serial")) != long(serial):
                            raise RRDP_ParseFailure("Expected RRDP serial {} for {}, got {}".format(
                                serial, url, root.get("serial")))

                    if node.tag != tag_publish or node.getparent() is not root \
                       or any(a != "uri" for a in node.attrib):
                        raise RRDP_ParseFailure("{} doesn't look like an RRDP snapshot file".format(url))

                    uri = node.get("uri")
                    cls = uri_to_class(uri)
                    if cls is None:
                        raise RRDP_ParseFailure("Unexpected URI {}".format(uri))

                    der = node.text.decode("base64")
                    sha256 = sha256hex(der)
                    try:
                        # Already in the prior snapshot: just remember its pk.
                        existing_rpkiobjects.append(existing_rpkiobject_map[sha256])
                    except KeyError:
                        ski, aki = cls.derRead(der).get_hex_SKI_AKI()
                        new_rpkiobjects.append(RPKIObject(der = der, uri = uri, ski = ski, aki = aki,
                                                          retrieved = retrieval, sha256 = sha256))

                    # Free parsed XML as we go to bound memory use.
                    node.clear()
                    while node.getprevious() is not None:
                        del root[0]

                    if len(new_rpkiobjects) > chunk:
                        yield self._rrdp_bulk_create(new_rpkiobjects, existing_rpkiobjects)

                    yield tornado.gen.moment

                if len(new_rpkiobjects) > 0:
                    yield self._rrdp_bulk_create(new_rpkiobjects, existing_rpkiobjects)

                # Attach both freshly-created and pre-existing objects
                # to the new snapshot via the M2M through table.
                RPKIObject.snapshot.through.objects.bulk_create([
                    RPKIObject.snapshot.through(rrdpsnapshot_id = snapshot.id, rpkiobject_id = i)
                    for i in retrieval.rpkiobject_set.values_list("pk", flat = True)])

                RPKIObject.snapshot.through.objects.bulk_create([
                    RPKIObject.snapshot.through(rrdpsnapshot_id = snapshot.id, rpkiobject_id = i)
                    for i in existing_rpkiobjects])

                # Setting .retrieved last marks the snapshot complete
                # (final_cleanup deletes snapshots with retrieved NULL).
                snapshot.retrieved = retrieval
                snapshot.save()

                xml_file.close()

            else:
                # Delta path: apply serials one at a time, fetching ahead.
                logger.debug("RRDP %s %s deltas (%s--%s)", self.uri,
                             (serial - snapshot.serial), snapshot.serial, serial)

                deltas = [(serial, deltas[serial][0], deltas[serial][1])
                          for serial in xrange(snapshot.serial + 1, serial + 1)]
                futures = []

                while deltas or futures:

                    # Keep up to fetch_ahead_goal delta fetches in flight.
                    while deltas and len(futures) < args.fetch_ahead_goal:
                        serial, url, hash = deltas.pop(0)
                        logger.debug("RRDP %s serial %s fetching %s", self.uri, serial, url)
                        futures.append(self._rrdp_fetch_data_file(url, hash))

                    retrieval, response, xml_file = yield futures.pop(0)

                    root = None

                    # One transaction per delta: serial bump and content
                    # changes commit (or roll back) together.
                    with transaction.atomic():
                        snapshot.serial += 1
                        snapshot.save()
                        logger.debug("RRDP %s serial %s loading", self.uri, snapshot.serial)

                        for event, node in iterparse(xml_file):
                            if node is root:
                                continue

                            if root is None:
                                root = node.getparent()
                                if root is None or root.tag != tag_delta \
                                   or root.get("version") != "1" \
                                   or any(a not in ("version", "session_id", "serial") for a in root.attrib):
                                    raise RRDP_ParseFailure("{} doesn't look like an RRDP delta file".format(url))
                                if root.get("session_id") != session_id:
                                    raise RRDP_ParseFailure("Expected RRDP session_id {} for {}, got {}".format(
                                        session_id, url, root.get("session_id")))
                                if long(root.get("serial")) != snapshot.serial:
                                    raise RRDP_ParseFailure("Expected RRDP serial {} for {}, got {}".format(
                                        snapshot.serial, url, root.get("serial")))

                            hash = node.get("hash")

                            if node.getparent() is not root or node.tag not in (tag_publish, tag_withdraw) \
                               or (node.tag == tag_withdraw and hash is None) \
                               or any(a not in ("uri", "hash") for a in node.attrib):
                                raise RRDP_ParseFailure("{} doesn't look like an RRDP delta file".format(url))

                            # A publish with a hash replaces an existing
                            # object, so it detaches the old one first,
                            # same as a withdraw.
                            if node.tag == tag_withdraw or node.get("hash") is not None:
                                snapshot.rpkiobject_set.remove(snapshot.rpkiobject_set.get(sha256 = node.get("hash").lower()))

                            if node.tag == tag_publish:
                                uri = node.get("uri")
                                cls = uri_to_class(uri)
                                if cls is None:
                                    raise RRDP_ParseFailure("Unexpected URI %s" % uri)
                                obj, created = cls.store_if_new(node.text.decode("base64"), uri, retrieval)
                                obj.snapshot.add(snapshot)

                            node.clear()
                            while node.getprevious() is not None:
                                del root[0]

                            #yield tornado.gen.moment

                    xml_file.close()

                logger.debug("RRDP %s done processing deltas", self.uri)

        except (tornado.httpclient.HTTPError, socket.error, IOError, ssl.SSLError):
            pass # Already logged

        except RRDP_ParseFailure as e:
            logger.info("RRDP parse failure: %s", e)

        except:
            logger.exception("Couldn't load %s", self.uri)

        finally:
            # Wake anything waiting on this fetch, success or failure.
            pending = self.pending
            self.pending = None
            pending.notify_all()
+
+
class CheckTALTask(object):
    """
    Task which processes one trust anchor locator: fetch the TA
    certificate it names, verify it against the TAL's public key, and,
    if it passes, queue a WalkTask to descend into the tree below it.
    """

    def __init__(self, uris, key):
        rsync_uri = first_rsync_uri(uris)
        https_uri = first_https_uri(uris)
        self.uri = (rsync_uri or https_uri) if args.prefer_rsync else (https_uri or rsync_uri)
        self.key = key

    def __repr__(self):
        return "<CheckTALTask: \"{}\">".format(self.uri)

    @tornado.gen.coroutine
    def __call__(self):
        # Fetch the TA URI, then try each stored candidate object until
        # one passes check(); if none does, record the TA as skipped.
        yield Fetcher(self.uri, ta = True).fetch()
        accepted = False
        for cer in fetch_objects(uri = self.uri):
            if self.check(cer):
                yield task_queue.put(WalkTask(cer = cer))
                accepted = True
                break
        if not accepted:
            Status.add(self.uri, codes.TRUST_ANCHOR_SKIPPED)

    def check(self, cer):
        # The candidate's public key must match the TAL key before we
        # bother with full certificate checks.
        if self.key.derWritePublic() == cer.getPublicKey().derWritePublic():
            ok = cer.check(trusted = None, crl = None)
        else:
            Status.add(self.uri, codes.TRUST_ANCHOR_KEY_MISMATCH)
            ok = False
        if ok:
            install_object(cer)
            Status.add(self.uri, codes.OBJECT_ACCEPTED)
        else:
            Status.add(self.uri, codes.OBJECT_REJECTED)
        return ok
+
+
@tornado.gen.coroutine
def worker(meself):
    """
    Worker pseudo-thread: loop forever pulling tasks off the global
    task queue and running them.

    NB: this control-loop style REQUIRES an except clause (even a bare
    pass would do) -- without one, a single failing task would kill the
    worker and silently shrink the pool.
    """
    while True:
        task = yield task_queue.get()
        label = repr(task)
        try:
            logger.debug("Worker %s starting %s, queue length %s", meself, label, task_queue.qsize())
            yield task()
        except:
            logger.exception("Worker %s caught unhandled exception from %s", meself, label)
        finally:
            # task_done() must run whatever happened, or join() hangs.
            task_queue.task_done()
            logger.debug("Worker %s finished %s, queue length %s", meself, label, task_queue.qsize())
+
+
def final_report():
    """
    Write the rcynic XML summary of validation results to args.xml_file.
    """
    # Where an object was eventually accepted, drop any earlier
    # rejection so the report doesn't confuse the user unnecessarily.
    for entry in Status.db.itervalues():
        if codes.OBJECT_ACCEPTED in entry.status:
            entry.status.discard(codes.OBJECT_REJECTED)

    summary = Element("rcynic-summary", date = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()))
    summary.set("reporting-hostname", socket.getfqdn())
    summary.set("rcynic-version", "rcynicng")
    summary.set("summary-version", "1")

    labels = SubElement(summary, "labels")
    for code in codes.all():
        SubElement(labels, code.name, kind = code.kind).text = code.text

    for uri in Status.db:
        entry = Status.db[uri]
        for sym in sorted(entry.status):
            elt = SubElement(summary, "validation_status",
                             timestamp  = str(entry.timestamp),
                             status     = str(sym),
                             generation = "None")  # Historical relic, remove eventually
            elt.text = uri

    # Should generate <rsync_history/> elements here too, later.

    ElementTree(summary).write(file = argparse.FileType("w")(args.xml_file),
                               pretty_print = True)
+
+
def final_cleanup():
    """
    Prune the validation database down to the current authenticated set
    plus whatever RRDP state is still reachable from it, logging table
    counts before and after.
    """
    from django.db import transaction, models

    def report(when):
        # One-line census of the four main tables.
        logger.debug("Database %s cleanup: %s Authenticated %s RRDPSnapshot %s RPKIObject %s Retrieval", when,
                     Authenticated.objects.all().count(), RRDPSnapshot.objects.all().count(),
                     RPKIObject.objects.all().count(), Retrieval.objects.all().count())

    report("before")

    with transaction.atomic():

        # Flush incomplete RRDP snapshots (retrieved never got set).
        RRDPSnapshot.objects.filter(retrieved__isnull = True).delete()

        # Flush every authenticated set except the current one.
        Authenticated.objects.exclude(id = authenticated.id).delete()

        # Flush RRDP snapshots which don't contain anything in the
        # (remaining) authenticated set.
        live_snapshot_ids = (RPKIObject.objects
                             .filter(authenticated = authenticated.id)
                             .exclude(snapshot = None)
                             .order_by("snapshot__id")
                             .values_list("snapshot__id", flat = True)
                             .distinct())
        RRDPSnapshot.objects.exclude(id__in = live_snapshot_ids).delete()

        # Flush RPKI objects which are in neither any authenticated set
        # nor any current RRDP snapshot.
        RPKIObject.objects.filter(authenticated = None).filter(snapshot = None).delete()

        # Flush retrieval records no longer referenced by any RPKI
        # object or RRDP snapshot.
        live_retrieval_ids = (RPKIObject.objects
                              .order_by("retrieved__id")
                              .values_list("retrieved__id", flat = True)
                              .distinct())
        Retrieval.objects.exclude(id__in = live_retrieval_ids).filter(rrdpsnapshot = None).delete()

    report("after")
+
+
@tornado.gen.coroutine
def launcher():
    """
    Spin up the worker pool, queue one CheckTALTask per trust anchor
    locator, then wait for the task queue to drain.
    """
    loop = tornado.ioloop.IOLoop.current()
    for worker_id in xrange(args.workers):
        loop.spawn_callback(worker, worker_id)

    yield [task_queue.put(CheckTALTask(uris, key)) for uris, key in read_tals()]
    yield task_queue.join()
+
+
class posint(int):
    """
    Integer subclass restricted to strictly positive values.

    Used as an argparse ``type`` so out-of-range values are rejected at
    parse time; the ValueError message below shows up in argparse's
    "invalid posint value" diagnostic instead of a blank reason.
    """

    def __init__(self, value):
        if self <= 0:
            raise ValueError("value must be a positive integer, got {!r}".format(value))
+
+
def main():
    """
    Program entry point: parse configuration and command line, set up
    Django and the ORM, run the validation event loop, then emit the
    XML report and clean the database.
    """
    global rpki

    # Run in UTC so database timestamps and log entries are consistent.
    os.environ.update(TZ = "UTC",
                      DJANGO_SETTINGS_MODULE = "rpki.django_settings.rcynic")
    time.tzset()

    cfg = rpki.config.argparser(section = "rcynic", doc = __doc__, cfg_optional = True)

    cfg.add_logging_arguments()

    # Typo fix in user-visible help text: "rsycnc" -> "rsync".
    cfg.add_argument("-u", "--unauthenticated",
                     help = "where to store unauthenticated data retrieved via rsync",
                     default = os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "unauthenticated"))

    cfg.add_argument("-x", "--xml-file",
                     help = "where to write XML log of validation results",
                     default = os.path.join(rpki.autoconf.RCYNIC_DIR, "data", "rcynic.xml"))

    cfg.add_argument("-t", "--trust-anchor-locators", "--tals",
                     help = "where to find trust anchor locators",
                     default = os.path.join(rpki.autoconf.sysconfdir, "rpki", "trust-anchors"))

    cfg.add_argument("-w", "--workers", type = posint,
                     help = "number of worker pseudo-threads to allow",
                     default = 10)

    cfg.add_argument("--fetch-ahead-goal", type = posint,
                     help = "how many deltas we want in the fetch-ahead pipe",
                     default = 2)

    cfg.add_argument("--https-timeout", type = posint,
                     help = "HTTPS connection timeout, in seconds",
                     default = 300)

    cfg.add_argument("--max-https-body-size", type = posint,
                     help = "upper limit on byte length of HTTPS message body",
                     default = 512 * 1024 * 1024)

    cfg.add_boolean_argument("--fetch", default = True,
                             help = "whether to fetch data at all")

    cfg.add_boolean_argument("--spawn-on-fetch", default = True,
                             help = "whether to spawn new pseudo-threads on fetch")

    cfg.add_boolean_argument("--migrate", default = True,
                             help = "whether to migrate the ORM database on startup")

    cfg.add_boolean_argument("--prefer-rsync", default = False,
                             help = "whether to prefer rsync over RRDP")

    cfg.add_boolean_argument("--validate-https", default = False,
                             help = "whether to validate HTTPS server certificates")

    global args
    args = cfg.argparser.parse_args()

    cfg.configure_logging(args = args, ident = "rcynic")

    # Django setup must happen after configuration, before any ORM use.
    import django
    django.setup()

    if args.migrate:
        # Not sure we should be doing this on every run, but sure simplifies things.
        import django.core.management
        django.core.management.call_command("migrate", verbosity = 0, interactive = False)

    # Bind the ORM model classes to module globals so the rest of the
    # program can use them without repeating the import dance.
    import rpki.rcynicdb
    global Retrieval
    global Authenticated
    global RRDPSnapshot
    global RPKIObject
    Retrieval    = rpki.rcynicdb.models.Retrieval
    Authenticated = rpki.rcynicdb.models.Authenticated
    RRDPSnapshot = rpki.rcynicdb.models.RRDPSnapshot
    RPKIObject   = rpki.rcynicdb.models.RPKIObject

    # One Authenticated row per run; its finished timestamp is set only
    # after the event loop completes.
    global authenticated
    authenticated = Authenticated.objects.create(started = rpki.sundial.datetime.now())

    global task_queue
    task_queue = tornado.queues.Queue()
    tornado.ioloop.IOLoop.current().run_sync(launcher)

    authenticated.finished = rpki.sundial.datetime.now()
    authenticated.save()

    final_report()

    final_cleanup()
+
+
# Script entry point.
if __name__ == "__main__":
    main()
diff --git a/rp/rcynic/rpki-torrent.py b/rp/rcynic/rpki-torrent.py
index 2c6aa64d..f9a3d620 100644
--- a/rp/rcynic/rpki-torrent.py
+++ b/rp/rcynic/rpki-torrent.py
@@ -46,688 +46,688 @@ import transmissionrpc
tr_env_vars = ("TR_TORRENT_DIR", "TR_TORRENT_ID", "TR_TORRENT_NAME")
class WrongServer(Exception):
- "Hostname not in X.509v3 subjectAltName extension."
+ "Hostname not in X.509v3 subjectAltName extension."
class UnexpectedRedirect(Exception):
- "Unexpected HTTP redirect."
+ "Unexpected HTTP redirect."
class WrongMode(Exception):
- "Wrong operation for mode."
+ "Wrong operation for mode."
class BadFormat(Exception):
- "Zip file does not match our expectations."
+ "Zip file does not match our expectations."
class InconsistentEnvironment(Exception):
- "Environment variables received from Transmission aren't consistent."
+ "Environment variables received from Transmission aren't consistent."
class TorrentNotReady(Exception):
- "Torrent is not ready for checking."
+ "Torrent is not ready for checking."
class TorrentDoesNotMatchManifest(Exception):
- "Retrieved torrent does not match manifest."
+ "Retrieved torrent does not match manifest."
class TorrentNameDoesNotMatchURL(Exception):
- "Torrent name doesn't uniquely match a URL."
+ "Torrent name doesn't uniquely match a URL."
class CouldNotFindTorrents(Exception):
- "Could not find torrent(s) with given name(s)."
+ "Could not find torrent(s) with given name(s)."
class UseTheSourceLuke(Exception):
- "Use The Source, Luke."
+ "Use The Source, Luke."
cfg = None
def main():
- try:
- syslog_flags = syslog.LOG_PID
- if os.isatty(sys.stderr.fileno()):
- syslog_flags |= syslog.LOG_PERROR
- syslog.openlog("rpki-torrent", syslog_flags)
-
- # If I seriously expected this script to get a lot of further use,
- # I might rewrite this using subparsers, but it'd be a bit tricky
- # as argparse doesn't support making the subparser argument
- # optional and transmission gives no sane way to provide arguments
- # when running a completion script. So, for the moment, let's
- # just fix the bugs accidently introduced while converting the
- # universe to argparse without making any radical changes to the
- # program structure here, even if the result looks kind of klunky.
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "configuration file")
- parser.add_argument("action", choices = ("poll", "generate", "mirror"), nargs = "?",
- help = "action to take")
- args = parser.parse_args()
-
- global cfg
- cfg = MyConfigParser()
- cfg.read(args.config or
- [os.path.join(dn, fn)
- for fn in ("rcynic.conf", "rpki.conf")
- for dn in ("/var/rcynic/etc", "/usr/local/etc", "/etc")])
-
- if cfg.act_as_generator:
- if args.action == "generate":
- generator_main()
- elif args.action == "mirror":
- mirror_main()
- else:
- raise UseTheSourceLuke
- else:
- if args.action is None and all(v in os.environ for v in tr_env_vars):
- torrent_completion_main()
- elif args.action == "poll":
- poll_main()
- else:
- raise UseTheSourceLuke
-
- except:
- for line in traceback.format_exc().splitlines():
- syslog.syslog(line)
- sys.exit(1)
+ try:
+ syslog_flags = syslog.LOG_PID
+ if os.isatty(sys.stderr.fileno()):
+ syslog_flags |= syslog.LOG_PERROR
+ syslog.openlog("rpki-torrent", syslog_flags)
+
+ # If I seriously expected this script to get a lot of further use,
+ # I might rewrite this using subparsers, but it'd be a bit tricky
+ # as argparse doesn't support making the subparser argument
+ # optional and transmission gives no sane way to provide arguments
+ # when running a completion script. So, for the moment, let's
+ # just fix the bugs accidently introduced while converting the
+ # universe to argparse without making any radical changes to the
+ # program structure here, even if the result looks kind of klunky.
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("-c", "--config",
+ help = "configuration file")
+ parser.add_argument("action", choices = ("poll", "generate", "mirror"), nargs = "?",
+ help = "action to take")
+ args = parser.parse_args()
+
+ global cfg
+ cfg = MyConfigParser()
+ cfg.read(args.config or
+ [os.path.join(dn, fn)
+ for fn in ("rcynic.conf", "rpki.conf")
+ for dn in ("/var/rcynic/etc", "/usr/local/etc", "/etc")])
+
+ if cfg.act_as_generator:
+ if args.action == "generate":
+ generator_main()
+ elif args.action == "mirror":
+ mirror_main()
+ else:
+ raise UseTheSourceLuke
+ else:
+ if args.action is None and all(v in os.environ for v in tr_env_vars):
+ torrent_completion_main()
+ elif args.action == "poll":
+ poll_main()
+ else:
+ raise UseTheSourceLuke
+
+ except:
+ for line in traceback.format_exc().splitlines():
+ syslog.syslog(line)
+ sys.exit(1)
def generator_main():
- import paramiko
-
- class SFTPClient(paramiko.SFTPClient):
- def atomic_rename(self, oldpath, newpath):
- oldpath = self._adjust_cwd(oldpath)
- newpath = self._adjust_cwd(newpath)
- self._log(paramiko.common.DEBUG, 'atomic_rename(%r, %r)' % (oldpath, newpath))
- self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath)
-
- z = ZipFile(url = cfg.generate_url, dn = cfg.zip_dir)
- client = TransmissionClient()
-
- client.remove_torrents(z.torrent_name)
-
- download_dir = client.get_session().download_dir
- torrent_dir = os.path.join(download_dir, z.torrent_name)
- torrent_file = os.path.join(cfg.zip_dir, z.torrent_name + ".torrent")
-
-
- syslog.syslog("Synchronizing local data from %s to %s" % (cfg.unauthenticated, torrent_dir))
- subprocess.check_call((cfg.rsync_prog, "--archive", "--delete",
- os.path.normpath(cfg.unauthenticated) + "/",
- os.path.normpath(torrent_dir) + "/"))
-
- syslog.syslog("Creating %s" % torrent_file)
- try:
- os.unlink(torrent_file)
- except OSError, e:
- if e.errno != errno.ENOENT:
- raise
- ignore_output_for_now = subprocess.check_output( # pylint: disable=W0612
- (cfg.mktorrent_prog,
- "-a", cfg.tracker_url,
- "-c", "RPKI unauthenticated data snapshot generated by rpki-torrent",
- "-o", torrent_file,
- torrent_dir))
-
- syslog.syslog("Generating manifest")
- manifest = create_manifest(download_dir, z.torrent_name)
-
- syslog.syslog("Loading %s with unlimited seeding" % torrent_file)
- f = open(torrent_file, "rb")
- client.add(base64.b64encode(f.read()))
- f.close()
- client.unlimited_seeding(z.torrent_name)
-
- syslog.syslog("Creating upload connection")
- ssh = paramiko.Transport((cfg.sftp_host, cfg.sftp_port))
- try:
- hostkeys = paramiko.util.load_host_keys(cfg.sftp_hostkey_file)[cfg.sftp_host]["ssh-rsa"]
- except ConfigParser.Error:
- hostkeys = None
- ssh.connect(
- username = cfg.sftp_user,
- hostkey = hostkeys,
- pkey = paramiko.RSAKey.from_private_key_file(cfg.sftp_private_key_file))
- sftp = SFTPClient.from_transport(ssh)
-
- zip_filename = os.path.join("data", os.path.basename(z.filename))
- zip_tempname = zip_filename + ".new"
-
- syslog.syslog("Creating %s" % zip_tempname)
- f = sftp.open(zip_tempname, "wb")
- z.set_output_stream(f)
-
- syslog.syslog("Writing %s to zip" % torrent_file)
- z.write(
- torrent_file,
- arcname = os.path.basename(torrent_file),
- compress_type = zipfile.ZIP_DEFLATED)
-
- manifest_name = z.torrent_name + ".manifest"
-
- syslog.syslog("Writing %s to zip" % manifest_name)
- zi = zipfile.ZipInfo(manifest_name, time.gmtime()[:6])
- zi.external_attr = (stat.S_IFREG | 0644) << 16
- zi.internal_attr = 1 # Text, not binary
- z.writestr(zi,
- "".join("%s %s\n" % (v, k) for k, v in manifest.iteritems()),
- zipfile.ZIP_DEFLATED)
-
- syslog.syslog("Closing %s and renaming to %s" % (zip_tempname, zip_filename))
- z.close()
- f.close()
- sftp.atomic_rename(zip_tempname, zip_filename)
-
- syslog.syslog("Closing upload connection")
- ssh.close()
-
-def mirror_main():
- client = TransmissionClient()
- torrent_names = []
-
- for zip_url in cfg.zip_urls:
- if zip_url != cfg.generate_url:
- z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta)
- if z.fetch():
- client.remove_torrents(z.torrent_name)
- syslog.syslog("Mirroring torrent %s" % z.torrent_name)
- client.add(z.get_torrent())
- torrent_names.append(z.torrent_name)
-
- if torrent_names:
- client.unlimited_seeding(*torrent_names)
+ import paramiko
+ class SFTPClient(paramiko.SFTPClient):
+ def atomic_rename(self, oldpath, newpath):
+ oldpath = self._adjust_cwd(oldpath)
+ newpath = self._adjust_cwd(newpath)
+ self._log(paramiko.common.DEBUG, 'atomic_rename(%r, %r)' % (oldpath, newpath))
+ self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath)
-def poll_main():
- for zip_url in cfg.zip_urls:
-
- z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta)
+ z = ZipFile(url = cfg.generate_url, dn = cfg.zip_dir)
client = TransmissionClient()
- if z.fetch():
- client.remove_torrents(z.torrent_name)
- syslog.syslog("Adding torrent %s" % z.torrent_name)
- client.add(z.get_torrent())
-
- elif cfg.run_rcynic_anyway:
- run_rcynic(client, z)
-
-
-def torrent_completion_main():
- torrent_name = os.getenv("TR_TORRENT_NAME")
- torrent_id = int(os.getenv("TR_TORRENT_ID"))
-
- z = ZipFile(url = cfg.find_url(torrent_name), dn = cfg.zip_dir, ta = cfg.zip_ta)
- client = TransmissionClient()
- torrent = client.info([torrent_id]).popitem()[1]
+ client.remove_torrents(z.torrent_name)
- if torrent.name != torrent_name:
- raise InconsistentEnvironment("Torrent name %s does not match ID %d" % (torrent_name, torrent_id))
+ download_dir = client.get_session().download_dir
+ torrent_dir = os.path.join(download_dir, z.torrent_name)
+ torrent_file = os.path.join(cfg.zip_dir, z.torrent_name + ".torrent")
- if z.torrent_name != torrent_name:
- raise InconsistentEnvironment("Torrent name %s does not match torrent name in zip file %s" % (torrent_name, z.torrent_name))
- if torrent is None or torrent.progress != 100:
- raise TorrentNotReady("Torrent %s not ready for checking, how did I get here?" % torrent_name)
+ syslog.syslog("Synchronizing local data from %s to %s" % (cfg.unauthenticated, torrent_dir))
+ subprocess.check_call((cfg.rsync_prog, "--archive", "--delete",
+ os.path.normpath(cfg.unauthenticated) + "/",
+ os.path.normpath(torrent_dir) + "/"))
- log_email("Download complete %s" % z.url)
-
- run_rcynic(client, z)
-
-
-def run_rcynic(client, z):
- """
- Run rcynic and any post-processing we might want.
- """
-
- if cfg.lockfile is not None:
- syslog.syslog("Acquiring lock %s" % cfg.lockfile)
- lock = os.open(cfg.lockfile, os.O_WRONLY | os.O_CREAT, 0600)
- fcntl.flock(lock, fcntl.LOCK_EX)
- else:
- lock = None
-
- syslog.syslog("Checking manifest against disk")
-
- download_dir = client.get_session().download_dir
-
- manifest_from_disk = create_manifest(download_dir, z.torrent_name)
- manifest_from_zip = z.get_manifest()
+ syslog.syslog("Creating %s" % torrent_file)
+ try:
+ os.unlink(torrent_file)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ ignore_output_for_now = subprocess.check_output( # pylint: disable=W0612
+ (cfg.mktorrent_prog,
+ "-a", cfg.tracker_url,
+ "-c", "RPKI unauthenticated data snapshot generated by rpki-torrent",
+ "-o", torrent_file,
+ torrent_dir))
+
+ syslog.syslog("Generating manifest")
+ manifest = create_manifest(download_dir, z.torrent_name)
+
+ syslog.syslog("Loading %s with unlimited seeding" % torrent_file)
+ f = open(torrent_file, "rb")
+ client.add(base64.b64encode(f.read()))
+ f.close()
+ client.unlimited_seeding(z.torrent_name)
- excess_files = set(manifest_from_disk) - set(manifest_from_zip)
- for fn in excess_files:
- del manifest_from_disk[fn]
+ syslog.syslog("Creating upload connection")
+ ssh = paramiko.Transport((cfg.sftp_host, cfg.sftp_port))
+ try:
+ hostkeys = paramiko.util.load_host_keys(cfg.sftp_hostkey_file)[cfg.sftp_host]["ssh-rsa"]
+ except ConfigParser.Error:
+ hostkeys = None
+ ssh.connect(
+ username = cfg.sftp_user,
+ hostkey = hostkeys,
+ pkey = paramiko.RSAKey.from_private_key_file(cfg.sftp_private_key_file))
+ sftp = SFTPClient.from_transport(ssh)
+
+ zip_filename = os.path.join("data", os.path.basename(z.filename))
+ zip_tempname = zip_filename + ".new"
+
+ syslog.syslog("Creating %s" % zip_tempname)
+ f = sftp.open(zip_tempname, "wb")
+ z.set_output_stream(f)
+
+ syslog.syslog("Writing %s to zip" % torrent_file)
+ z.write(
+ torrent_file,
+ arcname = os.path.basename(torrent_file),
+ compress_type = zipfile.ZIP_DEFLATED)
+
+ manifest_name = z.torrent_name + ".manifest"
+
+ syslog.syslog("Writing %s to zip" % manifest_name)
+ zi = zipfile.ZipInfo(manifest_name, time.gmtime()[:6])
+ zi.external_attr = (stat.S_IFREG | 0644) << 16
+ zi.internal_attr = 1 # Text, not binary
+ z.writestr(zi,
+ "".join("%s %s\n" % (v, k) for k, v in manifest.iteritems()),
+ zipfile.ZIP_DEFLATED)
+
+ syslog.syslog("Closing %s and renaming to %s" % (zip_tempname, zip_filename))
+ z.close()
+ f.close()
+ sftp.atomic_rename(zip_tempname, zip_filename)
- if manifest_from_disk != manifest_from_zip:
- raise TorrentDoesNotMatchManifest("Manifest for torrent %s does not match what we got" %
- z.torrent_name)
+ syslog.syslog("Closing upload connection")
+ ssh.close()
- if excess_files:
- syslog.syslog("Cleaning up excess files")
- for fn in excess_files:
- os.unlink(os.path.join(download_dir, fn))
+def mirror_main():
+ client = TransmissionClient()
+ torrent_names = []
- syslog.syslog("Running rcynic")
- log_email("Starting rcynic %s" % z.url)
- subprocess.check_call((cfg.rcynic_prog,
- "-c", cfg.rcynic_conf,
- "-u", os.path.join(client.get_session().download_dir, z.torrent_name)))
- log_email("Completed rcynic %s" % z.url)
+ for zip_url in cfg.zip_urls:
+ if zip_url != cfg.generate_url:
+ z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta)
+ if z.fetch():
+ client.remove_torrents(z.torrent_name)
+ syslog.syslog("Mirroring torrent %s" % z.torrent_name)
+ client.add(z.get_torrent())
+ torrent_names.append(z.torrent_name)
- for cmd in cfg.post_rcynic_commands:
- syslog.syslog("Running post-rcynic command: %s" % cmd)
- subprocess.check_call(cmd, shell = True)
+ if torrent_names:
+ client.unlimited_seeding(*torrent_names)
- if lock is not None:
- syslog.syslog("Releasing lock %s" % cfg.lockfile)
- os.close(lock)
-# See http://www.minstrel.org.uk/papers/sftp/ for details on how to
-# set up safe upload-only SFTP directories on the server. In
-# particular http://www.minstrel.org.uk/papers/sftp/builtin/ is likely
-# to be the right path.
+def poll_main():
+ for zip_url in cfg.zip_urls:
+ z = ZipFile(url = zip_url, dn = cfg.zip_dir, ta = cfg.zip_ta)
+ client = TransmissionClient()
-class ZipFile(object):
- """
- Augmented version of standard python zipfile.ZipFile class, with
- some extra methods and specialized capabilities.
-
- All methods of the standard zipfile.ZipFile class are supported, but
- the constructor arguments are different, and opening the zip file
- itself is deferred until a call which requires this, since the file
- may first need to be fetched via HTTPS.
- """
-
- def __init__(self, url, dn, ta = None, verbose = True):
- self.url = url
- self.dir = dn
- self.ta = ta
- self.verbose = verbose
- self.filename = os.path.join(dn, os.path.basename(url))
- self.changed = False
- self.zf = None
- self.peercert = None
- self.torrent_name, zip_ext = os.path.splitext(os.path.basename(url))
- if zip_ext != ".zip":
- raise BadFormat
-
-
- def __getattr__(self, name):
- if self.zf is None:
- self.zf = zipfile.ZipFile(self.filename)
- return getattr(self.zf, name)
-
-
- def build_opener(self):
- """
- Voodoo to create a urllib2.OpenerDirector object with TLS
- certificate checking enabled and a hook to set self.peercert so
- our caller can check the subjectAltName field.
+ if z.fetch():
+ client.remove_torrents(z.torrent_name)
+ syslog.syslog("Adding torrent %s" % z.torrent_name)
+ client.add(z.get_torrent())
- You probably don't want to look at this if you can avoid it.
- """
+ elif cfg.run_rcynic_anyway:
+ run_rcynic(client, z)
- assert self.ta is not None
- # Yes, we're constructing one-off classes. Look away, look away.
+def torrent_completion_main():
+ torrent_name = os.getenv("TR_TORRENT_NAME")
+ torrent_id = int(os.getenv("TR_TORRENT_ID"))
- class HTTPSConnection(httplib.HTTPSConnection):
- zip = self
- def connect(self):
- sock = socket.create_connection((self.host, self.port), self.timeout)
- if getattr(self, "_tunnel_host", None):
- self.sock = sock
- self._tunnel()
- self.sock = ssl.wrap_socket(sock,
- keyfile = self.key_file,
- certfile = self.cert_file,
- cert_reqs = ssl.CERT_REQUIRED,
- ssl_version = ssl.PROTOCOL_TLSv1,
- ca_certs = self.zip.ta)
- self.zip.peercert = self.sock.getpeercert()
+ z = ZipFile(url = cfg.find_url(torrent_name), dn = cfg.zip_dir, ta = cfg.zip_ta)
+ client = TransmissionClient()
+ torrent = client.info([torrent_id]).popitem()[1]
- class HTTPSHandler(urllib2.HTTPSHandler):
- def https_open(self, req):
- return self.do_open(HTTPSConnection, req)
+ if torrent.name != torrent_name:
+ raise InconsistentEnvironment("Torrent name %s does not match ID %d" % (torrent_name, torrent_id))
- return urllib2.build_opener(HTTPSHandler)
+ if z.torrent_name != torrent_name:
+ raise InconsistentEnvironment("Torrent name %s does not match torrent name in zip file %s" % (torrent_name, z.torrent_name))
+ if torrent is None or torrent.progress != 100:
+ raise TorrentNotReady("Torrent %s not ready for checking, how did I get here?" % torrent_name)
- def check_subjectAltNames(self):
- """
- Check self.peercert against URL to make sure we were talking to
- the right HTTPS server.
- """
+ log_email("Download complete %s" % z.url)
- hostname = urlparse.urlparse(self.url).hostname
- subjectAltNames = set(i[1]
- for i in self.peercert.get("subjectAltName", ())
- if i[0] == "DNS")
- if hostname not in subjectAltNames:
- raise WrongServer
+ run_rcynic(client, z)
- def download_file(self, r, bufsize = 4096):
+def run_rcynic(client, z):
"""
- Downloaded file to disk.
+ Run rcynic and any post-processing we might want.
"""
- tempname = self.filename + ".new"
- f = open(tempname, "wb")
- n = int(r.info()["Content-Length"])
- for i in xrange(0, n - bufsize, bufsize): # pylint: disable=W0612
- f.write(r.read(bufsize))
- f.write(r.read())
- f.close()
- mtime = email.utils.mktime_tz(email.utils.parsedate_tz(r.info()["Last-Modified"]))
- os.utime(tempname, (mtime, mtime))
- os.rename(tempname, self.filename)
+ if cfg.lockfile is not None:
+ syslog.syslog("Acquiring lock %s" % cfg.lockfile)
+ lock = os.open(cfg.lockfile, os.O_WRONLY | os.O_CREAT, 0600)
+ fcntl.flock(lock, fcntl.LOCK_EX)
+ else:
+ lock = None
+ syslog.syslog("Checking manifest against disk")
- def set_output_stream(self, stream):
- """
- Set up this zip file for writing to a network stream.
- """
+ download_dir = client.get_session().download_dir
- assert self.zf is None
- self.zf = zipfile.ZipFile(stream, "w")
+ manifest_from_disk = create_manifest(download_dir, z.torrent_name)
+ manifest_from_zip = z.get_manifest()
+ excess_files = set(manifest_from_disk) - set(manifest_from_zip)
+ for fn in excess_files:
+ del manifest_from_disk[fn]
- def fetch(self):
- """
- Fetch zip file from URL given to constructor.
- """
+ if manifest_from_disk != manifest_from_zip:
+ raise TorrentDoesNotMatchManifest("Manifest for torrent %s does not match what we got" %
+ z.torrent_name)
- headers = { "User-Agent" : "rpki-torrent" }
- try:
- headers["If-Modified-Since"] = email.utils.formatdate(
- os.path.getmtime(self.filename), False, True)
- except OSError:
- pass
+ if excess_files:
+ syslog.syslog("Cleaning up excess files")
+ for fn in excess_files:
+ os.unlink(os.path.join(download_dir, fn))
- syslog.syslog("Checking %s..." % self.url)
- try:
- r = self.build_opener().open(urllib2.Request(self.url, None, headers))
- syslog.syslog("%s has changed, starting download" % self.url)
- self.changed = True
- log_email("Downloading %s" % self.url)
- except urllib2.HTTPError, e:
- if e.code == 304:
- syslog.syslog("%s has not changed" % self.url)
- elif e.code == 404:
- syslog.syslog("%s does not exist" % self.url)
- else:
- raise
- r = None
-
- self.check_subjectAltNames()
+ syslog.syslog("Running rcynic")
+ log_email("Starting rcynic %s" % z.url)
+ subprocess.check_call((cfg.rcynic_prog,
+ "-c", cfg.rcynic_conf,
+ "-u", os.path.join(client.get_session().download_dir, z.torrent_name)))
+ log_email("Completed rcynic %s" % z.url)
- if r is not None and r.geturl() != self.url:
- raise UnexpectedRedirect
+ for cmd in cfg.post_rcynic_commands:
+ syslog.syslog("Running post-rcynic command: %s" % cmd)
+ subprocess.check_call(cmd, shell = True)
- if r is not None:
- self.download_file(r)
- r.close()
+ if lock is not None:
+ syslog.syslog("Releasing lock %s" % cfg.lockfile)
+ os.close(lock)
- return self.changed
+# See http://www.minstrel.org.uk/papers/sftp/ for details on how to
+# set up safe upload-only SFTP directories on the server. In
+# particular http://www.minstrel.org.uk/papers/sftp/builtin/ is likely
+# to be the right path.
- def check_format(self):
- """
- Make sure that format of zip file matches our preconceptions: it
- should contain two files, one of which is the .torrent file, the
- other is the manifest, with names derived from the torrent name
- inferred from the URL.
+class ZipFile(object):
"""
+ Augmented version of standard python zipfile.ZipFile class, with
+ some extra methods and specialized capabilities.
- if set(self.namelist()) != set((self.torrent_name + ".torrent", self.torrent_name + ".manifest")):
- raise BadFormat
-
-
- def get_torrent(self):
- """
- Extract torrent file from zip file, encoded in Base64 because
- that's what the transmisionrpc library says it wants.
+ All methods of the standard zipfile.ZipFile class are supported, but
+ the constructor arguments are different, and opening the zip file
+ itself is deferred until a call which requires this, since the file
+ may first need to be fetched via HTTPS.
"""
- self.check_format()
- return base64.b64encode(self.read(self.torrent_name + ".torrent"))
+ def __init__(self, url, dn, ta = None, verbose = True):
+ self.url = url
+ self.dir = dn
+ self.ta = ta
+ self.verbose = verbose
+ self.filename = os.path.join(dn, os.path.basename(url))
+ self.changed = False
+ self.zf = None
+ self.peercert = None
+ self.torrent_name, zip_ext = os.path.splitext(os.path.basename(url))
+ if zip_ext != ".zip":
+ raise BadFormat
+
+
+ def __getattr__(self, name):
+ if self.zf is None:
+ self.zf = zipfile.ZipFile(self.filename)
+ return getattr(self.zf, name)
+
+
+ def build_opener(self):
+ """
+ Voodoo to create a urllib2.OpenerDirector object with TLS
+ certificate checking enabled and a hook to set self.peercert so
+ our caller can check the subjectAltName field.
+
+ You probably don't want to look at this if you can avoid it.
+ """
+
+ assert self.ta is not None
+
+ # Yes, we're constructing one-off classes. Look away, look away.
+
+ class HTTPSConnection(httplib.HTTPSConnection):
+ zip = self
+ def connect(self):
+ sock = socket.create_connection((self.host, self.port), self.timeout)
+ if getattr(self, "_tunnel_host", None):
+ self.sock = sock
+ self._tunnel()
+ self.sock = ssl.wrap_socket(sock,
+ keyfile = self.key_file,
+ certfile = self.cert_file,
+ cert_reqs = ssl.CERT_REQUIRED,
+ ssl_version = ssl.PROTOCOL_TLSv1,
+ ca_certs = self.zip.ta)
+ self.zip.peercert = self.sock.getpeercert()
+
+ class HTTPSHandler(urllib2.HTTPSHandler):
+ def https_open(self, req):
+ return self.do_open(HTTPSConnection, req)
+
+ return urllib2.build_opener(HTTPSHandler)
+
+
+ def check_subjectAltNames(self):
+ """
+ Check self.peercert against URL to make sure we were talking to
+ the right HTTPS server.
+ """
+
+ hostname = urlparse.urlparse(self.url).hostname
+ subjectAltNames = set(i[1]
+ for i in self.peercert.get("subjectAltName", ())
+ if i[0] == "DNS")
+ if hostname not in subjectAltNames:
+ raise WrongServer
+
+
+ def download_file(self, r, bufsize = 4096):
+ """
+ Downloaded file to disk.
+ """
+
+ tempname = self.filename + ".new"
+ f = open(tempname, "wb")
+ n = int(r.info()["Content-Length"])
+ for i in xrange(0, n - bufsize, bufsize): # pylint: disable=W0612
+ f.write(r.read(bufsize))
+ f.write(r.read())
+ f.close()
+ mtime = email.utils.mktime_tz(email.utils.parsedate_tz(r.info()["Last-Modified"]))
+ os.utime(tempname, (mtime, mtime))
+ os.rename(tempname, self.filename)
+
+
+ def set_output_stream(self, stream):
+ """
+ Set up this zip file for writing to a network stream.
+ """
+
+ assert self.zf is None
+ self.zf = zipfile.ZipFile(stream, "w")
+
+
+ def fetch(self):
+ """
+ Fetch zip file from URL given to constructor.
+ """
+
+ headers = { "User-Agent" : "rpki-torrent" }
+ try:
+ headers["If-Modified-Since"] = email.utils.formatdate(
+ os.path.getmtime(self.filename), False, True)
+ except OSError:
+ pass
+
+ syslog.syslog("Checking %s..." % self.url)
+ try:
+ r = self.build_opener().open(urllib2.Request(self.url, None, headers))
+ syslog.syslog("%s has changed, starting download" % self.url)
+ self.changed = True
+ log_email("Downloading %s" % self.url)
+ except urllib2.HTTPError, e:
+ if e.code == 304:
+ syslog.syslog("%s has not changed" % self.url)
+ elif e.code == 404:
+ syslog.syslog("%s does not exist" % self.url)
+ else:
+ raise
+ r = None
+
+ self.check_subjectAltNames()
+
+ if r is not None and r.geturl() != self.url:
+ raise UnexpectedRedirect
+
+ if r is not None:
+ self.download_file(r)
+ r.close()
+
+ return self.changed
+
+
+ def check_format(self):
+ """
+ Make sure that format of zip file matches our preconceptions: it
+ should contain two files, one of which is the .torrent file, the
+ other is the manifest, with names derived from the torrent name
+ inferred from the URL.
+ """
+
+ if set(self.namelist()) != set((self.torrent_name + ".torrent", self.torrent_name + ".manifest")):
+ raise BadFormat
+
+
+ def get_torrent(self):
+ """
+ Extract torrent file from zip file, encoded in Base64 because
+ that's what the transmisionrpc library says it wants.
+ """
+
+ self.check_format()
+ return base64.b64encode(self.read(self.torrent_name + ".torrent"))
+
+
+ def get_manifest(self):
+ """
+ Extract manifest from zip file, as a dictionary.
+
+ For the moment we're fixing up the internal file names from the
+ format that the existing shell-script prototype uses, but this
+ should go away once this program both generates and checks the
+ manifests.
+ """
+
+ self.check_format()
+ result = {}
+ for line in self.open(self.torrent_name + ".manifest"):
+ h, fn = line.split()
+ #
+ # Fixup for earlier manifest format, this should go away
+ if not fn.startswith(self.torrent_name):
+ fn = os.path.normpath(os.path.join(self.torrent_name, fn))
+ #
+ result[fn] = h
+ return result
- def get_manifest(self):
+def create_manifest(topdir, torrent_name):
"""
- Extract manifest from zip file, as a dictionary.
-
- For the moment we're fixing up the internal file names from the
- format that the existing shell-script prototype uses, but this
- should go away once this program both generates and checks the
- manifests.
+ Generate a manifest, expressed as a dictionary.
"""
- self.check_format()
result = {}
- for line in self.open(self.torrent_name + ".manifest"):
- h, fn = line.split()
- #
- # Fixup for earlier manifest format, this should go away
- if not fn.startswith(self.torrent_name):
- fn = os.path.normpath(os.path.join(self.torrent_name, fn))
- #
- result[fn] = h
+ topdir = os.path.abspath(topdir)
+ for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, torrent_name)): # pylint: disable=W0612
+ for filename in filenames:
+ filename = os.path.join(dirpath, filename)
+ f = open(filename, "rb")
+ result[os.path.relpath(filename, topdir)] = hashlib.sha256(f.read()).hexdigest()
+ f.close()
return result
-def create_manifest(topdir, torrent_name):
- """
- Generate a manifest, expressed as a dictionary.
- """
-
- result = {}
- topdir = os.path.abspath(topdir)
- for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, torrent_name)): # pylint: disable=W0612
- for filename in filenames:
- filename = os.path.join(dirpath, filename)
- f = open(filename, "rb")
- result[os.path.relpath(filename, topdir)] = hashlib.sha256(f.read()).hexdigest()
- f.close()
- return result
-
-
def log_email(msg, subj = None):
- try:
- if not msg.endswith("\n"):
- msg += "\n"
- if subj is None:
- subj = msg.partition("\n")[0]
- m = email.mime.text.MIMEText(msg)
- m["Date"] = time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime())
- m["From"] = cfg.log_email
- m["To"] = cfg.log_email
- m["Subject"] = subj
- s = smtplib.SMTP("localhost")
- s.sendmail(cfg.log_email, [cfg.log_email], m.as_string())
- s.quit()
- except ConfigParser.Error:
- pass
+ try:
+ if not msg.endswith("\n"):
+ msg += "\n"
+ if subj is None:
+ subj = msg.partition("\n")[0]
+ m = email.mime.text.MIMEText(msg)
+ m["Date"] = time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime())
+ m["From"] = cfg.log_email
+ m["To"] = cfg.log_email
+ m["Subject"] = subj
+ s = smtplib.SMTP("localhost")
+ s.sendmail(cfg.log_email, [cfg.log_email], m.as_string())
+ s.quit()
+ except ConfigParser.Error:
+ pass
class TransmissionClient(transmissionrpc.client.Client):
- """
- Extension of transmissionrpc.client.Client.
- """
-
- def __init__(self, **kwargs):
- kwargs.setdefault("address", "127.0.0.1")
- kwargs.setdefault("user", cfg.transmission_username)
- kwargs.setdefault("password", cfg.transmission_password)
- transmissionrpc.client.Client.__init__(self, **kwargs)
-
-
- def find_torrents(self, *names):
- """
- Find torrents with given name(s), return id(s).
- """
-
- result = [i for i, t in self.list().iteritems() if t.name in names]
- if not result:
- raise CouldNotFindTorrents
- return result
-
-
- def remove_torrents(self, *names):
"""
- Remove any torrents with the given name(s).
+ Extension of transmissionrpc.client.Client.
"""
- try:
- ids = self.find_torrents(*names)
- except CouldNotFindTorrents:
- pass
- else:
- syslog.syslog("Removing torrent%s %s (%s)" % (
- "" if len(ids) == 1 else "s",
- ", ".join(names),
- ", ".join("#%s" % i for i in ids)))
- self.remove(ids)
+ def __init__(self, **kwargs):
+ kwargs.setdefault("address", "127.0.0.1")
+ kwargs.setdefault("user", cfg.transmission_username)
+ kwargs.setdefault("password", cfg.transmission_password)
+ transmissionrpc.client.Client.__init__(self, **kwargs)
- def unlimited_seeding(self, *names):
- """
- Set unlimited seeding for specified torrents.
- """
- # Apparently seedRatioMode = 2 means "no limit"
- try:
- self.change(self.find_torrents(*names), seedRatioMode = 2)
- except CouldNotFindTorrents:
- syslog.syslog("Couldn't tweak seedRatioMode, blundering onwards")
+ def find_torrents(self, *names):
+ """
+ Find torrents with given name(s), return id(s).
+ """
+ result = [i for i, t in self.list().iteritems() if t.name in names]
+ if not result:
+ raise CouldNotFindTorrents
+ return result
-class MyConfigParser(ConfigParser.RawConfigParser):
- rpki_torrent_section = "rpki-torrent"
+ def remove_torrents(self, *names):
+ """
+ Remove any torrents with the given name(s).
+ """
- @property
- def zip_dir(self):
- return self.get(self.rpki_torrent_section, "zip_dir")
+ try:
+ ids = self.find_torrents(*names)
+ except CouldNotFindTorrents:
+ pass
+ else:
+ syslog.syslog("Removing torrent%s %s (%s)" % (
+ "" if len(ids) == 1 else "s",
+ ", ".join(names),
+ ", ".join("#%s" % i for i in ids)))
+ self.remove(ids)
- @property
- def zip_ta(self):
- return self.get(self.rpki_torrent_section, "zip_ta")
+ def unlimited_seeding(self, *names):
+ """
+ Set unlimited seeding for specified torrents.
+ """
- @property
- def rcynic_prog(self):
- return self.get(self.rpki_torrent_section, "rcynic_prog")
+ # Apparently seedRatioMode = 2 means "no limit"
+ try:
+ self.change(self.find_torrents(*names), seedRatioMode = 2)
+ except CouldNotFindTorrents:
+ syslog.syslog("Couldn't tweak seedRatioMode, blundering onwards")
- @property
- def rcynic_conf(self):
- return self.get(self.rpki_torrent_section, "rcynic_conf")
- @property
- def run_rcynic_anyway(self):
- return self.getboolean(self.rpki_torrent_section, "run_rcynic_anyway")
-
- @property
- def generate_url(self):
- return self.get(self.rpki_torrent_section, "generate_url")
-
- @property
- def act_as_generator(self):
- try:
- return self.get(self.rpki_torrent_section, "generate_url") != ""
- except ConfigParser.Error:
- return False
-
- @property
- def rsync_prog(self):
- return self.get(self.rpki_torrent_section, "rsync_prog")
-
- @property
- def mktorrent_prog(self):
- return self.get(self.rpki_torrent_section, "mktorrent_prog")
-
- @property
- def tracker_url(self):
- return self.get(self.rpki_torrent_section, "tracker_url")
-
- @property
- def sftp_host(self):
- return self.get(self.rpki_torrent_section, "sftp_host")
-
- @property
- def sftp_port(self):
- try:
- return self.getint(self.rpki_torrent_section, "sftp_port")
- except ConfigParser.Error:
- return 22
-
- @property
- def sftp_user(self):
- return self.get(self.rpki_torrent_section, "sftp_user")
-
- @property
- def sftp_hostkey_file(self):
- return self.get(self.rpki_torrent_section, "sftp_hostkey_file")
-
- @property
- def sftp_private_key_file(self):
- return self.get(self.rpki_torrent_section, "sftp_private_key_file")
-
- @property
- def lockfile(self):
- try:
- return self.get(self.rpki_torrent_section, "lockfile")
- except ConfigParser.Error:
- return None
-
- @property
- def unauthenticated(self):
- try:
- return self.get(self.rpki_torrent_section, "unauthenticated")
- except ConfigParser.Error:
- return self.get("rcynic", "unauthenticated")
-
- @property
- def log_email(self):
- return self.get(self.rpki_torrent_section, "log_email")
-
- @property
- def transmission_username(self):
- try:
- return self.get(self.rpki_torrent_section, "transmission_username")
- except ConfigParser.Error:
- return None
+class MyConfigParser(ConfigParser.RawConfigParser):
- @property
- def transmission_password(self):
- try:
- return self.get(self.rpki_torrent_section, "transmission_password")
- except ConfigParser.Error:
- return None
-
- def multioption_iter(self, name, getter = None):
- if getter is None:
- getter = self.get
- if self.has_option(self.rpki_torrent_section, name):
- yield getter(self.rpki_torrent_section, name)
- name += "."
- names = [i for i in self.options(self.rpki_torrent_section) if i.startswith(name) and i[len(name):].isdigit()]
- names.sort(key = lambda s: int(s[len(name):])) # pylint: disable=W0631
- for name in names:
- yield getter(self.rpki_torrent_section, name)
-
- @property
- def zip_urls(self):
- return self.multioption_iter("zip_url")
-
- @property
- def post_rcynic_commands(self):
- return self.multioption_iter("post_rcynic_command")
-
- def find_url(self, torrent_name):
- urls = [u for u in self.zip_urls
- if os.path.splitext(os.path.basename(u))[0] == torrent_name]
- if len(urls) != 1:
- raise TorrentNameDoesNotMatchURL("Can't find URL matching torrent name %s" % torrent_name)
- return urls[0]
+ rpki_torrent_section = "rpki-torrent"
+
+ @property
+ def zip_dir(self):
+ return self.get(self.rpki_torrent_section, "zip_dir")
+
+ @property
+ def zip_ta(self):
+ return self.get(self.rpki_torrent_section, "zip_ta")
+
+ @property
+ def rcynic_prog(self):
+ return self.get(self.rpki_torrent_section, "rcynic_prog")
+
+ @property
+ def rcynic_conf(self):
+ return self.get(self.rpki_torrent_section, "rcynic_conf")
+
+ @property
+ def run_rcynic_anyway(self):
+ return self.getboolean(self.rpki_torrent_section, "run_rcynic_anyway")
+
+ @property
+ def generate_url(self):
+ return self.get(self.rpki_torrent_section, "generate_url")
+
+ @property
+ def act_as_generator(self):
+ try:
+ return self.get(self.rpki_torrent_section, "generate_url") != ""
+ except ConfigParser.Error:
+ return False
+
+ @property
+ def rsync_prog(self):
+ return self.get(self.rpki_torrent_section, "rsync_prog")
+
+ @property
+ def mktorrent_prog(self):
+ return self.get(self.rpki_torrent_section, "mktorrent_prog")
+
+ @property
+ def tracker_url(self):
+ return self.get(self.rpki_torrent_section, "tracker_url")
+
+ @property
+ def sftp_host(self):
+ return self.get(self.rpki_torrent_section, "sftp_host")
+
+ @property
+ def sftp_port(self):
+ try:
+ return self.getint(self.rpki_torrent_section, "sftp_port")
+ except ConfigParser.Error:
+ return 22
+
+ @property
+ def sftp_user(self):
+ return self.get(self.rpki_torrent_section, "sftp_user")
+
+ @property
+ def sftp_hostkey_file(self):
+ return self.get(self.rpki_torrent_section, "sftp_hostkey_file")
+
+ @property
+ def sftp_private_key_file(self):
+ return self.get(self.rpki_torrent_section, "sftp_private_key_file")
+
+ @property
+ def lockfile(self):
+ try:
+ return self.get(self.rpki_torrent_section, "lockfile")
+ except ConfigParser.Error:
+ return None
+
+ @property
+ def unauthenticated(self):
+ try:
+ return self.get(self.rpki_torrent_section, "unauthenticated")
+ except ConfigParser.Error:
+ return self.get("rcynic", "unauthenticated")
+
+ @property
+ def log_email(self):
+ return self.get(self.rpki_torrent_section, "log_email")
+
+ @property
+ def transmission_username(self):
+ try:
+ return self.get(self.rpki_torrent_section, "transmission_username")
+ except ConfigParser.Error:
+ return None
+
+ @property
+ def transmission_password(self):
+ try:
+ return self.get(self.rpki_torrent_section, "transmission_password")
+ except ConfigParser.Error:
+ return None
+
+ def multioption_iter(self, name, getter = None):
+ if getter is None:
+ getter = self.get
+ if self.has_option(self.rpki_torrent_section, name):
+ yield getter(self.rpki_torrent_section, name)
+ name += "."
+ names = [i for i in self.options(self.rpki_torrent_section) if i.startswith(name) and i[len(name):].isdigit()]
+ names.sort(key = lambda s: int(s[len(name):])) # pylint: disable=W0631
+ for name in names:
+ yield getter(self.rpki_torrent_section, name)
+
+ @property
+ def zip_urls(self):
+ return self.multioption_iter("zip_url")
+
+ @property
+ def post_rcynic_commands(self):
+ return self.multioption_iter("post_rcynic_command")
+
+ def find_url(self, torrent_name):
+ urls = [u for u in self.zip_urls
+ if os.path.splitext(os.path.basename(u))[0] == torrent_name]
+ if len(urls) != 1:
+ raise TorrentNameDoesNotMatchURL("Can't find URL matching torrent name %s" % torrent_name)
+ return urls[0]
if __name__ == "__main__":
- main()
+ main()
diff --git a/rp/rcynic/rules.darwin.mk b/rp/rcynic/rules.darwin.mk
index d37b0e75..f1eed3ce 100644
--- a/rp/rcynic/rules.darwin.mk
+++ b/rp/rcynic/rules.darwin.mk
@@ -1,108 +1,38 @@
# $Id$
install-user-and-group: .FORCE
- @if /usr/bin/dscl . -read "/Groups/${RCYNIC_GROUP}" >/dev/null 2>&1; \
+ @if /usr/bin/dscl . -read "/Groups/${RPKI_GROUP}" >/dev/null 2>&1; \
then \
- echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
+ echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \
elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" RealName "${RCYNIC_GECOS}" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" PrimaryGroupID "$$gid" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
- /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" Password "*"; \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" && \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" RealName "${RPKI_GECOS}" && \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" PrimaryGroupID "$$gid" && \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
+ /usr/bin/dscl . -create "/Groups/${RPKI_GROUP}" Password "*"; \
then \
- echo "Added group \"${RCYNIC_GROUP}\"."; \
+ echo "Added group \"${RPKI_GROUP}\"."; \
else \
- echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Adding group \"${RPKI_GROUP}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi; \
- if /usr/bin/dscl . -read "/Users/${RCYNIC_USER}" >/dev/null 2>&1; \
+ if /usr/bin/dscl . -read "/Users/${RPKI_USER}" >/dev/null 2>&1; \
then \
- echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
+ echo "You already have a user \"${RPKI_USER}\", so I will use it."; \
elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UserShell "/usr/bin/false" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" RealName "${RCYNIC_GECOS}" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UniqueID "$$uid" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" PrimaryGroupID "$$gid" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" NFSHomeDirectory "/var/empty" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
- /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" Password "*"; \
- then \
- echo "Added user \"${RCYNIC_USER}\"."; \
- else \
- echo "Adding user \"${RCYNIC_USER}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
- @if /usr/bin/dscl . -read "/Groups/${RPKIRTR_GROUP}" >/dev/null 2>&1; \
- then \
- echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
- elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" RealName "${RPKIRTR_GECOS}" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" PrimaryGroupID "$$gid" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
- /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" Password "*"; \
- then \
- echo "Added group \"${RPKIRTR_GROUP}\"."; \
- else \
- echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi; \
- if /usr/bin/dscl . -read "/Users/${RPKIRTR_USER}" >/dev/null 2>&1; \
- then \
- echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
- elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UserShell "/usr/bin/false" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" RealName "${RPKIRTR_GECOS}" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UniqueID "$$uid" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" PrimaryGroupID "$$gid" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" NFSHomeDirectory "/var/empty" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
- /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" Password "*"; \
- then \
- echo "Added user \"${RPKIRTR_USER}\"."; \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" UserShell "/usr/bin/false" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" RealName "${RPKI_GECOS}" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" UniqueID "$$uid" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" PrimaryGroupID "$$gid" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" NFSHomeDirectory "/var/empty" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
+ /usr/bin/dscl . -create "/Users/${RPKI_USER}" Password "*"; \
+ then \
+ echo "Added user \"${RPKI_USER}\"."; \
else \
- echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
+ echo "Adding user \"${RPKI_USER}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
-
-
-install-shared-libraries: .FORCE
- @echo "Copying required shared libraries"
- @shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync"; \
- while true; \
- do \
- closure="$$(/usr/bin/otool -L $${shared_libraries} | /usr/bin/awk '/:$$/ {next} {print $$1}' | /usr/bin/sort -u)"; \
- if test "x$$shared_libraries" = "x$$closure";
- then \
- break; \
- else \
- shared_libraries="$$closure"; \
- fi; \
- done; \
- for shared in /usr/lib/dyld $$shared_libraries; \
- do \
- if /bin/test -r "${RCYNIC_DIR}/$${shared}"; \
- then \
- echo "You already have a \"${RCYNIC_DIR}/$${shared}\", so I will use it"; \
- elif /usr/bin/install -m 555 -o root -g wheel -p "$${shared}" "${RCYNIC_DIR}/$${shared}"; \
- then \
- echo "Copied $${shared} into ${RCYNIC_DIR}"; \
- else \
- echo "Unable to copy $${shared} into ${RCYNIC_DIR}"; \
- exit 1; \
- fi; \
- done
-
-install-rc-scripts:
- ${INSTALL} -o root -g wheel -d ${DESTDIR}/Library/StartupItems/RCynic
- ${INSTALL} -o root -g wheel -m 555 \
- rc-scripts/darwin/RCynic \
- rc-scripts/darwin/StartupParameters.plist \
- ${DESTDIR}/Library/Startup/RCynic
diff --git a/rp/rcynic/rules.freebsd.mk b/rp/rcynic/rules.freebsd.mk
index 5233386e..0f022a2e 100644
--- a/rp/rcynic/rules.freebsd.mk
+++ b/rp/rcynic/rules.freebsd.mk
@@ -1,56 +1,25 @@
# $Id$
install-user-and-group: .FORCE
- @if /usr/sbin/pw groupshow "${RCYNIC_GROUP}" 2>/dev/null; \
+ @if /usr/sbin/pw groupshow "${RPKI_GROUP}" 2>/dev/null; \
then \
- echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
- elif /usr/sbin/pw groupadd ${RCYNIC_GROUP}; \
+ echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \
+ elif /usr/sbin/pw groupadd ${RPKI_GROUP}; \
then \
- echo "Added group \"${RCYNIC_GROUP}\"."; \
+ echo "Added group \"${RPKI_GROUP}\"."; \
else \
- echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Adding group \"${RPKI_GROUP}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
- @if /usr/sbin/pw groupshow "${RPKIRTR_GROUP}" 2>/dev/null; \
+ @if /usr/sbin/pw usershow "${RPKI_USER}" 2>/dev/null; \
then \
- echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
- elif /usr/sbin/pw groupadd ${RPKIRTR_GROUP}; \
+ echo "You already have a user \"${RPKI_USER}\", so I will use it."; \
+ elif /usr/sbin/pw useradd ${RPKI_USER} -g ${RPKI_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RPKI_GECOS}"; \
then \
- echo "Added group \"${RPKIRTR_GROUP}\"."; \
+ echo "Added user \"${RPKI_USER}\"."; \
else \
- echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
+ echo "Adding user \"${RPKI_USER}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
- @if /usr/sbin/pw usershow "${RCYNIC_USER}" 2>/dev/null; \
- then \
- echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
- elif /usr/sbin/pw useradd ${RCYNIC_USER} -g ${RCYNIC_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RCYNIC_GECOS}" -G "${RPKIRTR_GROUP}"; \
- then \
- echo "Added user \"${RCYNIC_USER}\"."; \
- else \
- echo "Adding user \"${RCYNIC_USER}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
- @if /usr/sbin/pw usershow "${RPKIRTR_USER}" 2>/dev/null; \
- then \
- echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
- elif /usr/sbin/pw useradd ${RPKIRTR_USER} -g ${RPKIRTR_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RPKIRTR_GECOS}"; \
- then \
- echo "Added user \"${RPKIRTR_USER}\"."; \
- else \
- echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
-
-
-# We use static compilation on FreeBSD, so no need for shared libraries
-
-install-shared-libraries:
- @true
-
-install-rc-scripts:
- ${INSTALL} -m 555 -o root -g wheel -p rc-scripts/freebsd/rc.d.rcynic ${DESTDIR}/usr/local/etc/rc.d/rcynic
diff --git a/rp/rcynic/rules.linux.mk b/rp/rcynic/rules.linux.mk
index 6a962cef..c116f75c 100644
--- a/rp/rcynic/rules.linux.mk
+++ b/rp/rcynic/rules.linux.mk
@@ -1,92 +1,27 @@
# $Id$
install-user-and-group: .FORCE
- @if getent group ${RCYNIC_GROUP} >/dev/null; \
+ @if getent group ${RPKI_GROUP} >/dev/null; \
then \
- echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
- elif /usr/sbin/groupadd ${RCYNIC_GROUP}; \
+ echo "You already have a group \"${RPKI_GROUP}\", so I will use it."; \
+ elif /usr/sbin/groupadd ${RPKI_GROUP}; \
then \
- echo "Added group \"${RCYNIC_GROUP}\"."; \
+ echo "Added group \"${RPKI_GROUP}\"."; \
else \
- echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Adding group \"${RPKI_GROUP}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
@nogroup='-N'; \
if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \
- if getent passwd ${RCYNIC_USER} >/dev/null; \
+ if getent passwd ${RPKI_USER} >/dev/null; \
then \
- echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
- elif /usr/sbin/useradd -g ${RCYNIC_GROUP} -M $$nogroup -d "${RCYNIC_DIR}" -s /sbin/nologin -c "${RCYNIC_GECOS}" ${RCYNIC_USER}; \
+ echo "You already have a user \"${RPKI_USER}\", so I will use it."; \
+ elif /usr/sbin/useradd -g ${RPKI_GROUP} -M $$nogroup -d "${DESTDIR}${RCYNIC_DIR}" -s /sbin/nologin -c "${RPKI_GECOS}" ${RPKI_USER}; \
then \
- echo "Added user \"${RCYNIC_USER}\"."; \
+ echo "Added user \"${RPKI_USER}\"."; \
else \
- echo "Adding user \"${RCYNIC_USER}\" failed..."; \
+ echo "Adding user \"${RPKI_USER}\" failed..."; \
echo "Please create it, then try again."; \
exit 1; \
fi
- @if getent group ${RPKIRTR_GROUP} >/dev/null; \
- then \
- echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
- elif /usr/sbin/groupadd ${RPKIRTR_GROUP}; \
- then \
- echo "Added group \"${RPKIRTR_GROUP}\"."; \
- else \
- echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
- @nogroup='-N'; \
- if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \
- if getent passwd ${RPKIRTR_USER} >/dev/null; \
- then \
- echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
- elif /usr/sbin/useradd -g ${RPKIRTR_GROUP} -M $$nogroup -d "${RPKIRTR_DIR}" -s /sbin/nologin -c "${RPKIRTR_GECOS}" ${RPKIRTR_USER}; \
- then \
- echo "Added user \"${RPKIRTR_USER}\"."; \
- else \
- echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
- echo "Please create it, then try again."; \
- exit 1; \
- fi
- usermod -a -G ${RPKIRTR_GROUP} ${RCYNIC_USER}
-
-install-shared-libraries: .FORCE
- @echo "Copying required shared libraries"
- @if test -d /lib64; then libdir=/lib64; else libdir=/lib; fi; \
- shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync $$(/usr/bin/find $${libdir} -name 'libnss*.so*' -print)"; \
- while true; \
- do \
- closure="$$(/usr/bin/ldd $${shared_libraries} | \
- ${AWK} ' \
- { sub(/:$/, "") } \
- $$0 == "${RCYNIC_DIR}/bin/rcynic" { next } \
- $$0 == "${RCYNIC_DIR}/bin/rsync" { next } \
- $$1 ~ /\/ld-linux\.so/ { next } \
- { for (i = 1; i <= NF; i++) if ($$i ~ /^\//) print $$i } \
- ' | \
- ${SORT} -u)"; \
- if test "X$$shared_libraries" = "X$$closure"; \
- then \
- break; \
- else \
- shared_libraries="$$closure"; \
- fi; \
- done; \
- if test -f $${libdir}/libresolv.so.2; \
- then \
- shared_libraries="$${shared_libraries} $${libdir}/libresolv.so.2";
- fi; \
- for shared in $${libdir}/*ld*.so* $$shared_libraries; \
- do \
- if test ! -r "${RCYNIC_DIR}/$${shared}"; \
- then \
- ${INSTALL} -m 555 -d `dirname "${RCYNIC_DIR}$${shared}"` && \
- ${INSTALL} -m 555 -p "$${shared}" "${RCYNIC_DIR}$${shared}"; \
- fi; \
- done
-
-# No devfs, so no rc script
-
-install-rc-scripts:
- @true
diff --git a/rp/rcynic/rules.unknown.mk b/rp/rcynic/rules.unknown.mk
index 6ce3ea18..03cbd858 100644
--- a/rp/rcynic/rules.unknown.mk
+++ b/rp/rcynic/rules.unknown.mk
@@ -1,4 +1,4 @@
# $Id$
-install-user-and-group install-shared-libraries install-rc-scripts: .FORCE
+install-user-and-group: .FORCE
@echo "Don't know how to make $@ on this platform"; exit 1
diff --git a/rp/rcynic/sample-trust-anchors/apnic-testbed.tal b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal.disabled
index f87a3bf3..f87a3bf3 100644
--- a/rp/rcynic/sample-trust-anchors/apnic-testbed.tal
+++ b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal.disabled
diff --git a/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal.disabled
index 1e466300..1e466300 100644
--- a/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal
+++ b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal.disabled
diff --git a/rp/rcynic/static-rsync/Makefile.in b/rp/rcynic/static-rsync/Makefile.in
deleted file mode 100644
index 8a433c7b..00000000
--- a/rp/rcynic/static-rsync/Makefile.in
+++ /dev/null
@@ -1,44 +0,0 @@
-# $Id$
-
-VERSION = 2.6.9
-
-CFG_ENV = CFLAGS='@CFLAGS@' LDFLAGS='@LDFLAGS@ @LD_STATIC_FLAG@'
-CFG_ARG =
-
-TARBALL = rsync-${VERSION}.tar.gz
-DIRNAME = rsync-${VERSION}
-
-CFG_LOG = > ../config.log 2>&1
-BIN_LOG = > ../build.log 2>&1
-
-BIN = rsync
-
-abs_top_srcdir = @abs_top_srcdir@
-abs_top_builddir = @abs_top_builddir@
-
-all: ${BIN}
-
-${BIN}: ${DIRNAME}/${BIN}
- ln ${DIRNAME}/${BIN} $@
- file $@
-
-${DIRNAME}/${BIN}: configured.stamp
- cd ${DIRNAME} && ${MAKE} ${BIN_LOG}
-
-extracted.stamp: ${TARBALL}
- gzip -c -d ${TARBALL} | tar -xf -
- touch $@
-
-patched.stamp: extracted.stamp
- for i in patches/patch-*; do if test -f "$$i"; then patch -d ${DIRNAME} <"$$i"; else :; fi; done
- touch $@
-
-configured.stamp: patched.stamp
- cd ${DIRNAME} && ${CFG_ENV} ./configure ${CFG_ARG} ${CFG_LOG}
- touch $@
-
-clean:
- rm -rf ${BIN} ${DIRNAME} *.stamp *.log
-
-distclean: clean
- rm -f Makefile
diff --git a/rp/rcynic/static-rsync/README b/rp/rcynic/static-rsync/README
deleted file mode 100644
index 9ff5afa8..00000000
--- a/rp/rcynic/static-rsync/README
+++ /dev/null
@@ -1,15 +0,0 @@
-$Id$
-
-Hack to build a static rsync binary suitable for use in a chroot jail.
-
-The default configuration is for gcc, since that's the most widely
-used compiler on the platforms we use. I've provided hooks intended
-to make it simple to support other compilers just by overriding make
-variables on the command line: if you need to do something more
-drastic than this to get your compiler working, please tell me.
-
-If your platform doesn't support static binaries at all, you're on
-your own (and should whine at your OS vendor, as this is nuts).
-
-We try to stick with rsync release code, but apply security patches
-when necessary.
diff --git a/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091 b/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091
deleted file mode 100644
index 201af96a..00000000
--- a/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091
+++ /dev/null
@@ -1,60 +0,0 @@
---- sender.c 2006-09-20 03:53:32.000000000 +0200
-+++ sender.c 2007-07-25 15:33:05.000000000 +0200
-@@ -123,6 +123,7 @@
- char fname[MAXPATHLEN];
- struct file_struct *file;
- unsigned int offset;
-+ size_t l = 0;
-
- if (ndx < 0 || ndx >= the_file_list->count)
- return;
-@@ -133,6 +134,20 @@
- file->dir.root, "/", NULL);
- } else
- offset = 0;
-+
-+ l = offset + 1;
-+ if (file) {
-+ if (file->dirname)
-+ l += strlen(file->dirname);
-+ if (file->basename)
-+ l += strlen(file->basename);
-+ }
-+
-+ if (l >= sizeof(fname)) {
-+ rprintf(FERROR, "Overlong pathname\n");
-+ exit_cleanup(RERR_FILESELECT);
-+ }
-+
- f_name(file, fname + offset);
- if (remove_source_files) {
- if (do_unlink(fname) == 0) {
-@@ -224,6 +239,7 @@
- enum logcode log_code = log_before_transfer ? FLOG : FINFO;
- int f_xfer = write_batch < 0 ? batch_fd : f_out;
- int i, j;
-+ size_t l = 0;
-
- if (verbose > 2)
- rprintf(FINFO, "send_files starting\n");
-@@ -259,6 +275,20 @@
- fname[offset++] = '/';
- } else
- offset = 0;
-+
-+ l = offset + 1;
-+ if (file) {
-+ if (file->dirname)
-+ l += strlen(file->dirname);
-+ if (file->basename)
-+ l += strlen(file->basename);
-+ }
-+
-+ if (l >= sizeof(fname)) {
-+ rprintf(FERROR, "Overlong pathname\n");
-+ exit_cleanup(RERR_FILESELECT);
-+ }
-+
- fname2 = f_name(file, fname + offset);
-
- if (verbose > 2)
diff --git a/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz b/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz
deleted file mode 100644
index 6377f639..00000000
--- a/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz
+++ /dev/null
Binary files differ
diff --git a/rp/rcynic/validation_status b/rp/rcynic/validation_status
index a3ee36f1..d8e2c8ae 100755
--- a/rp/rcynic/validation_status
+++ b/rp/rcynic/validation_status
@@ -23,14 +23,13 @@ Flat text listing of <validation_status/> elements from rcynic.xml.
import sys
try:
- from lxml.etree import ElementTree
+ from lxml.etree import ElementTree
except ImportError:
- from xml.etree.ElementTree import ElementTree
+ from xml.etree.ElementTree import ElementTree
for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
- for elt in ElementTree(file = filename).findall("validation_status"):
- print "%s %8s %-40s %s" % (
- elt.get("timestamp"),
- elt.get("generation"),
- elt.get("status"),
- elt.text.strip())
+ for elt in ElementTree(file = filename).findall("validation_status"):
+ print "%s %-40s %s" % (
+ elt.get("timestamp"),
+ elt.get("status"),
+ elt.text.strip())
diff --git a/rp/rpki-rtr/rpki-rtr b/rp/rpki-rtr/rpki-rtr
index 5ad4cf26..7f3e6b4f 100755
--- a/rp/rpki-rtr/rpki-rtr
+++ b/rp/rpki-rtr/rpki-rtr
@@ -19,5 +19,5 @@
# PERFORMANCE OF THIS SOFTWARE.
if __name__ == "__main__":
- from rpki.rtr.main import main
- main()
+ from rpki.rtr.main import main
+ main()
diff --git a/rp/rpki-rtr/rules.freebsd.mk b/rp/rpki-rtr/rules.freebsd.mk
index f4d214a3..0f1546b2 100644
--- a/rp/rpki-rtr/rules.freebsd.mk
+++ b/rp/rpki-rtr/rules.freebsd.mk
@@ -18,7 +18,7 @@ install-listener: .FORCE
@if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp[ ]" /etc/inetd.conf; \
then \
echo "You already have an inetd.conf entry for rpki-rtr on TCPv4, so I will use it."; \
- elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpkirtr /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"; \
+ elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpki /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"; \
then \
echo "Added rpki-rtr for TCPv4 to /etc/inetd.conf."; \
else \
@@ -28,7 +28,7 @@ install-listener: .FORCE
@if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp6[ ]" /etc/inetd.conf; \
then \
echo "You already have an inetd.conf entry for rpki-rtr on TCPv6, so I will use it."; \
- elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpkirtr /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"; \
+ elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpki /usr/local/bin/rpki-rtr rpki-rtr server /var/rcynic/rpki-rtr"; \
then \
echo "Added rpki-rtr for TCPv6 to /etc/inetd.conf."; \
else \
diff --git a/rp/rpki-rtr/rules.linux.mk b/rp/rpki-rtr/rules.linux.mk
index d9b21590..bad35ace 100644
--- a/rp/rpki-rtr/rules.linux.mk
+++ b/rp/rpki-rtr/rules.linux.mk
@@ -19,7 +19,7 @@ ${DESTDIR}/etc/xinetd.d/rpki-rtr:
print " protocol = tcp"; \
print " port = ${RPKI_RTR_PORT}"; \
print " wait = no"; \
- print " user = rpkirtr"; \
+ print " user = rpki"; \
print " server = ${bindir}/${BIN}"; \
print " server_args = server /var/rcynic/rpki-rtr"; \
print "}"; \
diff --git a/rp/utils/find_roa b/rp/utils/find_roa
index 4cfcccac..9a387c6a 100755
--- a/rp/utils/find_roa
+++ b/rp/utils/find_roa
@@ -25,134 +25,137 @@ import os
import argparse
import rpki.POW
import rpki.oids
+import rpki.config
def check_dir(s):
- if os.path.isdir(s):
- return os.path.abspath(s)
- else:
- raise argparse.ArgumentTypeError("%r is not a directory" % s)
+ if os.path.isdir(s):
+ return os.path.abspath(s)
+ else:
+ raise argparse.ArgumentTypeError("%r is not a directory" % s)
def filename_to_uri(filename):
- if not filename.startswith(args.rcynic_dir):
- raise ValueError
- return "rsync://" + filename[len(args.rcynic_dir):].lstrip("/")
+ if not filename.startswith(args.rcynic_dir):
+ raise ValueError
+ return "rsync://" + filename[len(args.rcynic_dir):].lstrip("/")
def uri_to_filename(uri):
- if not uri.startswith("rsync://"):
- raise ValueError
- return os.path.join(args.rcynic_dir, uri[len("rsync://"):])
+ if not uri.startswith("rsync://"):
+ raise ValueError
+ return os.path.join(args.rcynic_dir, uri[len("rsync://"):])
class Prefix(object):
- """
- One prefix parsed from the command line.
- """
-
- def __init__(self, val):
- addr, length = val.split("/")
- length, sep, maxlength = length.partition("-") # pylint: disable=W0612
- self.prefix = rpki.POW.IPAddress(addr)
- self.length = int(length)
- self.maxlength = int(maxlength) if maxlength else self.length
- if self.maxlength < self.length or self.length < 0 or self.length > self.prefix.bits:
- raise ValueError
- if self.prefix & ((1 << (self.prefix.bits - self.length)) - 1) != 0:
- raise ValueError
-
- def matches(self, roa):
- return any(self.prefix == prefix and
- self.length == length and
- (not args.match_maxlength or
- self.maxlength == maxlength or
- (maxlength is None and
- self.length == self.maxlength))
- for prefix, length, maxlength in roa.prefixes)
-
-
-class ROA(rpki.POW.ROA):
- """
- Aspects of a ROA that we care about.
- """
-
- @classmethod
- def parse(cls, fn):
- assert fn.startswith(args.rcynic_dir)
- self = cls.derReadFile(fn)
- self.fn = fn
- self.extractWithoutVerifying()
- v4, v6 = self.getPrefixes()
- self.prefixes = (v4 or ()) + (v6 or ())
- return self
-
- @property
- def uri(self):
- return filename_to_uri(self.fn)
-
- @property
- def formatted_prefixes(self):
- for prefix in self.prefixes:
- if prefix[2] is None or prefix[1] == prefix[2]:
- yield "%s/%d" % (prefix[0], prefix[1])
- else:
- yield "%s/%d-%d" % (prefix[0], prefix[1], prefix[2])
-
- def __str__(self):
- prefixes = " ".join(self.formatted_prefixes)
- plural = "es" if " " in prefixes else ""
- if args.show_inception:
- return "signingTime %s ASN %s prefix%s %s" % (self.signingTime(), self.getASID(), plural, prefixes)
- else:
- return "ASN %s prefix%s %s" % (self.getASID(), plural, prefixes)
-
- def show(self):
- print "%s %s" % (self, self.fn if args.show_filenames else self.uri)
-
- def show_expiration(self):
- print self
- x = self.certs()[0]
- fn = self.fn
- uri = self.uri
- while uri is not None:
- name = fn if args.show_filenames else uri
- if args.show_inception:
- print "notBefore", x.getNotBefore(), "notAfter", x.getNotAfter(), name
- else:
- print x.getNotAfter(), name
- for uri in x.getAIA() or ():
- if uri.startswith("rsync://"):
- break
- else:
- break
- fn = uri_to_filename(uri)
- if not os.path.exists(fn):
- print "***** MISSING ******", uri
- break
- x = rpki.POW.X509.derReadFile(fn)
- print
-
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-a", "--all", action = "store_true", help = "show all ROAs, do no prefix matching at all")
-parser.add_argument("-m", "--match-maxlength", action = "store_true", help = "pay attention to maxLength values")
-parser.add_argument("-e", "--show-expiration", action = "store_true", help = "show ROA chain expiration dates")
-parser.add_argument("-f", "--show-filenames", action = "store_true", help = "show filenames instead of URIs")
-parser.add_argument("-i", "--show-inception", action = "store_true", help = "show inception dates")
-parser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated output directory")
-parser.add_argument("prefixes", type = Prefix, nargs = "*", help = "ROA prefix(es) to match")
-args = parser.parse_args()
+ """
+ One prefix parsed from the command line.
+ """
+
+ def __init__(self, val):
+ addr, length = val.split("/")
+ length, sep, maxlength = length.partition("-") # pylint: disable=W0612
+ self.prefix = rpki.POW.IPAddress(addr)
+ self.length = int(length)
+ self.maxlength = int(maxlength) if maxlength else self.length
+ if self.maxlength < self.length or self.length < 0 or self.length > self.prefix.bits:
+ raise ValueError
+ if self.prefix & ((1 << (self.prefix.bits - self.length)) - 1) != 0:
+ raise ValueError
+
+ def matches(self, roa): # pylint: disable=W0621
+ return any(self.prefix == prefix and
+ self.length == length and
+ (not args.match_maxlength or
+ self.maxlength == maxlength or
+ (maxlength is None and
+ self.length == self.maxlength))
+ for prefix, length, maxlength in roa.prefixes)
+
+
+class ROA(rpki.POW.ROA): # pylint: disable=W0232
+ """
+ Aspects of a ROA that we care about.
+ """
+
+ @classmethod
+ def parse(cls, fn): # pylint: disable=W0621
+ assert fn.startswith(args.rcynic_dir)
+ self = cls.derReadFile(fn) # pylint: disable=E1101
+ self.fn = fn
+ self.extractWithoutVerifying()
+ v4, v6 = self.getPrefixes()
+ self.prefixes = (v4 or ()) + (v6 or ())
+ return self
+
+ @property
+ def uri(self):
+ return filename_to_uri(self.fn) # pylint: disable=E1101
+
+ @property
+ def formatted_prefixes(self):
+ for prefix in self.prefixes: # pylint: disable=E1101
+ if prefix[2] is None or prefix[1] == prefix[2]:
+ yield "%s/%d" % (prefix[0], prefix[1])
+ else:
+ yield "%s/%d-%d" % (prefix[0], prefix[1], prefix[2])
+
+ def __str__(self):
+ # pylint: disable=E1101
+ prefixes = " ".join(self.formatted_prefixes)
+ plural = "es" if " " in prefixes else ""
+ if args.show_inception:
+ return "signingTime %s ASN %s prefix%s %s" % (self.signingTime(), self.getASID(), plural, prefixes)
+ else:
+ return "ASN %s prefix%s %s" % (self.getASID(), plural, prefixes)
+
+ def show(self):
+ # pylint: disable=E1101
+ print "%s %s" % (self, self.fn if args.show_filenames else self.uri)
+
+ def show_expiration(self):
+ print self
+ x = self.certs()[0] # pylint: disable=E1101
+ fn = self.fn # pylint: disable=E1101,W0621
+ uri = self.uri
+ while uri is not None:
+ name = fn if args.show_filenames else uri
+ if args.show_inception:
+ print "notBefore", x.getNotBefore(), "notAfter", x.getNotAfter(), name
+ else:
+ print x.getNotAfter(), name
+ for uri in x.getAIA() or ():
+ if uri.startswith("rsync://"):
+ break
+ else:
+ break
+ fn = uri_to_filename(uri)
+ if not os.path.exists(fn):
+ print "***** MISSING ******", uri
+ break
+ x = rpki.POW.X509.derReadFile(fn)
+ print
+
+
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("-a", "--all", action = "store_true", help = "show all ROAs, do no prefix matching at all")
+cfg.argparser.add_argument("-m", "--match-maxlength", action = "store_true", help = "pay attention to maxLength values")
+cfg.argparser.add_argument("-e", "--show-expiration", action = "store_true", help = "show ROA chain expiration dates")
+cfg.argparser.add_argument("-f", "--show-filenames", action = "store_true", help = "show filenames instead of URIs")
+cfg.argparser.add_argument("-i", "--show-inception", action = "store_true", help = "show inception dates")
+cfg.argparser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated output directory")
+cfg.argparser.add_argument("prefixes", type = Prefix, nargs = "*", help = "ROA prefix(es) to match")
+args = cfg.argparser.parse_args()
# If there's some way to automate this in the parser, I don't know what it is, so just catch it here.
if args.all != (not args.prefixes):
- parser.error("--all and prefix list are mutually exclusive")
+ parser.error("--all and prefix list are mutually exclusive")
for root, dirs, files in os.walk(args.rcynic_dir):
- for fn in files:
- if fn.endswith(".roa"):
- roa = ROA.parse(os.path.join(root, fn))
- if args.all or any(prefix.matches(roa) for prefix in args.prefixes):
- if args.show_expiration:
- roa.show_expiration()
- else:
- roa.show()
+ for fn in files:
+ if fn.endswith(".roa"):
+ roa = ROA.parse(os.path.join(root, fn))
+ if args.all or any(prefix.matches(roa) for prefix in args.prefixes):
+ if args.show_expiration:
+ roa.show_expiration()
+ else:
+ roa.show()
diff --git a/rp/utils/hashdir b/rp/utils/hashdir
index d3fe393c..c2c100b8 100755
--- a/rp/utils/hashdir
+++ b/rp/utils/hashdir
@@ -26,42 +26,40 @@ distributed as part of the repository system.
import os
import sys
-import argparse
-import rpki.POW
+import rpki.config
+
+from rpki.rcynicdb.iterator import authenticated_objects
def check_dir(s):
- if os.path.isdir(s):
- return os.path.abspath(s)
- else:
- raise argparse.ArgumentTypeError("%r is not a directory" % s)
+ if os.path.isdir(s):
+ return os.path.abspath(s)
+ else:
+ raise argparse.ArgumentTypeError("{!r} is not a directory".format(s))
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-v", "--verbose", action = "store_true", help = "whistle while you work")
-parser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated output directory")
-parser.add_argument("output_dir", help = "name of output directory to create")
-args = parser.parse_args()
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("-v", "--verbose", action = "store_true", help = "whistle while you work")
+cfg.argparser.add_argument("rcynic_dir", nargs = "?", type = check_dir, help = "rcynic authenticated output directory")
+cfg.argparser.add_argument("output_dir", help = "name of output directory to create")
+args = cfg.argparser.parse_args()
if not os.path.isdir(args.output_dir):
- os.makedirs(args.output_dir)
+ os.makedirs(args.output_dir)
-for root, dirs, files in os.walk(args.rcynic_dir):
- for ifn in files:
- ifn = os.path.join(root, ifn)
- if ifn.endswith(".cer"):
- obj = rpki.POW.X509.derReadFile(ifn)
- fmt = "%08x.%%d" % obj.getSubjectHash()
- elif ifn.endswith(".crl"):
- obj = rpki.POW.CRL.derReadFile(ifn)
- fmt = "%08x.r%%d" % obj.getIssuerHash()
- else:
- continue
+def store(uri, obj, fmt):
for i in xrange(1000000):
- ofn = os.path.join(args.output_dir, fmt % i)
- if not os.path.exists(ofn):
- with open(ofn, "w") as f:
- f.write(obj.pemWrite())
- if args.verbose:
- print ofn, "<=", ifn
- break
+ fn = os.path.join(args.output_dir, fmt.format(i))
+ if os.path.exists(fn):
+ continue
+ with open(fn, "w") as f:
+ f.write(obj.pemWrite())
+ if args.verbose:
+ print fn, "<=", uri
+ return
else:
- sys.exit("No path name available for %s (%s)" % (ifn, ofn))
+ sys.exit("No path name available for {} ({})".format(uri, fn))
+
+for uri, cer in authenticated_objects(uri_suffix = ".cer"):
+ store(uri, cer, "{:08x}.{{:d}}".format(cer.getSubjectHash()))
+
+for uri, crl in authenticated_objects(uri_suffix = ".crl"):
+ store(uri, crl, "{:08x}.r{{:d}}".format(crl.getIssuerHash()))
diff --git a/rp/utils/print_roa b/rp/utils/print_roa
index d5db0c3c..c5b7793a 100755
--- a/rp/utils/print_roa
+++ b/rp/utils/print_roa
@@ -21,53 +21,56 @@ Pretty-print the content of a ROA. Does NOT attempt to verify the
signature.
"""
-import argparse
+import rpki.config
import rpki.POW
-class ROA(rpki.POW.ROA):
+class ROA(rpki.POW.ROA): # pylint: disable=W0232
- @staticmethod
- def _format_prefix(prefix):
- if prefix[2] is None or prefix[1] == prefix[2]:
- return "%s/%d" % (prefix[0], prefix[1])
- else:
- return "%s/%d-%d" % (prefix[0], prefix[1], prefix[2])
+ v4_prefixes = None
+ v6_prefixes = None
+
+ @staticmethod
+ def _format_prefix(p):
+ if p[2] in (None, p[1]):
+ return "%s/%d" % (p[0], p[1])
+ else:
+ return "%s/%d-%d" % (p[0], p[1], p[2])
- def parse(self):
- self.extractWithoutVerifying()
- v4, v6 = self.getPrefixes()
- self.v4_prefixes = [self._format_prefix(p) for p in (v4 or ())]
- self.v6_prefixes = [self._format_prefix(p) for p in (v6 or ())]
+ def parse(self):
+ self.extractWithoutVerifying() # pylint: disable=E1101
+ v4, v6 = self.getPrefixes() # pylint: disable=E1101
+ self.v4_prefixes = [self._format_prefix(p) for p in (v4 or ())]
+ self.v6_prefixes = [self._format_prefix(p) for p in (v6 or ())]
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-b", "--brief", action = "store_true", help = "show only ASN and prefix(es)")
-parser.add_argument("-c", "--cms", action = "store_true", help = "print text representation of entire CMS blob")
-parser.add_argument("-s", "--signing-time", action = "store_true", help = "show SigningTime in brief mode")
-parser.add_argument("roas", nargs = "+", type = ROA.derReadFile, help = "ROA(s) to print")
-args = parser.parse_args()
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("--brief", action = "store_true", help = "show only ASN and prefix(es)")
+cfg.argparser.add_argument("--cms", action = "store_true", help = "print text representation of entire CMS blob")
+cfg.argparser.add_argument("--signing-time", action = "store_true", help = "show SigningTime in brief mode")
+cfg.argparser.add_argument("roas", nargs = "+", type = ROA.derReadFile, help = "ROA(s) to print") # pylint: disable=E1101
+args = cfg.argparser.parse_args()
for roa in args.roas:
- roa.parse()
- if args.brief:
- if args.signing_time:
- print roa.signingTime(),
- print roa.getASID(), " ".join(roa.v4_prefixes + roa.v6_prefixes)
- else:
- print "ROA Version: ", roa.getVersion()
- print "SigningTime: ", roa.signingTime()
- print "asID: ", roa.getASID()
- if roa.v4_prefixes:
- print " addressFamily:", 1
- for p in roa.v4_prefixes:
- print " IPAddress:", p
- if roa.v6_prefixes:
- print " addressFamily:", 2
- for p in roa.v6_prefixes:
- print " IPAddress:", p
- if args.cms:
- print roa.pprint()
- for cer in roa.certs():
- print cer.pprint()
- for crl in roa.crls():
- print crl.pprint()
- print
+ roa.parse()
+ if args.brief:
+ if args.signing_time:
+ print roa.signingTime(),
+ print roa.getASID(), " ".join(roa.v4_prefixes + roa.v6_prefixes)
+ else:
+ print "ROA Version: ", roa.getVersion()
+ print "SigningTime: ", roa.signingTime()
+ print "asID: ", roa.getASID()
+ if roa.v4_prefixes:
+ print " addressFamily:", 1
+ for prefix in roa.v4_prefixes:
+ print " IPAddress:", prefix
+ if roa.v6_prefixes:
+ print " addressFamily:", 2
+ for prefix in roa.v6_prefixes:
+ print " IPAddress:", prefix
+ if args.cms:
+ print roa.pprint()
+ for cer in roa.certs():
+ print cer.pprint()
+ for crl in roa.crls():
+ print crl.pprint()
+ print
diff --git a/rp/utils/print_rpki_manifest b/rp/utils/print_rpki_manifest
index 5ebc6356..74a3fbd4 100755
--- a/rp/utils/print_rpki_manifest
+++ b/rp/utils/print_rpki_manifest
@@ -21,30 +21,30 @@ Pretty-print the content of a manifest. Does NOT attempt to verify the
signature.
"""
-import argparse
+import rpki.config
import rpki.POW
import rpki.oids
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-c", "--cms", action = "store_true", help = "print text representation of entire CMS blob")
-parser.add_argument("manifests", nargs = "+", type = rpki.POW.Manifest.derReadFile, help = "manifest(s) to print")
-args = parser.parse_args()
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("--cms", action = "store_true", help = "print text representation of entire CMS blob")
+cfg.argparser.add_argument("manifests", nargs = "+", type = rpki.POW.Manifest.derReadFile, help = "manifest(s) to print")
+args = cfg.argparser.parse_args()
for mft in args.manifests:
- mft.extractWithoutVerifying()
- print "Manifest Version:", mft.getVersion()
- print "SigningTime: ", mft.signingTime()
- print "Number: ", mft.getManifestNumber()
- print "thisUpdate: ", mft.getThisUpdate()
- print "nextUpdate: ", mft.getNextUpdate()
- print "fileHashAlg: ", rpki.oids.oid2name(mft.getAlgorithm())
- for i, fah in enumerate(mft.getFiles()):
- name, hash = fah
- print "fileList[%3d]: %s %s" % (i, ":".join(("%02X" % ord(h) for h in hash)), name)
- if args.cms:
- print mft.pprint()
- for cer in mft.certs():
- print cer.pprint()
- for crl in mft.crls():
- print crl.pprint()
- print
+ mft.extractWithoutVerifying()
+ print "Manifest Version:", mft.getVersion()
+ print "SigningTime: ", mft.signingTime()
+ print "Number: ", mft.getManifestNumber()
+ print "thisUpdate: ", mft.getThisUpdate()
+ print "nextUpdate: ", mft.getNextUpdate()
+ print "fileHashAlg: ", rpki.oids.oid2name(mft.getAlgorithm())
+ for i, fah in enumerate(mft.getFiles()):
+ name, obj_hash = fah
+ print "fileList[%3d]: %s %s" % (i, ":".join(("%02X" % ord(h) for h in obj_hash)), name)
+ if args.cms:
+ print mft.pprint()
+ for cer in mft.certs():
+ print cer.pprint()
+ for crl in mft.crls():
+ print crl.pprint()
+ print
diff --git a/rp/utils/scan_roas b/rp/utils/scan_roas
index a1b64f01..510fd7a0 100755
--- a/rp/utils/scan_roas
+++ b/rp/utils/scan_roas
@@ -24,40 +24,39 @@ per line.
import os
import argparse
+
+import rpki.config
import rpki.POW
+from rpki.rcynicdb.iterator import authenticated_objects
+
def check_dir(d):
- if not os.path.isdir(d):
- raise argparse.ArgumentTypeError("%r is not a directory" % d)
- return d
-
-class ROA(rpki.POW.ROA):
-
- @classmethod
- def parse(cls, fn):
- self = cls.derReadFile(fn)
- self.extractWithoutVerifying()
- return self
-
- @property
- def prefixes(self):
- v4, v6 = self.getPrefixes()
- for prefix, length, maxlength in (v4 or ()) + (v6 or ()):
- if maxlength is None or length == maxlength:
- yield "%s/%d" % (prefix, length)
- else:
- yield "%s/%d-%d" % (prefix, length, maxlength)
-
- def __str__(self):
- return "%s %s %s" % (self.signingTime(), self.getASID(), " ".join(self.prefixes))
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("rcynic_dir", nargs = "+", type = check_dir,
- help = "rcynic authenticated output directory")
-args = parser.parse_args()
-
-for rcynic_dir in args.rcynic_dir:
- for root, dirs, files in os.walk(rcynic_dir):
- for fn in files:
- if fn.endswith(".roa"):
- print ROA.parse(os.path.join(root, fn))
+ if not os.path.isdir(d):
+ raise argparse.ArgumentTypeError("%r is not a directory" % d)
+ return d
+
+class ROA(rpki.POW.ROA): # pylint: disable=W0232
+
+ @property
+ def prefixes(self):
+ v4, v6 = self.getPrefixes() # pylint: disable=E1101
+ for prefix, length, maxlength in (v4 or ()) + (v6 or ()):
+ if maxlength is None or length == maxlength:
+ yield "%s/%d" % (prefix, length)
+ else:
+ yield "%s/%d-%d" % (prefix, length, maxlength)
+
+ def __str__(self):
+ # pylint: disable=E1101
+ return "%s %s %s" % (self.signingTime(), self.getASID(), " ".join(self.prefixes))
+
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("rcynic_dir", nargs = "?", type = check_dir,
+ help = "rcynic authenticated output directory")
+args = cfg.argparser.parse_args()
+
+for uri, roa in authenticated_objects(args.rcynic_dir,
+ uri_suffix = ".roa",
+ class_map = dict(roa = ROA)):
+ roa.extractWithoutVerifying()
+ print roa
diff --git a/rp/utils/scan_routercerts b/rp/utils/scan_routercerts
index 081a6293..540a8e25 100755
--- a/rp/utils/scan_routercerts
+++ b/rp/utils/scan_routercerts
@@ -26,32 +26,29 @@ import base64
import argparse
import rpki.POW
import rpki.oids
+import rpki.config
-def check_dir(s):
- if not os.path.isdir(s):
- raise argparse.ArgumentTypeError("%r is not a directory" % s)
- return s
-
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("rcynic_dir", type = check_dir, help = "rcynic authenticated output directory")
-args = parser.parse_args()
+from rpki.rcynicdb.iterator import authenticated_objects
-for root, dirs, files in os.walk(args.rcynic_dir):
-
- for fn in files:
+def check_dir(s):
+ if not os.path.isdir(s):
+ raise argparse.ArgumentTypeError("{!r} is not a directory".format(s))
+ return s
- if not fn.endswith(".cer"):
- continue
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("rcynic_dir", nargs = "?", type = check_dir,
+ help = "rcynic authenticated output directory")
+args = cfg.argparser.parse_args()
- x = rpki.POW.X509.derReadFile(os.path.join(root, fn))
+for uri, cer in authenticated_objects(args.rcynic_dir, uri_suffix = ".cer"):
- if rpki.oids.id_kp_bgpsec_router not in (x.getEKU() or ()):
- continue
+ if rpki.oids.id_kp_bgpsec_router not in (cer.getEKU() or ()):
+ continue
- sys.stdout.write(base64.urlsafe_b64encode(x.getSKI()).rstrip("="))
+ sys.stdout.write(base64.urlsafe_b64encode(cer.getSKI()).rstrip("="))
- for min_asn, max_asn in x.getRFC3779()[0]:
- for asn in xrange(min_asn, max_asn + 1):
- sys.stdout.write(" %s" % asn)
+ for min_asn, max_asn in cer.getRFC3779()[0]:
+ for asn in xrange(min_asn, max_asn + 1):
+ sys.stdout.write(" {}".format(asn))
- sys.stdout.write(" %s\n" % base64.b64encode(x.getPublicKey().derWritePublic()))
+ sys.stdout.write(" {}\n".format(base64.b64encode(cer.getPublicKey().derWritePublic())))
diff --git a/rp/utils/uri b/rp/utils/uri
index e72d5e0d..d3d9eebb 100755
--- a/rp/utils/uri
+++ b/rp/utils/uri
@@ -24,47 +24,57 @@ Input files must be in DER format and may be either X.509v3 certificates
or CMS objects which contain X.509v3 certificates in the CMS wrapper.
"""
-import argparse
+import rpki.config
import rpki.POW
class Certificate(object):
- @staticmethod
- def first_rsync(uris):
- if uris is not None:
- for uri in uris:
- if uri.startswith("rsync://"):
- return uri
- return None
+ @staticmethod
+ def first_whatever(uris, prefix):
+ if uris is not None:
+ for uri in uris:
+ if uri.startswith(prefix):
+ return uri
+ return None
- def __init__(self, fn):
- try:
- x = rpki.POW.X509.derReadFile(fn)
- except: # pylint: disable=W0702
- try:
- cms = rpki.POW.CMS.derReadFile(fn)
- cms.extractWithoutVerifying()
- x = cms.certs()[0]
- except:
- raise ValueError
- sia = x.getSIA() or (None, None, None)
- self.fn = fn
- self.uris = (
- ("AIA:caIssuers", self.first_rsync(x.getAIA())),
- ("SIA:caRepository", self.first_rsync(sia[0])),
- ("SIA:rpkiManifest", self.first_rsync(sia[1])),
- ("SIA:signedObject", self.first_rsync(sia[2])),
- ("CRLDP", self.first_rsync(x.getCRLDP())))
+ def first_rsync(self, uris):
+ return self.first_whatever(uris, "rsync://")
- def __str__(self):
- words = [self.fn] if args.single_line else ["File: " + self.fn]
- words.extend(" %s: %s" % (tag, uri) for tag, uri in self.uris if uri is not None)
- return ("" if args.single_line else "\n").join(words)
+ def first_https(self, uris):
+ return self.first_whatever(uris, "https://")
-parser = argparse.ArgumentParser(description = __doc__)
-parser.add_argument("-s", "--single-line", action = "store_true", help = "single output line per object")
-parser.add_argument("certs", nargs = "+", type = Certificate, help = "RPKI objects to examine")
-args = parser.parse_args()
+ def first_http(self, uris):
+ return self.first_whatever(uris, "http://")
+
+ def __init__(self, fn):
+ try:
+ x = rpki.POW.X509.derReadFile(fn)
+ except:
+ try:
+ cms = rpki.POW.CMS.derReadFile(fn)
+ cms.extractWithoutVerifying()
+ x = cms.certs()[0]
+ except:
+ raise ValueError
+ sia = x.getSIA() or (None, None, None, None)
+ self.fn = fn
+ self.uris = (
+ ("AIA:caIssuers", self.first_rsync(x.getAIA())),
+ ("SIA:caRepository", self.first_rsync(sia[0])),
+ ("SIA:rpkiManifest", self.first_rsync(sia[1])),
+ ("SIA:signedObject", self.first_rsync(sia[2])),
+ ("SIA:rpkiNotify", self.first_https(sia[3]) or self.first_http(sia[3])),
+ ("CRLDP", self.first_rsync(x.getCRLDP())))
+
+ def __str__(self):
+ words = [self.fn] if args.single_line else ["File: " + self.fn]
+ words.extend(" %s: %s" % (tag, uri) for tag, uri in self.uris if uri is not None)
+ return ("" if args.single_line else "\n").join(words)
+
+cfg = rpki.config.argparser(doc = __doc__)
+cfg.argparser.add_argument("-s", "--single-line", action = "store_true", help = "single output line per object")
+cfg.argparser.add_argument("certs", nargs = "+", type = Certificate, help = "RPKI objects to examine")
+args = cfg.argparser.parse_args()
for cert in args.certs:
- print cert
+ print cert
diff --git a/rpki/POW/__init__.py b/rpki/POW/__init__.py
index a9371553..b6f15a39 100644
--- a/rpki/POW/__init__.py
+++ b/rpki/POW/__init__.py
@@ -17,13 +17,197 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-# pylint: disable=W0622,W0401
+# pylint: disable=W0401,W0622
-from rpki.POW._POW import *
-from rpki.POW._POW import __doc__
+from ._POW import *
+from ._POW import __doc__
-# Set callback to let POW construct rpki.sundial.datetime objects
+
+# Set callback to let POW construct rpki.sundial.datetime objects.
from rpki.sundial import datetime as sundial_datetime
customDatetime(sundial_datetime)
del sundial_datetime
+
+
+# Status code mechanism, (mostly) moved out of POW.c.
+
+class StatusCode(object):
+
+ def __init__(self, name, text, kind, code = None):
+ assert code is None or isinstance(code, int)
+ assert kind in ("good", "bad", "warn")
+ self.code = code
+ self.name = name
+ self.text = text
+ self.kind = kind
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return "<StatusCode object \"{}\" at {}>".format(self.text, id(self))
+
+ def __hash__(self):
+ return hash(self.name)
+
+ def __cmp__(self, other):
+ return cmp(self.name, str(other))
+
+
+class StatusCodeDB(object):
+
+ def __init__(self, bad, warn, good, verification_errors):
+ self._map = dict((name, StatusCode(code = code, name = name, text = text,
+ kind = "bad" if code != 0 else "good"))
+ for code, name, text in verification_errors)
+ self._map.update((k, StatusCode(name = k, text = v, kind = "bad"))
+ for k, v in bad.iteritems())
+ self._map.update((k, StatusCode(name = k, text = v, kind = "warn"))
+ for k, v in warn.iteritems())
+ self._map.update((k, StatusCode(name = k, text = v, kind = "good"))
+ for k, v in good.iteritems())
+ for k, v in self._map.iteritems():
+ setattr(self, k, v)
+ self._map.update((s.code, s) for s in self._map.values() if s.code is not None)
+
+ def all(self):
+ return set(self._map.itervalues())
+
+ def normalize(self, status):
+ for s in [s for s in status if isinstance(s, (int, str)) and s in self._map]:
+ status.remove(s)
+ status.add(self._map[s])
+
+ def find(self, code):
+ return self._map[code]
+
+
+validation_status = StatusCodeDB(
+ bad = dict(
+ AIA_EXTENSION_MISSING = "AIA extension missing",
+ AIA_EXTENSION_FORBIDDEN = "AIA extension forbidden",
+ AIA_URI_MISSING = "AIA URI missing",
+ AKI_EXTENSION_ISSUER_MISMATCH = "AKI extension issuer mismatch",
+ AKI_EXTENSION_MISSING = "AKI extension missing",
+ AKI_EXTENSION_WRONG_FORMAT = "AKI extension is wrong format",
+ BAD_ASIDENTIFIERS = "Bad ASIdentifiers extension",
+ BAD_CERTIFICATE_POLICY = "Bad certificate policy",
+ BAD_CMS_ECONTENTTYPE = "Bad CMS eContentType",
+ BAD_CMS_SI_CONTENTTYPE = "Bad CMS SI ContentType",
+ BAD_CMS_SIGNER = "Bad CMS signer",
+ BAD_CMS_SIGNER_INFOS = "Bad CMS signerInfos",
+ BAD_CRL = "Bad CRL",
+ BAD_IPADDRBLOCKS = "Bad IPAddrBlocks extension",
+ BAD_KEY_USAGE = "Bad keyUsage",
+ BAD_MANIFEST_DIGEST_LENGTH = "Bad manifest digest length",
+ BAD_PUBLIC_KEY = "Bad public key",
+ BAD_ROA_ASID = "Bad ROA asID",
+ BAD_CERTIFICATE_SERIAL_NUMBER = "Bad certificate serialNumber",
+ BAD_MANIFEST_NUMBER = "Bad manifestNumber",
+ CERTIFICATE_BAD_SIGNATURE = "Bad certificate signature",
+ CERTIFICATE_FAILED_VALIDATION = "Certificate failed validation",
+ CMS_ECONTENT_DECODE_ERROR = "CMS eContent decode error",
+ CMS_INCLUDES_CRLS = "CMS includes CRLs",
+ CMS_SIGNER_MISSING = "CMS signer missing",
+ CMS_SKI_MISMATCH = "CMS SKI mismatch",
+ CMS_VALIDATION_FAILURE = "CMS validation failure",
+ CRL_ISSUER_NAME_MISMATCH = "CRL issuer name mismatch",
+ CRL_NOT_IN_MANIFEST = "CRL not listed in manifest",
+ CRL_NOT_YET_VALID = "CRL not yet valid",
+ CRL_NUMBER_EXTENSION_MISSING = "CRL number extension missing",
+ CRL_NUMBER_IS_NEGATIVE = "CRL number is negative",
+ CRL_NUMBER_OUT_OF_RANGE = "CRL number out of range",
+ CRLDP_DOESNT_MATCH_ISSUER_SIA = "CRLDP doesn't match issuer's SIA",
+ CRLDP_EXTENSION_FORBIDDEN = "CRLDP extension forbidden",
+ CRLDP_EXTENSION_MISSING = "CRLDP extension missing",
+ CRLDP_URI_MISSING = "CRLDP URI missing",
+ DISALLOWED_X509V3_EXTENSION = "Disallowed X.509v3 extension",
+ DUPLICATE_NAME_IN_MANIFEST = "Duplicate name in manifest",
+ INAPPROPRIATE_EKU_EXTENSION = "Inappropriate EKU extension",
+ MALFORMED_AIA_EXTENSION = "Malformed AIA extension",
+ MALFORMED_SIA_EXTENSION = "Malformed SIA extension",
+ MALFORMED_BASIC_CONSTRAINTS = "Malformed basicConstraints",
+ MALFORMED_TRUST_ANCHOR = "Malformed trust anchor",
+ MALFORMED_CADIRECTORY_URI = "Malformed caDirectory URI",
+ MALFORMED_CRLDP_EXTENSION = "Malformed CRDLP extension",
+ MALFORMED_CRLDP_URI = "Malformed CRDLP URI",
+ MALFORMED_ROA_ADDRESSFAMILY = "Malformed ROA addressFamily",
+ MALFORMED_TAL_URI = "Malformed TAL URI",
+ MANIFEST_CAREPOSITORY_MISMATCH = "Manifest caRepository mismatch",
+ MANIFEST_INTERVAL_OVERRUNS_CERT = "Manifest interval overruns certificate",
+ MANIFEST_LISTS_MISSING_OBJECT = "Manifest lists missing object",
+ MANIFEST_NOT_YET_VALID = "Manifest not yet valid",
+ MANIFEST_EE_REVOKED = "Manifest EE certificate revoked",
+ MISSING_RESOURCES = "Missing resources",
+ NONCONFORMANT_ASN1_TIME_VALUE = "Nonconformant ASN.1 time value",
+ NONCONFORMANT_PUBLIC_KEY_ALGORITHM = "Nonconformant public key algorithm",
+ NONCONFORMANT_SIGNATURE_ALGORITHM = "Nonconformant signature algorithm",
+ NONCONFORMANT_DIGEST_ALGORITHM = "Nonconformant digest algorithm",
+ NONCONFORMANT_CERTIFICATE_UID = "Nonconformant certificate UID",
+ OBJECT_REJECTED = "Object rejected",
+ RFC3779_INHERITANCE_REQUIRED = "RFC 3779 inheritance required",
+ ROA_CONTAINS_BAD_AFI_VALUE = "ROA contains bad AFI value",
+ ROA_MAX_PREFIXLEN_TOO_SHORT = "ROA maxPrefixlen too short",
+ ROA_RESOURCE_NOT_IN_EE = "ROA resource not in EE",
+ ROA_RESOURCES_MALFORMED = "ROA resources malformed",
+ RSYNC_TRANSFER_FAILED = "rsync transfer failed",
+ RSYNC_TRANSFER_TIMED_OUT = "rsync transfer timed out",
+ SAFI_NOT_ALLOWED = "SAFI not allowed",
+ SIA_CADIRECTORY_URI_MISSING = "SIA caDirectory URI missing",
+ SIA_EXTENSION_FORBIDDEN = "SIA extension forbidden",
+ SIA_EXTENSION_MISSING = "SIA extension missing",
+ SIA_MANIFEST_URI_MISSING = "SIA manifest URI missing",
+ SKI_EXTENSION_MISSING = "SKI extension missing",
+ SKI_PUBLIC_KEY_MISMATCH = "SKI public key mismatch",
+ TRUST_ANCHOR_KEY_MISMATCH = "Trust anchor key mismatch",
+ TRUST_ANCHOR_WITH_CRLDP = "Trust anchor can't have CRLDP",
+ UNKNOWN_AFI = "Unknown AFI",
+ UNKNOWN_OPENSSL_VERIFY_ERROR = "Unknown OpenSSL verify error",
+ UNREADABLE_OBJECT = "Unreadable object",
+ UNREADABLE_TRUST_ANCHOR = "Unreadable trust anchor",
+ UNREADABLE_TRUST_ANCHOR_LOCATOR = "Unreadable trust anchor locator",
+ WRONG_OBJECT_VERSION = "Wrong object version",
+ OBJECT_NOT_FOUND = "Object not found",
+ KEY_USAGE_MISSING = "Key usage missing"),
+
+ warn = dict(
+ AIA_DOESNT_MATCH_ISSUER = "AIA doesn't match issuer",
+ BACKUP_THISUPDATE_NEWER_THAN_CURRENT = "Backup thisUpdate newer than current",
+ BACKUP_NUMBER_HIGHER_THAN_CURRENT = "Backup number higher than current",
+ BAD_THISUPDATE = "Bad CRL thisUpdate",
+ BAD_CMS_SI_SIGNED_ATTRIBUTES = "Bad CMS SI signed attributes",
+ BAD_SIGNED_OBJECT_URI = "Bad signedObject URI",
+ CRLDP_NAMES_NEWER_CRL = "CRLDP names newer CRL",
+ DIGEST_MISMATCH = "Digest mismatch",
+ EE_CERTIFICATE_WITH_1024_BIT_KEY = "EE certificate with 1024 bit key",
+ GRATUITOUSLY_CRITICAL_EXTENSION = "Gratuitously critical extension",
+ INAPPROPRIATE_OBJECT_TYPE_SKIPPED = "Inappropriate object type skipped",
+ ISSUER_USES_MULTIPLE_CRLDP_VALUES = "Issuer uses multiple CRLDP values",
+ MULTIPLE_RSYNC_URIS_IN_EXTENSION = "Multiple rsync URIs in extension",
+ NONCONFORMANT_ISSUER_NAME = "Nonconformant X.509 issuer name",
+ NONCONFORMANT_SUBJECT_NAME = "Nonconformant X.509 subject name",
+ POLICY_QUALIFIER_CPS = "Policy Qualifier CPS",
+ RSYNC_PARTIAL_TRANSFER = "rsync partial transfer",
+ RSYNC_TRANSFER_SKIPPED = "rsync transfer skipped",
+ SIA_EXTENSION_MISSING_FROM_EE = "SIA extension missing from EE",
+ SKIPPED_BECAUSE_NOT_IN_MANIFEST = "Skipped because not in manifest",
+ STALE_CRL_OR_MANIFEST = "Stale CRL or manifest",
+ TAINTED_BY_STALE_CRL = "Tainted by stale CRL",
+ TAINTED_BY_STALE_MANIFEST = "Tainted by stale manifest",
+ TAINTED_BY_NOT_BEING_IN_MANIFEST = "Tainted by not being in manifest",
+ TRUST_ANCHOR_NOT_SELF_SIGNED = "Trust anchor not self-signed",
+ TRUST_ANCHOR_SKIPPED = "Trust anchor skipped",
+ UNKNOWN_OBJECT_TYPE_SKIPPED = "Unknown object type skipped",
+ URI_TOO_LONG = "URI too long",
+ WRONG_CMS_SI_SIGNATURE_ALGORITHM = "Wrong CMS SI signature algorithm",
+ WRONG_CMS_SI_DIGEST_ALGORITHM = "Wrong CMS SI digest algorithm"),
+
+ good = dict(
+ NON_RSYNC_URI_IN_EXTENSION = "Non-rsync URI in extension",
+ OBJECT_ACCEPTED = "Object accepted",
+ RECHECKING_OBJECT = "Rechecking object",
+ RSYNC_TRANSFER_SUCCEEDED = "rsync transfer succeeded",
+ VALIDATION_OK = "OK"),
+
+ verification_errors = _POW.getVerificationErrors())
diff --git a/rpki/adns.py b/rpki/adns.py
index 968684b5..4f8cf7ea 100644
--- a/rpki/adns.py
+++ b/rpki/adns.py
@@ -22,24 +22,25 @@ Basic asynchronous DNS code, using asyncore and Bob Halley's excellent
dnspython package.
"""
+# pylint: skip-file
+
import sys
import time
import socket
import logging
import asyncore
-import rpki.async
import rpki.sundial
import rpki.log
try:
- import dns.resolver, dns.rdatatype, dns.rdataclass, dns.name, dns.message
- import dns.inet, dns.exception, dns.query, dns.rcode, dns.ipv4, dns.ipv6
+ import dns.resolver, dns.rdatatype, dns.rdataclass, dns.name, dns.message
+ import dns.inet, dns.exception, dns.query, dns.rcode, dns.ipv4, dns.ipv6
except ImportError:
- if __name__ == "__main__":
- sys.stderr.write("DNSPython not available, skipping rpki.adns unit test\n")
- sys.exit(0)
- else:
- raise
+ if __name__ == "__main__":
+ sys.stderr.write("DNSPython not available, skipping rpki.adns unit test\n")
+ sys.exit(0)
+ else:
+ raise
logger = logging.getLogger(__name__)
@@ -48,7 +49,7 @@ logger = logging.getLogger(__name__)
resolver = dns.resolver.Resolver()
if resolver.cache is None:
- resolver.cache = dns.resolver.Cache()
+ resolver.cache = dns.resolver.Cache()
## @var nameservers
# Nameservers from resolver.nameservers converted to (af, address)
@@ -59,313 +60,326 @@ if resolver.cache is None:
nameservers = []
for ns in resolver.nameservers:
- try:
- nameservers.append((socket.AF_INET, dns.ipv4.inet_aton(ns)))
- continue
- except Exception:
- pass
- try:
- nameservers.append((socket.AF_INET6, dns.ipv6.inet_aton(ns)))
- continue
- except Exception:
- pass
- logger.error("Couldn't parse nameserver address %r", ns)
+ try:
+ nameservers.append((socket.AF_INET, dns.ipv4.inet_aton(ns)))
+ continue
+ except:
+ pass
+ try:
+ nameservers.append((socket.AF_INET6, dns.ipv6.inet_aton(ns)))
+ continue
+ except:
+ pass
+ logger.error("Couldn't parse nameserver address %r", ns)
class dispatcher(asyncore.dispatcher):
- """
- Basic UDP socket reader for use with asyncore.
- """
-
- def __init__(self, cb, eb, af, bufsize = 65535):
- asyncore.dispatcher.__init__(self)
- self.cb = cb
- self.eb = eb
- self.af = af
- self.bufsize = bufsize
- self.create_socket(af, socket.SOCK_DGRAM)
-
- def handle_read(self):
"""
- Receive a packet, hand it off to query class callback.
+ Basic UDP socket reader for use with asyncore.
"""
- wire, from_address = self.recvfrom(self.bufsize)
- self.cb(self.af, from_address[0], from_address[1], wire)
- def handle_error(self):
- """
- Pass errors to query class errback.
- """
- self.eb(sys.exc_info()[1])
+ def __init__(self, cb, eb, af, bufsize = 65535):
+ asyncore.dispatcher.__init__(self)
+ self.cb = cb
+ self.eb = eb
+ self.af = af
+ self.bufsize = bufsize
+ self.create_socket(af, socket.SOCK_DGRAM)
- def handle_connect(self):
- """
- Quietly ignore UDP "connection" events.
- """
- pass
+ def handle_read(self):
+ """
+ Receive a packet, hand it off to query class callback.
+ """
- def writable(self):
- """
- We don't need to hear about UDP socket becoming writable.
- """
- return False
+ wire, from_address = self.recvfrom(self.bufsize)
+ self.cb(self.af, from_address[0], from_address[1], wire)
+ def handle_error(self):
+ """
+ Pass errors to query class errback.
+ """
-class query(object):
- """
- Simplified (no search paths) asynchronous adaptation of
- dns.resolver.Resolver.query() (q.v.).
- """
-
- def __init__(self, cb, eb, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
- if isinstance(qname, (str, unicode)):
- qname = dns.name.from_text(qname)
- if isinstance(qtype, str):
- qtype = dns.rdatatype.from_text(qtype)
- if isinstance(qclass, str):
- qclass = dns.rdataclass.from_text(qclass)
- assert qname.is_absolute()
- self.cb = cb
- self.eb = eb
- self.qname = qname
- self.qtype = qtype
- self.qclass = qclass
- self.start = time.time()
- rpki.async.event_defer(self.go)
-
- def go(self):
- """
- Start running the query. Check our cache before doing network
- query; if we find an answer there, just return it. Otherwise
- start the network query.
- """
- if resolver.cache:
- answer = resolver.cache.get((self.qname, self.qtype, self.qclass))
- else:
- answer = None
- if answer:
- self.cb(self, answer)
- else:
- self.timer = rpki.async.timer()
- self.sockets = {}
- self.request = dns.message.make_query(self.qname, self.qtype, self.qclass)
- if resolver.keyname is not None:
- self.request.use_tsig(resolver.keyring, resolver.keyname, resolver.keyalgorithm)
- self.request.use_edns(resolver.edns, resolver.ednsflags, resolver.payload)
- self.response = None
- self.backoff = 0.10
- self.nameservers = nameservers[:]
- self.loop1()
-
- def loop1(self):
- """
- Outer loop. If we haven't got a response yet and still have
- nameservers to check, start inner loop. Otherwise, we're done.
- """
- self.timer.cancel()
- if self.response is None and self.nameservers:
- self.iterator = rpki.async.iterator(self.nameservers[:], self.loop2, self.done2)
- else:
- self.done1()
+ self.eb(sys.exc_info()[1])
- def loop2(self, iterator, nameserver):
- """
- Inner loop. Send query to next nameserver in our list, unless
- we've hit the overall timeout for this query.
- """
- self.timer.cancel()
- try:
- timeout = resolver._compute_timeout(self.start)
- except dns.resolver.Timeout, e:
- self.lose(e)
- else:
- af, addr = nameserver
- if af not in self.sockets:
- self.sockets[af] = dispatcher(self.socket_cb, self.socket_eb, af)
- self.sockets[af].sendto(self.request.to_wire(),
- (dns.inet.inet_ntop(af, addr), resolver.port))
- self.timer.set_handler(self.socket_timeout)
- self.timer.set_errback(self.socket_eb)
- self.timer.set(rpki.sundial.timedelta(seconds = timeout))
-
- def socket_timeout(self):
- """
- No answer from nameserver, move on to next one (inner loop).
- """
- self.response = None
- self.iterator()
+ def handle_connect(self):
+ """
+ Quietly ignore UDP "connection" events.
+ """
- def socket_eb(self, e):
- """
- UDP socket signaled error. If it really is some kind of socket
- error, handle as if we've timed out on this nameserver; otherwise,
- pass error back to caller.
- """
- self.timer.cancel()
- if isinstance(e, socket.error):
- self.response = None
- self.iterator()
- else:
- self.lose(e)
+ pass
- def socket_cb(self, af, from_host, from_port, wire):
- """
- Received a packet that might be a DNS message. If it doesn't look
- like it came from one of our nameservers, just drop it and leave
- the timer running. Otherwise, try parsing it: if it's an answer,
- we're done, otherwise handle error appropriately and move on to
- next nameserver.
- """
- sender = (af, dns.inet.inet_pton(af, from_host))
- if from_port != resolver.port or sender not in self.nameservers:
- return
- self.timer.cancel()
- try:
- self.response = dns.message.from_wire(wire, keyring = self.request.keyring, request_mac = self.request.mac, one_rr_per_rrset = False)
- except dns.exception.FormError:
- self.nameservers.remove(sender)
- else:
- rcode = self.response.rcode()
- if rcode in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN):
- self.done1()
- return
- if rcode != dns.rcode.SERVFAIL:
- self.nameservers.remove(sender)
- self.response = None
- self.iterator()
-
- def done2(self):
- """
- Done with inner loop. If we still haven't got an answer and
- haven't (yet?) eliminated all of our nameservers, wait a little
- while before starting the cycle again, unless we've hit the
- timeout threshold for the whole query.
- """
- if self.response is None and self.nameservers:
- try:
- delay = rpki.sundial.timedelta(seconds = min(resolver._compute_timeout(self.start), self.backoff))
- self.backoff *= 2
- self.timer.set_handler(self.loop1)
- self.timer.set_errback(self.lose)
- self.timer.set(delay)
- except dns.resolver.Timeout, e:
- self.lose(e)
- else:
- self.loop1()
+ def writable(self):
+ """
+ We don't need to hear about UDP socket becoming writable.
+ """
- def cleanup(self):
- """
- Shut down our timer and sockets.
- """
- self.timer.cancel()
- for s in self.sockets.itervalues():
- s.close()
+ return False
- def lose(self, e):
- """
- Something bad happened. Clean up, then pass error back to caller.
- """
- self.cleanup()
- self.eb(self, e)
- def done1(self):
+class query(object):
"""
- Done with outer loop. If we got a useful answer, cache it, then
- pass it back to caller; if we got an error, pass the appropriate
- exception back to caller.
+ Simplified (no search paths) asynchronous adaptation of
+ dns.resolver.Resolver.query() (q.v.).
"""
- self.cleanup()
- try:
- if not self.nameservers:
- raise dns.resolver.NoNameservers
- if self.response.rcode() == dns.rcode.NXDOMAIN:
- raise dns.resolver.NXDOMAIN
- answer = dns.resolver.Answer(self.qname, self.qtype, self.qclass, self.response)
- if resolver.cache:
- resolver.cache.put((self.qname, self.qtype, self.qclass), answer)
- self.cb(self, answer)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- self.lose(e)
+
+ def __init__(self, cb, eb, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
+ if isinstance(qname, (str, unicode)):
+ qname = dns.name.from_text(qname)
+ if isinstance(qtype, str):
+ qtype = dns.rdatatype.from_text(qtype)
+ if isinstance(qclass, str):
+ qclass = dns.rdataclass.from_text(qclass)
+ assert qname.is_absolute()
+ self.cb = cb
+ self.eb = eb
+ self.qname = qname
+ self.qtype = qtype
+ self.qclass = qclass
+ self.start = time.time()
+ rpki.async.event_defer(self.go)
+
+ def go(self):
+ """
+ Start running the query. Check our cache before doing network
+ query; if we find an answer there, just return it. Otherwise
+ start the network query.
+ """
+
+ if resolver.cache:
+ answer = resolver.cache.get((self.qname, self.qtype, self.qclass))
+ else:
+ answer = None
+ if answer:
+ self.cb(self, answer)
+ else:
+ self.timer = rpki.async.timer()
+ self.sockets = {}
+ self.request = dns.message.make_query(self.qname, self.qtype, self.qclass)
+ if resolver.keyname is not None:
+ self.request.use_tsig(resolver.keyring, resolver.keyname, resolver.keyalgorithm)
+ self.request.use_edns(resolver.edns, resolver.ednsflags, resolver.payload)
+ self.response = None
+ self.backoff = 0.10
+ self.nameservers = nameservers[:]
+ self.loop1()
+
+ def loop1(self):
+ """
+ Outer loop. If we haven't got a response yet and still have
+ nameservers to check, start inner loop. Otherwise, we're done.
+ """
+
+ self.timer.cancel()
+ if self.response is None and self.nameservers:
+ self.iterator = rpki.async.iterator(self.nameservers[:], self.loop2, self.done2)
+ else:
+ self.done1()
+
+ def loop2(self, iterator, nameserver):
+ """
+ Inner loop. Send query to next nameserver in our list, unless
+ we've hit the overall timeout for this query.
+ """
+
+ self.timer.cancel()
+ try:
+ timeout = resolver._compute_timeout(self.start)
+ except dns.resolver.Timeout, e:
+ self.lose(e)
+ else:
+ af, addr = nameserver
+ if af not in self.sockets:
+ self.sockets[af] = dispatcher(self.socket_cb, self.socket_eb, af)
+ self.sockets[af].sendto(self.request.to_wire(),
+ (dns.inet.inet_ntop(af, addr), resolver.port))
+ self.timer.set_handler(self.socket_timeout)
+ self.timer.set_errback(self.socket_eb)
+ self.timer.set(rpki.sundial.timedelta(seconds = timeout))
+
+ def socket_timeout(self):
+ """
+ No answer from nameserver, move on to next one (inner loop).
+ """
+
+ self.response = None
+ self.iterator()
+
+ def socket_eb(self, e):
+ """
+ UDP socket signaled error. If it really is some kind of socket
+ error, handle as if we've timed out on this nameserver; otherwise,
+ pass error back to caller.
+ """
+
+ self.timer.cancel()
+ if isinstance(e, socket.error):
+ self.response = None
+ self.iterator()
+ else:
+ self.lose(e)
+
+ def socket_cb(self, af, from_host, from_port, wire):
+ """
+ Received a packet that might be a DNS message. If it doesn't look
+ like it came from one of our nameservers, just drop it and leave
+ the timer running. Otherwise, try parsing it: if it's an answer,
+ we're done, otherwise handle error appropriately and move on to
+ next nameserver.
+ """
+
+ sender = (af, dns.inet.inet_pton(af, from_host))
+ if from_port != resolver.port or sender not in self.nameservers:
+ return
+ self.timer.cancel()
+ try:
+ self.response = dns.message.from_wire(wire, keyring = self.request.keyring, request_mac = self.request.mac, one_rr_per_rrset = False)
+ except dns.exception.FormError:
+ self.nameservers.remove(sender)
+ else:
+ rcode = self.response.rcode()
+ if rcode in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN):
+ self.done1()
+ return
+ if rcode != dns.rcode.SERVFAIL:
+ self.nameservers.remove(sender)
+ self.response = None
+ self.iterator()
+
+ def done2(self):
+ """
+ Done with inner loop. If we still haven't got an answer and
+ haven't (yet?) eliminated all of our nameservers, wait a little
+ while before starting the cycle again, unless we've hit the
+ timeout threshold for the whole query.
+ """
+
+ if self.response is None and self.nameservers:
+ try:
+ delay = rpki.sundial.timedelta(seconds = min(resolver._compute_timeout(self.start), self.backoff))
+ self.backoff *= 2
+ self.timer.set_handler(self.loop1)
+ self.timer.set_errback(self.lose)
+ self.timer.set(delay)
+ except dns.resolver.Timeout, e:
+ self.lose(e)
+ else:
+ self.loop1()
+
+ def cleanup(self):
+ """
+ Shut down our timer and sockets.
+ """
+
+ self.timer.cancel()
+ for s in self.sockets.itervalues():
+ s.close()
+
+ def lose(self, e):
+ """
+ Something bad happened. Clean up, then pass error back to caller.
+ """
+
+ self.cleanup()
+ self.eb(self, e)
+
+ def done1(self):
+ """
+ Done with outer loop. If we got a useful answer, cache it, then
+ pass it back to caller; if we got an error, pass the appropriate
+ exception back to caller.
+ """
+
+ self.cleanup()
+ try:
+ if not self.nameservers:
+ raise dns.resolver.NoNameservers
+ if self.response.rcode() == dns.rcode.NXDOMAIN:
+ raise dns.resolver.NXDOMAIN
+ answer = dns.resolver.Answer(self.qname, self.qtype, self.qclass, self.response)
+ if resolver.cache:
+ resolver.cache.put((self.qname, self.qtype, self.qclass), answer)
+ self.cb(self, answer)
+ except (rpki.async.ExitNow, SystemExit):
+ raise
+ except Exception, e:
+ self.lose(e)
class getaddrinfo(object):
- typemap = { dns.rdatatype.A : socket.AF_INET,
- dns.rdatatype.AAAA : socket.AF_INET6 }
-
- def __init__(self, cb, eb, host, address_families = typemap.values()):
- self.cb = cb
- self.eb = eb
- self.host = host
- self.result = []
- self.queries = [query(self.done, self.lose, host, qtype)
- for qtype in self.typemap
- if self.typemap[qtype] in address_families]
-
- def done(self, q, answer):
- if answer is not None:
- for a in answer:
- self.result.append((self.typemap[a.rdtype], a.address))
- self.queries.remove(q)
- if not self.queries:
- self.cb(self.result)
-
- def lose(self, q, e):
- if isinstance(e, dns.resolver.NoAnswer):
- self.done(q, None)
- else:
- for q in self.queries:
- q.cleanup()
- self.eb(e)
+ typemap = { dns.rdatatype.A : socket.AF_INET,
+ dns.rdatatype.AAAA : socket.AF_INET6 }
+
+ def __init__(self, cb, eb, host, address_families = typemap.values()):
+ self.cb = cb
+ self.eb = eb
+ self.host = host
+ self.result = []
+ self.queries = [query(self.done, self.lose, host, qtype)
+ for qtype in self.typemap
+ if self.typemap[qtype] in address_families]
+
+ def done(self, q, answer):
+ if answer is not None:
+ for a in answer:
+ self.result.append((self.typemap[a.rdtype], a.address))
+ self.queries.remove(q)
+ if not self.queries:
+ self.cb(self.result)
+
+ def lose(self, q, e):
+ if isinstance(e, dns.resolver.NoAnswer):
+ self.done(q, None)
+ else:
+ for q in self.queries:
+ q.cleanup()
+ self.eb(e)
if __name__ == "__main__":
- rpki.log.init("test-adns")
- print "Some adns tests may take a minute or two, please be patient"
+ print "Some adns tests may take a minute or two, please be patient"
- class test_getaddrinfo(object):
+ class test_getaddrinfo(object):
- def __init__(self, qname):
- self.qname = qname
- getaddrinfo(self.done, self.lose, qname)
+ def __init__(self, qname):
+ self.qname = qname
+ getaddrinfo(self.done, self.lose, qname)
- def done(self, result):
- print "getaddrinfo(%s) returned: %s" % (
- self.qname,
- ", ".join(str(r) for r in result))
+ def done(self, result):
+ print "getaddrinfo(%s) returned: %s" % (
+ self.qname,
+ ", ".join(str(r) for r in result))
- def lose(self, e):
- print "getaddrinfo(%s) failed: %r" % (self.qname, e)
+ def lose(self, e):
+ print "getaddrinfo(%s) failed: %r" % (self.qname, e)
- class test_query(object):
+ class test_query(object):
- def __init__(self, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
- self.qname = qname
- self.qtype = qtype
- self.qclass = qclass
- query(self.done, self.lose, qname, qtype = qtype, qclass = qclass)
+ def __init__(self, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN):
+ self.qname = qname
+ self.qtype = qtype
+ self.qclass = qclass
+ query(self.done, self.lose, qname, qtype = qtype, qclass = qclass)
- def done(self, q, result):
- print "query(%s, %s, %s) returned: %s" % (
- self.qname,
- dns.rdatatype.to_text(self.qtype),
- dns.rdataclass.to_text(self.qclass),
- ", ".join(str(r) for r in result))
+ def done(self, q, result):
+ print "query(%s, %s, %s) returned: %s" % (
+ self.qname,
+ dns.rdatatype.to_text(self.qtype),
+ dns.rdataclass.to_text(self.qclass),
+ ", ".join(str(r) for r in result))
- def lose(self, q, e):
- print "getaddrinfo(%s, %s, %s) failed: %r" % (
- self.qname,
- dns.rdatatype.to_text(self.qtype),
- dns.rdataclass.to_text(self.qclass),
- e)
-
- if True:
- for t in (dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.HINFO):
- test_query("subvert-rpki.hactrn.net", t)
- test_query("nonexistant.rpki.net")
- test_query("subvert-rpki.hactrn.net", qclass = dns.rdataclass.CH)
-
- for h in ("subvert-rpki.hactrn.net", "nonexistant.rpki.net"):
- test_getaddrinfo(h)
-
- rpki.async.event_loop()
+ def lose(self, q, e):
+ print "getaddrinfo(%s, %s, %s) failed: %r" % (
+ self.qname,
+ dns.rdatatype.to_text(self.qtype),
+ dns.rdataclass.to_text(self.qclass),
+ e)
+
+ if True:
+ for t in (dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.HINFO):
+ test_query("subvert-rpki.hactrn.net", t)
+ test_query("nonexistant.rpki.net")
+ test_query("subvert-rpki.hactrn.net", qclass = dns.rdataclass.CH)
+
+ for h in ("subvert-rpki.hactrn.net", "nonexistant.rpki.net"):
+ test_getaddrinfo(h)
+
+ rpki.async.event_loop()
diff --git a/rpki/async.py b/rpki/async.py
deleted file mode 100644
index 75b4b656..00000000
--- a/rpki/async.py
+++ /dev/null
@@ -1,433 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-"""
-Utilities for event-driven programming.
-"""
-
-import gc
-import sys
-import signal
-import logging
-import asyncore
-import traceback
-import rpki.log
-import rpki.sundial
-
-logger = logging.getLogger(__name__)
-
-ExitNow = asyncore.ExitNow
-
-class iterator(object):
- """
- Iteration construct for event-driven code. Takes three
- arguments:
-
- - Some kind of iterable object
-
- - A callback to call on each item in the iteration
-
- - A callback to call after the iteration terminates.
-
- The item callback receives two arguments: the callable iterator
- object and the current value of the iteration. It should call the
- iterator (or arrange for the iterator to be called) when it is time
- to continue to the next item in the iteration.
-
- The termination callback receives no arguments.
-
- Special case for memory constrained cases: if keyword argument
- pop_list is True, iterable must be a list, which is modified in
- place, popping items off of it until it's empty.
- """
-
- def __init__(self, iterable, item_callback, done_callback, unwind_stack = True, pop_list = False):
- assert not pop_list or isinstance(iterable, list), "iterable must be a list when using pop_list"
- self.item_callback = item_callback
- self.done_callback = done_callback if done_callback is not None else lambda: None
- self.caller_file, self.caller_line, self.caller_function = traceback.extract_stack(limit = 2)[0][0:3]
- self.unwind_stack = unwind_stack
- self.pop_list = pop_list
- try:
- if self.pop_list:
- self.iterator = iterable
- else:
- self.iterator = iter(iterable)
- except (ExitNow, SystemExit):
- raise
- except Exception:
- logger.debug("Problem constructing iterator for %s", repr(iterable))
- raise
- self.doit()
-
- def __repr__(self):
- return rpki.log.log_repr(self,
- "created at %s:%s" % (self.caller_file,
- self.caller_line),
- self.caller_function)
-
- def __call__(self):
- if self.unwind_stack:
- event_defer(self.doit)
- else:
- self.doit()
-
- def doit(self):
- """
- Implement the iterator protocol: attempt to call the item handler
- with the next iteration value, call the termination handler if the
- iterator signaled StopIteration.
- """
-
- try:
- if self.pop_list:
- val = self.iterator.pop(0)
- else:
- val = self.iterator.next()
- except (IndexError, StopIteration):
- self.done_callback()
- else:
- self.item_callback(self, val)
-
-## @var timer_queue
-# Timer queue.
-
-timer_queue = []
-
-class timer(object):
- """
- Timer construct for event-driven code.
- """
-
- ## @var gc_debug
- # Verbose chatter about timers states and garbage collection.
- gc_debug = False
-
- ## @var run_debug
- # Verbose chatter about timers being run.
- run_debug = False
-
- def __init__(self, handler = None, errback = None):
- self.set_handler(handler)
- self.set_errback(errback)
- self.when = None
- if self.gc_debug:
- self.trace("Creating %r" % self)
-
- def trace(self, msg):
- """
- Debug logging.
- """
- if self.gc_debug:
- bt = traceback.extract_stack(limit = 3)
- logger.debug("%s from %s:%d", msg, bt[0][0], bt[0][1])
-
- def set(self, when):
- """
- Set a timer. Argument can be a datetime, to specify an absolute
- time, or a timedelta, to specify an offset time.
- """
- if self.gc_debug:
- self.trace("Setting %r to %r" % (self, when))
- if isinstance(when, rpki.sundial.timedelta):
- self.when = rpki.sundial.now() + when
- else:
- self.when = when
- assert isinstance(self.when, rpki.sundial.datetime), "%r: Expecting a datetime, got %r" % (self, self.when)
- if self not in timer_queue:
- timer_queue.append(self)
- timer_queue.sort(key = lambda x: x.when)
-
- def __cmp__(self, other):
- return cmp(id(self), id(other))
-
- if gc_debug:
- def __del__(self):
- logger.debug("Deleting %r", self)
-
- def cancel(self):
- """
- Cancel a timer, if it was set.
- """
- if self.gc_debug:
- self.trace("Canceling %r" % self)
- try:
- while True:
- timer_queue.remove(self)
- except ValueError:
- pass
-
- def is_set(self):
- """
- Test whether this timer is currently set.
- """
- return self in timer_queue
-
- def set_handler(self, handler):
- """
- Set timer's expiration handler. This is an alternative to
- subclassing the timer class, and may be easier to use when
- integrating timers into other classes (eg, the handler can be a
- bound method to an object in a class representing a network
- connection).
- """
- self.handler = handler
-
- def set_errback(self, errback):
- """
- Set a timer's errback. Like set_handler(), for errbacks.
- """
- self.errback = errback
-
- @classmethod
- def runq(cls):
- """
- Run the timer queue: for each timer whose call time has passed,
- pull the timer off the queue and call its handler() method.
-
- Comparisions are made against time at which this function was
- called, so that even if new events keep getting scheduled, we'll
- return to the I/O loop reasonably quickly.
- """
- now = rpki.sundial.now()
- while timer_queue and now >= timer_queue[0].when:
- t = timer_queue.pop(0)
- if cls.run_debug:
- logger.debug("Running %r", t)
- try:
- if t.handler is not None:
- t.handler()
- else:
- logger.warning("Timer %r expired with no handler set", t)
- except (ExitNow, SystemExit):
- raise
- except Exception, e:
- if t.errback is not None:
- t.errback(e)
- else:
- logger.exception("Unhandled exception from timer %r", t)
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.when, repr(self.handler))
-
- @classmethod
- def seconds_until_wakeup(cls):
- """
- Calculate delay until next timer expires, or None if no timers are
- set and we should wait indefinitely. Rounds up to avoid spinning
- in select() or poll(). We could calculate fractional seconds in
- the right units instead, but select() and poll() don't even take
- the same units (argh!), and we're not doing anything that
- hair-triggered, so rounding up is simplest.
- """
- if not timer_queue:
- return None
- now = rpki.sundial.now()
- if now >= timer_queue[0].when:
- return 0
- delay = timer_queue[0].when - now
- seconds = delay.convert_to_seconds()
- if delay.microseconds:
- seconds += 1
- return seconds
-
- @classmethod
- def clear(cls):
- """
- Cancel every timer on the queue. We could just throw away the
- queue content, but this way we can notify subclasses that provide
- their own cancel() method.
- """
- while timer_queue:
- timer_queue.pop(0).cancel()
-
-def _raiseExitNow(signum, frame):
- """
- Signal handler for event_loop().
- """
- raise ExitNow
-
-def exit_event_loop():
- """
- Force exit from event_loop().
- """
- raise ExitNow
-
-def event_defer(handler, delay = rpki.sundial.timedelta(seconds = 0)):
- """
- Use a near-term (default: zero interval) timer to schedule an event
- to run after letting the I/O system have a turn.
- """
- timer(handler).set(delay)
-
-## @var debug_event_timing
-# Enable insanely verbose logging of event timing
-
-debug_event_timing = False
-
-def event_loop(catch_signals = (signal.SIGINT, signal.SIGTERM)):
- """
- Replacement for asyncore.loop(), adding timer and signal support.
- """
- old_signal_handlers = {}
- while True:
- save_sigs = len(old_signal_handlers) == 0
- try:
- for sig in catch_signals:
- old = signal.signal(sig, _raiseExitNow)
- if save_sigs:
- old_signal_handlers[sig] = old
- while asyncore.socket_map or timer_queue:
- t = timer.seconds_until_wakeup()
- if debug_event_timing:
- logger.debug("Dismissing to asyncore.poll(), t = %s, q = %r", t, timer_queue)
- asyncore.poll(t, asyncore.socket_map)
- timer.runq()
- if timer.gc_debug:
- gc.collect()
- if gc.garbage:
- for i in gc.garbage:
- logger.debug("GC-cycle %r", i)
- del gc.garbage[:]
- except ExitNow:
- break
- except SystemExit:
- raise
- except ValueError, e:
- if str(e) == "filedescriptor out of range in select()":
- logger.error("Something is badly wrong, select() thinks we gave it a bad file descriptor.")
- logger.error("Content of asyncore.socket_map:")
- for fd in sorted(asyncore.socket_map.iterkeys()):
- logger.error(" fd %s obj %r", fd, asyncore.socket_map[fd])
- logger.error("Not safe to continue due to risk of spin loop on select(). Exiting.")
- sys.exit(1)
- logger.exception("event_loop() exited with exception %r, this is not supposed to happen, restarting")
- except Exception, e:
- logger.exception("event_loop() exited with exception %r, this is not supposed to happen, restarting")
- else:
- break
- finally:
- for sig in old_signal_handlers:
- signal.signal(sig, old_signal_handlers[sig])
-
-class sync_wrapper(object):
- """
- Synchronous wrapper around asynchronous functions. Running in
- asynchronous mode at all times makes sense for event-driven daemons,
- but is kind of tedious for simple scripts, hence this wrapper.
-
- The wrapped function should take at least two arguments: a callback
- function and an errback function. If any arguments are passed to
- the wrapper, they will be passed as additional arguments to the
- wrapped function.
- """
-
- res = None
- err = None
- fin = False
-
- def __init__(self, func, disable_signal_handlers = False):
- self.func = func
- self.disable_signal_handlers = disable_signal_handlers
-
- def cb(self, res = None):
- """
- Wrapped code has requested normal termination. Store result, and
- exit the event loop.
- """
- self.res = res
- self.fin = True
- logger.debug("%r callback with result %r", self, self.res)
- raise ExitNow
-
- def eb(self, err):
- """
- Wrapped code raised an exception. Store exception data, then exit
- the event loop.
- """
- exc_info = sys.exc_info()
- self.err = exc_info if exc_info[1] is err else err
- self.fin = True
- logger.debug("%r errback with exception %r", self, self.err)
- raise ExitNow
-
- def __call__(self, *args, **kwargs):
-
- def thunk():
- try:
- self.func(self.cb, self.eb, *args, **kwargs)
- except ExitNow:
- raise
- except Exception, e:
- self.eb(e)
-
- event_defer(thunk)
- if self.disable_signal_handlers:
- event_loop(catch_signals = ())
- else:
- event_loop()
- if not self.fin:
- logger.warning("%r event_loop terminated without callback or errback", self)
- if self.err is None:
- return self.res
- elif isinstance(self.err, tuple):
- raise self.err[0], self.err[1], self.err[2]
- else:
- raise self.err
-
-class gc_summary(object):
- """
- Periodic summary of GC state, for tracking down memory bloat.
- """
-
- def __init__(self, interval, threshold = 0):
- if isinstance(interval, (int, long)):
- interval = rpki.sundial.timedelta(seconds = interval)
- self.interval = interval
- self.threshold = threshold
- self.timer = timer(handler = self.handler)
- self.timer.set(self.interval)
-
- def handler(self):
- """
- Collect and log GC state for this period, reset timer.
- """
- logger.debug("gc_summary: Running gc.collect()")
- gc.collect()
- logger.debug("gc_summary: Summarizing (threshold %d)", self.threshold)
- total = {}
- tuples = {}
- for g in gc.get_objects():
- k = type(g).__name__
- total[k] = total.get(k, 0) + 1
- if isinstance(g, tuple):
- k = ", ".join(type(x).__name__ for x in g)
- tuples[k] = tuples.get(k, 0) + 1
- logger.debug("gc_summary: Sorting result")
- total = total.items()
- total.sort(reverse = True, key = lambda x: x[1])
- tuples = tuples.items()
- tuples.sort(reverse = True, key = lambda x: x[1])
- logger.debug("gc_summary: Object type counts in descending order")
- for name, count in total:
- if count > self.threshold:
- logger.debug("gc_summary: %8d %s", count, name)
- logger.debug("gc_summary: Tuple content type signature counts in descending order")
- for types, count in tuples:
- if count > self.threshold:
- logger.debug("gc_summary: %8d (%s)", count, types)
- logger.debug("gc_summary: Scheduling next cycle")
- self.timer.set(self.interval)
diff --git a/rpki/cli.py b/rpki/cli.py
index e75b8430..cbd2b1e1 100644
--- a/rpki/cli.py
+++ b/rpki/cli.py
@@ -28,244 +28,265 @@ import argparse
import traceback
try:
- import readline
- have_readline = True
+ import readline
+ have_readline = True
except ImportError:
- have_readline = False
+ have_readline = False
class BadCommandSyntax(Exception):
- "Bad command line syntax."
+ "Bad command line syntax."
class ExitArgparse(Exception):
- "Exit method from ArgumentParser."
+ "Exit method from ArgumentParser."
- def __init__(self, message = None, status = 0):
- super(ExitArgparse, self).__init__()
- self.message = message
- self.status = status
+ def __init__(self, message = None, status = 0):
+ super(ExitArgparse, self).__init__()
+ self.message = message
+ self.status = status
class Cmd(cmd.Cmd):
- """
- Customized subclass of Python cmd module.
- """
+ """
+ Customized subclass of Python cmd module.
+ """
- emptyline_repeats_last_command = False
+ emptyline_repeats_last_command = False
- EOF_exits_command_loop = True
+ EOF_exits_command_loop = True
- identchars = cmd.IDENTCHARS + "/-."
+ identchars = cmd.IDENTCHARS + "/-."
- histfile = None
+ histfile = None
- last_command_failed = False
+ last_command_failed = False
- def onecmd(self, line):
- """
- Wrap error handling around cmd.Cmd.onecmd(). Might want to do
- something kinder than showing a traceback, eventually.
- """
+ def onecmd(self, line):
+ """
+ Wrap error handling around cmd.Cmd.onecmd(). Might want to do
+ something kinder than showing a traceback, eventually.
+ """
- self.last_command_failed = False
- try:
- return cmd.Cmd.onecmd(self, line)
- except SystemExit:
- raise
- except ExitArgparse, e:
- if e.message is not None:
- print e.message
- self.last_command_failed = e.status != 0
- return False
- except BadCommandSyntax, e:
- print e
- except Exception:
- traceback.print_exc()
- self.last_command_failed = True
- return False
-
- def do_EOF(self, arg):
- if self.EOF_exits_command_loop and self.prompt:
- print
- return self.EOF_exits_command_loop
-
- def do_exit(self, arg):
- """
- Exit program.
- """
+ self.last_command_failed = False
+ try:
+ return cmd.Cmd.onecmd(self, line)
+ except SystemExit:
+ raise
+ except ExitArgparse, e:
+ if e.message is not None:
+ print e.message
+ self.last_command_failed = e.status != 0
+ return False
+ except BadCommandSyntax, e:
+ print e
+ except:
+ traceback.print_exc()
+ self.last_command_failed = True
+ return False
+
+ def do_EOF(self, arg):
+ if self.EOF_exits_command_loop and self.prompt:
+ print
+ return self.EOF_exits_command_loop
+
+ def do_exit(self, arg):
+ """
+ Exit program.
+ """
+
+ return True
+
+ do_quit = do_exit
+
+ def emptyline(self):
+ """
+ Handle an empty line. cmd module default is to repeat the last
+ command, which I find to be violation of the principal of least
+ astonishment, so my preference is that an empty line does nothing.
+ """
+
+ if self.emptyline_repeats_last_command:
+ cmd.Cmd.emptyline(self)
+
+ def filename_complete(self, text, line, begidx, endidx):
+ """
+ Filename completion handler, with hack to restore what I consider
+ the normal (bash-like) behavior when one hits the completion key
+ and there's only one match.
+ """
+
+ result = glob.glob(text + "*")
+ if len(result) == 1:
+ path = result.pop()
+ if os.path.isdir(path) or (os.path.islink(path) and os.path.isdir(os.path.join(path, "."))):
+ result.append(path + os.path.sep)
+ else:
+ result.append(path + " ")
+ return result
+
+ def completenames(self, text, *ignored):
+ """
+ Command name completion handler, with hack to restore what I
+ consider the normal (bash-like) behavior when one hits the
+ completion key and there's only one match.
+ """
+
+ result = cmd.Cmd.completenames(self, text, *ignored)
+ if len(result) == 1:
+ result[0] += " "
+ return result
+
+ def help_help(self):
+ """
+ Type "help [topic]" for help on a command,
+ or just "help" for a list of commands.
+ """
+
+ self.stdout.write(self.help_help.__doc__ + "\n")
+
+ def complete_help(self, *args):
+ """
+ Better completion function for help command arguments.
+ """
+
+ text = args[0]
+ names = self.get_names()
+ result = []
+ for prefix in ("do_", "help_"):
+ result.extend(s[len(prefix):] for s in names if s.startswith(prefix + text) and s != "do_EOF")
+ return result
+
+ if have_readline:
+
+ def cmdloop_with_history(self):
+ """
+ Better command loop, with history file and tweaked readline
+ completion delimiters.
+ """
+
+ old_completer_delims = readline.get_completer_delims()
+ if self.histfile is not None:
+ try:
+ self.read_history()
+ except IOError:
+ pass
+ try:
+ readline.set_completer_delims("".join(set(old_completer_delims) - set(self.identchars)))
+ self.cmdloop()
+ finally:
+ if self.histfile is not None and readline.get_current_history_length():
+ try:
+ self.save_history()
+ except IOError:
+ pass
+ readline.set_completer_delims(old_completer_delims)
+
+ def read_history(self):
+ """
+ Read readline history from file.
+
+ This is a separate method so that subclasses can wrap it when necessary.
+ """
+
+ readline.read_history_file(self.histfile)
+
+ def save_history(self):
+ """
+ Save readline history to file.
+
+ This is a separate method so that subclasses can wrap it when necessary.
+ """
+
+ readline.write_history_file(self.histfile)
+
+ else:
+
+ cmdloop_with_history = cmd.Cmd.cmdloop
- return True
- do_quit = do_exit
- def emptyline(self):
+def yes_or_no(prompt, default = None, require_full_word = False):
"""
- Handle an empty line. cmd module default is to repeat the last
- command, which I find to be violation of the principal of least
- astonishment, so my preference is that an empty line does nothing.
+ Ask a yes-or-no question.
"""
- if self.emptyline_repeats_last_command:
- cmd.Cmd.emptyline(self)
+ prompt = prompt.rstrip() + _yes_or_no_prompts[default]
+ while True:
+ answer = raw_input(prompt).strip().lower()
+ if not answer and default is not None:
+ return default
+ if answer == "yes" or (not require_full_word and answer.startswith("y")):
+ return True
+ if answer == "no" or (not require_full_word and answer.startswith("n")):
+ return False
+ print 'Please answer "yes" or "no"'
- def filename_complete(self, text, line, begidx, endidx):
- """
- Filename completion handler, with hack to restore what I consider
- the normal (bash-like) behavior when one hits the completion key
- and there's only one match.
- """
+_yes_or_no_prompts = {
+ True : ' ("yes" or "no" ["yes"]) ',
+ False : ' ("yes" or "no" ["no"]) ',
+ None : ' ("yes" or "no") ' }
- result = glob.glob(text + "*")
- if len(result) == 1:
- path = result.pop()
- if os.path.isdir(path) or (os.path.islink(path) and os.path.isdir(os.path.join(path, "."))):
- result.append(path + os.path.sep)
- else:
- result.append(path + " ")
- return result
- def completenames(self, text, *ignored):
+class NonExitingArgumentParser(argparse.ArgumentParser):
"""
- Command name completion handler, with hack to restore what I
- consider the normal (bash-like) behavior when one hits the
- completion key and there's only one match.
+ ArgumentParser tweaked to throw ExitArgparse exception
+ rather than using sys.exit(), for use with command loop.
"""
- result = cmd.Cmd.completenames(self, text, *ignored)
- if len(result) == 1:
- result[0] += " "
- return result
-
- def help_help(self):
- """
- Type "help [topic]" for help on a command,
- or just "help" for a list of commands.
- """
+ def exit(self, status = 0, message = None):
+ raise ExitArgparse(status = status, message = message)
- self.stdout.write(self.help_help.__doc__ + "\n")
- def complete_help(self, *args):
- """
- Better completion function for help command arguments.
+def parsecmd(subparsers, *arg_clauses):
"""
+ Decorator to combine the argparse and cmd modules.
- text = args[0]
- names = self.get_names()
- result = []
- for prefix in ("do_", "help_"):
- result.extend(s[len(prefix):] for s in names if s.startswith(prefix + text) and s != "do_EOF")
- return result
+ subparsers is an instance of argparse.ArgumentParser (or subclass) which was
+ returned by calling the .add_subparsers() method on an ArgumentParser instance
+ intended to handle parsing for the entire program on the command line.
- if have_readline:
+ arg_clauses is a series of defarg() invocations defining arguments to be parsed
+ by the argparse code.
- def cmdloop_with_history(self):
- """
- Better command loop, with history file and tweaked readline
- completion delimiters.
- """
+ The decorator will use arg_clauses to construct two separate argparse parser
+ instances: one will be attached to the global parser as a subparser, the
+ other will be used to parse arguments for this command when invoked by cmd.
- old_completer_delims = readline.get_completer_delims()
- if self.histfile is not None:
- try:
- readline.read_history_file(self.histfile)
- except IOError:
- pass
- try:
- readline.set_completer_delims("".join(set(old_completer_delims) - set(self.identchars)))
- self.cmdloop()
- finally:
- if self.histfile is not None and readline.get_current_history_length():
- readline.write_history_file(self.histfile)
- readline.set_completer_delims(old_completer_delims)
-
- else:
-
- cmdloop_with_history = cmd.Cmd.cmdloop
-
-
-
-def yes_or_no(prompt, default = None, require_full_word = False):
- """
- Ask a yes-or-no question.
- """
-
- prompt = prompt.rstrip() + _yes_or_no_prompts[default]
- while True:
- answer = raw_input(prompt).strip().lower()
- if not answer and default is not None:
- return default
- if answer == "yes" or (not require_full_word and answer.startswith("y")):
- return True
- if answer == "no" or (not require_full_word and answer.startswith("n")):
- return False
- print 'Please answer "yes" or "no"'
-
-_yes_or_no_prompts = {
- True : ' ("yes" or "no" ["yes"]) ',
- False : ' ("yes" or "no" ["no"]) ',
- None : ' ("yes" or "no") ' }
-
-
-class NonExitingArgumentParser(argparse.ArgumentParser):
- """
- ArgumentParser tweaked to throw ExitArgparse exception
- rather than using sys.exit(), for use with command loop.
- """
+ The decorator will replace the original do_whatever method with a wrapped version
+ which uses the local argparse instance to parse the single string supplied by
+ the cmd module.
- def exit(self, status = 0, message = None):
- raise ExitArgparse(status = status, message = message)
+ The intent is that, from the command's point of view, all of this should work
+ pretty much the same way regardless of whether the command was invoked from
+ the global command line or from within the cmd command loop. Either way,
+ the command method should get an argparse.Namespace object.
+ In theory, we could generate a completion handler from the argparse definitions,
+ much as the separate argcomplete package does. In practice this is a lot of
+ work and I'm not ready to get into that just yet.
+ """
-def parsecmd(subparsers, *arg_clauses):
- """
- Decorator to combine the argparse and cmd modules.
-
- subparsers is an instance of argparse.ArgumentParser (or subclass) which was
- returned by calling the .add_subparsers() method on an ArgumentParser instance
- intended to handle parsing for the entire program on the command line.
-
- arg_clauses is a series of defarg() invocations defining arguments to be parsed
- by the argparse code.
-
- The decorator will use arg_clauses to construct two separate argparse parser
- instances: one will be attached to the global parser as a subparser, the
- other will be used to parse arguments for this command when invoked by cmd.
-
- The decorator will replace the original do_whatever method with a wrapped version
- which uses the local argparse instance to parse the single string supplied by
- the cmd module.
-
- The intent is that, from the command's point of view, all of this should work
- pretty much the same way regardless of whether the command was invoked from
- the global command line or from within the cmd command loop. Either way,
- the command method should get an argparse.Namespace object.
-
- In theory, we could generate a completion handler from the argparse definitions,
- much as the separate argcomplete package does. In practice this is a lot of
- work and I'm not ready to get into that just yet.
- """
-
- def decorate(func):
- assert func.__name__.startswith("do_")
- parser = NonExitingArgumentParser(description = func.__doc__,
- prog = func.__name__[3:],
- add_help = False)
- subparser = subparsers.add_parser(func.__name__[3:],
- description = func.__doc__,
- help = func.__doc__.lstrip().partition("\n")[0])
- for positional, keywords in arg_clauses:
- parser.add_argument(*positional, **keywords)
- subparser.add_argument(*positional, **keywords)
- subparser.set_defaults(func = func)
- def wrapped(self, arg):
- return func(self, parser.parse_args(shlex.split(arg)))
- wrapped.argparser = parser
- wrapped.__doc__ = func.__doc__
- return wrapped
- return decorate
+ def decorate(func):
+ assert func.__name__.startswith("do_")
+ parser = NonExitingArgumentParser(description = func.__doc__,
+ prog = func.__name__[3:],
+ add_help = False)
+ subparser = subparsers.add_parser(func.__name__[3:],
+ description = func.__doc__,
+ help = func.__doc__.lstrip().partition("\n")[0])
+ for positional, keywords in arg_clauses:
+ parser.add_argument(*positional, **keywords)
+ subparser.add_argument(*positional, **keywords)
+ subparser.set_defaults(func = func)
+ def wrapped(self, arg):
+ return func(self, parser.parse_args(shlex.split(arg)))
+ wrapped.argparser = parser
+ wrapped.__doc__ = func.__doc__
+ return wrapped
+ return decorate
def cmdarg(*positional, **keywords):
- """
- Syntactic sugar to let us use keyword arguments normally when constructing
- arguments for deferred calls to argparse.ArgumentParser.add_argument().
- """
+ """
+ Syntactic sugar to let us use keyword arguments normally when constructing
+ arguments for deferred calls to argparse.ArgumentParser.add_argument().
+ """
- return positional, keywords
+ return positional, keywords
diff --git a/rpki/config.py b/rpki/config.py
index 253e56cf..2f507f90 100644
--- a/rpki/config.py
+++ b/rpki/config.py
@@ -23,7 +23,12 @@ ConfigParser module.
"""
import ConfigParser
+import argparse
import logging
+import logging.handlers
+import traceback
+import time
+import sys
import os
import re
@@ -32,280 +37,604 @@ logger = logging.getLogger(__name__)
## @var default_filename
# Default name of config file if caller doesn't specify one explictly.
-default_filename = "rpki.conf"
-
-## @var default_dirname
-# Default name of directory to check for global config file, or None
-# if no global config file. Autoconf-generated code may set this to a
-# non-None value during script startup.
-
try:
- import rpki.autoconf
- default_dirname = rpki.autoconf.sysconfdir
+ import rpki.autoconf
+ default_filename = os.path.join(rpki.autoconf.sysconfdir, "rpki.conf")
except ImportError:
- default_dirname = None
+ default_filename = None
-## @var default_envname
+## @var rpki_conf_envname
# Name of environment variable containing config file name.
-default_envname = "RPKI_CONF"
+rpki_conf_envname = "RPKI_CONF"
+
class parser(object):
- """
- Extensions to stock Python ConfigParser:
-
- Read config file and set default section while initializing parser object.
-
- Support for OpenSSL-style subscripted options and a limited form of
- OpenSSL-style indirect variable references (${section::option}).
-
- get-methods with default values and default section name.
-
- If no filename is given to the constructor (filename = None), we
- check for an environment variable naming the config file, then we
- check for a default filename in the current directory, then finally
- we check for a global config file if autoconf provided a directory
- name to check.
- """
-
- def __init__(self, filename = None, section = None, allow_missing = False):
-
- self.cfg = ConfigParser.RawConfigParser()
- self.default_section = section
-
- filenames = []
- if filename is not None:
- filenames.append(filename)
- else:
- if default_envname in os.environ:
- filenames.append(os.environ[default_envname])
- filenames.append(default_filename)
- if default_dirname is not None:
- filenames.append("%s/%s" % (default_dirname, default_filename))
-
- f = fn = None
-
- for fn in filenames:
- try:
- f = open(fn)
- break
- except IOError:
- f = None
-
- if f is not None:
- self.filename = fn
- self.cfg.readfp(f, fn)
- elif allow_missing:
- self.filename = None
- else:
- raise
-
- def has_section(self, section):
- """
- Test whether a section exists.
"""
+ Extensions to stock Python ConfigParser:
- return self.cfg.has_section(section)
+ Read config file and set default section while initializing parser object.
- def has_option(self, option, section = None):
- """
- Test whether an option exists.
- """
+ Support for OpenSSL-style subscripted options and a limited form of
+ OpenSSL-style indirect variable references (${section::option}).
- if section is None:
- section = self.default_section
- return self.cfg.has_option(section, option)
+ get-methods with default values and default section name.
- def multiget(self, option, section = None):
- """
- Parse OpenSSL-style foo.0, foo.1, ... subscripted options.
+ If no filename is given to the constructor (filename and
+ set_filename both None), we check for an environment variable naming
+ the config file, then finally we check for a global config file if
+ autoconf provided a directory name to check.
- Returns iteration of values matching the specified option name.
+ NB: Programs which accept a configuration filename on the command
+ line should pass that filename using set_filename so that we can
+ set the magic environment variable. Constraints from some external
+ libraries (principally Django) sometimes require library code to
+ look things up in the configuration file without the knowledge of
+ the controlling program, but setting the environment variable
+ ensures that everybody's reading from the same script, as it were.
"""
- matches = []
- if section is None:
- section = self.default_section
- if self.cfg.has_option(section, option):
- yield self.cfg.get(section, option)
- option += "."
- matches = [o for o in self.cfg.options(section) if o.startswith(option) and o[len(option):].isdigit()]
- matches.sort()
- for option in matches:
- yield self.cfg.get(section, option)
+ # Odd keyword-only calling sequence is a defense against old code
+ # that thinks it knows how __init__() handles positional arguments.
- _regexp = re.compile("\\${(.*?)::(.*?)}")
+ def __init__(self, **kwargs):
+ section = kwargs.pop("section", None)
+ allow_missing = kwargs.pop("allow_missing", False)
+ set_filename = kwargs.pop("set_filename", None)
+ filename = kwargs.pop("filename", set_filename)
+ argparser = kwargs.pop("argparser", None)
- def _repl(self, m):
- """
- Replacement function for indirect variable substitution.
- This is intended for use with re.subn().
- """
- section, option = m.group(1, 2)
- if section == "ENV":
- return os.getenv(option, "")
- else:
- return self.cfg.get(section, option)
+ assert not kwargs, "Unexpected keyword arguments: {}".format(
+ ", ".join("{} = {!r}".format(k, v) for k, v in kwargs.iteritems()))
+
+ if set_filename is not None:
+ os.environ[rpki_conf_envname] = set_filename
+
+ self.cfg = ConfigParser.RawConfigParser()
+ self.default_section = section
+
+ self.filename = filename or os.getenv(rpki_conf_envname) or default_filename
+ self.argparser = argparser
+ self.logging_defaults = None
+
+ try:
+ with open(self.filename, "r") as f:
+ self.cfg.readfp(f)
+ except IOError:
+ if allow_missing:
+ self.filename = None
+ else:
+ raise
+
+
+ def has_section(self, section):
+ """
+ Test whether a section exists.
+ """
+
+ return self.cfg.has_section(section)
+
+
+ def has_option(self, option, section = None):
+ """
+ Test whether an option exists.
+ """
+
+ if section is None:
+ section = self.default_section
+ return self.cfg.has_option(section, option)
+
+
+ def multiget(self, option, section = None):
+ """
+ Parse OpenSSL-style foo.0, foo.1, ... subscripted options.
+
+ Returns iteration of values matching the specified option name.
+ """
+
+ matches = []
+ if section is None:
+ section = self.default_section
+ if self.cfg.has_option(section, option):
+ yield self.cfg.get(section, option)
+ option += "."
+ matches = [o for o in self.cfg.options(section)
+ if o.startswith(option) and o[len(option):].isdigit()]
+ matches.sort()
+ for option in matches:
+ yield self.cfg.get(section, option)
+
+
+ _regexp = re.compile("\\${(.*?)::(.*?)}")
+
+ def _repl(self, m):
+ """
+ Replacement function for indirect variable substitution.
+ This is intended for use with re.subn().
+ """
+
+ section, option = m.group(1, 2)
+ if section == "ENV":
+ return os.getenv(option, "")
+ else:
+ return self.cfg.get(section, option)
+
+
+ def get(self, option, default = None, section = None):
+ """
+ Get an option, perhaps with a default value.
+ """
+
+ if section is None:
+ section = self.default_section
+ if default is not None and not self.cfg.has_option(section, option):
+ return default
+ val = self.cfg.get(section, option)
+ while True:
+ val, modified = self._regexp.subn(self._repl, val, 1)
+ if not modified:
+ return val
+
+
+ def getboolean(self, option, default = None, section = None):
+ """
+ Get a boolean option, perhaps with a default value.
+ """
+
+ # pylint: disable=W0212
+ v = self.get(option, default, section)
+ if isinstance(v, str):
+ v = v.lower()
+ if v not in self.cfg._boolean_states:
+ raise ValueError("Not a boolean: {}".format(v))
+ v = self.cfg._boolean_states[v]
+ return v
- def get(self, option, default = None, section = None):
- """
- Get an option, perhaps with a default value.
- """
- if section is None:
- section = self.default_section
- if default is not None and not self.cfg.has_option(section, option):
- return default
- val = self.cfg.get(section, option)
- while True:
- val, modified = self._regexp.subn(self._repl, val, 1)
- if not modified:
- return val
-
- def getboolean(self, option, default = None, section = None):
- """
- Get a boolean option, perhaps with a default value.
- """
- v = self.get(option, default, section)
- if isinstance(v, str):
- v = v.lower()
- if v not in self.cfg._boolean_states:
- raise ValueError("Not a boolean: %s" % v)
- v = self.cfg._boolean_states[v]
- return v
-
- def getint(self, option, default = None, section = None):
- """
- Get an integer option, perhaps with a default value.
- """
- return int(self.get(option, default, section))
- def getlong(self, option, default = None, section = None):
+ def getint(self, option, default = None, section = None):
+ """
+ Get an integer option, perhaps with a default value.
+ """
+
+ return int(self.get(option, default, section))
+
+
+ def getlong(self, option, default = None, section = None):
+ """
+ Get a long integer option, perhaps with a default value.
+ """
+
+ return long(self.get(option, default, section))
+
+
+ def _get_argument_default(self, names, kwargs):
+ section = kwargs.pop("section", None)
+ default = kwargs.pop("default", None)
+
+ for name in names:
+ if name.startswith("--"):
+ name = name[2:]
+ break
+ else:
+ raise ValueError
+
+ if self.has_option(option = name, section = section):
+ default = self.get(option = name, section = section, default = default)
+
+ if "type" in kwargs:
+ default = kwargs["type"](default)
+
+ if "choices" in kwargs and default not in kwargs["choices"]:
+ raise ValueError
+
+ kwargs["default"] = default
+
+ return name, default, kwargs
+
+
+ def add_argument(self, *names, **kwargs):
+ """
+ Combined command line and config file argument. Takes
+ arguments mostly like ArgumentParser.add_argument(), but also
+ looks in config file for option of the same name.
+
+ The "section" and "default" arguments are used for the config file
+ lookup; the resulting value is used as the "default" parameter for
+ the argument parser.
+
+ If a "type" argument is specified, it applies to both the value
+ parsed from the config file and the argument parser.
+ """
+
+ name, default, kwargs = self._get_argument_default(names, kwargs)
+ return self.argparser.add_argument(*names, **kwargs)
+
+
+ def add_boolean_argument(self, name, **kwargs):
+ """
+ Combined command line and config file boolean argument. Takes
+ arguments mostly like ArgumentParser.add_argument(), but also
+ looks in config file for option of the same name.
+
+ The "section" and "default" arguments are used for the config file
+ lookup; the resulting value is used as the default value for
+ the argument parser.
+
+ Usage is a bit different from the normal ArgumentParser boolean
+ handling: because the command line default is controlled by the
+ config file, the "store_true" / "store_false" semantics don't
+ really work for us. So, instead, we use the --foo / --no-foo
+ convention, and generate a pair of command line arguments with
+ those names controlling a single "foo" value in the result.
+ """
+
+ section = kwargs.pop("section", None)
+ default = kwargs.pop("default", None)
+ help = kwargs.pop("help", None)
+
+ if not name.startswith("--"):
+ raise ValueError
+ name = name[2:]
+
+ default = self.getboolean(name, default = default, section = section)
+
+ kwargs["action"] = "store_const"
+ kwargs["dest"] = name.replace("-", "_")
+
+ group = self.argparser.add_mutually_exclusive_group()
+
+ kwargs["const"] = True
+ group.add_argument("--" + name, **kwargs)
+
+ kwargs["const"] = False
+ kwargs["help"] = help
+ group.add_argument("--no-" + name, **kwargs)
+
+ self.argparser.set_defaults(**{ kwargs["dest"] : default })
+
+
+ def _add_logging_argument(self, *names, **kwargs):
+ group = kwargs.pop("group", self.argparser)
+ name, default, kwargs = self._get_argument_default(names, kwargs)
+ setattr(self.logging_defaults, name.replace("-", "_"), default)
+ if group is not None:
+ group.add_argument(*names, **kwargs)
+
+
+ def add_logging_arguments(self, section = None):
+ """
+ Set up standard logging-related arguments. This can be called
+ even when we're not going to parse the command line (eg,
+ because we're a WSGI app and therefore don't have a command
+ line), to handle whacking arguments from the config file into
+ the format that the logging setup code expects to see.
+ """
+
+ self.logging_defaults = argparse.Namespace(
+ default_log_destination = None)
+
+ if self.argparser is not None:
+ self.argparser.set_defaults(
+ default_log_destination = None)
+
+ class non_negative_integer(int):
+ def __init__(self, value):
+ if self < 0:
+ raise ValueError
+
+ class positive_integer(int):
+ def __init__(self, value):
+ if self <= 0:
+ raise ValueError
+
+ if self.argparser is None:
+ limit_group = None
+ else:
+ limit_group = self.argparser.add_mutually_exclusive_group()
+
+ self._add_logging_argument(
+ "--log-level",
+ default = "warning",
+ choices = ("debug", "info", "warning", "error", "critical"),
+ help = "how verbosely to log")
+
+ self._add_logging_argument(
+ "--log-destination",
+ choices = ("syslog", "stdout", "stderr", "file"),
+ help = "logging mechanism to use")
+
+ self._add_logging_argument(
+ "--log-filename",
+ help = "where to log when log destination is \"file\"")
+
+ self._add_logging_argument(
+ "--log-facility",
+ default = "daemon",
+ choices = sorted(logging.handlers.SysLogHandler.facility_names.keys()),
+ help = "syslog facility to use when log destination is \"syslog\"")
+
+ self._add_logging_argument(
+ "--log-count",
+ default = "7",
+ type = positive_integer,
+ help = "how many logs to keep when rotating for log destination \"file\"")
+
+ self._add_logging_argument(
+ "--log-size-limit",
+ group = limit_group,
+ default = 0,
+ type = non_negative_integer,
+ help = "size in kbytes after which to rotate log for destination \"file\"")
+
+ self._add_logging_argument(
+ "--log-time-limit",
+ group = limit_group,
+ default = 0,
+ type = non_negative_integer,
+ help = "hours after which to rotate log for destination \"file\"")
+
+
+ def configure_logging(self, args = None, ident = None):
+ """
+ Configure the logging system, using information from both the
+ config file and the command line; if this particular program
+ doesn't use the command line (eg, a WSGI app), we just use the
+ config file.
+ """
+
+ if self.logging_defaults is None:
+ self.add_logging_arguments()
+
+ if args is None:
+ args = self.logging_defaults
+
+ log_level = getattr(logging, args.log_level.upper())
+
+ log_destination = args.log_destination or args.default_log_destination or "stderr"
+
+ if log_destination == "stderr":
+ log_handler = logging.StreamHandler(
+ stream = sys.stderr)
+
+ elif log_destination == "stdout":
+ log_handler = logging.StreamHandler(
+ stream = sys.stdout)
+
+ elif log_destination == "syslog":
+ log_handler = logging.handlers.SysLogHandler(
+ address = ("/dev/log" if os.path.exists("/dev/log")
+ else ("localhost", logging.handlers.SYSLOG_UDP_PORT)),
+ facility = logging.handlers.SysLogHandler.facility_names[args.log_facility])
+
+ elif log_destination == "file" and (args.log_size_limit == 0 and
+ args.log_time_limit == 0):
+ log_handler = logging.handlers.WatchedFileHandler(
+ filename = args.log_filename)
+
+ elif log_destination == "file" and args.log_time_limit == 0:
+ log_handler = logging.handlers.RotatingFileHandler(
+ filename = args.log_filename,
+ maxBytes = args.log_size_limit * 1024,
+ backupCount = args.log_count)
+
+ elif log_destination == "file" and args.log_size_limit == 0:
+ log_handler = logging.handlers.TimedRotatingFileHandler(
+ filename = args.log_filename,
+ interval = args.log_time_limit,
+ backupCount = args.log_count,
+ when = "H",
+ utc = True)
+
+ else:
+ raise ValueError
+
+ if ident is None:
+ ident = os.path.basename(sys.argv[0])
+
+ log_handler.setFormatter(Formatter(ident, log_handler, log_level))
+
+ root_logger = logging.getLogger()
+ root_logger.addHandler(log_handler)
+ root_logger.setLevel(log_level)
+
+
+ def set_global_flags(self):
+ """
+ Consolidated control for all the little global control flags
+ scattered through the libraries. This isn't a particularly good
+ place for this function to live, but it has to live somewhere and
+ making it a method of the config parser from which it gets all of
+ its data is less silly than the available alternatives.
+ """
+
+ # pylint: disable=W0621
+ import rpki.x509
+ import rpki.daemonize
+
+ for line in self.multiget("configure_logger"):
+ try:
+ name, level = line.split()
+ logging.getLogger(name).setLevel(getattr(logging, level.upper()))
+ except Exception, e:
+ logger.warning("Could not process configure_logger line %r: %s", line, e)
+
+ try:
+ rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(
+ self.get("dump_outbound_cms"))
+ except OSError, e:
+ logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_outbound_cms"), e)
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(
+ self.get("dump_inbound_cms"))
+ except OSError, e:
+ logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_inbound_cms"), e)
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.check_inbound_schema = self.getboolean("check_inbound_schema")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.XML_CMS_object.check_outbound_schema = self.getboolean("check_outbound_schema")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.daemonize.default_pid_directory = self.get("pid_directory")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.daemonize.pid_filename = self.get("pid_filename")
+ except ConfigParser.NoOptionError:
+ pass
+
+ try:
+ rpki.x509.generate_insecure_debug_only_rsa_key = rpki.x509.insecure_debug_only_rsa_key_generator(*self.get("insecure-debug-only-rsa-key-db").split())
+ except ConfigParser.NoOptionError:
+ pass
+ except:
+ logger.warning("insecure-debug-only-rsa-key-db configured but initialization failed, check for corrupted database file")
+
+ try:
+ rpki.up_down.content_type = self.get("up_down_content_type")
+ except ConfigParser.NoOptionError:
+ pass
+
+
+def argparser(section = None, doc = None, cfg_optional = False):
"""
- Get a long integer option, perhaps with a default value.
+ First cut at a combined configuration mechanism based on ConfigParser and argparse.
+
+ General idea here is to do an initial pass on the arguments to handle the config file,
+ then return the config file and a parser to use for the rest of the arguments.
"""
- return long(self.get(option, default, section))
- def set_global_flags(self):
+ # Basic approach here is a variation on:
+ # http://blog.vwelch.com/2011/04/combining-configparser-and-argparse.html
+
+ # For most of our uses of argparse, this should be a trivial
+ # drop-in, and should reduce the amount of repetitive code. There
+ # are a couple of special cases which will require attention:
+ #
+ # - rpki.rtr: The rpki-rtr modules have their own handling of all
+ # the logging setup, and use an argparse subparser. I -think-
+ # that the way they're already handling the logging setup should
+ # work fine, but there may be a few tricky bits reconciling the
+ # rpki-rtr logging setup with the generalized version in rpki.log.
+ #
+ # - rpki.rpkic: Use of argparse in rpkic is very complicated due to
+ # support for both the external command line and the internal
+ # command loop. Overall it works quite well, but the setup is
+ # tricky. rpki.rpkic.main.top_argparse may need to move outside
+ # the main class, but that may raise its own issues. Maybe we
+ # can get away with just replacing the current setup of
+ # top_argparser with a call to this function and otherwise
+ # leaving the whole structure alone? Try and see, I guess.
+
+ # Setting cfg_optional here doesn't really work, because the cfg
+ # object returned here is separate from the one that the Django
+ # ORM gets when it tries to look for databases. Given that just
+ # about everything which uses this module also uses Django,
+ # perhaps we should just resign ourselves to the config being a
+ # global thing we read exactly once, so we can stop playing this
+ # game.
+
+ topparser = argparse.ArgumentParser(add_help = False)
+ topparser.add_argument("-c", "--config",
+ default = os.getenv(rpki_conf_envname, default_filename),
+ help = "override default location of configuration file")
+
+ cfgparser = argparse.ArgumentParser(parents = [topparser], add_help = False)
+ cfgparser.add_argument("-h", "--help", action = "store_true")
+
+ args, remaining_argv = cfgparser.parse_known_args()
+
+ argparser = argparse.ArgumentParser(parents = [topparser], description = doc)
+
+ cfg = parser(section = section,
+ set_filename = args.config,
+ argparser = argparser,
+ allow_missing = cfg_optional or args.help)
+
+ return cfg
+
+
+class Formatter(object):
"""
- Consolidated control for all the little global control flags
- scattered through the libraries. This isn't a particularly good
- place for this function to live, but it has to live somewhere and
- making it a method of the config parser from which it gets all of
- its data is less silly than the available alternatives.
+ Reimplementation (easier than subclassing in this case) of
+ logging.Formatter.
+
+ It turns out that the logging code only cares about this class's
+ .format(record) method, everything else is internal; so long as
+ .format() converts a record into a properly formatted string, the
+ logging code is happy.
+
+ So, rather than mess around with dynamically constructing and
+ deconstructing and tweaking format strings and ten zillion options
+ we don't use, we just provide our own implementation that supports
+ what we do need.
"""
- # pylint: disable=W0621
- import rpki.http
- import rpki.x509
- import rpki.sql
- import rpki.async
- import rpki.log
- import rpki.daemonize
-
- for line in self.multiget("configure_logger"):
- try:
- name, level = line.split()
- logging.getLogger(name).setLevel(getattr(logging, level.upper()))
- except Exception, e:
- logger.warning("Could not process configure_logger line %r: %s", line, e)
-
- try:
- rpki.http.want_persistent_client = self.getboolean("want_persistent_client")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.http.want_persistent_server = self.getboolean("want_persistent_server")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.http.use_adns = self.getboolean("use_adns")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.http.enable_ipv6_clients = self.getboolean("enable_ipv6_clients")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.http.enable_ipv6_servers = self.getboolean("enable_ipv6_servers")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.sql.sql_persistent.sql_debug = self.getboolean("sql_debug")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.async.timer.gc_debug = self.getboolean("gc_debug")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.async.timer.run_debug = self.getboolean("timer_debug")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms"))
- except OSError, e:
- logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_outbound_cms"), e)
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms"))
- except OSError, e:
- logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_inbound_cms"), e)
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.XML_CMS_object.check_inbound_schema = self.getboolean("check_inbound_schema")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.XML_CMS_object.check_outbound_schema = self.getboolean("check_outbound_schema")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.async.gc_summary(self.getint("gc_summary"), self.getint("gc_summary_threshold", 0))
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.log.enable_tracebacks = self.getboolean("enable_tracebacks")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.daemonize.default_pid_directory = self.get("pid_directory")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.daemonize.pid_filename = self.get("pid_filename")
- except ConfigParser.NoOptionError:
- pass
-
- try:
- rpki.x509.generate_insecure_debug_only_rsa_key = rpki.x509.insecure_debug_only_rsa_key_generator(*self.get("insecure-debug-only-rsa-key-db").split())
- except ConfigParser.NoOptionError:
- pass
- except:
- logger.warning("insecure-debug-only-rsa-key-db configured but initialization failed, check for corrupted database file")
-
- try:
- rpki.up_down.content_type = self.get("up_down_content_type")
- except ConfigParser.NoOptionError:
- pass
+ converter = time.gmtime
+
+ def __init__(self, ident, handler, level):
+ self.ident = ident
+ self.is_syslog = isinstance(handler, logging.handlers.SysLogHandler)
+ self.debugging = level == logging.DEBUG
+
+ def format(self, record):
+ return "".join(self.coformat(record)).rstrip("\n")
+
+ def coformat(self, record):
+
+ try:
+ if not self.is_syslog:
+ yield time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime(record.created))
+ except:
+ yield "[$!$Time format failed]"
+
+ try:
+ yield "{}[{:d}]: ".format(self.ident, record.process)
+ except:
+ yield "[$!$ident format failed]"
+
+ try:
+ if isinstance(record.context, (str, unicode)):
+ yield record.context + " "
+ else:
+ yield repr(record.context) + " "
+ except AttributeError:
+ pass
+ except:
+ yield "[$!$context format failed]"
+
+ try:
+ yield record.getMessage()
+ except:
+ yield "[$!$record.getMessage() failed]"
+
+ try:
+ if record.exc_info:
+ if self.is_syslog or not self.debugging:
+ lines = traceback.format_exception_only(
+ record.exc_info[0], record.exc_info[1])
+ lines.insert(0, ": ")
+ else:
+ lines = traceback.format_exception(
+ record.exc_info[0], record.exc_info[1], record.exc_info[2])
+ lines.insert(0, "\n")
+ for line in lines:
+ yield line
+ except:
+ yield "[$!$exception formatting failed]"
diff --git a/rpki/csv_utils.py b/rpki/csv_utils.py
index 9ba04a02..5fa498a1 100644
--- a/rpki/csv_utils.py
+++ b/rpki/csv_utils.py
@@ -22,91 +22,99 @@ import csv
import os
class BadCSVSyntax(Exception):
- """
- Bad CSV syntax.
- """
+ """
+ Bad CSV syntax.
+ """
class csv_reader(object):
- """
- Reader for tab-delimited text that's (slightly) friendlier than the
- stock Python csv module (which isn't intended for direct use by
- humans anyway, and neither was this package originally, but that
- seems to be the way that it has evolved...).
-
- Columns parameter specifies how many columns users of the reader
- expect to see; lines with fewer columns will be padded with None
- values.
-
- Original API design for this class courtesy of Warren Kumari, but
- don't blame him if you don't like what I did with his ideas.
- """
-
- def __init__(self, filename, columns = None, min_columns = None, comment_characters = "#;"):
- assert columns is None or isinstance(columns, int)
- assert min_columns is None or isinstance(min_columns, int)
- if columns is not None and min_columns is None:
- min_columns = columns
- self.filename = filename
- self.columns = columns
- self.min_columns = min_columns
- self.comment_characters = comment_characters
- self.file = open(filename, "r")
-
- def __iter__(self):
- line_number = 0
- for line in self.file:
- line_number += 1
- line = line.strip()
- if not line or line[0] in self.comment_characters:
- continue
- fields = line.split()
- if self.min_columns is not None and len(fields) < self.min_columns:
- raise BadCSVSyntax("%s:%d: Not enough columns in line %r" % (self.filename, line_number, line))
- if self.columns is not None and len(fields) > self.columns:
- raise BadCSVSyntax("%s:%d: Too many columns in line %r" % (self.filename, line_number, line))
- if self.columns is not None and len(fields) < self.columns:
- fields += tuple(None for i in xrange(self.columns - len(fields)))
- yield fields
-
- def __enter__(self):
- return self
-
- def __exit__(self, _type, value, traceback):
- self.file.close()
+ """
+ Reader for tab-delimited text that's (slightly) friendlier than the
+ stock Python csv module (which isn't intended for direct use by
+ humans anyway, and neither was this package originally, but that
+ seems to be the way that it has evolved...).
+
+ Columns parameter specifies how many columns users of the reader
+ expect to see; lines with fewer columns will be padded with None
+ values.
+
+ Original API design for this class courtesy of Warren Kumari, but
+ don't blame him if you don't like what I did with his ideas.
+ """
+
+ def __init__(self, filename, columns = None, min_columns = None, comment_characters = "#;"):
+ assert columns is None or isinstance(columns, int)
+ assert min_columns is None or isinstance(min_columns, int)
+ if columns is not None and min_columns is None:
+ min_columns = columns
+ self.columns = columns
+ self.min_columns = min_columns
+ self.comment_characters = comment_characters
+ if isinstance(filename, (str, unicode)):
+ # Name of a file to open
+ self.filename = filename
+ self.file = open(filename, "r")
+ else:
+ # File-like object, already opened
+ self.filename = None
+ self.file = filename
+
+ def __iter__(self):
+ line_number = 0
+ for line in self.file:
+ line_number += 1
+ line = line.strip()
+ if not line or line[0] in self.comment_characters:
+ continue
+ fields = line.split()
+ if self.min_columns is not None and len(fields) < self.min_columns:
+ raise BadCSVSyntax("%s:%d: Not enough columns in line %r" % (self.filename, line_number, line))
+ if self.columns is not None and len(fields) > self.columns:
+ raise BadCSVSyntax("%s:%d: Too many columns in line %r" % (self.filename, line_number, line))
+ if self.columns is not None and len(fields) < self.columns:
+ fields += tuple(None for i in xrange(self.columns - len(fields)))
+ yield fields
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, _type, value, traceback):
+ self.file.close()
class csv_writer(object):
- """
- Writer object for tab delimited text. We just use the stock CSV
- module in excel-tab mode for this.
+ """
+ Writer object for tab delimited text. We just use the stock CSV
+ module in excel-tab mode for this.
- If "renmwo" is set (default), the file will be written to
- a temporary name and renamed to the real filename after closing.
- """
+ If "renmwo" is set (default), the file will be written to
+ a temporary name and renamed to the real filename after closing.
+ """
- def __init__(self, filename, renmwo = True):
- self.filename = filename
- self.renmwo = "%s.~renmwo%d~" % (filename, os.getpid()) if renmwo else filename
- self.file = open(self.renmwo, "w")
- self.writer = csv.writer(self.file, dialect = csv.get_dialect("excel-tab"))
+ def __init__(self, filename, renmwo = True):
+ self.filename = filename
+ self.renmwo = "%s.~renmwo%d~" % (filename, os.getpid()) if renmwo else filename
+ self.file = open(self.renmwo, "w")
+ self.writer = csv.writer(self.file, dialect = csv.get_dialect("excel-tab"))
- def __enter__(self):
- return self
+ def __enter__(self):
+ return self
- def __exit__(self, _type, value, traceback):
- self.close()
+ def __exit__(self, _type, value, traceback):
+ self.close()
- def close(self):
- """
- Close this writer.
- """
- if self.file is not None:
- self.file.close()
- self.file = None
- if self.filename != self.renmwo:
- os.rename(self.renmwo, self.filename)
+ def close(self):
+ """
+ Close this writer.
+ """
- def __getattr__(self, attr):
- """
- Fake inheritance from whatever object csv.writer deigns to give us.
- """
- return getattr(self.writer, attr)
+ if self.file is not None:
+ self.file.close()
+ self.file = None
+ if self.filename != self.renmwo:
+ os.rename(self.renmwo, self.filename)
+
+ def __getattr__(self, attr):
+ """
+ Fake inheritance from whatever object csv.writer deigns to give us.
+ """
+
+ return getattr(self.writer, attr)
diff --git a/rpki/daemonize.py b/rpki/daemonize.py
index 6a825566..5a1c3979 100644
--- a/rpki/daemonize.py
+++ b/rpki/daemonize.py
@@ -80,56 +80,57 @@ default_pid_directory = "/var/run/rpki"
pid_filename = None
def daemon(nochdir = False, noclose = False, pidfile = None):
- """
- Make this program become a daemon, like 4.4BSD daemon(3), and
- write its pid out to a file with cleanup on exit.
- """
-
- if pidfile is None:
- if pid_filename is None:
- prog = os.path.splitext(os.path.basename(sys.argv[0]))[0]
- pidfile = os.path.join(default_pid_directory, "%s.pid" % prog)
+ """
+ Make this program become a daemon, like 4.4BSD daemon(3), and
+ write its pid out to a file with cleanup on exit.
+ """
+
+ if pidfile is None:
+ if pid_filename is None:
+ prog = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+ pidfile = os.path.join(default_pid_directory, "%s.pid" % prog)
+ else:
+ pidfile = pid_filename
+
+ old_sighup_action = signal.signal(signal.SIGHUP, signal.SIG_IGN)
+
+ try:
+ pid = os.fork()
+ except OSError, e:
+ logging.fatal("fork() failed: %d (%s)", e.errno, e.strerror)
+ sys.exit(1)
else:
- pidfile = pid_filename
+ if pid > 0:
+ os._exit(0) # pylint: disable=W0212
- old_sighup_action = signal.signal(signal.SIGHUP, signal.SIG_IGN)
+ if not nochdir:
+ os.chdir("/")
- try:
- pid = os.fork()
- except OSError, e:
- sys.exit("fork() failed: %d (%s)" % (e.errno, e.strerror))
- else:
- if pid > 0:
- os._exit(0)
+ os.setsid()
- if not nochdir:
- os.chdir("/")
+ if not noclose:
+ sys.stdout.flush()
+ sys.stderr.flush()
+ fd = os.open(os.devnull, os.O_RDWR)
+ os.dup2(fd, 0)
+ os.dup2(fd, 1)
+ os.dup2(fd, 2)
+ if fd > 2:
+ os.close(fd)
- os.setsid()
+ signal.signal(signal.SIGHUP, old_sighup_action)
- if not noclose:
- sys.stdout.flush()
- sys.stderr.flush()
- fd = os.open(os.devnull, os.O_RDWR)
- os.dup2(fd, 0)
- os.dup2(fd, 1)
- os.dup2(fd, 2)
- if fd > 2:
- os.close(fd)
+ def delete_pid_file():
+ try:
+ os.unlink(pidfile)
+ except OSError:
+ pass
- signal.signal(signal.SIGHUP, old_sighup_action)
+ atexit.register(delete_pid_file)
- def delete_pid_file():
try:
- os.unlink(pidfile)
- except OSError:
- pass
-
- atexit.register(delete_pid_file)
-
- try:
- f = open(pidfile, "w")
- f.write("%d\n" % os.getpid())
- f.close()
- except IOError, e:
- logger.warning("Couldn't write PID file %s: %s", pidfile, e.strerror)
+ f = open(pidfile, "w")
+ f.write("%d\n" % os.getpid())
+ f.close()
+ except IOError, e:
+ logger.warning("Couldn't write PID file %s: %s", pidfile, e.strerror)
diff --git a/rpki/gui/cacheview/__init__.py b/rpki/django_settings/__init__.py
index e69de29b..e69de29b 100644
--- a/rpki/gui/cacheview/__init__.py
+++ b/rpki/django_settings/__init__.py
diff --git a/rpki/django_settings/common.py b/rpki/django_settings/common.py
new file mode 100644
index 00000000..2f41fe77
--- /dev/null
+++ b/rpki/django_settings/common.py
@@ -0,0 +1,125 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains common configuration settings for Django libraries.
+
+Most of our CA code uses at least the Django ORM; the web interface
+uses a lot more of Django. We also want to handle all normal user
+configuration via rpki.conf, so some of the code here is just pulling
+settings from rpki.conf and stuffing them into the form Django wants.
+"""
+
+__version__ = "$Id$"
+
+import os
+import rpki.config
+import rpki.autoconf
+
+# Some configuration, including SQL authorization, comes from rpki.conf.
+cfg = rpki.config.parser()
+
+
+# Do -not- turn on DEBUG here except for short-lived tests, otherwise
+# long-running programs like irdbd will eventually run out of memory
+# and crash. This is also why this is controlled by an environment
+# variable rather than by an rpki.conf setting: just because we want
+# debugging enabled in the GUI doesn't mean we want it in irdb.
+#
+# If you must enable debugging, you may need to add code that uses
+# django.db.reset_queries() to clear the query list manually, but it's
+# probably better just to run with debugging disabled, since that's
+# the expectation for production code.
+#
+# https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
+
+if os.getenv("RPKI_DJANGO_DEBUG") == "yes":
+ DEBUG = True
+
+
+# Database configuration differs from program to program, but includes
+# a lot of boilerplate. So we define a class here to handle this,
+# then use it and clean up in the modules that import from this one.
+
+class DatabaseConfigurator(object):
+
+ default_sql_engine = "mysql"
+ cfg = None
+ section = None
+
+ def configure(self, cfg, section): # pylint: disable=W0621
+ self.cfg = cfg
+ self.section = section
+ engine = cfg.get("sql-engine", section = section,
+ default = self.default_sql_engine)
+ return dict(
+ default = getattr(self, engine))
+
+ @property
+ def mysql(self):
+ return dict(
+ ENGINE = "django.db.backends.mysql",
+ NAME = cfg.get("sql-database", section = self.section),
+ USER = cfg.get("sql-username", section = self.section),
+ PASSWORD = cfg.get("sql-password", section = self.section),
+ #
+ # Using "latin1" here is totally evil and wrong, but
+ # without it MySQL 5.6 (and, probably, later versions)
+ # whine incessantly about bad UTF-8 characters in BLOB
+ # columns. Which makes no freaking sense at all, but this
+ # is MySQL, which has the character set management interface
+ # from hell, so good luck with that. If anybody really
+ # understands how to fix this, tell me; for now, we force
+ # MySQL to revert to the default behavior in MySQL 5.5.
+ #
+ OPTIONS = dict(charset = "latin1"))
+
+ @property
+ def sqlite3(self):
+ return dict(
+ ENGINE = "django.db.backends.sqlite3",
+ NAME = cfg.get("sql-database", section = self.section))
+
+ @property
+ def postgresql(self):
+ return dict(
+ ENGINE = "django.db.backends.postgresql_psycopg2",
+ NAME = cfg.get("sql-database", section = self.section),
+ USER = cfg.get("sql-username", section = self.section),
+ PASSWORD = cfg.get("sql-password", section = self.section))
+
+
+# Apps are also handled by the modules that import this one, now that
+# we don't require South.
+
+
+# Silence whining about MIDDLEWARE_CLASSES
+
+MIDDLEWARE_CLASSES = ()
+
+# That would be it if we just need the ORM, but Django throws a hissy
+# fit if SECRET_KEY isn't set, whether we use it for anything or not.
+#
+# Make this unique, and don't share it with anybody.
+if cfg.has_option("secret-key", section = "web_portal"):
+ SECRET_KEY = cfg.get("secret-key", section = "web_portal")
+else:
+ SECRET_KEY = os.urandom(66).encode("hex")
+
+
+# Django defaults to thinking everybody lives in Chicago.
+
+TIME_ZONE = "UTC"
diff --git a/rpki/django_settings/gui.py b/rpki/django_settings/gui.py
new file mode 100644
index 00000000..071d845f
--- /dev/null
+++ b/rpki/django_settings/gui.py
@@ -0,0 +1,159 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains GUI-specific configuration settings for Django libraries.
+"""
+
+# Pull in the irdb configuration, which in turn pulls in the common configuration.
+
+from .irdb import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+import socket
+
+# GUI uses the IRDB database configuration, so we don't need to set
+# anything here.
+
+# Where to put static files.
+STATIC_ROOT = rpki.autoconf.datarootdir + "/rpki/media"
+
+# Must end with a slash!
+STATIC_URL = "/media/"
+
+# Where to email server errors.
+ADMINS = (("Administrator", "root@localhost"),)
+
+LOGGING = {
+ "version": 1,
+ "formatters": {
+ "verbose": {
+ # see http://docs.python.org/2.7/library/logging.html#logging.LogRecord
+ "format": "%(levelname)s %(asctime)s %(name)s %(message)s"
+ },
+ },
+ "handlers": {
+ "stderr": {
+ "class": "logging.StreamHandler",
+ "level": "DEBUG",
+ "formatter": "verbose",
+ },
+ "mail_admins": {
+ "level": "ERROR",
+ "class": "django.utils.log.AdminEmailHandler",
+ },
+ },
+ "loggers": {
+ "django": {
+ "level": "ERROR",
+ "handlers": ["stderr", "mail_admins"],
+ },
+ "rpki.gui": {
+ "level": "WARNING",
+ "handlers": ["stderr"],
+ },
+ },
+}
+
+def select_tz():
+ "Find a supported timezone that looks like UTC"
+ for tz in ("UTC", "GMT", "Etc/UTC", "Etc/GMT"):
+ if os.path.exists("/usr/share/zoneinfo/" + tz):
+ return tz
+ # Can't determine the proper timezone, fall back to UTC and let Django
+ # report the error to the user.
+ return "UTC"
+
+# Local time zone for this installation. Choices can be found here:
+# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
+# although not all choices may be available on all operating systems.
+# If running in a Windows environment this must be set to the same as your
+# system time zone.
+TIME_ZONE = select_tz()
+
+# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
+# for details on why you might need this.
+def get_allowed_hosts():
+ allowed_hosts = set(cfg.multiget("allowed-hosts", section = "web_portal"))
+ allowed_hosts.add(socket.getfqdn())
+ allowed_hosts.add("127.0.0.1")
+ allowed_hosts.add("::1")
+ try:
+ import netifaces
+ for interface in netifaces.interfaces():
+ addresses = netifaces.ifaddresses(interface)
+ for af in (netifaces.AF_INET, netifaces.AF_INET6):
+ if af in addresses:
+ for address in addresses[af]:
+ if "addr" in address:
+ allowed_hosts.add(address["addr"])
+ except ImportError:
+ pass
+ return list(allowed_hosts)
+
+ALLOWED_HOSTS = get_allowed_hosts()
+
+DOWNLOAD_DIRECTORY = cfg.get("download-directory", "/var/tmp", section = "web_portal")
+
+# List of callables that know how to import templates from various sources.
+TEMPLATE_LOADERS = (
+ "django.template.loaders.filesystem.Loader",
+ "django.template.loaders.app_directories.Loader",
+ "django.template.loaders.eggs.Loader"
+)
+
+MIDDLEWARE_CLASSES = (
+ "django.middleware.common.CommonMiddleware",
+ "django.contrib.sessions.middleware.SessionMiddleware",
+ "django.middleware.csrf.CsrfViewMiddleware",
+ "django.contrib.auth.middleware.AuthenticationMiddleware",
+ "django.contrib.messages.middleware.MessageMiddleware"
+)
+
+ROOT_URLCONF = "rpki.gui.urls"
+
+INSTALLED_APPS.extend((
+ "django.contrib.auth",
+ #"django.contrib.admin",
+ #"django.contrib.admindocs",
+ "django.contrib.contenttypes",
+ "django.contrib.sessions",
+ "django.contrib.staticfiles",
+ "rpki.gui.app",
+ "rpki.gui.gui_rpki_cache",
+ "rpki.gui.routeview",
+ "rpki.rcynicdb"
+))
+
+TEMPLATE_CONTEXT_PROCESSORS = (
+ "django.contrib.auth.context_processors.auth",
+ "django.core.context_processors.debug",
+ "django.core.context_processors.i18n",
+ "django.core.context_processors.media",
+ "django.contrib.messages.context_processors.messages",
+ "django.core.context_processors.request",
+ "django.core.context_processors.static"
+)
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/django_settings/irdb.py b/rpki/django_settings/irdb.py
new file mode 100644
index 00000000..da42a111
--- /dev/null
+++ b/rpki/django_settings/irdb.py
@@ -0,0 +1,47 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains configuration settings for Django libraries. All
+of the back-end programs (rpkic, irdbd, etc) use this configuration;
+the GUI code also uses this but adds a bunch of other stuff, thus has
+its own settings file.
+"""
+
+from .common import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+
+# Database configuration.
+
+DATABASES = DatabaseConfigurator().configure(cfg, "irdbd")
+del DatabaseConfigurator
+
+
+# Apps.
+
+INSTALLED_APPS = ["rpki.irdb"]
+
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/django_settings/pubd.py b/rpki/django_settings/pubd.py
new file mode 100644
index 00000000..6bd9fdc0
--- /dev/null
+++ b/rpki/django_settings/pubd.py
@@ -0,0 +1,45 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains configuration settings for Django libraries for
+the pubd program.
+"""
+
+from .common import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+
+# Database configuration.
+
+DATABASES = DatabaseConfigurator().configure(cfg, "pubd")
+del DatabaseConfigurator
+
+
+# Apps.
+
+INSTALLED_APPS = ["rpki.pubdb"]
+
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/django_settings/rcynic.py b/rpki/django_settings/rcynic.py
new file mode 100644
index 00000000..0845604c
--- /dev/null
+++ b/rpki/django_settings/rcynic.py
@@ -0,0 +1,68 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains configuration settings for Django libraries.
+At present, rcynicng only uses the Django ORM, not the rest of Django.
+Unlike the CA tools, rcynicng defaults to using SQLite3 as its database
+engine, so we tweak the defaults a little before instantiating the
+database configuration here.
+"""
+
+from .common import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+
+# Database configuration.
+
+class DBConfigurator(DatabaseConfigurator):
+
+ default_sql_engine = "sqlite3"
+
+ @property
+ def sqlite3(self):
+ return dict(
+ ENGINE = "django.db.backends.sqlite3",
+ NAME = cfg.get("sql-database", section = self.section, default = "rcynic.db"))
+
+
+DATABASES = DBConfigurator().configure(cfg, "rcynic")
+
+del DBConfigurator
+del DatabaseConfigurator
+
+
+# Apps.
+
+INSTALLED_APPS = ["rpki.rcynicdb"]
+
+
+# Debugging
+#
+# DO NOT ENABLE DJANGO DEBUGGING IN PRODUCTION!
+#
+#DEBUG = True
+
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/django_settings/rpkid.py b/rpki/django_settings/rpkid.py
new file mode 100644
index 00000000..e34518bb
--- /dev/null
+++ b/rpki/django_settings/rpkid.py
@@ -0,0 +1,45 @@
+# $Id$
+
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This module contains configuration settings for Django libraries for
+the rpkid program.
+"""
+
+from .common import * # pylint: disable=W0401,W0614
+
+__version__ = "$Id$"
+
+
+# Database configuration.
+
+DATABASES = DatabaseConfigurator().configure(cfg, "rpkid")
+del DatabaseConfigurator
+
+
+# Apps.
+
+INSTALLED_APPS = ["rpki.rpkidb"]
+
+
+# Allow local site to override any setting above -- but if there's
+# anything that local sites routinely need to modify, please consider
+# putting that configuration into rpki.conf and just adding code here
+# to read that configuration.
+try:
+ from local_settings import * # pylint: disable=W0401,F0401
+except ImportError:
+ pass
diff --git a/rpki/exceptions.py b/rpki/exceptions.py
index 504c6f28..d66ad00c 100644
--- a/rpki/exceptions.py
+++ b/rpki/exceptions.py
@@ -22,346 +22,228 @@ Exception definitions for RPKI modules.
"""
class RPKI_Exception(Exception):
- """
- Base class for RPKI exceptions.
- """
+ "Base class for RPKI exceptions."
class NotInDatabase(RPKI_Exception):
- """
- Lookup failed for an object expected to be in the database.
- """
+ "Lookup failed for an object expected to be in the database."
class BadURISyntax(RPKI_Exception):
- """
- Illegal syntax for a URI.
- """
+ "Illegal syntax for a URI."
class BadStatusCode(RPKI_Exception):
- """
- Unrecognized protocol status code.
- """
+ "Unrecognized protocol status code."
class BadQuery(RPKI_Exception):
- """
- Unexpected protocol query.
- """
+ "Unexpected protocol query."
class DBConsistancyError(RPKI_Exception):
- """
- Found multiple matches for a database query that shouldn't ever
- return that.
- """
+ "Found multiple matches for a database query that shouldn't ever return that."
class CMSVerificationFailed(RPKI_Exception):
- """
- Verification of a CMS message failed.
- """
+ "Verification of a CMS message failed."
class HTTPRequestFailed(RPKI_Exception):
- """
- HTTP request failed.
- """
+ "HTTP request failed."
class DERObjectConversionError(RPKI_Exception):
- """
- Error trying to convert a DER-based object from one representation
- to another.
- """
+ "Error trying to convert a DER-based object from one representation to another."
class NotACertificateChain(RPKI_Exception):
- """
- Certificates don't form a proper chain.
- """
+ "Certificates don't form a proper chain."
class BadContactURL(RPKI_Exception):
- """
- Error trying to parse contact URL.
- """
+ "Error trying to parse contact URL."
class BadClassNameSyntax(RPKI_Exception):
- """
- Illegal syntax for a class_name.
- """
+ "Illegal syntax for a class_name."
class BadIssueResponse(RPKI_Exception):
- """
- issue_response PDU with wrong number of classes or certificates.
- """
+ "issue_response PDU with wrong number of classes or certificates."
class NotImplementedYet(RPKI_Exception):
- """
- Internal error -- not implemented yet.
- """
+ "Internal error -- not implemented yet."
class BadPKCS10(RPKI_Exception):
- """
- Bad PKCS #10 object.
- """
+ "Bad PKCS #10 object."
class UpstreamError(RPKI_Exception):
- """
- Received an error from upstream.
- """
+ "Received an error from upstream."
class ChildNotFound(RPKI_Exception):
- """
- Could not find specified child in database.
- """
+ "Could not find specified child in database."
class BSCNotFound(RPKI_Exception):
- """
- Could not find specified BSC in database.
- """
+ "Could not find specified BSC in database."
class BadSender(RPKI_Exception):
- """
- Unexpected XML sender value.
- """
+ "Unexpected XML sender value."
class ClassNameMismatch(RPKI_Exception):
- """
- class_name does not match child context.
- """
+ "class_name does not match child context."
class ClassNameUnknown(RPKI_Exception):
- """
- Unknown class_name.
- """
+ "Unknown class_name."
class SKIMismatch(RPKI_Exception):
- """
- SKI value in response does not match request.
- """
+ "SKI value in response does not match request."
class SubprocessError(RPKI_Exception):
- """
- Subprocess returned unexpected error.
- """
+ "Subprocess returned unexpected error."
class BadIRDBReply(RPKI_Exception):
- """
- Unexpected reply to IRDB query.
- """
+ "Unexpected reply to IRDB query."
class NotFound(RPKI_Exception):
- """
- Object not found in database.
- """
+ "Object not found in database."
class MustBePrefix(RPKI_Exception):
- """
- Resource range cannot be expressed as a prefix.
- """
+ "Resource range cannot be expressed as a prefix."
class TLSValidationError(RPKI_Exception):
- """
- TLS certificate validation error.
- """
+ "TLS certificate validation error."
class MultipleTLSEECert(TLSValidationError):
- """
- Received more than one TLS EE certificate.
- """
+ "Received more than one TLS EE certificate."
class ReceivedTLSCACert(TLSValidationError):
- """
- Received CA certificate via TLS.
- """
+ "Received CA certificate via TLS."
class WrongEContentType(RPKI_Exception):
- """
- Received wrong CMS eContentType.
- """
+ "Received wrong CMS eContentType."
class EmptyPEM(RPKI_Exception):
- """
- Couldn't find PEM block to convert.
- """
+ "Couldn't find PEM block to convert."
class UnexpectedCMSCerts(RPKI_Exception):
- """
- Received CMS certs when not expecting any.
- """
+ "Received CMS certs when not expecting any."
class UnexpectedCMSCRLs(RPKI_Exception):
- """
- Received CMS CRLs when not expecting any.
- """
+ "Received CMS CRLs when not expecting any."
class MissingCMSEEcert(RPKI_Exception):
- """
- Didn't receive CMS EE cert when expecting one.
- """
+ "Didn't receive CMS EE cert when expecting one."
class MissingCMSCRL(RPKI_Exception):
- """
- Didn't receive CMS CRL when expecting one.
- """
+ "Didn't receive CMS CRL when expecting one."
class UnparsableCMSDER(RPKI_Exception):
- """
- Alleged CMS DER wasn't parsable.
- """
+ "Alleged CMS DER wasn't parsable."
class CMSCRLNotSet(RPKI_Exception):
- """
- CMS CRL has not been configured.
- """
+ "CMS CRL has not been configured."
class ServerShuttingDown(RPKI_Exception):
- """
- Server is shutting down.
- """
+ "Server is shutting down."
class NoActiveCA(RPKI_Exception):
- """
- No active ca_detail for specified class.
- """
+ "No active ca_detail for specified class."
class BadClientURL(RPKI_Exception):
- """
- URL given to HTTP client does not match profile.
- """
+ "URL given to HTTP client does not match profile."
class ClientNotFound(RPKI_Exception):
- """
- Could not find specified client in database.
- """
+ "Could not find specified client in database."
class BadExtension(RPKI_Exception):
- """
- Forbidden X.509 extension.
- """
+ "Forbidden X.509 extension."
class ForbiddenURI(RPKI_Exception):
- """
- Forbidden URI, does not start with correct base URI.
- """
+ "Forbidden URI, does not start with correct base URI."
class HTTPClientAborted(RPKI_Exception):
- """
- HTTP client connection closed while in request-sent state.
- """
+ "HTTP client connection closed while in request-sent state."
class BadPublicationReply(RPKI_Exception):
- """
- Unexpected reply to publication query.
- """
+ "Unexpected reply to publication query."
class DuplicateObject(RPKI_Exception):
- """
- Attempt to create an object that already exists.
- """
+ "Attempt to create an object that already exists."
class EmptyROAPrefixList(RPKI_Exception):
- """
- Can't create ROA with an empty prefix list.
- """
+ "Can't create ROA with an empty prefix list."
class NoCoveringCertForROA(RPKI_Exception):
- """
- Couldn't find a covering certificate to generate ROA.
- """
+ "Couldn't find a covering certificate to generate ROA."
class BSCNotReady(RPKI_Exception):
- """
- BSC not yet in a usable state, signing_cert not set.
- """
+ "BSC not yet in a usable state, signing_cert not set."
class HTTPUnexpectedState(RPKI_Exception):
- """
- HTTP event occurred in an unexpected state.
- """
+ "HTTP event occurred in an unexpected state."
class HTTPBadVersion(RPKI_Exception):
- """
- HTTP couldn't parse HTTP version.
- """
+ "HTTP couldn't parse HTTP version."
class HandleTranslationError(RPKI_Exception):
- """
- Internal error translating protocol handle -> SQL id.
- """
+ "Internal error translating protocol handle -> SQL id."
class NoObjectAtURI(RPKI_Exception):
- """
- No object published at specified URI.
- """
+ "No object published at specified URI."
+
+class ExistingObjectAtURI(RPKI_Exception):
+ "An object has already been published at specified URI."
+
+class DifferentObjectAtURI(RPKI_Exception):
+ "An object with a different hash exists at specified URI."
class CMSContentNotSet(RPKI_Exception):
- """
- Inner content of a CMS_object has not been set. If object is known
- to be valid, the .extract() method should be able to set the
- content; otherwise, only the .verify() method (which checks
- signatures) is safe.
- """
+ """
+ Inner content of a CMS_object has not been set. If object is known
+ to be valid, the .extract() method should be able to set the
+ content; otherwise, only the .verify() method (which checks
+ signatures) is safe.
+ """
class HTTPTimeout(RPKI_Exception):
- """
- HTTP connection timed out.
- """
+ "HTTP connection timed out."
class BadIPResource(RPKI_Exception):
- """
- Parse failure for alleged IP resource string.
- """
+ "Parse failure for alleged IP resource string."
class BadROAPrefix(RPKI_Exception):
- """
- Parse failure for alleged ROA prefix string.
- """
+ "Parse failure for alleged ROA prefix string."
class CommandParseFailure(RPKI_Exception):
- """
- Failed to parse command line.
- """
+ "Failed to parse command line."
class CMSCertHasExpired(RPKI_Exception):
- """
- CMS certificate has expired.
- """
+ "CMS certificate has expired."
class TrustedCMSCertHasExpired(RPKI_Exception):
- """
- Trusted CMS certificate has expired.
- """
+ "Trusted CMS certificate has expired."
class MultipleCMSEECert(RPKI_Exception):
- """
- Can't have more than one CMS EE certificate in validation chain.
- """
+ "Can't have more than one CMS EE certificate in validation chain."
class ResourceOverlap(RPKI_Exception):
- """
- Overlapping resources in resource_set.
- """
+ "Overlapping resources in resource_set."
class CMSReplay(RPKI_Exception):
- """
- Possible CMS replay attack detected.
- """
+ "Possible CMS replay attack detected."
class PastNotAfter(RPKI_Exception):
- """
- Requested notAfter value is already in the past.
- """
+ "Requested notAfter value is already in the past."
class NullValidityInterval(RPKI_Exception):
- """
- Requested validity interval is null.
- """
+ "Requested validity interval is null."
class BadX510DN(RPKI_Exception):
- """
- X.510 distinguished name does not match profile.
- """
+ "X.510 distinguished name does not match profile."
class BadAutonomousSystemNumber(RPKI_Exception):
- """
- Bad AutonomousSystem number.
- """
+ "Bad AutonomousSystem number."
class WrongEKU(RPKI_Exception):
- """
- Extended Key Usage extension does not match profile.
- """
+ "Extended Key Usage extension does not match profile."
+
+class UnexpectedUpDownResponse(RPKI_Exception):
+ "Up-down message is not of the expected type."
+
+class BadContentType(RPKI_Exception):
+ "Bad HTTP Content-Type."
+
+class ResourceClassMismatch(RPKI_Exception):
+ "Up-down resource class does not match."
+
+class IRDBExpired(RPKI_Exception):
+ "Back-end database record has expired."
diff --git a/rpki/fields.py b/rpki/fields.py
new file mode 100644
index 00000000..6a2dc4d0
--- /dev/null
+++ b/rpki/fields.py
@@ -0,0 +1,205 @@
+# $Id$
+#
+# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2011--2012 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Common Django ORM field classes.
+
+Many of these are complex ASN.1 DER objects stored as binary data,
+since the only sane text representation would just be the Base64
+encoding of the DER and thus would add no value.
+"""
+
+import logging
+
+from django.db import models
+
+import rpki.x509
+import rpki.sundial
+
+logger = logging.getLogger(__name__)
+
+
+class EnumField(models.PositiveSmallIntegerField):
+ """
+ An enumeration type that uses strings in Python and small integers
+ in SQL.
+ """
+
+ description = "An enumeration type"
+
+ def __init__(self, *args, **kwargs):
+ if isinstance(kwargs.get("choices"), (tuple, list)) and isinstance(kwargs["choices"][0], (str, unicode)):
+ kwargs["choices"] = tuple(enumerate(kwargs["choices"], 1))
+ # Might need something here to handle string-valued default parameter
+ models.PositiveSmallIntegerField.__init__(self, *args, **kwargs)
+ self.enum_i2s = dict(self.flatchoices)
+ self.enum_s2i = dict((v, k) for k, v in self.flatchoices)
+
+ def from_db_value(self, value, expression, connection, context):
+ return self.enum_i2s.get(value, value)
+
+ def to_python(self, value):
+ value = super(EnumField, self).to_python(value)
+ return self.enum_i2s.get(value, value)
+
+ def get_prep_value(self, value):
+ return self.enum_s2i.get(value, value)
+
+
+class SundialField(models.DateTimeField):
+ """
+ A field type for our customized datetime objects.
+ """
+
+ description = "A datetime type using our customized datetime objects"
+
+ def from_db_value(self, value, expression, connection, context):
+ return self.to_python(value)
+
+ def to_python(self, value):
+ if isinstance(value, rpki.sundial.pydatetime.datetime):
+ return rpki.sundial.datetime.from_datetime(
+ models.DateTimeField.to_python(self, value))
+ else:
+ return value
+
+ def get_prep_value(self, value):
+ if isinstance(value, rpki.sundial.datetime):
+ return value.to_datetime()
+ else:
+ return value
+
+
+class BlobField(models.Field):
+ """
+ Old BLOB field type, predating Django's BinaryField type.
+
+    Do not use, this is only here for backwards compatibility during migrations.
+ """
+
+ description = "Raw BLOB type without ASN.1 encoding/decoding"
+
+ def __init__(self, *args, **kwargs):
+ self.blob_type = kwargs.pop("blob_type", None)
+ kwargs["serialize"] = False
+ kwargs["blank"] = True
+ kwargs["default"] = None
+ models.Field.__init__(self, *args, **kwargs)
+
+ def deconstruct(self):
+ name, path, args, kwargs = super(BlobField, self).deconstruct()
+ del kwargs["serialize"]
+ del kwargs["blank"]
+ del kwargs["default"]
+ return name, path, args, kwargs
+
+ def db_type(self, connection):
+ if self.blob_type is not None:
+ return self.blob_type
+ elif connection.settings_dict['ENGINE'] == "django.db.backends.mysql":
+ return "LONGBLOB"
+ elif connection.settings_dict['ENGINE'] == "django.db.backends.posgresql":
+ return "bytea"
+ else:
+ return "BLOB"
+
+
+# For reasons which now escape me, I had a few fields in the old
+# hand-coded SQL which used MySQL type BINARY(20) to hold SKIs.
+# Presumably this was so that I could then use those SKIs in indexes
+# and searches, but apparently I never got around to that part.
+#
+# SKIs probably would be better stored as hex strings anyway, so not
+# bothering with a separate binary type model for this. Deal with
+# this if and when it ever becomes an issue.
+
+
+# DERField used to be a subclass of BlobField. Try changing it to be
+# a subclass of BinaryField instead, leave BlobField (for now) for
+# backwards compatibility during migrations.
+
+class DERField(models.BinaryField):
+ """
+ Field class for DER objects, with automatic translation between
+ ASN.1 and Python types. This is an abstract class, concrete field
+ classes are derived from it.
+ """
+
+ rpki_type = rpki.x509.DER_object
+
+ def __init__(self, *args, **kwargs):
+ kwargs["blank"] = True
+ kwargs["default"] = None
+ super(DERField, self).__init__(*args, **kwargs)
+
+ def deconstruct(self):
+ name, path, args, kwargs = super(DERField, self).deconstruct()
+ del kwargs["blank"]
+ del kwargs["default"]
+ return name, path, args, kwargs
+
+ def from_db_value(self, value, expression, connection, context):
+ if value is not None:
+ value = self.rpki_type(DER = str(value))
+ return value
+
+ def to_python(self, value):
+ value = super(DERField, self).to_python(value)
+ if value is not None and not isinstance(value, self.rpki_type):
+ value = self.rpki_type(DER = str(value))
+ return value
+
+ def get_prep_value(self, value):
+ if value is not None:
+ value = value.get_DER()
+ return super(DERField, self).get_prep_value(value)
+
+
+class CertificateField(DERField):
+ description = "X.509 certificate"
+ rpki_type = rpki.x509.X509
+
+class RSAPrivateKeyField(DERField):
+ description = "RSA keypair"
+ rpki_type = rpki.x509.RSA
+
+KeyField = RSAPrivateKeyField
+
+class PublicKeyField(DERField):
+ description = "RSA keypair"
+ rpki_type = rpki.x509.PublicKey
+
+class CRLField(DERField):
+ description = "Certificate Revocation List"
+ rpki_type = rpki.x509.CRL
+
+class PKCS10Field(DERField):
+ description = "PKCS #10 certificate request"
+ rpki_type = rpki.x509.PKCS10
+
+class ManifestField(DERField):
+ description = "RPKI Manifest"
+ rpki_type = rpki.x509.SignedManifest
+
+class ROAField(DERField):
+ description = "ROA"
+ rpki_type = rpki.x509.ROA
+
+class GhostbusterField(DERField):
+ description = "Ghostbuster Record"
+ rpki_type = rpki.x509.Ghostbuster
diff --git a/rpki/gui/app/check_expired.py b/rpki/gui/app/check_expired.py
index a084af79..65f4315f 100644
--- a/rpki/gui/app/check_expired.py
+++ b/rpki/gui/app/check_expired.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2012, 2013, 2014 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2013, 2014, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -21,13 +21,14 @@ from cStringIO import StringIO
import logging
import datetime
-from rpki.gui.cacheview.models import Cert
+from rpki.gui.gui_rpki_cache.models import Cert
from rpki.gui.app.models import Conf, ResourceCert, Timestamp, Alert
from rpki.gui.app.glue import list_received_resources
from rpki.irdb import Zookeeper
-from rpki.left_right import report_error_elt, list_published_objects_elt
from rpki.x509 import X509
+from rpki.left_right import version, nsmap, tag_msg, tag_list_published_objects
+from lxml.etree import Element, SubElement
from django.core.mail import send_mail
logger = logging.getLogger(__name__)
@@ -41,8 +42,8 @@ def check_cert(handle, p, errs):
The displayed object name defaults to the class name, but can be overridden
using the `object_name` argument.
-
"""
+
t = p.certificate.getNotAfter()
if t <= expire_time:
e = 'expired' if t <= now else 'will expire'
@@ -60,8 +61,8 @@ def check_expire(conf, errs):
# get certs for `handle'
cert_set = ResourceCert.objects.filter(conf=conf)
for cert in cert_set:
- # look up cert in cacheview db
- obj_set = Cert.objects.filter(repo__uri=cert.uri)
+ # look up cert in gui_rpki_cache db
+ obj_set = Cert.objects.filter(uri=cert.uri)
if not obj_set:
# since the <list_received_resources/> output is cached, this can
# occur if the cache is out of date as well..
@@ -76,7 +77,7 @@ def check_expire(conf, errs):
f = '*'
else:
f = ' '
- msg.append("%s [%d] uri=%s ski=%s name=%s expires=%s" % (f, n, c.repo.uri, c.keyid, c.name, c.not_after))
+ msg.append("%s [%d] uri=%s expires=%s" % (f, n, c.uri, c.not_after))
# find ghostbuster records attached to this cert
for gbr in c.ghostbusters.all():
@@ -102,30 +103,26 @@ def check_expire(conf, errs):
def check_child_certs(conf, errs):
"""Fetch the list of published objects from rpkid, and inspect the issued
resource certs (uri ending in .cer).
-
"""
+
z = Zookeeper(handle=conf.handle)
- req = list_published_objects_elt.make_pdu(action="list",
- tag="list_published_objects",
- self_handle=conf.handle)
+ req = Element(tag_msg, nsmap=nsmap, type="query", version=version)
+ SubElement(req, tag_list_published_objects,
+ tag="list_published_objects", tenant_handle=conf.handle)
pdus = z.call_rpkid(req)
for pdu in pdus:
- if isinstance(pdu, report_error_elt):
- logger.error("rpkid reported an error: %s", pdu.error_code)
- elif isinstance(pdu, list_published_objects_elt):
- if pdu.uri.endswith('.cer'):
- cert = X509()
- cert.set(Base64=pdu.obj)
- t = cert.getNotAfter()
- if t <= expire_time:
- e = 'expired' if t <= now else 'will expire'
- errs.write("%(handle)s's rescert for Child %(child)s %(expire)s on %(date)s uri=%(uri)s subject=%(subject)s\n" % {
- 'handle': conf.handle,
- 'child': pdu.child_handle,
- 'uri': pdu.uri,
- 'subject': cert.getSubject(),
- 'expire': e,
- 'date': t})
+ if pdu.get("uri").endswith('.cer'):
+ cert = X509(Base64=pdu.text)
+ t = cert.getNotAfter()
+ if t <= expire_time:
+ e = 'expired' if t <= now else 'will expire'
+ errs.write("%(handle)s's rescert for Child %(child)s %(expire)s on %(date)s uri=%(uri)s subject=%(subject)s\n" % {
+ 'handle': conf.handle,
+ 'child': pdu.get("child_handle"),
+ 'uri': pdu.get("uri"),
+ 'subject': cert.getSubject(),
+ 'expire': e,
+ 'date': t})
class NetworkError(Exception):
@@ -139,8 +136,8 @@ def notify_expired(expire_days=14, from_email=None):
expire_days: the number of days ahead of today to warn
from_email: set the From: address for the email
-
"""
+
global expire_time # so i don't have to pass it around
global now
diff --git a/rpki/gui/app/forms.py b/rpki/gui/app/forms.py
index a1214297..4a95c8da 100644
--- a/rpki/gui/app/forms.py
+++ b/rpki/gui/app/forms.py
@@ -170,105 +170,105 @@ def ROARequestFormFactory(conf):
"""
class Cls(forms.Form):
- """Form for entering a ROA request.
-
- Handles both IPv4 and IPv6."""
-
- prefix = forms.CharField(
- widget=forms.TextInput(attrs={
- 'autofocus': 'true', 'placeholder': 'Prefix',
- 'class': 'span4'
- })
- )
- max_prefixlen = forms.CharField(
- required=False,
- widget=forms.TextInput(attrs={
- 'placeholder': 'Max len',
- 'class': 'span1'
- })
- )
- asn = forms.IntegerField(
- widget=forms.TextInput(attrs={
- 'placeholder': 'ASN',
- 'class': 'span1'
- })
- )
+ """Form for entering a ROA request.
+
+ Handles both IPv4 and IPv6."""
+
+ prefix = forms.CharField(
+ widget=forms.TextInput(attrs={
+ 'autofocus': 'true', 'placeholder': 'Prefix',
+ 'class': 'span4'
+ })
+ )
+ max_prefixlen = forms.CharField(
+ required=False,
+ widget=forms.TextInput(attrs={
+ 'placeholder': 'Max len',
+ 'class': 'span1'
+ })
+ )
+ asn = forms.IntegerField(
+ widget=forms.TextInput(attrs={
+ 'placeholder': 'ASN',
+ 'class': 'span1'
+ })
+ )
protect_children = forms.BooleanField(required=False)
- def __init__(self, *args, **kwargs):
- kwargs['auto_id'] = False
- super(Cls, self).__init__(*args, **kwargs)
- self.conf = conf # conf is the arg to ROARequestFormFactory
- self.inline = True
- self.use_table = False
-
- def _as_resource_range(self):
- """Convert the prefix in the form to a
- rpki.resource_set.resource_range_ip object.
-
- If there is no mask provided, assume the closest classful mask.
-
- """
- prefix = self.cleaned_data.get('prefix')
- if '/' not in prefix:
- p = IPAddress(prefix)
-
- # determine the first nonzero bit starting from the lsb and
- # subtract from the address size to find the closest classful
- # mask that contains this single address
- prefixlen = 0
- while (p != 0) and (p & 1) == 0:
- prefixlen = prefixlen + 1
- p = p >> 1
- mask = p.bits - (8 * (prefixlen / 8))
- prefix = prefix + '/' + str(mask)
-
- return resource_range_ip.parse_str(prefix)
-
- def clean_asn(self):
- value = self.cleaned_data.get('asn')
- if value < 0:
- raise forms.ValidationError('AS must be a positive value or 0')
- return value
-
- def clean_prefix(self):
- try:
- r = self._as_resource_range()
- except:
- raise forms.ValidationError('invalid prefix')
-
- manager = models.ResourceRangeAddressV4 if r.version == 4 else models.ResourceRangeAddressV6
- if not manager.objects.filter(cert__conf=self.conf,
- prefix_min__lte=r.min,
- prefix_max__gte=r.max).exists():
- raise forms.ValidationError('prefix is not allocated to you')
- return str(r)
-
- def clean_max_prefixlen(self):
- v = self.cleaned_data.get('max_prefixlen')
- if v:
- if v[0] == '/':
- v = v[1:] # allow user to specify /24
- try:
- if int(v) < 0:
- raise forms.ValidationError('max prefix length must be positive or 0')
- except ValueError:
- raise forms.ValidationError('invalid integer value')
- return v
-
- def clean(self):
- if 'prefix' in self.cleaned_data:
- r = self._as_resource_range()
- max_prefixlen = self.cleaned_data.get('max_prefixlen')
- max_prefixlen = int(max_prefixlen) if max_prefixlen else r.prefixlen()
- if max_prefixlen < r.prefixlen():
- raise forms.ValidationError(
- 'max prefix length must be greater than or equal to the prefix length')
- if max_prefixlen > r.min.bits:
- raise forms.ValidationError(
- 'max prefix length (%d) is out of range for IP version (%d)' % (max_prefixlen, r.min.bits))
- self.cleaned_data['max_prefixlen'] = str(max_prefixlen)
- return self.cleaned_data
+ def __init__(self, *args, **kwargs):
+ kwargs['auto_id'] = False
+ super(Cls, self).__init__(*args, **kwargs)
+ self.conf = conf # conf is the arg to ROARequestFormFactory
+ self.inline = True
+ self.use_table = False
+
+ def _as_resource_range(self):
+ """Convert the prefix in the form to a
+ rpki.resource_set.resource_range_ip object.
+
+ If there is no mask provided, assume the closest classful mask.
+
+ """
+ prefix = self.cleaned_data.get('prefix')
+ if '/' not in prefix:
+ p = IPAddress(prefix)
+
+ # determine the first nonzero bit starting from the lsb and
+ # subtract from the address size to find the closest classful
+ # mask that contains this single address
+ prefixlen = 0
+ while (p != 0) and (p & 1) == 0:
+ prefixlen = prefixlen + 1
+ p = p >> 1
+ mask = p.bits - (8 * (prefixlen / 8))
+ prefix = prefix + '/' + str(mask)
+
+ return resource_range_ip.parse_str(prefix)
+
+ def clean_asn(self):
+ value = self.cleaned_data.get('asn')
+ if value < 0:
+ raise forms.ValidationError('AS must be a positive value or 0')
+ return value
+
+ def clean_prefix(self):
+ try:
+ r = self._as_resource_range()
+ except:
+ raise forms.ValidationError('invalid prefix')
+
+ manager = models.ResourceRangeAddressV4 if r.version == 4 else models.ResourceRangeAddressV6
+ if not manager.objects.filter(cert__conf=self.conf,
+ prefix_min__lte=r.min,
+ prefix_max__gte=r.max).exists():
+ raise forms.ValidationError('prefix is not allocated to you')
+ return str(r)
+
+ def clean_max_prefixlen(self):
+ v = self.cleaned_data.get('max_prefixlen')
+ if v:
+ if v[0] == '/':
+ v = v[1:] # allow user to specify /24
+ try:
+ if int(v) < 0:
+ raise forms.ValidationError('max prefix length must be positive or 0')
+ except ValueError:
+ raise forms.ValidationError('invalid integer value')
+ return v
+
+ def clean(self):
+ if 'prefix' in self.cleaned_data:
+ r = self._as_resource_range()
+ max_prefixlen = self.cleaned_data.get('max_prefixlen')
+ max_prefixlen = int(max_prefixlen) if max_prefixlen else r.prefixlen()
+ if max_prefixlen < r.prefixlen():
+ raise forms.ValidationError(
+ 'max prefix length must be greater than or equal to the prefix length')
+ if max_prefixlen > r.min.bits:
+ raise forms.ValidationError(
+ 'max prefix length (%d) is out of range for IP version (%d)' % (max_prefixlen, r.min.bits))
+ self.cleaned_data['max_prefixlen'] = str(max_prefixlen)
+ return self.cleaned_data
return Cls
diff --git a/rpki/gui/app/glue.py b/rpki/gui/app/glue.py
index a2dddb51..745638c4 100644
--- a/rpki/gui/app/glue.py
+++ b/rpki/gui/app/glue.py
@@ -16,7 +16,6 @@
"""
This file contains code that interfaces between the django views implementing
the portal gui and the rpki.* modules.
-
"""
from __future__ import with_statement
@@ -28,17 +27,22 @@ from datetime import datetime
from rpki.resource_set import (resource_set_as, resource_set_ipv4,
resource_set_ipv6, resource_range_ipv4,
resource_range_ipv6)
-from rpki.left_right import list_received_resources_elt, report_error_elt
from rpki.irdb.zookeeper import Zookeeper
from rpki.gui.app import models
from rpki.exceptions import BadIPResource
+from rpki.left_right import nsmap, version, tag_msg, tag_list_received_resources
+from lxml.etree import Element, SubElement
from django.contrib.auth.models import User
-from django.db.transaction import commit_on_success
+from django.db.transaction import atomic
+
+import logging
+logger = logging.getLogger(__name__)
def ghostbuster_to_vcard(gbr):
"""Convert a GhostbusterRequest object into a vCard object."""
+
import vobject
vcard = vobject.vCard()
@@ -66,19 +70,7 @@ def ghostbuster_to_vcard(gbr):
return vcard.serialize()
-class LeftRightError(Exception):
- """Class for wrapping report_error_elt errors from Zookeeper.call_rpkid().
-
- It expects a single argument, which is the associated report_error_elt instance."""
-
- def __str__(self):
- return 'Error occurred while communicating with rpkid: handle=%s code=%s text=%s' % (
- self.args[0].self_handle,
- self.args[0].error_code,
- self.args[0].error_text)
-
-
-@commit_on_success
+@atomic
def list_received_resources(log, conf):
"""
Query rpkid for this resource handle's received resources.
@@ -86,11 +78,18 @@ def list_received_resources(log, conf):
The semantics are to clear the entire table and populate with the list of
certs received. Other models should not reference the table directly with
foreign keys.
-
"""
z = Zookeeper(handle=conf.handle, disable_signal_handlers=True)
- pdus = z.call_rpkid(list_received_resources_elt.make_pdu(self_handle=conf.handle))
+ req = Element(tag_msg, nsmap=nsmap, type="query", version=version)
+ SubElement(req, tag_list_received_resources, tenant_handle=conf.handle, tag=conf.handle)
+ try:
+ pdus = z.call_rpkid(req)
+ except Exception as err:
+ logger.error('caught exception while attempting to query rpkid')
+ logger.exception(err)
+ return
+
# pdus is sometimes None (see https://trac.rpki.net/ticket/681)
if pdus is None:
print >>log, 'error: call_rpkid() returned None for handle %s when fetching received resources' % conf.handle
@@ -99,34 +98,27 @@ def list_received_resources(log, conf):
models.ResourceCert.objects.filter(conf=conf).delete()
for pdu in pdus:
- if isinstance(pdu, report_error_elt):
- # this will cause the db to be rolled back so the above delete()
- # won't clobber existing resources
- raise LeftRightError(pdu)
- elif isinstance(pdu, list_received_resources_elt):
- if pdu.parent_handle != conf.handle:
- parent = models.Parent.objects.get(issuer=conf,
- handle=pdu.parent_handle)
- else:
- # root cert, self-signed
- parent = None
-
- not_before = datetime.strptime(pdu.notBefore, "%Y-%m-%dT%H:%M:%SZ")
- not_after = datetime.strptime(pdu.notAfter, "%Y-%m-%dT%H:%M:%SZ")
-
- cert = models.ResourceCert.objects.create(
- conf=conf, parent=parent, not_before=not_before,
- not_after=not_after, uri=pdu.uri)
-
- for asn in resource_set_as(pdu.asn):
- cert.asn_ranges.create(min=asn.min, max=asn.max)
-
- for rng in resource_set_ipv4(pdu.ipv4):
- cert.address_ranges.create(prefix_min=rng.min,
- prefix_max=rng.max)
-
- for rng in resource_set_ipv6(pdu.ipv6):
- cert.address_ranges_v6.create(prefix_min=rng.min,
- prefix_max=rng.max)
+ if pdu.get("parent_handle") != conf.handle:
+ parent = models.Parent.objects.get(issuer=conf,
+ handle=pdu.get("parent_handle"))
else:
- print >>log, "error: unexpected pdu from rpkid type=%s" % type(pdu)
+ # root cert, self-signed
+ parent = None
+
+ not_before = datetime.strptime(pdu.get("notBefore"), "%Y-%m-%dT%H:%M:%SZ")
+ not_after = datetime.strptime(pdu.get("notAfter"), "%Y-%m-%dT%H:%M:%SZ")
+
+ cert = models.ResourceCert.objects.create(
+ conf=conf, parent=parent, not_before=not_before,
+ not_after=not_after, uri=pdu.get("uri"))
+
+ for asn in resource_set_as(pdu.get("asn")):
+ cert.asn_ranges.create(min=asn.min, max=asn.max)
+
+ for rng in resource_set_ipv4(pdu.get("ipv4")):
+ cert.address_ranges.create(prefix_min=rng.min,
+ prefix_max=rng.max)
+
+ for rng in resource_set_ipv6(pdu.get("ipv6")):
+ cert.address_ranges_v6.create(prefix_min=rng.min,
+ prefix_max=rng.max)
diff --git a/rpki/gui/app/migrations/0001_initial.py b/rpki/gui/app/migrations/0001_initial.py
index 80877901..79d21324 100644
--- a/rpki/gui/app/migrations/0001_initial.py
+++ b/rpki/gui/app/migrations/0001_initial.py
@@ -1,192 +1,249 @@
# -*- coding: utf-8 -*-
-import datetime
-from south.db import db
-from south.v2 import SchemaMigration
-from django.db import models
-
-
-class Migration(SchemaMigration):
-
- def forwards(self, orm):
- # Adding model 'ResourceCert'
- db.create_table('app_resourcecert', (
- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
- ('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='certs', to=orm['irdb.Parent'])),
- ('not_before', self.gf('django.db.models.fields.DateTimeField')()),
- ('not_after', self.gf('django.db.models.fields.DateTimeField')()),
- ('uri', self.gf('django.db.models.fields.CharField')(max_length=255)),
- ))
- db.send_create_signal('app', ['ResourceCert'])
-
- # Adding model 'ResourceRangeAddressV4'
- db.create_table('app_resourcerangeaddressv4', (
- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
- ('prefix_min', self.gf('rpki.gui.models.IPv4AddressField')(db_index=True)),
- ('prefix_max', self.gf('rpki.gui.models.IPv4AddressField')(db_index=True)),
- ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='address_ranges', to=orm['app.ResourceCert'])),
- ))
- db.send_create_signal('app', ['ResourceRangeAddressV4'])
-
- # Adding model 'ResourceRangeAddressV6'
- db.create_table('app_resourcerangeaddressv6', (
- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
- ('prefix_min', self.gf('rpki.gui.models.IPv6AddressField')(db_index=True)),
- ('prefix_max', self.gf('rpki.gui.models.IPv6AddressField')(db_index=True)),
- ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='address_ranges_v6', to=orm['app.ResourceCert'])),
- ))
- db.send_create_signal('app', ['ResourceRangeAddressV6'])
-
- # Adding model 'ResourceRangeAS'
- db.create_table('app_resourcerangeas', (
- ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
- ('min', self.gf('django.db.models.fields.PositiveIntegerField')()),
- ('max', self.gf('django.db.models.fields.PositiveIntegerField')()),
- ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='asn_ranges', to=orm['app.ResourceCert'])),
- ))
- db.send_create_signal('app', ['ResourceRangeAS'])
-
- # Adding model 'GhostbusterRequest'
- db.create_table('app_ghostbusterrequest', (
- ('ghostbusterrequest_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['irdb.GhostbusterRequest'], unique=True, primary_key=True)),
- ('full_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
- ('family_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
- ('given_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
- ('additional_name', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
- ('honorific_prefix', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
- ('honorific_suffix', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
- ('email_address', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
- ('organization', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
- ('telephone', self.gf('rpki.gui.app.models.TelephoneField')(max_length=40, null=True, blank=True)),
- ('box', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ('extended', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
- ('street', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
- ('city', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ('region', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ('code', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ('country', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
- ))
- db.send_create_signal('app', ['GhostbusterRequest'])
-
- # Adding model 'Timestamp'
- db.create_table('app_timestamp', (
- ('name', self.gf('django.db.models.fields.CharField')(max_length=30, primary_key=True)),
- ('ts', self.gf('django.db.models.fields.DateTimeField')()),
- ))
- db.send_create_signal('app', ['Timestamp'])
-
-
- def backwards(self, orm):
- # Deleting model 'ResourceCert'
- db.delete_table('app_resourcecert')
-
- # Deleting model 'ResourceRangeAddressV4'
- db.delete_table('app_resourcerangeaddressv4')
-
- # Deleting model 'ResourceRangeAddressV6'
- db.delete_table('app_resourcerangeaddressv6')
-
- # Deleting model 'ResourceRangeAS'
- db.delete_table('app_resourcerangeas')
-
- # Deleting model 'GhostbusterRequest'
- db.delete_table('app_ghostbusterrequest')
-
- # Deleting model 'Timestamp'
- db.delete_table('app_timestamp')
-
-
- models = {
- 'app.ghostbusterrequest': {
- 'Meta': {'ordering': "('family_name', 'given_name')", 'object_name': 'GhostbusterRequest', '_ormbases': ['irdb.GhostbusterRequest']},
- 'additional_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
- 'box': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'code': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'country': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
- 'extended': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
- 'family_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
- 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
- 'ghostbusterrequest_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['irdb.GhostbusterRequest']", 'unique': 'True', 'primary_key': 'True'}),
- 'given_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
- 'honorific_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
- 'honorific_suffix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
- 'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
- 'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
- 'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
- 'telephone': ('rpki.gui.app.models.TelephoneField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
- },
- 'app.resourcecert': {
- 'Meta': {'object_name': 'ResourceCert'},
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'not_after': ('django.db.models.fields.DateTimeField', [], {}),
- 'not_before': ('django.db.models.fields.DateTimeField', [], {}),
- 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'certs'", 'to': "orm['irdb.Parent']"}),
- 'uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
- },
- 'app.resourcerangeaddressv4': {
- 'Meta': {'ordering': "('prefix_min',)", 'object_name': 'ResourceRangeAddressV4'},
- 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'address_ranges'", 'to': "orm['app.ResourceCert']"}),
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'prefix_max': ('rpki.gui.models.IPv4AddressField', [], {'db_index': 'True'}),
- 'prefix_min': ('rpki.gui.models.IPv4AddressField', [], {'db_index': 'True'})
- },
- 'app.resourcerangeaddressv6': {
- 'Meta': {'ordering': "('prefix_min',)", 'object_name': 'ResourceRangeAddressV6'},
- 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'address_ranges_v6'", 'to': "orm['app.ResourceCert']"}),
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'prefix_max': ('rpki.gui.models.IPv6AddressField', [], {'db_index': 'True'}),
- 'prefix_min': ('rpki.gui.models.IPv6AddressField', [], {'db_index': 'True'})
- },
- 'app.resourcerangeas': {
- 'Meta': {'ordering': "('min', 'max')", 'object_name': 'ResourceRangeAS'},
- 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'asn_ranges'", 'to': "orm['app.ResourceCert']"}),
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'max': ('django.db.models.fields.PositiveIntegerField', [], {}),
- 'min': ('django.db.models.fields.PositiveIntegerField', [], {})
- },
- 'app.timestamp': {
- 'Meta': {'object_name': 'Timestamp'},
- 'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'}),
- 'ts': ('django.db.models.fields.DateTimeField', [], {})
- },
- 'irdb.ghostbusterrequest': {
- 'Meta': {'object_name': 'GhostbusterRequest'},
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ghostbuster_requests'", 'to': "orm['irdb.ResourceHolderCA']"}),
- 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ghostbuster_requests'", 'null': 'True', 'to': "orm['irdb.Parent']"}),
- 'vcard': ('django.db.models.fields.TextField', [], {})
- },
- 'irdb.parent': {
- 'Meta': {'unique_together': "(('issuer', 'handle'),)", 'object_name': 'Parent', '_ormbases': ['irdb.Turtle']},
- 'certificate': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
- 'child_handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
- 'handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
- 'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parents'", 'to': "orm['irdb.ResourceHolderCA']"}),
- 'parent_handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
- 'referral_authorization': ('rpki.irdb.models.SignedReferralField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
- 'referrer': ('rpki.irdb.models.HandleField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
- 'repository_type': ('rpki.irdb.models.EnumField', [], {}),
- 'ta': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
- 'turtle_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['irdb.Turtle']", 'unique': 'True', 'primary_key': 'True'})
- },
- 'irdb.resourceholderca': {
- 'Meta': {'object_name': 'ResourceHolderCA'},
- 'certificate': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
- 'handle': ('rpki.irdb.models.HandleField', [], {'unique': 'True', 'max_length': '120'}),
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'last_crl_update': ('rpki.irdb.models.SundialField', [], {}),
- 'latest_crl': ('rpki.irdb.models.CRLField', [], {'default': 'None', 'blank': 'True'}),
- 'next_crl_number': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
- 'next_crl_update': ('rpki.irdb.models.SundialField', [], {}),
- 'next_serial': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
- 'private_key': ('rpki.irdb.models.RSAKeyField', [], {'default': 'None', 'blank': 'True'})
- },
- 'irdb.turtle': {
- 'Meta': {'object_name': 'Turtle'},
- 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
- 'service_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
- }
- }
-
- complete_apps = ['app'] \ No newline at end of file
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+from django.conf import settings
+import rpki.gui.models
+import rpki.gui.app.models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
+ ('irdb', '0001_initial'),
+ ('routeview', '__first__'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Alert',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('severity', models.SmallIntegerField(default=0, choices=[(0, b'info'), (1, b'warning'), (2, b'error')])),
+ ('when', models.DateTimeField(auto_now_add=True)),
+ ('seen', models.BooleanField(default=False)),
+ ('subject', models.CharField(max_length=66)),
+ ('text', models.TextField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ConfACL',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='GhostbusterRequest',
+ fields=[
+ ('ghostbusterrequest_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='irdb.GhostbusterRequest')),
+ ('full_name', models.CharField(max_length=40)),
+ ('family_name', models.CharField(max_length=20)),
+ ('given_name', models.CharField(max_length=20)),
+ ('additional_name', models.CharField(max_length=20, null=True, blank=True)),
+ ('honorific_prefix', models.CharField(max_length=10, null=True, blank=True)),
+ ('honorific_suffix', models.CharField(max_length=10, null=True, blank=True)),
+ ('email_address', models.EmailField(max_length=254, null=True, blank=True)),
+ ('organization', models.CharField(max_length=255, null=True, blank=True)),
+ ('telephone', rpki.gui.app.models.TelephoneField(max_length=40, null=True, blank=True)),
+ ('box', models.CharField(max_length=40, null=True, verbose_name=b'P.O. Box', blank=True)),
+ ('extended', models.CharField(max_length=255, null=True, blank=True)),
+ ('street', models.CharField(max_length=255, null=True, blank=True)),
+ ('city', models.CharField(max_length=40, null=True, blank=True)),
+ ('region', models.CharField(help_text=b'state or province', max_length=40, null=True, blank=True)),
+ ('code', models.CharField(max_length=40, null=True, verbose_name=b'Postal Code', blank=True)),
+ ('country', models.CharField(max_length=40, null=True, blank=True)),
+ ],
+ options={
+ 'ordering': ('family_name', 'given_name'),
+ },
+ bases=('irdb.ghostbusterrequest',),
+ ),
+ migrations.CreateModel(
+ name='ResourceCert',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('not_before', models.DateTimeField()),
+ ('not_after', models.DateTimeField()),
+ ('uri', models.CharField(max_length=255)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ResourceRangeAddressV4',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ('cert', models.ForeignKey(related_name='address_ranges', to='app.ResourceCert')),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ResourceRangeAddressV6',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ('cert', models.ForeignKey(related_name='address_ranges_v6', to='app.ResourceCert')),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ResourceRangeAS',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('min', models.BigIntegerField(validators=[rpki.gui.models.validate_asn])),
+ ('max', models.BigIntegerField(validators=[rpki.gui.models.validate_asn])),
+ ('cert', models.ForeignKey(related_name='asn_ranges', to='app.ResourceCert')),
+ ],
+ options={
+ 'ordering': ('min', 'max'),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Timestamp',
+ fields=[
+ ('name', models.CharField(max_length=30, serialize=False, primary_key=True)),
+ ('ts', models.DateTimeField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Child',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ 'verbose_name_plural': 'children',
+ },
+ bases=('irdb.child',),
+ ),
+ migrations.CreateModel(
+ name='ChildASN',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.childasn',),
+ ),
+ migrations.CreateModel(
+ name='ChildNet',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.childnet',),
+ ),
+ migrations.CreateModel(
+ name='Client',
+ fields=[
+ ],
+ options={
+ 'verbose_name': 'Client',
+ 'proxy': True,
+ },
+ bases=('irdb.client',),
+ ),
+ migrations.CreateModel(
+ name='Conf',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.resourceholderca',),
+ ),
+ migrations.CreateModel(
+ name='Parent',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.parent',),
+ ),
+ migrations.CreateModel(
+ name='Repository',
+ fields=[
+ ],
+ options={
+ 'verbose_name': 'Repository',
+ 'proxy': True,
+ 'verbose_name_plural': 'Repositories',
+ },
+ bases=('irdb.repository',),
+ ),
+ migrations.CreateModel(
+ name='ROARequest',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.roarequest',),
+ ),
+ migrations.CreateModel(
+ name='ROARequestPrefix',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('irdb.roarequestprefix',),
+ ),
+ migrations.CreateModel(
+ name='RouteOrigin',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('routeview.routeorigin',),
+ ),
+ migrations.CreateModel(
+ name='RouteOriginV6',
+ fields=[
+ ],
+ options={
+ 'proxy': True,
+ },
+ bases=('routeview.routeoriginv6',),
+ ),
+ migrations.AddField(
+ model_name='resourcecert',
+ name='conf',
+ field=models.ForeignKey(related_name='certs', to='app.Conf'),
+ ),
+ migrations.AddField(
+ model_name='resourcecert',
+ name='parent',
+ field=models.ForeignKey(related_name='certs', to='app.Parent', null=True),
+ ),
+ migrations.AddField(
+ model_name='confacl',
+ name='conf',
+ field=models.ForeignKey(to='app.Conf'),
+ ),
+ migrations.AddField(
+ model_name='confacl',
+ name='user',
+ field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
+ ),
+ migrations.AddField(
+ model_name='alert',
+ name='conf',
+ field=models.ForeignKey(related_name='alerts', to='app.Conf'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='confacl',
+ unique_together=set([('user', 'conf')]),
+ ),
+ ]
diff --git a/rpki/gui/app/models.py b/rpki/gui/app/models.py
index 40bdbe2c..fb1cafff 100644
--- a/rpki/gui/app/models.py
+++ b/rpki/gui/app/models.py
@@ -1,5 +1,5 @@
# Copyright (C) 2010 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -26,7 +26,6 @@ import rpki.irdb.models
import rpki.gui.models
import rpki.gui.routeview.models
import rpki.oids
-from south.modelsinspector import add_introspection_rules
class TelephoneField(models.CharField):
@@ -35,8 +34,6 @@ class TelephoneField(models.CharField):
kwargs['max_length'] = 40
models.CharField.__init__(self, **kwargs)
-add_introspection_rules([], [r'^rpki\.gui\.app\.models\.TelephoneField'])
-
class Parent(rpki.irdb.models.Parent):
"""proxy model for irdb Parent"""
@@ -123,7 +120,7 @@ class Alert(models.Model):
class Conf(rpki.irdb.models.ResourceHolderCA):
"""This is the center of the universe, also known as a place to
- have a handle on a resource-holding entity. It's the <self>
+ have a handle on a resource-holding entity. It's the <tenant/>
in the rpkid schema.
"""
@@ -262,7 +259,7 @@ class ResourceCert(models.Model):
not_after = models.DateTimeField()
# Locator for this object. Used to look up the validation status, expiry
- # of ancestor certs in cacheview
+ # of ancestor certs in gui_rpki_cache
uri = models.CharField(max_length=255)
def __unicode__(self):
diff --git a/rpki/gui/app/range_list.py b/rpki/gui/app/range_list.py
index 21fd1f29..5cb4f5e4 100755
--- a/rpki/gui/app/range_list.py
+++ b/rpki/gui/app/range_list.py
@@ -70,6 +70,7 @@ class RangeList(list):
def difference(self, other):
"""Return a RangeList object which contains ranges in this object which
are not in "other"."""
+
it = iter(other)
try:
@@ -85,6 +86,7 @@ class RangeList(list):
def V(v):
"""convert the integer value to the appropriate type for this
range"""
+
return x.__class__.datum_type(v)
try:
diff --git a/rpki/gui/app/south_migrations/0001_initial.py b/rpki/gui/app/south_migrations/0001_initial.py
new file mode 100644
index 00000000..80877901
--- /dev/null
+++ b/rpki/gui/app/south_migrations/0001_initial.py
@@ -0,0 +1,192 @@
+# -*- coding: utf-8 -*-
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+ # Adding model 'ResourceCert'
+ db.create_table('app_resourcecert', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('parent', self.gf('django.db.models.fields.related.ForeignKey')(related_name='certs', to=orm['irdb.Parent'])),
+ ('not_before', self.gf('django.db.models.fields.DateTimeField')()),
+ ('not_after', self.gf('django.db.models.fields.DateTimeField')()),
+ ('uri', self.gf('django.db.models.fields.CharField')(max_length=255)),
+ ))
+ db.send_create_signal('app', ['ResourceCert'])
+
+ # Adding model 'ResourceRangeAddressV4'
+ db.create_table('app_resourcerangeaddressv4', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('prefix_min', self.gf('rpki.gui.models.IPv4AddressField')(db_index=True)),
+ ('prefix_max', self.gf('rpki.gui.models.IPv4AddressField')(db_index=True)),
+ ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='address_ranges', to=orm['app.ResourceCert'])),
+ ))
+ db.send_create_signal('app', ['ResourceRangeAddressV4'])
+
+ # Adding model 'ResourceRangeAddressV6'
+ db.create_table('app_resourcerangeaddressv6', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('prefix_min', self.gf('rpki.gui.models.IPv6AddressField')(db_index=True)),
+ ('prefix_max', self.gf('rpki.gui.models.IPv6AddressField')(db_index=True)),
+ ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='address_ranges_v6', to=orm['app.ResourceCert'])),
+ ))
+ db.send_create_signal('app', ['ResourceRangeAddressV6'])
+
+ # Adding model 'ResourceRangeAS'
+ db.create_table('app_resourcerangeas', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('min', self.gf('django.db.models.fields.PositiveIntegerField')()),
+ ('max', self.gf('django.db.models.fields.PositiveIntegerField')()),
+ ('cert', self.gf('django.db.models.fields.related.ForeignKey')(related_name='asn_ranges', to=orm['app.ResourceCert'])),
+ ))
+ db.send_create_signal('app', ['ResourceRangeAS'])
+
+ # Adding model 'GhostbusterRequest'
+ db.create_table('app_ghostbusterrequest', (
+ ('ghostbusterrequest_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['irdb.GhostbusterRequest'], unique=True, primary_key=True)),
+ ('full_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
+ ('family_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
+ ('given_name', self.gf('django.db.models.fields.CharField')(max_length=20)),
+ ('additional_name', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
+ ('honorific_prefix', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
+ ('honorific_suffix', self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True)),
+ ('email_address', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
+ ('organization', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
+ ('telephone', self.gf('rpki.gui.app.models.TelephoneField')(max_length=40, null=True, blank=True)),
+ ('box', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ('extended', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
+ ('street', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
+ ('city', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ('region', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ('code', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ('country', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True)),
+ ))
+ db.send_create_signal('app', ['GhostbusterRequest'])
+
+ # Adding model 'Timestamp'
+ db.create_table('app_timestamp', (
+ ('name', self.gf('django.db.models.fields.CharField')(max_length=30, primary_key=True)),
+ ('ts', self.gf('django.db.models.fields.DateTimeField')()),
+ ))
+ db.send_create_signal('app', ['Timestamp'])
+
+
+ def backwards(self, orm):
+ # Deleting model 'ResourceCert'
+ db.delete_table('app_resourcecert')
+
+ # Deleting model 'ResourceRangeAddressV4'
+ db.delete_table('app_resourcerangeaddressv4')
+
+ # Deleting model 'ResourceRangeAddressV6'
+ db.delete_table('app_resourcerangeaddressv6')
+
+ # Deleting model 'ResourceRangeAS'
+ db.delete_table('app_resourcerangeas')
+
+ # Deleting model 'GhostbusterRequest'
+ db.delete_table('app_ghostbusterrequest')
+
+ # Deleting model 'Timestamp'
+ db.delete_table('app_timestamp')
+
+
+ models = {
+ 'app.ghostbusterrequest': {
+ 'Meta': {'ordering': "('family_name', 'given_name')", 'object_name': 'GhostbusterRequest', '_ormbases': ['irdb.GhostbusterRequest']},
+ 'additional_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
+ 'box': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'code': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'country': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
+ 'extended': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
+ 'family_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
+ 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
+ 'ghostbusterrequest_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['irdb.GhostbusterRequest']", 'unique': 'True', 'primary_key': 'True'}),
+ 'given_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
+ 'honorific_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
+ 'honorific_suffix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
+ 'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
+ 'region': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
+ 'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
+ 'telephone': ('rpki.gui.app.models.TelephoneField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
+ },
+ 'app.resourcecert': {
+ 'Meta': {'object_name': 'ResourceCert'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'not_after': ('django.db.models.fields.DateTimeField', [], {}),
+ 'not_before': ('django.db.models.fields.DateTimeField', [], {}),
+ 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'certs'", 'to': "orm['irdb.Parent']"}),
+ 'uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
+ },
+ 'app.resourcerangeaddressv4': {
+ 'Meta': {'ordering': "('prefix_min',)", 'object_name': 'ResourceRangeAddressV4'},
+ 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'address_ranges'", 'to': "orm['app.ResourceCert']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'prefix_max': ('rpki.gui.models.IPv4AddressField', [], {'db_index': 'True'}),
+ 'prefix_min': ('rpki.gui.models.IPv4AddressField', [], {'db_index': 'True'})
+ },
+ 'app.resourcerangeaddressv6': {
+ 'Meta': {'ordering': "('prefix_min',)", 'object_name': 'ResourceRangeAddressV6'},
+ 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'address_ranges_v6'", 'to': "orm['app.ResourceCert']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'prefix_max': ('rpki.gui.models.IPv6AddressField', [], {'db_index': 'True'}),
+ 'prefix_min': ('rpki.gui.models.IPv6AddressField', [], {'db_index': 'True'})
+ },
+ 'app.resourcerangeas': {
+ 'Meta': {'ordering': "('min', 'max')", 'object_name': 'ResourceRangeAS'},
+ 'cert': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'asn_ranges'", 'to': "orm['app.ResourceCert']"}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'max': ('django.db.models.fields.PositiveIntegerField', [], {}),
+ 'min': ('django.db.models.fields.PositiveIntegerField', [], {})
+ },
+ 'app.timestamp': {
+ 'Meta': {'object_name': 'Timestamp'},
+ 'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'}),
+ 'ts': ('django.db.models.fields.DateTimeField', [], {})
+ },
+ 'irdb.ghostbusterrequest': {
+ 'Meta': {'object_name': 'GhostbusterRequest'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ghostbuster_requests'", 'to': "orm['irdb.ResourceHolderCA']"}),
+ 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ghostbuster_requests'", 'null': 'True', 'to': "orm['irdb.Parent']"}),
+ 'vcard': ('django.db.models.fields.TextField', [], {})
+ },
+ 'irdb.parent': {
+ 'Meta': {'unique_together': "(('issuer', 'handle'),)", 'object_name': 'Parent', '_ormbases': ['irdb.Turtle']},
+ 'certificate': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
+ 'child_handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
+ 'handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
+ 'issuer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parents'", 'to': "orm['irdb.ResourceHolderCA']"}),
+ 'parent_handle': ('rpki.irdb.models.HandleField', [], {'max_length': '120'}),
+ 'referral_authorization': ('rpki.irdb.models.SignedReferralField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
+ 'referrer': ('rpki.irdb.models.HandleField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
+ 'repository_type': ('rpki.irdb.models.EnumField', [], {}),
+ 'ta': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
+ 'turtle_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['irdb.Turtle']", 'unique': 'True', 'primary_key': 'True'})
+ },
+ 'irdb.resourceholderca': {
+ 'Meta': {'object_name': 'ResourceHolderCA'},
+ 'certificate': ('rpki.irdb.models.CertificateField', [], {'default': 'None', 'blank': 'True'}),
+ 'handle': ('rpki.irdb.models.HandleField', [], {'unique': 'True', 'max_length': '120'}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'last_crl_update': ('rpki.irdb.models.SundialField', [], {}),
+ 'latest_crl': ('rpki.irdb.models.CRLField', [], {'default': 'None', 'blank': 'True'}),
+ 'next_crl_number': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
+ 'next_crl_update': ('rpki.irdb.models.SundialField', [], {}),
+ 'next_serial': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
+ 'private_key': ('rpki.irdb.models.RSAKeyField', [], {'default': 'None', 'blank': 'True'})
+ },
+ 'irdb.turtle': {
+ 'Meta': {'object_name': 'Turtle'},
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'service_uri': ('django.db.models.fields.CharField', [], {'max_length': '255'})
+ }
+ }
+
+ complete_apps = ['app'] \ No newline at end of file
diff --git a/rpki/gui/app/migrations/0002_auto__add_field_resourcecert_conf.py b/rpki/gui/app/south_migrations/0002_auto__add_field_resourcecert_conf.py
index d3326f90..d3326f90 100644
--- a/rpki/gui/app/migrations/0002_auto__add_field_resourcecert_conf.py
+++ b/rpki/gui/app/south_migrations/0002_auto__add_field_resourcecert_conf.py
diff --git a/rpki/gui/app/migrations/0003_set_conf_from_parent.py b/rpki/gui/app/south_migrations/0003_set_conf_from_parent.py
index a90a11cc..a90a11cc 100644
--- a/rpki/gui/app/migrations/0003_set_conf_from_parent.py
+++ b/rpki/gui/app/south_migrations/0003_set_conf_from_parent.py
diff --git a/rpki/gui/app/migrations/0004_auto__chg_field_resourcecert_conf.py b/rpki/gui/app/south_migrations/0004_auto__chg_field_resourcecert_conf.py
index a236ad4a..a236ad4a 100644
--- a/rpki/gui/app/migrations/0004_auto__chg_field_resourcecert_conf.py
+++ b/rpki/gui/app/south_migrations/0004_auto__chg_field_resourcecert_conf.py
diff --git a/rpki/gui/app/migrations/0005_auto__chg_field_resourcecert_parent.py b/rpki/gui/app/south_migrations/0005_auto__chg_field_resourcecert_parent.py
index 11e9c814..11e9c814 100644
--- a/rpki/gui/app/migrations/0005_auto__chg_field_resourcecert_parent.py
+++ b/rpki/gui/app/south_migrations/0005_auto__chg_field_resourcecert_parent.py
diff --git a/rpki/gui/app/migrations/0006_add_conf_acl.py b/rpki/gui/app/south_migrations/0006_add_conf_acl.py
index 88fe8171..88fe8171 100644
--- a/rpki/gui/app/migrations/0006_add_conf_acl.py
+++ b/rpki/gui/app/south_migrations/0006_add_conf_acl.py
diff --git a/rpki/gui/app/migrations/0007_default_acls.py b/rpki/gui/app/south_migrations/0007_default_acls.py
index 40656d0f..40656d0f 100644
--- a/rpki/gui/app/migrations/0007_default_acls.py
+++ b/rpki/gui/app/south_migrations/0007_default_acls.py
diff --git a/rpki/gui/app/migrations/0008_add_alerts.py b/rpki/gui/app/south_migrations/0008_add_alerts.py
index 77af68d2..77af68d2 100644
--- a/rpki/gui/app/migrations/0008_add_alerts.py
+++ b/rpki/gui/app/south_migrations/0008_add_alerts.py
diff --git a/rpki/gui/app/south_migrations/__init__.py b/rpki/gui/app/south_migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/gui/app/south_migrations/__init__.py
diff --git a/rpki/gui/app/views.py b/rpki/gui/app/views.py
index bf152f8e..03c7c168 100644
--- a/rpki/gui/app/views.py
+++ b/rpki/gui/app/views.py
@@ -1,5 +1,5 @@
# Copyright (C) 2010, 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012, 2014 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2014, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -23,7 +23,6 @@ __version__ = '$Id$'
import os
import os.path
-from tempfile import NamedTemporaryFile
import cStringIO
import csv
import logging
@@ -42,7 +41,8 @@ from django.forms.formsets import formset_factory, BaseFormSet
from django.contrib import messages
from django.db.models import Q
-from rpki.irdb import Zookeeper, ChildASN, ChildNet, ROARequestPrefix
+from rpki.irdb import Zookeeper
+from rpki.irdb.models import ChildASN, ChildNet, ROARequestPrefix
from rpki.gui.app import models, forms, glue, range_list
from rpki.resource_set import (resource_range_as, resource_range_ip,
roa_prefix_ipv4)
@@ -50,7 +50,6 @@ from rpki import sundial
import rpki.exceptions
import rpki.csv_utils
-from rpki.gui.cacheview.models import ROA
from rpki.gui.routeview.models import RouteOrigin
from rpki.gui.decorators import tls_required
@@ -136,10 +135,6 @@ def generic_import(request, queryset, configure, form_class=None,
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
- tmpf = NamedTemporaryFile(prefix='import', suffix='.xml',
- delete=False)
- tmpf.write(form.cleaned_data['xml'].read())
- tmpf.close()
z = Zookeeper(handle=conf.handle)
handle = form.cleaned_data.get('handle')
# CharField uses an empty string for the empty value, rather than
@@ -148,27 +143,25 @@ def generic_import(request, queryset, configure, form_class=None,
if handle == '':
handle = None
try:
- # configure_repository returns None, so can't use tuple expansion
- # here. Unpack the tuple below if post_import_redirect is None.
- r = configure(z, tmpf.name, handle)
+ # configure_repository returns None, so can't use tuple expansion
+ # here. Unpack the tuple below if post_import_redirect is None.
+ r = configure(z, form.cleaned_data['xml'], handle)
except lxml.etree.XMLSyntaxError as e:
- logger.exception('caught XMLSyntaxError while parsing uploaded file')
+ logger.exception('caught XMLSyntaxError while parsing uploaded file')
messages.error(
request,
'The uploaded file has an invalid XML syntax'
)
else:
- # force rpkid run now
- z.synchronize_ca(poke=True)
- if post_import_redirect:
- url = post_import_redirect
- else:
- _, handle = r
- url = queryset.get(issuer=conf,
- handle=handle).get_absolute_url()
- return http.HttpResponseRedirect(url)
- finally:
- os.remove(tmpf.name)
+ # force rpkid run now
+ z.synchronize_ca(poke=True)
+ if post_import_redirect:
+ url = post_import_redirect
+ else:
+ _, handle = r
+ url = queryset.get(issuer=conf,
+ handle=handle).get_absolute_url()
+ return http.HttpResponseRedirect(url)
else:
form = form_class()
@@ -298,10 +291,10 @@ def serve_xml(content, basename, ext='xml'):
`basename` is the prefix to specify for the XML filename.
- `csv` is the type (default: xml)
+ `ext` is the type (default: xml)
"""
- resp = http.HttpResponse(content, mimetype='application/%s' % ext)
+ resp = http.HttpResponse(content, content_type='application/%s' % ext)
resp['Content-Disposition'] = 'attachment; filename=%s.%s' % (basename, ext)
return resp
@@ -332,13 +325,10 @@ def import_asns(request):
if request.method == 'POST':
form = forms.ImportCSVForm(request.POST, request.FILES)
if form.is_valid():
- f = NamedTemporaryFile(prefix='asns', suffix='.csv', delete=False)
- f.write(request.FILES['csv'].read())
- f.close()
z = Zookeeper(handle=conf.handle, disable_signal_handlers=True)
try:
z.load_asns(
- f.name,
+ request.FILES['csv'],
ignore_missing_children=form.cleaned_data['ignore_missing_children']
)
except rpki.irdb.models.Child.DoesNotExist:
@@ -353,8 +343,6 @@ def import_asns(request):
z.run_rpkid_now()
messages.success(request, 'Successfully imported AS delgations from CSV file.')
return redirect(dashboard)
- finally:
- os.unlink(f.name)
else:
form = forms.ImportCSVForm()
return render(request, 'app/import_resource_form.html', {
@@ -381,13 +369,10 @@ def import_prefixes(request):
if request.method == 'POST':
form = forms.ImportCSVForm(request.POST, request.FILES)
if form.is_valid():
- f = NamedTemporaryFile(prefix='prefixes', suffix='.csv', delete=False)
- f.write(request.FILES['csv'].read())
- f.close()
z = Zookeeper(handle=conf.handle, disable_signal_handlers=True)
try:
z.load_prefixes(
- f.name,
+ request.FILES['csv'],
ignore_missing_children=form.cleaned_data['ignore_missing_children']
)
except rpki.irdb.models.Child.DoesNotExist:
@@ -399,8 +384,6 @@ def import_prefixes(request):
z.run_rpkid_now()
messages.success(request, 'Successfully imported AS delgations from CSV file.')
return redirect(dashboard)
- finally:
- os.unlink(f.name)
else:
form = forms.ImportCSVForm()
return render(request, 'app/import_resource_form.html', {
@@ -474,10 +457,10 @@ def child_add_prefix(request, pk):
child.address_ranges.create(start_ip=str(r.min), end_ip=str(r.max),
version=version)
Zookeeper(
- handle=conf.handle,
- logstream=logstream,
- disable_signal_handlers=True
- ).run_rpkid_now()
+ handle=conf.handle,
+ logstream=logstream,
+ disable_signal_handlers=True
+ ).run_rpkid_now()
return http.HttpResponseRedirect(child.get_absolute_url())
else:
form = forms.AddNetForm(child=child)
@@ -497,10 +480,10 @@ def child_add_asn(request, pk):
r = resource_range_as.parse_str(asns)
child.asns.create(start_as=r.min, end_as=r.max)
Zookeeper(
- handle=conf.handle,
- logstream=logstream,
- disable_signal_handlers=True
- ).run_rpkid_now()
+ handle=conf.handle,
+ logstream=logstream,
+ disable_signal_handlers=True
+ ).run_rpkid_now()
return http.HttpResponseRedirect(child.get_absolute_url())
else:
form = forms.AddASNForm(child=child)
@@ -531,10 +514,10 @@ def child_edit(request, pk):
models.ChildASN.objects.filter(child=child).exclude(pk__in=form.cleaned_data.get('as_ranges')).delete()
models.ChildNet.objects.filter(child=child).exclude(pk__in=form.cleaned_data.get('address_ranges')).delete()
Zookeeper(
- handle=conf.handle,
- logstream=logstream,
- disable_signal_handlers=True
- ).run_rpkid_now()
+ handle=conf.handle,
+                    logstream=logstream,
+ disable_signal_handlers=True
+ ).run_rpkid_now()
return http.HttpResponseRedirect(child.get_absolute_url())
else:
form = form_class(initial={
@@ -713,27 +696,27 @@ def roa_create_multi(request):
v = []
rng.chop_into_prefixes(v)
init.extend([{'asn': asn, 'prefix': str(p)} for p in v])
- extra = 0 if init else 1
+ extra = 0 if init else 1
formset = formset_factory(forms.ROARequestFormFactory(conf), extra=extra)(initial=init)
elif request.method == 'POST':
formset = formset_factory(forms.ROARequestFormFactory(conf), extra=0)(request.POST, request.FILES)
- # We need to check .has_changed() because .is_valid() will return true
- # if the user clicks the Preview button without filling in the blanks
- # in the ROA form, leaving the form invalid from this view's POV.
+ # We need to check .has_changed() because .is_valid() will return true
+ # if the user clicks the Preview button without filling in the blanks
+ # in the ROA form, leaving the form invalid from this view's POV.
if formset.has_changed() and formset.is_valid():
routes = []
v = []
query = Q() # for matching routes
roas = []
for form in formset:
- asn = form.cleaned_data['asn']
- rng = resource_range_ip.parse_str(form.cleaned_data['prefix'])
- max_prefixlen = int(form.cleaned_data['max_prefixlen'])
+ asn = form.cleaned_data['asn']
+ rng = resource_range_ip.parse_str(form.cleaned_data['prefix'])
+ max_prefixlen = int(form.cleaned_data['max_prefixlen'])
protect_children = form.cleaned_data['protect_children']
roas.append((rng, max_prefixlen, asn, protect_children))
- v.append({'prefix': str(rng), 'max_prefixlen': max_prefixlen,
- 'asn': asn})
+ v.append({'prefix': str(rng), 'max_prefixlen': max_prefixlen,
+ 'asn': asn})
query |= Q(prefix_min__gte=rng.min, prefix_max__lte=rng.max)
@@ -903,14 +886,10 @@ def roa_import(request):
if request.method == 'POST':
form = forms.ImportCSVForm(request.POST, request.FILES)
if form.is_valid():
- import tempfile
- tmp = tempfile.NamedTemporaryFile(suffix='.csv', prefix='roas', delete=False)
- tmp.write(request.FILES['csv'].read())
- tmp.close()
z = Zookeeper(handle=request.session['handle'],
disable_signal_handlers=True)
try:
- z.load_roa_requests(tmp.name)
+ z.load_roa_requests(request.FILES['csv'])
except rpki.csv_utils.BadCSVSyntax as e:
messages.error(request,
'CSV has bad syntax: %s' % (e,))
@@ -918,8 +897,6 @@ def roa_import(request):
z.run_rpkid_now()
messages.success(request, 'Successfully imported ROAs.')
return redirect(dashboard)
- finally:
- os.unlink(tmp.name)
else:
form = forms.ImportCSVForm()
return render(request, 'app/import_resource_form.html', {
@@ -939,7 +916,7 @@ def roa_export(request):
# each roa prefix gets a unique group so rpkid will issue separate roas
for group, roapfx in enumerate(ROARequestPrefix.objects.filter(roa_request__issuer=conf)):
csv_writer.writerow([str(roapfx.as_roa_prefix()), roapfx.roa_request.asn, '%s-%d' % (conf.handle, group)])
- resp = http.HttpResponse(f.getvalue(), mimetype='application/csv')
+ resp = http.HttpResponse(f.getvalue(), content_type='application/csv')
resp['Content-Disposition'] = 'attachment; filename=roas.csv'
return resp
@@ -1215,7 +1192,7 @@ def resource_holder_delete(request, pk):
form = forms.Empty(request.POST)
if form.is_valid():
z = Zookeeper(handle=conf.handle, logstream=log)
- z.delete_self()
+ z.delete_tenant()
z.synchronize_deleted_ca()
return redirect(resource_holder_list)
else:
@@ -1239,22 +1216,13 @@ def resource_holder_create(request):
zk_child = Zookeeper(handle=handle, logstream=log)
identity_xml = zk_child.initialize_resource_bpki()
if parent:
- # FIXME etree_wrapper should allow us to deal with file objects
- t = NamedTemporaryFile(delete=False)
- t.close()
-
- identity_xml.save(t.name)
zk_parent = Zookeeper(handle=parent.handle, logstream=log)
- parent_response, _ = zk_parent.configure_child(t.name)
- parent_response.save(t.name)
+ parent_response, _ = zk_parent.configure_child(identity_xml)
zk_parent.synchronize_ca()
- repo_req, _ = zk_child.configure_parent(t.name)
- repo_req.save(t.name)
- repo_resp, _ = zk_parent.configure_publication_client(t.name)
- repo_resp.save(t.name)
+ repo_req, _ = zk_child.configure_parent(parent_response)
+ repo_resp, _ = zk_parent.configure_publication_client(repo_req)
zk_parent.synchronize_pubd()
- zk_child.configure_repository(t.name)
- os.remove(t.name)
+ zk_child.configure_repository(repo_resp)
zk_child.synchronize_ca()
return redirect(resource_holder_list)
else:
@@ -1460,14 +1428,9 @@ class RouterImportView(FormView):
def form_valid(self, form):
conf = get_conf(self.request.user, self.request.session['handle'])
- tmpf = NamedTemporaryFile(prefix='import', suffix='.xml',
- delete=False)
- tmpf.write(form.cleaned_data['xml'].read())
- tmpf.close()
z = Zookeeper(handle=conf.handle, disable_signal_handlers=True)
- z.add_router_certificate_request(tmpf.name)
+ z.add_router_certificate_request(form.cleaned_data['xml'])
z.run_rpkid_now()
- os.remove(tmpf.name)
return super(RouterImportView, self).form_valid(form)
def get_context_data(self, **kwargs):
diff --git a/rpki/gui/cacheview/forms.py b/rpki/gui/cacheview/forms.py
deleted file mode 100644
index 7ae3601f..00000000
--- a/rpki/gui/cacheview/forms.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2013 SPARTA, Inc. a Parsons Company
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = '$Id$'
-
-from django import forms
-
-from rpki.gui.cacheview.misc import parse_ipaddr
-from rpki.exceptions import BadIPResource
-from rpki.resource_set import resource_range_as
-
-
-class SearchForm(forms.Form):
- asn = forms.CharField(required=False, help_text='AS or range', label='AS')
- addr = forms.CharField(required=False, max_length=40, help_text='range/CIDR', label='IP Address')
-
- def clean(self):
- asn = self.cleaned_data.get('asn')
- addr = self.cleaned_data.get('addr')
- if (asn and addr) or ((not asn) and (not addr)):
- raise forms.ValidationError('Please specify either an AS or IP range, not both')
-
- if asn:
- try:
- resource_range_as.parse_str(asn)
- except ValueError:
- raise forms.ValidationError('invalid AS range')
-
- if addr:
- #try:
- parse_ipaddr(addr)
- #except BadIPResource:
- # raise forms.ValidationError('invalid IP address range/prefix')
-
- return self.cleaned_data
-
-
-class SearchForm2(forms.Form):
- resource = forms.CharField(required=True)
diff --git a/rpki/gui/cacheview/misc.py b/rpki/gui/cacheview/misc.py
deleted file mode 100644
index 54431224..00000000
--- a/rpki/gui/cacheview/misc.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-from rpki.resource_set import resource_range_ipv4, resource_range_ipv6
-from rpki.exceptions import BadIPResource
-
-def parse_ipaddr(s):
- # resource_set functions only accept str
- if isinstance(s, unicode):
- s = s.encode()
- s = s.strip()
- r = resource_range_ipv4.parse_str(s)
- try:
- r = resource_range_ipv4.parse_str(s)
- return 4, r
- except BadIPResource:
- r = resource_range_ipv6.parse_str(s)
- return 6, r
-
-# vim:sw=4 ts=8 expandtab
diff --git a/rpki/gui/cacheview/templates/cacheview/addressrange_detail.html b/rpki/gui/cacheview/templates/cacheview/addressrange_detail.html
deleted file mode 100644
index 76edc1ba..00000000
--- a/rpki/gui/cacheview/templates/cacheview/addressrange_detail.html
+++ /dev/null
@@ -1,18 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-<h1>{% block title %}IP Range Detail{% endblock %}</h1>
-
-<p>
-IP Range: {{ object }}
-</p>
-
-<p>Covered by the following resource certs:</p>
-
-<ul>
-{% for cert in object.certs.all %}
-<li><a href="{{ cert.get_absolute_url }}">{{ cert }}</a></li>
-{% endfor %}
-</ul>
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/cacheview_base.html b/rpki/gui/cacheview/templates/cacheview/cacheview_base.html
deleted file mode 100644
index ec71d740..00000000
--- a/rpki/gui/cacheview/templates/cacheview/cacheview_base.html
+++ /dev/null
@@ -1,10 +0,0 @@
-{% extends "base.html" %}
-{% load url from future %}
-
-{% block sidebar %}
-<form method='post' action='{% url 'res-search' %}'>
- {% csrf_token %}
- <input type='text' id='id_resource' name='resource' placeholder='prefix or AS'>
- <button type='submit'>Search</button>
-</form>
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/cert_detail.html b/rpki/gui/cacheview/templates/cacheview/cert_detail.html
deleted file mode 100644
index 256e7780..00000000
--- a/rpki/gui/cacheview/templates/cacheview/cert_detail.html
+++ /dev/null
@@ -1,105 +0,0 @@
-{% extends "cacheview/signedobject_detail.html" %}
-
-{% block title %}
-Resource Certificate Detail
-{% endblock %}
-
-{% block detail %}
-
-<h2>RFC3779 Resources</h2>
-
-<table class='table table-striped'>
- <thead>
- <tr><th>AS Ranges</th><th>IP Ranges</th></tr>
- </thead>
- <tbody>
- <tr>
- <td style='text-align:left;vertical-align:top'>
- <ul class='compact'>
- {% for asn in object.asns.all %}
- <li><a href="{{ asn.get_absolute_url }}">{{ asn }}</a></li>
- {% endfor %}
- </ul>
- </td>
- <td style='text-align:left;vertical-align:top'>
- <ul class='compact'>
- {% for rng in object.addresses.all %}
- <li><a href="{{ rng.get_absolute_url }}">{{ rng }}</a></li>
- {% endfor %}
- </ul>
- </td>
- </tr>
- </tbody>
-</table>
-
-<div class='section'>
-<h2>Issued Objects</h2>
-<ul>
-
-{% if object.ghostbusters.all %}
- <li>
-<h3>Ghostbusters</h3>
-
-<table class='table table-striped'>
- <thead>
- <tr><th>Name</th><th>Expires</th></tr>
- </thead>
- <tbody>
-
-{% for g in object.ghostbusters.all %}
- <tr class='{{ g.status_id }}'>
- <td><a href="{{ g.get_absolute_url }}">{{ g }}</a></td>
- <td>{{ g.not_after }}</td>
- </tr>
- </tbody>
-{% endfor %}
-
-</table>
-{% endif %}
-
-{% if object.roas.all %}
- <li>
-<h3>ROAs</h3>
-<table class='table table-striped'>
- <thead>
- <tr><th>#</th><th>Prefix</th><th>AS</th><th>Expires</th></tr>
- </thead>
- <tbody>
- {% for roa in object.roas.all %}
- {% for pfx in roa.prefixes.all %}
- <tr class='{{ roa.status_id }}'>
- <td><a href="{{ roa.get_absolute_url }}">#</a></td>
- <td>{{ pfx }}</td>
- <td>{{ roa.asid }}</td>
- <td>{{ roa.not_after }}</td>
- </tr>
- {% endfor %}
- {% endfor %}
- </tbody>
-</table>
-{% endif %}
-
-{% if object.children.all %}
-<li>
-<h3>Children</h3>
-<table class='table table-striped'>
- <thead>
- <tr><th>Name</th><th>Expires</th></tr>
- </thead>
- <tbody>
-
- {% for child in object.children.all %}
- <tr class='{{ child.status_id }}'>
- <td><a href="{{ child.get_absolute_url }}">{{ child.name }}</a></td>
- <td>{{ child.not_after }}</td>
- </tr>
- {% endfor %}
- </tbody>
-</table>
-{% endif %}
-
-</ul>
-
-</div><!--issued objects-->
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/ghostbuster_detail.html b/rpki/gui/cacheview/templates/cacheview/ghostbuster_detail.html
deleted file mode 100644
index 4215f757..00000000
--- a/rpki/gui/cacheview/templates/cacheview/ghostbuster_detail.html
+++ /dev/null
@@ -1,13 +0,0 @@
-{% extends "cacheview/signedobject_detail.html" %}
-
-{% block title %}Ghostbuster Detail{% endblock %}
-
-{% block detail %}
-<p>
-<table class='table'>
- <tr><td>Full Name</td><td>{{ object.full_name }}</td></tr>
- <tr><td>Organization</td><td>{{ object.organization }}</td></tr>
- <tr><td>Email</td><td>{{ object.email_address }}</td></tr>
- <tr><td>Telephone</td><td>{{ object.telephone }}</td></tr>
-</table>
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/global_summary.html b/rpki/gui/cacheview/templates/cacheview/global_summary.html
deleted file mode 100644
index 0dbd0ffc..00000000
--- a/rpki/gui/cacheview/templates/cacheview/global_summary.html
+++ /dev/null
@@ -1,26 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-<div class='page-header'>
- <h1>Browse Global RPKI</h1>
-</div>
-
-<table class="table table-striped">
- <thead>
- <tr>
- <th>Name</th>
- <th>Expires</th>
- <th>URI</th>
- </tr>
- </thead>
- <tbody>
- {% for r in roots %}
- <tr>
- <td><a href="{{ r.get_absolute_url }}">{{ r.name }}</a></td>
- <td>{{ r.not_after }}</td>
- <td>{{ r.repo.uri }}</td>
- </tr>
- {% endfor %}
- </tbody>
-</table>
-{% endblock content %}
diff --git a/rpki/gui/cacheview/templates/cacheview/query_result.html b/rpki/gui/cacheview/templates/cacheview/query_result.html
deleted file mode 100644
index 0694c531..00000000
--- a/rpki/gui/cacheview/templates/cacheview/query_result.html
+++ /dev/null
@@ -1,21 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-
-<h1>{% block title %}Query Results{% endblock %}</h1>
-
-<table>
- <tr><th>Prefix</th><th>AS</th><th>Valid</th><th>Until</th></tr>
- {% for object in object_list %}
- <tr class='{{ object.1.status.kind_as_str }}'>
- <td>{{ object.0 }}</td>
- <td>{{ object.1.asid }}</td>
- <td><a href="{{ object.1.get_absolute_url }}">{{ object.1.ok }}</a></td>
- <td>{{ object.1.not_after }}</td>
- </tr>
- {% endfor %}
-</table>
-
-<p><a href="{% url rpki.gui.cacheview.views.query_view %}">new query</a></p>
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/roa_detail.html b/rpki/gui/cacheview/templates/cacheview/roa_detail.html
deleted file mode 100644
index 39cc547b..00000000
--- a/rpki/gui/cacheview/templates/cacheview/roa_detail.html
+++ /dev/null
@@ -1,18 +0,0 @@
-{% extends "cacheview/signedobject_detail.html" %}
-
-{% block title %}ROA Detail{% endblock %}
-
-{% block detail %}
-<p>
-<table>
- <tr><td>AS</td><td>{{ object.asid }}</td></tr>
-</table>
-
-<h2>Prefixes</h2>
-
-<ul>
-{% for pfx in object.prefixes.all %}
-<li>{{ pfx }}
-{% endfor %}
-</ul>
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/search_form.html b/rpki/gui/cacheview/templates/cacheview/search_form.html
deleted file mode 100644
index 1141615d..00000000
--- a/rpki/gui/cacheview/templates/cacheview/search_form.html
+++ /dev/null
@@ -1,17 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block title %}
-{{ search_type }} Search
-{% endblock %}
-
-{% block content %}
-
-<h1>{{search_type}} Search</h1>
-
-<form method='post' action='{{ request.url }}'>
- {% csrf_token %}
- {{ form.as_p }}
- <input type='submit' name='Search'>
-</form>
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/search_result.html b/rpki/gui/cacheview/templates/cacheview/search_result.html
deleted file mode 100644
index 7cbf852e..00000000
--- a/rpki/gui/cacheview/templates/cacheview/search_result.html
+++ /dev/null
@@ -1,42 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-
-<div class='page-header'>
- <h1>Search Results <small>{{ resource }}</small></h1>
-</div>
-
-<h2>Matching Resource Certificates</h2>
-{% if certs %}
-<ul>
-{% for cert in certs %}
-<li><a href="{{ cert.get_absolute_url }}">{{ cert }}</a>
-{% endfor %}
-</ul>
-{% else %}
-<p>none</p>
-{% endif %}
-
-<h2>Matching ROAs</h2>
-{% if roas %}
-<table class='table table-striped'>
- <thead>
- <tr>
- <th>#</th><th>Prefix</th><th>AS</th>
- </tr>
- </thead>
- <tbody>
-{% for roa in roas %}
-<tr>
- <td><a href="{{ roa.get_absolute_url }}">#</a></td>
- <td>{{ roa.prefixes.all.0 }}</td>
- <td>{{ roa.asid }}</td>
-</tr>
-{% endfor %}
-</tbody>
-</table>
-{% else %}
-<p>none</p>
-{% endif %}
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/templates/cacheview/signedobject_detail.html b/rpki/gui/cacheview/templates/cacheview/signedobject_detail.html
deleted file mode 100644
index 22ae3d27..00000000
--- a/rpki/gui/cacheview/templates/cacheview/signedobject_detail.html
+++ /dev/null
@@ -1,58 +0,0 @@
-{% extends "cacheview/cacheview_base.html" %}
-
-{% block content %}
-<div class='page-header'>
-<h1>{% block title %}Signed Object Detail{% endblock %}</h1>
-</div>
-
-<h2>Cert Info</h2>
-<table class='table table-striped'>
- <tr><td>Subject Name</td><td>{{ object.name }}</td></tr>
- <tr><td>SKI</td><td>{{ object.keyid }}</td></tr>
- {% if object.sia %}
- <tr><td>SIA</td><td>{{ object.sia }}</td></tr>
- {% endif %}
- <tr><td>Not Before</td><td>{{ object.not_before }}</td></tr>
- <tr><td>Not After</td><td>{{ object.not_after }}</td></tr>
-</table>
-
-<h2>Metadata</h2>
-
-<table class='table table-striped'>
- <tr><td>URI</td><td>{{ object.repo.uri }}</td></tr>
- <tr><td>Last Modified</td><td>{{ object.mtime_as_datetime|date:"DATETIME_FORMAT" }}</td></tr>
-</table>
-
-<h2>Validation Status</h2>
-<table class='table table-striped'>
- <thead>
- <tr><th>Timestamp</th><th>Generation</th><th>Status</th></tr>
- </thead>
- <tbody>
- {% for status in object.repo.statuses.all %}
- <tr class="{{ status.status.get_kind_display }}"><td>{{ status.timestamp }}</td><td>{{ status.get_generation_display }}</td><td>{{ status.status.status }}</td></tr>
- {% endfor %}
- </tbody>
-</table>
-
-<h2>X.509 Certificate Chain</h2>
-
-<table class='table table-striped'>
- <thead>
- <tr><th>Depth</th><th>Name</th></tr>
- </thead>
- <tbody>
-
-{% for cert in chain %}
-<tr class='{{ cert.1.status_id }}'>
- <td>{{ cert.0 }}</td>
- <td><a href="{{ cert.1.get_absolute_url }}">{{ cert.1.name }}</a></td>
-</tr>
-{% endfor %}
-</tbody>
-
-</table>
-
-{% block detail %}{% endblock %}
-
-{% endblock %}
diff --git a/rpki/gui/cacheview/tests.py b/rpki/gui/cacheview/tests.py
deleted file mode 100644
index 2247054b..00000000
--- a/rpki/gui/cacheview/tests.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""
-This file demonstrates two different styles of tests (one doctest and one
-unittest). These will both pass when you run "manage.py test".
-
-Replace these with more appropriate tests for your application.
-"""
-
-from django.test import TestCase
-
-class SimpleTest(TestCase):
- def test_basic_addition(self):
- """
- Tests that 1 + 1 always equals 2.
- """
- self.failUnlessEqual(1 + 1, 2)
-
-__test__ = {"doctest": """
-Another way to test that 1 + 1 is equal to 2.
-
->>> 1 + 1 == 2
-True
-"""}
-
diff --git a/rpki/gui/cacheview/urls.py b/rpki/gui/cacheview/urls.py
deleted file mode 100644
index cc03a587..00000000
--- a/rpki/gui/cacheview/urls.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2013 SPARTA, Inc. a Parsons Company
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = '$Id$'
-
-from django.conf.urls import patterns, url
-from rpki.gui.cacheview.views import (CertDetailView, RoaDetailView,
- GhostbusterDetailView)
-
-urlpatterns = patterns('',
- url(r'^search$', 'rpki.gui.cacheview.views.search_view',
- name='res-search'),
- url(r'^cert/(?P<pk>[^/]+)$', CertDetailView.as_view(), name='cert-detail'),
- url(r'^gbr/(?P<pk>[^/]+)$', GhostbusterDetailView.as_view(),
- name='ghostbuster-detail'),
- url(r'^roa/(?P<pk>[^/]+)$', RoaDetailView.as_view(), name='roa-detail'),
- (r'^$', 'rpki.gui.cacheview.views.global_summary'),
-)
-
-# vim:sw=4 ts=8 expandtab
diff --git a/rpki/gui/cacheview/util.py b/rpki/gui/cacheview/util.py
deleted file mode 100644
index 47425c8c..00000000
--- a/rpki/gui/cacheview/util.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham
-# Copyright (C) 2012, 2013 SPARTA, Inc. a Parsons Company
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = '$Id$'
-__all__ = ('import_rcynic_xml')
-
-default_logfile = '/var/rcynic/data/rcynic.xml'
-default_root = '/var/rcynic/data'
-object_accepted = None # set by import_rcynic_xml()
-
-import time
-import vobject
-import logging
-import os
-import stat
-from socket import getfqdn
-from cStringIO import StringIO
-
-from django.db import transaction
-import django.db.models
-
-import rpki
-import rpki.gui.app.timestamp
-from rpki.gui.app.models import Conf, Alert
-from rpki.gui.cacheview import models
-from rpki.rcynic import rcynic_xml_iterator, label_iterator
-from rpki.sundial import datetime
-from rpki.irdb.zookeeper import Zookeeper
-
-logger = logging.getLogger(__name__)
-
-
-class SomeoneShowMeAWayToGetOuttaHere(Exception):
- "'Cause I constantly pray I'll get outta here."
-
-
-def rcynic_cert(cert, obj):
- if not cert.sia_directory_uri:
- raise SomeoneShowMeAWayToGetOuttaHere
-
- obj.sia = cert.sia_directory_uri
-
- # object must be saved for the related manager methods below to work
- obj.save()
-
- # for the root cert, we can't set inst.issuer = inst until
- # after inst.save() has been called.
- if obj.issuer is None:
- obj.issuer = obj
- obj.save()
-
- # resources can change when a cert is updated
- obj.asns.clear()
- obj.addresses.clear()
-
- if cert.resources.asn.inherit:
- # FIXME: what happens when the parent's resources change and the child
- # cert is not reissued?
- obj.asns.add(*obj.issuer.asns.all())
- else:
- for asr in cert.resources.asn:
- logger.debug('processing %s', asr)
-
- attrs = {'min': asr.min, 'max': asr.max}
- q = models.ASRange.objects.filter(**attrs)
- if not q:
- obj.asns.create(**attrs)
- else:
- obj.asns.add(q[0])
-
- # obj.issuer is None the first time we process the root cert in the
- # hierarchy, so we need to guard against dereference
- for cls, addr_obj, addrset, parentset in (
- models.AddressRange, obj.addresses, cert.resources.v4,
- obj.issuer.addresses.all() if obj.issuer else []
- ), (
- models.AddressRangeV6, obj.addresses_v6, cert.resources.v6,
- obj.issuer.addresses_v6.all() if obj.issuer else []
- ):
- if addrset.inherit:
- addr_obj.add(*parentset)
- else:
- for rng in addrset:
- logger.debug('processing %s', rng)
-
- attrs = {'prefix_min': rng.min, 'prefix_max': rng.max}
- q = cls.objects.filter(**attrs)
- if not q:
- addr_obj.create(**attrs)
- else:
- addr_obj.add(q[0])
-
-
-def rcynic_roa(roa, obj):
- obj.asid = roa.asID
- # object must be saved for the related manager methods below to work
- obj.save()
- obj.prefixes.clear()
- obj.prefixes_v6.clear()
- for pfxset in roa.prefix_sets:
- if pfxset.__class__.__name__ == 'roa_prefix_set_ipv6':
- roa_cls = models.ROAPrefixV6
- prefix_obj = obj.prefixes_v6
- else:
- roa_cls = models.ROAPrefixV4
- prefix_obj = obj.prefixes
-
- for pfx in pfxset:
- attrs = {'prefix_min': pfx.min(),
- 'prefix_max': pfx.max(),
- 'max_length': pfx.max_prefixlen}
- q = roa_cls.objects.filter(**attrs)
- if not q:
- prefix_obj.create(**attrs)
- else:
- prefix_obj.add(q[0])
-
-
-def rcynic_gbr(gbr, obj):
- vcard = vobject.readOne(gbr.vcard)
- obj.full_name = vcard.fn.value if hasattr(vcard, 'fn') else None
- obj.email_address = vcard.email.value if hasattr(vcard, 'email') else None
- obj.telephone = vcard.tel.value if hasattr(vcard, 'tel') else None
- obj.organization = vcard.org.value[0] if hasattr(vcard, 'org') else None
- obj.save()
-
-LABEL_CACHE = {}
-
-# dict keeping mapping of uri to (handle, old status, new status) for objects
-# published by the local rpkid
-uris = {}
-
-dispatch = {
- 'rcynic_certificate': rcynic_cert,
- 'rcynic_roa': rcynic_roa,
- 'rcynic_ghostbuster': rcynic_gbr
-}
-
-model_class = {
- 'rcynic_certificate': models.Cert,
- 'rcynic_roa': models.ROA,
- 'rcynic_ghostbuster': models.Ghostbuster
-}
-
-
-def save_status(repo, vs):
- timestamp = datetime.fromXMLtime(vs.timestamp).to_sql()
- status = LABEL_CACHE[vs.status]
- g = models.generations_dict[vs.generation] if vs.generation else None
- repo.statuses.create(generation=g, timestamp=timestamp, status=status)
-
- # if this object is in our interest set, update with the current validation
- # status
- if repo.uri in uris:
- x, y, z, q = uris[repo.uri]
- valid = z or (status is object_accepted) # don't clobber previous True value
- uris[repo.uri] = x, y, valid, repo
-
- if status is not object_accepted:
- return
-
- cls = model_class[vs.file_class.__name__]
- # find the instance of the signedobject subclass that is associated with
- # this repo instance (may be empty when not accepted)
- inst_qs = cls.objects.filter(repo=repo)
-
- logger.debug('processing %s', vs.filename)
-
- if not inst_qs:
- inst = cls(repo=repo)
- logger.debug('object not found in db, creating new object cls=%s id=%s',
- cls, id(inst))
- else:
- inst = inst_qs[0]
-
- try:
- # determine if the object is changed/new
- mtime = os.stat(vs.filename)[stat.ST_MTIME]
- except OSError as e:
- logger.error('unable to stat %s: %s %s',
- vs.filename, type(e), e)
- # treat as if missing from rcynic.xml
- # use inst_qs rather than deleting inst so that we don't raise an
- # exception for newly created objects (inst_qs will be empty)
- inst_qs.delete()
- return
-
- if mtime != inst.mtime:
- inst.mtime = mtime
- try:
- obj = vs.obj # causes object to be lazily loaded
- except Exception, e:
- logger.warning('Caught %s while processing %s: %s',
- type(e), vs.filename, e)
- return
-
- inst.not_before = obj.notBefore.to_sql()
- inst.not_after = obj.notAfter.to_sql()
- inst.name = obj.subject
- inst.keyid = obj.ski
-
- # look up signing cert
- if obj.issuer == obj.subject:
- # self-signed cert (TA)
- assert isinstance(inst, models.Cert)
- inst.issuer = None
- else:
- # if an object has moved in the repository, the entry for
- # the old location will still be in the database, but
- # without any object_accepted in its validtion status
- qs = models.Cert.objects.filter(
- keyid=obj.aki,
- name=obj.issuer,
- repo__statuses__status=object_accepted
- )
- ncerts = len(qs)
- if ncerts == 0:
- logger.warning('unable to find signing cert with ski=%s (%s)', obj.aki, obj.issuer)
- return
- else:
- if ncerts > 1:
- # multiple matching certs, all of which are valid
- logger.warning('Found multiple certs matching ski=%s sn=%s', obj.aki, obj.issuer)
- for c in qs:
- logger.warning(c.repo.uri)
- # just use the first match
- inst.issuer = qs[0]
-
- try:
- # do object-specific tasks
- dispatch[vs.file_class.__name__](obj, inst)
- except SomeoneShowMeAWayToGetOuttaHere:
- logger.error("something wrong with %s, skipping", vs.filename)
- inst_qs.delete()
- return
- except:
- logger.error('caught exception while processing rcynic_object:\n'
- 'vs=' + repr(vs) + '\nobj=' + repr(obj))
- # .show() writes to stdout
- obj.show()
- raise
-
- logger.debug('object saved id=%s', id(inst))
- else:
- logger.debug('object is unchanged')
-
-
-@transaction.commit_on_success
-def process_cache(root, xml_file):
-
- last_uri = None
- repo = None
-
- logger.info('clearing validation statuses')
- models.ValidationStatus.objects.all().delete()
-
- logger.info('updating validation status')
- for vs in rcynic_xml_iterator(root, xml_file):
- if vs.uri != last_uri:
- repo, created = models.RepositoryObject.objects.get_or_create(uri=vs.uri)
- last_uri = vs.uri
- save_status(repo, vs)
-
- # garbage collection
- # remove all objects which have no ValidationStatus references, which
- # means they did not appear in the last XML output
- logger.info('performing garbage collection')
-
- # Delete all objects that have zero validation status elements.
- models.RepositoryObject.objects.annotate(num_statuses=django.db.models.Count('statuses')).filter(num_statuses=0).delete()
-
- # Delete all SignedObject instances that were not accepted. There may
- # exist rows for objects that were previously accepted.
- # See https://trac.rpki.net/ticket/588#comment:30
- #
- # We have to do this here rather than in save_status() because the
- # <validation_status/> elements are not guaranteed to be consecutive for a
- # given URI. see https://trac.rpki.net/ticket/625#comment:5
- models.SignedObject.objects.exclude(repo__statuses__status=object_accepted).delete()
-
- # ROAPrefixV* objects are M2M so they are not automatically deleted when
- # their ROA object disappears
- models.ROAPrefixV4.objects.annotate(num_roas=django.db.models.Count('roas')).filter(num_roas=0).delete()
- models.ROAPrefixV6.objects.annotate(num_roas=django.db.models.Count('roas')).filter(num_roas=0).delete()
- logger.info('done with garbage collection')
-
-
-@transaction.commit_on_success
-def process_labels(xml_file):
- logger.info('updating labels...')
-
- for label, kind, desc in label_iterator(xml_file):
- logger.debug('label=%s kind=%s desc=%s', label, kind, desc)
- if kind:
- q = models.ValidationLabel.objects.filter(label=label)
- if not q:
- obj = models.ValidationLabel(label=label)
- else:
- obj = q[0]
-
- obj.kind = models.kinds_dict[kind]
- obj.status = desc
- obj.save()
-
- LABEL_CACHE[label] = obj
-
-
-def fetch_published_objects():
- """Query rpkid for all objects published by local users, and look up the
- current validation status of each object. The validation status is used
- later to send alerts for objects which have transitioned to invalid.
-
- """
- logger.info('querying for published objects')
-
- handles = [conf.handle for conf in Conf.objects.all()]
- req = [rpki.left_right.list_published_objects_elt.make_pdu(action='list', self_handle=h, tag=h) for h in handles]
- z = Zookeeper()
- pdus = z.call_rpkid(*req)
- for pdu in pdus:
- if isinstance(pdu, rpki.left_right.list_published_objects_elt):
- # Look up the object in the rcynic cache
- qs = models.RepositoryObject.objects.filter(uri=pdu.uri)
- if qs:
- # get the current validity state
- valid = qs[0].statuses.filter(status=object_accepted).exists()
- uris[pdu.uri] = (pdu.self_handle, valid, False, None)
- logger.debug('adding ' + pdu.uri)
- else:
- # this object is not in the cache. it was either published
- # recently, or disappared previously. if it disappeared
- # previously, it has already been alerted. in either case, we
- # omit the uri from the list since we are interested only in
- # objects which were valid and are no longer valid
- pass
- elif isinstance(pdu, rpki.left_right.report_error_elt):
- logging.error('rpkid reported an error: %s', pdu.error_code)
-
-
-class Handle(object):
- def __init__(self):
- self.invalid = []
- self.missing = []
-
- def add_invalid(self, v):
- self.invalid.append(v)
-
- def add_missing(self, v):
- self.missing.append(v)
-
-
-def notify_invalid():
- """Send email alerts to the addresses registered in ghostbuster records for
- any invalid objects that were published by users of this system.
-
- """
-
- logger.info('sending notifications for invalid objects')
-
- # group invalid objects by user
- notify = {}
- for uri, v in uris.iteritems():
- handle, old_status, new_status, obj = v
-
- if obj is None:
- # object went missing
- n = notify.get(handle, Handle())
- n.add_missing(uri)
- # only select valid->invalid
- elif old_status and not new_status:
- n = notify.get(handle, Handle())
- n.add_invalid(obj)
-
- for handle, v in notify.iteritems():
- conf = Conf.objects.get(handle)
-
- msg = StringIO()
- msg.write('This is an alert about problems with objects published by '
- 'the resource handle %s.\n\n' % handle)
-
- if v.invalid:
- msg.write('The following objects were previously valid, but are '
- 'now invalid:\n')
-
- for o in v.invalid:
- msg.write('\n')
- msg.write(o.repo.uri)
- msg.write('\n')
- for s in o.statuses.all():
- msg.write('\t')
- msg.write(s.status.label)
- msg.write(': ')
- msg.write(s.status.status)
- msg.write('\n')
-
- if v.missing:
- msg.write('The following objects were previously valid but are no '
- 'longer in the cache:\n')
-
- for o in v.missing:
- msg.write(o)
- msg.write('\n')
-
- msg.write("""--
-You are receiving this email because your address is published in a Ghostbuster
-record, or is the default email address for this resource holder account on
-%s.""" % getfqdn())
-
- from_email = 'root@' + getfqdn()
- subj = 'invalid RPKI object alert for resource handle %s' % conf.handle
- conf.send_alert(subj, msg.getvalue(), from_email, severity=Alert.ERROR)
-
-
-def import_rcynic_xml(root=default_root, logfile=default_logfile):
- """Load the contents of rcynic.xml into the rpki.gui.cacheview database."""
-
- global object_accepted
-
- start = time.time()
- process_labels(logfile)
- object_accepted = LABEL_CACHE['object_accepted']
- fetch_published_objects()
- process_cache(root, logfile)
- notify_invalid()
-
- rpki.gui.app.timestamp.update('rcynic_import')
-
- stop = time.time()
- logger.info('elapsed time %d seconds.', (stop - start))
diff --git a/rpki/gui/cacheview/views.py b/rpki/gui/cacheview/views.py
deleted file mode 100644
index 94870eb2..00000000
--- a/rpki/gui/cacheview/views.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2013 SPARTA, Inc. a Parsons Company
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = '$Id$'
-
-from django.views.generic import DetailView
-from django.shortcuts import render
-from django.db.models import F
-
-from rpki.gui.cacheview import models, forms, misc
-from rpki.resource_set import resource_range_as, resource_range_ip
-from rpki.POW import IPAddress
-from rpki.exceptions import BadIPResource
-
-
-def cert_chain(obj):
- """
- returns an iterator covering all certs from the root cert down to the EE.
- """
- chain = [obj]
- while obj != obj.issuer:
- obj = obj.issuer
- chain.append(obj)
- return zip(range(len(chain)), reversed(chain))
-
-
-class SignedObjectDetailView(DetailView):
- def get_context_data(self, **kwargs):
- context = super(SignedObjectDetailView,
- self).get_context_data(**kwargs)
- context['chain'] = cert_chain(self.object)
- return context
-
-
-class RoaDetailView(SignedObjectDetailView):
- model = models.ROA
-
-
-class CertDetailView(SignedObjectDetailView):
- model = models.Cert
-
-
-class GhostbusterDetailView(SignedObjectDetailView):
- model = models.Ghostbuster
-
-
-def search_view(request):
- certs = None
- roas = None
-
- if request.method == 'POST':
- form = forms.SearchForm2(request.POST, request.FILES)
- if form.is_valid():
- resource = form.cleaned_data.get('resource')
- # try to determine the type of input given
- try:
- r = resource_range_as.parse_str(resource)
- certs = models.Cert.objects.filter(asns__min__gte=r.min,
- asns__max__lte=r.max)
- roas = models.ROA.objects.filter(asid__gte=r.min,
- asid__lte=r.max)
- except:
- try:
- r = resource_range_ip.parse_str(resource)
- if r.version == 4:
- certs = models.Cert.objects.filter(
- addresses__prefix_min__lte=r.min,
- addresses__prefix_max__gte=r.max)
- roas = models.ROA.objects.filter(
- prefixes__prefix_min__lte=r.min,
- prefixes__prefix_max__gte=r.max)
- else:
- certs = models.Cert.objects.filter(
- addresses_v6__prefix_min__lte=r.min,
- addresses_v6__prefix_max__gte=r.max)
- roas = models.ROA.objects.filter(
- prefixes_v6__prefix_min__lte=r.min,
- prefixes_v6__prefix_max__gte=r.max)
- except BadIPResource:
- pass
-
- return render(request, 'cacheview/search_result.html',
- {'resource': resource, 'certs': certs, 'roas': roas})
-
-
-def cmp_prefix(x, y):
- r = cmp(x[0].family, y[0].family)
- if r == 0:
- r = cmp(x[2], y[2]) # integer address
- if r == 0:
- r = cmp(x[0].bits, y[0].bits)
- if r == 0:
- r = cmp(x[0].max_length, y[0].max_length)
- if r == 0:
- r = cmp(x[1].asid, y[1].asid)
- return r
-
-
-#def cmp_prefix(x,y):
-# for attr in ('family', 'prefix', 'bits', 'max_length'):
-# r = cmp(getattr(x[0], attr), getattr(y[0], attr))
-# if r:
-# return r
-# return cmp(x[1].asid, y[1].asid)
-
-
-def query_view(request):
- """
- Allow the user to search for an AS or prefix, and show all published ROA
- information.
- """
-
- if request.method == 'POST':
- form = forms.SearchForm(request.POST, request.FILES)
- if form.is_valid():
- certs = None
- roas = None
-
- addr = form.cleaned_data.get('addr')
- asn = form.cleaned_data.get('asn')
-
- if addr:
- family, r = misc.parse_ipaddr(addr)
- prefixes = models.ROAPrefix.objects.filter(family=family, prefix=str(r.min))
-
- prefix_list = []
- for pfx in prefixes:
- for roa in pfx.roas.all():
- prefix_list.append((pfx, roa))
- elif asn:
- r = resource_range_as.parse_str(asn)
- roas = models.ROA.objects.filter(asid__gte=r.min, asid__lte=r.max)
-
- # display the results sorted by prefix
- prefix_list = []
- for roa in roas:
- for pfx in roa.prefixes.all():
- addr = IPAddress(pfx.prefix.encode())
- prefix_list.append((pfx, roa, addr))
- prefix_list.sort(cmp=cmp_prefix)
-
- return render('cacheview/query_result.html',
- {'object_list': prefix_list}, request)
- else:
- form = forms.SearchForm()
-
- return render('cacheview/search_form.html', {
- 'form': form, 'search_type': 'ROA '}, request)
-
-
-def global_summary(request):
- """Display a table summarizing the state of the global RPKI."""
-
- roots = models.Cert.objects.filter(issuer=F('pk')) # self-signed
-
- return render(request, 'cacheview/global_summary.html', {
- 'roots': roots
- })
-
-# vim:sw=4 ts=8 expandtab
diff --git a/rpki/gui/default_settings.py b/rpki/gui/default_settings.py
deleted file mode 100644
index a30b0362..00000000
--- a/rpki/gui/default_settings.py
+++ /dev/null
@@ -1,188 +0,0 @@
-"""
-This module contains static configuration settings for the web portal.
-"""
-
-__version__ = '$Id$'
-
-import os
-import random
-import string
-import socket
-
-import rpki.config
-import rpki.autoconf
-
-# Where to put static files.
-STATIC_ROOT = rpki.autoconf.datarootdir + '/rpki/media'
-
-# Must end with a slash!
-STATIC_URL = '/media/'
-
-# Where to email server errors.
-ADMINS = (('Administrator', 'root@localhost'),)
-
-LOGGING = {
- 'version': 1,
- 'formatters': {
- 'verbose': {
- # see http://docs.python.org/2.7/library/logging.html#logging.LogRecord
- 'format': '%(levelname)s %(asctime)s %(name)s %(message)s'
- },
- },
- 'handlers': {
- 'stderr': {
- 'class': 'logging.StreamHandler',
- 'level': 'DEBUG',
- 'formatter': 'verbose',
- },
- 'mail_admins': {
- 'level': 'ERROR',
- 'class': 'django.utils.log.AdminEmailHandler',
- },
- },
- 'loggers': {
- 'rpki.async': {
- # enabled for tracking https://trac.rpki.net/ticket/681
- # need to change this to WARNING once ticket is closed
- 'level': 'DEBUG',
- },
- # The Django default LOGGING configuration disables propagate on these
- # two loggers. Re-enable propagate so they will hit our root logger.
- 'django.request': {
- 'propagate': True,
- },
- 'django.security': {
- 'propagate': True,
- },
- },
- 'root': {
- 'level': 'WARNING',
- 'handlers': ['stderr', 'mail_admins'],
- },
-}
-
-# Load the SQL authentication bits from the system rpki.conf.
-rpki_config = rpki.config.parser(section='web_portal')
-
-DATABASES = {
- 'default': {
- 'ENGINE': 'django.db.backends.mysql',
- 'NAME': rpki_config.get('sql-database'),
- 'USER': rpki_config.get('sql-username'),
- 'PASSWORD': rpki_config.get('sql-password'),
-
- # Ensure the default storage engine is InnoDB since we need
- # foreign key support. The Django documentation suggests
- # removing this after the syncdb is performed as an optimization,
- # but there isn't an easy way to do this automatically.
-
- # Setting charset to latin1 is a disgusting kludge, but without
- # this MySQL 5.6 (and, proably, later) gets tetchy about ASN.1
- # DER stored in BLOB columns not being well-formed UTF8 (sic).
- # If you know of a better solution, tell us.
-
- 'OPTIONS': {
- 'init_command': 'SET storage_engine=INNODB',
- 'charset': 'latin1',
- }
- }
-}
-
-
-def select_tz():
- "Find a supported timezone that looks like UTC"
- for tz in ('UTC', 'GMT', 'Etc/UTC', 'Etc/GMT'):
- if os.path.exists('/usr/share/zoneinfo/' + tz):
- return tz
- # Can't determine the proper timezone, fall back to UTC and let Django
- # report the error to the user.
- return 'UTC'
-
-# Local time zone for this installation. Choices can be found here:
-# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
-# although not all choices may be available on all operating systems.
-# If running in a Windows environment this must be set to the same as your
-# system time zone.
-TIME_ZONE = select_tz()
-
-def get_secret_key():
- """Retrieve the secret-key value from rpki.conf or generate a random value
- if it is not present."""
- d = string.letters + string.digits
- val = ''.join([random.choice(d) for _ in range(50)])
- return rpki_config.get('secret-key', val)
-
-# Make this unique, and don't share it with anybody.
-SECRET_KEY = get_secret_key()
-
-# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
-# for details on why you might need this.
-def get_allowed_hosts():
- allowed_hosts = set(rpki_config.multiget("allowed-hosts"))
- allowed_hosts.add(socket.getfqdn())
- try:
- import netifaces
- for interface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(interface)
- for af in (netifaces.AF_INET, netifaces.AF_INET6):
- if af in addresses:
- for address in addresses[af]:
- if "addr" in address:
- allowed_hosts.add(address["addr"])
- except ImportError:
- pass
- return list(allowed_hosts)
-
-ALLOWED_HOSTS = get_allowed_hosts()
-
-DOWNLOAD_DIRECTORY = rpki_config.get('download-directory', '/var/tmp')
-
-# List of callables that know how to import templates from various sources.
-TEMPLATE_LOADERS = (
- 'django.template.loaders.filesystem.Loader',
- 'django.template.loaders.app_directories.Loader',
- 'django.template.loaders.eggs.Loader'
-)
-
-MIDDLEWARE_CLASSES = (
- 'django.middleware.common.CommonMiddleware',
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.middleware.csrf.CsrfViewMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.contrib.messages.middleware.MessageMiddleware'
-)
-
-ROOT_URLCONF = 'rpki.gui.urls'
-
-INSTALLED_APPS = (
- 'django.contrib.auth',
- #'django.contrib.admin',
- #'django.contrib.admindocs',
- 'django.contrib.contenttypes',
- 'django.contrib.sessions',
- 'django.contrib.staticfiles',
- 'rpki.irdb',
- 'rpki.gui.app',
- 'rpki.gui.cacheview',
- 'rpki.gui.routeview',
- 'south',
-)
-
-TEMPLATE_CONTEXT_PROCESSORS = (
- "django.contrib.auth.context_processors.auth",
- "django.core.context_processors.debug",
- "django.core.context_processors.i18n",
- "django.core.context_processors.media",
- "django.contrib.messages.context_processors.messages",
- "django.core.context_processors.request",
- "django.core.context_processors.static"
-)
-
-# Allow local site to override any setting above -- but if there's
-# anything that local sites routinely need to modify, please consider
-# putting that configuration into rpki.conf and just adding code here
-# to read that configuration.
-try:
- from local_settings import *
-except:
- pass
diff --git a/rpki/gui/gui_rpki_cache/__init__.py b/rpki/gui/gui_rpki_cache/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/__init__.py
diff --git a/rpki/gui/gui_rpki_cache/migrations/0001_initial.py b/rpki/gui/gui_rpki_cache/migrations/0001_initial.py
new file mode 100644
index 00000000..23625f56
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/migrations/0001_initial.py
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import rpki.gui.gui_rpki_cache.models
+import rpki.gui.models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='AddressRange',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='AddressRangeV6',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ASRange',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('min', models.BigIntegerField(validators=[rpki.gui.models.validate_asn])),
+ ('max', models.BigIntegerField(validators=[rpki.gui.models.validate_asn])),
+ ],
+ options={
+ 'ordering': ('min', 'max'),
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Cert',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.TextField()),
+ ('sha256', models.SlugField(unique=True, max_length=64)),
+ ('not_before', models.DateTimeField()),
+ ('not_after', models.DateTimeField()),
+ ('ski', models.SlugField(max_length=40)),
+ ('addresses', models.ManyToManyField(related_name='certs', to='gui_rpki_cache.AddressRange')),
+ ('addresses_v6', models.ManyToManyField(related_name='certs', to='gui_rpki_cache.AddressRangeV6')),
+ ('asns', models.ManyToManyField(related_name='certs', to='gui_rpki_cache.ASRange')),
+ ('issuer', models.ForeignKey(related_name='children', to='gui_rpki_cache.Cert', null=True)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Ghostbuster',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.TextField()),
+ ('sha256', models.SlugField(unique=True, max_length=64)),
+ ('not_before', models.DateTimeField()),
+ ('not_after', models.DateTimeField()),
+ ('full_name', models.CharField(max_length=40)),
+ ('email_address', models.EmailField(max_length=254, null=True, blank=True)),
+ ('organization', models.CharField(max_length=255, null=True, blank=True)),
+ ('telephone', rpki.gui.gui_rpki_cache.models.TelephoneField(max_length=255, null=True, blank=True)),
+ ('issuer', models.ForeignKey(related_name='ghostbusters', to='gui_rpki_cache.Cert')),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ROA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.TextField()),
+ ('sha256', models.SlugField(unique=True, max_length=64)),
+ ('not_before', models.DateTimeField()),
+ ('not_after', models.DateTimeField()),
+ ('asid', models.PositiveIntegerField()),
+ ('issuer', models.ForeignKey(related_name='roas', to='gui_rpki_cache.Cert')),
+ ],
+ options={
+ 'ordering': ('asid',),
+ },
+ ),
+ migrations.CreateModel(
+ name='ROAPrefixV4',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ('max_length', models.PositiveSmallIntegerField()),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ },
+ ),
+ migrations.CreateModel(
+ name='ROAPrefixV6',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('prefix_min', rpki.gui.models.IPAddressField(db_index=True)),
+ ('prefix_max', rpki.gui.models.IPAddressField(db_index=True)),
+ ('max_length', models.PositiveSmallIntegerField()),
+ ],
+ options={
+ 'ordering': ('prefix_min',),
+ },
+ ),
+ migrations.AddField(
+ model_name='roa',
+ name='prefixes',
+ field=models.ManyToManyField(related_name='roas', to='gui_rpki_cache.ROAPrefixV4'),
+ ),
+ migrations.AddField(
+ model_name='roa',
+ name='prefixes_v6',
+ field=models.ManyToManyField(related_name='roas', to='gui_rpki_cache.ROAPrefixV6'),
+ ),
+ ]
diff --git a/rpki/gui/gui_rpki_cache/migrations/0002_auto_20160411_2311.py b/rpki/gui/gui_rpki_cache/migrations/0002_auto_20160411_2311.py
new file mode 100644
index 00000000..e9ceaac0
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/migrations/0002_auto_20160411_2311.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('gui_rpki_cache', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='cert',
+ name='sha256',
+ ),
+ migrations.RemoveField(
+ model_name='ghostbuster',
+ name='sha256',
+ ),
+ migrations.RemoveField(
+ model_name='roa',
+ name='sha256',
+ ),
+ migrations.AlterField(
+ model_name='cert',
+ name='issuer',
+ field=models.ForeignKey(to='gui_rpki_cache.Cert', null=True),
+ ),
+ migrations.AlterField(
+ model_name='ghostbuster',
+ name='issuer',
+ field=models.ForeignKey(to='gui_rpki_cache.Cert', null=True),
+ ),
+ migrations.AlterField(
+ model_name='roa',
+ name='issuer',
+ field=models.ForeignKey(to='gui_rpki_cache.Cert', null=True),
+ ),
+ ]
diff --git a/rpki/gui/gui_rpki_cache/migrations/0003_auto_20160420_2146.py b/rpki/gui/gui_rpki_cache/migrations/0003_auto_20160420_2146.py
new file mode 100644
index 00000000..e43ab1de
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/migrations/0003_auto_20160420_2146.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('gui_rpki_cache', '0002_auto_20160411_2311'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='ghostbuster',
+ name='issuer',
+ field=models.ForeignKey(related_name='ghostbusters', to='gui_rpki_cache.Cert', null=True),
+ ),
+ migrations.AlterField(
+ model_name='roa',
+ name='issuer',
+ field=models.ForeignKey(related_name='roas', to='gui_rpki_cache.Cert', null=True),
+ ),
+ ]
diff --git a/rpki/gui/gui_rpki_cache/migrations/__init__.py b/rpki/gui/gui_rpki_cache/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/migrations/__init__.py
diff --git a/rpki/gui/cacheview/models.py b/rpki/gui/gui_rpki_cache/models.py
index c3ee8421..dd0739c0 100644
--- a/rpki/gui/cacheview/models.py
+++ b/rpki/gui/gui_rpki_cache/models.py
@@ -1,5 +1,5 @@
# Copyright (C) 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -13,16 +13,13 @@
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
-__version__ = '$Id$'
-
-from datetime import datetime
-import time
+__version__ = '$Id: $'
from django.db import models
-from django.core.urlresolvers import reverse
import rpki.resource_set
import rpki.gui.models
+import rpki.rcynicdb.models
class TelephoneField(models.CharField):
@@ -31,56 +28,13 @@ class TelephoneField(models.CharField):
models.CharField.__init__(self, *args, **kwargs)
-class AddressRange(rpki.gui.models.PrefixV4):
- @models.permalink
- def get_absolute_url(self):
- return ('rpki.gui.cacheview.views.addressrange_detail', [str(self.pk)])
-
-
-class AddressRangeV6(rpki.gui.models.PrefixV6):
- @models.permalink
- def get_absolute_url(self):
- return ('rpki.gui.cacheview.views.addressrange_detail_v6',
- [str(self.pk)])
-
-
-class ASRange(rpki.gui.models.ASN):
- @models.permalink
- def get_absolute_url(self):
- return ('rpki.gui.cacheview.views.asrange_detail', [str(self.pk)])
-
-kinds = list(enumerate(('good', 'warn', 'bad')))
-kinds_dict = dict((v, k) for k, v in kinds)
-
+class AddressRange(rpki.gui.models.PrefixV4): pass
-class ValidationLabel(models.Model):
- """
- Represents a specific error condition defined in the rcynic XML
- output file.
- """
- label = models.CharField(max_length=79, db_index=True, unique=True)
- status = models.CharField(max_length=255)
- kind = models.PositiveSmallIntegerField(choices=kinds)
-
- def __unicode__(self):
- return self.label
-
-
-class RepositoryObject(models.Model):
- """
- Represents a globally unique RPKI repository object, specified by its URI.
- """
- uri = models.URLField(unique=True, db_index=True)
-generations = list(enumerate(('current', 'backup')))
-generations_dict = dict((val, key) for (key, val) in generations)
+class AddressRangeV6(rpki.gui.models.PrefixV6): pass
-class ValidationStatus(models.Model):
- timestamp = models.DateTimeField()
- generation = models.PositiveSmallIntegerField(choices=generations, null=True)
- status = models.ForeignKey(ValidationLabel)
- repo = models.ForeignKey(RepositoryObject, related_name='statuses')
+class ASRange(rpki.gui.models.ASN): pass
class SignedObject(models.Model):
@@ -89,58 +43,47 @@ class SignedObject(models.Model):
The signing certificate is ommitted here in order to give a proper
value for the 'related_name' attribute.
"""
- repo = models.ForeignKey(RepositoryObject, related_name='cert', unique=True)
-
- # on-disk file modification time
- mtime = models.PositiveIntegerField(default=0)
- # SubjectName
- name = models.CharField(max_length=255)
+ class Meta:
+ abstract = True
- # value from the SKI extension
- keyid = models.CharField(max_length=60, db_index=True)
+ # Duplicate of rpki.rcynicdb.models.RPKIObject
+ uri = models.TextField()
# validity period from EE cert which signed object
not_before = models.DateTimeField()
not_after = models.DateTimeField()
- def mtime_as_datetime(self):
- """
- convert the local timestamp to UTC and convert to a datetime object
- """
- return datetime.utcfromtimestamp(self.mtime + time.timezone)
-
- def status_id(self):
- """
- Returns a HTML class selector for the current object based on its validation status.
- The selector is chosen based on the current generation only. If there is any bad status,
- return bad, else if there are any warn status, return warn, else return good.
- """
- for x in reversed(kinds):
- if self.repo.statuses.filter(generation=generations_dict['current'], status__kind=x[0]):
- return x[1]
- return None # should not happen
-
def __unicode__(self):
- return u'%s' % self.name
+ return u'%s' % self.uri
+
+ def __repr__(self):
+ return u'<%s uri=%s>' % (self.__class__.__name__, self.uri)
class Cert(SignedObject):
"""
- Object representing a resource certificate.
+ Object representing a resource CA certificate.
"""
+ # Duplicate of rpki.rcynicdb.models.RPKIObject
+ ski = models.SlugField(max_length=40) # hex SHA-1
+
addresses = models.ManyToManyField(AddressRange, related_name='certs')
addresses_v6 = models.ManyToManyField(AddressRangeV6, related_name='certs')
asns = models.ManyToManyField(ASRange, related_name='certs')
- issuer = models.ForeignKey('self', related_name='children', null=True)
- sia = models.CharField(max_length=255)
- def get_absolute_url(self):
- return reverse('cert-detail', args=[str(self.pk)])
+ issuer = models.ForeignKey('self', on_delete=models.CASCADE, null=True)
+
+ def __repr__(self):
+ return u'<Cert uri=%s ski=%s not_before=%s not_after=%s>' % (self.uri, self.ski, self.not_before, self.not_after)
+
+ def __unicode__(self):
+ return u'RPKI CA Cert %s' % (self.uri,)
def get_cert_chain(self):
"""Return a list containing the complete certificate chain for this
certificate."""
+
cert = self
x = [cert]
while cert != cert.issuer:
@@ -180,6 +123,7 @@ class ROAPrefixV4(ROAPrefix, rpki.gui.models.PrefixV4):
@property
def routes(self):
"""return all routes covered by this roa prefix"""
+
return RouteOrigin.objects.filter(prefix_min__gte=self.prefix_min,
prefix_max__lte=self.prefix_max)
@@ -201,10 +145,7 @@ class ROA(SignedObject):
asid = models.PositiveIntegerField()
prefixes = models.ManyToManyField(ROAPrefixV4, related_name='roas')
prefixes_v6 = models.ManyToManyField(ROAPrefixV6, related_name='roas')
- issuer = models.ForeignKey('Cert', related_name='roas')
-
- def get_absolute_url(self):
- return reverse('roa-detail', args=[str(self.pk)])
+ issuer = models.ForeignKey(Cert, on_delete=models.CASCADE, null=True, related_name='roas')
class Meta:
ordering = ('asid',)
@@ -218,11 +159,7 @@ class Ghostbuster(SignedObject):
email_address = models.EmailField(blank=True, null=True)
organization = models.CharField(blank=True, null=True, max_length=255)
telephone = TelephoneField(blank=True, null=True)
- issuer = models.ForeignKey('Cert', related_name='ghostbusters')
-
- def get_absolute_url(self):
- # note that ghostbuster-detail is different from gbr-detail! sigh
- return reverse('ghostbuster-detail', args=[str(self.pk)])
+ issuer = models.ForeignKey(Cert, on_delete=models.CASCADE, null=True, related_name='ghostbusters')
def __unicode__(self):
if self.full_name:
diff --git a/rpki/gui/gui_rpki_cache/util.py b/rpki/gui/gui_rpki_cache/util.py
new file mode 100644
index 00000000..0bc4fa5d
--- /dev/null
+++ b/rpki/gui/gui_rpki_cache/util.py
@@ -0,0 +1,308 @@
+# Copyright (C) 2011 SPARTA, Inc. dba Cobham
+# Copyright (C) 2012, 2013, 2016 SPARTA, Inc. a Parsons Company
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND SPARTA DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL SPARTA BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+__version__ = '$Id: util.py 6335 2016-03-29 03:09:13Z sra $'
+
+import logging
+import time
+import vobject
+from socket import getfqdn
+from cStringIO import StringIO
+
+if __name__ == '__main__':
+ import os
+ logging.basicConfig(level=logging.DEBUG)
+ os.environ.update(DJANGO_SETTINGS_MODULE='rpki.django_settings.gui')
+ import django
+ django.setup()
+
+import os.path
+
+logger = logging.getLogger(__name__)
+
+from django.db import transaction
+import django.db.models
+
+import rpki
+import rpki.resource_set
+import rpki.left_right
+import rpki.gui.app.timestamp
+from rpki.gui.app.models import Conf, Alert
+from rpki.gui.gui_rpki_cache import models
+from rpki.irdb.zookeeper import Zookeeper
+
+from lxml.etree import Element, SubElement
+
+
+def process_certificate(auth, obj):
+ cert = models.Cert.objects.filter(ski=obj.ski).first()
+ if cert:
+ logger.debug('cache hit for CA cert uri=%s ski=%s' % (cert.uri, cert.ski))
+ return cert # cache hit
+
+ logger.debug('parsing cert at %s' % (obj.uri,))
+
+ """Process Resource CA Certificates"""
+ x509 = rpki.POW.X509.derRead(obj.der)
+
+ # ensure this is a resource CA Certificate (ignore Router certs)
+ bc = x509.getBasicConstraints()
+ is_ca = bc is not None and bc[0]
+ if not is_ca:
+ return
+
+ # locate the parent certificate
+ if obj.aki and obj.aki != obj.ski:
+ try:
+ issuer = models.Cert.objects.get(ski=obj.aki)
+ except models.Cert.DoesNotExist:
+ # process parent cert first
+ issuer = process_certificate(auth, rpki.rcynicdb.models.RPKIObject.objects.get(ski=obj.aki, authenticated=auth))
+ else:
+ issuer = None # root
+
+ asns, v4, v6 = x509.getRFC3779()
+
+ cert = models.Cert.objects.create(
+ uri=obj.uri,
+ ski=obj.ski,
+ not_before=x509.getNotBefore(),
+ not_after=x509.getNotAfter(),
+ issuer=issuer
+ )
+
+ if issuer is None:
+ cert.issuer = cert # self-signed
+ cert.save()
+
+ if asns == 'inherit':
+ cert.asns.add(*issuer.asns.all())
+ elif asns:
+ for asmin, asmax in asns:
+ asr, _ = models.ASRange.objects.get_or_create(min=asmin, max=asmax)
+ cert.asns.add(asr)
+
+ if v4 == 'inherit':
+ cert.addresses.add(*issuer.addresses.all())
+ elif v4:
+ for v4min, v4max in v4:
+ pfx, _ = models.AddressRange.objects.get_or_create(prefix_min=v4min, prefix_max=v4max)
+ cert.addresses.add(pfx)
+
+ if v6 == 'inherit':
+ cert.addresses_v6.add(*issuer.addresses_v6.all())
+ elif v6:
+ for v6min, v6max in v6:
+ pfx, _ = models.AddressRangeV6.objects.get_or_create(prefix_min=v6min, prefix_max=v6max)
+ cert.addresses_v6.add(pfx)
+
+ return cert
+
+def process_roa(auth, obj):
+ logger.debug('parsing roa at %s' % (obj.uri,))
+
+ r = rpki.POW.ROA.derRead(obj.der)
+ r.verify() # required in order to extract asID
+ ee = r.certs()[0] # rpki.POW.X509
+ aki = ee.getAKI().encode('hex')
+
+ logger.debug('looking for ca cert with ski=%s' % (aki,))
+
+ # Locate the Resource CA cert that issued the EE that signed this ROA
+ issuer = models.Cert.objects.get(ski=aki)
+
+ roa = models.ROA.objects.create(
+ uri=obj.uri,
+ asid=r.getASID(),
+ not_before=ee.getNotBefore(),
+ not_after=ee.getNotAfter(),
+ issuer=issuer)
+
+ prefixes = r.getPrefixes()
+ if prefixes[0]: # v4
+ for p in prefixes[0]:
+ v = rpki.resource_set.roa_prefix_ipv4(*p)
+ roapfx, _ = models.ROAPrefixV4.objects.get_or_create(prefix_min=v.min(), prefix_max=v.max(), max_length=v.max_prefixlen)
+ roa.prefixes.add(roapfx)
+ if prefixes[1]: # v6
+ for p in prefixes[1]:
+ v = rpki.resource_set.roa_prefix_ipv6(*p)
+ roapfx, _ = models.ROAPrefixV6.objects.get_or_create(prefix_min=v.min(), prefix_max=v.max(), max_length=v.max_prefixlen)
+ roa.prefixes_v6.add(roapfx)
+
+ return roa
+
+def process_ghostbuster(auth, obj):
+ logger.debug('parsing ghostbuster at %s' % (obj.uri,))
+ g = rpki.POW.CMS.derRead(obj.der)
+ ee = g.certs()[0] # rpki.POW.X509
+ aki = ee.getAKI().encode('hex')
+ vcard = vobject.readOne(g.verify())
+
+ # Locate the Resource CA cert that issued the EE that signed this ghostbuster record
+ issuer = models.Cert.objects.get(ski=aki)
+
+ gbr = models.Ghostbuster.objects.create(
+ uri=obj.uri,
+ issuer=issuer,
+ not_before=ee.getNotBefore(),
+ not_after=ee.getNotAfter(),
+ full_name = vcard.fn.value if hasattr(vcard, 'fn') else None,
+ email_address = vcard.email.value if hasattr(vcard, 'email') else None,
+ telephone = vcard.tel.value if hasattr(vcard, 'tel') else None,
+ organization = vcard.org.value[0] if hasattr(vcard, 'org') else None
+ )
+
+ return gbr
+
+@transaction.atomic
+def process_cache():
+ logger.info('processing rpki cache')
+
+ # foreign key constraints should cause all other objects to be removed
+ models.Cert.objects.all().delete()
+
+ # certs must be processed first in order to build proper foreign keys for roa/gbr
+ dispatch = {
+ '.cer': process_certificate,
+ '.gbr': process_ghostbuster,
+ '.roa': process_roa
+ }
+
+ auth = rpki.rcynicdb.models.Authenticated.objects.order_by('started').first()
+
+ # Resource CA Certs are processed first in order to attach ROAs and Ghostbusters
+ for suffix in ('.cer', '.roa', '.gbr'):
+ cb = dispatch[suffix]
+
+ for rpkiobj in auth.rpkiobject_set.filter(uri__endswith=suffix):
+ cb(auth, rpkiobj)
+
+ # Garbage collection - remove M2M relations for certs/ROAs which no longer exist
+ models.ASRange.objects.annotate(num_certs=django.db.models.Count('certs')).filter(num_certs=0).delete()
+ models.AddressRange.objects.annotate(num_certs=django.db.models.Count('certs')).filter(num_certs=0).delete()
+ models.AddressRangeV6.objects.annotate(num_certs=django.db.models.Count('certs')).filter(num_certs=0).delete()
+
+ models.ROAPrefixV4.objects.annotate(num_roas=django.db.models.Count('roas')).filter(num_roas=0).delete()
+ models.ROAPrefixV6.objects.annotate(num_roas=django.db.models.Count('roas')).filter(num_roas=0).delete()
+
+
+# dict mapping resource handle to list of published objects, use for notifying objects which have become invalid
+uris = {}
+model_map = { '.cer': models.Cert, '.roa': models.ROA, '.gbr': models.Ghostbuster }
+
+def fetch_published_objects():
+ """Query rpkid for all objects published by local users, and look up the
+ current validation status of each object. The validation status is used
+ later to send alerts for objects which have transitioned to invalid.
+ """
+ logger.info('querying for published objects')
+
+ handles = [conf.handle for conf in Conf.objects.all()]
+ q_msg = Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "query", version = rpki.left_right.version)
+ for h in handles:
+ SubElement(q_msg, rpki.left_right.tag_list_published_objects, tenant_handle=h, tag=h)
+ try:
+ z = Zookeeper()
+ r_msg = z.call_rpkid(q_msg)
+ except Exception as err:
+ logger.error('Unable to connect to rpkid to fetch list of published objects')
+ logger.exception(err)
+ # Should be safe to continue processing the rcynic cache, we just don't do any notifications
+ return
+
+ for r_pdu in r_msg:
+ if r_pdu.tag == rpki.left_right.tag_list_published_objects:
+ # Look up the object in the rcynic cache
+ uri = r_pdu.get('uri')
+ ext = os.path.splitext(uri)[1]
+ if ext in model_map:
+ model = model_map[ext]
+ handle = r_pdu.get('tenant_handle')
+
+ if model.objects.filter(uri=uri).exists():
+ v = uris.setdefault(handle, [])
+ v.append(uri)
+ logger.debug('adding %s', uri)
+ #else:
+ # this object is not in the cache. it was either published
+ # recently, or disappared previously. if it disappeared
+ # previously, it has already been alerted. in either case, we
+ # omit the uri from the list since we are interested only in
+ # objects which were valid and are no longer valid
+ else:
+ logger.debug('skipping object ext=%s uri=%s' % (ext, uri))
+
+ elif r_pdu.tag == rpki.left_right.tag_report_error:
+ logger.error('rpkid reported an error: %s', r_pdu.get("error_code"))
+
+
+def notify_invalid():
+ """Send email alerts to the addresses registered in ghostbuster records for
+ any invalid objects that were published by users of this system.
+ """
+
+ logger.info('sending notifications for invalid objects')
+
+ for handle, published_objects in uris.iteritems():
+ missing = []
+ for u in published_objects:
+ ext = os.path.splitext(u)[1]
+ model = model_map[ext]
+ if not model.objects.filter(uri=u).exists():
+ missing.append(u)
+
+ if missing:
+ conf = Conf.objects.get(handle=handle)
+
+ msg = StringIO()
+ msg.write('This is an alert about problems with objects published by '
+ 'the resource handle %s.\n\n' % handle)
+
+ msg.write('The following objects were previously valid, but are '
+ 'now invalid:\n')
+
+ for u in missing:
+ msg.write('\n')
+ msg.write(u)
+ msg.write('\n')
+
+ msg.write("""--
+You are receiving this email because your address is published in a Ghostbuster
+record, or is the default email address for this resource holder account on
+%s.""" % getfqdn())
+
+ from_email = 'root@' + getfqdn()
+ subj = 'invalid RPKI object alert for resource handle %s' % conf.handle
+ conf.send_alert(subj, msg.getvalue(), from_email, severity=Alert.ERROR)
+
+
+def update_cache():
+ """Cache information from the current rcynicdb for display by the gui"""
+
+ start = time.time()
+ fetch_published_objects()
+ process_cache()
+ notify_invalid()
+
+ rpki.gui.app.timestamp.update('rcynic_import')
+
+ stop = time.time()
+ logger.info('elapsed time %d seconds.', (stop - start))
+
+
+if __name__ == '__main__':
+ process_cache()
diff --git a/rpki/gui/models.py b/rpki/gui/models.py
index 184383c0..4d56c18e 100644
--- a/rpki/gui/models.py
+++ b/rpki/gui/models.py
@@ -19,57 +19,72 @@ Common classes for reuse in apps.
__version__ = '$Id$'
from django.db import models
+from django.core.exceptions import ValidationError
import rpki.resource_set
import rpki.POW
-from south.modelsinspector import add_introspection_rules
-class IPv6AddressField(models.Field):
- "Field large enough to hold a 128-bit unsigned integer."
-
- __metaclass__ = models.SubfieldBase
-
- def db_type(self, connection):
- return 'binary(16)'
-
- def to_python(self, value):
- if isinstance(value, rpki.POW.IPAddress):
+class IPAddressField(models.CharField):
+ """
+ Field class for rpki.POW.IPAddress, stored as zero-padded
+ hexadecimal so lexicographic order is identical to numeric order.
+ """
+
+ # Django's CharField type doesn't distinguish between the length
+ # of the human readable form and the length of the storage form,
+ # so we have to leave room for IPv6 punctuation even though we
+ # only store hexadecimal digits and thus will never use the full
+ # width of the database field. Price we pay for portability.
+ #
+ # Documentation on the distinction between the various conversion
+ # methods is fairly opaque, to put it politely, and we have to
+ # handle database engines which sometimes return buffers or other
+ # classes instead of strings, so the conversions are a bit
+ # finicky. If this goes haywire, your best bet is probably to
+ # litter the code with logging.debug() calls and debug by printf.
+
+ def __init__(self, *args, **kwargs):
+ kwargs["max_length"] = 40
+ super(IPAddressField, self).__init__(*args, **kwargs)
+
+ def deconstruct(self):
+ name, path, args, kwargs = super(IPAddressField, self).deconstruct()
+ del kwargs["max_length"]
+ return name, path, args, kwargs
+
+ @staticmethod
+ def _value_to_ipaddress(value):
+ if value is None or isinstance(value, rpki.POW.IPAddress):
return value
- return rpki.POW.IPAddress.fromBytes(value)
-
- def get_db_prep_value(self, value, connection, prepared):
- """
- Note that we add a custom conversion to encode long values as hex
- strings in SQL statements. See settings.get_conv() for details.
-
- """
- return value.toBytes()
+ value = str(value)
+ if ":" in value or "." in value:
+ return rpki.POW.IPAddress(value)
+ else:
+ return rpki.POW.IPAddress.fromBytes(value.decode("hex"))
-
-class IPv4AddressField(models.Field):
- "Wrapper around rpki.POW.IPAddress."
-
- __metaclass__ = models.SubfieldBase
-
- def db_type(self, connection):
- return 'int UNSIGNED'
+ def from_db_value(self, value, expression, connection, context):
+ # Can't use super() here, see Django documentation.
+ return self._value_to_ipaddress(value)
def to_python(self, value):
+ return self._value_to_ipaddress(
+ super(IPAddressField, self).to_python(value))
+
+ @staticmethod
+ def _hex_from_ipaddress(value):
if isinstance(value, rpki.POW.IPAddress):
+ return value.toBytes().encode("hex")
+ else:
return value
- return rpki.POW.IPAddress(value, version=4)
- def get_db_prep_value(self, value, connection, prepared):
- return long(value)
+ def get_prep_value(self, value):
+ return super(IPAddressField, self).get_prep_value(
+ self._hex_from_ipaddress(value))
-add_introspection_rules(
- [
- ([IPv4AddressField, IPv6AddressField], [], {})
- ],
- [r'^rpki\.gui\.models\.IPv4AddressField',
- r'^rpki\.gui\.models\.IPv6AddressField']
-)
+ def get_db_prep_value(self, value, connection, prepared = False):
+ return self._hex_from_ipaddress(
+ super(IPAddressField, self).get_db_prep_value(value, connection, prepared))
class Prefix(models.Model):
@@ -82,6 +97,7 @@ class Prefix(models.Model):
"""
Returns the prefix as a rpki.resource_set.resource_range_ip object.
"""
+
return self.range_cls(self.prefix_min, self.prefix_max)
@property
@@ -96,6 +112,7 @@ class Prefix(models.Model):
def __unicode__(self):
"""This method may be overridden by subclasses. The default
implementation calls get_prefix_display(). """
+
return self.get_prefix_display()
class Meta:
@@ -110,8 +127,8 @@ class PrefixV4(Prefix):
range_cls = rpki.resource_set.resource_range_ipv4
- prefix_min = IPv4AddressField(db_index=True, null=False)
- prefix_max = IPv4AddressField(db_index=True, null=False)
+ prefix_min = IPAddressField(db_index=True, null=False)
+ prefix_max = IPAddressField(db_index=True, null=False)
class Meta(Prefix.Meta):
abstract = True
@@ -122,20 +139,25 @@ class PrefixV6(Prefix):
range_cls = rpki.resource_set.resource_range_ipv6
- prefix_min = IPv6AddressField(db_index=True, null=False)
- prefix_max = IPv6AddressField(db_index=True, null=False)
+ prefix_min = IPAddressField(db_index=True, null=False)
+ prefix_max = IPAddressField(db_index=True, null=False)
class Meta(Prefix.Meta):
abstract = True
+def validate_asn(value):
+ if value < 0 or value > 0xFFFFFFFFL:
+ raise ValidationError('%s is not a valid autonomous system number' % value)
+
+
class ASN(models.Model):
"""Represents a range of ASNs.
This model is abstract, and is intended to be reused by applications."""
- min = models.PositiveIntegerField(null=False)
- max = models.PositiveIntegerField(null=False)
+ min = models.BigIntegerField(null=False, validators=[validate_asn])
+ max = models.BigIntegerField(null=False, validators=[validate_asn])
class Meta:
abstract = True
diff --git a/rpki/gui/routeview/api.py b/rpki/gui/routeview/api.py
index cf699c9a..b4ff297a 100644
--- a/rpki/gui/routeview/api.py
+++ b/rpki/gui/routeview/api.py
@@ -29,8 +29,8 @@ def route_list(request):
By default, only returns up to 10 matching routes, but the client may
request a different limit with the 'count=' query string parameter.
-
"""
+
hard_limit = 100
if request.method == 'GET' and 'prefix__in' in request.GET:
diff --git a/rpki/gui/routeview/models.py b/rpki/gui/routeview/models.py
index 052860c4..35039136 100644
--- a/rpki/gui/routeview/models.py
+++ b/rpki/gui/routeview/models.py
@@ -1,5 +1,5 @@
# Copyright (C) 2010, 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -31,7 +31,7 @@ class RouteOrigin(rpki.gui.models.PrefixV4):
@property
def roas(self):
"Return a queryset of ROAs which cover this route."
- return rpki.gui.cacheview.models.ROA.objects.filter(
+ return rpki.gui.gui_rpki_cache.models.ROA.objects.filter(
prefixes__prefix_min__lte=self.prefix_min,
prefixes__prefix_max__gte=self.prefix_max
)
@@ -39,7 +39,7 @@ class RouteOrigin(rpki.gui.models.PrefixV4):
@property
def roa_prefixes(self):
"Return a queryset of ROA prefixes which cover this route."
- return rpki.gui.cacheview.models.ROAPrefixV4.objects.filter(
+ return rpki.gui.gui_rpki_cache.models.ROAPrefixV4.objects.filter(
prefix_min__lte=self.prefix_min,
prefix_max__gte=self.prefix_max
)
@@ -78,4 +78,4 @@ class RouteOriginV6(rpki.gui.models.PrefixV6):
# this goes at the end of the file to avoid problems with circular imports
-import rpki.gui.cacheview.models
+import rpki.gui.gui_rpki_cache.models
diff --git a/rpki/gui/routeview/util.py b/rpki/gui/routeview/util.py
index 1340e9fa..14ac3cf9 100644
--- a/rpki/gui/routeview/util.py
+++ b/rpki/gui/routeview/util.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2012, 2013 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2013, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -16,7 +16,6 @@ __version__ = '$Id$'
__all__ = ('import_routeviews_dump')
import itertools
-import _mysql_exceptions
import os.path
import subprocess
import time
@@ -25,12 +24,13 @@ import urlparse
import bz2
from urllib import urlretrieve, unquote
-from django.db import transaction, connection
+from django.db import transaction
from django.conf import settings
from rpki.resource_set import resource_range_ipv4, resource_range_ipv6
from rpki.exceptions import BadIPResource
import rpki.gui.app.timestamp
+from rpki.gui.routeview.models import RouteOrigin
# globals
logger = logging.getLogger(__name__)
@@ -43,28 +43,17 @@ class ParseError(Exception): pass
class RouteDumpParser(object):
"""Base class for parsing various route dump formats."""
- table = 'routeview_routeorigin'
- sql = "INSERT INTO %s_new SET asn=%%s, prefix_min=%%s, prefix_max=%%s" % table
range_class = resource_range_ipv4
def __init__(self, path, *args, **kwargs):
+ transaction.set_autocommit(False)
+
self.path = path
- self.cursor = connection.cursor()
self.last_prefix = None
self.asns = set()
def parse(self):
- try:
- logger.info('Dropping existing staging table...')
- self.cursor.execute('DROP TABLE IF EXISTS %s_new' % self.table)
- except _mysql_exceptions.Warning:
- pass
-
- logger.info('Creating staging table...')
- self.cursor.execute('CREATE TABLE %(table)s_new LIKE %(table)s' % {'table': self.table})
-
- logger.info('Disabling autocommit...')
- self.cursor.execute('SET autocommit=0')
+ RouteOrigin.objects.all().delete()
logger.info('Adding rows to table...')
for line in self.input:
@@ -88,25 +77,13 @@ class RouteDumpParser(object):
self.ins_routes() # process data from last line
- logger.info('Committing...')
- self.cursor.execute('COMMIT')
-
- try:
- logger.info('Dropping old table...')
- self.cursor.execute('DROP TABLE IF EXISTS %s_old' % self.table)
- except _mysql_exceptions.Warning:
- pass
-
- logger.info('Swapping staging table with live table...')
- self.cursor.execute('RENAME TABLE %(table)s TO %(table)s_old, %(table)s_new TO %(table)s' % {'table': self.table})
-
self.cleanup() # allow cleanup function to throw prior to COMMIT
- transaction.commit_unless_managed()
-
logger.info('Updating timestamp metadata...')
rpki.gui.app.timestamp.update('bgp_v4_import')
+ transaction.commit() # explicit commit required since autocommit was disabled in __init__
+
def parse_line(self, row):
"Parse one line of input. Return a (prefix, origin_as) tuple."
return None
@@ -119,9 +96,8 @@ class RouteDumpParser(object):
if self.last_prefix is not None:
try:
rng = self.range_class.parse_str(self.last_prefix)
- rmin = long(rng.min)
- rmax = long(rng.max)
- self.cursor.executemany(self.sql, [(asn, rmin, rmax) for asn in self.asns])
+ for asn in self.asns:
+ RouteOrigin.objects.create(asn=asn, prefix_min=rng.min, prefix_max=rng.max)
except BadIPResource:
logger.warning('skipping bad prefix: ' + self.last_prefix)
self.asns = set() # reset
@@ -151,6 +127,10 @@ class TextDumpParser(RouteDumpParser):
except ValueError:
raise ParseError('bad AS value')
+ # FIXME Django doesn't have a field for positive integers up to 2^32-1
+ if origin_as < 0 or origin_as > 2147483647:
+ raise ParseError('AS value out of supported database range')
+
prefix = cols[1]
# validate the prefix since the "sh ip bgp" output is sometimes
@@ -215,8 +195,8 @@ def import_routeviews_dump(filename=DEFAULT_URL, filetype='text'):
filename [optional]: the full path to the downloaded file to parse
filetype [optional]: 'text' or 'mrt'
-
"""
+
start_time = time.time()
tmpname = None
@@ -229,10 +209,8 @@ def import_routeviews_dump(filename=DEFAULT_URL, filetype='text'):
logger.info("Downloading %s to %s", filename, tmpname)
if os.path.exists(tmpname):
- os.remove(tmpname)
- # filename is replaced with a local filename containing cached copy of
- # URL
- filename, headers = urlretrieve(filename, tmpname)
+ os.remove(tmpname)
+ filename, headers = urlretrieve(filename, tmpname)
try:
dispatch = {'text': TextDumpParser, 'mrt': MrtDumpParser}
diff --git a/rpki/gui/script_util.py b/rpki/gui/script_util.py
index c8248527..289dbbb7 100644
--- a/rpki/gui/script_util.py
+++ b/rpki/gui/script_util.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2013 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2013, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -16,13 +16,6 @@
This module contains utility functions for use in standalone scripts.
"""
-import django
-
-from django.conf import settings
-
-from rpki import config
-from rpki import autoconf
-
__version__ = '$Id$'
@@ -30,29 +23,11 @@ def setup():
"""
Configure Django enough to use the ORM.
"""
- cfg = config.parser(section='web_portal')
- # INSTALLED_APPS doesn't seem necessary so long as you are only accessing
- # existing tables.
- #
- # Setting charset to latin1 is a disgusting kludge, but without
- # this MySQL 5.6 (and, proably, later) gets tetchy about ASN.1 DER
- # stored in BLOB columns not being well-formed UTF8 (sic). If you
- # know of a better solution, tell us.
- settings.configure(
- DATABASES={
- 'default': {
- 'ENGINE': 'django.db.backends.mysql',
- 'NAME': cfg.get('sql-database'),
- 'USER': cfg.get('sql-username'),
- 'PASSWORD': cfg.get('sql-password'),
- 'OPTIONS': {
- 'charset': 'latin1',
- }
- }
- },
- MIDDLEWARE_CLASSES = (),
- DOWNLOAD_DIRECTORY = cfg.get('download-directory', '/var/tmp'),
- )
- if django.VERSION >= (1, 7):
- from django.apps import apps
- apps.populate(settings.INSTALLED_APPS)
+
+ import os
+
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.gui")
+
+ # Initialize Django.
+ import django
+ django.setup()
diff --git a/rpki/gui/urls.py b/rpki/gui/urls.py
index 955092f5..ac1d2916 100644
--- a/rpki/gui/urls.py
+++ b/rpki/gui/urls.py
@@ -1,5 +1,5 @@
# Copyright (C) 2010, 2011 SPARTA, Inc. dba Cobham Analytic Solutions
-# Copyright (C) 2012, 2013 SPARTA, Inc. a Parsons Company
+# Copyright (C) 2012, 2013, 2016 SPARTA, Inc. a Parsons Company
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -28,7 +28,6 @@ urlpatterns = patterns(
#(r'^admin/', include(admin.site.urls)),
(r'^api/', include('rpki.gui.api.urls')),
- (r'^cacheview/', include('rpki.gui.cacheview.urls')),
(r'^rpki/', include('rpki.gui.app.urls')),
(r'^accounts/login/$', 'rpki.gui.views.login'),
diff --git a/rpki/http.py b/rpki/http.py
deleted file mode 100644
index 71239c7f..00000000
--- a/rpki/http.py
+++ /dev/null
@@ -1,1058 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
-# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
-# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notices and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
-# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
-# ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
-# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-HTTP utilities, both client and server.
-"""
-
-import time
-import socket
-import asyncore
-import asynchat
-import urlparse
-import sys
-import random
-import logging
-import rpki.async
-import rpki.sundial
-import rpki.x509
-import rpki.exceptions
-import rpki.log
-import rpki.POW
-
-logger = logging.getLogger(__name__)
-
-## @var default_content_type
-# HTTP content type used for RPKI messages.
-# Can be overriden on a per-client or per-server basis.
-default_content_type = "application/x-rpki"
-
-## @var want_persistent_client
-# Whether we want persistent HTTP client streams, when server also supports them.
-want_persistent_client = False
-
-## @var want_persistent_server
-# Whether we want persistent HTTP server streams, when client also supports them.
-want_persistent_server = False
-
-## @var default_client_timeout
-# Default HTTP client connection timeout.
-default_client_timeout = rpki.sundial.timedelta(minutes = 5)
-
-## @var default_server_timeout
-# Default HTTP server connection timeouts. Given our druthers, we'd
-# prefer that the client close the connection, as this avoids the
-# problem of client starting to reuse connection just as server closes
-# it, so this should be longer than the client timeout.
-default_server_timeout = rpki.sundial.timedelta(minutes = 10)
-
-## @var default_http_version
-# Preferred HTTP version.
-default_http_version = (1, 0)
-
-## @var default_tcp_port
-# Default port for clients and servers that don't specify one.
-default_tcp_port = 80
-
-## @var enable_ipv6_servers
-# Whether to enable IPv6 listeners. Enabled by default, as it should
-# be harmless. Has no effect if kernel doesn't support IPv6.
-enable_ipv6_servers = True
-
-## @var enable_ipv6_clients
-# Whether to consider IPv6 addresses when making connections.
-# Disabled by default, as IPv6 connectivity is still a bad joke in
-# far too much of the world.
-enable_ipv6_clients = False
-
-## @var have_ipv6
-# Whether the current machine claims to support IPv6. Note that just
-# because the kernel supports it doesn't mean that the machine has
-# usable IPv6 connectivity. I don't know of a simple portable way to
-# probe for connectivity at runtime (the old test of "can you ping
-# SRI-NIC.ARPA?" seems a bit dated...). Don't set this, it's set
-# automatically by probing using the socket() system call at runtime.
-try:
- # pylint: disable=W0702,W0104
- socket.socket(socket.AF_INET6).close()
- socket.IPPROTO_IPV6
- socket.IPV6_V6ONLY
-except:
- have_ipv6 = False
-else:
- have_ipv6 = True
-
-## @var use_adns
-
-# Whether to use rpki.adns code. This is still experimental, so it's
-# not (yet) enabled by default.
-use_adns = False
-try:
- import rpki.adns
-except ImportError:
- pass
-
-def supported_address_families(enable_ipv6):
- """
- IP address families on which servers should listen, and to consider
- when selecting addresses for client connections.
- """
- if enable_ipv6 and have_ipv6:
- return (socket.AF_INET, socket.AF_INET6)
- else:
- return (socket.AF_INET,)
-
-def localhost_addrinfo():
- """
- Return pseudo-getaddrinfo results for localhost.
- """
- result = [(socket.AF_INET, "127.0.0.1")]
- if enable_ipv6_clients and have_ipv6:
- result.append((socket.AF_INET6, "::1"))
- return result
-
-class http_message(object):
- """
- Virtual class representing of one HTTP message.
- """
-
- software_name = "ISC RPKI library"
-
- def __init__(self, version = None, body = None, headers = None):
- self.version = version
- self.body = body
- self.headers = headers
- self.normalize_headers()
-
- def normalize_headers(self, headers = None):
- """
- Clean up (some of) the horrible messes that HTTP allows in its
- headers.
- """
- if headers is None:
- headers = () if self.headers is None else self.headers.items()
- translate_underscore = True
- else:
- translate_underscore = False
- result = {}
- for k, v in headers:
- if translate_underscore:
- k = k.replace("_", "-")
- k = "-".join(s.capitalize() for s in k.split("-"))
- v = v.strip()
- if k in result:
- result[k] += ", " + v
- else:
- result[k] = v
- self.headers = result
-
- @classmethod
- def parse_from_wire(cls, headers):
- """
- Parse and normalize an incoming HTTP message.
- """
- self = cls()
- headers = headers.split("\r\n")
- self.parse_first_line(*headers.pop(0).split(None, 2))
- for i in xrange(len(headers) - 2, -1, -1):
- if headers[i + 1][0].isspace():
- headers[i] += headers[i + 1]
- del headers[i + 1]
- self.normalize_headers([h.split(":", 1) for h in headers])
- return self
-
- def format(self):
- """
- Format an outgoing HTTP message.
- """
- s = self.format_first_line()
- if self.body is not None:
- assert isinstance(self.body, str)
- self.headers["Content-Length"] = len(self.body)
- for kv in self.headers.iteritems():
- s += "%s: %s\r\n" % kv
- s += "\r\n"
- if self.body is not None:
- s += self.body
- return s
-
- def __str__(self):
- return self.format()
-
- def parse_version(self, version):
- """
- Parse HTTP version, raise an exception if we can't.
- """
- if version[:5] != "HTTP/":
- raise rpki.exceptions.HTTPBadVersion("Couldn't parse version %s" % version)
- self.version = tuple(int(i) for i in version[5:].split("."))
-
- @property
- def persistent(self):
- """
- Figure out whether this HTTP message encourages a persistent connection.
- """
- c = self.headers.get("Connection")
- if self.version == (1, 1):
- return c is None or "close" not in c.lower()
- elif self.version == (1, 0):
- return c is not None and "keep-alive" in c.lower()
- else:
- return False
-
-class http_request(http_message):
- """
- HTTP request message.
- """
-
- def __init__(self, cmd = None, path = None, version = default_http_version, body = None, callback = None, errback = None, **headers):
- assert cmd == "POST" or body is None
- http_message.__init__(self, version = version, body = body, headers = headers)
- self.cmd = cmd
- self.path = path
- self.callback = callback
- self.errback = errback
- self.retried = False
-
- def parse_first_line(self, cmd, path, version):
- """
- Parse first line of HTTP request message.
- """
- self.parse_version(version)
- self.cmd = cmd
- self.path = path
-
- def format_first_line(self):
- """
- Format first line of HTTP request message, and set up the
- User-Agent header.
- """
- self.headers.setdefault("User-Agent", self.software_name)
- return "%s %s HTTP/%d.%d\r\n" % (self.cmd, self.path, self.version[0], self.version[1])
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.cmd, self.path)
-
-class http_response(http_message):
- """
- HTTP response message.
- """
-
- def __init__(self, code = None, reason = None, version = default_http_version, body = None, **headers):
- http_message.__init__(self, version = version, body = body, headers = headers)
- self.code = code
- self.reason = reason
-
- def parse_first_line(self, version, code, reason):
- """
- Parse first line of HTTP response message.
- """
- self.parse_version(version)
- self.code = int(code)
- self.reason = reason
-
- def format_first_line(self):
- """
- Format first line of HTTP response message, and set up Date and
- Server headers.
- """
- self.headers.setdefault("Date", time.strftime("%a, %d %b %Y %T GMT"))
- self.headers.setdefault("Server", self.software_name)
- return "HTTP/%d.%d %s %s\r\n" % (self.version[0], self.version[1], self.code, self.reason)
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.code, self.reason)
-
-def addr_to_string(addr):
- """
- Convert socket addr tuple to printable string. Assumes 2-element
- tuple is IPv4, 4-element tuple is IPv6, throws TypeError for
- anything else.
- """
-
- if len(addr) == 2:
- return "%s:%d" % (addr[0], addr[1])
- if len(addr) == 4:
- return "%s.%d" % (addr[0], addr[1])
- raise TypeError
-
-@rpki.log.class_logger(logger)
-class http_stream(asynchat.async_chat):
- """
- Virtual class representing an HTTP message stream.
- """
-
- # Keep pylint happy; @class_logger overwrites this.
- logger = None
-
- def __repr__(self):
- status = ["connected"] if self.connected else []
- try:
- status.append(addr_to_string(self.addr))
- except TypeError:
- pass
- return rpki.log.log_repr(self, *status)
-
- def __init__(self, sock = None):
- self.logger = logging.LoggerAdapter(self.logger, dict(context = self))
- asynchat.async_chat.__init__(self, sock)
- self.buffer = []
- self.timer = rpki.async.timer(self.handle_timeout)
- self.restart()
-
- def restart(self):
- """
- (Re)start HTTP message parser, reset timer.
- """
- assert not self.buffer
- self.chunk_handler = None
- self.set_terminator("\r\n\r\n")
- self.update_timeout()
-
- def update_timeout(self):
- """
- Put this stream's timer in known good state: set it to the
- stream's timeout value if we're doing timeouts, otherwise clear
- it.
- """
- if self.timeout is not None:
- self.logger.debug("Setting timeout %s", self.timeout)
- self.timer.set(self.timeout)
- else:
- self.logger.debug("Clearing timeout")
- self.timer.cancel()
-
- def collect_incoming_data(self, data):
- """
- Buffer incoming data from asynchat.
- """
- self.buffer.append(data)
- self.update_timeout()
-
- def get_buffer(self):
- """
- Consume data buffered from asynchat.
- """
- val = "".join(self.buffer)
- self.buffer = []
- return val
-
- def found_terminator(self):
- """
- Asynchat reported that it found whatever terminator we set, so
- figure out what to do next. This can be messy, because we can be
- in any of several different states:
-
- @li We might be handling chunked HTTP, in which case we have to
- initialize the chunk decoder;
-
- @li We might have found the end of the message body, in which case
- we can (finally) process it; or
-
- @li We might have just gotten to the end of the message headers,
- in which case we have to parse them to figure out which of three
- separate mechanisms (chunked, content-length, TCP close) is going
- to tell us how to find the end of the message body.
- """
- self.update_timeout()
- if self.chunk_handler:
- self.chunk_handler()
- elif not isinstance(self.get_terminator(), str):
- self.handle_body()
- else:
- self.msg = self.parse_type.parse_from_wire(self.get_buffer())
- if self.msg.version == (1, 1) and "chunked" in self.msg.headers.get("Transfer-Encoding", "").lower():
- self.msg.body = []
- self.chunk_handler = self.chunk_header
- self.set_terminator("\r\n")
- elif "Content-Length" in self.msg.headers:
- self.set_terminator(int(self.msg.headers["Content-Length"]))
- else:
- self.handle_no_content_length()
-
- def chunk_header(self):
- """
- Asynchat just handed us what should be the header of one chunk of
- a chunked encoding stream. If this chunk has a body, set the
- stream up to read it; otherwise, this is the last chunk, so start
- the process of exiting the chunk decoder.
- """
- n = int(self.get_buffer().partition(";")[0], 16)
- self.logger.debug("Chunk length %s", n)
- if n:
- self.chunk_handler = self.chunk_body
- self.set_terminator(n)
- else:
- self.msg.body = "".join(self.msg.body)
- self.chunk_handler = self.chunk_discard_trailer
-
- def chunk_body(self):
- """
- Asynchat just handed us what should be the body of a chunk of the
- body of a chunked message (sic). Save it, and prepare to move on
- to the next chunk.
- """
- self.logger.debug("Chunk body")
- self.msg.body += self.buffer
- self.buffer = []
- self.chunk_handler = self.chunk_discard_crlf
- self.set_terminator("\r\n")
-
- def chunk_discard_crlf(self):
- """
- Consume the CRLF that terminates a chunk, reinitialize chunk
- decoder to be ready for the next chunk.
- """
- self.logger.debug("Chunk CRLF")
- s = self.get_buffer()
- assert s == "", "%r: Expected chunk CRLF, got '%s'" % (self, s)
- self.chunk_handler = self.chunk_header
-
- def chunk_discard_trailer(self):
- """
- Consume chunk trailer, which should be empty, then (finally!) exit
- the chunk decoder and hand complete message off to the application.
- """
- self.logger.debug("Chunk trailer")
- s = self.get_buffer()
- assert s == "", "%r: Expected end of chunk trailers, got '%s'" % (self, s)
- self.chunk_handler = None
- self.handle_message()
-
- def handle_body(self):
- """
- Hand normal (not chunked) message off to the application.
- """
- self.msg.body = self.get_buffer()
- self.handle_message()
-
- def handle_error(self):
- """
- Asynchat (or asyncore, or somebody) raised an exception. See
- whether it's one we should just pass along, otherwise log a stack
- trace and close the stream.
- """
- self.timer.cancel()
- etype = sys.exc_info()[0]
- if etype in (SystemExit, rpki.async.ExitNow):
- raise
- if etype is not rpki.exceptions.HTTPClientAborted:
- self.logger.exception("Closing due to error")
- self.close()
-
- def handle_timeout(self):
- """
- Inactivity timer expired, close connection with prejudice.
- """
- self.logger.debug("Timeout, closing")
- self.close()
-
- def handle_close(self):
- """
- Wrapper around asynchat connection close handler, so that we can
- log the event, cancel timer, and so forth.
- """
- self.logger.debug("Close event in HTTP stream handler")
- self.timer.cancel()
- asynchat.async_chat.handle_close(self)
-
-@rpki.log.class_logger(logger)
-class http_server(http_stream):
- """
- HTTP server stream.
- """
-
- ## @var parse_type
- # Stream parser should look for incoming HTTP request messages.
- parse_type = http_request
-
- ## @var timeout
- # Use the default server timeout value set in the module header.
- timeout = default_server_timeout
-
- def __init__(self, sock, handlers):
- self.handlers = handlers
- self.received_content_type = None
- http_stream.__init__(self, sock = sock)
- self.expect_close = not want_persistent_server
- self.logger.debug("Starting")
-
- def handle_no_content_length(self):
- """
- Handle an incoming message that used neither chunking nor a
- Content-Length header (that is: this message will be the last one
- in this server stream). No special action required.
- """
- self.handle_message()
-
- def find_handler(self, path):
- """
- Helper method to search self.handlers.
- """
- for h in self.handlers:
- if path.startswith(h[0]):
- return h[1], h[2] if len(h) > 2 else (default_content_type,)
- return None, None
-
- def handle_message(self):
- """
- HTTP layer managed to deliver a complete HTTP request to
- us, figure out what to do with it. Check the command and
- Content-Type, look for a handler, and if everything looks right,
- pass the message body, path, and a reply callback to the handler.
- """
- self.logger.debug("Received request %r", self.msg)
- if not self.msg.persistent:
- self.expect_close = True
- handler, allowed_content_types = self.find_handler(self.msg.path)
- self.received_content_type = self.msg.headers["Content-Type"]
- error = None
- if self.msg.cmd != "POST":
- error = 501, "No handler for method %s" % self.msg.cmd
- elif self.received_content_type not in allowed_content_types:
- error = 415, "No handler for Content-Type %s" % self.received_content_type
- elif handler is None:
- error = 404, "No handler for URL %s" % self.msg.path
- if error is None:
- try:
- handler(self.msg.body, self.msg.path, self.send_reply)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- self.logger.exception("Unhandled exception while handling HTTP request")
- self.send_error(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
- else:
- self.send_error(code = error[0], reason = error[1])
-
- def send_error(self, code, reason):
- """
- Send an error response to this request.
- """
- self.send_message(code = code, reason = reason)
-
- def send_reply(self, code, body = None, reason = "OK"):
- """
- Send a reply to this request.
- """
- self.send_message(code = code, body = body, reason = reason)
-
- def send_message(self, code, reason = "OK", body = None):
- """
- Queue up reply message. If both parties agree that connection is
- persistant, and if no error occurred, restart this stream to
- listen for next message; otherwise, queue up a close event for
- this stream so it will shut down once the reply has been sent.
- """
- self.logger.debug("Sending response %s %s", code, reason)
- if code >= 400:
- self.expect_close = True
- msg = http_response(code = code, reason = reason, body = body,
- Content_Type = self.received_content_type,
- Connection = "Close" if self.expect_close else "Keep-Alive")
- self.push(msg.format())
- if self.expect_close:
- self.logger.debug("Closing")
- self.timer.cancel()
- self.close_when_done()
- else:
- self.logger.debug("Listening for next message")
- self.restart()
-
-@rpki.log.class_logger(logger)
-class http_listener(asyncore.dispatcher):
- """
- Listener for incoming HTTP connections.
- """
-
- def __repr__(self):
- try:
- status = (addr_to_string(self.addr),)
- except TypeError:
- status = ()
- return rpki.log.log_repr(self, *status)
-
- def __init__(self, handlers, addrinfo):
- self.logger = logging.LoggerAdapter(self.logger, dict(context = self))
- asyncore.dispatcher.__init__(self)
- self.handlers = handlers
- try:
- af, socktype, proto, canonname, sockaddr = addrinfo # pylint: disable=W0612
- self.create_socket(af, socktype)
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- try:
- self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- except AttributeError:
- pass
- if have_ipv6 and af == socket.AF_INET6:
- self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
- self.bind(sockaddr)
- self.listen(5)
- except Exception:
- self.logger.exception("Couldn't set up HTTP listener")
- self.close()
- for h in handlers:
- self.logger.debug("Handling %s", h[0])
-
- def handle_accept(self):
- """
- Asyncore says we have an incoming connection, spawn an http_server
- stream for it and pass along all of our handler data.
- """
- try:
- res = self.accept()
- if res is None:
- raise
- sock, addr = res # pylint: disable=W0633
- self.logger.debug("Accepting connection from %s", addr_to_string(addr))
- http_server(sock = sock, handlers = self.handlers)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- self.logger.exception("Unable to accept connection")
-
- def handle_error(self):
- """
- Asyncore signaled an error, pass it along or log it.
- """
- if sys.exc_info()[0] in (SystemExit, rpki.async.ExitNow):
- raise
- self.logger.exception("Error in HTTP listener")
-
-@rpki.log.class_logger(logger)
-class http_client(http_stream):
- """
- HTTP client stream.
- """
-
- ## @var parse_type
- # Stream parser should look for incoming HTTP response messages.
- parse_type = http_response
-
- ## @var timeout
- # Use the default client timeout value set in the module header.
- timeout = default_client_timeout
-
- ## @var state
- # Application layer connection state.
- state = None
-
- def __init__(self, queue, hostport):
- http_stream.__init__(self)
- self.logger.debug("Creating new connection to %s", addr_to_string(hostport))
- self.queue = queue
- self.host = hostport[0]
- self.port = hostport[1]
- self.set_state("opening")
- self.expect_close = not want_persistent_client
-
- def start(self):
- """
- Create socket and request a connection.
- """
- if not use_adns:
- self.logger.debug("Not using ADNS")
- self.gotaddrinfo([(socket.AF_INET, self.host)])
- elif self.host == "localhost":
- self.logger.debug("Bypassing DNS for localhost")
- self.gotaddrinfo(localhost_addrinfo())
- else:
- families = supported_address_families(enable_ipv6_clients)
- self.logger.debug("Starting ADNS lookup for %s in families %r", self.host, families)
- rpki.adns.getaddrinfo(self.gotaddrinfo, self.dns_error, self.host, families)
-
- def dns_error(self, e):
- """
- Handle DNS lookup errors. For now, just whack the connection.
- Undoubtedly we should do something better with diagnostics here.
- """
- self.handle_error()
-
- def gotaddrinfo(self, addrinfo):
- """
- Got address data from DNS, create socket and request connection.
- """
- try:
- self.af, self.address = random.choice(addrinfo)
- self.logger.debug("Connecting to AF %s host %s port %s addr %s", self.af, self.host, self.port, self.address)
- self.create_socket(self.af, socket.SOCK_STREAM)
- self.connect((self.address, self.port))
- if self.addr is None:
- self.addr = (self.host, self.port)
- self.update_timeout()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- self.handle_error()
-
- def handle_connect(self):
- """
- Asyncore says socket has connected.
- """
- self.logger.debug("Socket connected")
- self.set_state("idle")
- assert self.queue.client is self
- self.queue.send_request()
-
- def set_state(self, state):
- """
- Set HTTP client connection state.
- """
- self.logger.debug("State transition %s => %s", self.state, state)
- self.state = state
-
- def handle_no_content_length(self):
- """
- Handle response message that used neither chunking nor a
- Content-Length header (that is: this message will be the last one
- in this server stream). In this case we want to read until we
- reach the end of the data stream.
- """
- self.set_terminator(None)
-
- def send_request(self, msg):
- """
- Queue up request message and kickstart connection.
- """
- self.logger.debug("Sending request %r", msg)
- assert self.state == "idle", "%r: state should be idle, is %s" % (self, self.state)
- self.set_state("request-sent")
- msg.headers["Connection"] = "Close" if self.expect_close else "Keep-Alive"
- self.push(msg.format())
- self.restart()
-
- def handle_message(self):
- """
- Handle incoming HTTP response message. Make sure we're in a state
- where we expect to see such a message (and allow the mysterious
- empty messages that Apache sends during connection close, no idea
- what that is supposed to be about). If everybody agrees that the
- connection should stay open, put it into an idle state; otherwise,
- arrange for the stream to shut down.
- """
-
- self.logger.debug("Message received, state %s", self.state)
-
- if not self.msg.persistent:
- self.expect_close = True
-
- if self.state != "request-sent":
- if self.state == "closing":
- assert not self.msg.body
- self.logger.debug("Ignoring empty response received while closing")
- return
- raise rpki.exceptions.HTTPUnexpectedState("%r received message while in unexpected state %s" % (self, self.state))
-
- if self.expect_close:
- self.logger.debug("Closing")
- self.set_state("closing")
- self.close_when_done()
- else:
- self.logger.debug("Idling")
- self.set_state("idle")
- self.update_timeout()
-
- if self.msg.code != 200:
- errmsg = "HTTP request failed"
- if self.msg.code is not None:
- errmsg += " with status %s" % self.msg.code
- if self.msg.reason:
- errmsg += ", reason %s" % self.msg.reason
- if self.msg.body:
- errmsg += ", response %s" % self.msg.body
- raise rpki.exceptions.HTTPRequestFailed(errmsg)
- self.queue.return_result(self, self.msg, detach = self.expect_close)
-
- def handle_close(self):
- """
- Asyncore signaled connection close. If we were waiting for that
- to find the end of a response message, process the resulting
- message now; if we were waiting for the response to a request we
- sent, signal the error.
- """
- http_stream.handle_close(self)
- self.logger.debug("State %s", self.state)
- if self.get_terminator() is None:
- self.handle_body()
- elif self.state == "request-sent":
- raise rpki.exceptions.HTTPClientAborted("HTTP request aborted by close event")
- else:
- self.queue.detach(self)
-
- def handle_timeout(self):
- """
- Connection idle timer has expired. Shut down connection in any
- case, noisily if we weren't idle.
- """
- bad = self.state not in ("idle", "closing")
- if bad:
- self.logger.warning("Timeout while in state %s", self.state)
- http_stream.handle_timeout(self)
- if bad:
- try:
- raise rpki.exceptions.HTTPTimeout
- except: # pylint: disable=W0702
- self.handle_error()
- else:
- self.queue.detach(self)
-
- def handle_error(self):
- """
- Asyncore says something threw an exception. Log it, then shut
- down the connection and pass back the exception.
- """
- eclass, edata = sys.exc_info()[0:2]
- self.logger.warning("Error on HTTP client connection %s:%s %s %s", self.host, self.port, eclass, edata)
- http_stream.handle_error(self)
- self.queue.return_result(self, edata, detach = True)
-
-@rpki.log.class_logger(logger)
-class http_queue(object):
- """
- Queue of pending HTTP requests for a single destination. This class
- is very tightly coupled to http_client; http_client handles the HTTP
- stream itself, this class provides a slightly higher-level API.
- """
-
- def __repr__(self):
- return rpki.log.log_repr(self, addr_to_string(self.hostport))
-
- def __init__(self, hostport):
- self.logger = logging.LoggerAdapter(self.logger, dict(context = self))
- self.hostport = hostport
- self.client = None
- self.logger.debug("Created")
- self.queue = []
-
- def request(self, *requests):
- """
- Append http_request object(s) to this queue.
- """
- self.logger.debug("Adding requests %r", requests)
- self.queue.extend(requests)
-
- def restart(self):
- """
- Send next request for this queue, if we can. This may involve
- starting a new http_client stream, reusing an existing idle
- stream, or just ignoring this request if there's an active client
- stream already; in the last case, handling of the response (or
- exception, or timeout) for the query currently in progress will
- call this method when it's time to kick out the next query.
- """
- try:
- if self.client is None:
- self.client = http_client(self, self.hostport)
- self.logger.debug("Attached client %r", self.client)
- self.client.start()
- elif self.client.state == "idle":
- self.logger.debug("Sending request to existing client %r", self.client)
- self.send_request()
- else:
- self.logger.debug("Client %r exists in state %r", self.client, self.client.state)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- self.return_result(self.client, e, detach = True)
-
- def send_request(self):
- """
- Kick out the next query in this queue, if any.
- """
- if self.queue:
- self.client.send_request(self.queue[0])
-
- def detach(self, client_):
- """
- Detatch a client from this queue. Silently ignores attempting to
- detach a client that is not attached to this queue, to simplify
- handling of what otherwise would be a nasty set of race
- conditions.
- """
- if client_ is self.client:
- self.logger.debug("Detaching client %r", client_)
- self.client = None
-
- def return_result(self, client, result, detach = False): # pylint: disable=W0621
- """
- Client stream has returned a result, which we need to pass along
- to the original caller. Result may be either an HTTP response
- message or an exception. In either case, once we're done
- processing this result, kick off next message in the queue, if any.
- """
-
- if client is not self.client:
- self.logger.warning("Wrong client trying to return result. THIS SHOULD NOT HAPPEN. Dropping result %r", result)
- return
-
- if detach:
- self.detach(client)
-
- try:
- req = self.queue.pop(0)
- self.logger.debug("Dequeuing request %r", req)
- except IndexError:
- self.logger.warning("No caller. THIS SHOULD NOT HAPPEN. Dropping result %r", result)
- return
-
- assert isinstance(result, http_response) or isinstance(result, Exception)
-
- if isinstance(result, http_response):
- try:
- self.logger.debug("Returning result %r to caller", result)
- req.callback(result.body)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- result = e
-
- if isinstance(result, Exception):
- try:
- self.logger.warning("Returning exception %r to caller: %s", result, result)
- req.errback(result)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- self.logger.exception("Exception in exception callback, may have lost event chain")
-
- self.logger.debug("Queue: %r", self.queue)
-
- if self.queue:
- self.restart()
-
-## @var client_queues
-# Map of (host, port) tuples to http_queue objects.
-client_queues = {}
-
-def client(msg, url, callback, errback, content_type = default_content_type):
- """
- Open client HTTP connection, send a message, set up callbacks to
- handle response.
- """
-
- u = urlparse.urlparse(url)
-
- if (u.scheme not in ("", "http") or
- u.username is not None or
- u.password is not None or
- u.params != "" or
- u.query != "" or
- u.fragment != ""):
- raise rpki.exceptions.BadClientURL("Unusable URL %s" % url)
-
- logger.debug("Contacting %s", url)
-
- request = http_request(
- cmd = "POST",
- path = u.path,
- body = msg,
- callback = callback,
- errback = errback,
- Host = u.hostname,
- Content_Type = content_type)
-
- hostport = (u.hostname or "localhost", u.port or default_tcp_port)
-
- logger.debug("Created request %r for %s", request, addr_to_string(hostport))
- if hostport not in client_queues:
- client_queues[hostport] = http_queue(hostport)
- client_queues[hostport].request(request)
-
- # Defer connection attempt until after we've had time to process any
- # pending I/O events, in case connections have closed.
-
- logger.debug("Scheduling connection startup for %r", request)
- rpki.async.event_defer(client_queues[hostport].restart)
-
-def server(handlers, port, host = ""):
- """
- Run an HTTP server and wait (forever) for connections.
- """
-
- if not isinstance(handlers, (tuple, list)):
- handlers = (("/", handlers),)
-
- # Yes, this is sick. So is getaddrinfo() returning duplicate
- # records, which RedHat has the gall to claim is a feature.
- ai = []
- for af in supported_address_families(enable_ipv6_servers):
- try:
- if host:
- h = host
- elif have_ipv6 and af == socket.AF_INET6:
- h = "::"
- else:
- h = "0.0.0.0"
- for a in socket.getaddrinfo(h, port, af, socket.SOCK_STREAM):
- if a not in ai:
- ai.append(a)
- except socket.gaierror:
- pass
-
- for a in ai:
- http_listener(addrinfo = a, handlers = handlers)
-
- rpki.async.event_loop()
-
-class caller(object):
- """
- Handle client-side mechanics for protocols based on HTTP, CMS, and
- rpki.xml_utils. Calling sequence is intended to nest within
- rpki.async.sync_wrapper.
- """
-
- debug = False
-
- def __init__(self, proto, client_key, client_cert, server_ta, server_cert, url, debug = None):
- self.proto = proto
- self.client_key = client_key
- self.client_cert = client_cert
- self.server_ta = server_ta
- self.server_cert = server_cert
- self.url = url
- self.cms_timestamp = None
- if debug is not None:
- self.debug = debug
-
- def __call__(self, cb, eb, *pdus):
-
- def done(r_der):
- """
- Handle CMS-wrapped XML response message.
- """
- try:
- r_cms = self.proto.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap((self.server_ta, self.server_cert))
- self.cms_timestamp = r_cms.check_replay(self.cms_timestamp, self.url)
- if self.debug:
- print "<!-- Reply -->"
- print r_cms.pretty_print_content()
- cb(r_msg)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- eb(e)
-
- q_msg = self.proto.msg.query(*pdus)
- q_cms = self.proto.cms_msg()
- q_der = q_cms.wrap(q_msg, self.client_key, self.client_cert)
- if self.debug:
- print "<!-- Query -->"
- print q_cms.pretty_print_content()
-
- client(url = self.url, msg = q_der, callback = done, errback = eb)
diff --git a/rpki/http_simple.py b/rpki/http_simple.py
new file mode 100644
index 00000000..86b2eb5a
--- /dev/null
+++ b/rpki/http_simple.py
@@ -0,0 +1,138 @@
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+HTTP using Python standard libraries, for RPKI programs that don't
+need the full-blown rpki.http asynchronous code.
+"""
+
+import logging
+import httplib
+import urlparse
+import BaseHTTPServer
+
+logger = logging.getLogger(__name__)
+
+
+default_content_type = "application/x-rpki"
+
+
+class HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+ """
+ HTTP request handler simple RPKI servers.
+ """
+
+ rpki_handlers = ()
+
+ def do_POST(self):
+ try:
+ content_type = self.headers.get("Content-Type")
+ content_length = self.headers.get("Content-Length")
+ for handler_path, handler, handler_content_type in self.rpki_handlers:
+ if self.path.startswith(handler_path) and content_type in handler_content_type:
+ return handler(self,
+ self.rfile.read()
+ if content_length is None else
+ self.rfile.read(int(content_length)))
+ self.send_error(404, "No handler for path %s" % self.path)
+ except Exception, e:
+ logger.exception("Unhandled exception")
+ self.send_error(501, "Unhandled exception %s" % e)
+
+ def send_cms_response(self, der):
+ self.send_response(200)
+ self.send_header("Content-Type", default_content_type)
+ self.send_header("Content-Length", str(len(der)))
+ self.end_headers()
+ self.wfile.write(der)
+
+ def log_message(self, *args):
+ logger.info(*args, extra = dict(context = "%s:%s" % self.client_address))
+
+ def send_error(self, code, message = None):
+ # BaseHTTPRequestHandler.send_error() generates HTML error messages,
+ # which we don't want, so we override the method to suppress this.
+ self.send_response(code, message)
+ self.send_header("Content-Type", default_content_type)
+ self.send_header("Connection", "close")
+ self.end_headers()
+
+
+def server(handlers, port, host = ""):
+ """
+ Run an HTTP server and wait (forever) for connections.
+ """
+
+ if isinstance(handlers, (tuple, list)):
+ handlers = tuple(h[:3] if len(h) > 2 else (h[0], h[1], default_content_type)
+ for h in handlers)
+ else:
+ handlers = (("/", handlers, default_content_type),)
+
+ class RequestHandler(HTTPRequestHandler):
+ rpki_handlers = handlers
+
+ BaseHTTPServer.HTTPServer((host, port), RequestHandler).serve_forever()
+
+
+class BadURL(Exception):
+ "Bad contact URL"
+
+class RequestFailed(Exception):
+ "HTTP returned failure"
+
+class BadContentType(Exception):
+ "Wrong HTTP Content-Type"
+
+
+def client(proto_cms_msg, client_key, client_cert, server_ta, server_cert, url, q_msg,
+ debug = None, replay_track = None, client_crl = None, content_type = default_content_type):
+ """
+ Issue a single query and return the response, handling all the CMS and XML goo.
+ """
+
+ u = urlparse.urlparse(url)
+
+ if u.scheme not in ("", "http") or u.username or u.password or u.params or u.query or u.fragment:
+ raise BadURL("Unusable URL %s" % url)
+
+ q_cms = proto_cms_msg()
+ q_der = q_cms.wrap(q_msg, client_key, client_cert, client_crl)
+
+ if debug is not None:
+ debug.write("<!-- Query -->\n" + q_cms.pretty_print_content() + "\n")
+
+ http = httplib.HTTPConnection(u.hostname, u.port or httplib.HTTP_PORT)
+ http.request("POST", u.path, q_der, {"Content-Type" : content_type})
+ r = http.getresponse()
+
+ if r.status != 200:
+ raise RequestFailed("HTTP request failed with status %r reason %r" % (r.status, r.reason))
+
+ if r.getheader("Content-Type") != content_type:
+ raise BadContentType("HTTP Content-Type %r, expected %r" % (r.getheader("Content-Type"), content_type))
+
+ r_der = r.read()
+ r_cms = proto_cms_msg(DER = r_der)
+ r_msg = r_cms.unwrap((server_ta, server_cert))
+
+ if replay_track is not None:
+ replay_track.cms_timestamp = r_cms.check_replay(replay_track.cms_timestamp, url)
+
+ if debug is not None:
+ debug.write("<!-- Reply -->\n" + r_cms.pretty_print_content() + "\n")
+
+ return r_msg
diff --git a/rpki/ipaddrs.py b/rpki/ipaddrs.py
index 68b2d27d..5117585c 100644
--- a/rpki/ipaddrs.py
+++ b/rpki/ipaddrs.py
@@ -48,90 +48,99 @@ once, here, thus avoiding a lot of duplicate code elsewhere.
import socket, struct
class v4addr(long):
- """
- IPv4 address.
+ """
+ IPv4 address.
- Derived from long, but supports IPv4 print syntax.
- """
+ Derived from long, but supports IPv4 print syntax.
+ """
- bits = 32
- ipversion = 4
+ bits = 32
+ ipversion = 4
- def __new__(cls, x):
- """
- Construct a v4addr object.
- """
- if isinstance(x, unicode):
- x = x.encode("ascii")
- if isinstance(x, str):
- return cls.from_bytes(socket.inet_pton(socket.AF_INET, ".".join(str(int(i)) for i in x.split("."))))
- else:
- return long.__new__(cls, x)
-
- def to_bytes(self):
- """
- Convert a v4addr object to a raw byte string.
- """
- return struct.pack("!I", long(self))
+ def __new__(cls, x):
+ """
+ Construct a v4addr object.
+ """
- @classmethod
- def from_bytes(cls, x):
- """
- Convert from a raw byte string to a v4addr object.
- """
- return cls(struct.unpack("!I", x)[0])
+ if isinstance(x, unicode):
+ x = x.encode("ascii")
+ if isinstance(x, str):
+ return cls.from_bytes(socket.inet_pton(socket.AF_INET, ".".join(str(int(i)) for i in x.split("."))))
+ else:
+ return long.__new__(cls, x)
- def __str__(self):
- """
- Convert a v4addr object to string format.
- """
- return socket.inet_ntop(socket.AF_INET, self.to_bytes())
+ def to_bytes(self):
+ """
+ Convert a v4addr object to a raw byte string.
+ """
-class v6addr(long):
- """
- IPv6 address.
+ return struct.pack("!I", long(self))
- Derived from long, but supports IPv6 print syntax.
- """
+ @classmethod
+ def from_bytes(cls, x):
+ """
+ Convert from a raw byte string to a v4addr object.
+ """
- bits = 128
- ipversion = 6
+ return cls(struct.unpack("!I", x)[0])
- def __new__(cls, x):
- """
- Construct a v6addr object.
- """
- if isinstance(x, unicode):
- x = x.encode("ascii")
- if isinstance(x, str):
- return cls.from_bytes(socket.inet_pton(socket.AF_INET6, x))
- else:
- return long.__new__(cls, x)
-
- def to_bytes(self):
- """
- Convert a v6addr object to a raw byte string.
- """
- return struct.pack("!QQ", long(self) >> 64, long(self) & 0xFFFFFFFFFFFFFFFF)
+ def __str__(self):
+ """
+ Convert a v4addr object to string format.
+ """
- @classmethod
- def from_bytes(cls, x):
+ return socket.inet_ntop(socket.AF_INET, self.to_bytes())
+
+class v6addr(long):
"""
- Convert from a raw byte string to a v6addr object.
+ IPv6 address.
+
+ Derived from long, but supports IPv6 print syntax.
"""
- x = struct.unpack("!QQ", x)
- return cls((x[0] << 64) | x[1])
- def __str__(self):
+ bits = 128
+ ipversion = 6
+
+ def __new__(cls, x):
+ """
+ Construct a v6addr object.
+ """
+
+ if isinstance(x, unicode):
+ x = x.encode("ascii")
+ if isinstance(x, str):
+ return cls.from_bytes(socket.inet_pton(socket.AF_INET6, x))
+ else:
+ return long.__new__(cls, x)
+
+ def to_bytes(self):
+ """
+ Convert a v6addr object to a raw byte string.
+ """
+
+ return struct.pack("!QQ", long(self) >> 64, long(self) & 0xFFFFFFFFFFFFFFFF)
+
+ @classmethod
+ def from_bytes(cls, x):
+ """
+ Convert from a raw byte string to a v6addr object.
+ """
+
+ x = struct.unpack("!QQ", x)
+ return cls((x[0] << 64) | x[1])
+
+ def __str__(self):
+ """
+ Convert a v6addr object to string format.
+ """
+
+ return socket.inet_ntop(socket.AF_INET6, self.to_bytes())
+
+def parse(s):
"""
- Convert a v6addr object to string format.
+ Parse a string as either an IPv4 or IPv6 address, and return object of appropriate class.
"""
- return socket.inet_ntop(socket.AF_INET6, self.to_bytes())
-def parse(s):
- """
- Parse a string as either an IPv4 or IPv6 address, and return object of appropriate class.
- """
- if isinstance(s, unicode):
- s = s.encode("ascii")
- return v6addr(s) if ":" in s else v4addr(s)
+ if isinstance(s, unicode):
+ s = s.encode("ascii")
+ return v6addr(s) if ":" in s else v4addr(s)
diff --git a/rpki/irdb/__init__.py b/rpki/irdb/__init__.py
index 7f3b880e..64b0ea28 100644
--- a/rpki/irdb/__init__.py
+++ b/rpki/irdb/__init__.py
@@ -19,8 +19,5 @@ Django really wants its models packaged up in a "models" module within a
Python package, so humor it.
"""
-# pylint: disable=W0401
-
-from rpki.irdb.models import *
from rpki.irdb.zookeeper import Zookeeper
from rpki.irdb.router import DBContextRouter, database
diff --git a/rpki/irdb/migrations/0001_initial.py b/rpki/irdb/migrations/0001_initial.py
new file mode 100644
index 00000000..f2d34d8b
--- /dev/null
+++ b/rpki/irdb/migrations/0001_initial.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import rpki.irdb.models
+import rpki.fields
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='BSC',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('pkcs10', rpki.fields.PKCS10Field()),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Child',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('ta', rpki.fields.CertificateField()),
+ ('valid_until', rpki.fields.SundialField()),
+ ('name', models.TextField(null=True, blank=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ChildASN',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('start_as', models.BigIntegerField()),
+ ('end_as', models.BigIntegerField()),
+ ('child', models.ForeignKey(related_name='asns', to='irdb.Child')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ChildNet',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('start_ip', models.CharField(max_length=40)),
+ ('end_ip', models.CharField(max_length=40)),
+ ('version', rpki.fields.EnumField(choices=[(4, b'IPv4'), (6, b'IPv6')])),
+ ('child', models.ForeignKey(related_name='address_ranges', to='irdb.Child')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Client',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('ta', rpki.fields.CertificateField()),
+ ('sia_base', models.TextField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EECertificateRequest',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('valid_until', rpki.fields.SundialField()),
+ ('pkcs10', rpki.fields.PKCS10Field()),
+ ('gski', models.CharField(max_length=27)),
+ ('cn', models.CharField(max_length=64)),
+ ('sn', models.CharField(max_length=64)),
+ ('eku', models.TextField(null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EECertificateRequestASN',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('start_as', models.BigIntegerField()),
+ ('end_as', models.BigIntegerField()),
+ ('ee_certificate_request', models.ForeignKey(related_name='asns', to='irdb.EECertificateRequest')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='EECertificateRequestNet',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('start_ip', models.CharField(max_length=40)),
+ ('end_ip', models.CharField(max_length=40)),
+ ('version', rpki.fields.EnumField(choices=[(4, b'IPv4'), (6, b'IPv6')])),
+ ('ee_certificate_request', models.ForeignKey(related_name='address_ranges', to='irdb.EECertificateRequest')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='GhostbusterRequest',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('vcard', models.TextField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='HostedCA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Parent',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('ta', rpki.fields.CertificateField()),
+ ('service_uri', models.CharField(max_length=255)),
+ ('parent_handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('child_handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('repository_type', rpki.fields.EnumField(choices=[(1, b'none'), (2, b'offer'), (3, b'referral')])),
+ ('referrer', rpki.irdb.models.HandleField(max_length=120, null=True, blank=True)),
+ ('referral_authorization', rpki.irdb.models.SignedReferralField(null=True)),
+ ('asn_resources', models.TextField(blank=True)),
+ ('ipv4_resources', models.TextField(blank=True)),
+ ('ipv6_resources', models.TextField(blank=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Referral',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('private_key', rpki.fields.RSAPrivateKeyField()),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='Repository',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('ta', rpki.fields.CertificateField()),
+ ('client_handle', rpki.irdb.models.HandleField(max_length=120)),
+ ('service_uri', models.CharField(max_length=255)),
+ ('sia_base', models.TextField()),
+ ('rrdp_notification_uri', models.TextField(null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ResourceHolderCA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('private_key', rpki.fields.RSAPrivateKeyField()),
+ ('latest_crl', rpki.fields.CRLField()),
+ ('next_serial', models.BigIntegerField(default=1)),
+ ('next_crl_number', models.BigIntegerField(default=1)),
+ ('last_crl_update', rpki.fields.SundialField()),
+ ('next_crl_update', rpki.fields.SundialField()),
+ ('handle', rpki.irdb.models.HandleField(unique=True, max_length=120)),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ResourceHolderRevocation',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('serial', models.BigIntegerField()),
+ ('revoked', rpki.fields.SundialField()),
+ ('expires', rpki.fields.SundialField()),
+ ('issuer', models.ForeignKey(related_name='revocations', to='irdb.ResourceHolderCA')),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ROARequest',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('asn', models.BigIntegerField()),
+ ('issuer', models.ForeignKey(related_name='roa_requests', to='irdb.ResourceHolderCA')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ROARequestPrefix',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('version', rpki.fields.EnumField(choices=[(4, b'IPv4'), (6, b'IPv6')])),
+ ('prefix', models.CharField(max_length=40)),
+ ('prefixlen', models.PositiveSmallIntegerField()),
+ ('max_prefixlen', models.PositiveSmallIntegerField()),
+ ('roa_request', models.ForeignKey(related_name='prefixes', to='irdb.ROARequest')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ServerCA',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('private_key', rpki.fields.RSAPrivateKeyField()),
+ ('latest_crl', rpki.fields.CRLField()),
+ ('next_serial', models.BigIntegerField(default=1)),
+ ('next_crl_number', models.BigIntegerField(default=1)),
+ ('last_crl_update', rpki.fields.SundialField()),
+ ('next_crl_update', rpki.fields.SundialField()),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.CreateModel(
+ name='ServerEE',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('certificate', rpki.fields.CertificateField()),
+ ('private_key', rpki.fields.RSAPrivateKeyField()),
+ ('purpose', rpki.fields.EnumField(choices=[(1, b'rpkid'), (2, b'pubd'), (3, b'irdbd'), (4, b'irbe')])),
+ ('issuer', models.ForeignKey(related_name='ee_certificates', to='irdb.ServerCA')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='ServerRevocation',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('serial', models.BigIntegerField()),
+ ('revoked', rpki.fields.SundialField()),
+ ('expires', rpki.fields.SundialField()),
+ ('issuer', models.ForeignKey(related_name='revocations', to='irdb.ServerCA')),
+ ],
+ options={
+ 'abstract': False,
+ },
+ ),
+ migrations.AddField(
+ model_name='repository',
+ name='issuer',
+ field=models.ForeignKey(related_name='repositories', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='repository',
+ name='parent',
+ field=models.OneToOneField(related_name='repository', to='irdb.Parent'),
+ ),
+ migrations.AddField(
+ model_name='referral',
+ name='issuer',
+ field=models.OneToOneField(related_name='referral_certificate', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='parent',
+ name='issuer',
+ field=models.ForeignKey(related_name='parents', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='hostedca',
+ name='hosted',
+ field=models.OneToOneField(related_name='hosted_by', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='hostedca',
+ name='issuer',
+ field=models.ForeignKey(to='irdb.ServerCA'),
+ ),
+ migrations.AddField(
+ model_name='ghostbusterrequest',
+ name='issuer',
+ field=models.ForeignKey(related_name='ghostbuster_requests', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='ghostbusterrequest',
+ name='parent',
+ field=models.ForeignKey(related_name='ghostbuster_requests', to='irdb.Parent', null=True),
+ ),
+ migrations.AddField(
+ model_name='eecertificaterequest',
+ name='issuer',
+ field=models.ForeignKey(related_name='ee_certificate_requests', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='client',
+ name='issuer',
+ field=models.ForeignKey(related_name='clients', to='irdb.ServerCA'),
+ ),
+ migrations.AddField(
+ model_name='child',
+ name='issuer',
+ field=models.ForeignKey(related_name='children', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AddField(
+ model_name='bsc',
+ name='issuer',
+ field=models.ForeignKey(related_name='bscs', to='irdb.ResourceHolderCA'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='serverrevocation',
+ unique_together=set([('issuer', 'serial')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='serveree',
+ unique_together=set([('issuer', 'purpose')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='roarequestprefix',
+ unique_together=set([('roa_request', 'version', 'prefix', 'prefixlen', 'max_prefixlen')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='resourceholderrevocation',
+ unique_together=set([('issuer', 'serial')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='repository',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='parent',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='hostedca',
+ unique_together=set([('issuer', 'hosted')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='eecertificaterequestnet',
+ unique_together=set([('ee_certificate_request', 'start_ip', 'end_ip', 'version')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='eecertificaterequestasn',
+ unique_together=set([('ee_certificate_request', 'start_as', 'end_as')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='eecertificaterequest',
+ unique_together=set([('issuer', 'gski')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='client',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='childnet',
+ unique_together=set([('child', 'start_ip', 'end_ip', 'version')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='childasn',
+ unique_together=set([('child', 'start_as', 'end_as')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='child',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ migrations.AlterUniqueTogether(
+ name='bsc',
+ unique_together=set([('issuer', 'handle')]),
+ ),
+ ]
diff --git a/rpki/irdb/migrations/__init__.py b/rpki/irdb/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/irdb/migrations/__init__.py
diff --git a/rpki/irdb/models.py b/rpki/irdb/models.py
index 6fa48c59..55e3012f 100644
--- a/rpki/irdb/models.py
+++ b/rpki/irdb/models.py
@@ -24,7 +24,7 @@ to be usable by command line programs and other scripts, not just
Django GUI code, so be careful.
"""
-# pylint: disable=W0232,C1001
+# pylint: disable=W5101,W5103
import django.db.models
import rpki.x509
@@ -32,7 +32,8 @@ import rpki.sundial
import rpki.resource_set
import socket
import rpki.POW
-from south.modelsinspector import add_introspection_rules
+
+from rpki.fields import EnumField, SundialField, CertificateField, DERField, RSAPrivateKeyField, CRLField, PKCS10Field
## @var ip_version_choices
# Choice argument for fields implementing IP version numbers.
@@ -61,586 +62,491 @@ ee_certificate_lifetime = rpki.sundial.timedelta(days = 60)
###
-# Field types
+# Field classes
class HandleField(django.db.models.CharField):
- """
- A handle field type.
- """
-
- description = 'A "handle" in one of the RPKI protocols'
-
- def __init__(self, *args, **kwargs):
- kwargs["max_length"] = 120
- django.db.models.CharField.__init__(self, *args, **kwargs)
-
-class EnumField(django.db.models.PositiveSmallIntegerField):
- """
- An enumeration type that uses strings in Python and small integers
- in SQL.
- """
-
- description = "An enumeration type"
-
- __metaclass__ = django.db.models.SubfieldBase
-
- def __init__(self, *args, **kwargs):
- if isinstance(kwargs.get("choices"), (tuple, list)) and isinstance(kwargs["choices"][0], str):
- kwargs["choices"] = tuple(enumerate(kwargs["choices"], 1))
- django.db.models.PositiveSmallIntegerField.__init__(self, *args, **kwargs)
- self.enum_i2s = dict(self.flatchoices)
- self.enum_s2i = dict((v, k) for k, v in self.flatchoices)
-
- def to_python(self, value):
- return self.enum_i2s.get(value, value)
-
- def get_prep_value(self, value):
- return self.enum_s2i.get(value, value)
-
-class SundialField(django.db.models.DateTimeField):
- """
- A field type for our customized datetime objects.
- """
- __metaclass__ = django.db.models.SubfieldBase
-
- description = "A datetime type using our customized datetime objects"
-
- def to_python(self, value):
- if isinstance(value, rpki.sundial.pydatetime.datetime):
- return rpki.sundial.datetime.from_datetime(
- django.db.models.DateTimeField.to_python(self, value))
- else:
- return value
-
- def get_prep_value(self, value):
- if isinstance(value, rpki.sundial.datetime):
- return value.to_datetime()
- else:
- return value
-
-
-class DERField(django.db.models.Field):
- """
- Field types for DER objects.
- """
-
- __metaclass__ = django.db.models.SubfieldBase
-
- def __init__(self, *args, **kwargs):
- kwargs["serialize"] = False
- kwargs["blank"] = True
- kwargs["default"] = None
- django.db.models.Field.__init__(self, *args, **kwargs)
-
- def db_type(self, connection):
- if connection.settings_dict['ENGINE'] == "django.db.backends.posgresql":
- return "bytea"
- else:
- return "BLOB"
-
- def to_python(self, value):
- assert value is None or isinstance(value, (self.rpki_type, str))
- if isinstance(value, str):
- return self.rpki_type(DER = value)
- else:
- return value
-
- def get_prep_value(self, value):
- assert value is None or isinstance(value, (self.rpki_type, str))
- if isinstance(value, self.rpki_type):
- return value.get_DER()
- else:
- return value
-
-class CertificateField(DERField):
- description = "X.509 certificate"
- rpki_type = rpki.x509.X509
-
-class RSAKeyField(DERField):
- description = "RSA keypair"
- rpki_type = rpki.x509.RSA
-
-class CRLField(DERField):
- description = "Certificate Revocation List"
- rpki_type = rpki.x509.CRL
-
-class PKCS10Field(DERField):
- description = "PKCS #10 certificate request"
- rpki_type = rpki.x509.PKCS10
+ """
+ A handle field class. Replace this with SlugField?
+ """
+
+ description = 'A "handle" in one of the RPKI protocols'
+
+ def __init__(self, *args, **kwargs):
+ kwargs["max_length"] = 120
+ django.db.models.CharField.__init__(self, *args, **kwargs)
+
class SignedReferralField(DERField):
- description = "CMS signed object containing XML"
- rpki_type = rpki.x509.SignedReferral
+ description = "CMS signed object containing XML"
+ rpki_type = rpki.x509.SignedReferral
# Custom managers
class CertificateManager(django.db.models.Manager):
- def get_or_certify(self, **kwargs):
- """
- Sort of like .get_or_create(), but for models containing
- certificates which need to be generated based on other fields.
-
- Takes keyword arguments like .get(), checks for existing object.
- If none, creates a new one; if found an existing object but some
- of the non-key fields don't match, updates the existing object.
- Runs certification method for new or updated objects. Returns a
- tuple consisting of the object and a boolean indicating whether
- anything has changed.
- """
+ def get_or_certify(self, **kwargs):
+ """
+ Sort of like .get_or_create(), but for models containing
+ certificates which need to be generated based on other fields.
+
+ Takes keyword arguments like .get(), checks for existing object.
+ If none, creates a new one; if found an existing object but some
+ of the non-key fields don't match, updates the existing object.
+ Runs certification method for new or updated objects. Returns a
+ tuple consisting of the object and a boolean indicating whether
+ anything has changed.
+ """
- changed = False
+ # pylint: disable=E1101
- try:
- obj = self.get(**self._get_or_certify_keys(kwargs))
+ changed = False
- except self.model.DoesNotExist:
- obj = self.model(**kwargs)
- changed = True
+ try:
+ obj = self.get(**self._get_or_certify_keys(kwargs))
- else:
- for k in kwargs:
- if getattr(obj, k) != kwargs[k]:
- setattr(obj, k, kwargs[k])
- changed = True
+ except self.model.DoesNotExist:
+ obj = self.model(**kwargs)
+ changed = True
- if changed:
- obj.avow()
- obj.save()
+ else:
+ for k in kwargs:
+ if getattr(obj, k) != kwargs[k]:
+ setattr(obj, k, kwargs[k])
+ changed = True
- return obj, changed
+ if changed:
+ obj.avow()
+ obj.save()
- def _get_or_certify_keys(self, kwargs):
- assert len(self.model._meta.unique_together) == 1
- return dict((k, kwargs[k]) for k in self.model._meta.unique_together[0])
+ return obj, changed
+
+ def _get_or_certify_keys(self, kwargs):
+ # pylint: disable=E1101,W0212
+ assert len(self.model._meta.unique_together) == 1
+ return dict((k, kwargs[k]) for k in self.model._meta.unique_together[0])
class ResourceHolderCAManager(CertificateManager):
- def _get_or_certify_keys(self, kwargs):
- return { "handle" : kwargs["handle"] }
+ def _get_or_certify_keys(self, kwargs):
+ return { "handle" : kwargs["handle"] }
class ServerCAManager(CertificateManager):
- def _get_or_certify_keys(self, kwargs):
- return { "pk" : 1 }
+ def _get_or_certify_keys(self, kwargs):
+ return { "pk" : 1 }
class ResourceHolderEEManager(CertificateManager):
- def _get_or_certify_keys(self, kwargs):
- return { "issuer" : kwargs["issuer"] }
+ def _get_or_certify_keys(self, kwargs):
+ return { "issuer" : kwargs["issuer"] }
###
class CA(django.db.models.Model):
- certificate = CertificateField()
- private_key = RSAKeyField()
- latest_crl = CRLField()
-
- # Might want to bring these into line with what rpkid does. Current
- # variables here were chosen to map easily to what OpenSSL command
- # line tool was keeping on disk.
-
- next_serial = django.db.models.BigIntegerField(default = 1)
- next_crl_number = django.db.models.BigIntegerField(default = 1)
- last_crl_update = SundialField()
- next_crl_update = SundialField()
-
- class Meta:
- abstract = True
-
- def avow(self):
- if self.private_key is None:
- self.private_key = rpki.x509.RSA.generate(quiet = True)
- now = rpki.sundial.now()
- notAfter = now + ca_certificate_lifetime
- self.certificate = rpki.x509.X509.bpki_self_certify(
- keypair = self.private_key,
- subject_name = self.subject_name,
- serial = self.next_serial,
- now = now,
- notAfter = notAfter)
- self.next_serial += 1
- self.generate_crl()
- return self.certificate
-
- def certify(self, subject_name, subject_key, validity_interval, is_ca, pathLenConstraint = None):
- now = rpki.sundial.now()
- notAfter = now + validity_interval
- result = self.certificate.bpki_certify(
- keypair = self.private_key,
- subject_name = subject_name,
- subject_key = subject_key,
- serial = self.next_serial,
- now = now,
- notAfter = notAfter,
- is_ca = is_ca,
- pathLenConstraint = pathLenConstraint)
- self.next_serial += 1
- return result
-
- def revoke(self, cert):
- Revocation.objects.create(
- issuer = self,
- revoked = rpki.sundial.now(),
- serial = cert.certificate.getSerial(),
- expires = cert.certificate.getNotAfter() + crl_interval)
- cert.delete()
- self.generate_crl()
-
- def generate_crl(self):
- now = rpki.sundial.now()
- self.revocations.filter(expires__lt = now).delete()
- revoked = [(r.serial, r.revoked) for r in self.revocations.all()]
- self.latest_crl = rpki.x509.CRL.generate(
- keypair = self.private_key,
- issuer = self.certificate,
- serial = self.next_crl_number,
- thisUpdate = now,
- nextUpdate = now + crl_interval,
- revokedCertificates = revoked)
- self.last_crl_update = now
- self.next_crl_update = now + crl_interval
- self.next_crl_number += 1
+ certificate = CertificateField()
+ private_key = RSAPrivateKeyField()
+ latest_crl = CRLField()
+
+ # Might want to bring these into line with what rpkid does. Current
+ # variables here were chosen to map easily to what OpenSSL command
+ # line tool was keeping on disk.
+
+ next_serial = django.db.models.BigIntegerField(default = 1)
+ next_crl_number = django.db.models.BigIntegerField(default = 1)
+ last_crl_update = SundialField()
+ next_crl_update = SundialField()
+
+ class Meta:
+ abstract = True
+
+ @property
+ def subject_name(self):
+ raise NotImplementedError
+
+ def avow(self):
+ if self.private_key is None:
+ self.private_key = rpki.x509.RSA.generate(quiet = True)
+ now = rpki.sundial.now()
+ notAfter = now + ca_certificate_lifetime
+ self.certificate = rpki.x509.X509.bpki_self_certify(
+ keypair = self.private_key,
+ subject_name = self.subject_name,
+ serial = self.next_serial,
+ now = now,
+ notAfter = notAfter)
+ self.next_serial += 1
+ self.generate_crl()
+ return self.certificate
+
+ def certify(self, subject_name, subject_key, validity_interval, is_ca, pathLenConstraint = None):
+ now = rpki.sundial.now()
+ notAfter = now + validity_interval
+ result = self.certificate.bpki_certify(
+ keypair = self.private_key,
+ subject_name = subject_name,
+ subject_key = subject_key,
+ serial = self.next_serial,
+ now = now,
+ notAfter = notAfter,
+ is_ca = is_ca,
+ pathLenConstraint = pathLenConstraint)
+ self.next_serial += 1
+ return result
+
+ def revoke(self, cert):
+ Revocation.objects.create(
+ issuer = self,
+ revoked = rpki.sundial.now(),
+ serial = cert.certificate.getSerial(),
+ expires = cert.certificate.getNotAfter() + crl_interval)
+ cert.delete()
+ self.generate_crl()
+
+ def generate_crl(self):
+ now = rpki.sundial.now()
+ self.revocations.filter(expires__lt = now).delete()
+ revoked = [(r.serial, r.revoked) for r in self.revocations.all()]
+ self.latest_crl = rpki.x509.CRL.generate(
+ keypair = self.private_key,
+ issuer = self.certificate,
+ serial = self.next_crl_number,
+ thisUpdate = now,
+ nextUpdate = now + crl_interval,
+ revokedCertificates = revoked)
+ self.last_crl_update = now
+ self.next_crl_update = now + crl_interval
+ self.next_crl_number += 1
class ServerCA(CA):
- objects = ServerCAManager()
+ objects = ServerCAManager()
- def __unicode__(self):
- return ""
+ def __unicode__(self):
+ return ""
- @property
- def subject_name(self):
- if self.certificate is not None:
- return self.certificate.getSubject()
- else:
- return rpki.x509.X501DN.from_cn("%s BPKI server CA" % socket.gethostname())
+ @property
+ def subject_name(self):
+ if self.certificate is not None:
+ return self.certificate.getSubject()
+ else:
+ return rpki.x509.X501DN.from_cn("%s BPKI server CA" % socket.gethostname())
class ResourceHolderCA(CA):
- handle = HandleField(unique = True)
- objects = ResourceHolderCAManager()
+ handle = HandleField(unique = True)
+ objects = ResourceHolderCAManager()
- def __unicode__(self):
- return self.handle
+ def __unicode__(self):
+ return self.handle
- @property
- def subject_name(self):
- if self.certificate is not None:
- return self.certificate.getSubject()
- else:
- return rpki.x509.X501DN.from_cn("%s BPKI resource CA" % self.handle)
+ @property
+ def subject_name(self):
+ if self.certificate is not None:
+ return self.certificate.getSubject()
+ else:
+ return rpki.x509.X501DN.from_cn("%s BPKI resource CA" % self.handle)
class Certificate(django.db.models.Model):
- certificate = CertificateField()
- objects = CertificateManager()
+ certificate = CertificateField()
+ objects = CertificateManager()
- class Meta:
- abstract = True
- unique_together = ("issuer", "handle")
+ class Meta:
+ abstract = True
+ unique_together = ("issuer", "handle")
- def revoke(self):
- self.issuer.revoke(self)
+ def revoke(self):
+ self.issuer.revoke(self) # pylint: disable=E1101
class CrossCertification(Certificate):
- handle = HandleField()
- ta = CertificateField()
+ handle = HandleField()
+ ta = CertificateField() # pylint: disable=C0103
- class Meta:
- abstract = True
+ class Meta:
+ abstract = True
- def avow(self):
- self.certificate = self.issuer.certify(
- subject_name = self.ta.getSubject(),
- subject_key = self.ta.getPublicKey(),
- validity_interval = ee_certificate_lifetime,
- is_ca = True,
- pathLenConstraint = 0)
+ def avow(self):
+ # pylint: disable=E1101
+ self.certificate = self.issuer.certify(
+ subject_name = self.ta.getSubject(),
+ subject_key = self.ta.getPublicKey(),
+ validity_interval = ee_certificate_lifetime,
+ is_ca = True,
+ pathLenConstraint = 0)
- def __unicode__(self):
- return self.handle
+ def __unicode__(self):
+ return self.handle
class HostedCA(Certificate):
- issuer = django.db.models.ForeignKey(ServerCA)
- hosted = django.db.models.OneToOneField(ResourceHolderCA, related_name = "hosted_by")
+ issuer = django.db.models.ForeignKey(ServerCA)
+ hosted = django.db.models.OneToOneField(ResourceHolderCA, related_name = "hosted_by")
- def avow(self):
- self.certificate = self.issuer.certify(
- subject_name = self.hosted.certificate.getSubject(),
- subject_key = self.hosted.certificate.getPublicKey(),
- validity_interval = ee_certificate_lifetime,
- is_ca = True,
- pathLenConstraint = 1)
+ def avow(self):
+ self.certificate = self.issuer.certify(
+ subject_name = self.hosted.certificate.getSubject(),
+ subject_key = self.hosted.certificate.getPublicKey(),
+ validity_interval = ee_certificate_lifetime,
+ is_ca = True,
+ pathLenConstraint = 1)
- class Meta:
- unique_together = ("issuer", "hosted")
+ class Meta:
+ unique_together = ("issuer", "hosted")
- def __unicode__(self):
- return self.hosted.handle
+ def __unicode__(self):
+ return self.hosted.handle
class Revocation(django.db.models.Model):
- serial = django.db.models.BigIntegerField()
- revoked = SundialField()
- expires = SundialField()
+ serial = django.db.models.BigIntegerField()
+ revoked = SundialField()
+ expires = SundialField()
- class Meta:
- abstract = True
- unique_together = ("issuer", "serial")
+ class Meta:
+ abstract = True
+ unique_together = ("issuer", "serial")
class ServerRevocation(Revocation):
- issuer = django.db.models.ForeignKey(ServerCA, related_name = "revocations")
+ issuer = django.db.models.ForeignKey(ServerCA, related_name = "revocations")
class ResourceHolderRevocation(Revocation):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "revocations")
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "revocations")
class EECertificate(Certificate):
- private_key = RSAKeyField()
+ private_key = RSAPrivateKeyField()
- class Meta:
- abstract = True
+ class Meta:
+ abstract = True
- def avow(self):
- if self.private_key is None:
- self.private_key = rpki.x509.RSA.generate(quiet = True)
- self.certificate = self.issuer.certify(
- subject_name = self.subject_name,
- subject_key = self.private_key.get_public(),
- validity_interval = ee_certificate_lifetime,
- is_ca = False)
+ def avow(self):
+ # pylint: disable=E1101
+ if self.private_key is None:
+ self.private_key = rpki.x509.RSA.generate(quiet = True)
+ self.certificate = self.issuer.certify(
+ subject_name = self.subject_name,
+ subject_key = self.private_key.get_public(),
+ validity_interval = ee_certificate_lifetime,
+ is_ca = False)
class ServerEE(EECertificate):
- issuer = django.db.models.ForeignKey(ServerCA, related_name = "ee_certificates")
- purpose = EnumField(choices = ("rpkid", "pubd", "irdbd", "irbe"))
+ issuer = django.db.models.ForeignKey(ServerCA, related_name = "ee_certificates")
+ purpose = EnumField(choices = ("rpkid", "pubd", "irdbd", "irbe"))
- class Meta:
- unique_together = ("issuer", "purpose")
+ class Meta:
+ unique_together = ("issuer", "purpose")
- @property
- def subject_name(self):
- return rpki.x509.X501DN.from_cn("%s BPKI %s EE" % (socket.gethostname(),
- self.get_purpose_display()))
+ @property
+ def subject_name(self):
+ return rpki.x509.X501DN.from_cn("%s BPKI %s EE" % (socket.gethostname(),
+ self.get_purpose_display()))
class Referral(EECertificate):
- issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "referral_certificate")
- objects = ResourceHolderEEManager()
-
- @property
- def subject_name(self):
- return rpki.x509.X501DN.from_cn("%s BPKI Referral EE" % self.issuer.handle)
+ issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "referral_certificate")
+ objects = ResourceHolderEEManager()
-class Turtle(django.db.models.Model):
- service_uri = django.db.models.CharField(max_length = 255)
-
-class Rootd(EECertificate, Turtle):
- issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "rootd")
- objects = ResourceHolderEEManager()
-
- @property
- def subject_name(self):
- return rpki.x509.X501DN.from_cn("%s BPKI rootd EE" % self.issuer.handle)
+ @property
+ def subject_name(self):
+ return rpki.x509.X501DN.from_cn("%s BPKI Referral EE" % self.issuer.handle)
class BSC(Certificate):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "bscs")
- handle = HandleField()
- pkcs10 = PKCS10Field()
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "bscs")
+ handle = HandleField()
+ pkcs10 = PKCS10Field()
- def avow(self):
- self.certificate = self.issuer.certify(
- subject_name = self.pkcs10.getSubject(),
- subject_key = self.pkcs10.getPublicKey(),
- validity_interval = ee_certificate_lifetime,
- is_ca = False)
+ def avow(self):
+ # pylint: disable=E1101
+ self.certificate = self.issuer.certify(
+ subject_name = self.pkcs10.getSubject(),
+ subject_key = self.pkcs10.getPublicKey(),
+ validity_interval = ee_certificate_lifetime,
+ is_ca = False)
- def __unicode__(self):
- return self.handle
+ def __unicode__(self):
+ return self.handle
class ResourceSet(django.db.models.Model):
- valid_until = SundialField()
+ valid_until = SundialField()
+
+ class Meta:
+ abstract = True
- class Meta:
- abstract = True
+ def _select_resource_bag(self):
+ return (), ()
- @property
- def resource_bag(self):
- raw_asn, raw_net = self._select_resource_bag()
- asns = rpki.resource_set.resource_set_as.from_django(
- (a.start_as, a.end_as) for a in raw_asn)
- ipv4 = rpki.resource_set.resource_set_ipv4.from_django(
- (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv4")
- ipv6 = rpki.resource_set.resource_set_ipv6.from_django(
- (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv6")
- return rpki.resource_set.resource_bag(
- valid_until = self.valid_until, asn = asns, v4 = ipv4, v6 = ipv6)
+ @property
+ def resource_bag(self):
+ raw_asn, raw_net = self._select_resource_bag()
+ asns = rpki.resource_set.resource_set_as.from_django(
+ (a.start_as, a.end_as) for a in raw_asn)
+ ipv4 = rpki.resource_set.resource_set_ipv4.from_django(
+ (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv4")
+ ipv6 = rpki.resource_set.resource_set_ipv6.from_django(
+ (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv6")
+ return rpki.resource_set.resource_bag(
+ valid_until = self.valid_until, asn = asns, v4 = ipv4, v6 = ipv6)
- # Writing of .setter method deferred until something needs it.
+ # Writing of .setter method deferred until something needs it.
class ResourceSetASN(django.db.models.Model):
- start_as = django.db.models.BigIntegerField()
- end_as = django.db.models.BigIntegerField()
+ start_as = django.db.models.BigIntegerField()
+ end_as = django.db.models.BigIntegerField()
- class Meta:
- abstract = True
+ class Meta:
+ abstract = True
- def as_resource_range(self):
- return rpki.resource_set.resource_range_as(self.start_as, self.end_as)
+ def as_resource_range(self):
+ return rpki.resource_set.resource_range_as(self.start_as, self.end_as)
class ResourceSetNet(django.db.models.Model):
- start_ip = django.db.models.CharField(max_length = 40)
- end_ip = django.db.models.CharField(max_length = 40)
- version = EnumField(choices = ip_version_choices)
+ start_ip = django.db.models.CharField(max_length = 40)
+ end_ip = django.db.models.CharField(max_length = 40)
+ version = EnumField(choices = ip_version_choices)
- class Meta:
- abstract = True
+ class Meta:
+ abstract = True
- def as_resource_range(self):
- return rpki.resource_set.resource_range_ip.from_strings(self.start_ip, self.end_ip)
+ def as_resource_range(self):
+ return rpki.resource_set.resource_range_ip.from_strings(self.start_ip, self.end_ip)
class Child(CrossCertification, ResourceSet):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "children")
- name = django.db.models.TextField(null = True, blank = True)
-
- def _select_resource_bag(self):
- child_asn = rpki.irdb.ChildASN.objects.raw("""
- SELECT *
- FROM irdb_childasn
- WHERE child_id = %s
- """, [self.id])
- child_net = list(rpki.irdb.ChildNet.objects.raw("""
- SELECT *
- FROM irdb_childnet
- WHERE child_id = %s
- """, [self.id]))
- return child_asn, child_net
-
- class Meta:
- unique_together = ("issuer", "handle")
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "children")
+ name = django.db.models.TextField(null = True, blank = True)
+
+ def _select_resource_bag(self):
+ child_asn = rpki.irdb.models.ChildASN.objects.raw("""
+ SELECT *
+ FROM irdb_childasn
+ WHERE child_id = %s
+ """, [self.id])
+ child_net = list(rpki.irdb.models.ChildNet.objects.raw("""
+ SELECT *
+ FROM irdb_childnet
+ WHERE child_id = %s
+ """, [self.id]))
+ return child_asn, child_net
+
+ class Meta:
+ unique_together = ("issuer", "handle")
class ChildASN(ResourceSetASN):
- child = django.db.models.ForeignKey(Child, related_name = "asns")
+ child = django.db.models.ForeignKey(Child, related_name = "asns")
- class Meta:
- unique_together = ("child", "start_as", "end_as")
+ class Meta:
+ unique_together = ("child", "start_as", "end_as")
class ChildNet(ResourceSetNet):
- child = django.db.models.ForeignKey(Child, related_name = "address_ranges")
-
- class Meta:
- unique_together = ("child", "start_ip", "end_ip", "version")
-
-class Parent(CrossCertification, Turtle):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "parents")
- parent_handle = HandleField()
- child_handle = HandleField()
- repository_type = EnumField(choices = ("none", "offer", "referral"))
- referrer = HandleField(null = True, blank = True)
- referral_authorization = SignedReferralField(null = True, blank = True)
-
- # This shouldn't be necessary
- class Meta:
- unique_together = ("issuer", "handle")
+ child = django.db.models.ForeignKey(Child, related_name = "address_ranges")
+
+ class Meta:
+ unique_together = ("child", "start_ip", "end_ip", "version")
+
+class Parent(CrossCertification):
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "parents")
+ service_uri = django.db.models.CharField(max_length = 255)
+ parent_handle = HandleField()
+ child_handle = HandleField()
+ repository_type = EnumField(choices = ("none", "offer", "referral"))
+ referrer = HandleField(null = True, blank = True)
+ referral_authorization = SignedReferralField(null = True, blank = True)
+ asn_resources = django.db.models.TextField(blank = True) # root only
+ ipv4_resources = django.db.models.TextField(blank = True) # root only
+ ipv6_resources = django.db.models.TextField(blank = True) # root only
+
+ # This shouldn't be necessary
+ class Meta:
+ unique_together = ("issuer", "handle")
class ROARequest(django.db.models.Model):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "roa_requests")
- asn = django.db.models.BigIntegerField()
-
- @property
- def roa_prefix_bag(self):
- prefixes = list(rpki.irdb.ROARequestPrefix.objects.raw("""
- SELECT *
- FROM irdb_roarequestprefix
- WHERE roa_request_id = %s
- """, [self.id]))
- v4 = rpki.resource_set.roa_prefix_set_ipv4.from_django(
- (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv4")
- v6 = rpki.resource_set.roa_prefix_set_ipv6.from_django(
- (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv6")
- return rpki.resource_set.roa_prefix_bag(v4 = v4, v6 = v6)
-
- # Writing of .setter method deferred until something needs it.
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "roa_requests")
+ asn = django.db.models.BigIntegerField()
+
+ @property
+ def roa_prefix_bag(self):
+ prefixes = list(rpki.irdb.models.ROARequestPrefix.objects.raw("""
+ SELECT *
+ FROM irdb_roarequestprefix
+ WHERE roa_request_id = %s
+ """, [self.id]))
+ v4 = rpki.resource_set.roa_prefix_set_ipv4.from_django(
+ (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv4")
+ v6 = rpki.resource_set.roa_prefix_set_ipv6.from_django(
+ (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv6")
+ return rpki.resource_set.roa_prefix_bag(v4 = v4, v6 = v6)
+
+ # Writing of .setter method deferred until something needs it.
class ROARequestPrefix(django.db.models.Model):
- roa_request = django.db.models.ForeignKey(ROARequest, related_name = "prefixes")
- version = EnumField(choices = ip_version_choices)
- prefix = django.db.models.CharField(max_length = 40)
- prefixlen = django.db.models.PositiveSmallIntegerField()
- max_prefixlen = django.db.models.PositiveSmallIntegerField()
-
- def as_roa_prefix(self):
- if self.version == 'IPv4':
- return rpki.resource_set.roa_prefix_ipv4(rpki.POW.IPAddress(self.prefix), self.prefixlen, self.max_prefixlen)
- else:
- return rpki.resource_set.roa_prefix_ipv6(rpki.POW.IPAddress(self.prefix), self.prefixlen, self.max_prefixlen)
-
- def as_resource_range(self):
- return self.as_roa_prefix().to_resource_range()
-
- class Meta:
- unique_together = ("roa_request", "version", "prefix", "prefixlen", "max_prefixlen")
+ roa_request = django.db.models.ForeignKey(ROARequest, related_name = "prefixes")
+ version = EnumField(choices = ip_version_choices)
+ prefix = django.db.models.CharField(max_length = 40)
+ prefixlen = django.db.models.PositiveSmallIntegerField()
+ max_prefixlen = django.db.models.PositiveSmallIntegerField()
+
+ def as_roa_prefix(self):
+ if self.version == 'IPv4':
+ return rpki.resource_set.roa_prefix_ipv4(rpki.POW.IPAddress(self.prefix),
+ self.prefixlen, self.max_prefixlen)
+ else:
+ return rpki.resource_set.roa_prefix_ipv6(rpki.POW.IPAddress(self.prefix),
+ self.prefixlen, self.max_prefixlen)
+
+ def as_resource_range(self):
+ return self.as_roa_prefix().to_resource_range()
+
+ class Meta:
+ unique_together = ("roa_request", "version", "prefix", "prefixlen", "max_prefixlen")
class GhostbusterRequest(django.db.models.Model):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ghostbuster_requests")
- parent = django.db.models.ForeignKey(Parent, related_name = "ghostbuster_requests", null = True)
- vcard = django.db.models.TextField()
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ghostbuster_requests")
+ parent = django.db.models.ForeignKey(Parent, related_name = "ghostbuster_requests", null = True)
+ vcard = django.db.models.TextField()
class EECertificateRequest(ResourceSet):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ee_certificate_requests")
- pkcs10 = PKCS10Field()
- gski = django.db.models.CharField(max_length = 27)
- cn = django.db.models.CharField(max_length = 64)
- sn = django.db.models.CharField(max_length = 64)
- eku = django.db.models.TextField(null = True)
-
- def _select_resource_bag(self):
- ee_asn = rpki.irdb.EECertificateRequestASN.objects.raw("""
- SELECT *
- FROM irdb_eecertificaterequestasn
- WHERE ee_certificate_request_id = %s
- """, [self.id])
- ee_net = rpki.irdb.EECertificateRequestNet.objects.raw("""
- SELECT *
- FROM irdb_eecertificaterequestnet
- WHERE ee_certificate_request_id = %s
- """, [self.id])
- return ee_asn, ee_net
-
- class Meta:
- unique_together = ("issuer", "gski")
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ee_certificate_requests")
+ pkcs10 = PKCS10Field()
+ gski = django.db.models.CharField(max_length = 27)
+ cn = django.db.models.CharField(max_length = 64) # pylint: disable=C0103
+ sn = django.db.models.CharField(max_length = 64) # pylint: disable=C0103
+ eku = django.db.models.TextField(null = True)
+
+ def _select_resource_bag(self):
+ ee_asn = rpki.irdb.models.EECertificateRequestASN.objects.raw("""
+ SELECT *
+ FROM irdb_eecertificaterequestasn
+ WHERE ee_certificate_request_id = %s
+ """, [self.id])
+ ee_net = rpki.irdb.models.EECertificateRequestNet.objects.raw("""
+ SELECT *
+ FROM irdb_eecertificaterequestnet
+ WHERE ee_certificate_request_id = %s
+ """, [self.id])
+ return ee_asn, ee_net
+
+ class Meta:
+ unique_together = ("issuer", "gski")
class EECertificateRequestASN(ResourceSetASN):
- ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "asns")
+ ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "asns")
- class Meta:
- unique_together = ("ee_certificate_request", "start_as", "end_as")
+ class Meta:
+ unique_together = ("ee_certificate_request", "start_as", "end_as")
class EECertificateRequestNet(ResourceSetNet):
- ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "address_ranges")
+ ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "address_ranges")
- class Meta:
- unique_together = ("ee_certificate_request", "start_ip", "end_ip", "version")
+ class Meta:
+ unique_together = ("ee_certificate_request", "start_ip", "end_ip", "version")
class Repository(CrossCertification):
- issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "repositories")
- client_handle = HandleField()
- service_uri = django.db.models.CharField(max_length = 255)
- sia_base = django.db.models.TextField()
- turtle = django.db.models.OneToOneField(Turtle, related_name = "repository")
+ issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "repositories")
+ client_handle = HandleField()
+ service_uri = django.db.models.CharField(max_length = 255)
+ sia_base = django.db.models.TextField()
+ rrdp_notification_uri = django.db.models.TextField(null = True)
+ parent = django.db.models.OneToOneField(Parent, related_name = "repository")
- # This shouldn't be necessary
- class Meta:
- unique_together = ("issuer", "handle")
+ # This shouldn't be necessary
+ class Meta:
+ unique_together = ("issuer", "handle")
class Client(CrossCertification):
- issuer = django.db.models.ForeignKey(ServerCA, related_name = "clients")
- sia_base = django.db.models.TextField()
- parent_handle = HandleField()
-
- # This shouldn't be necessary
- class Meta:
- unique_together = ("issuer", "handle")
-
-# for Django South -- these are just simple subclasses
-add_introspection_rules([],
- (r'^rpki\.irdb\.models\.CertificateField',
- r'^rpki\.irdb\.models\.CRLField',
- r'^rpki\.irdb\.models\.EnumField',
- r'^rpki\.irdb\.models\.HandleField',
- r'^rpki\.irdb\.models\.RSAKeyField',
- r'^rpki\.irdb\.models\.SignedReferralField',
- r'^rpki\.irdb\.models\.SundialField'))
+ issuer = django.db.models.ForeignKey(ServerCA, related_name = "clients")
+ sia_base = django.db.models.TextField()
+
+ # This shouldn't be necessary
+ class Meta:
+ unique_together = ("issuer", "handle")
diff --git a/rpki/irdb/router.py b/rpki/irdb/router.py
index 97e3d0b7..a2ba81c7 100644
--- a/rpki/irdb/router.py
+++ b/rpki/irdb/router.py
@@ -26,70 +26,73 @@ passing database names everywhere. Using a database router
accomplishes this.
"""
+# pylint: disable=W0212
+
class DBContextRouter(object):
- """
- A Django database router for use with multiple IRDBs.
-
- This router is designed to work in conjunction with the
- rpki.irdb.database context handler (q.v.).
- """
-
- _app = "irdb"
-
- _database = None
-
- def db_for_read(self, model, **hints):
- if model._meta.app_label == self._app:
- return self._database
- else:
- return None
-
- def db_for_write(self, model, **hints):
- if model._meta.app_label == self._app:
- return self._database
- else:
- return None
-
- def allow_relation(self, obj1, obj2, **hints):
- if self._database is None:
- return None
- elif obj1._meta.app_label == self._app and obj2._meta.app_label == self._app:
- return True
- else:
- return None
-
- def allow_syncdb(self, db, model):
- if db == self._database and model._meta.app_label == self._app:
- return True
- else:
- return None
+ """
+ A Django database router for use with multiple IRDBs.
+
+ This router is designed to work in conjunction with the
+ rpki.irdb.database context handler (q.v.).
+ """
+
+ _app = "irdb"
+
+ _database = None
+
+ def db_for_read(self, model, **hints):
+ if model._meta.app_label == self._app:
+ return self._database
+ else:
+ return None
+
+ def db_for_write(self, model, **hints):
+ if model._meta.app_label == self._app:
+ return self._database
+ else:
+ return None
+
+ def allow_relation(self, obj1, obj2, **hints):
+ if self._database is None:
+ return None
+ elif obj1._meta.app_label == self._app and obj2._meta.app_label == self._app:
+ return True
+ else:
+ return None
+
+ def allow_migrate(self, db, model):
+ if db == self._database and model._meta.app_label == self._app:
+ return True
+ else:
+ return None
class database(object):
- """
- Context manager for use with DBContextRouter. Use thusly:
-
- with rpki.irdb.database("blarg"):
- do_stuff()
-
- This binds IRDB operations to database blarg for the duration of
- the call to do_stuff(), then restores the prior state.
- """
-
- def __init__(self, name, on_entry = None, on_exit = None):
- if not isinstance(name, str):
- raise ValueError("database name must be a string, not %r" % name)
- self.name = name
- self.on_entry = on_entry
- self.on_exit = on_exit
-
- def __enter__(self):
- if self.on_entry is not None:
- self.on_entry()
- self.former = DBContextRouter._database
- DBContextRouter._database = self.name
-
- def __exit__(self, _type, value, traceback):
- assert DBContextRouter._database is self.name
- DBContextRouter._database = self.former
- if self.on_exit is not None:
- self.on_exit()
+ """
+ Context manager for use with DBContextRouter. Use thusly:
+
+ with rpki.irdb.database("blarg"):
+ do_stuff()
+
+ This binds IRDB operations to database blarg for the duration of
+ the call to do_stuff(), then restores the prior state.
+ """
+
+ def __init__(self, name, on_entry = None, on_exit = None):
+ if not isinstance(name, str):
+ raise ValueError("database name must be a string, not %r" % name)
+ self.name = name
+ self.on_entry = on_entry
+ self.on_exit = on_exit
+ self.former = None
+
+ def __enter__(self):
+ if self.on_entry is not None:
+ self.on_entry()
+ self.former = DBContextRouter._database
+ DBContextRouter._database = self.name
+
+ def __exit__(self, _type, value, traceback):
+ assert DBContextRouter._database is self.name
+ DBContextRouter._database = self.former
+ if self.on_exit is not None:
+ self.on_exit()
diff --git a/rpki/irdb/zookeeper.py b/rpki/irdb/zookeeper.py
index c7038889..a30ef7a8 100644
--- a/rpki/irdb/zookeeper.py
+++ b/rpki/irdb/zookeeper.py
@@ -20,21 +20,20 @@
Management code for the IRDB.
"""
-# pylint: disable=W0612,C0325
-
import os
import copy
-import types
+
import rpki.config
import rpki.sundial
import rpki.oids
-import rpki.http
+import rpki.http_simple
import rpki.resource_set
import rpki.relaxng
import rpki.left_right
import rpki.x509
-import rpki.async
import rpki.irdb
+import rpki.publication_control
+
import django.db.transaction
from lxml.etree import (Element, SubElement, ElementTree,
@@ -42,12 +41,11 @@ from lxml.etree import (Element, SubElement, ElementTree,
from rpki.csv_utils import csv_reader
-# XML namespace and protocol version for OOB setup protocol. The name
-# is historical and may change before we propose this as the basis for
-# a standard.
+# XML namespace and protocol version for OOB setup protocol.
-myrpki_xmlns = rpki.relaxng.myrpki.xmlns
-myrpki_version = rpki.relaxng.myrpki.version
+oob_xmlns = rpki.relaxng.oob_setup.xmlns
+oob_nsmap = rpki.relaxng.oob_setup.nsmap
+oob_version = rpki.relaxng.oob_setup.version
# XML namespace and protocol version for router certificate requests.
# We probably ought to be pulling this sort of thing from the schema,
@@ -56,13 +54,32 @@ myrpki_version = rpki.relaxng.myrpki.version
# I'm ready to rewrite the rpki.relaxng code.
routercert_xmlns = rpki.relaxng.router_certificate.xmlns
+routercert_nsmap = rpki.relaxng.router_certificate.nsmap
routercert_version = rpki.relaxng.router_certificate.version
+# XML tags for elements in the above
+
+tag_oob_authorization = oob_xmlns + "authorization"
+tag_oob_child_bpki_ta = oob_xmlns + "child_bpki_ta"
+tag_oob_child_request = oob_xmlns + "child_request"
+tag_oob_error = oob_xmlns + "error"
+tag_oob_offer = oob_xmlns + "offer"
+tag_oob_parent_bpki_ta = oob_xmlns + "parent_bpki_ta"
+tag_oob_parent_response = oob_xmlns + "parent_response"
+tag_oob_publisher_bpki_ta = oob_xmlns + "publisher_bpki_ta"
+tag_oob_publisher_request = oob_xmlns + "publisher_request"
+tag_oob_referral = oob_xmlns + "referral"
+tag_oob_repository_bpki_ta = oob_xmlns + "repository_bpki_ta"
+tag_oob_repository_response = oob_xmlns + "repository_response"
+
+tag_router_certificate_request = routercert_xmlns + "router_certificate_request"
+
+# Configuration file section names
+
myrpki_section = "myrpki"
irdbd_section = "irdbd"
rpkid_section = "rpkid"
pubd_section = "pubd"
-rootd_section = "rootd"
# A whole lot of exceptions
@@ -71,1606 +88,1603 @@ class MissingHandle(Exception): "Missing handle."
class CouldntTalkToDaemon(Exception): "Couldn't talk to daemon."
class BadXMLMessage(Exception): "Bad XML message."
class PastExpiration(Exception): "Expiration date has already passed."
-class CantRunRootd(Exception): "Can't run rootd."
+class CouldntFindRepoParent(Exception): "Couldn't find repository's parent."
def B64Element(e, tag, obj, **kwargs):
- """
- Create an XML element containing Base64 encoded data taken from a
- DER object.
- """
-
- if e is None:
- se = Element(tag, **kwargs)
- else:
- se = SubElement(e, tag, **kwargs)
- if e is not None and e.text is None:
- e.text = "\n"
- se.text = "\n" + obj.get_Base64()
- se.tail = "\n"
- return se
-
-class PEM_writer(object):
- """
- Write PEM files to disk, keeping track of which ones we've already
- written and setting the file mode appropriately.
-
- Comparing the old file with what we're about to write serves no real
- purpose except to calm users who find repeated messages about
- writing the same file confusing.
- """
-
- def __init__(self, logstream = None):
- self.wrote = set()
- self.logstream = logstream
-
- def __call__(self, filename, obj, compare = True):
- filename = os.path.realpath(filename)
- if filename in self.wrote:
- return
- tempname = filename
- pem = obj.get_PEM()
- if not filename.startswith("/dev/"):
- try:
- if compare and pem == open(filename, "r").read():
- return
- except: # pylint: disable=W0702
- pass
- tempname += ".%s.tmp" % os.getpid()
- mode = 0400 if filename.endswith(".key") else 0444
- if self.logstream is not None:
- self.logstream.write("Writing %s\n" % filename)
- f = os.fdopen(os.open(tempname, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode), "w")
- f.write(pem)
- f.close()
- if tempname != filename:
- os.rename(tempname, filename)
- self.wrote.add(filename)
-
-
-def etree_read(filename):
- """
- Read an etree from a file, verifying then stripping XML namespace
- cruft.
- """
-
- e = ElementTree(file = filename).getroot()
- rpki.relaxng.myrpki.assertValid(e)
- for i in e.getiterator():
- if i.tag.startswith(myrpki_xmlns):
- i.tag = i.tag[len(myrpki_xmlns):]
- else:
- raise BadXMLMessage("XML tag %r is not in namespace %r" % (i.tag, myrpki_xmlns[1:-1]))
- return e
-
-
-class etree_wrapper(object):
- """
- Wrapper for ETree objects so we can return them as function results
- without requiring the caller to understand much about them.
-
- """
-
- def __init__(self, e, msg = None, debug = False):
- self.msg = msg
- e = copy.deepcopy(e)
- e.set("version", myrpki_version)
- for i in e.getiterator():
- if i.tag[0] != "{":
- i.tag = myrpki_xmlns + i.tag
- assert i.tag.startswith(myrpki_xmlns)
- if debug:
- print ElementToString(e)
- rpki.relaxng.myrpki.assertValid(e)
- self.etree = e
-
- def __str__(self):
- return ElementToString(self.etree)
-
- def save(self, filename, logstream = None):
- filename = os.path.realpath(filename)
- tempname = filename
- if not filename.startswith("/dev/"):
- tempname += ".%s.tmp" % os.getpid()
- ElementTree(self.etree).write(tempname)
- if tempname != filename:
- os.rename(tempname, filename)
- if logstream is not None:
- logstream.write("Wrote %s\n" % filename)
- if self.msg is not None:
- logstream.write(self.msg + "\n")
-
- @property
- def file(self):
- from cStringIO import StringIO
- return StringIO(ElementToString(self.etree))
-
-
-class Zookeeper(object):
-
- ## @var show_xml
- # Whether to show XML for debugging
-
- show_xml = False
-
- def __init__(self, cfg = None, handle = None, logstream = None, disable_signal_handlers = False):
-
- if cfg is None:
- cfg = rpki.config.parser()
-
- if handle is None:
- handle = cfg.get("handle", section = myrpki_section)
-
- self.cfg = cfg
-
- self.logstream = logstream
- self.disable_signal_handlers = disable_signal_handlers
-
- self.run_rpkid = cfg.getboolean("run_rpkid", section = myrpki_section)
- self.run_pubd = cfg.getboolean("run_pubd", section = myrpki_section)
- self.run_rootd = cfg.getboolean("run_rootd", section = myrpki_section)
-
- if self.run_rootd and (not self.run_pubd or not self.run_rpkid):
- raise CantRunRootd("Can't run rootd unless also running rpkid and pubd")
-
- self.default_repository = cfg.get("default_repository", "", section = myrpki_section)
- self.pubd_contact_info = cfg.get("pubd_contact_info", "", section = myrpki_section)
-
- self.rsync_module = cfg.get("publication_rsync_module", section = myrpki_section)
- self.rsync_server = cfg.get("publication_rsync_server", section = myrpki_section)
-
- self.reset_identity(handle)
-
-
- def reset_identity(self, handle):
"""
- Select handle of current resource holding entity.
+ Create an XML element containing Base64 encoded data taken from a
+ DER object.
"""
- if handle is None:
- raise MissingHandle
- self.handle = handle
-
-
- def set_logstream(self, logstream):
- """
- Set log stream for this Zookeeper. The log stream is a file-like
- object, or None to suppress all logging.
- """
-
- self.logstream = logstream
-
-
- def log(self, msg):
- """
- Send some text to this Zookeeper's log stream, if one is set.
- """
-
- if self.logstream is not None:
- self.logstream.write(msg)
- self.logstream.write("\n")
-
-
- @property
- def resource_ca(self):
- """
- Get ResourceHolderCA object associated with current handle.
- """
-
- if self.handle is None:
- raise HandleNotSet
- return rpki.irdb.ResourceHolderCA.objects.get(handle = self.handle)
-
-
- @property
- def server_ca(self):
- """
- Get ServerCA object.
- """
-
- return rpki.irdb.ServerCA.objects.get()
-
-
- @django.db.transaction.commit_on_success
- def initialize_server_bpki(self):
- """
- Initialize server BPKI portion of an RPKI installation. Reads the
- configuration file and generates the initial BPKI server
- certificates needed to start daemons.
- """
-
- if self.run_rpkid or self.run_pubd:
- server_ca, created = rpki.irdb.ServerCA.objects.get_or_certify()
- rpki.irdb.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irbe")
-
- if self.run_rpkid:
- rpki.irdb.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "rpkid")
- rpki.irdb.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irdbd")
-
- if self.run_pubd:
- rpki.irdb.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "pubd")
-
-
- @django.db.transaction.commit_on_success
- def initialize_resource_bpki(self):
- """
- Initialize the resource-holding BPKI for an RPKI installation.
- Returns XML describing the resource holder.
-
- This method is present primarily for backwards compatibility with
- the old combined initialize() method which initialized both the
- server BPKI and the default resource-holding BPKI in a single
- method call. In the long run we want to replace this with
- something that takes a handle as argument and creates the
- resource-holding BPKI idenity if needed.
- """
-
- resource_ca, created = rpki.irdb.ResourceHolderCA.objects.get_or_certify(handle = self.handle)
- return self.generate_identity()
-
-
- def initialize(self):
- """
- Backwards compatibility wrapper: calls initialize_server_bpki()
- and initialize_resource_bpki(), returns latter's result.
- """
-
- self.initialize_server_bpki()
- return self.initialize_resource_bpki()
-
-
- def generate_identity(self):
- """
- Generate identity XML. Broken out of .initialize() because it's
- easier for the GUI this way.
- """
-
- e = Element("identity", handle = self.handle)
- B64Element(e, "bpki_ta", self.resource_ca.certificate)
- return etree_wrapper(e, msg = 'This is the "identity" file you will need to send to your parent')
-
-
- @django.db.transaction.commit_on_success
- def delete_self(self):
- """
- Delete the ResourceHolderCA object corresponding to the current handle.
- This corresponds to deleting an rpkid <self/> object.
+ if e is None:
+ se = Element(tag, **kwargs)
+ else:
+ se = SubElement(e, tag, **kwargs)
+ if e is not None and e.text is None:
+ e.text = "\n"
+ se.text = "\n" + obj.get_Base64()
+ se.tail = "\n"
+ return se
- This code assumes the normal Django cascade-on-delete behavior,
- that is, we assume that deleting the ResourceHolderCA object
- deletes all the subordinate objects that refer to it via foreign
- key relationships.
+class PEM_writer(object):
"""
-
- resource_ca = self.resource_ca
- if resource_ca is not None:
- resource_ca.delete()
+ Write PEM files to disk, keeping track of which ones we've already
+ written and setting the file mode appropriately.
+
+ Comparing the old file with what we're about to write serves no real
+ purpose except to calm users who find repeated messages about
+ writing the same file confusing.
+ """
+
+ def __init__(self, logstream = None):
+ self.wrote = set()
+ self.logstream = logstream
+
+ def __call__(self, filename, obj, compare = True):
+ filename = os.path.realpath(filename)
+ if filename in self.wrote:
+ return
+ tempname = filename
+ pem = obj.get_PEM()
+ if not filename.startswith("/dev/"):
+ try:
+ if compare and pem == open(filename, "r").read():
+ return
+ except:
+ pass
+ tempname += ".%s.tmp" % os.getpid()
+ mode = 0400 if filename.endswith(".key") else 0444
+ if self.logstream is not None:
+ self.logstream.write("Writing %s\n" % filename)
+ f = os.fdopen(os.open(tempname, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode), "w")
+ f.write(pem)
+ f.close()
+ if tempname != filename:
+ os.rename(tempname, filename)
+ self.wrote.add(filename)
+
+
+def etree_read(xml_file, schema = rpki.relaxng.oob_setup):
+ """
+ Read an etree from a file-like object, verifying it against a schema.
+
+  As a convenience, we also accept an etree_wrapper object in place
+ of a filename, in which case we deepcopy the etree directly from
+ the etree_wrapper and there's no need for a file.
+ """
+
+ if isinstance(xml_file, etree_wrapper):
+ e = copy.deepcopy(xml_file.etree)
else:
- self.log("No such ResourceHolderCA \"%s\"" % self.handle)
-
-
- @django.db.transaction.commit_on_success
- def configure_rootd(self):
+ e = ElementTree(file = xml_file).getroot()
+ schema.assertValid(e)
+ return e
- assert self.run_rpkid and self.run_pubd and self.run_rootd
- rpki.irdb.Rootd.objects.get_or_certify(
- issuer = self.resource_ca,
- service_uri = "http://localhost:%s/" % self.cfg.get("rootd_server_port", section = myrpki_section))
-
- return self.generate_rootd_repository_offer()
-
-
- def generate_rootd_repository_offer(self):
+class etree_wrapper(object):
"""
- Generate repository offer for rootd. Split out of
- configure_rootd() because that's easier for the GUI.
+ Wrapper for ETree objects so we can return them as function results
+ without requiring the caller to understand much about them.
"""
- # The following assumes we'll set up the respository manually.
- # Not sure this is a reasonable assumption, particularly if we
- # ever fix rootd to use the publication protocol.
+ def __init__(self, e, msg = None, debug = False, schema = rpki.relaxng.oob_setup):
+ self.msg = msg
+ e = copy.deepcopy(e)
+ if debug:
+ print ElementToString(e)
+ schema.assertValid(e)
+ self.etree = e
- try:
- self.resource_ca.repositories.get(handle = self.handle)
- return None
+ def __str__(self):
+ return ElementToString(self.etree)
- except rpki.irdb.Repository.DoesNotExist:
- e = Element("repository", type = "offer", handle = self.handle, parent_handle = self.handle)
- B64Element(e, "bpki_client_ta", self.resource_ca.certificate)
- return etree_wrapper(e, msg = 'This is the "repository offer" file for you to use if you want to publish in your own repository')
+ def save(self, filename, logstream = None):
+ filename = os.path.realpath(filename)
+ tempname = filename
+ if not filename.startswith("/dev/"):
+ tempname += ".%s.tmp" % os.getpid()
+ ElementTree(self.etree).write(tempname)
+ if tempname != filename:
+ os.rename(tempname, filename)
+ if logstream is not None:
+ logstream.write("Wrote %s\n" % filename)
+ if self.msg is not None:
+ logstream.write(self.msg + "\n")
+ @property
+ def file(self):
+ from cStringIO import StringIO
+ return StringIO(ElementToString(self.etree))
- def write_bpki_files(self):
- """
- Write out BPKI certificate, key, and CRL files for daemons that
- need them.
- """
-
- writer = PEM_writer(self.logstream)
-
- if self.run_rpkid:
- rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid")
- writer(self.cfg.get("bpki-ta", section = rpkid_section), self.server_ca.certificate)
- writer(self.cfg.get("rpkid-key", section = rpkid_section), rpkid.private_key)
- writer(self.cfg.get("rpkid-cert", section = rpkid_section), rpkid.certificate)
- writer(self.cfg.get("irdb-cert", section = rpkid_section),
- self.server_ca.ee_certificates.get(purpose = "irdbd").certificate)
- writer(self.cfg.get("irbe-cert", section = rpkid_section),
- self.server_ca.ee_certificates.get(purpose = "irbe").certificate)
-
- if self.run_pubd:
- pubd = self.server_ca.ee_certificates.get(purpose = "pubd")
- writer(self.cfg.get("bpki-ta", section = pubd_section), self.server_ca.certificate)
- writer(self.cfg.get("pubd-key", section = pubd_section), pubd.private_key)
- writer(self.cfg.get("pubd-cert", section = pubd_section), pubd.certificate)
- writer(self.cfg.get("irbe-cert", section = pubd_section),
- self.server_ca.ee_certificates.get(purpose = "irbe").certificate)
-
- if self.run_rootd:
- try:
- rootd = rpki.irdb.ResourceHolderCA.objects.get(handle = self.handle).rootd
- writer(self.cfg.get("bpki-ta", section = rootd_section), self.server_ca.certificate)
- writer(self.cfg.get("rootd-bpki-crl", section = rootd_section), self.server_ca.latest_crl)
- writer(self.cfg.get("rootd-bpki-key", section = rootd_section), rootd.private_key)
- writer(self.cfg.get("rootd-bpki-cert", section = rootd_section), rootd.certificate)
- writer(self.cfg.get("child-bpki-cert", section = rootd_section), rootd.issuer.certificate)
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- self.log("rootd enabled but resource holding entity not yet configured, skipping rootd setup")
- except rpki.irdb.Rootd.DoesNotExist:
- self.log("rootd enabled but not yet configured, skipping rootd setup")
-
-
- @django.db.transaction.commit_on_success
- def update_bpki(self):
- """
- Update BPKI certificates. Assumes an existing RPKI installation.
- Basic plan here is to reissue all BPKI certificates we can, right
- now. In the long run we might want to be more clever about only
- touching ones that need maintenance, but this will do for a start.
-
- We also reissue CRLs for all CAs.
+class Zookeeper(object):
- Most likely this should be run under cron.
- """
+ ## @var show_xml
+ # If not None, a file-like object to which to prettyprint XML, for debugging.
- for model in (rpki.irdb.ServerCA,
- rpki.irdb.ResourceHolderCA,
- rpki.irdb.ServerEE,
- rpki.irdb.Referral,
- rpki.irdb.Rootd,
- rpki.irdb.HostedCA,
- rpki.irdb.BSC,
- rpki.irdb.Child,
- rpki.irdb.Parent,
- rpki.irdb.Client,
- rpki.irdb.Repository):
- for obj in model.objects.all():
- self.log("Regenerating BPKI certificate %s" % obj.certificate.getSubject())
- obj.avow()
- obj.save()
-
- self.log("Regenerating Server BPKI CRL")
- self.server_ca.generate_crl()
- self.server_ca.save()
-
- for ca in rpki.irdb.ResourceHolderCA.objects.all():
- self.log("Regenerating BPKI CRL for Resource Holder %s" % ca.handle)
- ca.generate_crl()
- ca.save()
-
-
- @django.db.transaction.commit_on_success
- def synchronize_bpki(self):
- """
- Synchronize BPKI updates. This is separate from .update_bpki()
- because this requires rpkid to be running and none of the other
- BPKI update stuff does; there may be circumstances under which it
- makes sense to do the rest of the BPKI update and allow this to
- fail with a warning.
- """
+ show_xml = None
- if self.run_rpkid:
- updates = []
-
- updates.extend(
- rpki.left_right.self_elt.make_pdu(
- action = "set",
- tag = "%s__self" % ca.handle,
- self_handle = ca.handle,
- bpki_cert = ca.certificate)
- for ca in rpki.irdb.ResourceHolderCA.objects.all())
-
- updates.extend(
- rpki.left_right.bsc_elt.make_pdu(
- action = "set",
- tag = "%s__bsc__%s" % (bsc.issuer.handle, bsc.handle),
- self_handle = bsc.issuer.handle,
- bsc_handle = bsc.handle,
- signing_cert = bsc.certificate,
- signing_cert_crl = bsc.issuer.latest_crl)
- for bsc in rpki.irdb.BSC.objects.all())
-
- updates.extend(
- rpki.left_right.repository_elt.make_pdu(
- action = "set",
- tag = "%s__repository__%s" % (repository.issuer.handle, repository.handle),
- self_handle = repository.issuer.handle,
- repository_handle = repository.handle,
- bpki_cert = repository.certificate)
- for repository in rpki.irdb.Repository.objects.all())
-
- updates.extend(
- rpki.left_right.parent_elt.make_pdu(
- action = "set",
- tag = "%s__parent__%s" % (parent.issuer.handle, parent.handle),
- self_handle = parent.issuer.handle,
- parent_handle = parent.handle,
- bpki_cms_cert = parent.certificate)
- for parent in rpki.irdb.Parent.objects.all())
-
- updates.extend(
- rpki.left_right.parent_elt.make_pdu(
- action = "set",
- tag = "%s__rootd" % rootd.issuer.handle,
- self_handle = rootd.issuer.handle,
- parent_handle = rootd.issuer.handle,
- bpki_cms_cert = rootd.certificate)
- for rootd in rpki.irdb.Rootd.objects.all())
-
- updates.extend(
- rpki.left_right.child_elt.make_pdu(
- action = "set",
- tag = "%s__child__%s" % (child.issuer.handle, child.handle),
- self_handle = child.issuer.handle,
- child_handle = child.handle,
- bpki_cert = child.certificate)
- for child in rpki.irdb.Child.objects.all())
-
- if updates:
- self.check_error_report(self.call_rpkid(updates))
-
- if self.run_pubd:
- updates = []
-
- updates.append(
- rpki.publication.config_elt.make_pdu(
- action = "set",
- bpki_crl = self.server_ca.latest_crl))
-
- updates.extend(
- rpki.publication.client_elt.make_pdu(
- action = "set",
- client_handle = client.handle,
- bpki_cert = client.certificate)
- for client in self.server_ca.clients.all())
-
- if updates:
- self.check_error_report(self.call_pubd(updates))
-
-
- @django.db.transaction.commit_on_success
- def configure_child(self, filename, child_handle = None, valid_until = None):
- """
- Configure a new child of this RPKI entity, given the child's XML
- identity file as an input. Extracts the child's data from the
- XML, cross-certifies the child's resource-holding BPKI
- certificate, and generates an XML file describing the relationship
- between the child and this parent, including this parent's BPKI
- data and up-down protocol service URI.
- """
+ def __init__(self, cfg = None, handle = None, logstream = None, disable_signal_handlers = False):
- c = etree_read(filename)
+ if cfg is None:
+ cfg = rpki.config.parser()
- if child_handle is None:
- child_handle = c.get("handle")
+ if handle is None:
+ handle = cfg.get("handle", section = myrpki_section)
- if valid_until is None:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
- else:
- valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
- if valid_until < rpki.sundial.now():
- raise PastExpiration("Specified new expiration time %s has passed" % valid_until)
+ self.cfg = cfg
- self.log("Child calls itself %r, we call it %r" % (c.get("handle"), child_handle))
+ self.logstream = logstream
+ self.disable_signal_handlers = disable_signal_handlers
- child, created = rpki.irdb.Child.objects.get_or_certify(
- issuer = self.resource_ca,
- handle = child_handle,
- ta = rpki.x509.X509(Base64 = c.findtext("bpki_ta")),
- valid_until = valid_until)
+ self.run_rpkid = cfg.getboolean("run_rpkid", section = myrpki_section)
+ self.run_pubd = cfg.getboolean("run_pubd", section = myrpki_section)
- return self.generate_parental_response(child), child_handle
+ self.default_repository = cfg.get("default_repository", "", section = myrpki_section)
+ self.pubd_contact_info = cfg.get("pubd_contact_info", "", section = myrpki_section)
+ self.rsync_module = cfg.get("publication_rsync_module", section = myrpki_section)
+ self.rsync_server = cfg.get("publication_rsync_server", section = myrpki_section)
- @django.db.transaction.commit_on_success
- def generate_parental_response(self, child):
- """
- Generate parental response XML. Broken out of .configure_child()
- for GUI.
- """
+ self.reset_identity(handle)
- service_uri = "http://%s:%s/up-down/%s/%s" % (
- self.cfg.get("rpkid_server_host", section = myrpki_section),
- self.cfg.get("rpkid_server_port", section = myrpki_section),
- self.handle, child.handle)
- e = Element("parent", parent_handle = self.handle, child_handle = child.handle,
- service_uri = service_uri, valid_until = str(child.valid_until))
- B64Element(e, "bpki_resource_ta", self.resource_ca.certificate)
- B64Element(e, "bpki_child_ta", child.ta)
+ def reset_identity(self, handle):
+ """
+ Select handle of current resource holding entity.
+ """
- try:
- if self.default_repository:
- repo = self.resource_ca.repositories.get(handle = self.default_repository)
- else:
- repo = self.resource_ca.repositories.get()
- except rpki.irdb.Repository.DoesNotExist:
- repo = None
+ if handle is None:
+ raise MissingHandle
+ self.handle = handle
- if repo is None:
- self.log("Couldn't find any usable repositories, not giving referral")
- elif repo.handle == self.handle:
- SubElement(e, "repository", type = "offer")
+ def set_logstream(self, logstream):
+ """
+ Set log stream for this Zookeeper. The log stream is a file-like
+ object, or None to suppress all logging.
+ """
- else:
- proposed_sia_base = repo.sia_base + child.handle + "/"
- referral_cert, created = rpki.irdb.Referral.objects.get_or_certify(issuer = self.resource_ca)
- auth = rpki.x509.SignedReferral()
- auth.set_content(B64Element(None, myrpki_xmlns + "referral", child.ta,
- version = myrpki_version,
- authorized_sia_base = proposed_sia_base))
- auth.schema_check()
- auth.sign(referral_cert.private_key, referral_cert.certificate, self.resource_ca.latest_crl)
+ self.logstream = logstream
- r = SubElement(e, "repository", type = "referral")
- B64Element(r, "authorization", auth, referrer = repo.client_handle)
- SubElement(r, "contact_info")
- return etree_wrapper(e, msg = "Send this file back to the child you just configured")
+ def log(self, msg):
+ """
+ Send some text to this Zookeeper's log stream, if one is set.
+ """
+ if self.logstream is not None:
+ self.logstream.write(msg)
+ self.logstream.write("\n")
- @django.db.transaction.commit_on_success
- def delete_child(self, child_handle):
- """
- Delete a child of this RPKI entity.
- """
- self.resource_ca.children.get(handle = child_handle).delete()
+ @property
+ def resource_ca(self):
+ """
+ Get ResourceHolderCA object associated with current handle.
+ """
+ if self.handle is None:
+ raise HandleNotSet
+ return rpki.irdb.models.ResourceHolderCA.objects.get(handle = self.handle)
- @django.db.transaction.commit_on_success
- def configure_parent(self, filename, parent_handle = None):
- """
- Configure a new parent of this RPKI entity, given the output of
- the parent's configure_child command as input. Reads the parent's
- response XML, extracts the parent's BPKI and service URI
- information, cross-certifies the parent's BPKI data into this
- entity's BPKI, and checks for offers or referrals of publication
- service. If a publication offer or referral is present, we
- generate a request-for-service message to that repository, in case
- the user wants to avail herself of the referral or offer.
- """
- p = etree_read(filename)
+ @property
+ def server_ca(self):
+ """
+ Get ServerCA object.
+ """
- if parent_handle is None:
- parent_handle = p.get("parent_handle")
+ return rpki.irdb.models.ServerCA.objects.get()
- r = p.find("repository")
- repository_type = "none"
- referrer = None
- referral_authorization = None
+ @django.db.transaction.atomic
+ def initialize_server_bpki(self):
+ """
+ Initialize server BPKI portion of an RPKI installation. Reads the
+ configuration file and generates the initial BPKI server
+ certificates needed to start daemons.
+ """
- if r is not None:
- repository_type = r.get("type")
+ if self.run_rpkid or self.run_pubd:
+ server_ca = rpki.irdb.models.ServerCA.objects.get_or_certify()[0]
+ rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irbe")
- if repository_type == "referral":
- a = r.find("authorization")
- referrer = a.get("referrer")
- referral_authorization = rpki.x509.SignedReferral(Base64 = a.text)
+ if self.run_rpkid:
+ rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "rpkid")
+ rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irdbd")
- self.log("Parent calls itself %r, we call it %r" % (p.get("parent_handle"), parent_handle))
- self.log("Parent calls us %r" % p.get("child_handle"))
+ if self.run_pubd:
+ rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "pubd")
- parent, created = rpki.irdb.Parent.objects.get_or_certify(
- issuer = self.resource_ca,
- handle = parent_handle,
- child_handle = p.get("child_handle"),
- parent_handle = p.get("parent_handle"),
- service_uri = p.get("service_uri"),
- ta = rpki.x509.X509(Base64 = p.findtext("bpki_resource_ta")),
- repository_type = repository_type,
- referrer = referrer,
- referral_authorization = referral_authorization)
- return self.generate_repository_request(parent), parent_handle
+ @django.db.transaction.atomic
+ def initialize_resource_bpki(self):
+ """
+ Initialize the resource-holding BPKI for an RPKI installation.
+ Returns XML describing the resource holder.
+ This method is present primarily for backwards compatibility with
+ the old combined initialize() method which initialized both the
+ server BPKI and the default resource-holding BPKI in a single
+ method call. In the long run we want to replace this with
+ something that takes a handle as argument and creates the
+  resource-holding BPKI identity if needed.
+ """
- def generate_repository_request(self, parent):
- """
- Generate repository request for a given parent.
- """
+ rpki.irdb.models.ResourceHolderCA.objects.get_or_certify(handle = self.handle)
+ return self.generate_identity()
- e = Element("repository", handle = self.handle,
- parent_handle = parent.handle, type = parent.repository_type)
- if parent.repository_type == "referral":
- B64Element(e, "authorization", parent.referral_authorization, referrer = parent.referrer)
- SubElement(e, "contact_info")
- B64Element(e, "bpki_client_ta", self.resource_ca.certificate)
- return etree_wrapper(e, msg = "This is the file to send to the repository operator")
+ def initialize(self):
+ """
+ Backwards compatibility wrapper: calls initialize_server_bpki()
+ and initialize_resource_bpki(), returns latter's result.
+ """
- @django.db.transaction.commit_on_success
- def delete_parent(self, parent_handle):
- """
- Delete a parent of this RPKI entity.
- """
+ self.initialize_server_bpki()
+ return self.initialize_resource_bpki()
- self.resource_ca.parents.get(handle = parent_handle).delete()
+ def generate_identity(self):
+ """
+ Generate identity XML. Broken out of .initialize() because it's
+ easier for the GUI this way.
+ """
- @django.db.transaction.commit_on_success
- def delete_rootd(self):
- """
- Delete rootd associated with this RPKI entity.
- """
+ e = Element(tag_oob_child_request, nsmap = oob_nsmap, version = oob_version,
+ child_handle = self.handle)
+ B64Element(e, tag_oob_child_bpki_ta, self.resource_ca.certificate)
+ return etree_wrapper(e, msg = 'This is the "identity" file you will need to send to your parent')
- self.resource_ca.rootd.delete()
+ @django.db.transaction.atomic
+ def delete_tenant(self):
+ """
+ Delete the ResourceHolderCA object corresponding to the current handle.
+ This corresponds to deleting an rpkid <tenant/> object.
- @django.db.transaction.commit_on_success
- def configure_publication_client(self, filename, sia_base = None, flat = False):
- """
- Configure publication server to know about a new client, given the
- client's request-for-service message as input. Reads the client's
- request for service, cross-certifies the client's BPKI data, and
- generates a response message containing the repository's BPKI data
- and service URI.
- """
+ This code assumes the normal Django cascade-on-delete behavior,
+ that is, we assume that deleting the ResourceHolderCA object
+ deletes all the subordinate objects that refer to it via foreign
+ key relationships.
+ """
- client = etree_read(filename)
-
- client_ta = rpki.x509.X509(Base64 = client.findtext("bpki_client_ta"))
-
- if sia_base is None and flat:
- self.log("Flat publication structure forced, homing client at top-level")
- sia_base = "rsync://%s/%s/%s/" % (self.rsync_server, self.rsync_module, client.get("handle"))
-
- if sia_base is None and client.get("type") == "referral":
- self.log("This looks like a referral, checking")
- try:
- auth = client.find("authorization")
- referrer = self.server_ca.clients.get(handle = auth.get("referrer"))
- referral_cms = rpki.x509.SignedReferral(Base64 = auth.text)
- referral_xml = referral_cms.unwrap(ta = (referrer.certificate, self.server_ca.certificate))
- if rpki.x509.X509(Base64 = referral_xml.text) != client_ta:
- raise BadXMLMessage("Referral trust anchor does not match")
- sia_base = referral_xml.get("authorized_sia_base")
- except rpki.irdb.Client.DoesNotExist:
- self.log("We have no record of the client (%s) alleged to have made this referral" % auth.get("referrer"))
-
- if sia_base is None and client.get("type") == "offer":
- self.log("This looks like an offer, checking")
- try:
- parent = rpki.irdb.ResourceHolderCA.objects.get(children__ta__exact = client_ta)
- if "/" in parent.repositories.get(ta = self.server_ca.certificate).client_handle:
- self.log("Client's parent is not top-level, this is not a valid offer")
- else:
- self.log("Found client and its parent, nesting")
- sia_base = "rsync://%s/%s/%s/%s/" % (self.rsync_server, self.rsync_module,
- parent.handle, client.get("handle"))
- except rpki.irdb.Repository.DoesNotExist:
- self.log("Found client's parent, but repository isn't set, this shouldn't happen!")
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- try:
- rpki.irdb.Rootd.objects.get(issuer__certificate__exact = client_ta)
- except rpki.irdb.Rootd.DoesNotExist:
- self.log("We don't host this client's parent, so we didn't make this offer")
+ resource_ca = self.resource_ca
+ if resource_ca is not None:
+ resource_ca.delete()
else:
- self.log("This client's parent is rootd")
-
- if sia_base is None:
- self.log("Don't know where to nest this client, defaulting to top-level")
- sia_base = "rsync://%s/%s/%s/" % (self.rsync_server, self.rsync_module, client.get("handle"))
-
- if not sia_base.startswith("rsync://"):
- raise BadXMLMessage("Malformed sia_base parameter %r, should start with 'rsync://'" % sia_base)
-
- client_handle = "/".join(sia_base.rstrip("/").split("/")[4:])
-
- parent_handle = client.get("parent_handle")
-
- self.log("Client calls itself %r, we call it %r" % (client.get("handle"), client_handle))
- self.log("Client says its parent handle is %r" % parent_handle)
-
- client, created = rpki.irdb.Client.objects.get_or_certify(
- issuer = self.server_ca,
- handle = client_handle,
- parent_handle = parent_handle,
- ta = client_ta,
- sia_base = sia_base)
-
- return self.generate_repository_response(client), client_handle
-
+ self.log("No such ResourceHolderCA \"%s\"" % self.handle)
+
+
+ @django.db.transaction.atomic
+ def configure_root(self, handle, resources):
+
+ if not handle:
+ handle = self.handle
+
+ parent = rpki.irdb.models.Parent.objects.get_or_certify(
+ issuer = self.resource_ca,
+ handle = handle,
+ parent_handle = handle,
+ child_handle = handle,
+ ta = self.resource_ca.certificate,
+ repository_type = "none",
+ asn_resources = str(resources.asn),
+ ipv4_resources = str(resources.v4),
+ ipv6_resources = str(resources.v6))[0]
+
+ return self.generate_repository_request(parent)
+
+
+ def extract_root_certificate_and_uris(self, handle):
+
+ if not handle:
+ handle = self.handle
+
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_parent, action = "get",
+ tenant_handle = self.handle, parent_handle = handle)
+ r_msg = self.call_rpkid(q_msg)
+ assert len(r_msg) == 1 and r_msg[0].tag == rpki.left_right.tag_parent
+
+ b64 = r_msg[0].findtext(rpki.left_right.tag_rpki_root_cert)
+ if not b64:
+ return None, ()
- def generate_repository_response(self, client):
- """
- Generate repository response XML to a given client.
- """
-
- service_uri = "http://%s:%s/client/%s" % (
- self.cfg.get("pubd_server_host", section = myrpki_section),
- self.cfg.get("pubd_server_port", section = myrpki_section),
- client.handle)
-
- e = Element("repository", type = "confirmed",
- client_handle = client.handle,
- parent_handle = client.parent_handle,
- sia_base = client.sia_base,
- service_uri = service_uri)
-
- B64Element(e, "bpki_server_ta", self.server_ca.certificate)
- B64Element(e, "bpki_client_ta", client.ta)
- SubElement(e, "contact_info").text = self.pubd_contact_info
- return etree_wrapper(e, msg = "Send this file back to the publication client you just configured")
-
-
- @django.db.transaction.commit_on_success
- def delete_publication_client(self, client_handle):
- """
- Delete a publication client of this RPKI entity.
- """
+ cert = rpki.x509.X509(Base64 = b64)
+ caDirectory, rpkiManifest, signedObjectRepository, rpkiNotify = cert.get_SIA()
+ sia_base = r_msg[0].get("sia_base")
+ fn = cert.gSKI() + ".cer"
+
+ https_uri = os.path.join(os.path.dirname(rpkiNotify[0]), fn)
+ rsync_uri = sia_base + fn
+
+ return cert, (https_uri, rsync_uri)
+
+
+ def write_bpki_files(self):
+ """
+ Write out BPKI certificate, key, and CRL files for daemons that
+ need them.
+ """
+
+ writer = PEM_writer(self.logstream)
+
+ if self.run_rpkid:
+ rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid")
+ writer(self.cfg.get("bpki-ta", section = rpkid_section), self.server_ca.certificate)
+ writer(self.cfg.get("rpkid-key", section = rpkid_section), rpkid.private_key)
+ writer(self.cfg.get("rpkid-cert", section = rpkid_section), rpkid.certificate)
+ writer(self.cfg.get("irdb-cert", section = rpkid_section),
+ self.server_ca.ee_certificates.get(purpose = "irdbd").certificate)
+ writer(self.cfg.get("irbe-cert", section = rpkid_section),
+ self.server_ca.ee_certificates.get(purpose = "irbe").certificate)
+
+ if self.run_pubd:
+ pubd = self.server_ca.ee_certificates.get(purpose = "pubd")
+ writer(self.cfg.get("bpki-ta", section = pubd_section), self.server_ca.certificate)
+ writer(self.cfg.get("pubd-crl", section = pubd_section), self.server_ca.latest_crl)
+ writer(self.cfg.get("pubd-key", section = pubd_section), pubd.private_key)
+ writer(self.cfg.get("pubd-cert", section = pubd_section), pubd.certificate)
+ writer(self.cfg.get("irbe-cert", section = pubd_section),
+ self.server_ca.ee_certificates.get(purpose = "irbe").certificate)
+
+
+ @django.db.transaction.atomic
+ def update_bpki(self):
+ """
+ Update BPKI certificates. Assumes an existing RPKI installation.
+
+ Basic plan here is to reissue all BPKI certificates we can, right
+ now. In the long run we might want to be more clever about only
+ touching ones that need maintenance, but this will do for a start.
+
+ We also reissue CRLs for all CAs.
+
+ Most likely this should be run under cron.
+ """
+
+ for model in (rpki.irdb.models.ServerCA,
+ rpki.irdb.models.ResourceHolderCA,
+ rpki.irdb.models.ServerEE,
+ rpki.irdb.models.Referral,
+ rpki.irdb.models.HostedCA,
+ rpki.irdb.models.BSC,
+ rpki.irdb.models.Child,
+ rpki.irdb.models.Parent,
+ rpki.irdb.models.Client,
+ rpki.irdb.models.Repository):
+ for obj in model.objects.all():
+ self.log("Regenerating BPKI certificate %s" % obj.certificate.getSubject())
+ obj.avow()
+ obj.save()
+
+ self.log("Regenerating Server BPKI CRL")
+ self.server_ca.generate_crl()
+ self.server_ca.save()
+
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ self.log("Regenerating BPKI CRL for Resource Holder %s" % ca.handle)
+ ca.generate_crl()
+ ca.save()
+
+
+ @staticmethod
+ def compose_left_right_query():
+ """
+ Compose top level element of a left-right query.
+ """
+
+ return Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "query", version = rpki.left_right.version)
+
+
+ @staticmethod
+ def _compose_publication_control_query():
+ """
+ Compose top level element of a publication-control query.
+ """
+
+ return Element(rpki.publication_control.tag_msg, nsmap = rpki.publication_control.nsmap,
+ type = "query", version = rpki.publication_control.version)
+
+
+ @django.db.transaction.atomic
+ def synchronize_bpki(self):
+ """
+ Synchronize BPKI updates. This is separate from .update_bpki()
+ because this requires rpkid to be running and none of the other
+ BPKI update stuff does; there may be circumstances under which it
+ makes sense to do the rest of the BPKI update and allow this to
+ fail with a warning.
+ """
+
+ if self.run_rpkid:
+ q_msg = self.compose_left_right_query()
+
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant,
+ action = "set",
+ tag = "%s__tenant" % ca.handle,
+ tenant_handle = ca.handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.certificate.get_Base64()
+
+ for bsc in rpki.irdb.models.BSC.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "set",
+ tag = "%s__bsc__%s" % (bsc.issuer.handle, bsc.handle),
+ tenant_handle = bsc.issuer.handle,
+ bsc_handle = bsc.handle)
+ SubElement(q_pdu, rpki.left_right.tag_signing_cert).text = bsc.certificate.get_Base64()
+ SubElement(q_pdu, rpki.left_right.tag_signing_cert_crl).text = bsc.issuer.latest_crl.get_Base64()
+
+ for repository in rpki.irdb.models.Repository.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_repository,
+ action = "set",
+ tag = "%s__repository__%s" % (repository.issuer.handle, repository.handle),
+ tenant_handle = repository.issuer.handle,
+ repository_handle = repository.handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = repository.certificate.get_Base64()
+
+ for parent in rpki.irdb.models.Parent.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_parent,
+ action = "set",
+ tag = "%s__parent__%s" % (parent.issuer.handle, parent.handle),
+ tenant_handle = parent.issuer.handle,
+ parent_handle = parent.handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = parent.certificate.get_Base64()
+
+ for child in rpki.irdb.models.Child.objects.all():
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_child,
+ action = "set",
+ tag = "%s__child__%s" % (child.issuer.handle, child.handle),
+ tenant_handle = child.issuer.handle,
+ child_handle = child.handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = child.certificate.get_Base64()
+
+ if len(q_msg) > 0:
+ self.call_rpkid(q_msg)
+
+ if self.run_pubd:
+ q_msg = self._compose_publication_control_query()
+
+ for client in self.server_ca.clients.all():
+ q_pdu = SubElement(q_msg, rpki.publication_control.tag_client, action = "set", client_handle = client.handle)
+ SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = client.certificate.get_Base64()
+
+ if len(q_msg) > 0:
+ self.call_pubd(q_msg)
+
+
+ @django.db.transaction.atomic
+ def configure_child(self, xml_file, child_handle = None, valid_until = None):
+ """
+ Configure a new child of this RPKI entity, given the child's XML
+ identity file as an input. Extracts the child's data from the
+ XML, cross-certifies the child's resource-holding BPKI
+ certificate, and generates an XML file describing the relationship
+ between the child and this parent, including this parent's BPKI
+ data and up-down protocol service URI.
+ """
+
+ x = etree_read(xml_file)
+
+ if x.tag != tag_oob_child_request:
+ raise BadXMLMessage("Expected %s, got %s", tag_oob_child_request, x.tag)
+
+ if child_handle is None:
+ child_handle = x.get("child_handle")
+
+ if valid_until is None:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
+ else:
+ valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
+ if valid_until < rpki.sundial.now():
+ raise PastExpiration("Specified new expiration time %s has passed" % valid_until)
- self.server_ca.clients.get(handle = client_handle).delete()
+ self.log("Child calls itself %r, we call it %r" % (x.get("child_handle"), child_handle))
+ child = rpki.irdb.models.Child.objects.get_or_certify(
+ issuer = self.resource_ca,
+ handle = child_handle,
+ ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_child_bpki_ta)),
+ valid_until = valid_until)[0]
- @django.db.transaction.commit_on_success
- def configure_repository(self, filename, parent_handle = None):
- """
- Configure a publication repository for this RPKI entity, given the
- repository's response to our request-for-service message as input.
- Reads the repository's response, extracts and cross-certifies the
- BPKI data and service URI, and links the repository data with the
- corresponding parent data in our local database.
- """
+ return self.generate_parental_response(child), child_handle
- r = etree_read(filename)
- if parent_handle is None:
- parent_handle = r.get("parent_handle")
+ @django.db.transaction.atomic
+ def generate_parental_response(self, child):
+ """
+ Generate parental response XML. Broken out of .configure_child()
+ for GUI.
+ """
- self.log("Repository calls us %r" % (r.get("client_handle")))
- self.log("Repository response associated with parent_handle %r" % parent_handle)
+ service_uri = "http://%s:%s/up-down/%s/%s" % (
+ self.cfg.get("rpkid_server_host", section = myrpki_section),
+ self.cfg.get("rpkid_server_port", section = myrpki_section),
+ self.handle, child.handle)
- try:
- if parent_handle == self.handle:
- turtle = self.resource_ca.rootd
- else:
- turtle = self.resource_ca.parents.get(handle = parent_handle)
+ e = Element(tag_oob_parent_response, nsmap = oob_nsmap, version = oob_version,
+ service_uri = service_uri,
+ child_handle = child.handle,
+ parent_handle = self.handle)
+ B64Element(e, tag_oob_parent_bpki_ta, self.resource_ca.certificate)
- except (rpki.irdb.Parent.DoesNotExist, rpki.irdb.Rootd.DoesNotExist):
- self.log("Could not find parent %r in our database" % parent_handle)
+ try:
+ if self.default_repository:
+ repo = self.resource_ca.repositories.get(handle = self.default_repository)
+ else:
+ repo = self.resource_ca.repositories.get()
+ except rpki.irdb.models.Repository.DoesNotExist:
+ repo = None
- else:
- rpki.irdb.Repository.objects.get_or_certify(
- issuer = self.resource_ca,
- handle = parent_handle,
- client_handle = r.get("client_handle"),
- service_uri = r.get("service_uri"),
- sia_base = r.get("sia_base"),
- ta = rpki.x509.X509(Base64 = r.findtext("bpki_server_ta")),
- turtle = turtle)
-
-
- @django.db.transaction.commit_on_success
- def delete_repository(self, repository_handle):
- """
- Delete a repository of this RPKI entity.
- """
+ if repo is None:
+ self.log("Couldn't find any usable repositories, not giving referral")
- self.resource_ca.repositories.get(handle = repository_handle).delete()
+ elif repo.handle == self.handle:
+ SubElement(e, tag_oob_offer)
+ else:
+ proposed_sia_base = repo.sia_base + child.handle + "/"
+ referral_cert = rpki.irdb.models.Referral.objects.get_or_certify(issuer = self.resource_ca)[0]
+ auth = rpki.x509.SignedReferral()
+ auth.set_content(B64Element(None, tag_oob_authorization, child.ta,
+ nsmap = oob_nsmap, version = oob_version,
+ authorized_sia_base = proposed_sia_base))
+ auth.schema_check()
+ auth.sign(referral_cert.private_key, referral_cert.certificate, self.resource_ca.latest_crl)
+ B64Element(e, tag_oob_referral, auth, referrer = repo.client_handle)
+
+ return etree_wrapper(e, msg = "Send this file back to the child you just configured")
+
+
+ @django.db.transaction.atomic
+ def delete_child(self, child_handle):
+ """
+ Delete a child of this RPKI entity.
+ """
+
+ self.resource_ca.children.get(handle = child_handle).delete()
+
+
+ @django.db.transaction.atomic
+ def configure_parent(self, xml_file, parent_handle = None):
+ """
+ Configure a new parent of this RPKI entity, given the output of
+ the parent's configure_child command as input. Reads the parent's
+ response XML, extracts the parent's BPKI and service URI
+ information, cross-certifies the parent's BPKI data into this
+ entity's BPKI, and checks for offers or referrals of publication
+ service. If a publication offer or referral is present, we
+ generate a request-for-service message to that repository, in case
+ the user wants to avail herself of the referral or offer.
+ """
+
+ x = etree_read(xml_file)
+
+ if x.tag != tag_oob_parent_response:
+ raise BadXMLMessage("Expected %s, got %s", tag_oob_parent_response, x.tag)
+
+ if parent_handle is None:
+ parent_handle = x.get("parent_handle")
+
+ offer = x.find(tag_oob_offer)
+ referral = x.find(tag_oob_referral)
+
+ if offer is not None:
+ repository_type = "offer"
+ referrer = None
+ referral_authorization = None
+
+ elif referral is not None:
+ repository_type = "referral"
+ referrer = referral.get("referrer")
+ referral_authorization = rpki.x509.SignedReferral(Base64 = referral.text)
- @django.db.transaction.commit_on_success
- def renew_children(self, child_handle, valid_until = None):
- """
- Update validity period for one child entity or, if child_handle is
- None, for all child entities.
- """
+ else:
+ repository_type = "none"
+ referrer = None
+ referral_authorization = None
+
+ self.log("Parent calls itself %r, we call it %r" % (x.get("parent_handle"), parent_handle))
+ self.log("Parent calls us %r" % x.get("child_handle"))
+
+ parent = rpki.irdb.models.Parent.objects.get_or_certify(
+ issuer = self.resource_ca,
+ handle = parent_handle,
+ child_handle = x.get("child_handle"),
+ parent_handle = x.get("parent_handle"),
+ service_uri = x.get("service_uri"),
+ ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_parent_bpki_ta)),
+ repository_type = repository_type,
+ referrer = referrer,
+ referral_authorization = referral_authorization)[0]
+
+ return self.generate_repository_request(parent), parent_handle
+
+
+ def generate_repository_request(self, parent):
+ """
+ Generate repository request for a given parent.
+ """
+
+ e = Element(tag_oob_publisher_request, nsmap = oob_nsmap, version = oob_version,
+ publisher_handle = self.handle)
+ B64Element(e, tag_oob_publisher_bpki_ta, self.resource_ca.certificate)
+ if parent.repository_type == "referral":
+ B64Element(e, tag_oob_referral, parent.referral_authorization,
+ referrer = parent.referrer)
+
+ return etree_wrapper(e, msg = "This is the file to send to the repository operator")
+
+
+ @django.db.transaction.atomic
+ def delete_parent(self, parent_handle):
+ """
+ Delete a parent of this RPKI entity.
+ """
+
+ self.resource_ca.parents.get(handle = parent_handle).delete()
+
+
+ @django.db.transaction.atomic
+ def configure_publication_client(self, xml_file, sia_base = None, flat = False):
+ """
+ Configure publication server to know about a new client, given the
+ client's request-for-service message as input. Reads the client's
+ request for service, cross-certifies the client's BPKI data, and
+ generates a response message containing the repository's BPKI data
+ and service URI.
+ """
+
+ # pylint: disable=E1124
+
+ x = etree_read(xml_file)
- if child_handle is None:
- children = self.resource_ca.children.all()
- else:
- children = self.resource_ca.children.filter(handle = child_handle)
+ if x.tag != tag_oob_publisher_request:
+ raise BadXMLMessage("Expected %s, got %s", tag_oob_publisher_request, x.tag)
+
+ client_ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_publisher_bpki_ta))
+
+ referral = x.find(tag_oob_referral)
+
+ default_sia_base = "rsync://{self.rsync_server}/{self.rsync_module}/{handle}/".format(
+ self = self,
+ handle = x.get("publisher_handle"))
+
+ if sia_base is None and flat:
+ self.log("Flat publication structure forced, homing client at top-level")
+ sia_base = default_sia_base
+
+ if sia_base is None and referral is not None:
+ self.log("This looks like a referral, checking")
+ try:
+ referrer = referral.get("referrer")
+ referrer = self.server_ca.clients.get(handle = referrer)
+ referral = rpki.x509.SignedReferral(Base64 = referral.text)
+ referral = referral.unwrap(ta = (referrer.certificate, self.server_ca.certificate))
+ if rpki.x509.X509(Base64 = referral.text) != client_ta:
+ raise BadXMLMessage("Referral trust anchor does not match")
+ sia_base = referral.get("authorized_sia_base")
+ except rpki.irdb.models.Client.DoesNotExist:
+ self.log("We have no record of the client ({}) alleged to have made this referral".format(referrer))
+
+ if sia_base is None and referral is None:
+ self.log("This might be an offer, checking")
+ try:
+ parent = rpki.irdb.models.ResourceHolderCA.objects.get(children__ta = client_ta)
+ if "/" in parent.repositories.get(ta = self.server_ca.certificate).client_handle:
+ self.log("Client's parent is not top-level, this is not a valid offer")
+ else:
+ self.log("Found client and its parent, nesting")
+ sia_base = "rsync://{self.rsync_server}/{self.rsync_module}/{parent_handle}/{client_handle}/".format(
+ self = self,
+ parent_handle = parent.handle,
+ client_handle = x.get("publisher_handle"))
+ except rpki.irdb.models.Repository.DoesNotExist:
+ self.log("Found client's parent, but repository isn't set, this shouldn't happen!")
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ self.log("We don't host this client's parent, so we didn't make an offer")
+
+ if sia_base is None:
+ self.log("Don't know where else to nest this client, so defaulting to top-level")
+ sia_base = default_sia_base
+
+ if not sia_base.startswith("rsync://"):
+ raise BadXMLMessage("Malformed sia_base parameter %r, should start with 'rsync://'" % sia_base)
+
+ client_handle = "/".join(sia_base.rstrip("/").split("/")[4:])
- if valid_until is None:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
- else:
- valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
- if valid_until < rpki.sundial.now():
- raise PastExpiration("Specified new expiration time %s has passed" % valid_until)
+ self.log("Client calls itself %r, we call it %r" % (
+ x.get("publisher_handle"), client_handle))
+
+ client = rpki.irdb.models.Client.objects.get_or_certify(
+ issuer = self.server_ca,
+ handle = client_handle,
+ ta = client_ta,
+ sia_base = sia_base)[0]
+
+ return self.generate_repository_response(client), client_handle
- self.log("New validity date %s" % valid_until)
- for child in children:
- child.valid_until = valid_until
- child.save()
+ def generate_repository_response(self, client):
+ """
+ Generate repository response XML to a given client.
+ """
+ service_uri = "http://{host}:{port}/client/{handle}".format(
+ host = self.cfg.get("pubd_server_host", section = myrpki_section),
+ port = self.cfg.get("pubd_server_port", section = myrpki_section),
+ handle = client.handle)
- @django.db.transaction.commit_on_success
- def load_prefixes(self, filename, ignore_missing_children = False):
- """
- Whack IRDB to match prefixes.csv.
- """
+ rrdp_uri = self.cfg.get("publication_rrdp_notification_uri", section = myrpki_section, default = "")
+
+ e = Element(tag_oob_repository_response, nsmap = oob_nsmap, version = oob_version,
+ service_uri = service_uri,
+ publisher_handle = client.handle,
+ sia_base = client.sia_base)
- grouped4 = {}
- grouped6 = {}
+ if rrdp_uri:
+ e.set("rrdp_notification_uri", rrdp_uri)
+
+ B64Element(e, tag_oob_repository_bpki_ta, self.server_ca.certificate)
+ return etree_wrapper(e, msg = "Send this file back to the publication client you just configured")
+
+
+ @django.db.transaction.atomic
+ def delete_publication_client(self, client_handle):
+ """
+ Delete a publication client of this RPKI entity.
+ """
- for handle, prefix in csv_reader(filename, columns = 2):
- grouped = grouped6 if ":" in prefix else grouped4
- if handle not in grouped:
- grouped[handle] = []
- grouped[handle].append(prefix)
+ self.server_ca.clients.get(handle = client_handle).delete()
- primary_keys = []
- for version, grouped, rset in ((4, grouped4, rpki.resource_set.resource_set_ipv4),
- (6, grouped6, rpki.resource_set.resource_set_ipv6)):
- for handle, prefixes in grouped.iteritems():
- try:
- child = self.resource_ca.children.get(handle = handle)
- except rpki.irdb.Child.DoesNotExist:
- if not ignore_missing_children:
- raise
+ @django.db.transaction.atomic
+ def configure_repository(self, xml_file, parent_handle = None):
+ """
+ Configure a publication repository for this RPKI entity, given the
+ repository's response to our request-for-service message as input.
+ Reads the repository's response, extracts and cross-certifies the
+ BPKI data and service URI, and links the repository data with the
+ corresponding parent data in our local database.
+ """
+
+ x = etree_read(xml_file)
+
+ if x.tag != tag_oob_repository_response:
+ raise BadXMLMessage("Expected %s, got %s", tag_oob_repository_response, x.tag)
+
+ self.log("Repository calls us %r" % (x.get("publisher_handle")))
+
+ if parent_handle is not None:
+ self.log("Explicit parent_handle given")
+ try:
+ parent = self.resource_ca.parents.get(handle = parent_handle)
+ except rpki.irdb.models.Parent.DoesNotExist:
+ self.log("Could not find parent %r in our database" % parent_handle)
+ raise CouldntFindRepoParent
+
else:
- for prefix in rset(",".join(prefixes)):
- obj, created = rpki.irdb.ChildNet.objects.get_or_create(
- child = child,
- start_ip = str(prefix.min),
- end_ip = str(prefix.max),
- version = version)
- primary_keys.append(obj.pk)
-
- q = rpki.irdb.ChildNet.objects
- q = q.filter(child__issuer__exact = self.resource_ca)
- q = q.exclude(pk__in = primary_keys)
- q.delete()
-
-
- @django.db.transaction.commit_on_success
- def load_asns(self, filename, ignore_missing_children = False):
- """
- Whack IRDB to match asns.csv.
- """
-
- grouped = {}
-
- for handle, asn in csv_reader(filename, columns = 2):
- if handle not in grouped:
- grouped[handle] = []
- grouped[handle].append(asn)
-
- primary_keys = []
-
- for handle, asns in grouped.iteritems():
- try:
- child = self.resource_ca.children.get(handle = handle)
- except rpki.irdb.Child.DoesNotExist:
- if not ignore_missing_children:
- raise
- else:
- for asn in rpki.resource_set.resource_set_as(",".join(asns)):
- obj, created = rpki.irdb.ChildASN.objects.get_or_create(
- child = child,
- start_as = str(asn.min),
- end_as = str(asn.max))
- primary_keys.append(obj.pk)
-
- q = rpki.irdb.ChildASN.objects
- q = q.filter(child__issuer__exact = self.resource_ca)
- q = q.exclude(pk__in = primary_keys)
- q.delete()
-
-
- @django.db.transaction.commit_on_success
- def load_roa_requests(self, filename):
- """
- Whack IRDB to match roa.csv.
- """
-
- grouped = {}
-
- # format: p/n-m asn group
- for pnm, asn, group in csv_reader(filename, columns = 3, min_columns = 2):
- key = (asn, group or pnm)
- if key not in grouped:
- grouped[key] = []
- grouped[key].append(pnm)
-
- # Deleting and recreating all the ROA requests is inefficient,
- # but rpkid's current representation of ROA requests is wrong
- # (see #32), so it's not worth a lot of effort here as we're
- # just going to have to rewrite this soon anyway.
-
- self.resource_ca.roa_requests.all().delete()
-
- for key, pnms in grouped.iteritems():
- asn, group = key
-
- roa_request = self.resource_ca.roa_requests.create(asn = asn)
-
- for pnm in pnms:
- if ":" in pnm:
- p = rpki.resource_set.roa_prefix_ipv6.parse_str(pnm)
- v = 6
+ # In theory this could be rewritten using an .exists() filter.
+ parents = []
+ for parent in self.resource_ca.parents.all():
+ try:
+ _ = parent.repository # pylint: disable=W0612
+ except rpki.irdb.models.Repository.DoesNotExist:
+ parents.append(parent)
+ if len(parents) != 1:
+ self.log("No explicit parent_handle given and unable to guess")
+ raise CouldntFindRepoParent
+ parent = parents[0]
+ parent_handle = parent.handle
+ self.log("No explicit parent_handle given, guessing parent {}".format(parent_handle))
+
+ rpki.irdb.models.Repository.objects.get_or_certify(
+ issuer = self.resource_ca,
+ handle = parent_handle,
+ client_handle = x.get("publisher_handle"),
+ service_uri = x.get("service_uri"),
+ sia_base = x.get("sia_base"),
+ rrdp_notification_uri = x.get("rrdp_notification_uri"),
+ ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_repository_bpki_ta)),
+ parent = parent)
+
+
+ @django.db.transaction.atomic
+ def delete_repository(self, repository_handle):
+ """
+ Delete a repository of this RPKI entity.
+ """
+
+ self.resource_ca.repositories.get(handle = repository_handle).delete()
+
+
+ @django.db.transaction.atomic
+ def renew_children(self, child_handle, valid_until = None):
+ """
+ Update validity period for one child entity or, if child_handle is
+ None, for all child entities.
+ """
+
+ if child_handle is None:
+ children = self.resource_ca.children.all()
else:
- p = rpki.resource_set.roa_prefix_ipv4.parse_str(pnm)
- v = 4
- roa_request.prefixes.create(
- version = v,
- prefix = str(p.prefix),
- prefixlen = int(p.prefixlen),
- max_prefixlen = int(p.max_prefixlen))
-
+ children = self.resource_ca.children.filter(handle = child_handle)
- @django.db.transaction.commit_on_success
- def load_ghostbuster_requests(self, filename, parent = None):
- """
- Whack IRDB to match ghostbusters.vcard.
-
- This accepts one or more vCards from a file.
- """
-
- self.resource_ca.ghostbuster_requests.filter(parent = parent).delete()
-
- vcard = []
+ if valid_until is None:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
+ else:
+ valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
+ if valid_until < rpki.sundial.now():
+ raise PastExpiration("Specified new expiration time %s has passed" % valid_until)
+
+ self.log("New validity date %s" % valid_until)
+
+ for child in children:
+ child.valid_until = valid_until
+ child.save()
+
+
+ @django.db.transaction.atomic
+ def load_prefixes(self, csv_file, ignore_missing_children = False):
+ """
+ Whack IRDB to match prefixes.csv.
+ """
+
+ grouped4 = {}
+ grouped6 = {}
+
+ for handle, prefix in csv_reader(csv_file, columns = 2):
+ grouped = grouped6 if ":" in prefix else grouped4
+ if handle not in grouped:
+ grouped[handle] = []
+ grouped[handle].append(prefix)
+
+ primary_keys = []
+
+ for version, grouped, rset in ((4, grouped4, rpki.resource_set.resource_set_ipv4),
+ (6, grouped6, rpki.resource_set.resource_set_ipv6)):
+ for handle, prefixes in grouped.iteritems():
+ try:
+ child = self.resource_ca.children.get(handle = handle)
+ except rpki.irdb.models.Child.DoesNotExist:
+ if not ignore_missing_children:
+ raise
+ else:
+ for prefix in rset(",".join(prefixes)):
+ obj = rpki.irdb.models.ChildNet.objects.get_or_create(
+ child = child,
+ start_ip = str(prefix.min),
+ end_ip = str(prefix.max),
+ version = version)[0]
+ primary_keys.append(obj.pk)
+
+ q = rpki.irdb.models.ChildNet.objects
+ q = q.filter(child__issuer = self.resource_ca)
+ q = q.exclude(pk__in = primary_keys)
+ q.delete()
+
+
+ @django.db.transaction.atomic
+ def load_asns(self, csv_file, ignore_missing_children = False):
+ """
+ Whack IRDB to match asns.csv.
+ """
+
+ grouped = {}
+
+ for handle, asn in csv_reader(csv_file, columns = 2):
+ if handle not in grouped:
+ grouped[handle] = []
+ grouped[handle].append(asn)
+
+ primary_keys = []
+
+ for handle, asns in grouped.iteritems():
+ try:
+ child = self.resource_ca.children.get(handle = handle)
+ except rpki.irdb.models.Child.DoesNotExist:
+ if not ignore_missing_children:
+ raise
+ else:
+ for asn in rpki.resource_set.resource_set_as(",".join(asns)):
+ obj = rpki.irdb.models.ChildASN.objects.get_or_create(
+ child = child,
+ start_as = str(asn.min),
+ end_as = str(asn.max))[0]
+ primary_keys.append(obj.pk)
+
+ q = rpki.irdb.models.ChildASN.objects
+ q = q.filter(child__issuer = self.resource_ca)
+ q = q.exclude(pk__in = primary_keys)
+ q.delete()
+
+
+ @django.db.transaction.atomic
+ def load_roa_requests(self, csv_file):
+ """
+ Whack IRDB to match roa.csv.
+ """
+
+ grouped = {}
+
+ # format: p/n-m asn group
+ for pnm, asn, group in csv_reader(csv_file, columns = 3, min_columns = 2):
+ key = (asn, group or pnm)
+ if key not in grouped:
+ grouped[key] = []
+ grouped[key].append(pnm)
+
+ # Deleting and recreating all the ROA requests is inefficient,
+ # but rpkid's current representation of ROA requests is wrong
+ # (see #32), so it's not worth a lot of effort here as we're
+ # just going to have to rewrite this soon anyway.
+
+ self.resource_ca.roa_requests.all().delete()
+
+ for key, pnms in grouped.iteritems():
+ asn, group = key
+
+ roa_request = self.resource_ca.roa_requests.create(asn = asn)
+
+ for pnm in pnms:
+ if ":" in pnm:
+ p = rpki.resource_set.roa_prefix_ipv6.parse_str(pnm)
+ v = 6
+ else:
+ p = rpki.resource_set.roa_prefix_ipv4.parse_str(pnm)
+ v = 4
+ roa_request.prefixes.create(
+ version = v,
+ prefix = str(p.prefix),
+ prefixlen = int(p.prefixlen),
+ max_prefixlen = int(p.max_prefixlen))
+
+
+ @django.db.transaction.atomic
+ def load_ghostbuster_requests(self, vcard_file, parent = None):
+ """
+ Whack IRDB to match ghostbusters.vcard.
+
+ This accepts one or more vCards from a file.
+ """
+
+ self.resource_ca.ghostbuster_requests.filter(parent = parent).delete()
- for line in open(filename, "r"):
- if not vcard and not line.upper().startswith("BEGIN:VCARD"):
- continue
- vcard.append(line)
- if line.upper().startswith("END:VCARD"):
- self.resource_ca.ghostbuster_requests.create(vcard = "".join(vcard), parent = parent)
vcard = []
+ for line in vcard_file.read().splitlines(True):
+ if not vcard and not line.upper().startswith("BEGIN:VCARD"):
+ continue
+ vcard.append(line)
+ if line.upper().startswith("END:VCARD"):
+ self.resource_ca.ghostbuster_requests.create(vcard = "".join(vcard), parent = parent)
+ vcard = []
- def call_rpkid(self, *pdus):
- """
- Issue a call to rpkid, return result.
-
- Implementation is a little silly, constructs a wrapper object,
- invokes it once, then throws it away. Hard to do better without
- rewriting a bit of the HTTP code, as we want to be sure we're
- using the current BPKI certificate and key objects.
- """
-
- url = "http://%s:%s/left-right" % (
- self.cfg.get("rpkid_server_host", section = myrpki_section),
- self.cfg.get("rpkid_server_port", section = myrpki_section))
-
- rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid")
- irbe = self.server_ca.ee_certificates.get(purpose = "irbe")
-
- if len(pdus) == 1 and isinstance(pdus[0], types.GeneratorType):
- pdus = tuple(pdus[0])
- elif len(pdus) == 1 and isinstance(pdus[0], (tuple, list)):
- pdus = pdus[0]
-
- call_rpkid = rpki.async.sync_wrapper(
- disable_signal_handlers = self.disable_signal_handlers,
- func = rpki.http.caller(
- proto = rpki.left_right,
- client_key = irbe.private_key,
- client_cert = irbe.certificate,
- server_ta = self.server_ca.certificate,
- server_cert = rpkid.certificate,
- url = url,
- debug = self.show_xml))
-
- return call_rpkid(*pdus)
-
-
- def run_rpkid_now(self):
- """
- Poke rpkid to immediately run the cron job for the current handle.
-
- This method is used by the GUI when a user has changed something in the
- IRDB (ghostbuster, roa) which does not require a full synchronize() call,
- to force the object to be immediately issued.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, run_now = "yes"))
-
-
- def publish_world_now(self):
- """
- Poke rpkid to (re)publish everything for the current handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, publish_world_now = "yes"))
-
-
- def reissue(self):
- """
- Poke rpkid to reissue everything for the current handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, reissue = "yes"))
-
- def rekey(self):
- """
- Poke rpkid to rekey all RPKI certificates received for the current
- handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, rekey = "yes"))
-
-
- def revoke(self):
- """
- Poke rpkid to revoke old RPKI keys for the current handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, revoke = "yes"))
-
-
- def revoke_forgotten(self):
- """
- Poke rpkid to revoke old forgotten RPKI keys for the current handle.
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = self.handle, revoke_forgotten = "yes"))
-
-
- def clear_all_sql_cms_replay_protection(self):
- """
- Tell rpkid and pubd to clear replay protection for all SQL-based
- entities. This is a fairly blunt instrument, but as we don't
- expect this to be necessary except in the case of gross
- misconfiguration, it should suffice
- """
-
- self.call_rpkid(rpki.left_right.self_elt.make_pdu(action = "set", self_handle = ca.handle,
- clear_replay_protection = "yes")
- for ca in rpki.irdb.ResourceHolderCA.objects.all())
- if self.run_pubd:
- self.call_pubd(rpki.publication.client_elt.make_pdu(action = "set",
- client_handle = client.handle,
- clear_replay_protection = "yes")
- for client in self.server_ca.clients.all())
-
-
- def call_pubd(self, *pdus):
- """
- Issue a call to pubd, return result.
- Implementation is a little silly, constructs a wrapper object,
- invokes it once, then throws it away. Hard to do better without
- rewriting a bit of the HTTP code, as we want to be sure we're
- using the current BPKI certificate and key objects.
- """
-
- url = "http://%s:%s/control" % (
- self.cfg.get("pubd_server_host", section = myrpki_section),
- self.cfg.get("pubd_server_port", section = myrpki_section))
+ def call_rpkid(self, q_msg, suppress_error_check = False):
+ """
+ Issue a call to rpkid, return result.
+ """
- pubd = self.server_ca.ee_certificates.get(purpose = "pubd")
- irbe = self.server_ca.ee_certificates.get(purpose = "irbe")
+ url = "http://%s:%s/left-right" % (
+ self.cfg.get("rpkid_server_host", section = myrpki_section),
+ self.cfg.get("rpkid_server_port", section = myrpki_section))
- if len(pdus) == 1 and isinstance(pdus[0], types.GeneratorType):
- pdus = tuple(pdus[0])
- elif len(pdus) == 1 and isinstance(pdus[0], (tuple, list)):
- pdus = pdus[0]
+ rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid")
+ irbe = self.server_ca.ee_certificates.get(purpose = "irbe")
- call_pubd = rpki.async.sync_wrapper(
- disable_signal_handlers = self.disable_signal_handlers,
- func = rpki.http.caller(
- proto = rpki.publication,
- client_key = irbe.private_key,
- client_cert = irbe.certificate,
- server_ta = self.server_ca.certificate,
- server_cert = pubd.certificate,
- url = url,
- debug = self.show_xml))
+ r_msg = rpki.http_simple.client(
+ proto_cms_msg = rpki.left_right.cms_msg,
+ client_key = irbe.private_key,
+ client_cert = irbe.certificate,
+ server_ta = self.server_ca.certificate,
+ server_cert = rpkid.certificate,
+ url = url,
+ q_msg = q_msg,
+ debug = self.show_xml)
- return call_pubd(*pdus)
+ if not suppress_error_check:
+ self.check_error_report(r_msg)
+ return r_msg
- def check_error_report(self, pdus):
- """
- Check a response from rpkid or pubd for error_report PDUs, log and
- throw exceptions as needed.
- """
-
- if any(isinstance(pdu, (rpki.left_right.report_error_elt, rpki.publication.report_error_elt)) for pdu in pdus):
- for pdu in pdus:
- if isinstance(pdu, rpki.left_right.report_error_elt):
- self.log("rpkid reported failure: %s" % pdu.error_code)
- elif isinstance(pdu, rpki.publication.report_error_elt):
- self.log("pubd reported failure: %s" % pdu.error_code)
- else:
- continue
- if pdu.error_text:
- self.log(pdu.error_text)
- raise CouldntTalkToDaemon
+ def _rpkid_tenant_control(self, *bools):
+ assert all(isinstance(b, str) for b in bools)
+ q_msg = self.compose_left_right_query()
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", tenant_handle = self.handle)
+ for b in bools:
+ q_pdu.set(b, "yes")
+ return self.call_rpkid(q_msg)
- @django.db.transaction.commit_on_success
- def synchronize(self, *handles_to_poke):
- """
- Configure RPKI daemons with the data built up by the other
- commands in this program. Commands which modify the IRDB and want
- to whack everything into sync should call this when they're done,
- but be warned that this can be slow with a lot of CAs.
+ def run_rpkid_now(self):
+ """
+ Poke rpkid to immediately run the cron job for the current handle.
- Any arguments given are handles of CAs which should be poked with a
- <self run_now="yes"/> operation.
- """
+ This method is used by the GUI when a user has changed something in the
+ IRDB (ghostbuster, roa) which does not require a full synchronize() call,
+ to force the object to be immediately issued.
+ """
- for ca in rpki.irdb.ResourceHolderCA.objects.all():
- self.synchronize_rpkid_one_ca_core(ca, ca.handle in handles_to_poke)
- self.synchronize_pubd_core()
- self.synchronize_rpkid_deleted_core()
+ return self._rpkid_tenant_control("run_now")
- @django.db.transaction.commit_on_success
- def synchronize_ca(self, ca = None, poke = False):
- """
- Synchronize one CA. Most commands which modify a CA should call
- this. CA to synchronize defaults to the current resource CA.
- """
-
- if ca is None:
- ca = self.resource_ca
- self.synchronize_rpkid_one_ca_core(ca, poke)
-
-
- @django.db.transaction.commit_on_success
- def synchronize_deleted_ca(self):
- """
- Delete CAs which are present in rpkid's database but not in the
- IRDB.
- """
-
- self.synchronize_rpkid_deleted_core()
+ def publish_world_now(self):
+ """
+ Poke rpkid to (re)publish everything for the current handle.
+ """
+ return self._rpkid_tenant_control("publish_world_now")
- @django.db.transaction.commit_on_success
- def synchronize_pubd(self):
- """
- Synchronize pubd. Most commands which modify pubd should call this.
- """
- self.synchronize_pubd_core()
+ def reissue(self):
+ """
+ Poke rpkid to reissue everything for the current handle.
+ """
+ return self._rpkid_tenant_control("reissue")
- def synchronize_rpkid_one_ca_core(self, ca, poke = False):
- """
- Synchronize one CA. This is the core synchronization code. Don't
- call this directly, instead call one of the methods that calls
- this inside a Django commit wrapper.
- This method configures rpkid with data built up by the other
- commands in this program. Most commands which modify IRDB values
- related to rpkid should call this when they're done.
+ def rekey(self):
+ """
+ Poke rpkid to rekey all RPKI certificates received for the current
+ handle.
+ """
+
+ return self._rpkid_tenant_control("rekey")
- If poke is True, we append a left-right run_now operation for this
- CA to the end of whatever other commands this method generates.
- """
- # We can use a single BSC for everything -- except BSC key
- # rollovers. Drive off that bridge when we get to it.
-
- bsc_handle = "bsc"
-
- # A default RPKI CRL cycle time of six hours seems sane. One
- # might make a case for a day instead, but we've been running with
- # six hours for a while now and haven't seen a lot of whining.
-
- self_crl_interval = self.cfg.getint("self_crl_interval", 6 * 60 * 60, section = myrpki_section)
-
- # regen_margin now just controls how long before RPKI certificate
- # expiration we should regenerate; it used to control the interval
- # before RPKI CRL staleness at which to regenerate the CRL, but
- # using the same timer value for both of these is hopeless.
- #
- # A default regeneration margin of two weeks gives enough time for
- # humans to react. We add a two hour fudge factor in the hope
- # that this will regenerate certificates just *before* the
- # companion cron job warns of impending doom.
-
- self_regen_margin = self.cfg.getint("self_regen_margin", 14 * 24 * 60 * 60 + 2 * 60, section = myrpki_section)
-
- # See what rpkid already has on file for this entity.
-
- rpkid_reply = self.call_rpkid(
- rpki.left_right.self_elt.make_pdu( action = "get", tag = "self", self_handle = ca.handle),
- rpki.left_right.bsc_elt.make_pdu( action = "list", tag = "bsc", self_handle = ca.handle),
- rpki.left_right.repository_elt.make_pdu(action = "list", tag = "repository", self_handle = ca.handle),
- rpki.left_right.parent_elt.make_pdu( action = "list", tag = "parent", self_handle = ca.handle),
- rpki.left_right.child_elt.make_pdu( action = "list", tag = "child", self_handle = ca.handle))
-
- self_pdu = rpkid_reply[0]
- bsc_pdus = dict((x.bsc_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.bsc_elt))
- repository_pdus = dict((x.repository_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.repository_elt))
- parent_pdus = dict((x.parent_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.parent_elt))
- child_pdus = dict((x.child_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.child_elt))
-
- rpkid_query = []
-
- self_cert, created = rpki.irdb.HostedCA.objects.get_or_certify(
- issuer = self.server_ca,
- hosted = ca)
-
- # There should be exactly one <self/> object per hosted entity, by definition
-
- if (isinstance(self_pdu, rpki.left_right.report_error_elt) or
- self_pdu.crl_interval != self_crl_interval or
- self_pdu.regen_margin != self_regen_margin or
- self_pdu.bpki_cert != self_cert.certificate):
- rpkid_query.append(rpki.left_right.self_elt.make_pdu(
- action = "create" if isinstance(self_pdu, rpki.left_right.report_error_elt) else "set",
- tag = "self",
- self_handle = ca.handle,
- bpki_cert = ca.certificate,
- crl_interval = self_crl_interval,
- regen_margin = self_regen_margin))
-
- # In general we only need one <bsc/> per <self/>. BSC objects
- # are a little unusual in that the keypair and PKCS #10
- # subelement is generated by rpkid, so complete setup requires
- # two round trips.
-
- bsc_pdu = bsc_pdus.pop(bsc_handle, None)
-
- if bsc_pdu is None:
- rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(
- action = "create",
- tag = "bsc",
- self_handle = ca.handle,
- bsc_handle = bsc_handle,
- generate_keypair = "yes"))
-
- elif bsc_pdu.pkcs10_request is None:
- rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(
- action = "set",
- tag = "bsc",
- self_handle = ca.handle,
- bsc_handle = bsc_handle,
- generate_keypair = "yes"))
-
- rpkid_query.extend(rpki.left_right.bsc_elt.make_pdu(
- action = "destroy", self_handle = ca.handle, bsc_handle = b) for b in bsc_pdus)
-
- # If we've already got actions queued up, run them now, so we
- # can finish setting up the BSC before anything tries to use it.
-
- if rpkid_query:
- rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(action = "list", tag = "bsc", self_handle = ca.handle))
- rpkid_reply = self.call_rpkid(rpkid_query)
- bsc_pdus = dict((x.bsc_handle, x)
- for x in rpkid_reply
- if isinstance(x, rpki.left_right.bsc_elt) and x.action == "list")
- bsc_pdu = bsc_pdus.pop(bsc_handle, None)
- self.check_error_report(rpkid_reply)
-
- rpkid_query = []
-
- assert bsc_pdu.pkcs10_request is not None
-
- bsc, created = rpki.irdb.BSC.objects.get_or_certify(
- issuer = ca,
- handle = bsc_handle,
- pkcs10 = bsc_pdu.pkcs10_request)
-
- if bsc_pdu.signing_cert != bsc.certificate or bsc_pdu.signing_cert_crl != ca.latest_crl:
- rpkid_query.append(rpki.left_right.bsc_elt.make_pdu(
- action = "set",
- tag = "bsc",
- self_handle = ca.handle,
- bsc_handle = bsc_handle,
- signing_cert = bsc.certificate,
- signing_cert_crl = ca.latest_crl))
-
- # At present we need one <repository/> per <parent/>, not because
- # rpkid requires that, but because pubd does. pubd probably should
- # be fixed to support a single client allowed to update multiple
- # trees, but for the moment the easiest way forward is just to
- # enforce a 1:1 mapping between <parent/> and <repository/> objects
-
- for repository in ca.repositories.all():
-
- repository_pdu = repository_pdus.pop(repository.handle, None)
-
- if (repository_pdu is None or
- repository_pdu.bsc_handle != bsc_handle or
- repository_pdu.peer_contact_uri != repository.service_uri or
- repository_pdu.bpki_cert != repository.certificate):
- rpkid_query.append(rpki.left_right.repository_elt.make_pdu(
- action = "create" if repository_pdu is None else "set",
- tag = repository.handle,
- self_handle = ca.handle,
- repository_handle = repository.handle,
- bsc_handle = bsc_handle,
- peer_contact_uri = repository.service_uri,
- bpki_cert = repository.certificate))
-
- rpkid_query.extend(rpki.left_right.repository_elt.make_pdu(
- action = "destroy", self_handle = ca.handle, repository_handle = r) for r in repository_pdus)
-
- # <parent/> setup code currently assumes 1:1 mapping between
- # <repository/> and <parent/>, and further assumes that the handles
- # for an associated pair are the identical (that is:
- # parent.repository_handle == parent.parent_handle).
- #
- # If no such repository exists, our choices are to ignore the
- # parent entry or throw an error. For now, we ignore the parent.
-
- for parent in ca.parents.all():
-
- try:
-
- parent_pdu = parent_pdus.pop(parent.handle, None)
-
- if (parent_pdu is None or
- parent_pdu.bsc_handle != bsc_handle or
- parent_pdu.repository_handle != parent.handle or
- parent_pdu.peer_contact_uri != parent.service_uri or
- parent_pdu.sia_base != parent.repository.sia_base or
- parent_pdu.sender_name != parent.child_handle or
- parent_pdu.recipient_name != parent.parent_handle or
- parent_pdu.bpki_cms_cert != parent.certificate):
- rpkid_query.append(rpki.left_right.parent_elt.make_pdu(
- action = "create" if parent_pdu is None else "set",
- tag = parent.handle,
- self_handle = ca.handle,
- parent_handle = parent.handle,
- bsc_handle = bsc_handle,
- repository_handle = parent.handle,
- peer_contact_uri = parent.service_uri,
- sia_base = parent.repository.sia_base,
- sender_name = parent.child_handle,
- recipient_name = parent.parent_handle,
- bpki_cms_cert = parent.certificate))
-
- except rpki.irdb.Repository.DoesNotExist:
- pass
-
- try:
-
- parent_pdu = parent_pdus.pop(ca.handle, None)
-
- if (parent_pdu is None or
- parent_pdu.bsc_handle != bsc_handle or
- parent_pdu.repository_handle != ca.handle or
- parent_pdu.peer_contact_uri != ca.rootd.service_uri or
- parent_pdu.sia_base != ca.rootd.repository.sia_base or
- parent_pdu.sender_name != ca.handle or
- parent_pdu.recipient_name != ca.handle or
- parent_pdu.bpki_cms_cert != ca.rootd.certificate):
- rpkid_query.append(rpki.left_right.parent_elt.make_pdu(
- action = "create" if parent_pdu is None else "set",
- tag = ca.handle,
- self_handle = ca.handle,
- parent_handle = ca.handle,
- bsc_handle = bsc_handle,
- repository_handle = ca.handle,
- peer_contact_uri = ca.rootd.service_uri,
- sia_base = ca.rootd.repository.sia_base,
- sender_name = ca.handle,
- recipient_name = ca.handle,
- bpki_cms_cert = ca.rootd.certificate))
-
- except rpki.irdb.Rootd.DoesNotExist:
- pass
-
- rpkid_query.extend(rpki.left_right.parent_elt.make_pdu(
- action = "destroy", self_handle = ca.handle, parent_handle = p) for p in parent_pdus)
-
- # Children are simpler than parents, because they call us, so no URL
- # to construct and figuring out what certificate to use is their
- # problem, not ours.
-
- for child in ca.children.all():
-
- child_pdu = child_pdus.pop(child.handle, None)
-
- if (child_pdu is None or
- child_pdu.bsc_handle != bsc_handle or
- child_pdu.bpki_cert != child.certificate):
- rpkid_query.append(rpki.left_right.child_elt.make_pdu(
- action = "create" if child_pdu is None else "set",
- tag = child.handle,
- self_handle = ca.handle,
- child_handle = child.handle,
- bsc_handle = bsc_handle,
- bpki_cert = child.certificate))
-
- rpkid_query.extend(rpki.left_right.child_elt.make_pdu(
- action = "destroy", self_handle = ca.handle, child_handle = c) for c in child_pdus)
-
- # If caller wants us to poke rpkid, add that to the very end of the message
-
- if poke:
- rpkid_query.append(rpki.left_right.self_elt.make_pdu(
- action = "set", self_handle = ca.handle, run_now = "yes"))
-
- # If we changed anything, ship updates off to rpkid
-
- if rpkid_query:
- rpkid_reply = self.call_rpkid(rpkid_query)
- bsc_pdus = dict((x.bsc_handle, x) for x in rpkid_reply if isinstance(x, rpki.left_right.bsc_elt))
- if bsc_handle in bsc_pdus and bsc_pdus[bsc_handle].pkcs10_request:
- bsc_req = bsc_pdus[bsc_handle].pkcs10_request
- self.check_error_report(rpkid_reply)
-
-
- def synchronize_pubd_core(self):
- """
- Configure pubd with data built up by the other commands in this
- program. This is the core synchronization code. Don't call this
- directly, instead call a methods that calls this inside a Django
- commit wrapper.
-
- This method configures pubd with data built up by the other
- commands in this program. Commands which modify IRDB fields
- related to pubd should call this when they're done.
- """
+ def revoke(self):
+ """
+ Poke rpkid to revoke old RPKI keys for the current handle.
+ """
- # If we're not running pubd, the rest of this is a waste of time
+ return self._rpkid_tenant_control("revoke")
- if not self.run_pubd:
- return
- # Make sure that pubd's BPKI CRL is up to date.
+ def revoke_forgotten(self):
+ """
+ Poke rpkid to revoke old forgotten RPKI keys for the current handle.
+ """
+
+ return self._rpkid_tenant_control("revoke_forgotten")
- self.call_pubd(rpki.publication.config_elt.make_pdu(
- action = "set",
- bpki_crl = self.server_ca.latest_crl))
- # See what pubd already has on file
+ def clear_all_sql_cms_replay_protection(self):
+ """
+ Tell rpkid and pubd to clear replay protection for all SQL-based
+ entities. This is a fairly blunt instrument, but as we don't
+ expect this to be necessary except in the case of gross
+ misconfiguration, it should suffice.
+ """
- pubd_reply = self.call_pubd(rpki.publication.client_elt.make_pdu(action = "list"))
- client_pdus = dict((x.client_handle, x) for x in pubd_reply if isinstance(x, rpki.publication.client_elt))
- pubd_query = []
+ if self.run_rpkid:
+ q_msg = self.compose_left_right_query()
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "set",
+ tenant_handle = ca.handle, clear_replay_protection = "yes")
+ self.call_rpkid(q_msg)
- # Check all clients
+ if self.run_pubd:
+ q_msg = self._compose_publication_control_query()
+ for client in self.server_ca.clients.all():
+ SubElement(q_msg, rpki.publication_control.tag_client, action = "set",
+ client_handle = client.handle, clear_replay_protection = "yes")
+ self.call_pubd(q_msg)
- for client in self.server_ca.clients.all():
- client_pdu = client_pdus.pop(client.handle, None)
+ def call_pubd(self, q_msg):
+ """
+ Issue a call to pubd, return result.
+ """
- if (client_pdu is None or
- client_pdu.base_uri != client.sia_base or
- client_pdu.bpki_cert != client.certificate):
- pubd_query.append(rpki.publication.client_elt.make_pdu(
- action = "create" if client_pdu is None else "set",
- client_handle = client.handle,
- bpki_cert = client.certificate,
- base_uri = client.sia_base))
+ url = "http://%s:%s/control" % (
+ self.cfg.get("pubd_server_host", section = myrpki_section),
+ self.cfg.get("pubd_server_port", section = myrpki_section))
- # Delete any unknown clients
+ pubd = self.server_ca.ee_certificates.get(purpose = "pubd")
+ irbe = self.server_ca.ee_certificates.get(purpose = "irbe")
- pubd_query.extend(rpki.publication.client_elt.make_pdu(
- action = "destroy", client_handle = p) for p in client_pdus)
+ r_msg = rpki.http_simple.client(
+ proto_cms_msg = rpki.publication_control.cms_msg,
+ client_key = irbe.private_key,
+ client_cert = irbe.certificate,
+ server_ta = self.server_ca.certificate,
+ server_cert = pubd.certificate,
+ url = url,
+ q_msg = q_msg,
+ debug = self.show_xml)
- # If we changed anything, ship updates off to pubd
+ self.check_error_report(r_msg)
+ return r_msg
- if pubd_query:
- pubd_reply = self.call_pubd(pubd_query)
- self.check_error_report(pubd_reply)
+ def check_error_report(self, r_msg):
+ """
+ Check a response from rpkid or pubd for error_report PDUs, log and
+ throw exceptions as needed.
+ """
- def synchronize_rpkid_deleted_core(self):
- """
- Remove any <self/> objects present in rpkid's database but not
- present in the IRDB. This is the core synchronization code.
- Don't call this directly, instead call a methods that calls this
- inside a Django commit wrapper.
- """
+ failed = False
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_report_error):
+ failed = True
+ self.log("rpkid reported failure: %s" % r_pdu.get("error_code"))
+ if r_pdu.text:
+ self.log(r_pdu.text)
+ for r_pdu in r_msg.getiterator(rpki.publication_control.tag_report_error):
+ failed = True
+ self.log("pubd reported failure: %s" % r_pdu.get("error_code"))
+ if r_pdu.text:
+ self.log(r_pdu.text)
+ if failed:
+ raise CouldntTalkToDaemon
+
+
+ @django.db.transaction.atomic
+ def synchronize(self, *handles_to_poke):
+ """
+ Configure RPKI daemons with the data built up by the other
+ commands in this program. Commands which modify the IRDB and want
+ to whack everything into sync should call this when they're done,
+ but be warned that this can be slow with a lot of CAs.
- rpkid_reply = self.call_rpkid(rpki.left_right.self_elt.make_pdu(action = "list"))
- self.check_error_report(rpkid_reply)
+ Any arguments given are handles of CAs which should be poked with a
+ <tenant run_now="yes"/> operation.
+ """
+
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ self.synchronize_rpkid_one_ca_core(ca, ca.handle in handles_to_poke)
+ self.synchronize_pubd_core()
+ self.synchronize_rpkid_deleted_core()
+
+
+ @django.db.transaction.atomic
+ def synchronize_ca(self, ca = None, poke = False):
+ """
+ Synchronize one CA. Most commands which modify a CA should call
+ this. CA to synchronize defaults to the current resource CA.
+ """
+
+ if ca is None:
+ ca = self.resource_ca
+ self.synchronize_rpkid_one_ca_core(ca, poke)
+
+
+ @django.db.transaction.atomic
+ def synchronize_deleted_ca(self):
+ """
+ Delete CAs which are present in rpkid's database but not in the
+ IRDB.
+ """
+
+ self.synchronize_rpkid_deleted_core()
+
+
+ @django.db.transaction.atomic
+ def synchronize_pubd(self):
+ """
+ Synchronize pubd. Most commands which modify pubd should call this.
+ """
+
+ self.synchronize_pubd_core()
+
+
+ def synchronize_rpkid_one_ca_core(self, ca, poke = False):
+ """
+ Synchronize one CA. This is the core synchronization code. Don't
+ call this directly, instead call one of the methods that calls
+ this inside a Django commit wrapper.
+
+ This method configures rpkid with data built up by the other
+ commands in this program. Most commands which modify IRDB values
+ related to rpkid should call this when they're done.
+
+ If poke is True, we append a left-right run_now operation for this
+ CA to the end of whatever other commands this method generates.
+ """
+
+ # pylint: disable=C0330
+
+ # We can use a single BSC for everything -- except BSC key
+ # rollovers. Drive off that bridge when we get to it.
+
+ bsc_handle = "bsc"
+
+ # A default RPKI CRL cycle time of six hours seems sane. One
+ # might make a case for a day instead, but we've been running with
+ # six hours for a while now and haven't seen a lot of whining.
+
+ tenant_crl_interval = self.cfg.getint("tenant_crl_interval",
+ 6 * 60 * 60,
+ section = myrpki_section)
+
+ # regen_margin now just controls how long before RPKI certificate
+ # expiration we should regenerate; it used to control the interval
+ # before RPKI CRL staleness at which to regenerate the CRL, but
+ # using the same timer value for both of these is hopeless.
+ #
+ # A default regeneration margin of two weeks gives enough time for
+ # humans to react. We add a two hour fudge factor in the hope
+ # that this will regenerate certificates just *before* the
+ # companion cron job warns of impending doom.
+
+ tenant_regen_margin = self.cfg.getint("tenant_regen_margin",
+ 14 * 24 * 60 * 60 + 2 * 60,
+ section = myrpki_section)
+
+ # See what rpkid already has on file for this entity.
+
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "get", tenant_handle = ca.handle)
+ SubElement(q_msg, rpki.left_right.tag_bsc, action = "list", tenant_handle = ca.handle)
+ SubElement(q_msg, rpki.left_right.tag_repository, action = "list", tenant_handle = ca.handle)
+ SubElement(q_msg, rpki.left_right.tag_parent, action = "list", tenant_handle = ca.handle)
+ SubElement(q_msg, rpki.left_right.tag_child, action = "list", tenant_handle = ca.handle)
+
+ r_msg = self.call_rpkid(q_msg, suppress_error_check = True)
+
+ self.check_error_report(r_msg)
+
+ tenant_pdu = r_msg.find(rpki.left_right.tag_tenant)
+
+ bsc_pdus = dict((r_pdu.get("bsc_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_bsc))
+ repository_pdus = dict((r_pdu.get("repository_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_repository))
+ parent_pdus = dict((r_pdu.get("parent_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_parent))
+ child_pdus = dict((r_pdu.get("child_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_child))
+
+ q_msg = self.compose_left_right_query()
+
+ tenant_cert = rpki.irdb.models.HostedCA.objects.get_or_certify(
+ issuer = self.server_ca,
+ hosted = ca)[0]
+
+ # There should be exactly one <tenant/> object per hosted entity, by definition
+
+ if (tenant_pdu is None or
+ tenant_pdu.get("crl_interval") != str(tenant_crl_interval) or
+ tenant_pdu.get("regen_margin") != str(tenant_regen_margin) or
+ tenant_pdu.findtext(rpki.left_right.tag_bpki_cert,
+ "").decode("base64") != tenant_cert.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant,
+ action = "create" if tenant_pdu is None else "set",
+ tag = "tenant",
+ tenant_handle = ca.handle,
+ crl_interval = str(tenant_crl_interval),
+ regen_margin = str(tenant_regen_margin))
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.certificate.get_Base64()
+
+ # In general we only need one <bsc/> per <tenant/>. BSC objects
+ # are a little unusual in that the keypair and PKCS #10
+ # subelement are generated by rpkid, so complete setup requires
+ # two round trips.
+
+ bsc_pdu = bsc_pdus.pop(bsc_handle, None)
+
+ if bsc_pdu is None or bsc_pdu.find(rpki.left_right.tag_pkcs10_request) is None:
+ SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "create" if bsc_pdu is None else "set",
+ tag = "bsc",
+ tenant_handle = ca.handle,
+ bsc_handle = bsc_handle,
+ generate_keypair = "yes")
+
+ for bsc_handle in bsc_pdus:
+ SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "destroy", tenant_handle = ca.handle, bsc_handle = bsc_handle)
+
+ # If we've already got actions queued up, run them now, so we
+ # can finish setting up the BSC before anything tries to use it.
+
+ if len(q_msg) > 0:
+ SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "list", tag = "bsc", tenant_handle = ca.handle)
+ r_msg = self.call_rpkid(q_msg)
+ bsc_pdus = dict((r_pdu.get("bsc_handle"), r_pdu)
+ for r_pdu in r_msg.getiterator(rpki.left_right.tag_bsc)
+ if r_pdu.get("action") == "list")
+ bsc_pdu = bsc_pdus.pop(bsc_handle, None)
+
+ q_msg = self.compose_left_right_query()
+
+ bsc_pkcs10 = bsc_pdu.find(rpki.left_right.tag_pkcs10_request)
+ assert bsc_pkcs10 is not None
+
+ bsc = rpki.irdb.models.BSC.objects.get_or_certify(
+ issuer = ca,
+ handle = bsc_handle,
+ pkcs10 = rpki.x509.PKCS10(Base64 = bsc_pkcs10.text))[0]
+
+ if (bsc_pdu.findtext(rpki.left_right.tag_signing_cert,
+ "").decode("base64") != bsc.certificate.get_DER() or
+ bsc_pdu.findtext(rpki.left_right.tag_signing_cert_crl,
+ "").decode("base64") != ca.latest_crl.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_bsc,
+ action = "set",
+ tag = "bsc",
+ tenant_handle = ca.handle,
+ bsc_handle = bsc_handle)
+ SubElement(q_pdu, rpki.left_right.tag_signing_cert).text = bsc.certificate.get_Base64()
+ SubElement(q_pdu, rpki.left_right.tag_signing_cert_crl).text = ca.latest_crl.get_Base64()
+
+ # At present we need one <repository/> per <parent/>, not because
+ # rpkid requires that, but because pubd does. pubd probably should
+ # be fixed to support a single client allowed to update multiple
+ # trees, but for the moment the easiest way forward is just to
+ # enforce a 1:1 mapping between <parent/> and <repository/> objects
+
+ for repository in ca.repositories.all():
+
+ repository_pdu = repository_pdus.pop(repository.handle, None)
+
+ if (repository_pdu is None or
+ repository_pdu.get("bsc_handle") != bsc_handle or
+ repository_pdu.get("peer_contact_uri") != repository.service_uri or
+ repository_pdu.get("rrdp_notification_uri") != repository.rrdp_notification_uri or
+ repository_pdu.findtext(rpki.left_right.tag_bpki_cert,
+ "").decode("base64") != repository.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_repository,
+ action = "create" if repository_pdu is None else "set",
+ tag = repository.handle,
+ tenant_handle = ca.handle,
+ repository_handle = repository.handle,
+ bsc_handle = bsc_handle,
+ peer_contact_uri = repository.service_uri)
+ if repository.rrdp_notification_uri:
+ q_pdu.set("rrdp_notification_uri", repository.rrdp_notification_uri)
+ SubElement(q_pdu,
+ rpki.left_right.tag_bpki_cert).text = repository.certificate.get_Base64()
+
+ for repository_handle in repository_pdus:
+ SubElement(q_msg, rpki.left_right.tag_repository, action = "destroy",
+ tenant_handle = ca.handle, repository_handle = repository_handle)
+
+ # <parent/> setup code currently assumes 1:1 mapping between
+ # <repository/> and <parent/>, and further assumes that the handles
+ # for an associated pair are the identical (that is:
+ # parent.repository_handle == parent.parent_handle).
+ #
+ # If no such repository exists, our choices are to ignore the
+ # parent entry or throw an error. For now, we ignore the parent.
+
+ for parent in ca.parents.all():
+
+ try:
+ parent_pdu = parent_pdus.pop(parent.handle, None)
+
+ if (parent_pdu is None or
+ parent_pdu.get("bsc_handle") != bsc_handle or
+ parent_pdu.get("repository_handle") != parent.handle or
+ parent_pdu.get("peer_contact_uri") != parent.service_uri or
+ parent_pdu.get("sia_base") != parent.repository.sia_base or
+ parent_pdu.get("sender_name") != parent.child_handle or
+ parent_pdu.get("recipient_name") != parent.parent_handle or
+ parent_pdu.get("root_asn_resources", "") != parent.asn_resources or
+ parent_pdu.get("root_ipv4_resources", "") != parent.ipv4_resources or
+ parent_pdu.get("root_ipv6_resources", "") != parent.ipv6_resources or
+ parent_pdu.findtext(rpki.left_right.tag_bpki_cert,
+ "").decode("base64") != parent.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_parent,
+ action = "create" if parent_pdu is None else "set",
+ tag = parent.handle,
+ tenant_handle = ca.handle,
+ parent_handle = parent.handle,
+ bsc_handle = bsc_handle,
+ repository_handle = parent.handle,
+ peer_contact_uri = parent.service_uri,
+ sia_base = parent.repository.sia_base,
+ sender_name = parent.child_handle,
+ recipient_name = parent.parent_handle,
+ root_asn_resources = parent.asn_resources,
+ root_ipv4_resources = parent.ipv4_resources,
+ root_ipv6_resources = parent.ipv6_resources)
+ SubElement(q_pdu,
+ rpki.left_right.tag_bpki_cert).text = parent.certificate.get_Base64()
+
+ except rpki.irdb.models.Repository.DoesNotExist:
+ pass
+
+ for parent_handle in parent_pdus:
+ SubElement(q_msg, rpki.left_right.tag_parent, action = "destroy",
+ tenant_handle = ca.handle, parent_handle = parent_handle)
+
+ # Children are simpler than parents, because they call us, so no URL
+ # to construct and figuring out what certificate to use is their
+ # problem, not ours.
+
+ for child in ca.children.all():
+
+ child_pdu = child_pdus.pop(child.handle, None)
+
+ if (child_pdu is None or
+ child_pdu.get("bsc_handle") != bsc_handle or
+ child_pdu.findtext(rpki.left_right.tag_bpki_cert).decode("base64") != child.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.left_right.tag_child,
+ action = "create" if child_pdu is None else "set",
+ tag = child.handle,
+ tenant_handle = ca.handle,
+ child_handle = child.handle,
+ bsc_handle = bsc_handle)
+ SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = child.certificate.get_Base64()
+
+ for child_handle in child_pdus:
+ SubElement(q_msg, rpki.left_right.tag_child, action = "destroy",
+ tenant_handle = ca.handle, child_handle = child_handle)
+
+ # If caller wants us to poke rpkid, add that to the very end of the message
+
+ if poke:
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", tenant_handle = ca.handle, run_now = "yes")
+
+ # If we changed anything, ship updates off to rpkid.
+
+ if len(q_msg) > 0:
+ self.call_rpkid(q_msg)
+
+
+ def synchronize_pubd_core(self):
+ """
+ Configure pubd with data built up by the other commands in this
+ program. This is the core synchronization code. Don't call this
+ directly, instead call a method that calls this inside a Django
+ commit wrapper.
+
+ This method configures pubd with data built up by the other
+ commands in this program. Commands which modify IRDB fields
+ related to pubd should call this when they're done.
+ """
+
+ # pylint: disable=C0330
+
+ # If we're not running pubd, the rest of this is a waste of time
+
+ if not self.run_pubd:
+ return
+
+ # See what pubd already has on file
+
+ q_msg = self._compose_publication_control_query()
+ SubElement(q_msg, rpki.publication_control.tag_client, action = "list")
+ r_msg = self.call_pubd(q_msg)
+ client_pdus = dict((r_pdu.get("client_handle"), r_pdu)
+ for r_pdu in r_msg)
+
+ # Check all clients
+
+ q_msg = self._compose_publication_control_query()
+
+ for client in self.server_ca.clients.all():
+
+ client_pdu = client_pdus.pop(client.handle, None)
+
+ if (client_pdu is None or
+ client_pdu.get("base_uri") != client.sia_base or
+ client_pdu.findtext(rpki.publication_control.tag_bpki_cert, "").decode("base64") != client.certificate.get_DER()):
+ q_pdu = SubElement(q_msg, rpki.publication_control.tag_client,
+ action = "create" if client_pdu is None else "set",
+ client_handle = client.handle,
+ base_uri = client.sia_base)
+ SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = client.certificate.get_Base64()
+
+ # Delete any unknown clients
+
+ for client_handle in client_pdus:
+ SubElement(q_msg, rpki.publication_control.tag_client, action = "destroy", client_handle = client_handle)
- self_handles = set(s.self_handle for s in rpkid_reply)
- ca_handles = set(ca.handle for ca in rpki.irdb.ResourceHolderCA.objects.all())
- assert ca_handles <= self_handles
+ # If we changed anything, ship updates off to pubd
- rpkid_query = [rpki.left_right.self_elt.make_pdu(action = "destroy", self_handle = handle)
- for handle in (self_handles - ca_handles)]
+ if len(q_msg) > 0:
+ self.call_pubd(q_msg)
- if rpkid_query:
- rpkid_reply = self.call_rpkid(rpkid_query)
- self.check_error_report(rpkid_reply)
+ def synchronize_rpkid_deleted_core(self):
+ """
+ Remove any <tenant/> objects present in rpkid's database but not
+ present in the IRDB. This is the core synchronization code.
+ Don't call this directly, instead call a method that calls this
+ inside a Django commit wrapper.
+ """
- @django.db.transaction.commit_on_success
- def add_ee_certificate_request(self, pkcs10, resources):
- """
- Check a PKCS #10 request to see if it complies with the
- specification for a RPKI EE certificate; if it does, add an
- EECertificateRequest for it to the IRDB.
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "list")
+ r_msg = self.call_rpkid(q_msg)
- Not yet sure what we want for update and delete semantics here, so
- for the moment this is straight addition. See methods like
- .load_asns() and .load_prefixes() for other strategies.
- """
+ tenant_handles = set(s.get("tenant_handle") for s in r_msg)
+ ca_handles = set(ca.handle for ca in rpki.irdb.models.ResourceHolderCA.objects.all())
+ assert ca_handles <= tenant_handles
- pkcs10.check_valid_request_ee()
- ee_request = self.resource_ca.ee_certificate_requests.create(
- pkcs10 = pkcs10,
- gski = pkcs10.gSKI(),
- valid_until = resources.valid_until)
- for r in resources.asn:
- ee_request.asns.create(start_as = str(r.min), end_as = str(r.max))
- for r in resources.v4:
- ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 4)
- for r in resources.v6:
- ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 6)
-
-
- @django.db.transaction.commit_on_success
- def add_router_certificate_request(self, router_certificate_request_xml, valid_until = None):
- """
- Read XML file containing one or more router certificate requests,
- attempt to add request(s) to IRDB.
+ q_msg = self.compose_left_right_query()
+ for handle in (tenant_handles - ca_handles):
+ SubElement(q_msg, rpki.left_right.tag_tenant, action = "destroy", tenant_handle = handle)
- Check each PKCS #10 request to see if it complies with the
- specification for a router certificate; if it does, create an EE
- certificate request for it along with the ASN resources and
- router-ID supplied in the XML.
- """
+ if len(q_msg) > 0:
+ self.call_rpkid(q_msg)
- xml = ElementTree(file = router_certificate_request_xml).getroot()
- rpki.relaxng.router_certificate.assertValid(xml)
- for req in xml.getiterator(routercert_xmlns + "router_certificate_request"):
+ @django.db.transaction.atomic
+ def add_ee_certificate_request(self, pkcs10, resources):
+ """
+ Check a PKCS #10 request to see if it complies with the
+ specification for a RPKI EE certificate; if it does, add an
+ EECertificateRequest for it to the IRDB.
- pkcs10 = rpki.x509.PKCS10(Base64 = req.text)
- router_id = long(req.get("router_id"))
- asns = rpki.resource_set.resource_set_as(req.get("asn"))
- if not valid_until:
- valid_until = req.get("valid_until")
+ Not yet sure what we want for update and delete semantics here, so
+ for the moment this is straight addition. See methods like
+ .load_asns() and .load_prefixes() for other strategies.
+ """
- if valid_until and isinstance(valid_until, (str, unicode)):
- valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
+ pkcs10.check_valid_request_ee()
+ ee_request = self.resource_ca.ee_certificate_requests.create(
+ pkcs10 = pkcs10,
+ gski = pkcs10.gSKI(),
+ valid_until = resources.valid_until)
+ for r in resources.asn:
+ ee_request.asns.create(start_as = str(r.min), end_as = str(r.max))
+ for r in resources.v4:
+ ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 4)
+ for r in resources.v6:
+ ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 6)
- if not valid_until:
- valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
- elif valid_until < rpki.sundial.now():
- raise PastExpiration("Specified expiration date %s has already passed" % valid_until)
- pkcs10.check_valid_request_router()
+ @django.db.transaction.atomic
+ def add_router_certificate_request(self, xml_file, valid_until = None):
+ """
+ Read XML file containing one or more router certificate requests,
+ attempt to add request(s) to IRDB.
- cn = "ROUTER-%08x" % asns[0].min
- sn = "%08x" % router_id
+ Check each PKCS #10 request to see if it complies with the
+ specification for a router certificate; if it does, create an EE
+ certificate request for it along with the ASN resources and
+ router-ID supplied in the XML.
+ """
- ee_request = self.resource_ca.ee_certificate_requests.create(
- pkcs10 = pkcs10,
- gski = pkcs10.gSKI(),
- valid_until = valid_until,
- cn = cn,
- sn = sn,
- eku = rpki.oids.id_kp_bgpsec_router)
+ x = etree_read(xml_file, schema = rpki.relaxng.router_certificate)
- for r in asns:
- ee_request.asns.create(start_as = str(r.min), end_as = str(r.max))
+ for x in x.getiterator(tag_router_certificate_request):
+ pkcs10 = rpki.x509.PKCS10(Base64 = x.text)
+ router_id = long(x.get("router_id"))
+ asns = rpki.resource_set.resource_set_as(x.get("asn"))
+ if not valid_until:
+ valid_until = x.get("valid_until")
- @django.db.transaction.commit_on_success
- def delete_router_certificate_request(self, gski):
- """
- Delete a router certificate request from this RPKI entity.
- """
+ if valid_until and isinstance(valid_until, (str, unicode)):
+ valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
- self.resource_ca.ee_certificate_requests.get(gski = gski).delete()
+ if not valid_until:
+ valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365)
+ elif valid_until < rpki.sundial.now():
+ raise PastExpiration("Specified expiration date %s has already passed" % valid_until)
+
+ pkcs10.check_valid_request_router()
+
+ cn = "ROUTER-%08x" % asns[0].min
+ sn = "%08x" % router_id
+
+ ee_request = self.resource_ca.ee_certificate_requests.create(
+ pkcs10 = pkcs10,
+ gski = pkcs10.gSKI(),
+ valid_until = valid_until,
+ cn = cn,
+ sn = sn,
+ eku = rpki.oids.id_kp_bgpsec_router)
+
+ for r in asns:
+ ee_request.asns.create(start_as = str(r.min), end_as = str(r.max))
+
+
+ @django.db.transaction.atomic
+ def delete_router_certificate_request(self, gski):
+ """
+ Delete a router certificate request from this RPKI entity.
+ """
+
+ self.resource_ca.ee_certificate_requests.get(gski = gski).delete()
diff --git a/rpki/irdbd.py b/rpki/irdbd.py
index ae08b6fb..98fe83ea 100644
--- a/rpki/irdbd.py
+++ b/rpki/irdbd.py
@@ -25,8 +25,7 @@ import os
import time
import logging
import argparse
-import urlparse
-import rpki.http
+import rpki.http_simple
import rpki.config
import rpki.resource_set
import rpki.relaxng
@@ -36,241 +35,214 @@ import rpki.log
import rpki.x509
import rpki.daemonize
+from lxml.etree import Element, SubElement, tostring as ElementToString
+
logger = logging.getLogger(__name__)
class main(object):
- def handle_list_resources(self, q_pdu, r_msg):
- child = rpki.irdb.Child.objects.get(
- issuer__handle__exact = q_pdu.self_handle,
- handle = q_pdu.child_handle)
- resources = child.resource_bag
- r_pdu = rpki.left_right.list_resources_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.child_handle = q_pdu.child_handle
- r_pdu.valid_until = child.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
- r_pdu.asn = resources.asn
- r_pdu.ipv4 = resources.v4
- r_pdu.ipv6 = resources.v6
- r_msg.append(r_pdu)
-
- def handle_list_roa_requests(self, q_pdu, r_msg):
- for request in rpki.irdb.ROARequest.objects.raw("""
- SELECT irdb_roarequest.*
- FROM irdb_roarequest, irdb_resourceholderca
- WHERE irdb_roarequest.issuer_id = irdb_resourceholderca.id
- AND irdb_resourceholderca.handle = %s
- """, [q_pdu.self_handle]):
- prefix_bag = request.roa_prefix_bag
- r_pdu = rpki.left_right.list_roa_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.asn = request.asn
- r_pdu.ipv4 = prefix_bag.v4
- r_pdu.ipv6 = prefix_bag.v6
- r_msg.append(r_pdu)
-
- def handle_list_ghostbuster_requests(self, q_pdu, r_msg):
- ghostbusters = rpki.irdb.GhostbusterRequest.objects.filter(
- issuer__handle__exact = q_pdu.self_handle,
- parent__handle__exact = q_pdu.parent_handle)
- if ghostbusters.count() == 0:
- ghostbusters = rpki.irdb.GhostbusterRequest.objects.filter(
- issuer__handle__exact = q_pdu.self_handle,
- parent = None)
- for ghostbuster in ghostbusters:
- r_pdu = rpki.left_right.list_ghostbuster_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.parent_handle = q_pdu.parent_handle
- r_pdu.vcard = ghostbuster.vcard
- r_msg.append(r_pdu)
-
- def handle_list_ee_certificate_requests(self, q_pdu, r_msg):
- for ee_req in rpki.irdb.EECertificateRequest.objects.filter(issuer__handle__exact = q_pdu.self_handle):
- resources = ee_req.resource_bag
- r_pdu = rpki.left_right.list_ee_certificate_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.gski = ee_req.gski
- r_pdu.valid_until = ee_req.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
- r_pdu.asn = resources.asn
- r_pdu.ipv4 = resources.v4
- r_pdu.ipv6 = resources.v6
- r_pdu.cn = ee_req.cn
- r_pdu.sn = ee_req.sn
- r_pdu.eku = ee_req.eku
- r_pdu.pkcs10 = ee_req.pkcs10
- r_msg.append(r_pdu)
-
- def handler(self, query, path, cb):
- try:
- q_pdu = None
- r_msg = rpki.left_right.msg.reply()
- from django.db import connection
- connection.cursor() # Reconnect to mysqld if necessary
- self.start_new_transaction()
- serverCA = rpki.irdb.ServerCA.objects.get()
- rpkid = serverCA.ee_certificates.get(purpose = "rpkid")
- try:
- q_cms = rpki.left_right.cms_msg(DER = query)
- q_msg = q_cms.unwrap((serverCA.certificate, rpkid.certificate))
- self.cms_timestamp = q_cms.check_replay(self.cms_timestamp, path)
- if not isinstance(q_msg, rpki.left_right.msg) or not q_msg.is_query():
- raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_msg)
- for q_pdu in q_msg:
- self.dispatch(q_pdu, r_msg)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Exception while handling HTTP request")
- if q_pdu is None:
- r_msg.append(rpki.left_right.report_error_elt.from_exception(e))
- else:
- r_msg.append(rpki.left_right.report_error_elt.from_exception(e, q_pdu.self_handle, q_pdu.tag))
- irdbd = serverCA.ee_certificates.get(purpose = "irdbd")
- cb(200, body = rpki.left_right.cms_msg().wrap(r_msg, irdbd.private_key, irdbd.certificate))
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Unhandled exception while processing HTTP request")
- cb(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
-
- def dispatch(self, q_pdu, r_msg):
- try:
- handler = self.dispatch_vector[type(q_pdu)]
- except KeyError:
- raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_pdu)
- else:
- handler(q_pdu, r_msg)
-
- def __init__(self, **kwargs):
-
- global rpki # pylint: disable=W0602
-
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize")
- parser.add_argument("--pidfile",
- help = "override default location of pid file")
- parser.add_argument("--profile",
- help = "enable profiling, saving data to PROFILE")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
-
- rpki.log.init("irdbd", args)
-
- self.cfg = rpki.config.parser(args.config, "irdbd")
- self.cfg.set_global_flags()
-
- if not args.foreground:
- rpki.daemonize.daemon(pidfile = args.pidfile)
-
- if args.profile:
- import cProfile
- prof = cProfile.Profile()
- try:
- prof.runcall(self.main)
- finally:
- prof.dump_stats(args.profile)
- logger.info("Dumped profile data to %s", args.profile)
- else:
- self.main()
-
- def main(self):
-
- global rpki # pylint: disable=W0602
-
- import django
-
- from django.conf import settings
-
- startup_msg = self.cfg.get("startup-message", "")
- if startup_msg:
- logger.info(startup_msg)
-
- # Do -not- turn on DEBUG here except for short-lived tests,
- # otherwise irdbd will eventually run out of memory and crash.
- #
- # If you must enable debugging, use django.db.reset_queries() to
- # clear the query list manually, but it's probably better just to
- # run with debugging disabled, since that's the expectation for
- # production code.
- #
- # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory
-
- settings.configure(
- DATABASES = {
- "default" : {
- "ENGINE" : "django.db.backends.mysql",
- "NAME" : self.cfg.get("sql-database"),
- "USER" : self.cfg.get("sql-username"),
- "PASSWORD" : self.cfg.get("sql-password"),
- "HOST" : "",
- "PORT" : "" }},
- INSTALLED_APPS = ("rpki.irdb",),
- MIDDLEWARE_CLASSES = (), # API change, feh
- )
-
- if django.VERSION >= (1, 7): # API change, feh
- from django.apps import apps
- apps.populate(settings.INSTALLED_APPS)
-
- import rpki.irdb # pylint: disable=W0621
-
- # Entirely too much fun with read-only access to transactional databases.
- #
- # http://stackoverflow.com/questions/3346124/how-do-i-force-django-to-ignore-any-caches-and-reload-data
- # http://devblog.resolversystems.com/?p=439
- # http://groups.google.com/group/django-users/browse_thread/thread/e25cec400598c06d
- # http://stackoverflow.com/questions/1028671/python-mysqldb-update-query-fails
- # http://dev.mysql.com/doc/refman/5.0/en/set-transaction.html
- #
- # It turns out that MySQL is doing us a favor with this weird
- # transactional behavior on read, because without it there's a
- # race condition if multiple updates are committed to the IRDB
- # while we're in the middle of processing a query. Note that
- # proper transaction management by the committers doesn't protect
- # us, this is a transactional problem on read. So we need to use
- # explicit transaction management. Since irdbd is a read-only
- # consumer of IRDB data, this means we need to commit an empty
- # transaction at the beginning of processing each query, to reset
- # the transaction isolation snapshot.
-
- import django.db.transaction
- self.start_new_transaction = django.db.transaction.commit_manually(django.db.transaction.commit)
-
- self.dispatch_vector = {
- rpki.left_right.list_resources_elt : self.handle_list_resources,
- rpki.left_right.list_roa_requests_elt : self.handle_list_roa_requests,
- rpki.left_right.list_ghostbuster_requests_elt : self.handle_list_ghostbuster_requests,
- rpki.left_right.list_ee_certificate_requests_elt : self.handle_list_ee_certificate_requests}
-
- try:
- self.http_server_host = self.cfg.get("server-host", "")
- self.http_server_port = self.cfg.getint("server-port")
- except: # pylint: disable=W0702
- #
- # Backwards compatibility, remove this eventually.
- #
- u = urlparse.urlparse(self.cfg.get("http-url"))
- if (u.scheme not in ("", "http") or
- u.username is not None or
- u.password is not None or
- u.params or u.query or u.fragment):
- raise
- self.http_server_host = u.hostname
- self.http_server_port = int(u.port)
-
- self.cms_timestamp = None
-
- rpki.http.server(
- host = self.http_server_host,
- port = self.http_server_port,
- handlers = self.handler)
+ # Whether to drop XML into the log
+
+ debug = False
+
+ def handle_list_resources(self, q_pdu, r_msg):
+ tenant_handle = q_pdu.get("tenant_handle")
+ child_handle = q_pdu.get("child_handle")
+ child = rpki.irdb.models.Child.objects.get(issuer__handle = tenant_handle,
+ handle = child_handle)
+ resources = child.resource_bag
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_list_resources,
+ tenant_handle = tenant_handle, child_handle = child_handle,
+ valid_until = child.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ"))
+ for k, v in (("asn", resources.asn),
+ ("ipv4", resources.v4),
+ ("ipv6", resources.v6),
+ ("tag", q_pdu.get("tag"))):
+ if v:
+ r_pdu.set(k, str(v))
+
+ def handle_list_roa_requests(self, q_pdu, r_msg):
+ tenant_handle = q_pdu.get("tenant_handle")
+ for request in rpki.irdb.models.ROARequest.objects.raw("""
+ SELECT irdb_roarequest.*
+ FROM irdb_roarequest, irdb_resourceholderca
+ WHERE irdb_roarequest.issuer_id = irdb_resourceholderca.id
+ AND irdb_resourceholderca.handle = %s
+ """, [tenant_handle]):
+ prefix_bag = request.roa_prefix_bag
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_list_roa_requests,
+ tenant_handle = tenant_handle, asn = str(request.asn))
+ for k, v in (("ipv4", prefix_bag.v4),
+ ("ipv6", prefix_bag.v6),
+ ("tag", q_pdu.get("tag"))):
+ if v:
+ r_pdu.set(k, str(v))
+
+ def handle_list_ghostbuster_requests(self, q_pdu, r_msg):
+ tenant_handle = q_pdu.get("tenant_handle")
+ parent_handle = q_pdu.get("parent_handle")
+ ghostbusters = rpki.irdb.models.GhostbusterRequest.objects.filter(
+ issuer__handle = tenant_handle, parent__handle = parent_handle)
+ if ghostbusters.count() == 0:
+ ghostbusters = rpki.irdb.models.GhostbusterRequest.objects.filter(
+ issuer__handle = tenant_handle, parent = None)
+ for ghostbuster in ghostbusters:
+ r_pdu = SubElement(r_msg, q_pdu.tag,
+ tenant_handle = tenant_handle, parent_handle = parent_handle)
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+ r_pdu.text = ghostbuster.vcard
+
+ def handle_list_ee_certificate_requests(self, q_pdu, r_msg):
+ tenant_handle = q_pdu.get("tenant_handle")
+ for ee_req in rpki.irdb.models.EECertificateRequest.objects.filter(
+ issuer__handle = tenant_handle):
+ resources = ee_req.resource_bag
+ r_pdu = SubElement(r_msg, q_pdu.tag, tenant_handle = tenant_handle, gski = ee_req.gski,
+ valid_until = ee_req.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ"),
+ cn = ee_req.cn, sn = ee_req.sn)
+ for k, v in (("asn", resources.asn),
+ ("ipv4", resources.v4),
+ ("ipv6", resources.v6),
+ ("eku", ee_req.eku),
+ ("tag", q_pdu.get("tag"))):
+ if v:
+ r_pdu.set(k, str(v))
+ SubElement(r_pdu, rpki.left_right.tag_pkcs10).text = ee_req.pkcs10.get_Base64()
+
+ def handler(self, request, q_der):
+ try:
+ from django.db import connection
+ connection.cursor() # Reconnect to mysqld if necessary
+ self.start_new_transaction()
+ serverCA = rpki.irdb.models.ServerCA.objects.get()
+ rpkid = serverCA.ee_certificates.get(purpose = "rpkid")
+ irdbd = serverCA.ee_certificates.get(purpose = "irdbd")
+ q_cms = rpki.left_right.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((serverCA.certificate, rpkid.certificate))
+ self.cms_timestamp = q_cms.check_replay(self.cms_timestamp, request.path)
+ if self.debug:
+ logger.debug("Received: %s", ElementToString(q_msg))
+ if q_msg.get("type") != "query":
+ raise rpki.exceptions.BadQuery("Message type is {}, expected query".format(
+ q_msg.get("type")))
+ r_msg = Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "reply", version = rpki.left_right.version)
+ try:
+ for q_pdu in q_msg:
+ getattr(self, "handle_" + q_pdu.tag[len(rpki.left_right.xmlns):])(q_pdu, r_msg)
+
+ except Exception, e:
+ logger.exception("Exception processing PDU %r", q_pdu)
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_report_error,
+ error_code = e.__class__.__name__)
+ r_pdu.text = str(e)
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ if self.debug:
+ logger.debug("Sending: %s", ElementToString(r_msg))
+ request.send_cms_response(rpki.left_right.cms_msg().wrap(
+ r_msg, irdbd.private_key, irdbd.certificate))
+
+ except Exception, e:
+ logger.exception("Unhandled exception while processing HTTP request")
+ request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+
+ def __init__(self, **kwargs):
+
+ global rpki # pylint: disable=W0602
+
+ os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb")
+ time.tzset()
+
+ self.cfg = rpki.config.argparser(section = "irdbd", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground",
+ default = False,
+ help = "whether to daemonize")
+ self.cfg.add_argument("--pidfile",
+ default = os.path.join(rpki.daemonize.default_pid_directory,
+ "irdbd.pid"),
+ help = "override default location of pid file")
+ self.cfg.add_argument("--profile",
+ default = "",
+ help = "enable profiling, saving data to PROFILE")
+ self.cfg.add_logging_arguments()
+ args = self.cfg.argparser.parse_args()
+
+ self.cfg.configure_logging(args = args, ident = "irdbd")
+
+ try:
+ self.cfg.set_global_flags()
+
+ self.cms_timestamp = None
+
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
+
+ if args.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(self.main)
+ finally:
+ prof.dump_stats(args.profile)
+ logger.info("Dumped profile data to %s", args.profile)
+ else:
+ self.main()
+
+ except:
+ logger.exception("Unhandled exception in rpki.irdbd.main()")
+ sys.exit(1)
+
+
+ def main(self):
+
+ startup_msg = self.cfg.get("startup-message", "")
+ if startup_msg:
+ logger.info(startup_msg)
+
+ # Now that we know which configuration file to use, it's OK to
+ # load modules that require Django's settings module.
+
+ import django
+ django.setup()
+
+ global rpki # pylint: disable=W0602
+ import rpki.irdb # pylint: disable=W0621
+
+ self.http_server_host = self.cfg.get("server-host", "")
+ self.http_server_port = self.cfg.getint("server-port")
+
+ rpki.http_simple.server(
+ host = self.http_server_host,
+ port = self.http_server_port,
+ handlers = self.handler)
+
+ def start_new_transaction(self):
+
+ # Entirely too much fun with read-only access to transactional databases.
+ #
+ # http://stackoverflow.com/questions/3346124/how-do-i-force-django-to-ignore-any-caches-and-reload-data
+ # http://devblog.resolversystems.com/?p=439
+ # http://groups.google.com/group/django-users/browse_thread/thread/e25cec400598c06d
+ # http://stackoverflow.com/questions/1028671/python-mysqldb-update-query-fails
+ # http://dev.mysql.com/doc/refman/5.0/en/set-transaction.html
+ #
+ # It turns out that MySQL is doing us a favor with this weird
+ # transactional behavior on read, because without it there's a
+ # race condition if multiple updates are committed to the IRDB
+ # while we're in the middle of processing a query. Note that
+ # proper transaction management by the committers doesn't protect
+ # us, this is a transactional problem on read. So we need to use
+ # explicit transaction management. Since irdbd is a read-only
+ # consumer of IRDB data, this means we need to commit an empty
+ # transaction at the beginning of processing each query, to reset
+ # the transaction isolation snapshot.
+
+ import django.db.transaction
+
+ with django.db.transaction.atomic():
+ #django.db.transaction.commit()
+ pass
diff --git a/rpki/left_right.py b/rpki/left_right.py
index c8b6d19b..02b118c0 100644
--- a/rpki/left_right.py
+++ b/rpki/left_right.py
@@ -22,1270 +22,59 @@ RPKI "left-right" protocol.
"""
import logging
-import rpki.resource_set
+
import rpki.x509
-import rpki.sql
import rpki.exceptions
-import rpki.xml_utils
-import rpki.http
import rpki.up_down
import rpki.relaxng
import rpki.sundial
import rpki.log
import rpki.publication
-import rpki.async
import rpki.rpkid_tasks
-logger = logging.getLogger(__name__)
-
-## @var enforce_strict_up_down_xml_sender
-# Enforce strict checking of XML "sender" field in up-down protocol
-
-enforce_strict_up_down_xml_sender = False
-
-class left_right_namespace(object):
- """
- XML namespace parameters for left-right protocol.
- """
-
- xmlns = rpki.relaxng.left_right.xmlns
- nsmap = rpki.relaxng.left_right.nsmap
-
-class data_elt(rpki.xml_utils.data_elt, rpki.sql.sql_persistent, left_right_namespace):
- """
- Virtual class for top-level left-right protocol data elements.
- """
-
- handles = ()
-
- self_id = None
- self_handle = None
-
- @property
- @rpki.sql.cache_reference
- def self(self):
- """
- Fetch self object to which this object links.
- """
- return self_elt.sql_fetch(self.gctx, self.self_id)
-
- @property
- @rpki.sql.cache_reference
- def bsc(self):
- """
- Return BSC object to which this object links.
- """
- return bsc_elt.sql_fetch(self.gctx, self.bsc_id)
-
- def make_reply_clone_hook(self, r_pdu):
- """
- Set handles when cloning, including _id -> _handle translation.
- """
- if r_pdu.self_handle is None:
- r_pdu.self_handle = self.self_handle
- for tag, elt in self.handles:
- id_name = tag + "_id"
- handle_name = tag + "_handle"
- if getattr(r_pdu, handle_name, None) is None:
- try:
- setattr(r_pdu, handle_name, getattr(elt.sql_fetch(self.gctx, getattr(r_pdu, id_name)), handle_name))
- except AttributeError:
- continue
-
- @classmethod
- def serve_fetch_handle(cls, gctx, self_id, handle):
- """
- Find an object based on its handle.
- """
- return cls.sql_fetch_where1(gctx, cls.element_name + "_handle = %s AND self_id = %s", (handle, self_id))
-
- def serve_fetch_one_maybe(self):
- """
- Find the object on which a get, set, or destroy method should
- operate, or which would conflict with a create method.
- """
- where = "%s.%s_handle = %%s AND %s.self_id = self.self_id AND self.self_handle = %%s" % ((self.element_name,) * 3)
- args = (getattr(self, self.element_name + "_handle"), self.self_handle)
- return self.sql_fetch_where1(self.gctx, where, args, "self")
-
- def serve_fetch_all(self):
- """
- Find the objects on which a list method should operate.
- """
- where = "%s.self_id = self.self_id and self.self_handle = %%s" % self.element_name
- return self.sql_fetch_where(self.gctx, where, (self.self_handle,), "self")
-
- def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Hook to do _handle => _id translation before saving.
-
- self is always the object to be saved to SQL. For create
- operations, self and q_pdu are be the same object; for set
- operations, self is the pre-existing object from SQL and q_pdu is
- the set request received from the the IRBE.
- """
- for tag, elt in self.handles:
- id_name = tag + "_id"
- if getattr(self, id_name, None) is None:
- x = elt.serve_fetch_handle(self.gctx, self.self_id, getattr(q_pdu, tag + "_handle"))
- if x is None:
- raise rpki.exceptions.HandleTranslationError("Could not translate %r %s_handle" % (self, tag))
- setattr(self, id_name, getattr(x, id_name))
- cb()
-
-class self_elt(data_elt):
- """
- <self/> element.
- """
-
- element_name = "self"
- attributes = ("action", "tag", "self_handle", "crl_interval", "regen_margin")
- elements = ("bpki_cert", "bpki_glue")
- booleans = ("rekey", "reissue", "revoke", "run_now", "publish_world_now", "revoke_forgotten",
- "clear_replay_protection")
-
- sql_template = rpki.sql.template(
- "self",
- "self_id",
- "self_handle",
- "use_hsm",
- "crl_interval",
- "regen_margin",
- ("bpki_cert", rpki.x509.X509),
- ("bpki_glue", rpki.x509.X509))
-
- handles = ()
-
- use_hsm = False
- crl_interval = None
- regen_margin = None
- bpki_cert = None
- bpki_glue = None
- cron_tasks = None
-
- def __repr__(self):
- return rpki.log.log_repr(self)
-
- @property
- def bscs(self):
- """
- Fetch all BSC objects that link to this self object.
- """
- return bsc_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def repositories(self):
- """
- Fetch all repository objects that link to this self object.
- """
- return repository_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def parents(self):
- """
- Fetch all parent objects that link to this self object.
- """
- return parent_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def children(self):
- """
- Fetch all child objects that link to this self object.
- """
- return child_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def roas(self):
- """
- Fetch all ROA objects that link to this self object.
- """
- return rpki.rpkid.roa_obj.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def ghostbusters(self):
- """
- Fetch all Ghostbuster record objects that link to this self object.
- """
- return rpki.rpkid.ghostbuster_obj.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- @property
- def ee_certificates(self):
- """
- Fetch all EE certificate objects that link to this self object.
- """
- return rpki.rpkid.ee_cert_obj.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for self_elt.
- """
- actions = []
- if q_pdu.rekey:
- actions.append(self.serve_rekey)
- if q_pdu.revoke:
- actions.append(self.serve_revoke)
- if q_pdu.reissue:
- actions.append(self.serve_reissue)
- if q_pdu.revoke_forgotten:
- actions.append(self.serve_revoke_forgotten)
- if q_pdu.publish_world_now:
- actions.append(self.serve_publish_world_now)
- if q_pdu.run_now:
- actions.append(self.serve_run_now)
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_rekey(self, cb, eb):
- """
- Handle a left-right rekey action for this self.
- """
- def loop(iterator, parent):
- parent.serve_rekey(iterator, eb)
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_revoke(self, cb, eb):
- """
- Handle a left-right revoke action for this self.
- """
- def loop(iterator, parent):
- parent.serve_revoke(iterator, eb)
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_reissue(self, cb, eb):
- """
- Handle a left-right reissue action for this self.
- """
- def loop(iterator, parent):
- parent.serve_reissue(iterator, eb)
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_revoke_forgotten(self, cb, eb):
- """
- Handle a left-right revoke_forgotten action for this self.
- """
- def loop(iterator, parent):
- parent.serve_revoke_forgotten(iterator, eb)
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a left-right clear_replay_protection action for this self.
- """
- def loop(iterator, obj):
- obj.serve_clear_replay_protection(iterator, eb)
- rpki.async.iterator(self.parents + self.children + self.repositories, loop, cb)
-
- def serve_destroy_hook(self, cb, eb):
- """
- Extra cleanup actions when destroying a self_elt.
- """
- def loop(iterator, parent):
- parent.delete(iterator)
- rpki.async.iterator(self.parents, loop, cb)
-
-
- def serve_publish_world_now(self, cb, eb):
- """
- Handle a left-right publish_world_now action for this self.
-
- The publication stuff needs refactoring, right now publication is
- interleaved with local operations in a way that forces far too
- many bounces through the task system for any complex update. The
- whole thing ought to be rewritten to queue up outgoing publication
- PDUs and only send them when we're all done or when we need to
- force publication at a particular point in a multi-phase operation.
-
- Once that reorganization has been done, this method should be
- rewritten to reuse the low-level publish() methods that each
- object will have...but we're not there yet. So, for now, we just
- do this via brute force. Think of it as a trial version to see
- whether we've identified everything that needs to be republished
- for this operation.
- """
-
- def loop(iterator, parent):
- q_msg = rpki.publication.msg.query()
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None:
- q_msg.append(rpki.publication.crl_elt.make_publish(
- ca_detail.crl_uri, ca_detail.latest_crl))
- q_msg.append(rpki.publication.manifest_elt.make_publish(
- ca_detail.manifest_uri, ca_detail.latest_manifest))
- q_msg.extend(rpki.publication.certificate_elt.make_publish(
- c.uri, c.cert) for c in ca_detail.child_certs)
- q_msg.extend(rpki.publication.roa_elt.make_publish(
- r.uri, r.roa) for r in ca_detail.roas if r.roa is not None)
- q_msg.extend(rpki.publication.ghostbuster_elt.make_publish(
- g.uri, g.ghostbuster) for g in ca_detail.ghostbusters)
- parent.repository.call_pubd(iterator, eb, q_msg)
-
- rpki.async.iterator(self.parents, loop, cb)
-
- def serve_run_now(self, cb, eb):
- """
- Handle a left-right run_now action for this self.
- """
- logger.debug("Forced immediate run of periodic actions for self %s[%d]",
- self.self_handle, self.self_id)
- completion = rpki.rpkid_tasks.CompletionHandler(cb)
- self.schedule_cron_tasks(completion)
- assert completion.count > 0
- self.gctx.task_run()
-
- def serve_fetch_one_maybe(self):
- """
- Find the self object upon which a get, set, or destroy action
- should operate, or which would conflict with a create method.
- """
- return self.serve_fetch_handle(self.gctx, None, self.self_handle)
-
- @classmethod
- def serve_fetch_handle(cls, gctx, self_id, self_handle):
- """
- Find a self object based on its self_handle.
- """
- return cls.sql_fetch_where1(gctx, "self_handle = %s", (self_handle,))
-
- def serve_fetch_all(self):
- """
- Find the self objects upon which a list action should operate.
- This is different from the list action for all other objects,
- where list only works within a given self_id context.
- """
- return self.sql_fetch_all(self.gctx)
-
- def schedule_cron_tasks(self, completion):
- """
- Schedule periodic tasks.
- """
-
- if self.cron_tasks is None:
- self.cron_tasks = tuple(task(self) for task in rpki.rpkid_tasks.task_classes)
-
- for task in self.cron_tasks:
- self.gctx.task_add(task)
- completion.register(task)
-
- def find_covering_ca_details(self, resources):
- """
- Return all active ca_detail_objs for this <self/> which cover a
- particular set of resources.
- If we expected there to be a large number of ca_detail_objs, we
- could add index tables and write fancy SQL query to do this, but
- for the expected common case where there are only one or two
- active ca_detail_objs per <self/>, it's probably not worth it. In
- any case, this is an optimization we can leave for later.
- """
-
- results = set()
- for parent in self.parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None and ca_detail.covers(resources):
- results.add(ca_detail)
- return results
-
-
-class bsc_elt(data_elt):
- """
- <bsc/> (Business Signing Context) element.
- """
-
- element_name = "bsc"
- attributes = ("action", "tag", "self_handle", "bsc_handle", "key_type", "hash_alg", "key_length")
- elements = ("signing_cert", "signing_cert_crl", "pkcs10_request")
- booleans = ("generate_keypair",)
-
- sql_template = rpki.sql.template(
- "bsc",
- "bsc_id",
- "bsc_handle",
- "self_id",
- "hash_alg",
- ("private_key_id", rpki.x509.RSA),
- ("pkcs10_request", rpki.x509.PKCS10),
- ("signing_cert", rpki.x509.X509),
- ("signing_cert_crl", rpki.x509.CRL))
-
- handles = (("self", self_elt),)
-
- private_key_id = None
- pkcs10_request = None
- signing_cert = None
- signing_cert_crl = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.bsc_handle)
-
- @property
- def repositories(self):
- """
- Fetch all repository objects that link to this BSC object.
- """
- return repository_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
-
- @property
- def parents(self):
- """
- Fetch all parent objects that link to this BSC object.
- """
- return parent_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
-
- @property
- def children(self):
- """
- Fetch all child objects that link to this BSC object.
- """
- return child_elt.sql_fetch_where(self.gctx, "bsc_id = %s", (self.bsc_id,))
-
- def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for bsc_elt -- handle key generation. For
- now this only allows RSA with SHA-256.
- """
- if q_pdu.generate_keypair:
- assert q_pdu.key_type in (None, "rsa") and q_pdu.hash_alg in (None, "sha256")
- self.private_key_id = rpki.x509.RSA.generate(keylength = q_pdu.key_length or 2048)
- self.pkcs10_request = rpki.x509.PKCS10.create(keypair = self.private_key_id)
- r_pdu.pkcs10_request = self.pkcs10_request
- data_elt.serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb)
-
-class repository_elt(data_elt):
- """
- <repository/> element.
- """
-
- element_name = "repository"
- attributes = ("action", "tag", "self_handle", "repository_handle", "bsc_handle", "peer_contact_uri")
- elements = ("bpki_cert", "bpki_glue")
- booleans = ("clear_replay_protection",)
-
- sql_template = rpki.sql.template(
- "repository",
- "repository_id",
- "repository_handle",
- "self_id",
- "bsc_id",
- "peer_contact_uri",
- ("bpki_cert", rpki.x509.X509),
- ("bpki_glue", rpki.x509.X509),
- ("last_cms_timestamp", rpki.sundial.datetime))
-
- handles = (("self", self_elt),
- ("bsc", bsc_elt))
-
- bpki_cert = None
- bpki_glue = None
- last_cms_timestamp = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.repository_handle)
-
- @property
- def parents(self):
- """
- Fetch all parent objects that link to this repository object.
- """
- return parent_elt.sql_fetch_where(self.gctx, "repository_id = %s", (self.repository_id,))
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for repository_elt.
- """
- actions = []
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a left-right clear_replay_protection action for this repository.
- """
- self.last_cms_timestamp = None
- self.sql_mark_dirty()
- cb()
-
- @staticmethod
- def default_pubd_handler(pdu):
- """
- Default handler for publication response PDUs.
- """
- pdu.raise_if_error()
-
- def call_pubd(self, callback, errback, q_msg, handlers = None):
- """
- Send a message to publication daemon and return the response.
-
- As a convenience, attempting to send an empty message returns
- immediate success without sending anything.
-
- Handlers is a dict of handler functions to process the response
- PDUs. If the tag value in the response PDU appears in the dict,
- the associated handler is called to process the PDU. If no tag
- matches, default_pubd_handler() is called. A handler value of
- False suppresses calling of the default handler.
- """
-
- try:
- self.gctx.sql.sweep()
-
- if not q_msg:
- return callback()
-
- if handlers is None:
- handlers = {}
-
- for q_pdu in q_msg:
- logger.info("Sending %s %s to pubd", q_pdu.action, q_pdu.uri)
-
- bsc = self.bsc
- q_der = rpki.publication.cms_msg().wrap(q_msg, bsc.private_key_id, bsc.signing_cert, bsc.signing_cert_crl)
- bpki_ta_path = (self.gctx.bpki_ta, self.self.bpki_cert, self.self.bpki_glue, self.bpki_cert, self.bpki_glue)
-
- def done(r_der):
- try:
- logger.debug("Received response from pubd")
- r_cms = rpki.publication.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap(bpki_ta_path)
- r_cms.check_replay_sql(self, self.peer_contact_uri)
- for r_pdu in r_msg:
- handler = handlers.get(r_pdu.tag, self.default_pubd_handler)
- if handler:
- logger.debug("Calling pubd handler %r", handler)
- handler(r_pdu)
- if len(q_msg) != len(r_msg):
- raise rpki.exceptions.BadPublicationReply("Wrong number of response PDUs from pubd: sent %r, got %r" % (q_msg, r_msg))
- callback()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- errback(e)
-
- logger.debug("Sending request to pubd")
- rpki.http.client(
- url = self.peer_contact_uri,
- msg = q_der,
- callback = done,
- errback = errback)
-
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- errback(e)
-
-class parent_elt(data_elt):
- """
- <parent/> element.
- """
-
- element_name = "parent"
- attributes = ("action", "tag", "self_handle", "parent_handle", "bsc_handle", "repository_handle",
- "peer_contact_uri", "sia_base", "sender_name", "recipient_name")
- elements = ("bpki_cms_cert", "bpki_cms_glue")
- booleans = ("rekey", "reissue", "revoke", "revoke_forgotten", "clear_replay_protection")
-
- sql_template = rpki.sql.template(
- "parent",
- "parent_id",
- "parent_handle",
- "self_id",
- "bsc_id",
- "repository_id",
- "peer_contact_uri",
- "sia_base",
- "sender_name",
- "recipient_name",
- ("bpki_cms_cert", rpki.x509.X509),
- ("bpki_cms_glue", rpki.x509.X509),
- ("last_cms_timestamp", rpki.sundial.datetime))
-
- handles = (("self", self_elt),
- ("bsc", bsc_elt),
- ("repository", repository_elt))
-
- bpki_cms_cert = None
- bpki_cms_glue = None
- last_cms_timestamp = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.parent_handle)
-
- @property
- @rpki.sql.cache_reference
- def repository(self):
- """
- Fetch repository object to which this parent object links.
- """
- return repository_elt.sql_fetch(self.gctx, self.repository_id)
-
- @property
- def cas(self):
- """
- Fetch all CA objects that link to this parent object.
- """
- return rpki.rpkid.ca_obj.sql_fetch_where(self.gctx, "parent_id = %s", (self.parent_id,))
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for parent_elt.
- """
- actions = []
- if q_pdu.rekey:
- actions.append(self.serve_rekey)
- if q_pdu.revoke:
- actions.append(self.serve_revoke)
- if q_pdu.reissue:
- actions.append(self.serve_reissue)
- if q_pdu.revoke_forgotten:
- actions.append(self.serve_revoke_forgotten)
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_rekey(self, cb, eb):
- """
- Handle a left-right rekey action for this parent.
- """
- def loop(iterator, ca):
- ca.rekey(iterator, eb)
- rpki.async.iterator(self.cas, loop, cb)
-
- def serve_revoke(self, cb, eb):
- """
- Handle a left-right revoke action for this parent.
- """
- def loop(iterator, ca):
- ca.revoke(cb = iterator, eb = eb)
- rpki.async.iterator(self.cas, loop, cb)
-
- def serve_reissue(self, cb, eb):
- """
- Handle a left-right reissue action for this parent.
- """
- def loop(iterator, ca):
- ca.reissue(cb = iterator, eb = eb)
- rpki.async.iterator(self.cas, loop, cb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a left-right clear_replay_protection action for this parent.
- """
- self.last_cms_timestamp = None
- self.sql_mark_dirty()
- cb()
-
-
- def get_skis(self, cb, eb):
- """
- Fetch SKIs that this parent thinks we have. In theory this should
- agree with our own database, but in practice stuff can happen, so
- sometimes we need to know what our parent thinks.
-
- Result is a dictionary with the resource class name as key and a
- set of SKIs as value.
- """
-
- def done(r_msg):
- cb(dict((rc.class_name, set(c.cert.gSKI() for c in rc.certs))
- for rc in r_msg.payload.classes))
-
- rpki.up_down.list_pdu.query(self, done, eb)
-
-
- def revoke_skis(self, rc_name, skis_to_revoke, cb, eb):
- """
- Revoke a set of SKIs within a particular resource class.
- """
-
- def loop(iterator, ski):
- logger.debug("Asking parent %r to revoke class %r, SKI %s", self, rc_name, ski)
- q_pdu = rpki.up_down.revoke_pdu()
- q_pdu.class_name = rc_name
- q_pdu.ski = ski
- self.query_up_down(q_pdu, lambda r_pdu: iterator(), eb)
-
- rpki.async.iterator(skis_to_revoke, loop, cb)
-
-
- def serve_revoke_forgotten(self, cb, eb):
- """
- Handle a left-right revoke_forgotten action for this parent.
-
- This is a bit fiddly: we have to compare the result of an up-down
- list query with what we have locally and identify the SKIs of any
- certificates that have gone missing. This should never happen in
- ordinary operation, but can arise if we have somehow lost a
- private key, in which case there is nothing more we can do with
- the issued cert, so we have to clear it. As this really is not
- supposed to happen, we don't clear it automatically, instead we
- require an explicit trigger.
- """
-
- def got_skis(skis_from_parent):
-
- def loop(iterator, item):
- rc_name, skis_to_revoke = item
- if rc_name in ca_map:
- for ca_detail in ca_map[rc_name].issue_response_candidate_ca_details:
- skis_to_revoke.discard(ca_detail.latest_ca_cert.gSKI())
- self.revoke_skis(rc_name, skis_to_revoke, iterator, eb)
-
- ca_map = dict((ca.parent_resource_class, ca) for ca in self.cas)
- rpki.async.iterator(skis_from_parent.items(), loop, cb)
-
- self.get_skis(got_skis, eb)
-
-
- def delete(self, cb, delete_parent = True):
- """
- Delete all the CA stuff under this parent, and perhaps the parent
- itself.
- """
-
- def loop(iterator, ca):
- self.gctx.checkpoint()
- ca.delete(self, iterator)
-
- def revoke():
- self.gctx.checkpoint()
- self.serve_revoke_forgotten(done, fail)
-
- def fail(e):
- logger.warning("Trouble getting parent to revoke certificates, blundering onwards: %s", e)
- done()
-
- def done():
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- if delete_parent:
- self.sql_delete()
- cb()
-
- rpki.async.iterator(self.cas, loop, revoke)
-
-
- def serve_destroy_hook(self, cb, eb):
- """
- Extra server actions when destroying a parent_elt.
- """
-
- self.delete(cb, delete_parent = False)
-
-
- def query_up_down(self, q_pdu, cb, eb):
- """
- Client code for sending one up-down query PDU to this parent.
- """
-
- bsc = self.bsc
- if bsc is None:
- raise rpki.exceptions.BSCNotFound("Could not find BSC %s" % self.bsc_id)
-
- if bsc.signing_cert is None:
- raise rpki.exceptions.BSCNotReady("BSC %r[%s] is not yet usable" % (bsc.bsc_handle, bsc.bsc_id))
-
- q_msg = rpki.up_down.message_pdu.make_query(
- payload = q_pdu,
- sender = self.sender_name,
- recipient = self.recipient_name)
-
- q_der = rpki.up_down.cms_msg().wrap(q_msg, bsc.private_key_id,
- bsc.signing_cert,
- bsc.signing_cert_crl)
-
- def unwrap(r_der):
- try:
- r_cms = rpki.up_down.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap((self.gctx.bpki_ta,
- self.self.bpki_cert,
- self.self.bpki_glue,
- self.bpki_cms_cert,
- self.bpki_cms_glue))
- r_cms.check_replay_sql(self, self.peer_contact_uri)
- r_msg.payload.check_response()
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception, e:
- eb(e)
- else:
- cb(r_msg)
-
- rpki.http.client(
- msg = q_der,
- url = self.peer_contact_uri,
- callback = unwrap,
- errback = eb,
- content_type = rpki.up_down.content_type)
-
-class child_elt(data_elt):
- """
- <child/> element.
- """
-
- element_name = "child"
- attributes = ("action", "tag", "self_handle", "child_handle", "bsc_handle")
- elements = ("bpki_cert", "bpki_glue")
- booleans = ("reissue", "clear_replay_protection")
-
- sql_template = rpki.sql.template(
- "child",
- "child_id",
- "child_handle",
- "self_id",
- "bsc_id",
- ("bpki_cert", rpki.x509.X509),
- ("bpki_glue", rpki.x509.X509),
- ("last_cms_timestamp", rpki.sundial.datetime))
-
- handles = (("self", self_elt),
- ("bsc", bsc_elt))
-
- bpki_cert = None
- bpki_glue = None
- last_cms_timestamp = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.child_handle)
-
- def fetch_child_certs(self, ca_detail = None, ski = None, unique = False):
- """
- Fetch all child_cert objects that link to this child object.
- """
- return rpki.rpkid.child_cert_obj.fetch(self.gctx, self, ca_detail, ski, unique)
-
- @property
- def child_certs(self):
- """
- Fetch all child_cert objects that link to this child object.
- """
- return self.fetch_child_certs()
-
- @property
- def parents(self):
- """
- Fetch all parent objects that link to self object to which this child object links.
- """
- return parent_elt.sql_fetch_where(self.gctx, "self_id = %s", (self.self_id,))
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for child_elt.
- """
- actions = []
- if q_pdu.reissue:
- actions.append(self.serve_reissue)
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_reissue(self, cb, eb):
- """
- Handle a left-right reissue action for this child.
- """
- publisher = rpki.rpkid.publication_queue()
- for child_cert in self.child_certs:
- child_cert.reissue(child_cert.ca_detail, publisher, force = True)
- publisher.call_pubd(cb, eb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a left-right clear_replay_protection action for this child.
- """
- self.last_cms_timestamp = None
- self.sql_mark_dirty()
- cb()
-
- def ca_from_class_name(self, class_name):
- """
- Fetch the CA corresponding to an up-down class_name.
- """
- if not class_name.isdigit():
- raise rpki.exceptions.BadClassNameSyntax("Bad class name %s" % class_name)
- ca = rpki.rpkid.ca_obj.sql_fetch(self.gctx, long(class_name))
- if ca is None:
- raise rpki.exceptions.ClassNameUnknown("Unknown class name %s" % class_name)
- parent = ca.parent
- if self.self_id != parent.self_id:
- raise rpki.exceptions.ClassNameMismatch(
- "Class name mismatch: child.self_id = %d, parent.self_id = %d" % (
- self.self_id, parent.self_id))
- return ca
-
- def serve_destroy_hook(self, cb, eb):
- """
- Extra server actions when destroying a child_elt.
- """
- publisher = rpki.rpkid.publication_queue()
- for child_cert in self.child_certs:
- child_cert.revoke(publisher = publisher,
- generate_crl_and_manifest = True)
- publisher.call_pubd(cb, eb)
-
- def serve_up_down(self, query, callback):
- """
- Outer layer of server handling for one up-down PDU from this child.
- """
-
- bsc = self.bsc
- if bsc is None:
- raise rpki.exceptions.BSCNotFound("Could not find BSC %s" % self.bsc_id)
- q_cms = rpki.up_down.cms_msg(DER = query)
- q_msg = q_cms.unwrap((self.gctx.bpki_ta,
- self.self.bpki_cert,
- self.self.bpki_glue,
- self.bpki_cert,
- self.bpki_glue))
- q_cms.check_replay_sql(self, "child", self.child_handle)
- q_msg.payload.gctx = self.gctx
- if enforce_strict_up_down_xml_sender and q_msg.sender != self.child_handle:
- raise rpki.exceptions.BadSender("Unexpected XML sender %s" % q_msg.sender)
- self.gctx.sql.sweep()
-
- def done(r_msg):
- #
- # Exceptions from this point on are problematic, as we have no
- # sane way of reporting errors in the error reporting mechanism.
- # May require refactoring, ignore the issue for now.
- #
- reply = rpki.up_down.cms_msg().wrap(r_msg, bsc.private_key_id,
- bsc.signing_cert, bsc.signing_cert_crl)
- callback(reply)
-
- try:
- q_msg.serve_top_level(self, done)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except rpki.exceptions.NoActiveCA, data:
- done(q_msg.serve_error(data))
- except Exception, e:
- logger.exception("Unhandled exception serving up-down request from %r", self)
- done(q_msg.serve_error(e))
-
-class list_resources_elt(rpki.xml_utils.base_elt, left_right_namespace):
- """
- <list_resources/> element.
- """
-
- element_name = "list_resources"
- attributes = ("self_handle", "tag", "child_handle", "valid_until", "asn", "ipv4", "ipv6")
- valid_until = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.child_handle, self.asn, self.ipv4, self.ipv6)
-
- def startElement(self, stack, name, attrs):
- """
- Handle <list_resources/> element. This requires special handling
- due to the data types of some of the attributes.
- """
- assert name == "list_resources", "Unexpected name %s, stack %s" % (name, stack)
- self.read_attrs(attrs)
- if isinstance(self.valid_until, str):
- self.valid_until = rpki.sundial.datetime.fromXMLtime(self.valid_until)
- if self.asn is not None:
- self.asn = rpki.resource_set.resource_set_as(self.asn)
- if self.ipv4 is not None:
- self.ipv4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
- if self.ipv6 is not None:
- self.ipv6 = rpki.resource_set.resource_set_ipv6(self.ipv6)
-
- def toXML(self):
- """
- Generate <list_resources/> element. This requires special
- handling due to the data types of some of the attributes.
- """
- elt = self.make_elt()
- if isinstance(self.valid_until, int):
- elt.set("valid_until", self.valid_until.toXMLtime())
- return elt
-
-class list_roa_requests_elt(rpki.xml_utils.base_elt, left_right_namespace):
- """
- <list_roa_requests/> element.
- """
-
- element_name = "list_roa_requests"
- attributes = ("self_handle", "tag", "asn", "ipv4", "ipv6")
-
- def startElement(self, stack, name, attrs):
- """
- Handle <list_roa_requests/> element. This requires special handling
- due to the data types of some of the attributes.
- """
- assert name == "list_roa_requests", "Unexpected name %s, stack %s" % (name, stack)
- self.read_attrs(attrs)
- if self.ipv4 is not None:
- self.ipv4 = rpki.resource_set.roa_prefix_set_ipv4(self.ipv4)
- if self.ipv6 is not None:
- self.ipv6 = rpki.resource_set.roa_prefix_set_ipv6(self.ipv6)
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.asn, self.ipv4, self.ipv6)
-
-class list_ghostbuster_requests_elt(rpki.xml_utils.text_elt, left_right_namespace):
- """
- <list_ghostbuster_requests/> element.
- """
-
- element_name = "list_ghostbuster_requests"
- attributes = ("self_handle", "tag", "parent_handle")
- text_attribute = "vcard"
-
- vcard = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.parent_handle)
-
-class list_ee_certificate_requests_elt(rpki.xml_utils.base_elt, left_right_namespace):
- """
- <list_ee_certificate_requests/> element.
- """
-
- element_name = "list_ee_certificate_requests"
- attributes = ("self_handle", "tag", "gski", "valid_until", "asn", "ipv4", "ipv6", "cn", "sn", "eku")
- elements = ("pkcs10",)
-
- pkcs10 = None
- valid_until = None
- eku = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.gski, self.cn, self.sn, self.asn, self.ipv4, self.ipv6)
-
- def startElement(self, stack, name, attrs):
- """
- Handle <list_ee_certificate_requests/> element. This requires special
- handling due to the data types of some of the attributes.
- """
- if name not in self.elements:
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- self.read_attrs(attrs)
- if isinstance(self.valid_until, str):
- self.valid_until = rpki.sundial.datetime.fromXMLtime(self.valid_until)
- if self.asn is not None:
- self.asn = rpki.resource_set.resource_set_as(self.asn)
- if self.ipv4 is not None:
- self.ipv4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
- if self.ipv6 is not None:
- self.ipv6 = rpki.resource_set.resource_set_ipv6(self.ipv6)
- if self.eku is not None:
- self.eku = self.eku.split(",")
-
- def endElement(self, stack, name, text):
- """
- Handle <pkcs10/> sub-element.
- """
- assert len(self.elements) == 1
- if name == self.elements[0]:
- self.pkcs10 = rpki.x509.PKCS10(Base64 = text)
- else:
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
-
- def toXML(self):
- """
- Generate <list_ee_certificate_requests/> element. This requires special
- handling due to the data types of some of the attributes.
- """
- if isinstance(self.eku, (tuple, list)):
- self.eku = ",".join(self.eku)
- elt = self.make_elt()
- for i in self.elements:
- self.make_b64elt(elt, i, getattr(self, i, None))
- if isinstance(self.valid_until, int):
- elt.set("valid_until", self.valid_until.toXMLtime())
- return elt
-
-class list_published_objects_elt(rpki.xml_utils.text_elt, left_right_namespace):
- """
- <list_published_objects/> element.
- """
-
- element_name = "list_published_objects"
- attributes = ("self_handle", "tag", "uri", "child_handle")
- text_attribute = "obj"
-
- obj = None
- child_handle = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.child_handle, self.uri)
-
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Handle a <list_published_objects/> query. The method name is a
- misnomer here, there's no action attribute and no dispatch, we
- just dump every published object for the specified <self/> and return.
- """
- for parent in self_elt.serve_fetch_handle(self.gctx, None, self.self_handle).parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None:
- r_msg.append(self.make_reply(ca_detail.crl_uri, ca_detail.latest_crl))
- r_msg.append(self.make_reply(ca_detail.manifest_uri, ca_detail.latest_manifest))
- r_msg.extend(self.make_reply(c.uri, c.cert, c.child.child_handle)
- for c in ca_detail.child_certs)
- r_msg.extend(self.make_reply(r.uri, r.roa)
- for r in ca_detail.roas if r.roa is not None)
- r_msg.extend(self.make_reply(g.uri, g.ghostbuster)
- for g in ca_detail.ghostbusters)
- r_msg.extend(self.make_reply(c.uri, c.cert)
- for c in ca_detail.ee_certificates)
- cb()
-
- def make_reply(self, uri, obj, child_handle = None):
- """
- Generate one reply PDU.
- """
- r_pdu = self.make_pdu(tag = self.tag, self_handle = self.self_handle,
- uri = uri, child_handle = child_handle)
- r_pdu.obj = obj.get_Base64()
- return r_pdu
-
-class list_received_resources_elt(rpki.xml_utils.base_elt, left_right_namespace):
- """
- <list_received_resources/> element.
- """
-
- element_name = "list_received_resources"
- attributes = ("self_handle", "tag", "parent_handle",
- "notBefore", "notAfter", "uri", "sia_uri", "aia_uri", "asn", "ipv4", "ipv6")
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.parent_handle, self.uri, self.notAfter)
-
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Handle a <list_received_resources/> query. The method name is a
- misnomer here, there's no action attribute and no dispatch, we
- just dump a bunch of data about every certificate issued to us by
- one of our parents, then return.
- """
- for parent in self_elt.serve_fetch_handle(self.gctx, None, self.self_handle).parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None and ca_detail.latest_ca_cert is not None:
- r_msg.append(self.make_reply(parent.parent_handle, ca_detail.ca_cert_uri, ca_detail.latest_ca_cert))
- cb()
-
- def make_reply(self, parent_handle, uri, cert):
- """
- Generate one reply PDU.
- """
- resources = cert.get_3779resources()
- return self.make_pdu(
- tag = self.tag,
- self_handle = self.self_handle,
- parent_handle = parent_handle,
- notBefore = str(cert.getNotBefore()),
- notAfter = str(cert.getNotAfter()),
- uri = uri,
- sia_uri = cert.get_sia_directory_uri(),
- aia_uri = cert.get_aia_uri(),
- asn = resources.asn,
- ipv4 = resources.v4,
- ipv6 = resources.v6)
-
-class report_error_elt(rpki.xml_utils.text_elt, left_right_namespace):
- """
- <report_error/> element.
- """
-
- element_name = "report_error"
- attributes = ("tag", "self_handle", "error_code")
- text_attribute = "error_text"
-
- error_text = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.self_handle, self.error_code)
-
- @classmethod
- def from_exception(cls, e, self_handle = None, tag = None):
- """
- Generate a <report_error/> element from an exception.
- """
- self = cls()
- self.self_handle = self_handle
- self.tag = tag
- self.error_code = e.__class__.__name__
- self.error_text = str(e)
- return self
-
-class msg(rpki.xml_utils.msg, left_right_namespace):
- """
- Left-right PDU.
- """
+logger = logging.getLogger(__name__)
- ## @var version
- # Protocol version
- version = int(rpki.relaxng.left_right.version)
+xmlns = rpki.relaxng.left_right.xmlns
+nsmap = rpki.relaxng.left_right.nsmap
+version = rpki.relaxng.left_right.version
+
+tag_bpki_cert = xmlns + "bpki_cert"
+tag_bpki_glue = xmlns + "bpki_glue"
+tag_bsc = xmlns + "bsc"
+tag_child = xmlns + "child"
+tag_list_ee_certificate_requests = xmlns + "list_ee_certificate_requests"
+tag_list_ghostbuster_requests = xmlns + "list_ghostbuster_requests"
+tag_list_published_objects = xmlns + "list_published_objects"
+tag_list_received_resources = xmlns + "list_received_resources"
+tag_list_resources = xmlns + "list_resources"
+tag_list_roa_requests = xmlns + "list_roa_requests"
+tag_msg = xmlns + "msg"
+tag_parent = xmlns + "parent"
+tag_pkcs10 = xmlns + "pkcs10"
+tag_pkcs10_request = xmlns + "pkcs10_request"
+tag_report_error = xmlns + "report_error"
+tag_repository = xmlns + "repository"
+tag_rpki_root_cert = xmlns + "rpki_root_cert"
+tag_tenant = xmlns + "tenant"
+tag_signing_cert = xmlns + "signing_cert"
+tag_signing_cert_crl = xmlns + "signing_cert_crl"
+
+## @var content_type
+# Content type to use when sending left-right queries
+content_type = "application/x-rpki"
+
+## @var allowed_content_types
+# Content types we consider acceptable for incoming left-right
+# queries.
+
+allowed_content_types = (content_type,)
- ## @var pdus
- # Dispatch table of PDUs for this protocol.
- pdus = dict((x.element_name, x)
- for x in (self_elt, child_elt, parent_elt, bsc_elt,
- repository_elt, list_resources_elt,
- list_roa_requests_elt, list_ghostbuster_requests_elt,
- list_ee_certificate_requests_elt,
- list_published_objects_elt,
- list_received_resources_elt, report_error_elt))
- def serve_top_level(self, gctx, cb):
+class cms_msg(rpki.x509.XML_CMS_object):
"""
- Serve one msg PDU.
+ CMS-signed left-right PDU.
"""
- r_msg = self.__class__.reply()
-
- def loop(iterator, q_pdu):
-
- def fail(e):
- if not isinstance(e, rpki.exceptions.NotFound):
- logger.exception("Unhandled exception serving left-right PDU %r", q_pdu)
- r_msg.append(report_error_elt.from_exception(
- e, self_handle = q_pdu.self_handle, tag = q_pdu.tag))
- cb(r_msg)
-
- try:
- q_pdu.gctx = gctx
- q_pdu.serve_dispatch(r_msg, iterator, fail)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- fail(e)
-
- def done():
- cb(r_msg)
-
- rpki.async.iterator(self, loop, done)
-
-class sax_handler(rpki.xml_utils.sax_handler):
- """
- SAX handler for Left-Right protocol.
- """
-
- pdu = msg
- name = "msg"
- version = rpki.relaxng.left_right.version
-
-class cms_msg(rpki.x509.XML_CMS_object):
- """
- Class to hold a CMS-signed left-right PDU.
- """
-
- encoding = "us-ascii"
- schema = rpki.relaxng.left_right
- saxify = sax_handler.saxify
+ encoding = "us-ascii"
+ schema = rpki.relaxng.left_right
diff --git a/rpki/log.py b/rpki/log.py
index 2abb3b2c..14805fee 100644
--- a/rpki/log.py
+++ b/rpki/log.py
@@ -29,14 +29,6 @@ import logging.handlers
import argparse
import traceback as tb
-try:
- have_setproctitle = False
- if os.getenv("DISABLE_SETPROCTITLE") is None:
- import setproctitle
- have_setproctitle = True
-except ImportError:
- pass
-
logger = logging.getLogger(__name__)
## @var show_python_ids
@@ -44,221 +36,59 @@ logger = logging.getLogger(__name__)
show_python_ids = False
-## @var enable_tracebacks
-# Whether tracebacks are enabled globally. Individual classes and
-# modules may choose to override this.
-
-enable_tracebacks = False
-
-## @var use_setproctitle
-# Whether to use setproctitle (if available) to change name shown for
-# this process in ps listings (etc).
-
-use_setproctitle = True
-
-## @var proctitle_extra
-
-# Extra text to include in proctitle display. By default this is the
-# tail of the current directory name, as this is often useful, but you
-# can set it to something else if you like. If None or the empty
-# string, the extra information field will be omitted from the proctitle.
-
-proctitle_extra = os.path.basename(os.getcwd())
-
-
-class Formatter(object):
- """
- Reimplementation (easier than subclassing in this case) of
- logging.Formatter.
-
- It turns out that the logging code only cares about this class's
- .format(record) method, everything else is internal; so long as
- .format() converts a record into a properly formatted string, the
- logging code is happy.
-
- So, rather than mess around with dynamically constructing and
- deconstructing and tweaking format strings and ten zillion options
- we don't use, we just provide our own implementation that supports
- what we do need.
- """
-
- converter = time.gmtime
-
- def __init__(self, ident, handler):
- self.ident = ident
- self.is_syslog = isinstance(handler, logging.handlers.SysLogHandler)
-
- def format(self, record):
- return "".join(self.coformat(record)).rstrip("\n")
-
- def coformat(self, record):
- if not self.is_syslog:
- yield time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime(record.created))
- yield "%s[%d]: " % (self.ident, record.process)
- try:
- yield repr(record.context) + " "
- except AttributeError:
- pass
- yield record.getMessage()
- if record.exc_info:
- if self.is_syslog or not enable_tracebacks:
- lines = tb.format_exception_only(record.exc_info[0], record.exc_info[1])
- lines.insert(0, ": ")
- else:
- lines = tb.format_exception(record.exc_info[0], record.exc_info[1], record.exc_info[2])
- lines.insert(0, "\n")
- for line in lines:
- yield line
-
-
-def argparse_setup(parser, default_thunk = None):
- """
- Set up argparse stuff for functionality in this module.
-
- Default logging destination is syslog, but you can change this
- by setting default_thunk to a callable which takes no arguments
- and which returns a instance of a logging.Handler subclass.
-
- Also see rpki.log.init().
- """
-
- class LogLevelAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- setattr(namespace, self.dest, getattr(logging, values.upper()))
-
- parser.add_argument("--log-level", default = logging.WARNING, action = LogLevelAction,
- choices = ("debug", "info", "warning", "error", "critical"),
- help = "how verbosely to log")
-
- group = parser.add_mutually_exclusive_group()
-
- syslog_address = "/dev/log" if os.path.exists("/dev/log") else ("localhost", logging.handlers.SYSLOG_UDP_PORT)
-
- class SyslogAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.handlers.SysLogHandler(address = syslog_address, facility = values)
-
- group.add_argument("--log-syslog", nargs = "?", const = "daemon", action = SyslogAction,
- choices = sorted(logging.handlers.SysLogHandler.facility_names.keys()),
- help = "send logging to syslog")
-
- class StreamAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.StreamHandler(stream = self.const)
-
- group.add_argument("--log-stderr", nargs = 0, action = StreamAction, const = sys.stderr,
- help = "send logging to standard error")
-
- group.add_argument("--log-stdout", nargs = 0, action = StreamAction, const = sys.stdout,
- help = "send logging to standard output")
-
- class WatchedFileAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.handlers.WatchedFileHandler(filename = values)
-
- group.add_argument("--log-file", action = WatchedFileAction,
- help = "send logging to a file, reopening if rotated away")
-
- class RotatingFileAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.handlers.RotatingFileHandler(
- filename = values[0],
- maxBytes = int(values[1]) * 1024,
- backupCount = int(values[2]))
-
- group.add_argument("--log-rotating-file", action = RotatingFileAction,
- nargs = 3, metavar = ("FILENAME", "KBYTES", "COUNT"),
- help = "send logging to rotating file")
-
- class TimedRotatingFileAction(argparse.Action):
- def __call__(self, parser, namespace, values, option_string = None):
- namespace.log_handler = lambda: logging.handlers.TimedRotatingFileHandler(
- filename = values[0],
- interval = int(values[1]),
- backupCount = int(values[2]),
- when = "H",
- utc = True)
-
- group.add_argument("--log-timed-rotating-file", action = TimedRotatingFileAction,
- nargs = 3, metavar = ("FILENAME", "HOURS", "COUNT"),
- help = "send logging to timed rotating file")
-
- if default_thunk is None:
- default_thunk = lambda: logging.handlers.SysLogHandler(address = syslog_address, facility = "daemon")
-
- parser.set_defaults(log_handler = default_thunk)
-
-
-def init(ident = None, args = None):
- """
- Initialize logging system.
-
- Default logging destination is stderr if "args" is not specified.
- """
-
- # pylint: disable=E1103
-
- if ident is None:
- ident = os.path.basename(sys.argv[0])
-
- if args is None:
- args = argparse.Namespace(log_level = logging.WARNING,
- log_handler = logging.StreamHandler)
-
- handler = args.log_handler()
- handler.setFormatter(Formatter(ident, handler))
-
- root_logger = logging.getLogger()
- root_logger.addHandler(handler)
- root_logger.setLevel(args.log_level)
-
- if ident and have_setproctitle and use_setproctitle:
- if proctitle_extra:
- setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra))
- else:
- setproctitle.setproctitle(ident)
-
def class_logger(module_logger, attribute = "logger"):
- """
- Class decorator to add a class-level Logger object as a class
- attribute. This allows control of debugging messages at the class
- level rather than just the module level.
+ """
+ Class decorator to add a class-level Logger object as a class
+ attribute. This allows control of debugging messages at the class
+ level rather than just the module level.
- This decorator takes the module logger as an argument.
- """
+ This decorator takes the module logger as an argument.
+ """
- def decorator(cls):
- setattr(cls, attribute, module_logger.getChild(cls.__name__))
- return cls
- return decorator
+ def decorator(cls):
+ setattr(cls, attribute, module_logger.getChild(cls.__name__))
+ return cls
+ return decorator
def log_repr(obj, *tokens):
- """
- Constructor for __repr__() strings, handles suppression of Python
- IDs as needed, includes self_handle when available.
- """
-
- # pylint: disable=W0702
-
- words = ["%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)]
- try:
- words.append("{%s}" % obj.self.self_handle)
- except:
- pass
+ """
+ Constructor for __repr__() strings, handles suppression of Python
+ IDs as needed, includes tenant_handle when available.
+ """
- for token in tokens:
- if token is not None:
- try:
- s = str(token)
- except:
- s = "???"
- logger.exception("Failed to generate repr() string for object of type %r", type(token))
- if s:
- words.append(s)
-
- if show_python_ids:
- words.append(" at %#x" % id(obj))
-
- return "<" + " ".join(words) + ">"
+ words = ["%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)]
+ try:
+ words.append("{%s}" % obj.tenant.tenant_handle)
+ except:
+ pass
+
+ for token in tokens:
+ if token is not None:
+ try:
+ s = str(token)
+ except:
+ s = "???"
+ logger.exception("Failed to generate repr() string for object of type %r", type(token))
+ if s:
+ words.append(s)
+
+ if show_python_ids:
+ words.append(" at %#x" % id(obj))
+
+ return "<" + " ".join(words) + ">"
+
+
+def show_stack(stack_logger = None):
+ """
+ Log a stack trace.
+ """
+
+ if stack_logger is None:
+ stack_logger = logger
+
+ for frame in tb.format_stack():
+ for line in frame.split("\n"):
+ if line:
+ stack_logger.debug("%s", line.rstrip())
diff --git a/rpki/myrpki.py b/rpki/myrpki.py
index 2ae912f0..929c2a70 100644
--- a/rpki/myrpki.py
+++ b/rpki/myrpki.py
@@ -19,5 +19,5 @@ This is a tombstone for a program that no longer exists.
"""
if __name__ != "__main__": # sic -- don't break regression tests
- import sys
- sys.exit('"myrpki" is obsolete. Please use "rpkic" instead.')
+ import sys
+ sys.exit('"myrpki" is obsolete. Please use "rpkic" instead.')
diff --git a/rpki/mysql_import.py b/rpki/mysql_import.py
index 538e1916..bbb7ac22 100644
--- a/rpki/mysql_import.py
+++ b/rpki/mysql_import.py
@@ -52,11 +52,11 @@ from __future__ import with_statement
import warnings
if hasattr(warnings, "catch_warnings"):
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", DeprecationWarning)
- import MySQLdb
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", DeprecationWarning)
+ import MySQLdb
else:
- import MySQLdb
+ import MySQLdb
import _mysql_exceptions
diff --git a/rpki/oids.py b/rpki/oids.py
index 9fa30a04..b27be6f3 100644
--- a/rpki/oids.py
+++ b/rpki/oids.py
@@ -57,6 +57,7 @@ id_ad_caRepository = "1.3.6.1.5.5.7.48.5"
id_ad_signedObjectRepository = "1.3.6.1.5.5.7.48.9"
id_ad_rpkiManifest = "1.3.6.1.5.5.7.48.10"
id_ad_signedObject = "1.3.6.1.5.5.7.48.11"
+id_ad_rpkiNotify = "1.3.6.1.5.5.7.48.13"
commonName = "2.5.4.3"
serialNumber = "2.5.4.5"
countryName = "2.5.4.6"
@@ -81,22 +82,21 @@ id_sha256 = "2.16.840.1.101.3.4.2.1"
_oid2name = {}
for _sym in dir():
- if not _sym.startswith("_"):
- _val = globals()[_sym]
- if not isinstance(_val, str) or not all(_v.isdigit() for _v in _val.split(".")):
- raise ValueError("Bad OID definition: %s = %r" % (_sym, _val))
- _oid2name[_val] = _sym.replace("_", "-")
+ if not _sym.startswith("_"):
+ _val = globals()[_sym]
+ if not isinstance(_val, str) or not all(_v.isdigit() for _v in _val.split(".")):
+ raise ValueError("Bad OID definition: %s = %r" % (_sym, _val))
+ _oid2name[_val] = _sym.replace("_", "-")
-# pylint: disable=W0631
-del _sym
+del _sym # pylint: disable=W0631
del _val
def oid2name(oid):
- """
- Translate an OID into a string suitable for printing.
- """
+ """
+ Translate an OID into a string suitable for printing.
+ """
- if not isinstance(oid, (str, unicode)) or not all(o.isdigit() for o in oid.split(".")):
- raise ValueError("Parameter does not look like an OID string: " + repr(oid))
+ if not isinstance(oid, (str, unicode)) or not all(o.isdigit() for o in oid.split(".")):
+ raise ValueError("Parameter does not look like an OID string: " + repr(oid))
- return _oid2name.get(oid, oid)
+ return _oid2name.get(oid, oid)
diff --git a/rpki/old_irdbd.py b/rpki/old_irdbd.py
index 6c026a31..c08ce362 100644
--- a/rpki/old_irdbd.py
+++ b/rpki/old_irdbd.py
@@ -25,12 +25,14 @@ and perhaps still useful as a minimal example. This does NOT work with
the GUI, rpkic, or any of the other more recent tools.
"""
+# pylint: skip-file
+
import os
import time
import logging
import argparse
import urlparse
-import rpki.http
+import rpki.http_simple
import rpki.config
import rpki.resource_set
import rpki.relaxng
@@ -46,279 +48,266 @@ logger = logging.getLogger(__name__)
class main(object):
- def handle_list_resources(self, q_pdu, r_msg):
-
- r_pdu = rpki.left_right.list_resources_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.child_handle = q_pdu.child_handle
-
- self.cur.execute(
- """
- SELECT registrant_id, valid_until
- FROM registrant
- WHERE registry_handle = %s AND registrant_handle = %s
- """,
- (q_pdu.self_handle, q_pdu.child_handle))
-
- if self.cur.rowcount != 1:
- raise rpki.exceptions.NotInDatabase(
- "This query should have produced a single exact match, something's messed up"
- " (rowcount = %d, self_handle = %s, child_handle = %s)"
- % (self.cur.rowcount, q_pdu.self_handle, q_pdu.child_handle))
-
- registrant_id, valid_until = self.cur.fetchone()
-
- r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
-
- r_pdu.asn = rpki.resource_set.resource_set_as.from_sql(
- self.cur,
- """
- SELECT start_as, end_as
- FROM registrant_asn
- WHERE registrant_id = %s
- """,
- (registrant_id,))
-
- r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql(
- self.cur,
- """
- SELECT start_ip, end_ip
- FROM registrant_net
- WHERE registrant_id = %s AND version = 4
- """,
- (registrant_id,))
-
- r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql(
- self.cur,
- """
- SELECT start_ip, end_ip
- FROM registrant_net
- WHERE registrant_id = %s AND version = 6
- """,
- (registrant_id,))
-
- r_msg.append(r_pdu)
-
-
- def handle_list_roa_requests(self, q_pdu, r_msg):
-
- self.cur.execute(
- "SELECT roa_request_id, asn FROM roa_request WHERE self_handle = %s",
- (q_pdu.self_handle,))
-
- for roa_request_id, asn in self.cur.fetchall():
-
- r_pdu = rpki.left_right.list_roa_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.asn = asn
-
- r_pdu.ipv4 = rpki.resource_set.roa_prefix_set_ipv4.from_sql(
- self.cur,
- """
- SELECT prefix, prefixlen, max_prefixlen
- FROM roa_request_prefix
- WHERE roa_request_id = %s AND version = 4
- """,
- (roa_request_id,))
-
- r_pdu.ipv6 = rpki.resource_set.roa_prefix_set_ipv6.from_sql(
- self.cur,
- """
- SELECT prefix, prefixlen, max_prefixlen
- FROM roa_request_prefix
- WHERE roa_request_id = %s AND version = 6
- """,
- (roa_request_id,))
-
- r_msg.append(r_pdu)
-
-
- def handle_list_ghostbuster_requests(self, q_pdu, r_msg):
-
- self.cur.execute(
- """
- SELECT vcard
- FROM ghostbuster_request
- WHERE self_handle = %s AND parent_handle = %s
- """,
- (q_pdu.self_handle, q_pdu.parent_handle))
-
- vcards = [result[0] for result in self.cur.fetchall()]
-
- if not vcards:
-
- self.cur.execute(
- """
- SELECT vcard
- FROM ghostbuster_request
- WHERE self_handle = %s AND parent_handle IS NULL
- """,
- (q_pdu.self_handle,))
-
- vcards = [result[0] for result in self.cur.fetchall()]
-
- for vcard in vcards:
- r_pdu = rpki.left_right.list_ghostbuster_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.parent_handle = q_pdu.parent_handle
- r_pdu.vcard = vcard
- r_msg.append(r_pdu)
-
-
- def handle_list_ee_certificate_requests(self, q_pdu, r_msg):
-
- self.cur.execute(
- """
- SELECT ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until
- FROM ee_certificate
- WHERE self_handle = %s
- """,
- (q_pdu.self_handle,))
-
- for ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until in self.cur.fetchall():
-
- r_pdu = rpki.left_right.list_ee_certificate_requests_elt()
- r_pdu.tag = q_pdu.tag
- r_pdu.self_handle = q_pdu.self_handle
- r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
- r_pdu.pkcs10 = rpki.x509.PKCS10(DER = pkcs10)
- r_pdu.gski = gski
- r_pdu.cn = cn
- r_pdu.sn = sn
- r_pdu.eku = eku
-
- r_pdu.asn = rpki.resource_set.resource_set_as.from_sql(
- self.cur,
- """
- SELECT start_as, end_as
- FROM ee_certificate_asn
- WHERE ee_certificate_id = %s
- """,
- (ee_certificate_id,))
-
- r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql(
- self.cur,
- """
- SELECT start_ip, end_ip
- FROM ee_certificate_net
- WHERE ee_certificate_id = %s AND version = 4
- """,
- (ee_certificate_id,))
-
- r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql(
- self.cur,
- """
- SELECT start_ip, end_ip
- FROM ee_certificate_net
- WHERE ee_certificate_id = %s AND version = 6
- """,
- (ee_certificate_id,))
-
- r_msg.append(r_pdu)
-
-
- handle_dispatch = {
- rpki.left_right.list_resources_elt : handle_list_resources,
- rpki.left_right.list_roa_requests_elt : handle_list_roa_requests,
- rpki.left_right.list_ghostbuster_requests_elt : handle_list_ghostbuster_requests,
- rpki.left_right.list_ee_certificate_requests_elt : handle_list_ee_certificate_requests }
-
- def handler(self, query, path, cb):
- try:
-
- self.db.ping(True)
-
- r_msg = rpki.left_right.msg.reply()
-
- try:
-
- q_msg = rpki.left_right.cms_msg(DER = query).unwrap((self.bpki_ta, self.rpkid_cert))
-
- if not isinstance(q_msg, rpki.left_right.msg) or not q_msg.is_query():
- raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_msg)
-
- for q_pdu in q_msg:
-
- try:
+ def handle_list_resources(self, q_pdu, r_msg):
+
+ r_pdu = rpki.left_right.list_resources_elt()
+ r_pdu.tag = q_pdu.tag
+ r_pdu.self_handle = q_pdu.self_handle
+ r_pdu.child_handle = q_pdu.child_handle
+
+ self.cur.execute(
+ """
+ SELECT registrant_id, valid_until
+ FROM registrant
+ WHERE registry_handle = %s AND registrant_handle = %s
+ """,
+ (q_pdu.self_handle, q_pdu.child_handle))
+
+ if self.cur.rowcount != 1:
+ raise rpki.exceptions.NotInDatabase(
+ "This query should have produced a single exact match, something's messed up"
+ " (rowcount = %d, self_handle = %s, child_handle = %s)"
+ % (self.cur.rowcount, q_pdu.self_handle, q_pdu.child_handle))
+
+ registrant_id, valid_until = self.cur.fetchone()
+
+ r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+ r_pdu.asn = rpki.resource_set.resource_set_as.from_sql(
+ self.cur,
+ """
+ SELECT start_as, end_as
+ FROM registrant_asn
+ WHERE registrant_id = %s
+ """,
+ (registrant_id,))
+
+ r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql(
+ self.cur,
+ """
+ SELECT start_ip, end_ip
+ FROM registrant_net
+ WHERE registrant_id = %s AND version = 4
+ """,
+ (registrant_id,))
+
+ r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql(
+ self.cur,
+ """
+ SELECT start_ip, end_ip
+ FROM registrant_net
+ WHERE registrant_id = %s AND version = 6
+ """,
+ (registrant_id,))
+
+ r_msg.append(r_pdu)
+
+
+ def handle_list_roa_requests(self, q_pdu, r_msg):
+
+ self.cur.execute(
+ "SELECT roa_request_id, asn FROM roa_request WHERE self_handle = %s",
+ (q_pdu.self_handle,))
+
+ for roa_request_id, asn in self.cur.fetchall():
+
+ r_pdu = rpki.left_right.list_roa_requests_elt()
+ r_pdu.tag = q_pdu.tag
+ r_pdu.self_handle = q_pdu.self_handle
+ r_pdu.asn = asn
+
+ r_pdu.ipv4 = rpki.resource_set.roa_prefix_set_ipv4.from_sql(
+ self.cur,
+ """
+ SELECT prefix, prefixlen, max_prefixlen
+ FROM roa_request_prefix
+ WHERE roa_request_id = %s AND version = 4
+ """,
+ (roa_request_id,))
+
+ r_pdu.ipv6 = rpki.resource_set.roa_prefix_set_ipv6.from_sql(
+ self.cur,
+ """
+ SELECT prefix, prefixlen, max_prefixlen
+ FROM roa_request_prefix
+ WHERE roa_request_id = %s AND version = 6
+ """,
+ (roa_request_id,))
+
+ r_msg.append(r_pdu)
+
+
+ def handle_list_ghostbuster_requests(self, q_pdu, r_msg):
+
+ self.cur.execute(
+ """
+ SELECT vcard
+ FROM ghostbuster_request
+ WHERE self_handle = %s AND parent_handle = %s
+ """,
+ (q_pdu.self_handle, q_pdu.parent_handle))
+
+ vcards = [result[0] for result in self.cur.fetchall()]
+
+ if not vcards:
+
+ self.cur.execute(
+ """
+ SELECT vcard
+ FROM ghostbuster_request
+ WHERE self_handle = %s AND parent_handle IS NULL
+ """,
+ (q_pdu.self_handle,))
+
+ vcards = [result[0] for result in self.cur.fetchall()]
+
+ for vcard in vcards:
+ r_pdu = rpki.left_right.list_ghostbuster_requests_elt()
+ r_pdu.tag = q_pdu.tag
+ r_pdu.self_handle = q_pdu.self_handle
+ r_pdu.parent_handle = q_pdu.parent_handle
+ r_pdu.vcard = vcard
+ r_msg.append(r_pdu)
+
+
+ def handle_list_ee_certificate_requests(self, q_pdu, r_msg):
+
+ self.cur.execute(
+ """
+ SELECT ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until
+ FROM ee_certificate
+ WHERE self_handle = %s
+ """,
+ (q_pdu.self_handle,))
+
+ for ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until in self.cur.fetchall():
+
+ r_pdu = rpki.left_right.list_ee_certificate_requests_elt()
+ r_pdu.tag = q_pdu.tag
+ r_pdu.self_handle = q_pdu.self_handle
+ r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")
+ r_pdu.pkcs10 = rpki.x509.PKCS10(DER = pkcs10)
+ r_pdu.gski = gski
+ r_pdu.cn = cn
+ r_pdu.sn = sn
+ r_pdu.eku = eku
+
+ r_pdu.asn = rpki.resource_set.resource_set_as.from_sql(
+ self.cur,
+ """
+ SELECT start_as, end_as
+ FROM ee_certificate_asn
+ WHERE ee_certificate_id = %s
+ """,
+ (ee_certificate_id,))
+
+ r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql(
+ self.cur,
+ """
+ SELECT start_ip, end_ip
+ FROM ee_certificate_net
+ WHERE ee_certificate_id = %s AND version = 4
+ """,
+ (ee_certificate_id,))
+
+ r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql(
+ self.cur,
+ """
+ SELECT start_ip, end_ip
+ FROM ee_certificate_net
+ WHERE ee_certificate_id = %s AND version = 6
+ """,
+ (ee_certificate_id,))
+
+ r_msg.append(r_pdu)
+
+
+ handle_dispatch = {
+ rpki.left_right.list_resources_elt : handle_list_resources,
+ rpki.left_right.list_roa_requests_elt : handle_list_roa_requests,
+ rpki.left_right.list_ghostbuster_requests_elt : handle_list_ghostbuster_requests,
+ rpki.left_right.list_ee_certificate_requests_elt : handle_list_ee_certificate_requests }
+
+ def handler(self, request, q_der):
+ try:
+
+ self.db.ping(True)
+
+ r_msg = rpki.left_right.msg.reply()
try:
- h = self.handle_dispatch[type(q_pdu)]
- except KeyError:
- raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_pdu)
- else:
- h(self, q_pdu, r_msg)
- except (rpki.async.ExitNow, SystemExit):
- raise
+ q_msg = rpki.left_right.cms_msg_saxify(DER = q_der).unwrap((self.bpki_ta, self.rpkid_cert))
+
+ if not isinstance(q_msg, rpki.left_right.msg) or not q_msg.is_query():
+ raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_msg)
- except Exception, e:
- logger.exception("Exception serving PDU %r", q_pdu)
- r_msg.append(rpki.left_right.report_error_elt.from_exception(e, q_pdu.self_handle, q_pdu.tag))
+ for q_pdu in q_msg:
- except (rpki.async.ExitNow, SystemExit):
- raise
+ try:
- except Exception, e:
- logger.exception("Exception decoding query")
- r_msg.append(rpki.left_right.report_error_elt.from_exception(e))
+ try:
+ h = self.handle_dispatch[type(q_pdu)]
+ except KeyError:
+ raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_pdu)
+ else:
+ h(self, q_pdu, r_msg)
- cb(200, body = rpki.left_right.cms_msg().wrap(r_msg, self.irdbd_key, self.irdbd_cert))
+ except Exception, e:
+ logger.exception("Exception serving PDU %r", q_pdu)
+ r_msg.append(rpki.left_right.report_error_elt.from_exception(e, q_pdu.self_handle, q_pdu.tag))
- except (rpki.async.ExitNow, SystemExit):
- raise
+ except Exception, e:
+ logger.exception("Exception decoding query")
+ r_msg.append(rpki.left_right.report_error_elt.from_exception(e))
- except Exception, e:
- logger.exception("Unhandled exception, returning HTTP failure")
- cb(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+ request.send_cms_response(rpki.left_right.cms_msg_saxify().wrap(r_msg, self.irdbd_key, self.irdbd_cert))
+ except Exception, e:
+ logger.exception("Unhandled exception, returning HTTP failure")
+ request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
- def __init__(self):
- os.environ["TZ"] = "UTC"
- time.tzset()
+ def __init__(self):
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize (ignored, old_irdbd never daemonizes)")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
+ os.environ["TZ"] = "UTC"
+ time.tzset()
- rpki.log.init("irdbd", args)
+ self.cfg = rpki.config.argparser(section = "irdbd", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground", default = False,
+ help = "do not daemonize (ignored, old_irdbd never daemonizes)")
+ self.cfg.add_logging_arguments()
+        args = self.cfg.argparser.parse_args()
- self.cfg = rpki.config.parser(args.config, "irdbd")
+        self.cfg.configure_logging(args = args, ident = "irdbd")
- startup_msg = self.cfg.get("startup-message", "")
- if startup_msg:
- logger.info(startup_msg)
+ startup_msg = self.cfg.get("startup-message", "")
+ if startup_msg:
+ logger.info(startup_msg)
- self.cfg.set_global_flags()
+ self.cfg.set_global_flags()
- self.db = MySQLdb.connect(user = self.cfg.get("sql-username"),
- db = self.cfg.get("sql-database"),
- passwd = self.cfg.get("sql-password"))
+ self.db = MySQLdb.connect(user = self.cfg.get("sql-username"),
+ db = self.cfg.get("sql-database"),
+ passwd = self.cfg.get("sql-password"))
- self.cur = self.db.cursor()
- self.db.autocommit(True)
+ self.cur = self.db.cursor()
+ self.db.autocommit(True)
- self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
- self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert"))
- self.irdbd_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdbd-cert"))
- self.irdbd_key = rpki.x509.RSA( Auto_update = self.cfg.get("irdbd-key"))
+ self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
+ self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert"))
+ self.irdbd_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdbd-cert"))
+ self.irdbd_key = rpki.x509.RSA( Auto_update = self.cfg.get("irdbd-key"))
- u = urlparse.urlparse(self.cfg.get("http-url"))
+ u = urlparse.urlparse(self.cfg.get("http-url"))
- assert u.scheme in ("", "http") and \
- u.username is None and \
- u.password is None and \
- u.params == "" and \
- u.query == "" and \
- u.fragment == ""
+ assert u.scheme in ("", "http") and \
+ u.username is None and \
+ u.password is None and \
+ u.params == "" and \
+ u.query == "" and \
+ u.fragment == ""
- rpki.http.server(host = u.hostname or "localhost",
- port = u.port or 443,
- handlers = ((u.path, self.handler),))
+ rpki.http_simple.server(host = u.hostname or "localhost",
+ port = u.port or 443,
+ handlers = ((u.path, self.handler),))
diff --git a/rpki/pubd.py b/rpki/pubd.py
index 79315a78..389936bb 100644
--- a/rpki/pubd.py
+++ b/rpki/pubd.py
@@ -23,151 +23,285 @@ RPKI publication engine.
import os
import re
+import uuid
import time
+import socket
import logging
import argparse
+
import rpki.resource_set
-import rpki.up_down
import rpki.x509
-import rpki.sql
-import rpki.http
import rpki.config
import rpki.exceptions
-import rpki.relaxng
import rpki.log
import rpki.publication
+import rpki.publication_control
import rpki.daemonize
+import rpki.http_simple
+
+from lxml.etree import Element, SubElement
logger = logging.getLogger(__name__)
+
class main(object):
- """
- Main program for pubd.
- """
+ """
+ Main program for pubd.
+ """
- def __init__(self):
+ def __init__(self):
- os.environ["TZ"] = "UTC"
- time.tzset()
+ os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.pubd")
+ time.tzset()
- self.irbe_cms_timestamp = None
+ self.irbe_cms_timestamp = None
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize")
- parser.add_argument("--pidfile",
- help = "override default location of pid file")
- parser.add_argument("--profile",
- help = "enable profiling, saving data to PROFILE")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
+ self.cfg = rpki.config.argparser(section = "pubd", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground",
+ default = False,
+ help = "whether to daemonize")
+ self.cfg.add_argument("--pidfile",
+ default = os.path.join(rpki.daemonize.default_pid_directory,
+ "pubd.pid"),
+ help = "override default location of pid file")
+ self.cfg.add_argument("--profile",
+ default = "",
+ help = "enable profiling, saving data to PROFILE")
+ self.cfg.add_logging_arguments()
+ args = self.cfg.argparser.parse_args()
- self.profile = args.profile
+ self.profile = args.profile
- rpki.log.init("pubd", args)
+ self.cfg.configure_logging(args = args, ident = "pubd")
- self.cfg = rpki.config.parser(args.config, "pubd")
- self.cfg.set_global_flags()
+ try:
+ self.cfg.set_global_flags()
- if not args.foreground:
- rpki.daemonize.daemon(pidfile = args.pidfile)
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
- if self.profile:
- import cProfile
- prof = cProfile.Profile()
- try:
- prof.runcall(self.main)
- finally:
- prof.dump_stats(self.profile)
- logger.info("Dumped profile data to %s", self.profile)
- else:
- self.main()
+ if self.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(self.main)
+ finally:
+ prof.dump_stats(self.profile)
+ logger.info("Dumped profile data to %s", self.profile)
+ else:
+ self.main()
- def main(self):
+ except:
+            logger.exception("Unhandled exception in rpki.pubd.main()")
+ sys.exit(1)
- if self.profile:
- logger.info("Running in profile mode with output to %s", self.profile)
- self.sql = rpki.sql.session(self.cfg)
+ def main(self):
- self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
- self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert"))
- self.pubd_cert = rpki.x509.X509(Auto_update = self.cfg.get("pubd-cert"))
- self.pubd_key = rpki.x509.RSA( Auto_update = self.cfg.get("pubd-key"))
+ if self.profile:
+ logger.info("Running in profile mode with output to %s", self.profile)
- self.http_server_host = self.cfg.get("server-host", "")
- self.http_server_port = self.cfg.getint("server-port")
+ import django
+ django.setup()
- self.publication_base = self.cfg.get("publication-base", "publication/")
+ global rpki # pylint: disable=W0602
+ import rpki.pubdb # pylint: disable=W0621
- self.publication_multimodule = self.cfg.getboolean("publication-multimodule", False)
+ self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
+ self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert"))
+ self.pubd_cert = rpki.x509.X509(Auto_update = self.cfg.get("pubd-cert"))
+ self.pubd_key = rpki.x509.RSA( Auto_update = self.cfg.get("pubd-key"))
+ self.pubd_crl = rpki.x509.CRL( Auto_update = self.cfg.get("pubd-crl"))
- rpki.http.server(
- host = self.http_server_host,
- port = self.http_server_port,
- handlers = (("/control", self.control_handler),
- ("/client/", self.client_handler)))
+ self.http_server_host = self.cfg.get("server-host", "")
+ self.http_server_port = self.cfg.getint("server-port")
- def handler_common(self, query, client, cb, certs, crl = None):
- """
- Common PDU handler code.
- """
+ self.publication_base = self.cfg.get("publication-base", "publication/")
- def done(r_msg):
- reply = rpki.publication.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert, crl)
- self.sql.sweep()
- cb(reply)
+ self.rrdp_base_uri = self.cfg.get("rrdp-base-uri", "https://%s/rrdp/" % socket.getfqdn())
+ self.rrdp_expiration_interval = rpki.sundial.timedelta.parse(self.cfg.get("rrdp-expiration-interval", "6h"))
+ self.rrdp_publication_base = self.cfg.get("rrdp-publication-base", "rrdp-publication/")
- q_cms = rpki.publication.cms_msg(DER = query)
- q_msg = q_cms.unwrap(certs)
- if client is None:
- self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, "control")
- else:
- q_cms.check_replay_sql(client, client.client_handle)
- q_msg.serve_top_level(self, client, done)
+ try:
+ self.session = rpki.pubdb.models.Session.objects.get()
+ except rpki.pubdb.models.Session.DoesNotExist:
+ self.session = rpki.pubdb.models.Session.objects.create(uuid = str(uuid.uuid4()), serial = 0)
- def control_handler(self, query, path, cb):
- """
- Process one PDU from the IRBE.
- """
+ rpki.http_simple.server(
+ host = self.http_server_host,
+ port = self.http_server_port,
+ handlers = (("/control", self.control_handler),
+ ("/client/", self.client_handler)))
- def done(body):
- cb(200, body = body)
- try:
- self.handler_common(query, None, done, (self.bpki_ta, self.irbe_cert))
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Unhandled exception processing control query, path %r", path)
- cb(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+ def control_handler(self, request, q_der):
+ """
+ Process one PDU from the IRBE.
+ """
- client_url_regexp = re.compile("/client/([-A-Z0-9_/]+)$", re.I)
+ from django.db import transaction, connection
- def client_handler(self, query, path, cb):
- """
- Process one PDU from a client.
- """
+ try:
+ connection.cursor() # Reconnect to mysqld if necessary
+ q_cms = rpki.publication_control.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((self.bpki_ta, self.irbe_cert))
+ self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, "control")
+ if q_msg.get("type") != "query":
+ raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type"))
+ r_msg = Element(rpki.publication_control.tag_msg, nsmap = rpki.publication_control.nsmap,
+ type = "reply", version = rpki.publication_control.version)
+
+ try:
+ q_pdu = None
+ with transaction.atomic():
+
+ for q_pdu in q_msg:
+ if q_pdu.tag != rpki.publication_control.tag_client:
+ raise rpki.exceptions.BadQuery("PDU is %s, expected client" % q_pdu.tag)
+ client_handle = q_pdu.get("client_handle")
+ action = q_pdu.get("action")
+ if client_handle is None:
+ logger.info("Control %s request", action)
+ else:
+ logger.info("Control %s request for %s", action, client_handle)
+
+ if action in ("get", "list"):
+ if action == "get":
+ clients = rpki.pubdb.models.Client.objects.get(client_handle = client_handle),
+ else:
+ clients = rpki.pubdb.models.Client.objects.all()
+ for client in clients:
+ r_pdu = SubElement(r_msg, q_pdu.tag, action = action,
+ client_handle = client.client_handle, base_uri = client.base_uri)
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+ SubElement(r_pdu, rpki.publication_control.tag_bpki_cert).text = client.bpki_cert.get_Base64()
+ if client.bpki_glue is not None:
+ SubElement(r_pdu, rpki.publication_control.tag_bpki_glue).text = client.bpki_glue.get_Base64()
+
+ if action in ("create", "set"):
+ if action == "create":
+ client = rpki.pubdb.models.Client(client_handle = client_handle)
+ else:
+ client = rpki.pubdb.models.Client.objects.get(client_handle = client_handle)
+ if q_pdu.get("base_uri"):
+ client.base_uri = q_pdu.get("base_uri")
+ bpki_cert = q_pdu.find(rpki.publication_control.tag_bpki_cert)
+ if bpki_cert is not None:
+ client.bpki_cert = rpki.x509.X509(Base64 = bpki_cert.text)
+ bpki_glue = q_pdu.find(rpki.publication_control.tag_bpki_glue)
+ if bpki_glue is not None:
+ client.bpki_glue = rpki.x509.X509(Base64 = bpki_glue.text)
+ if q_pdu.get("clear_replay_protection") == "yes":
+ client.last_cms_timestamp = None
+ client.save()
+ logger.debug("Stored client_handle %s, base_uri %s, bpki_cert %r, bpki_glue %r, last_cms_timestamp %s",
+ client.client_handle, client.base_uri, client.bpki_cert, client.bpki_glue,
+ client.last_cms_timestamp)
+ r_pdu = SubElement(r_msg, q_pdu.tag, action = action, client_handle = client_handle)
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ if action == "destroy":
+ rpki.pubdb.models.Client.objects.filter(client_handle = client_handle).delete()
+ r_pdu = SubElement(r_msg, q_pdu.tag, action = action, client_handle = client_handle)
+ if q_pdu.get("tag"):
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ except Exception as e:
+ logger.exception("Exception processing PDU %r action = %s client_handle = %s", q_pdu, q_pdu.get("action"), q_pdu.get("client_handle"))
+ r_pdu = SubElement(r_msg, rpki.publication_control.tag_report_error, error_code = e.__class__.__name__)
+ r_pdu.text = str(e)
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ request.send_cms_response(rpki.publication_control.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert))
+
+ except Exception as e:
+ logger.exception("Unhandled exception processing control query, path %r", request.path)
+ request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+
+
+ client_url_regexp = re.compile("/client/([-A-Z0-9_/]+)$", re.I)
+
+ def client_handler(self, request, q_der):
+ """
+ Process one PDU from a client.
+ """
+
+ from django.db import transaction, connection
+
+ try:
+ connection.cursor() # Reconnect to mysqld if necessary
+ match = self.client_url_regexp.search(request.path)
+ if match is None:
+ raise rpki.exceptions.BadContactURL("Bad path: %s" % request.path)
+ client = rpki.pubdb.models.Client.objects.get(client_handle = match.group(1))
+ q_cms = rpki.publication.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((self.bpki_ta, client.bpki_cert, client.bpki_glue))
+ client.last_cms_timestamp = q_cms.check_replay(client.last_cms_timestamp, client.client_handle)
+ client.save()
+ if q_msg.get("type") != "query":
+ raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type"))
+ r_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "reply", version = rpki.publication.version)
+ delta = None
+ try:
+ with transaction.atomic():
+ for q_pdu in q_msg:
+ if q_pdu.get("uri"):
+ logger.info("Client %s request for %s", q_pdu.tag, q_pdu.get("uri"))
+ else:
+ logger.info("Client %s request", q_pdu.tag)
+
+ if q_pdu.tag == rpki.publication.tag_list:
+ for obj in client.publishedobject_set.all():
+ r_pdu = SubElement(r_msg, q_pdu.tag, uri = obj.uri, hash = obj.hash)
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ else:
+ assert q_pdu.tag in (rpki.publication.tag_publish, rpki.publication.tag_withdraw)
+ if delta is None:
+ delta = self.session.new_delta(rpki.sundial.now() + self.rrdp_expiration_interval)
+ client.check_allowed_uri(q_pdu.get("uri"))
+ if q_pdu.tag == rpki.publication.tag_publish:
+ der = q_pdu.text.decode("base64")
+ logger.info("Publishing %s", rpki.x509.uri_dispatch(q_pdu.get("uri"))(DER = der).tracking_data(q_pdu.get("uri")))
+ delta.publish(client, der, q_pdu.get("uri"), q_pdu.get("hash"))
+ else:
+ logger.info("Withdrawing %s", q_pdu.get("uri"))
+ delta.withdraw(client, q_pdu.get("uri"), q_pdu.get("hash"))
+ r_pdu = SubElement(r_msg, q_pdu.tag, uri = q_pdu.get("uri"))
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ if delta is not None:
+ delta.activate(self.rrdp_publication_base)
+ self.session.expire_deltas()
+
+ except Exception as e:
+ if isinstance(e, (rpki.exceptions.ExistingObjectAtURI,
+ rpki.exceptions.DifferentObjectAtURI,
+ rpki.exceptions.NoObjectAtURI)):
+ logger.warn("Database synchronization error processing PDU %r hash %s uri %s: %s",
+ q_pdu, q_pdu.get("hash"), q_pdu.get("uri"), e)
+ else:
+ logger.exception("Exception processing PDU %r hash = %s uri = %s",
+ q_pdu, q_pdu.get("hash"), q_pdu.get("uri"))
+ r_pdu = SubElement(r_msg, rpki.publication.tag_report_error, error_code = e.__class__.__name__)
+ r_pdu.text = str(e)
+ if q_pdu.get("tag") is not None:
+ r_pdu.set("tag", q_pdu.get("tag"))
+
+ else:
+ if delta is not None:
+ self.session.synchronize_rrdp_files(self.rrdp_publication_base, self.rrdp_base_uri)
+ delta.update_rsync_files(self.publication_base)
+
+ request.send_cms_response(rpki.publication.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert, self.pubd_crl))
- def done(body):
- cb(200, body = body)
-
- try:
- match = self.client_url_regexp.search(path)
- if match is None:
- raise rpki.exceptions.BadContactURL("Bad path: %s" % path)
- client_handle = match.group(1)
- client = rpki.publication.client_elt.sql_fetch_where1(self, "client_handle = %s", (client_handle,))
- if client is None:
- raise rpki.exceptions.ClientNotFound("Could not find client %s" % client_handle)
- config = rpki.publication.config_elt.fetch(self)
- if config is None or config.bpki_crl is None:
- raise rpki.exceptions.CMSCRLNotSet
- self.handler_common(query, client, done, (self.bpki_ta, client.bpki_cert, client.bpki_glue), config.bpki_crl)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Unhandled exception processing client query, path %r", path)
- cb(500, reason = "Could not process PDU: %s" % e)
+ except Exception as e:
+ logger.exception("Unhandled exception processing client query, path %r", request.path)
+ request.send_error(500, "Could not process PDU: %s" % e)
diff --git a/rpki/pubdb/__init__.py b/rpki/pubdb/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/pubdb/__init__.py
diff --git a/rpki/pubdb/migrations/0001_initial.py b/rpki/pubdb/migrations/0001_initial.py
new file mode 100644
index 00000000..e278d7dd
--- /dev/null
+++ b/rpki/pubdb/migrations/0001_initial.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import rpki.fields
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Client',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('client_handle', models.CharField(unique=True, max_length=255)),
+ ('base_uri', models.TextField()),
+ ('bpki_cert', rpki.fields.CertificateField()),
+ ('bpki_glue', rpki.fields.CertificateField(null=True)),
+ ('last_cms_timestamp', rpki.fields.SundialField(null=True, blank=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Delta',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('serial', models.BigIntegerField()),
+ ('xml', models.TextField()),
+ ('hash', models.CharField(max_length=64)),
+ ('expires', rpki.fields.SundialField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='PublishedObject',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.CharField(max_length=255)),
+ ('der', models.BinaryField()),
+ ('hash', models.CharField(max_length=64)),
+ ('client', models.ForeignKey(to='pubdb.Client')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Session',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uuid', models.CharField(unique=True, max_length=36)),
+ ('serial', models.BigIntegerField()),
+ ('snapshot', models.TextField(blank=True)),
+ ('hash', models.CharField(max_length=64, blank=True)),
+ ],
+ ),
+ migrations.AddField(
+ model_name='publishedobject',
+ name='session',
+ field=models.ForeignKey(to='pubdb.Session'),
+ ),
+ migrations.AddField(
+ model_name='delta',
+ name='session',
+ field=models.ForeignKey(to='pubdb.Session'),
+ ),
+ migrations.AlterUniqueTogether(
+ name='publishedobject',
+ unique_together=set([('session', 'hash'), ('session', 'uri')]),
+ ),
+ ]
diff --git a/rpki/pubdb/migrations/0002_auto_20160221_0617.py b/rpki/pubdb/migrations/0002_auto_20160221_0617.py
new file mode 100644
index 00000000..a83ad3d3
--- /dev/null
+++ b/rpki/pubdb/migrations/0002_auto_20160221_0617.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('pubdb', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='session',
+ name='hash',
+ ),
+ migrations.RemoveField(
+ model_name='session',
+ name='snapshot',
+ ),
+ ]
diff --git a/rpki/pubdb/migrations/0003_remove_delta_xml.py b/rpki/pubdb/migrations/0003_remove_delta_xml.py
new file mode 100644
index 00000000..e2c0ce16
--- /dev/null
+++ b/rpki/pubdb/migrations/0003_remove_delta_xml.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('pubdb', '0002_auto_20160221_0617'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='delta',
+ name='xml',
+ ),
+ ]
diff --git a/rpki/pubdb/migrations/__init__.py b/rpki/pubdb/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/pubdb/migrations/__init__.py
diff --git a/rpki/pubdb/models.py b/rpki/pubdb/models.py
new file mode 100644
index 00000000..21508bed
--- /dev/null
+++ b/rpki/pubdb/models.py
@@ -0,0 +1,329 @@
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Django ORM models for pubd.
+"""
+
+from __future__ import unicode_literals
+from django.db import models
+from rpki.fields import CertificateField, SundialField
+from lxml.etree import Element, SubElement, ElementTree, xmlfile as XMLFile
+
+import os
+import logging
+import rpki.exceptions
+import rpki.relaxng
+import rpki.x509
+import rpki.POW
+
+logger = logging.getLogger(__name__)
+
+
+# pylint: disable=W5101
+
+# Some of this probably ought to move into a rpki.rrdp module.
+
+rrdp_xmlns = rpki.relaxng.rrdp.xmlns
+rrdp_nsmap = rpki.relaxng.rrdp.nsmap
+rrdp_version = "1"
+
+rrdp_tag_delta = rrdp_xmlns + "delta"
+rrdp_tag_notification = rrdp_xmlns + "notification"
+rrdp_tag_publish = rrdp_xmlns + "publish"
+rrdp_tag_snapshot = rrdp_xmlns + "snapshot"
+rrdp_tag_withdraw = rrdp_xmlns + "withdraw"
+
+
+# This would probably be useful to more than just this module, not
+# sure quite where to put it at the moment.
+
+def DERSubElement(elt, name, der, attrib = None, **kwargs):
+ """
+ Convenience wrapper around SubElement for use with Base64 text.
+ """
+
+ se = SubElement(elt, name, attrib, **kwargs)
+ se.text = rpki.x509.base64_with_linebreaks(der)
+ se.tail = "\n"
+ return se
+
+
+def sha256_file(f):
+ """
+ Read data from a file-like object, return hex-encoded sha256 hash.
+ """
+
+ h = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
+ while True:
+ x = f.read(8192)
+ if len(x) == 0:
+ return h.digest().encode("hex")
+ h.update(x)
+
+
+class Client(models.Model):
+ client_handle = models.CharField(unique = True, max_length = 255)
+ base_uri = models.TextField()
+ bpki_cert = CertificateField()
+ bpki_glue = CertificateField(null = True)
+ last_cms_timestamp = SundialField(blank = True, null = True)
+
+
+ def check_allowed_uri(self, uri):
+ """
+ Make sure that a target URI is within this client's allowed URI space.
+ """
+
+ if not uri.startswith(self.base_uri):
+ raise rpki.exceptions.ForbiddenURI
+
+
+class Session(models.Model):
+ uuid = models.CharField(unique = True, max_length=36)
+ serial = models.BigIntegerField()
+
+
+ def new_delta(self, expires):
+ """
+ Construct a new delta associated with this session.
+ """
+
+ # pylint: disable=W0201
+
+ delta = Delta(session = self,
+ serial = self.serial + 1,
+ expires = expires)
+ delta.xml = Element(rrdp_tag_delta,
+ nsmap = rrdp_nsmap,
+ version = rrdp_version,
+ session_id = self.uuid,
+ serial = str(delta.serial))
+ return delta
+
+
+ def expire_deltas(self):
+ """
+ Delete deltas whose expiration date has passed.
+ """
+
+ self.delta_set.filter(expires__lt = rpki.sundial.now()).delete()
+
+
+ @property
+ def snapshot_fn(self):
+ return "%s/snapshot/%s.xml" % (self.uuid, self.serial)
+
+
+ @property
+ def notification_fn(self):
+ return "notify.xml"
+
+
+ @staticmethod
+ def _rrdp_filename_to_uri(fn, rrdp_base_uri):
+ return "%s/%s" % (rrdp_base_uri.rstrip("/"), fn)
+
+
+ def write_snapshot_file(self, rrdp_publication_base):
+ fn = os.path.join(rrdp_publication_base, self.snapshot_fn)
+ tn = fn + ".%s.tmp" % os.getpid()
+ dn = os.path.dirname(fn)
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ with open(tn, "wb+") as f:
+ with XMLFile(f) as xf:
+ with xf.element(rrdp_tag_snapshot, nsmap = rrdp_nsmap,
+ version = rrdp_version, session_id = self.uuid, serial = str(self.serial)):
+ xf.write("\n")
+ for obj in self.publishedobject_set.all():
+ e = Element(rrdp_tag_publish, nsmap = rrdp_nsmap, uri = obj.uri)
+ e.text = rpki.x509.base64_with_linebreaks(obj.der)
+ xf.write(e, pretty_print = True)
+ f.seek(0)
+ h = sha256_file(f)
+ os.rename(tn, fn)
+ return h
+
+
+ def write_notification_xml(self, rrdp_base_uri, snapshot_hash, rrdp_publication_base):
+ xml = Element(rrdp_tag_notification, nsmap = rrdp_nsmap,
+ version = rrdp_version,
+ session_id = self.uuid,
+ serial = str(self.serial))
+ SubElement(xml, rrdp_tag_snapshot,
+ uri = self._rrdp_filename_to_uri(self.snapshot_fn, rrdp_base_uri),
+ hash = snapshot_hash)
+ for delta in self.delta_set.all():
+ SubElement(xml, rrdp_tag_delta,
+ uri = self._rrdp_filename_to_uri(delta.fn, rrdp_base_uri),
+ hash = delta.hash,
+ serial = str(delta.serial))
+ rpki.relaxng.rrdp.assertValid(xml)
+ fn = os.path.join(rrdp_publication_base, self.notification_fn)
+ tn = fn + ".%s.tmp" % os.getpid()
+ ElementTree(xml).write(file = tn, pretty_print = True)
+ os.rename(tn, fn)
+
+
+ def synchronize_rrdp_files(self, rrdp_publication_base, rrdp_base_uri):
+ """
+ Write current RRDP files to disk, clean up old files and directories.
+ """
+
+ if os.path.isdir(rrdp_publication_base):
+ current_filenames = set(fn for fn in os.listdir(rrdp_publication_base)
+ if fn.endswith(".cer") or fn.endswith(".tal"))
+ else:
+ current_filenames = set()
+
+ snapshot_hash = self.write_snapshot_file(rrdp_publication_base)
+ current_filenames.add(self.snapshot_fn)
+
+ for delta in self.delta_set.all():
+ current_filenames.add(delta.fn)
+
+ self.write_notification_xml(rrdp_base_uri, snapshot_hash, rrdp_publication_base),
+ current_filenames.add(self.notification_fn)
+
+ for root, dirs, files in os.walk(rrdp_publication_base, topdown = False):
+ for fn in files:
+ fn = os.path.join(root, fn)
+ if fn[len(rrdp_publication_base):].lstrip("/") not in current_filenames:
+ os.remove(fn)
+ for dn in dirs:
+ try:
+ os.rmdir(os.path.join(root, dn))
+ except OSError:
+ pass
+
+
+class Delta(models.Model):
+ serial = models.BigIntegerField()
+ hash = models.CharField(max_length = 64)
+ expires = SundialField()
+ session = models.ForeignKey(Session)
+
+
+ @staticmethod
+ def _uri_to_filename(uri, publication_base):
+ if not uri.startswith("rsync://"):
+ raise rpki.exceptions.BadURISyntax(uri)
+ path = uri.split("/")[4:]
+ path.insert(0, publication_base.rstrip("/"))
+ filename = "/".join(path)
+ if "/../" in filename or filename.endswith("/.."):
+ raise rpki.exceptions.BadURISyntax(filename)
+ return filename
+
+
+ @property
+ def fn(self):
+ return "%s/deltas/%s.xml" % (self.session.uuid, self.serial)
+
+
+ def activate(self, rrdp_publication_base):
+ rpki.relaxng.rrdp.assertValid(self.xml)
+ fn = os.path.join(rrdp_publication_base, self.fn)
+ tn = fn + ".%s.tmp" % os.getpid()
+ dn = os.path.dirname(fn)
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ with open(tn, "wb+") as f:
+ ElementTree(self.xml).write(file = f, pretty_print = True)
+ f.flush()
+ f.seek(0)
+ self.hash = sha256_file(f)
+ os.rename(tn, fn)
+ self.save()
+ self.session.serial += 1
+ self.session.save()
+
+
+ def publish(self, client, der, uri, obj_hash):
+ try:
+ obj = client.publishedobject_set.get(session = self.session, uri = uri)
+ if obj.hash == obj_hash:
+ obj.delete()
+ elif obj_hash is None:
+ raise rpki.exceptions.ExistingObjectAtURI("Object already published at %s" % uri)
+ else:
+ raise rpki.exceptions.DifferentObjectAtURI("Found different object at %s (old %s, new %s)" % (uri, obj.hash, obj_hash))
+ except rpki.pubdb.models.PublishedObject.DoesNotExist:
+ pass
+ logger.debug("Publishing %s", uri)
+ PublishedObject.objects.create(session = self.session, client = client, der = der, uri = uri,
+ hash = rpki.x509.sha256(der).encode("hex"))
+ se = DERSubElement(self.xml, rrdp_tag_publish, der = der, uri = uri)
+ if obj_hash is not None:
+ se.set("hash", obj_hash)
+ rpki.relaxng.rrdp.assertValid(self.xml)
+
+
+ def withdraw(self, client, uri, obj_hash):
+ try:
+ obj = client.publishedobject_set.get(session = self.session, uri = uri)
+ except rpki.pubdb.models.PublishedObject.DoesNotExist:
+ raise rpki.exceptions.NoObjectAtURI("No published object found at %s" % uri)
+ if obj.hash != obj_hash:
+ raise rpki.exceptions.DifferentObjectAtURI("Found different object at %s (old %s, new %s)" % (uri, obj.hash, obj_hash))
+ logger.debug("Withdrawing %s", uri)
+ obj.delete()
+ SubElement(self.xml, rrdp_tag_withdraw, uri = uri, hash = obj_hash).tail = "\n"
+ rpki.relaxng.rrdp.assertValid(self.xml)
+
+
+ def update_rsync_files(self, publication_base):
+ from errno import ENOENT
+ min_path_len = len(publication_base.rstrip("/"))
+ for pdu in self.xml:
+ assert pdu.tag in (rrdp_tag_publish, rrdp_tag_withdraw)
+ fn = self._uri_to_filename(pdu.get("uri"), publication_base)
+ if pdu.tag == rrdp_tag_publish:
+ tn = fn + ".tmp"
+ dn = os.path.dirname(fn)
+ if not os.path.isdir(dn):
+ os.makedirs(dn)
+ with open(tn, "wb") as f:
+ f.write(pdu.text.decode("base64"))
+ os.rename(tn, fn)
+ else:
+ try:
+ os.remove(fn)
+ except OSError, e:
+ if e.errno != ENOENT:
+ raise
+ dn = os.path.dirname(fn)
+ while len(dn) > min_path_len:
+ try:
+ os.rmdir(dn)
+ except OSError:
+ break
+ else:
+ dn = os.path.dirname(dn)
+ del self.xml
+
+
+class PublishedObject(models.Model):
+ uri = models.CharField(max_length = 255)
+ der = models.BinaryField()
+ hash = models.CharField(max_length = 64)
+ client = models.ForeignKey(Client)
+ session = models.ForeignKey(Session)
+
+ class Meta:
+ unique_together = (("session", "hash"),
+ ("session", "uri"))
diff --git a/rpki/publication.py b/rpki/publication.py
index 5fc7f3dd..393e078e 100644
--- a/rpki/publication.py
+++ b/rpki/publication.py
@@ -1,470 +1,84 @@
# $Id$
#
-# Copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
+# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
+# copyright notices and this permission notice appear in all copies.
#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
+# ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
-RPKI "publication" protocol.
+RPKI publication protocol.
"""
-import os
-import errno
import logging
-import rpki.resource_set
+
import rpki.x509
-import rpki.sql
import rpki.exceptions
-import rpki.xml_utils
-import rpki.http
-import rpki.up_down
import rpki.relaxng
-import rpki.sundial
-import rpki.log
logger = logging.getLogger(__name__)
-class publication_namespace(object):
- """
- XML namespace parameters for publication protocol.
- """
-
- xmlns = rpki.relaxng.publication.xmlns
- nsmap = rpki.relaxng.publication.nsmap
-
-class control_elt(rpki.xml_utils.data_elt, rpki.sql.sql_persistent, publication_namespace):
- """
- Virtual class for control channel objects.
- """
-
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Action dispatch handler. This needs special handling because we
- need to make sure that this PDU arrived via the control channel.
- """
- if self.client is not None:
- raise rpki.exceptions.BadQuery("Control query received on client channel")
- rpki.xml_utils.data_elt.serve_dispatch(self, r_msg, cb, eb)
-
-class config_elt(control_elt):
- """
- <config/> element. This is a little weird because there should
- never be more than one row in the SQL config table, but we have to
- put the BPKI CRL somewhere and SQL is the least bad place available.
-
- So we reuse a lot of the SQL machinery, but we nail config_id at 1,
- we don't expose it in the XML protocol, and we only support the get
- and set actions.
- """
-
- attributes = ("action", "tag")
- element_name = "config"
- elements = ("bpki_crl",)
-
- sql_template = rpki.sql.template(
- "config",
- "config_id",
- ("bpki_crl", rpki.x509.CRL))
-
- wired_in_config_id = 1
-
- def startElement(self, stack, name, attrs):
- """
- StartElement() handler for config object. This requires special
- handling because of the weird way we treat config_id.
- """
- control_elt.startElement(self, stack, name, attrs)
- self.config_id = self.wired_in_config_id
-
- @classmethod
- def fetch(cls, gctx):
- """
- Fetch the config object from SQL. This requires special handling
- because of the weird way we treat config_id.
- """
- return cls.sql_fetch(gctx, cls.wired_in_config_id)
-
- def serve_set(self, r_msg, cb, eb):
- """
- Handle a set action. This requires special handling because
- config doesn't support the create method.
- """
- if self.sql_fetch(self.gctx, self.config_id) is None:
- control_elt.serve_create(self, r_msg, cb, eb)
- else:
- control_elt.serve_set(self, r_msg, cb, eb)
-
- def serve_fetch_one_maybe(self):
- """
- Find the config object on which a get or set method should
- operate.
- """
- return self.sql_fetch(self.gctx, self.config_id)
-
-class client_elt(control_elt):
- """
- <client/> element.
- """
-
- element_name = "client"
- attributes = ("action", "tag", "client_handle", "base_uri")
- elements = ("bpki_cert", "bpki_glue")
- booleans = ("clear_replay_protection",)
-
- sql_template = rpki.sql.template(
- "client",
- "client_id",
- "client_handle",
- "base_uri",
- ("bpki_cert", rpki.x509.X509),
- ("bpki_glue", rpki.x509.X509),
- ("last_cms_timestamp", rpki.sundial.datetime))
-
- base_uri = None
- bpki_cert = None
- bpki_glue = None
- last_cms_timestamp = None
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Extra server actions for client_elt.
- """
- actions = []
- if q_pdu.clear_replay_protection:
- actions.append(self.serve_clear_replay_protection)
- def loop(iterator, action):
- action(iterator, eb)
- rpki.async.iterator(actions, loop, cb)
-
- def serve_clear_replay_protection(self, cb, eb):
- """
- Handle a clear_replay_protection action for this client.
- """
- self.last_cms_timestamp = None
- self.sql_mark_dirty()
- cb()
-
- def serve_fetch_one_maybe(self):
- """
- Find the client object on which a get, set, or destroy method
- should operate, or which would conflict with a create method.
- """
- return self.sql_fetch_where1(self.gctx, "client_handle = %s", (self.client_handle,))
-
- def serve_fetch_all(self):
- """
- Find client objects on which a list method should operate.
- """
- return self.sql_fetch_all(self.gctx)
-
- def check_allowed_uri(self, uri):
- """
- Make sure that a target URI is within this client's allowed URI space.
- """
- if not uri.startswith(self.base_uri):
- raise rpki.exceptions.ForbiddenURI
-
-class publication_object_elt(rpki.xml_utils.base_elt, publication_namespace):
- """
- Virtual class for publishable objects. These have very similar
- syntax, differences lie in underlying datatype and methods. XML
- methods are a little different from the pattern used for objects
- that support the create/set/get/list/destroy actions, but
- publishable objects don't go in SQL either so these classes would be
- different in any case.
- """
-
- attributes = ("action", "tag", "client_handle", "uri")
- payload_type = None
- payload = None
+nsmap = rpki.relaxng.publication.nsmap
+version = rpki.relaxng.publication.version
- def endElement(self, stack, name, text):
- """
- Handle a publishable element element.
- """
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- if text:
- self.payload = self.payload_type(Base64 = text) # pylint: disable=E1102
- stack.pop()
+tag_msg = rpki.relaxng.publication.xmlns + "msg"
+tag_list = rpki.relaxng.publication.xmlns + "list"
+tag_publish = rpki.relaxng.publication.xmlns + "publish"
+tag_withdraw = rpki.relaxng.publication.xmlns + "withdraw"
+tag_report_error = rpki.relaxng.publication.xmlns + "report_error"
- def toXML(self):
- """
- Generate XML element for publishable object.
- """
- elt = self.make_elt()
- if self.payload:
- elt.text = self.payload.get_Base64()
- return elt
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Action dispatch handler.
- """
- # pylint: disable=E0203
- try:
- if self.client is None:
- raise rpki.exceptions.BadQuery("Client query received on control channel")
- dispatch = { "publish" : self.serve_publish,
- "withdraw" : self.serve_withdraw }
- if self.action not in dispatch:
- raise rpki.exceptions.BadQuery("Unexpected query: action %s" % self.action)
- self.client.check_allowed_uri(self.uri)
- dispatch[self.action]()
- r_pdu = self.__class__()
- r_pdu.action = self.action
- r_pdu.tag = self.tag
- r_pdu.uri = self.uri
- r_msg.append(r_pdu)
- cb()
- except rpki.exceptions.NoObjectAtURI, e:
- # This can happen when we're cleaning up from a prior mess, so
- # we generate a <report_error/> PDU then carry on.
- r_msg.append(report_error_elt.from_exception(e, self.tag))
- cb()
+## @var content_type
+# Content type to use when sending publication protocol queries
+content_type = "application/x-rpki"
- def serve_publish(self):
- """
- Publish an object.
- """
- logger.info("Publishing %s", self.payload.tracking_data(self.uri))
- filename = self.uri_to_filename()
- filename_tmp = filename + ".tmp"
- dirname = os.path.dirname(filename)
- if not os.path.isdir(dirname):
- os.makedirs(dirname)
- f = open(filename_tmp, "wb")
- f.write(self.payload.get_DER())
- f.close()
- os.rename(filename_tmp, filename)
+## @var allowed_content_types
+# Content types we consider acceptable for incoming publication
+# protocol queries.
- def serve_withdraw(self):
- """
- Withdraw an object, then recursively delete empty directories.
- """
- logger.info("Withdrawing %s", self.uri)
- filename = self.uri_to_filename()
- try:
- os.remove(filename)
- except OSError, e:
- if e.errno == errno.ENOENT:
- raise rpki.exceptions.NoObjectAtURI("No object published at %s" % self.uri)
- else:
- raise
- min_path_len = len(self.gctx.publication_base.rstrip("/"))
- dirname = os.path.dirname(filename)
- while len(dirname) > min_path_len:
- try:
- os.rmdir(dirname)
- except OSError:
- break
- else:
- dirname = os.path.dirname(dirname)
+allowed_content_types = (content_type,)
- def uri_to_filename(self):
- """
- Convert a URI to a local filename.
- """
- if not self.uri.startswith("rsync://"):
- raise rpki.exceptions.BadURISyntax(self.uri)
- path = self.uri.split("/")[3:]
- if not self.gctx.publication_multimodule:
- del path[0]
- path.insert(0, self.gctx.publication_base.rstrip("/"))
- filename = "/".join(path)
- if "/../" in filename or filename.endswith("/.."):
- raise rpki.exceptions.BadURISyntax(filename)
- return filename
- @classmethod
- def make_publish(cls, uri, obj, tag = None):
- """
- Construct a publication PDU.
+def raise_if_error(pdu):
"""
- assert cls.payload_type is not None and type(obj) is cls.payload_type
- return cls.make_pdu(action = "publish", uri = uri, payload = obj, tag = tag)
+ Raise an appropriate error if this is a <report_error/> PDU.
- @classmethod
- def make_withdraw(cls, uri, obj, tag = None):
+ As a convenience, this will also accept a <msg/> PDU and raise an
+ appropriate error if it contains any <report_error/> PDUs or if
+ the <msg/> is not a reply.
"""
- Construct a withdrawal PDU.
- """
- assert cls.payload_type is not None and type(obj) is cls.payload_type
- return cls.make_pdu(action = "withdraw", uri = uri, tag = tag)
-
- def raise_if_error(self):
- """
- No-op, since this is not a <report_error/> PDU.
- """
- pass
-
-class certificate_elt(publication_object_elt):
- """
- <certificate/> element.
- """
-
- element_name = "certificate"
- payload_type = rpki.x509.X509
-
-class crl_elt(publication_object_elt):
- """
- <crl/> element.
- """
-
- element_name = "crl"
- payload_type = rpki.x509.CRL
-
-class manifest_elt(publication_object_elt):
- """
- <manifest/> element.
- """
-
- element_name = "manifest"
- payload_type = rpki.x509.SignedManifest
-
-class roa_elt(publication_object_elt):
- """
- <roa/> element.
- """
-
- element_name = "roa"
- payload_type = rpki.x509.ROA
-class ghostbuster_elt(publication_object_elt):
- """
- <ghostbuster/> element.
- """
+ if pdu.tag == tag_report_error:
+ code = pdu.get("error_code")
+ logger.debug("<report_error/> code %r", code)
+ e = getattr(rpki.exceptions, code, None)
+ if e is not None and issubclass(e, rpki.exceptions.RPKI_Exception):
+ raise e(pdu.text)
+ else:
+ raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %r, %r" % (code, pdu))
- element_name = "ghostbuster"
- payload_type = rpki.x509.Ghostbuster
+ if pdu.tag == tag_msg:
+ if pdu.get("type") != "reply":
+ raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: expected reply, got %r" % pdu.get("type"))
+ for p in pdu:
+ raise_if_error(p)
-publication_object_elt.obj2elt = dict(
- (e.payload_type, e) for e in
- (certificate_elt, crl_elt, manifest_elt, roa_elt, ghostbuster_elt))
-class report_error_elt(rpki.xml_utils.text_elt, publication_namespace):
- """
- <report_error/> element.
- """
-
- element_name = "report_error"
- attributes = ("tag", "error_code")
- text_attribute = "error_text"
-
- error_text = None
-
- @classmethod
- def from_exception(cls, e, tag = None):
- """
- Generate a <report_error/> element from an exception.
- """
- self = cls()
- self.tag = tag
- self.error_code = e.__class__.__name__
- self.error_text = str(e)
- return self
-
- def __str__(self):
- s = ""
- if getattr(self, "tag", None) is not None:
- s += "[%s] " % self.tag
- s += self.error_code
- if getattr(self, "error_text", None) is not None:
- s += ": " + self.error_text
- return s
-
- def raise_if_error(self):
- """
- Raise exception associated with this <report_error/> PDU.
- """
- t = rpki.exceptions.__dict__.get(self.error_code)
- if isinstance(t, type) and issubclass(t, rpki.exceptions.RPKI_Exception):
- raise t(getattr(self, "text", None))
- else:
- raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %s" % self)
-
-class msg(rpki.xml_utils.msg, publication_namespace):
- """
- Publication PDU.
- """
-
- ## @var version
- # Protocol version
- version = int(rpki.relaxng.publication.version)
-
- ## @var pdus
- # Dispatch table of PDUs for this protocol.
- pdus = dict((x.element_name, x) for x in
- (config_elt, client_elt, certificate_elt, crl_elt, manifest_elt, roa_elt, ghostbuster_elt, report_error_elt))
-
- def serve_top_level(self, gctx, client, cb):
+class cms_msg(rpki.x509.XML_CMS_object):
"""
- Serve one msg PDU.
+ CMS-signed publication PDU.
"""
- if not self.is_query():
- raise rpki.exceptions.BadQuery("Message type is not query")
- r_msg = self.__class__.reply()
-
- def loop(iterator, q_pdu):
-
- def fail(e):
- if not isinstance(e, rpki.exceptions.NotFound):
- logger.exception("Exception processing PDU %r", q_pdu)
- r_msg.append(report_error_elt.from_exception(e, q_pdu.tag))
- cb(r_msg)
-
- try:
- q_pdu.gctx = gctx
- q_pdu.client = client
- q_pdu.serve_dispatch(r_msg, iterator, fail)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- fail(e)
-
- def done():
- cb(r_msg)
-
- rpki.async.iterator(self, loop, done)
-
-class sax_handler(rpki.xml_utils.sax_handler):
- """
- SAX handler for publication protocol.
- """
-
- pdu = msg
- name = "msg"
- version = rpki.relaxng.publication.version
-
-
-class cms_msg(rpki.x509.XML_CMS_object):
- """
- Class to hold a CMS-signed publication PDU.
- """
- encoding = "us-ascii"
- schema = rpki.relaxng.publication
- saxify = sax_handler.saxify
+ encoding = "us-ascii"
+ schema = rpki.relaxng.publication
diff --git a/rpki/publication_control.py b/rpki/publication_control.py
new file mode 100644
index 00000000..b0668eef
--- /dev/null
+++ b/rpki/publication_control.py
@@ -0,0 +1,74 @@
+# $Id$
+#
+# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
+# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
+# ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+RPKI publication control protocol.
+
+Per IETF SIDR WG discussion, this is now separate from the publication
+protocol itself.
+"""
+
+import logging
+
+import rpki.x509
+import rpki.exceptions
+import rpki.relaxng
+
+logger = logging.getLogger(__name__)
+
+
+nsmap = rpki.relaxng.publication_control.nsmap
+version = rpki.relaxng.publication_control.version
+
+tag_msg = rpki.relaxng.publication_control.xmlns + "msg"
+tag_client = rpki.relaxng.publication_control.xmlns + "client"
+tag_bpki_cert = rpki.relaxng.publication_control.xmlns + "bpki_cert"
+tag_bpki_glue = rpki.relaxng.publication_control.xmlns + "bpki_glue"
+tag_report_error = rpki.relaxng.publication_control.xmlns + "report_error"
+
+
+def raise_if_error(pdu):
+ """
+ Raise an appropriate error if this is a <report_error/> PDU.
+
+ As a convenience, this will also accept a <msg/> PDU and raise an
+ appropriate error if it contains any <report_error/> PDUs.
+ """
+
+ if pdu.tag == tag_report_error:
+ code = pdu.get("error_code")
+ logger.debug("<report_error/> code %r", code)
+ e = getattr(rpki.exceptions, code, None)
+ if e is not None and issubclass(e, rpki.exceptions.RPKI_Exception):
+ raise e(pdu.text)
+ else:
+ raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %r, %r" % (code, pdu))
+
+ if pdu.tag == tag_msg:
+ for p in pdu:
+ raise_if_error(p)
+
+
+class cms_msg(rpki.x509.XML_CMS_object):
+ """
+ CMS-signed publication control PDU.
+ """
+
+ encoding = "us-ascii"
+ schema = rpki.relaxng.publication_control
diff --git a/rpki/rcynic.py b/rpki/rcynic.py
index 10ad7516..c6ad60d5 100644
--- a/rpki/rcynic.py
+++ b/rpki/rcynic.py
@@ -25,251 +25,258 @@ import rpki.resource_set
from xml.etree.ElementTree import ElementTree
class UnknownObject(rpki.exceptions.RPKI_Exception):
- """
- Unrecognized object in rcynic result cache.
- """
+ """
+ Unrecognized object in rcynic result cache.
+ """
class NotRsyncURI(rpki.exceptions.RPKI_Exception):
- """
- URI is not an rsync URI.
- """
+ """
+ URI is not an rsync URI.
+ """
class rcynic_object(object):
- """
- An object read from rcynic cache.
- """
+ """
+ An object read from rcynic cache.
+ """
- def __init__(self, filename, **kwargs):
- self.filename = filename
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
- self.obj = self.obj_class(DER_file = filename)
+ def __init__(self, filename, **kwargs):
+ self.filename = filename
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+ self.obj = self.obj_class(DER_file = filename) # pylint: disable=E1101
- def __repr__(self):
- return "<%s %s %s at 0x%x>" % (self.__class__.__name__, self.uri, self.resources, id(self))
+ def __repr__(self):
+ # pylint: disable=E1101
+ return "<%s %s %s at 0x%x>" % (self.__class__.__name__, self.uri, self.resources, id(self))
- def show_attrs(self, *attrs):
- """
- Print a bunch of object attributes, quietly ignoring any that
- might be missing.
- """
- for a in attrs:
- try:
- print "%s: %s" % (a.capitalize(), getattr(self, a))
- except AttributeError:
- pass
+ def show_attrs(self, *attrs):
+ """
+ Print a bunch of object attributes, quietly ignoring any that
+ might be missing.
+ """
- def show(self):
- """
- Print common object attributes.
- """
- self.show_attrs("filename", "uri", "status", "timestamp")
+ for a in attrs:
+ try:
+ print "%s: %s" % (a.capitalize(), getattr(self, a))
+ except AttributeError:
+ pass
+
+ def show(self):
+ """
+ Print common object attributes.
+ """
+
+ self.show_attrs("filename", "uri", "status", "timestamp")
class rcynic_certificate(rcynic_object):
- """
- A certificate from rcynic cache.
- """
-
- obj_class = rpki.x509.X509
-
- def __init__(self, filename, **kwargs):
- rcynic_object.__init__(self, filename, **kwargs)
- self.notBefore = self.obj.getNotBefore()
- self.notAfter = self.obj.getNotAfter()
- self.aia_uri = self.obj.get_aia_uri()
- self.sia_directory_uri = self.obj.get_sia_directory_uri()
- self.manifest_uri = self.obj.get_sia_manifest_uri()
- self.resources = self.obj.get_3779resources()
- self.is_ca = self.obj.is_CA()
- self.serial = self.obj.getSerial()
- self.issuer = self.obj.getIssuer()
- self.subject = self.obj.getSubject()
- self.ski = self.obj.hSKI()
- self.aki = self.obj.hAKI()
-
- def show(self):
"""
- Print certificate attributes.
+ A certificate from rcynic cache.
"""
- rcynic_object.show(self)
- self.show_attrs("notBefore", "notAfter", "aia_uri", "sia_directory_uri", "resources")
+
+ obj_class = rpki.x509.X509
+
+ def __init__(self, filename, **kwargs):
+ rcynic_object.__init__(self, filename, **kwargs)
+ self.notBefore = self.obj.getNotBefore()
+ self.notAfter = self.obj.getNotAfter()
+ self.aia_uri = self.obj.get_aia_uri()
+ self.sia_directory_uri = self.obj.get_sia_directory_uri()
+ self.manifest_uri = self.obj.get_sia_manifest_uri()
+ self.resources = self.obj.get_3779resources()
+ self.is_ca = self.obj.is_CA()
+ self.serial = self.obj.getSerial()
+ self.issuer = self.obj.getIssuer()
+ self.subject = self.obj.getSubject()
+ self.ski = self.obj.hSKI()
+ self.aki = self.obj.hAKI()
+
+ def show(self):
+ """
+ Print certificate attributes.
+ """
+
+ rcynic_object.show(self)
+ self.show_attrs("notBefore", "notAfter", "aia_uri", "sia_directory_uri", "resources")
class rcynic_roa(rcynic_object):
- """
- A ROA from rcynic cache.
- """
-
- obj_class = rpki.x509.ROA
-
- def __init__(self, filename, **kwargs):
- rcynic_object.__init__(self, filename, **kwargs)
- self.obj.extract()
- self.asID = self.obj.get_POW().getASID()
- self.prefix_sets = []
- v4, v6 = self.obj.get_POW().getPrefixes()
- if v4:
- self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv4([
- rpki.resource_set.roa_prefix_ipv4(p[0], p[1], p[2]) for p in v4]))
- if v6:
- self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv6([
- rpki.resource_set.roa_prefix_ipv6(p[0], p[1], p[2]) for p in v6]))
- self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0])
- self.notBefore = self.ee.getNotBefore()
- self.notAfter = self.ee.getNotAfter()
- self.aia_uri = self.ee.get_aia_uri()
- self.resources = self.ee.get_3779resources()
- self.issuer = self.ee.getIssuer()
- self.serial = self.ee.getSerial()
- self.subject = self.ee.getSubject()
- self.aki = self.ee.hAKI()
- self.ski = self.ee.hSKI()
-
- def show(self):
"""
- Print ROA attributes.
+ A ROA from rcynic cache.
"""
- rcynic_object.show(self)
- self.show_attrs("notBefore", "notAfter", "aia_uri", "resources", "asID")
- if self.prefix_sets:
- print "Prefixes:", ",".join(str(i) for i in self.prefix_sets)
+
+ obj_class = rpki.x509.ROA
+
+ def __init__(self, filename, **kwargs):
+ rcynic_object.__init__(self, filename, **kwargs)
+ self.obj.extract()
+ self.asID = self.obj.get_POW().getASID()
+ self.prefix_sets = []
+ v4, v6 = self.obj.get_POW().getPrefixes()
+ if v4:
+ self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv4([
+ rpki.resource_set.roa_prefix_ipv4(p[0], p[1], p[2]) for p in v4]))
+ if v6:
+ self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv6([
+ rpki.resource_set.roa_prefix_ipv6(p[0], p[1], p[2]) for p in v6]))
+ self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0])
+ self.notBefore = self.ee.getNotBefore()
+ self.notAfter = self.ee.getNotAfter()
+ self.aia_uri = self.ee.get_aia_uri()
+ self.resources = self.ee.get_3779resources()
+ self.issuer = self.ee.getIssuer()
+ self.serial = self.ee.getSerial()
+ self.subject = self.ee.getSubject()
+ self.aki = self.ee.hAKI()
+ self.ski = self.ee.hSKI()
+
+ def show(self):
+ """
+ Print ROA attributes.
+ """
+
+ rcynic_object.show(self)
+ self.show_attrs("notBefore", "notAfter", "aia_uri", "resources", "asID")
+ if self.prefix_sets:
+ print "Prefixes:", ",".join(str(i) for i in self.prefix_sets)
class rcynic_ghostbuster(rcynic_object):
- """
- Ghostbuster record from the rcynic cache.
- """
-
- obj_class = rpki.x509.Ghostbuster
-
- def __init__(self, *args, **kwargs):
- rcynic_object.__init__(self, *args, **kwargs)
- self.obj.extract()
- self.vcard = self.obj.get_content()
- self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0])
- self.notBefore = self.ee.getNotBefore()
- self.notAfter = self.ee.getNotAfter()
- self.aia_uri = self.ee.get_aia_uri()
- self.issuer = self.ee.getIssuer()
- self.serial = self.ee.getSerial()
- self.subject = self.ee.getSubject()
- self.aki = self.ee.hAKI()
- self.ski = self.ee.hSKI()
-
- def show(self):
- rcynic_object.show(self)
- self.show_attrs("notBefore", "notAfter", "vcard")
+ """
+ Ghostbuster record from the rcynic cache.
+ """
+
+ obj_class = rpki.x509.Ghostbuster
+
+ def __init__(self, *args, **kwargs):
+ rcynic_object.__init__(self, *args, **kwargs)
+ self.obj.extract()
+ self.vcard = self.obj.get_content()
+ self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0])
+ self.notBefore = self.ee.getNotBefore()
+ self.notAfter = self.ee.getNotAfter()
+ self.aia_uri = self.ee.get_aia_uri()
+ self.issuer = self.ee.getIssuer()
+ self.serial = self.ee.getSerial()
+ self.subject = self.ee.getSubject()
+ self.aki = self.ee.hAKI()
+ self.ski = self.ee.hSKI()
+
+ def show(self):
+ rcynic_object.show(self)
+ self.show_attrs("notBefore", "notAfter", "vcard")
file_name_classes = {
- ".cer" : rcynic_certificate,
- ".gbr" : rcynic_ghostbuster,
- ".roa" : rcynic_roa }
+ ".cer" : rcynic_certificate,
+ ".gbr" : rcynic_ghostbuster,
+ ".roa" : rcynic_roa }
class rcynic_file_iterator(object):
- """
- Iterate over files in an rcynic output tree, yielding a Python
- representation of each object found.
- """
-
- def __init__(self, rcynic_root,
- authenticated_subdir = "authenticated"):
- self.rcynic_dir = os.path.join(rcynic_root, authenticated_subdir)
-
- def __iter__(self):
- for root, dirs, files in os.walk(self.rcynic_dir): # pylint: disable=W0612
- for filename in files:
- filename = os.path.join(root, filename)
- ext = os.path.splitext(filename)[1]
- if ext in file_name_classes:
- yield file_name_classes[ext](filename)
+ """
+ Iterate over files in an rcynic output tree, yielding a Python
+ representation of each object found.
+ """
+
+ def __init__(self, rcynic_root,
+ authenticated_subdir = "authenticated"):
+ self.rcynic_dir = os.path.join(rcynic_root, authenticated_subdir)
+
+ def __iter__(self):
+ # pylint: disable=W0612
+ for root, dirs, files in os.walk(self.rcynic_dir):
+ for filename in files:
+ filename = os.path.join(root, filename)
+ ext = os.path.splitext(filename)[1]
+ if ext in file_name_classes:
+ yield file_name_classes[ext](filename)
class validation_status_element(object):
- def __init__(self, *args, **kwargs):
- self.attrs = []
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
- # attribute names are saved so that the __repr__ method can
- # display the subset of attributes the user specified
- self.attrs.append(k)
- self._obj = None
-
- def get_obj(self):
- if not self._obj:
- self._obj = self.file_class(filename=self.filename, uri=self.uri)
- return self._obj
-
- def __repr__(self):
- v = [self.__class__.__name__, 'id=%s' % str(id(self))]
- v.extend(['%s=%s' % (x, getattr(self, x)) for x in self.attrs])
- return '<%s>' % (' '.join(v),)
-
- obj = property(get_obj)
+ def __init__(self, *args, **kwargs):
+ self.attrs = []
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+ # attribute names are saved so that the __repr__ method can
+ # display the subset of attributes the user specified
+ self.attrs.append(k)
+ self._obj = None
+
+ def get_obj(self):
+ # pylint: disable=E1101
+ if not self._obj:
+ self._obj = self.file_class(filename=self.filename, uri=self.uri)
+ return self._obj
+
+ def __repr__(self):
+ v = [self.__class__.__name__, 'id=%s' % str(id(self))]
+ v.extend(['%s=%s' % (x, getattr(self, x)) for x in self.attrs])
+ return '<%s>' % (' '.join(v),)
+
+ obj = property(get_obj)
class rcynic_xml_iterator(object):
- """
- Iterate over validation_status entries in the XML output from an
- rcynic run. Yields a tuple for each entry:
-
- timestamp, generation, status, object
-
- where URI, status, and timestamp are the corresponding values from
- the XML element, OK is a boolean indicating whether validation was
- considered succesful, and object is a Python representation of the
- object in question. If OK is True, object will be from rcynic's
- authenticated output tree; otherwise, object will be from rcynic's
- unauthenticated output tree.
-
- Note that it is possible for the same URI to appear in more than one
- validation_status element; in such cases, the succesful case (OK
- True) should be the last entry (as rcynic will stop trying once it
- gets a good copy), but there may be multiple failures, which might
- or might not have different status codes.
- """
-
- def __init__(self, rcynic_root, xml_file,
- authenticated_old_subdir = "authenticated.old",
- unauthenticated_subdir = "unauthenticated"):
- self.rcynic_root = rcynic_root
- self.xml_file = xml_file
- self.authenticated_subdir = os.path.join(rcynic_root, 'authenticated')
- self.authenticated_old_subdir = os.path.join(rcynic_root, authenticated_old_subdir)
- self.unauthenticated_subdir = os.path.join(rcynic_root, unauthenticated_subdir)
-
- base_uri = "rsync://"
-
- def uri_to_filename(self, uri):
- if uri.startswith(self.base_uri):
- return uri[len(self.base_uri):]
- else:
- raise NotRsyncURI("Not an rsync URI %r" % uri)
-
- def __iter__(self):
- for validation_status in ElementTree(file=self.xml_file).getroot().getiterator("validation_status"):
- timestamp = validation_status.get("timestamp")
- status = validation_status.get("status")
- uri = validation_status.text.strip()
- generation = validation_status.get("generation")
-
- # determine the path to this object
- if status == 'object_accepted':
- d = self.authenticated_subdir
- elif generation == 'backup':
- d = self.authenticated_old_subdir
- else:
- d = self.unauthenticated_subdir
-
- filename = os.path.join(d, self.uri_to_filename(uri))
-
- ext = os.path.splitext(filename)[1]
- if ext in file_name_classes:
- yield validation_status_element(timestamp = timestamp, generation = generation,
- uri=uri, status = status, filename = filename,
- file_class = file_name_classes[ext])
+ """
+ Iterate over validation_status entries in the XML output from an
+ rcynic run. Yields a tuple for each entry:
+
+ timestamp, generation, status, object
+
+ where URI, status, and timestamp are the corresponding values from
+ the XML element, OK is a boolean indicating whether validation was
+ considered successful, and object is a Python representation of the
+ object in question. If OK is True, object will be from rcynic's
+ authenticated output tree; otherwise, object will be from rcynic's
+ unauthenticated output tree.
+
+ Note that it is possible for the same URI to appear in more than one
+ validation_status element; in such cases, the successful case (OK
+ True) should be the last entry (as rcynic will stop trying once it
+ gets a good copy), but there may be multiple failures, which might
+ or might not have different status codes.
+ """
+
+ def __init__(self, rcynic_root, xml_file,
+ authenticated_old_subdir = "authenticated.old",
+ unauthenticated_subdir = "unauthenticated"):
+ self.rcynic_root = rcynic_root
+ self.xml_file = xml_file
+ self.authenticated_subdir = os.path.join(rcynic_root, 'authenticated')
+ self.authenticated_old_subdir = os.path.join(rcynic_root, authenticated_old_subdir)
+ self.unauthenticated_subdir = os.path.join(rcynic_root, unauthenticated_subdir)
+
+ base_uri = "rsync://"
+
+ def uri_to_filename(self, uri):
+ if uri.startswith(self.base_uri):
+ return uri[len(self.base_uri):]
+ else:
+ raise NotRsyncURI("Not an rsync URI %r" % uri)
+
+ def __iter__(self):
+ for validation_status in ElementTree(file=self.xml_file).getroot().getiterator("validation_status"):
+ timestamp = validation_status.get("timestamp")
+ status = validation_status.get("status")
+ uri = validation_status.text.strip()
+ generation = validation_status.get("generation")
+
+ # determine the path to this object
+ if status == 'object_accepted':
+ d = self.authenticated_subdir
+ elif generation == 'backup':
+ d = self.authenticated_old_subdir
+ else:
+ d = self.unauthenticated_subdir
+
+ filename = os.path.join(d, self.uri_to_filename(uri))
+
+ ext = os.path.splitext(filename)[1]
+ if ext in file_name_classes:
+ yield validation_status_element(timestamp = timestamp, generation = generation,
+ uri=uri, status = status, filename = filename,
+ file_class = file_name_classes[ext])
def label_iterator(xml_file):
- """
- Returns an iterator which contains all defined labels from an rcynic XML
- output file. Each item is a tuple of the form
- (label, kind, description).
- """
-
- for label in ElementTree(file=xml_file).find("labels"):
- yield label.tag, label.get("kind"), label.text.strip()
+ """
+ Returns an iterator which contains all defined labels from an rcynic XML
+ output file. Each item is a tuple of the form
+ (label, kind, description).
+ """
+
+ for label in ElementTree(file=xml_file).find("labels"):
+ yield label.tag, label.get("kind"), label.text.strip()
diff --git a/rpki/rcynicdb/__init__.py b/rpki/rcynicdb/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/rcynicdb/__init__.py
diff --git a/rpki/rcynicdb/iterator.py b/rpki/rcynicdb/iterator.py
new file mode 100644
index 00000000..a754ed72
--- /dev/null
+++ b/rpki/rcynicdb/iterator.py
@@ -0,0 +1,49 @@
+"""
+rcynic database iterator.
+
+At least for the moment, we attempt to provide an iterator that works
+with both old-style (directory tree of file objects with names similar
+to what wget would use) and new style (Django ORM) databases.
+"""
+
+import os
+
+initialized_django = False
+
+def _uri_to_class(uri, class_map):
+ return class_map[uri[uri.rindex(".")+1:]]
+
+def authenticated_objects(directory_tree = None, uri_suffix = None, class_map = None):
+
+ if class_map is None:
+ import rpki.POW
+ class_map = dict(cer = rpki.POW.X509,
+ crl = rpki.POW.CRL,
+ gbr = rpki.POW.CMS,
+ mft = rpki.POW.Manifest,
+ roa = rpki.POW.ROA)
+
+ if directory_tree:
+ for head, dirs, files in os.walk(directory_tree):
+ for fn in files:
+ if uri_suffix is None or fn.endswith(uri_suffix):
+ fn = os.path.join(head, fn)
+ uri = "rsync://" + fn[len(directory_tree):].lstrip("/")
+ yield uri, _uri_to_class(uri, class_map).derReadFile(fn)
+ return
+
+ global initialized_django
+ if not initialized_django:
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.rcynic")
+ import django
+ django.setup()
+ initialized_django = True
+
+ import rpki.rcynicdb
+ auth = rpki.rcynicdb.models.Authenticated.objects.order_by("-started").first()
+ if auth is None:
+ return
+
+ q = auth.rpkiobject_set
+ for obj in q.filter(uri__endswith = uri_suffix) if uri_suffix else q.all():
+ yield obj.uri, _uri_to_class(obj.uri, class_map).derRead(obj.der)
diff --git a/rpki/rcynicdb/migrations/0001_initial.py b/rpki/rcynicdb/migrations/0001_initial.py
new file mode 100644
index 00000000..5f60253b
--- /dev/null
+++ b/rpki/rcynicdb/migrations/0001_initial.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Authenticated',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('started', models.DateTimeField()),
+ ('finished', models.DateTimeField(null=True)),
+ ],
+ ),
+ migrations.CreateModel(
+ name='Retrieval',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('uri', models.TextField()),
+ ('started', models.DateTimeField()),
+ ('finished', models.DateTimeField()),
+ ('successful', models.BooleanField()),
+ ],
+ ),
+ migrations.CreateModel(
+ name='RPKIObject',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('der', models.BinaryField()),
+ ('uri', models.TextField()),
+ ('aki', models.SlugField(max_length=40)),
+ ('ski', models.SlugField(max_length=40)),
+ ('sha256', models.SlugField(unique=True, max_length=64)),
+ ('authenticated', models.ManyToManyField(to='rcynicdb.Authenticated')),
+ ('retrieved', models.ForeignKey(to='rcynicdb.Retrieval')),
+ ],
+ ),
+ migrations.CreateModel(
+ name='RRDPSnapshot',
+ fields=[
+ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
+ ('session_id', models.UUIDField()),
+ ('serial', models.BigIntegerField()),
+ ('retrieved', models.OneToOneField(to='rcynicdb.Retrieval')),
+ ],
+ ),
+ migrations.AddField(
+ model_name='rpkiobject',
+ name='snapshot',
+ field=models.ManyToManyField(to='rcynicdb.RRDPSnapshot'),
+ ),
+ ]
diff --git a/rpki/rcynicdb/migrations/0002_auto_20160227_2003.py b/rpki/rcynicdb/migrations/0002_auto_20160227_2003.py
new file mode 100644
index 00000000..9c3acecb
--- /dev/null
+++ b/rpki/rcynicdb/migrations/0002_auto_20160227_2003.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('rcynicdb', '0001_initial'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='retrieval',
+ name='finished',
+ field=models.DateTimeField(null=True),
+ ),
+ migrations.AlterField(
+ model_name='retrieval',
+ name='successful',
+ field=models.BooleanField(default=False),
+ ),
+ migrations.AlterField(
+ model_name='rrdpsnapshot',
+ name='retrieved',
+ field=models.OneToOneField(null=True, to='rcynicdb.Retrieval'),
+ ),
+ ]
diff --git a/rpki/rcynicdb/migrations/0003_auto_20160301_0333.py b/rpki/rcynicdb/migrations/0003_auto_20160301_0333.py
new file mode 100644
index 00000000..ea6e5499
--- /dev/null
+++ b/rpki/rcynicdb/migrations/0003_auto_20160301_0333.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('rcynicdb', '0002_auto_20160227_2003'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='retrieval',
+ name='finished',
+ field=models.DateTimeField(),
+ ),
+ migrations.AlterField(
+ model_name='retrieval',
+ name='successful',
+ field=models.BooleanField(),
+ ),
+ ]
diff --git a/rpki/rcynicdb/migrations/__init__.py b/rpki/rcynicdb/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/rcynicdb/migrations/__init__.py
diff --git a/rpki/rcynicdb/models.py b/rpki/rcynicdb/models.py
new file mode 100644
index 00000000..9a790230
--- /dev/null
+++ b/rpki/rcynicdb/models.py
@@ -0,0 +1,81 @@
+# First cut at ORM models for rcynicng.
+
+from django.db import models
+
+# HTTP/HTTPS/RSYNC fetch event.
+
+class Retrieval(models.Model):
+ uri = models.TextField()
+ started = models.DateTimeField()
+ finished = models.DateTimeField()
+ successful = models.BooleanField()
+
+ def __repr__(self):
+ try:
+ return "<Retrieval: {0.uri} started {0.started} finished {0.finished} successful {0.successful}>".format(self)
+ except:
+ return "<Retrieval: {}>".format(id(self))
+
+# Collection of validated objects.
+
+class Authenticated(models.Model):
+ started = models.DateTimeField()
+ finished = models.DateTimeField(null = True)
+
+ def __repr__(self):
+ try:
+ return "<Authenticated: started {0.started} finished {0.finished}>".format(self)
+ except:
+ return "<Authenticated: {}>".format(id(self))
+
+# One instance of an RRDP snapshot.
+
+class RRDPSnapshot(models.Model):
+ session_id = models.UUIDField()
+ serial = models.BigIntegerField()
+ retrieved = models.OneToOneField(Retrieval, null = True)
+
+ def __repr__(self):
+ try:
+ return "<RRDPSnapshot: serial {0.serial} session_id {0.session_id} retrieved {0.retrieved!r}>".format(self)
+ except:
+ return "<RRDPSnapshot: {}>".format(id(self))
+
+
+# RPKI objects.
+#
+# Might need to add an on_delete argument to the ForeignKey for the
+# retrieved field: the default behavior is CASCADE, which may not be
+# what we want in this case.
+#
+# https://docs.djangoproject.com/en/1.9/ref/models/fields/#django.db.models.ForeignKey.on_delete
+#
+# Might also want to provide names for the reverse relationships, code
+# uses blah_set for now.
+
+# Setting unique = True on the der field breaks with PostgreSQL, see
+# https://code.djangoproject.com/ticket/14904
+#
+# In theory collisions on sha256 are possible, but in practice they're
+# not going to occur by accident. Setting unique = True on the sha256
+# field risks deliberate collisions, defending against that would
+# require detecting the collision and figuring out which is the
+# attacking object (easy in theory, as it probably won't validate),
+# then figuring out what to do about it (possibly harder -- do we drop
+# an entire RRDP zone because of one evil object?).
+
+class RPKIObject(models.Model):
+ der = models.BinaryField() # unique = True
+ uri = models.TextField()
+ aki = models.SlugField(max_length = 40) # hex SHA-1
+ ski = models.SlugField(max_length = 40) # hex SHA-1
+ sha256 = models.SlugField(max_length = 64, unique = True) # hex SHA-256
+ retrieved = models.ForeignKey(Retrieval)
+ authenticated = models.ManyToManyField(Authenticated)
+ snapshot = models.ManyToManyField(RRDPSnapshot)
+
+ def __repr__(self):
+ try:
+ return "<RPKIObject: uri {0.uri} sha256 {0.sha256} ski {0.ski} aki {0.aki} retrieved {0.retrieved!r}>".format(self)
+ except:
+ return "<RPKIObject: {}>".format(id(self))
diff --git a/rpki/relaxng.py b/rpki/relaxng.py
index e43384e7..9a01306c 100644
--- a/rpki/relaxng.py
+++ b/rpki/relaxng.py
@@ -1,12 +1,14 @@
# Automatically generated, do not edit.
+# pylint: skip-file
+
from rpki.relaxng_parser import RelaxNGParser
## @var left_right
## Parsed RelaxNG left_right schema
left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: left-right-schema.rnc 5902 2014-07-18 16:37:04Z sra $
+ $Id$
RelaxNG schema for RPKI left-right protocol.
@@ -61,7 +63,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</start>
<!-- PDUs allowed in a query -->
<define name="query_elt" combine="choice">
- <ref name="self_query"/>
+ <ref name="tenant_query"/>
</define>
<define name="query_elt" combine="choice">
<ref name="bsc_query"/>
@@ -95,7 +97,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</define>
<!-- PDUs allowed in a reply -->
<define name="reply_elt" combine="choice">
- <ref name="self_reply"/>
+ <ref name="tenant_reply"/>
</define>
<define name="reply_elt" combine="choice">
<ref name="bsc_reply"/>
@@ -227,8 +229,8 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<param name="pattern">[\-,0-9/:a-fA-F]*</param>
</data>
</define>
- <!-- <self/> element -->
- <define name="self_bool">
+ <!-- <tenant/> element -->
+ <define name="tenant_bool">
<optional>
<attribute name="rekey">
<value>yes</value>
@@ -265,7 +267,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</attribute>
</optional>
</define>
- <define name="self_payload">
+ <define name="tenant_payload">
<optional>
<attribute name="use_hsm">
<choice>
@@ -295,74 +297,74 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</element>
</optional>
</define>
- <define name="self_handle">
- <attribute name="self_handle">
+ <define name="tenant_handle">
+ <attribute name="tenant_handle">
<ref name="object_handle"/>
</attribute>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
- <ref name="self_bool"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_bool"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
- <ref name="self_bool"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_bool"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_list"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<!-- <bsc/> element. Key parameters hardwired for now. -->
@@ -415,7 +417,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_bool"/>
<ref name="bsc_payload"/>
@@ -424,7 +426,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_readonly"/>
</element>
@@ -432,7 +434,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_bool"/>
<ref name="bsc_payload"/>
@@ -441,7 +443,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_readonly"/>
</element>
@@ -449,14 +451,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_payload"/>
<ref name="bsc_readonly"/>
@@ -465,13 +467,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_payload"/>
<ref name="bsc_readonly"/>
@@ -480,14 +482,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
@@ -552,12 +554,34 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</attribute>
</optional>
<optional>
- <element name="bpki_cms_cert">
+ <attribute name="root_asn_resources">
+ <ref name="asn_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="root_ipv4_resources">
+ <ref name="ipv4_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="root_ipv6_resources">
+ <ref name="ipv6_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <element name="bpki_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="bpki_glue">
<ref name="base64"/>
</element>
</optional>
+ </define>
+ <define name="parent_readonly">
<optional>
- <element name="bpki_cms_glue">
+ <element name="rpki_root_cert">
<ref name="base64"/>
</element>
</optional>
@@ -565,7 +589,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_bool"/>
<ref name="parent_payload"/>
@@ -574,14 +598,15 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_bool"/>
<ref name="parent_payload"/>
@@ -590,50 +615,53 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_payload"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_payload"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
@@ -673,7 +701,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_bool"/>
<ref name="child_payload"/>
@@ -682,14 +710,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_bool"/>
<ref name="child_payload"/>
@@ -698,21 +726,21 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_payload"/>
</element>
@@ -720,13 +748,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_payload"/>
</element>
@@ -734,14 +762,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
@@ -768,6 +796,11 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<ref name="bsc_handle"/>
</optional>
<optional>
+ <attribute name="rrdp_notification_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <optional>
<element name="bpki_cert">
<ref name="base64"/>
</element>
@@ -781,7 +814,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_bool"/>
<ref name="repository_payload"/>
@@ -790,14 +823,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_bool"/>
<ref name="repository_payload"/>
@@ -806,21 +839,21 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_payload"/>
</element>
@@ -828,13 +861,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_payload"/>
</element>
@@ -842,14 +875,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
@@ -857,14 +890,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_resources_query">
<element name="list_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="list_resources_reply">
<element name="list_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<attribute name="valid_until">
<data type="dateTime">
@@ -892,13 +925,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_roa_requests_query">
<element name="list_roa_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_roa_requests_reply">
<element name="list_roa_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="asn">
<data type="nonNegativeInteger"/>
</attribute>
@@ -918,14 +951,14 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_ghostbuster_requests_query">
<element name="list_ghostbuster_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="list_ghostbuster_requests_reply">
<element name="list_ghostbuster_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<data type="string"/>
</element>
@@ -934,13 +967,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_ee_certificate_requests_query">
<element name="list_ee_certificate_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_ee_certificate_requests_reply">
<element name="list_ee_certificate_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="gski">
<data type="token">
<param name="minLength">27</param>
@@ -967,14 +1000,12 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<ref name="ipv6_list"/>
</attribute>
</optional>
- <optional>
- <attribute name="cn">
- <data type="string">
- <param name="maxLength">64</param>
- <param name="pattern">[\-0-9A-Za-z_ ]+</param>
- </data>
- </attribute>
- </optional>
+ <attribute name="cn">
+ <data type="string">
+ <param name="maxLength">64</param>
+ <param name="pattern">[\-0-9A-Za-z_ ]+</param>
+ </data>
+ </attribute>
<optional>
<attribute name="sn">
<data type="string">
@@ -1000,13 +1031,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_published_objects_query">
<element name="list_published_objects">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_published_objects_reply">
<element name="list_published_objects">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="uri">
<ref name="uri"/>
</attribute>
@@ -1022,13 +1053,13 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<define name="list_received_resources_query">
<element name="list_received_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_received_resources_reply">
<element name="list_received_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<attribute name="notBefore">
<data type="dateTime">
@@ -1076,7 +1107,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<element name="report_error">
<ref name="tag"/>
<optional>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</optional>
<attribute name="error_code">
<ref name="error"/>
@@ -1102,7 +1133,7 @@ left_right = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
## Parsed RelaxNG myrpki schema
myrpki = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: myrpki.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: myrpki.rnc 5876 2014-06-26 19:00:12Z sra $
RelaxNG schema for MyRPKI XML messages.
@@ -1481,11 +1512,183 @@ myrpki = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
-->
''')
-## @var publication
-## Parsed RelaxNG publication schema
-publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
+## @var oob_setup
+## Parsed RelaxNG oob_setup schema
+oob_setup = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
+<!-- $Id: rpki-setup.rnc 3429 2015-10-14 23:46:50Z sra $ -->
+<grammar ns="http://www.hactrn.net/uris/rpki/rpki-setup/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <define name="version">
+ <value>1</value>
+ </define>
+ <define name="base64">
+ <data type="base64Binary">
+ <param name="maxLength">512000</param>
+ </data>
+ </define>
+ <define name="handle">
+ <data type="string">
+ <param name="maxLength">255</param>
+ <param name="pattern">[\-_A-Za-z0-9/]*</param>
+ </data>
+ </define>
+ <define name="uri">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </define>
+ <define name="any">
+ <element>
+ <anyName/>
+ <zeroOrMore>
+ <attribute>
+ <anyName/>
+ </attribute>
+ </zeroOrMore>
+ <zeroOrMore>
+ <choice>
+ <ref name="any"/>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+ <define name="authorization_token">
+ <ref name="base64"/>
+ </define>
+ <define name="bpki_ta">
+ <ref name="base64"/>
+ </define>
+ <start combine="choice">
+ <element name="child_request">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="child_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="child_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="parent_response">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="service_uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="child_handle">
+ <ref name="handle"/>
+ </attribute>
+ <attribute name="parent_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="parent_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ <optional>
+ <element name="offer">
+ <empty/>
+ </element>
+ </optional>
+ <zeroOrMore>
+ <element name="referral">
+ <attribute name="referrer">
+ <ref name="handle"/>
+ </attribute>
+ <optional>
+ <attribute name="contact_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <ref name="authorization_token"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="publisher_request">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="publisher_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="publisher_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ <zeroOrMore>
+ <element name="referral">
+ <attribute name="referrer">
+ <ref name="handle"/>
+ </attribute>
+ <ref name="authorization_token"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="repository_response">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="service_uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="publisher_handle">
+ <ref name="handle"/>
+ </attribute>
+ <attribute name="sia_base">
+ <ref name="uri"/>
+ </attribute>
+ <optional>
+ <attribute name="rrdp_notification_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <element name="repository_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="authorization">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="authorized_sia_base">
+ <ref name="uri"/>
+ </attribute>
+ <ref name="bpki_ta"/>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="error">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="reason">
+ <choice>
+ <value>syntax-error</value>
+ <value>authentication-failure</value>
+ <value>refused</value>
+ </choice>
+ </attribute>
+ <optional>
+ <ref name="any"/>
+ </optional>
+ </element>
+ </start>
+</grammar>
+''')
+
+## @var publication_control
+## Parsed RelaxNG publication_control schema
+publication_control = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: publication-schema.rnc 5902 2014-07-18 16:37:04Z sra $
+ $Id: publication-control.rnc 5903 2014-07-18 17:08:13Z sra $
RelaxNG schema for RPKI publication protocol.
@@ -1506,7 +1709,7 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-->
-<grammar ns="http://www.hactrn.net/uris/rpki/publication-spec/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+<grammar ns="http://www.hactrn.net/uris/rpki/publication-control/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<define name="version">
<value>1</value>
</define>
@@ -1540,26 +1743,12 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
</start>
<!-- PDUs allowed in a query -->
<define name="query_elt">
- <choice>
- <ref name="config_query"/>
- <ref name="client_query"/>
- <ref name="certificate_query"/>
- <ref name="crl_query"/>
- <ref name="manifest_query"/>
- <ref name="roa_query"/>
- <ref name="ghostbuster_query"/>
- </choice>
+ <ref name="client_query"/>
</define>
<!-- PDUs allowed in a reply -->
<define name="reply_elt">
<choice>
- <ref name="config_reply"/>
<ref name="client_reply"/>
- <ref name="certificate_reply"/>
- <ref name="crl_reply"/>
- <ref name="manifest_reply"/>
- <ref name="roa_reply"/>
- <ref name="ghostbuster_reply"/>
<ref name="report_error_reply"/>
</choice>
</define>
@@ -1603,60 +1792,7 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<param name="pattern">[\-_A-Za-z0-9/]+</param>
</data>
</define>
- <!--
- <config/> element (use restricted to repository operator)
- config_handle attribute, create, list, and destroy commands omitted deliberately, see code for details
- -->
- <define name="config_payload">
- <optional>
- <element name="bpki_crl">
- <ref name="base64"/>
- </element>
- </optional>
- </define>
- <define name="config_query" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>set</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="config_payload"/>
- </element>
- </define>
- <define name="config_reply" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>set</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- </element>
- </define>
- <define name="config_query" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>get</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- </element>
- </define>
- <define name="config_reply" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>get</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="config_payload"/>
- </element>
- </define>
- <!-- <client/> element (use restricted to repository operator) -->
+ <!-- <client/> element -->
<define name="client_handle">
<attribute name="client_handle">
<ref name="object_handle"/>
@@ -1801,242 +1937,217 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<ref name="client_handle"/>
</element>
</define>
- <!-- <certificate/> element -->
- <define name="certificate_query" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
+ <!-- <report_error/> element -->
+ <define name="error">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
</define>
- <define name="certificate_reply" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>publish</value>
- </attribute>
+ <define name="report_error_reply">
+ <element name="report_error">
<optional>
<ref name="tag"/>
</optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="certificate_query" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>withdraw</value>
+ <attribute name="error_code">
+ <ref name="error"/>
</attribute>
<optional>
- <ref name="tag"/>
+ <data type="string">
+ <param name="maxLength">512000</param>
+ </data>
</optional>
- <ref name="uri"/>
</element>
</define>
- <define name="certificate_reply" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ comment-start: "# "
+ comment-start-skip: "#[ \t]*"
+ End:
+-->
+''')
+
+## @var publication
+## Parsed RelaxNG publication schema
+publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: publication.rnc 5896 2014-07-15 19:34:32Z sra $
+
+ RelaxNG schema for RPKI publication protocol, from current I-D.
+
+ Copyright (c) 2014 IETF Trust and the persons identified as authors
+ of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of Internet Society, IETF or IETF Trust, nor the
+ names of specific contributors, may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+-->
+<grammar ns="http://www.hactrn.net/uris/rpki/publication-spec/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <!-- This is version 3 of the protocol. -->
+ <define name="version">
+ <value>3</value>
</define>
- <!-- <crl/> element -->
- <define name="crl_query" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>publish</value>
+ <!-- Top level PDU is either a query or a reply. -->
+ <start combine="choice">
+ <element name="msg">
+ <attribute name="version">
+ <ref name="version"/>
</attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
- </define>
- <define name="crl_reply" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>publish</value>
+ <attribute name="type">
+ <value>query</value>
</attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
+ <zeroOrMore>
+ <ref name="query_elt"/>
+ </zeroOrMore>
</element>
- </define>
- <define name="crl_query" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>withdraw</value>
+ </start>
+ <start combine="choice">
+ <element name="msg">
+ <attribute name="version">
+ <ref name="version"/>
</attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="crl_reply" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>withdraw</value>
+ <attribute name="type">
+ <value>reply</value>
</attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
+ <zeroOrMore>
+ <ref name="reply_elt"/>
+ </zeroOrMore>
</element>
+ </start>
+ <!-- PDUs allowed in queries and replies. -->
+ <define name="query_elt">
+ <choice>
+ <ref name="publish_query"/>
+ <ref name="withdraw_query"/>
+ <ref name="list_query"/>
+ </choice>
</define>
- <!-- <manifest/> element -->
- <define name="manifest_query" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
+ <define name="reply_elt">
+ <choice>
+ <ref name="publish_reply"/>
+ <ref name="withdraw_reply"/>
+ <ref name="list_reply"/>
+ <ref name="report_error_reply"/>
+ </choice>
</define>
- <define name="manifest_reply" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
+ <!-- Tag attributes for bulk operations. -->
+ <define name="tag">
+ <attribute name="tag">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
</define>
- <define name="manifest_query" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
+ <!-- Base64 encoded DER stuff. -->
+ <define name="base64">
+ <data type="base64Binary"/>
</define>
- <define name="manifest_reply" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
+ <!-- Publication URIs. -->
+ <define name="uri">
+ <attribute name="uri">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </attribute>
</define>
- <!-- <roa/> element -->
- <define name="roa_query" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
+ <!-- Digest of objects being withdrawn -->
+ <define name="hash">
+ <attribute name="hash">
+ <data type="string">
+ <param name="pattern">[0-9a-fA-F]+</param>
+ </data>
+ </attribute>
</define>
- <define name="roa_reply" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>publish</value>
- </attribute>
+ <!-- Error codes. -->
+ <define name="error">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </define>
+ <!-- <publish/> element -->
+ <define name="publish_query">
+ <element name="publish">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
- </element>
- </define>
- <define name="roa_query" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
<optional>
- <ref name="tag"/>
+ <ref name="hash"/>
</optional>
- <ref name="uri"/>
+ <ref name="base64"/>
</element>
</define>
- <define name="roa_reply" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
+ <define name="publish_reply">
+ <element name="publish">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
</element>
</define>
- <!-- <ghostbuster/> element -->
- <define name="ghostbuster_query" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>publish</value>
- </attribute>
+ <!-- <withdraw/> element -->
+ <define name="withdraw_query">
+ <element name="withdraw">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
- <ref name="base64"/>
+ <ref name="hash"/>
</element>
</define>
- <define name="ghostbuster_reply" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>publish</value>
- </attribute>
+ <define name="withdraw_reply">
+ <element name="withdraw">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
</element>
</define>
- <define name="ghostbuster_query" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
+ <!-- <list/> element -->
+ <define name="list_query">
+ <element name="list">
<optional>
<ref name="tag"/>
</optional>
- <ref name="uri"/>
</element>
</define>
- <define name="ghostbuster_reply" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
+ <define name="list_reply">
+ <element name="list">
<optional>
<ref name="tag"/>
</optional>
<ref name="uri"/>
+ <ref name="hash"/>
</element>
</define>
<!-- <report_error/> element -->
- <define name="error">
- <data type="token">
- <param name="maxLength">1024</param>
- </data>
- </define>
<define name="report_error_reply">
<element name="report_error">
<optional>
@@ -2066,7 +2177,7 @@ publication = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
## Parsed RelaxNG router_certificate schema
router_certificate = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: router-certificate-schema.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: router-certificate.rnc 5881 2014-07-03 16:55:02Z sra $
RelaxNG schema for BGPSEC router certificate interchange format.
@@ -2164,11 +2275,165 @@ router_certificate = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
-->
''')
+## @var rrdp
+## Parsed RelaxNG rrdp schema
+rrdp = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: rrdp.rnc 6010 2014-11-08 18:01:58Z sra $
+
+ RelaxNG schema for RPKI Repository Delta Protocol (RRDP).
+
+ Copyright (C) 2014 Dragon Research Labs ("DRL")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+-->
+<grammar ns="http://www.ripe.net/rpki/rrdp" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <define name="version">
+ <data type="positiveInteger">
+ <param name="maxInclusive">1</param>
+ </data>
+ </define>
+ <define name="serial">
+ <data type="nonNegativeInteger"/>
+ </define>
+ <define name="uri">
+ <data type="anyURI"/>
+ </define>
+ <define name="uuid">
+ <data type="string">
+ <param name="pattern">[\-0-9a-fA-F]+</param>
+ </data>
+ </define>
+ <define name="hash">
+ <data type="string">
+ <param name="pattern">[0-9a-fA-F]+</param>
+ </data>
+ </define>
+ <define name="base64">
+ <data type="base64Binary"/>
+ </define>
+ <!-- Notification file: lists current snapshots and deltas -->
+ <start combine="choice">
+ <element name="notification">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <element name="snapshot">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ <zeroOrMore>
+ <element name="delta">
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <!-- Snapshot segment: think DNS AXFR. -->
+ <start combine="choice">
+ <element name="snapshot">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <zeroOrMore>
+ <element name="publish">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <ref name="base64"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <!-- Delta segment: think DNS IXFR. -->
+ <start combine="choice">
+ <element name="delta">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <oneOrMore>
+ <ref name="delta_element"/>
+ </oneOrMore>
+ </element>
+ </start>
+ <define name="delta_element" combine="choice">
+ <element name="publish">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <optional>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </optional>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <define name="delta_element" combine="choice">
+ <element name="withdraw">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ </define>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ comment-start: "# "
+ comment-start-skip: "#[ \t]*"
+ End:
+-->
+''')
+
## @var up_down
## Parsed RelaxNG up_down schema
up_down = RelaxNGParser(r'''<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: up-down-schema.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: up-down.rnc 5881 2014-07-03 16:55:02Z sra $
RelaxNG schema for the up-down protocol, extracted from RFC 6492.
diff --git a/rpki/relaxng_parser.py b/rpki/relaxng_parser.py
index 466b1a79..53ec8f0d 100644
--- a/rpki/relaxng_parser.py
+++ b/rpki/relaxng_parser.py
@@ -22,21 +22,21 @@ from an XML-format RelaxNG schema.
import lxml.etree
class RelaxNGParser(object):
- """
- Parse schema, extract XML namespace and protocol version (if any).
- Method calls are just passed along to the parsed RelaxNG schema.
- """
+ """
+ Parse schema, extract XML namespace and protocol version (if any).
+ Method calls are just passed along to the parsed RelaxNG schema.
+ """
- def __init__(self, text):
- xml = lxml.etree.fromstring(text)
- self.schema = lxml.etree.RelaxNG(xml)
- ns = xml.get("ns")
- self.xmlns = "{" + ns + "}"
- self.nsmap = { None : ns }
- x = xml.xpath("ns0:define[@name = 'version']/ns0:value",
- namespaces = dict(ns0 = "http://relaxng.org/ns/structure/1.0"))
- if len(x) == 1:
- self.version = x[0].text
+ def __init__(self, text):
+ xml = lxml.etree.fromstring(text)
+ self.schema = lxml.etree.RelaxNG(xml)
+ ns = xml.get("ns")
+ self.xmlns = "{" + ns + "}"
+ self.nsmap = { None : ns }
+ x = xml.xpath("ns0:define[@name = 'version']/ns0:value",
+ namespaces = dict(ns0 = "http://relaxng.org/ns/structure/1.0"))
+ if len(x) == 1:
+ self.version = x[0].text
- def __getattr__(self, name):
- return getattr(self.schema, name)
+ def __getattr__(self, name):
+ return getattr(self.schema, name)
diff --git a/rpki/resource_set.py b/rpki/resource_set.py
index fea6ad2d..055076dd 100644
--- a/rpki/resource_set.py
+++ b/rpki/resource_set.py
@@ -44,745 +44,784 @@ re_prefix_with_maxlen = re.compile("^([0-9:.a-fA-F]+)/([0-9]+)-([0-9]+)$")
re_prefix = re.compile("^([0-9:.a-fA-F]+)/([0-9]+)$")
class resource_range(object):
- """
- Generic resource range type. Assumes underlying type is some kind
- of integer.
-
- This is a virtual class. You probably don't want to use this type
- directly.
- """
-
- def __init__(self, range_min, range_max):
- assert range_min.__class__ is range_max.__class__, \
- "Type mismatch, %r doesn't match %r" % (range_min.__class__, range_max.__class__)
- assert range_min <= range_max, "Mis-ordered range: %s before %s" % (range_min, range_max)
- self.min = range_min
- self.max = range_max
-
- def __cmp__(self, other):
- assert self.__class__ is other.__class__, \
- "Type mismatch, comparing %r with %r" % (self.__class__, other.__class__)
- return cmp(self.min, other.min) or cmp(self.max, other.max)
-
-class resource_range_as(resource_range):
- """
- Range of Autonomous System Numbers.
-
- Denotes a single ASN by a range whose min and max values are
- identical.
- """
-
- ## @var datum_type
- # Type of underlying data (min and max).
-
- datum_type = long
-
- def __init__(self, range_min, range_max):
- resource_range.__init__(self,
- long(range_min) if isinstance(range_min, int) else range_min,
- long(range_max) if isinstance(range_max, int) else range_max)
-
- def __str__(self):
- """
- Convert a resource_range_as to string format.
- """
- if self.min == self.max:
- return str(self.min)
- else:
- return str(self.min) + "-" + str(self.max)
-
- @classmethod
- def parse_str(cls, x):
"""
- Parse ASN resource range from text (eg, XML attributes).
- """
- r = re_asn_range.match(x)
- if r:
- return cls(long(r.group(1)), long(r.group(2)))
- else:
- return cls(long(x), long(x))
+ Generic resource range type. Assumes underlying type is some kind
+ of integer.
- @classmethod
- def from_strings(cls, a, b = None):
- """
- Construct ASN range from strings.
+ This is a virtual class. You probably don't want to use this type
+ directly.
"""
- if b is None:
- b = a
- return cls(long(a), long(b))
-
-class resource_range_ip(resource_range):
- """
- Range of (generic) IP addresses.
-
- Prefixes are converted to ranges on input, and ranges that can be
- represented as prefixes are written as prefixes on output.
- This is a virtual class. You probably don't want to use it
- directly.
- """
+ # Give pylint a little help here
- ## @var datum_type
- # Type of underlying data (min and max).
-
- datum_type = rpki.POW.IPAddress
-
- def prefixlen(self):
- """
- Determine whether a resource_range_ip can be expressed as a
- prefix. Returns prefix length if it can, otherwise raises
- MustBePrefix exception.
- """
- mask = self.min ^ self.max
- if self.min & mask != 0:
- raise rpki.exceptions.MustBePrefix
- prefixlen = self.min.bits
- while mask & 1:
- prefixlen -= 1
- mask >>= 1
- if mask:
- raise rpki.exceptions.MustBePrefix
- return prefixlen
-
- @property
- def can_be_prefix(self):
- """
- Boolean property indicating whether this range can be expressed as
- a prefix.
+ datum_type = int
+ parse_str = int
- This just calls .prefixlen() to do the work, so that we can keep
- the logic in one place. This property is useful primarily in
- context where catching an exception isn't practical.
- """
- try:
- self.prefixlen()
- return True
- except rpki.exceptions.MustBePrefix:
- return False
+ def __init__(self, range_min, range_max):
+ assert range_min.__class__ is range_max.__class__, \
+ "Type mismatch, %r doesn't match %r" % (range_min.__class__, range_max.__class__)
+ assert range_min <= range_max, "Mis-ordered range: %s before %s" % (range_min, range_max)
+ self.min = range_min
+ self.max = range_max
- def __str__(self):
- """
- Convert a resource_range_ip to string format.
- """
- try:
- return str(self.min) + "/" + str(self.prefixlen())
- except rpki.exceptions.MustBePrefix:
- return str(self.min) + "-" + str(self.max)
+ def __cmp__(self, other):
+ assert self.__class__ is other.__class__, \
+ "Type mismatch, comparing %r with %r" % (self.__class__, other.__class__)
+ return cmp(self.min, other.min) or cmp(self.max, other.max)
- @classmethod
- def parse_str(cls, x):
- """
- Parse IP address range or prefix from text (eg, XML attributes).
- """
- r = re_address_range.match(x)
- if r:
- return cls.from_strings(r.group(1), r.group(2))
- r = re_prefix.match(x)
- if r:
- a = rpki.POW.IPAddress(r.group(1))
- if cls is resource_range_ip and a.version == 4:
- cls = resource_range_ipv4
- if cls is resource_range_ip and a.version == 6:
- cls = resource_range_ipv6
- return cls.make_prefix(a, int(r.group(2)))
- raise rpki.exceptions.BadIPResource('Bad IP resource "%s"' % x)
-
- @classmethod
- def make_prefix(cls, prefix, prefixlen):
- """
- Construct a resource range corresponding to a prefix.
+class resource_range_as(resource_range):
"""
- assert isinstance(prefix, rpki.POW.IPAddress) and isinstance(prefixlen, (int, long))
- assert prefixlen >= 0 and prefixlen <= prefix.bits, "Nonsensical prefix length: %s" % prefixlen
- mask = (1 << (prefix.bits - prefixlen)) - 1
- assert (prefix & mask) == 0, "Resource not in canonical form: %s/%s" % (prefix, prefixlen)
- return cls(prefix, rpki.POW.IPAddress(prefix | mask))
+ Range of Autonomous System Numbers.
- def chop_into_prefixes(self, result):
- """
- Chop up a resource_range_ip into ranges that can be represented as
- prefixes.
- """
- try:
- self.prefixlen()
- result.append(self)
- except rpki.exceptions.MustBePrefix:
- range_min = self.min
- range_max = self.max
- while range_max >= range_min:
- bits = int(math.log(long(range_max - range_min + 1), 2))
- while True:
- mask = ~(~0 << bits)
- assert range_min + mask <= range_max
- if range_min & mask == 0:
- break
- assert bits > 0
- bits -= 1
- result.append(self.make_prefix(range_min, range_min.bits - bits))
- range_min = range_min + mask + 1
-
- @classmethod
- def from_strings(cls, a, b = None):
+ Denotes a single ASN by a range whose min and max values are
+ identical.
"""
- Construct IP address range from strings.
- """
- if b is None:
- b = a
- a = rpki.POW.IPAddress(a)
- b = rpki.POW.IPAddress(b)
- if a.version != b.version:
- raise TypeError
- if cls is resource_range_ip:
- if a.version == 4:
- return resource_range_ipv4(a, b)
- if a.version == 6:
- return resource_range_ipv6(a, b)
- elif a.version == cls.version:
- return cls(a, b)
- else:
- raise TypeError
-class resource_range_ipv4(resource_range_ip):
- """
- Range of IPv4 addresses.
- """
-
- version = 4
-
-class resource_range_ipv6(resource_range_ip):
- """
- Range of IPv6 addresses.
- """
-
- version = 6
+ ## @var datum_type
+ # Type of underlying data (min and max).
-def _rsplit(rset, that):
- """
- Utility function to split a resource range into two resource ranges.
- """
+ datum_type = long
- this = rset.pop(0)
+ def __init__(self, range_min, range_max):
+ resource_range.__init__(self,
+ long(range_min) if isinstance(range_min, int) else range_min,
+ long(range_max) if isinstance(range_max, int) else range_max)
- assert type(this) is type(that), "type(this) [%r] is not type(that) [%r]" % (type(this), type(that))
-
- assert type(this.min) is type(that.min), "type(this.min) [%r] is not type(that.min) [%r]" % (type(this.min), type(that.min))
- assert type(this.min) is type(this.max), "type(this.min) [%r] is not type(this.max) [%r]" % (type(this.min), type(this.max))
- assert type(that.min) is type(that.max), "type(that.min) [%r] is not type(that.max) [%r]" % (type(that.min), type(that.max))
-
- if this.min < that.min:
- rset.insert(0, type(this)(this.min, type(that.min)(that.min - 1)))
- rset.insert(1, type(this)(that.min, this.max))
-
- else:
- assert this.max > that.max
- rset.insert(0, type(this)(this.min, that.max))
- rset.insert(1, type(this)(type(that.max)(that.max + 1), this.max))
-
-class resource_set(list):
- """
- Generic resource set, a list subclass containing resource ranges.
+ def __str__(self):
+ """
+ Convert a resource_range_as to string format.
+ """
- This is a virtual class. You probably don't want to use it
- directly.
- """
+ if self.min == self.max:
+ return str(self.min)
+ else:
+ return str(self.min) + "-" + str(self.max)
- ## @var inherit
- # Boolean indicating whether this resource_set uses RFC 3779 inheritance.
+ @classmethod
+ def parse_str(cls, x):
+ """
+ Parse ASN resource range from text (eg, XML attributes).
+ """
- inherit = False
+ r = re_asn_range.match(x)
+ if r:
+ return cls(long(r.group(1)), long(r.group(2)))
+ else:
+ return cls(long(x), long(x))
- ## @var canonical
- # Whether this resource_set is currently in canonical form.
+ @classmethod
+ def from_strings(cls, a, b = None):
+ """
+ Construct ASN range from strings.
+ """
- canonical = False
+ if b is None:
+ b = a
+ return cls(long(a), long(b))
- def __init__(self, ini = None, allow_overlap = False):
- """
- Initialize a resource_set.
- """
- list.__init__(self)
- if isinstance(ini, (int, long)):
- ini = str(ini)
- if ini is inherit_token:
- self.inherit = True
- elif isinstance(ini, str) and len(ini):
- self.extend(self.parse_str(s) for s in ini.split(","))
- elif isinstance(ini, list):
- self.extend(ini)
- elif ini is not None and ini != "":
- raise ValueError("Unexpected initializer: %s" % str(ini))
- self.canonize(allow_overlap)
-
- def canonize(self, allow_overlap = False):
- """
- Whack this resource_set into canonical form.
+class resource_range_ip(resource_range):
"""
- assert not self.inherit or len(self) == 0
- if not self.canonical:
- self.sort()
- i = 0
- while i + 1 < len(self):
- if allow_overlap and self[i].max + 1 >= self[i+1].min:
- self[i] = type(self[i])(self[i].min, max(self[i].max, self[i+1].max))
- del self[i+1]
- elif self[i].max + 1 == self[i+1].min:
- self[i] = type(self[i])(self[i].min, self[i+1].max)
- del self[i+1]
+ Range of (generic) IP addresses.
+
+ Prefixes are converted to ranges on input, and ranges that can be
+ represented as prefixes are written as prefixes on output.
+
+ This is a virtual class. You probably don't want to use it
+ directly.
+ """
+
+ ## @var datum_type
+ # Type of underlying data (min and max).
+
+ datum_type = rpki.POW.IPAddress
+
+ # Give pylint a little help here
+ version = None
+
+ def prefixlen(self):
+ """
+ Determine whether a resource_range_ip can be expressed as a
+ prefix. Returns prefix length if it can, otherwise raises
+ MustBePrefix exception.
+ """
+
+ mask = self.min ^ self.max
+ if self.min & mask != 0:
+ raise rpki.exceptions.MustBePrefix
+ prefixlen = self.min.bits
+ while mask & 1:
+ prefixlen -= 1
+ mask >>= 1
+ if mask:
+ raise rpki.exceptions.MustBePrefix
+ return prefixlen
+
+ @property
+ def can_be_prefix(self):
+ """
+ Boolean property indicating whether this range can be expressed as
+ a prefix.
+
+ This just calls .prefixlen() to do the work, so that we can keep
+ the logic in one place. This property is useful primarily in
+ context where catching an exception isn't practical.
+ """
+
+ try:
+ self.prefixlen()
+ return True
+ except rpki.exceptions.MustBePrefix:
+ return False
+
+ def __str__(self):
+ """
+ Convert a resource_range_ip to string format.
+ """
+
+ try:
+ return str(self.min) + "/" + str(self.prefixlen())
+ except rpki.exceptions.MustBePrefix:
+ return str(self.min) + "-" + str(self.max)
+
+ @classmethod
+ def parse_str(cls, x):
+ """
+ Parse IP address range or prefix from text (eg, XML attributes).
+ """
+
+ r = re_address_range.match(x)
+ if r:
+ return cls.from_strings(r.group(1), r.group(2))
+ r = re_prefix.match(x)
+ if r:
+ a = rpki.POW.IPAddress(r.group(1))
+ if cls is resource_range_ip and a.version == 4:
+ cls = resource_range_ipv4
+ if cls is resource_range_ip and a.version == 6:
+ cls = resource_range_ipv6
+ return cls.make_prefix(a, int(r.group(2)))
+ raise rpki.exceptions.BadIPResource('Bad IP resource "%s"' % x)
+
+ @classmethod
+ def make_prefix(cls, prefix, prefixlen):
+ """
+ Construct a resource range corresponding to a prefix.
+ """
+
+ assert isinstance(prefix, rpki.POW.IPAddress) and isinstance(prefixlen, (int, long))
+ assert prefixlen >= 0 and prefixlen <= prefix.bits, "Nonsensical prefix length: %s" % prefixlen
+ mask = (1 << (prefix.bits - prefixlen)) - 1
+ assert (prefix & mask) == 0, "Resource not in canonical form: %s/%s" % (prefix, prefixlen)
+ return cls(prefix, rpki.POW.IPAddress(prefix | mask))
+
+ def chop_into_prefixes(self, result):
+ """
+ Chop up a resource_range_ip into ranges that can be represented as
+ prefixes.
+ """
+
+ try:
+ self.prefixlen()
+ result.append(self)
+ except rpki.exceptions.MustBePrefix:
+ range_min = self.min
+ range_max = self.max
+ while range_max >= range_min:
+ bits = int(math.log(long(range_max - range_min + 1), 2))
+ while True:
+ mask = ~(~0 << bits)
+ assert range_min + mask <= range_max
+ if range_min & mask == 0:
+ break
+ assert bits > 0
+ bits -= 1
+ result.append(self.make_prefix(range_min, range_min.bits - bits))
+ range_min = range_min + mask + 1
+
+ @classmethod
+ def from_strings(cls, a, b = None):
+ """
+ Construct IP address range from strings.
+ """
+
+ if b is None:
+ b = a
+ a = rpki.POW.IPAddress(a)
+ b = rpki.POW.IPAddress(b)
+ if a.version != b.version:
+ raise TypeError
+ if cls is resource_range_ip:
+ if a.version == 4:
+ return resource_range_ipv4(a, b)
+ if a.version == 6:
+ return resource_range_ipv6(a, b)
+ elif a.version == cls.version:
+ return cls(a, b)
else:
- i += 1
- for i in xrange(0, len(self) - 1):
- if self[i].max >= self[i+1].min:
- raise rpki.exceptions.ResourceOverlap("Resource overlap: %s %s" % (self[i], self[i+1]))
- self.canonical = True
+ raise TypeError
- def append(self, item):
- """
- Wrapper around list.append() (q.v.) to reset canonical flag.
- """
- list.append(self, item)
- self.canonical = False
-
- def extend(self, item):
+class resource_range_ipv4(resource_range_ip):
"""
- Wrapper around list.extend() (q.v.) to reset canonical flag.
+ Range of IPv4 addresses.
"""
- list.extend(self, item)
- self.canonical = False
- def __str__(self):
- """
- Convert a resource_set to string format.
- """
- if self.inherit:
- return inherit_token
- else:
- return ",".join(str(x) for x in self)
+ version = 4
- def _comm(self, other):
+class resource_range_ipv6(resource_range_ip):
"""
- Like comm(1), sort of.
-
- Returns a tuple of three resource sets: resources only in self,
- resources only in other, and resources in both. Used (not very
- efficiently) as the basis for most set operations on resource
- sets.
+ Range of IPv6 addresses.
"""
- assert not self.inherit
- assert type(self) is type(other), "Type mismatch %r %r" % (type(self), type(other))
- set1 = type(self)(self) # clone and whack into canonical form
- set2 = type(other)(other) # ditto
- only1, only2, both = [], [], []
- while set1 or set2:
- if set1 and (not set2 or set1[0].max < set2[0].min):
- only1.append(set1.pop(0))
- elif set2 and (not set1 or set2[0].max < set1[0].min):
- only2.append(set2.pop(0))
- elif set1[0].min < set2[0].min:
- _rsplit(set1, set2[0])
- elif set2[0].min < set1[0].min:
- _rsplit(set2, set1[0])
- elif set1[0].max < set2[0].max:
- _rsplit(set2, set1[0])
- elif set2[0].max < set1[0].max:
- _rsplit(set1, set2[0])
- else:
- assert set1[0].min == set2[0].min and set1[0].max == set2[0].max
- both.append(set1.pop(0))
- set2.pop(0)
- return type(self)(only1), type(self)(only2), type(self)(both)
-
- def union(self, other):
- """
- Set union for resource sets.
- """
+ version = 6
- assert not self.inherit
- assert type(self) is type(other), "Type mismatch: %r %r" % (type(self), type(other))
- set1 = type(self)(self) # clone and whack into canonical form
- set2 = type(other)(other) # ditto
- result = []
- while set1 or set2:
- if set1 and (not set2 or set1[0].max < set2[0].min):
- result.append(set1.pop(0))
- elif set2 and (not set1 or set2[0].max < set1[0].min):
- result.append(set2.pop(0))
- else:
- this = set1.pop(0)
- that = set2.pop(0)
- assert type(this) is type(that)
- range_min = min(this.min, that.min)
- range_max = max(this.max, that.max)
- result.append(type(this)(range_min, range_max))
- while set1 and set1[0].max <= range_max:
- assert set1[0].min >= range_min
- del set1[0]
- while set2 and set2[0].max <= range_max:
- assert set2[0].min >= range_min
- del set2[0]
- return type(self)(result)
-
- __or__ = union
-
- def intersection(self, other):
+def _rsplit(rset, that):
"""
- Set intersection for resource sets.
+ Utility function to split a resource range into two resource ranges.
"""
- return self._comm(other)[2]
- __and__ = intersection
+ this = rset.pop(0)
- def difference(self, other):
- """
- Set difference for resource sets.
- """
- return self._comm(other)[0]
+ assert type(this) is type(that), "type(this) [%r] is not type(that) [%r]" % (type(this), type(that))
- __sub__ = difference
-
- def symmetric_difference(self, other):
- """
- Set symmetric difference (XOR) for resource sets.
- """
- com = self._comm(other)
- return com[0] | com[1]
+ assert type(this.min) is type(that.min), "type(this.min) [%r] is not type(that.min) [%r]" % (type(this.min), type(that.min))
+ assert type(this.min) is type(this.max), "type(this.min) [%r] is not type(this.max) [%r]" % (type(this.min), type(this.max))
+ assert type(that.min) is type(that.max), "type(that.min) [%r] is not type(that.max) [%r]" % (type(that.min), type(that.max))
- __xor__ = symmetric_difference
+ if this.min < that.min:
+ rset.insert(0, type(this)(this.min, type(that.min)(that.min - 1)))
+ rset.insert(1, type(this)(that.min, this.max))
- def contains(self, item):
- """
- Set membership test for resource sets.
- """
- assert not self.inherit
- self.canonize()
- if not self:
- return False
- if type(item) is type(self[0]):
- range_min = item.min
- range_max = item.max
else:
- range_min = item
- range_max = item
- lo = 0
- hi = len(self)
- while lo < hi:
- mid = (lo + hi) / 2
- if self[mid].max < range_max:
- lo = mid + 1
- else:
- hi = mid
- return lo < len(self) and self[lo].min <= range_min and self[lo].max >= range_max
-
- __contains__ = contains
-
- def issubset(self, other):
- """
- Test whether self is a subset (possibly improper) of other.
- """
- for i in self:
- if not other.contains(i):
- return False
- return True
-
- __le__ = issubset
+ assert this.max > that.max
+ rset.insert(0, type(this)(this.min, that.max))
+ rset.insert(1, type(this)(type(that.max)(that.max + 1), this.max))
- def issuperset(self, other):
- """
- Test whether self is a superset (possibly improper) of other.
+class resource_set(list):
"""
- return other.issubset(self)
+ Generic resource set, a list subclass containing resource ranges.
+
+ This is a virtual class. You probably don't want to use it
+ directly.
+ """
+
+ ## @var inherit
+ # Boolean indicating whether this resource_set uses RFC 3779 inheritance.
+
+ inherit = False
+
+ ## @var canonical
+ # Whether this resource_set is currently in canonical form.
+
+ canonical = False
+
+ # Give pylint a little help here
+ range_type = resource_range
+
+ def __init__(self, ini = None, allow_overlap = False):
+ """
+ Initialize a resource_set.
+ """
+
+ list.__init__(self)
+ if isinstance(ini, (int, long)):
+ ini = str(ini)
+ if ini is inherit_token:
+ self.inherit = True
+ elif isinstance(ini, (str, unicode)) and len(ini):
+ self.extend(self.parse_str(s) for s in ini.split(","))
+ elif isinstance(ini, list):
+ self.extend(ini)
+ elif ini is not None and ini != "":
+ raise ValueError("Unexpected initializer: %s" % str(ini))
+ self.canonize(allow_overlap)
+
+ def canonize(self, allow_overlap = False):
+ """
+ Whack this resource_set into canonical form.
+ """
+
+ assert not self.inherit or len(self) == 0
+ if not self.canonical:
+ self.sort()
+ i = 0
+ while i + 1 < len(self):
+ if allow_overlap and self[i].max + 1 >= self[i+1].min:
+ self[i] = type(self[i])(self[i].min, max(self[i].max, self[i+1].max))
+ del self[i+1]
+ elif self[i].max + 1 == self[i+1].min:
+ self[i] = type(self[i])(self[i].min, self[i+1].max)
+ del self[i+1]
+ else:
+ i += 1
+ for i in xrange(0, len(self) - 1):
+ if self[i].max >= self[i+1].min:
+ raise rpki.exceptions.ResourceOverlap("Resource overlap: %s %s" % (self[i], self[i+1]))
+ self.canonical = True
+
+ def append(self, item):
+ """
+ Wrapper around list.append() (q.v.) to reset canonical flag.
+ """
+
+ list.append(self, item)
+ self.canonical = False
+
+ def extend(self, item):
+ """
+ Wrapper around list.extend() (q.v.) to reset canonical flag.
+ """
+
+ list.extend(self, item)
+ self.canonical = False
+
+ def __str__(self):
+ """
+ Convert a resource_set to string format.
+ """
+
+ if self.inherit:
+ return inherit_token
+ else:
+ return ",".join(str(x) for x in self)
+
+ def _comm(self, other):
+ """
+ Like comm(1), sort of.
+
+ Returns a tuple of three resource sets: resources only in self,
+ resources only in other, and resources in both. Used (not very
+ efficiently) as the basis for most set operations on resource
+ sets.
+ """
+
+ assert not self.inherit
+ assert type(self) is type(other), "Type mismatch %r %r" % (type(self), type(other))
+ set1 = type(self)(self) # clone and whack into canonical form
+ set2 = type(other)(other) # ditto
+ only1, only2, both = [], [], []
+ while set1 or set2:
+ if set1 and (not set2 or set1[0].max < set2[0].min):
+ only1.append(set1.pop(0))
+ elif set2 and (not set1 or set2[0].max < set1[0].min):
+ only2.append(set2.pop(0))
+ elif set1[0].min < set2[0].min:
+ _rsplit(set1, set2[0])
+ elif set2[0].min < set1[0].min:
+ _rsplit(set2, set1[0])
+ elif set1[0].max < set2[0].max:
+ _rsplit(set2, set1[0])
+ elif set2[0].max < set1[0].max:
+ _rsplit(set1, set2[0])
+ else:
+ assert set1[0].min == set2[0].min and set1[0].max == set2[0].max
+ both.append(set1.pop(0))
+ set2.pop(0)
+ return type(self)(only1), type(self)(only2), type(self)(both)
+
+ def union(self, other):
+ """
+ Set union for resource sets.
+ """
+
+ assert not self.inherit
+ assert type(self) is type(other), "Type mismatch: %r %r" % (type(self), type(other))
+ set1 = type(self)(self) # clone and whack into canonical form
+ set2 = type(other)(other) # ditto
+ result = []
+ while set1 or set2:
+ if set1 and (not set2 or set1[0].max < set2[0].min):
+ result.append(set1.pop(0))
+ elif set2 and (not set1 or set2[0].max < set1[0].min):
+ result.append(set2.pop(0))
+ else:
+ this = set1.pop(0)
+ that = set2.pop(0)
+ assert type(this) is type(that)
+ range_min = min(this.min, that.min)
+ range_max = max(this.max, that.max)
+ result.append(type(this)(range_min, range_max))
+ while set1 and set1[0].max <= range_max:
+ assert set1[0].min >= range_min
+ del set1[0]
+ while set2 and set2[0].max <= range_max:
+ assert set2[0].min >= range_min
+ del set2[0]
+ return type(self)(result)
+
+ __or__ = union
+
+ def intersection(self, other):
+ """
+ Set intersection for resource sets.
+ """
+
+ return self._comm(other)[2]
+
+ __and__ = intersection
+
+ def difference(self, other):
+ """
+ Set difference for resource sets.
+ """
+
+ return self._comm(other)[0]
+
+ __sub__ = difference
+
+ def symmetric_difference(self, other):
+ """
+ Set symmetric difference (XOR) for resource sets.
+ """
+
+ com = self._comm(other)
+ return com[0] | com[1]
+
+ __xor__ = symmetric_difference
+
+ def contains(self, item):
+ """
+ Set membership test for resource sets.
+ """
+
+ assert not self.inherit
+ self.canonize()
+ if not self:
+ return False
+ if type(item) is type(self[0]):
+ range_min = item.min
+ range_max = item.max
+ else:
+ range_min = item
+ range_max = item
+ lo = 0
+ hi = len(self)
+ while lo < hi:
+ mid = (lo + hi) / 2
+ if self[mid].max < range_max:
+ lo = mid + 1
+ else:
+ hi = mid
+ return lo < len(self) and self[lo].min <= range_min and self[lo].max >= range_max
- __ge__ = issuperset
+ __contains__ = contains
- def __lt__(self, other):
- return not self.issuperset(other)
+ def issubset(self, other):
+ """
+ Test whether self is a subset (possibly improper) of other.
+ """
- def __gt__(self, other):
- return not self.issubset(other)
+ for i in self:
+ if not other.contains(i):
+ return False
+ return True
- def __ne__(self, other):
- """
- A set with the inherit bit set is always unequal to any other set, because
- we can't know the answer here. This is also consistent with __nonzero__
- which returns True for inherit sets, and False for empty sets.
- """
- return self.inherit or other.inherit or list.__ne__(self, other)
+ __le__ = issubset
- def __eq__(self, other):
- return not self.__ne__(other)
+ def issuperset(self, other):
+ """
+ Test whether self is a superset (possibly improper) of other.
+ """
- def __nonzero__(self):
- """
- Tests whether or not this set is empty. Note that sets with the inherit
- bit set are considered non-empty, despite having zero length.
- """
- return self.inherit or len(self)
+ return other.issubset(self)
- @classmethod
- def from_sql(cls, sql, query, args = None):
- """
- Create resource set from an SQL query.
+ __ge__ = issuperset
- sql is an object that supports execute() and fetchall() methods
- like a DB API 2.0 cursor object.
+ def __lt__(self, other):
+ return not self.issuperset(other)
- query is an SQL query that returns a sequence of (min, max) pairs.
- """
+ def __gt__(self, other):
+ return not self.issubset(other)
- sql.execute(query, args)
- return cls(ini = [cls.range_type(cls.range_type.datum_type(b),
- cls.range_type.datum_type(e))
- for (b, e) in sql.fetchall()])
+ def __ne__(self, other):
+ """
+ A set with the inherit bit set is always unequal to any other set, because
+ we can't know the answer here. This is also consistent with __nonzero__
+ which returns True for inherit sets, and False for empty sets.
+ """
- @classmethod
- def from_django(cls, iterable):
- """
- Create resource set from a Django query.
+ return self.inherit or other.inherit or list.__ne__(self, other)
- iterable is something which returns (min, max) pairs.
- """
+ def __eq__(self, other):
+ return not self.__ne__(other)
- return cls(ini = [cls.range_type(cls.range_type.datum_type(b),
- cls.range_type.datum_type(e))
- for (b, e) in iterable])
+ def __nonzero__(self):
+ """
+ Tests whether or not this set is empty. Note that sets with the inherit
+ bit set are considered non-empty, despite having zero length.
+ """
- @classmethod
- def parse_str(cls, s):
- """
- Parse resource set from text string (eg, XML attributes). This is
- a backwards compatability wrapper, real functionality is now part
- of the range classes.
- """
- return cls.range_type.parse_str(s)
+ return self.inherit or len(self)
-class resource_set_as(resource_set):
- """
- Autonomous System Number resource set.
- """
+ @classmethod
+ def from_django(cls, iterable):
+ """
+ Create resource set from a Django query.
- ## @var range_type
- # Type of range underlying this type of resource_set.
+ iterable is something which returns (min, max) pairs.
+ """
- range_type = resource_range_as
+ return cls(ini = [cls.range_type(cls.range_type.datum_type(b),
+ cls.range_type.datum_type(e))
+ for (b, e) in iterable])
-class resource_set_ip(resource_set):
- """
- (Generic) IP address resource set.
+ @classmethod
+ def parse_str(cls, s):
+ """
+ Parse resource set from text string (eg, XML attributes). This is
+ a backwards compatibility wrapper, real functionality is now part
+ of the range classes.
+ """
- This is a virtual class. You probably don't want to use it
- directly.
- """
+ return cls.range_type.parse_str(s)
- def to_roa_prefix_set(self):
+class resource_set_as(resource_set):
"""
- Convert from a resource set to a ROA prefix set.
+ Autonomous System Number resource set.
"""
- prefix_ranges = []
- for r in self:
- r.chop_into_prefixes(prefix_ranges)
- return self.roa_prefix_set_type([
- self.roa_prefix_set_type.prefix_type(r.min, r.prefixlen())
- for r in prefix_ranges])
-
-class resource_set_ipv4(resource_set_ip):
- """
- IPv4 address resource set.
- """
-
- ## @var range_type
- # Type of range underlying this type of resource_set.
-
- range_type = resource_range_ipv4
-
-class resource_set_ipv6(resource_set_ip):
- """
- IPv6 address resource set.
- """
-
- ## @var range_type
- # Type of range underlying this type of resource_set.
-
- range_type = resource_range_ipv6
-
-class resource_bag(object):
- """
- Container to simplify passing around the usual triple of ASN, IPv4,
- and IPv6 resource sets.
- """
- ## @var asn
- # Set of Autonomous System Number resources.
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
- ## @var v4
- # Set of IPv4 resources.
+ range_type = resource_range_as
- ## @var v6
- # Set of IPv6 resources.
-
- ## @var valid_until
- # Expiration date of resources, for setting certificate notAfter field.
-
- def __init__(self, asn = None, v4 = None, v6 = None, valid_until = None):
- self.asn = asn or resource_set_as()
- self.v4 = v4 or resource_set_ipv4()
- self.v6 = v6 or resource_set_ipv6()
- self.valid_until = valid_until
-
- def oversized(self, other):
- """
- True iff self is oversized with respect to other.
- """
- return not self.asn.issubset(other.asn) or \
- not self.v4.issubset(other.v4) or \
- not self.v6.issubset(other.v6)
-
- def undersized(self, other):
- """
- True iff self is undersized with respect to other.
- """
- return not other.asn.issubset(self.asn) or \
- not other.v4.issubset(self.v4) or \
- not other.v6.issubset(self.v6)
-
- @classmethod
- def from_inheritance(cls):
- """
- Build a resource bag that just inherits everything from its
- parent.
- """
- self = cls()
- self.asn = resource_set_as()
- self.v4 = resource_set_ipv4()
- self.v6 = resource_set_ipv6()
- self.asn.inherit = True
- self.v4.inherit = True
- self.v6.inherit = True
- return self
-
- @classmethod
- def from_str(cls, text, allow_overlap = False):
- """
- Parse a comma-separated text string into a resource_bag. Not
- particularly efficient, fix that if and when it becomes an issue.
- """
- asns = []
- v4s = []
- v6s = []
- for word in text.split(","):
- if "." in word:
- v4s.append(word)
- elif ":" in word:
- v6s.append(word)
- else:
- asns.append(word)
- return cls(asn = resource_set_as(",".join(asns), allow_overlap) if asns else None,
- v4 = resource_set_ipv4(",".join(v4s), allow_overlap) if v4s else None,
- v6 = resource_set_ipv6(",".join(v6s), allow_overlap) if v6s else None)
-
- @classmethod
- def from_POW_rfc3779(cls, resources):
+class resource_set_ip(resource_set):
"""
- Build a resource_bag from data returned by
- rpki.POW.X509.getRFC3779().
+ (Generic) IP address resource set.
- The conversion to long for v4 and v6 is (intended to be)
- temporary: in the long run, we should be using rpki.POW.IPAddress
- rather than long here.
+ This is a virtual class. You probably don't want to use it
+ directly.
"""
- asn = inherit_token if resources[0] == "inherit" else [resource_range_as( r[0], r[1]) for r in resources[0] or ()]
- v4 = inherit_token if resources[1] == "inherit" else [resource_range_ipv4(r[0], r[1]) for r in resources[1] or ()]
- v6 = inherit_token if resources[2] == "inherit" else [resource_range_ipv6(r[0], r[1]) for r in resources[2] or ()]
- return cls(resource_set_as(asn) if asn else None,
- resource_set_ipv4(v4) if v4 else None,
- resource_set_ipv6(v6) if v6 else None)
-
- def empty(self):
- """
- True iff all resource sets in this bag are empty.
- """
- return not self.asn and not self.v4 and not self.v6
-
- def __nonzero__(self):
- return not self.empty()
- def __eq__(self, other):
- return self.asn == other.asn and \
- self.v4 == other.v4 and \
- self.v6 == other.v6 and \
- self.valid_until == other.valid_until
+ def to_roa_prefix_set(self):
+ """
+ Convert from a resource set to a ROA prefix set.
+ """
- def __ne__(self, other):
- return not (self == other) # pylint: disable=C0325
+ # pylint: disable=E1101
+ prefix_ranges = []
+ for r in self:
+ r.chop_into_prefixes(prefix_ranges)
+ return self.roa_prefix_set_type([
+ self.roa_prefix_set_type.prefix_type(r.min, r.prefixlen())
+ for r in prefix_ranges])
- def intersection(self, other):
+class resource_set_ipv4(resource_set_ip):
"""
- Compute intersection with another resource_bag. valid_until
- attribute (if any) inherits from self.
+ IPv4 address resource set.
"""
- return self.__class__(self.asn & other.asn,
- self.v4 & other.v4,
- self.v6 & other.v6,
- self.valid_until)
- __and__ = intersection
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
- def union(self, other):
- """
- Compute union with another resource_bag. valid_until attribute
- (if any) inherits from self.
- """
- return self.__class__(self.asn | other.asn,
- self.v4 | other.v4,
- self.v6 | other.v6,
- self.valid_until)
+ range_type = resource_range_ipv4
- __or__ = union
-
- def difference(self, other):
+class resource_set_ipv6(resource_set_ip):
"""
- Compute difference against another resource_bag. valid_until
- attribute (if any) inherits from self
+ IPv6 address resource set.
"""
- return self.__class__(self.asn - other.asn,
- self.v4 - other.v4,
- self.v6 - other.v6,
- self.valid_until)
- __sub__ = difference
+ ## @var range_type
+ # Type of range underlying this type of resource_set.
- def symmetric_difference(self, other):
- """
- Compute symmetric difference against another resource_bag.
- valid_until attribute (if any) inherits from self
+ range_type = resource_range_ipv6
+
+class resource_bag(object):
"""
- return self.__class__(self.asn ^ other.asn,
- self.v4 ^ other.v4,
- self.v6 ^ other.v6,
- self.valid_until)
-
- __xor__ = symmetric_difference
-
- def __str__(self):
- s = ""
- if self.asn:
- s += "ASN: %s" % self.asn
- if self.v4:
- if s:
- s += ", "
- s += "V4: %s" % self.v4
- if self.v6:
- if s:
- s += ", "
- s += "V6: %s" % self.v6
- return s
-
- def __iter__(self):
- for r in self.asn:
- yield r
- for r in self.v4:
- yield r
- for r in self.v6:
- yield r
+ Container to simplify passing around the usual triple of ASN, IPv4,
+ and IPv6 resource sets.
+ """
+
+ ## @var asn
+ # Set of Autonomous System Number resources.
+
+ ## @var v4
+ # Set of IPv4 resources.
+
+ ## @var v6
+ # Set of IPv6 resources.
+
+ ## @var valid_until
+ # Expiration date of resources, for setting certificate notAfter field.
+
+ def __init__(self, asn = None, v4 = None, v6 = None, valid_until = None):
+ if isinstance(asn, (str, unicode)):
+ asn = resource_set_as(asn)
+ if isinstance(v4, (str, unicode)):
+ v4 = resource_set_ipv4(v4)
+ if isinstance(v6, (str, unicode)):
+ v6 = resource_set_ipv6(v6)
+ if isinstance(valid_until, (str, unicode)):
+ valid_until = rpki.sundial.datetime.fromXMLtime(valid_until)
+ self.asn = asn or resource_set_as()
+ self.v4 = v4 or resource_set_ipv4()
+ self.v6 = v6 or resource_set_ipv6()
+ self.valid_until = valid_until
+
+ def oversized(self, other):
+ """
+ True iff self is oversized with respect to other.
+ """
+
+ return not self.asn.issubset(other.asn) or \
+ not self.v4.issubset(other.v4) or \
+ not self.v6.issubset(other.v6)
+
+ def undersized(self, other):
+ """
+ True iff self is undersized with respect to other.
+ """
+
+ return not other.asn.issubset(self.asn) or \
+ not other.v4.issubset(self.v4) or \
+ not other.v6.issubset(self.v6)
+
+ @classmethod
+ def from_inheritance(cls):
+ """
+ Build a resource bag that just inherits everything from its
+ parent.
+ """
+
+ self = cls()
+ self.asn = resource_set_as()
+ self.v4 = resource_set_ipv4()
+ self.v6 = resource_set_ipv6()
+ self.asn.inherit = True
+ self.v4.inherit = True
+ self.v6.inherit = True
+ return self
+
+ @classmethod
+ def from_str(cls, text, allow_overlap = False):
+ """
+ Parse a comma-separated text string into a resource_bag. Not
+ particularly efficient, fix that if and when it becomes an issue.
+ """
+
+ asns = []
+ v4s = []
+ v6s = []
+ for word in text.split(","):
+ if "." in word:
+ v4s.append(word)
+ elif ":" in word:
+ v6s.append(word)
+ else:
+ asns.append(word)
+ return cls(asn = resource_set_as(",".join(asns), allow_overlap) if asns else None,
+ v4 = resource_set_ipv4(",".join(v4s), allow_overlap) if v4s else None,
+ v6 = resource_set_ipv6(",".join(v6s), allow_overlap) if v6s else None)
+
+ @classmethod
+ def from_POW_rfc3779(cls, resources):
+ """
+ Build a resource_bag from data returned by
+ rpki.POW.X509.getRFC3779().
+
+ The conversion to long for v4 and v6 is (intended to be)
+ temporary: in the long run, we should be using rpki.POW.IPAddress
+ rather than long here.
+ """
+
+ asn = inherit_token if resources[0] == "inherit" else [resource_range_as( r[0], r[1]) for r in resources[0] or ()]
+ v4 = inherit_token if resources[1] == "inherit" else [resource_range_ipv4(r[0], r[1]) for r in resources[1] or ()]
+ v6 = inherit_token if resources[2] == "inherit" else [resource_range_ipv6(r[0], r[1]) for r in resources[2] or ()]
+ return cls(resource_set_as(asn) if asn else None,
+ resource_set_ipv4(v4) if v4 else None,
+ resource_set_ipv6(v6) if v6 else None)
+
+ def empty(self):
+ """
+ True iff all resource sets in this bag are empty.
+ """
+
+ return not self.asn and not self.v4 and not self.v6
+
+ def __nonzero__(self):
+ return not self.empty()
+
+ def __eq__(self, other):
+ return self.asn == other.asn and \
+ self.v4 == other.v4 and \
+ self.v6 == other.v6 and \
+ self.valid_until == other.valid_until
+
+ def __ne__(self, other):
+ return not (self == other) # pylint: disable=C0325
+
+ def intersection(self, other):
+ """
+ Compute intersection with another resource_bag. valid_until
+ attribute (if any) inherits from self.
+ """
+
+ return self.__class__(self.asn & other.asn,
+ self.v4 & other.v4,
+ self.v6 & other.v6,
+ self.valid_until)
+
+ __and__ = intersection
+
+ def union(self, other):
+ """
+ Compute union with another resource_bag. valid_until attribute
+ (if any) inherits from self.
+ """
+
+ return self.__class__(self.asn | other.asn,
+ self.v4 | other.v4,
+ self.v6 | other.v6,
+ self.valid_until)
+
+ __or__ = union
+
+ def difference(self, other):
+ """
+ Compute difference against another resource_bag. valid_until
+ attribute (if any) inherits from self
+ """
+
+ return self.__class__(self.asn - other.asn,
+ self.v4 - other.v4,
+ self.v6 - other.v6,
+ self.valid_until)
+
+ __sub__ = difference
+
+ def symmetric_difference(self, other):
+ """
+ Compute symmetric difference against another resource_bag.
+ valid_until attribute (if any) inherits from self
+ """
+
+ return self.__class__(self.asn ^ other.asn,
+ self.v4 ^ other.v4,
+ self.v6 ^ other.v6,
+ self.valid_until)
+
+ __xor__ = symmetric_difference
+
+ def __str__(self):
+ s = ""
+ if self.asn:
+ s += "ASN: %s" % self.asn
+ if self.v4:
+ if s:
+ s += ", "
+ s += "V4: %s" % self.v4
+ if self.v6:
+ if s:
+ s += ", "
+ s += "V6: %s" % self.v6
+ return s
+
+ def __iter__(self):
+ for r in self.asn:
+ yield r
+ for r in self.v4:
+ yield r
+ for r in self.v6:
+ yield r
# Sadly, there are enough differences between RFC 3779 and the data
# structures in the latest proposed ROA format that we can't just use
@@ -793,356 +832,377 @@ class resource_bag(object):
# worth.
class roa_prefix(object):
- """
- ROA prefix. This is similar to the resource_range_ip class, but
- differs in that it only represents prefixes, never ranges, and
- includes the maximum prefix length as an additional value.
+ """
+ ROA prefix. This is similar to the resource_range_ip class, but
+ differs in that it only represents prefixes, never ranges, and
+ includes the maximum prefix length as an additional value.
- This is a virtual class, you probably don't want to use it directly.
- """
+ This is a virtual class, you probably don't want to use it directly.
+ """
- ## @var prefix
- # The prefix itself, an IP address with bits beyond the prefix
- # length zeroed.
+ ## @var prefix
+ # The prefix itself, an IP address with bits beyond the prefix
+ # length zeroed.
- ## @var prefixlen
- # (Minimum) prefix length.
+ ## @var prefixlen
+ # (Minimum) prefix length.
- ## @var max_prefixlen
- # Maxmimum prefix length.
+ ## @var max_prefixlen
+ # Maximum prefix length.
- def __init__(self, prefix, prefixlen, max_prefixlen = None):
- """
- Initialize a ROA prefix. max_prefixlen is optional and defaults
- to prefixlen. max_prefixlen must not be smaller than prefixlen.
- """
- if max_prefixlen is None:
- max_prefixlen = prefixlen
- assert max_prefixlen >= prefixlen, "Bad max_prefixlen: %d must not be shorter than %d" % (max_prefixlen, prefixlen)
- self.prefix = prefix
- self.prefixlen = prefixlen
- self.max_prefixlen = max_prefixlen
-
- def __cmp__(self, other):
- """
- Compare two ROA prefix objects. Comparision is based on prefix,
- prefixlen, and max_prefixlen, in that order.
- """
- assert self.__class__ is other.__class__
- return (cmp(self.prefix, other.prefix) or
- cmp(self.prefixlen, other.prefixlen) or
- cmp(self.max_prefixlen, other.max_prefixlen))
+ # Give pylint a little help
+ range_type = resource_range_ip
- def __str__(self):
- """
- Convert a ROA prefix to string format.
- """
- if self.prefixlen == self.max_prefixlen:
- return str(self.prefix) + "/" + str(self.prefixlen)
- else:
- return str(self.prefix) + "/" + str(self.prefixlen) + "-" + str(self.max_prefixlen)
+ def __init__(self, prefix, prefixlen, max_prefixlen = None):
+ """
+ Initialize a ROA prefix. max_prefixlen is optional and defaults
+ to prefixlen. max_prefixlen must not be smaller than prefixlen.
+ """
- def to_resource_range(self):
- """
- Convert this ROA prefix to the equivilent resource_range_ip
- object. This is an irreversable transformation because it loses
- the max_prefixlen attribute, nothing we can do about that.
- """
- return self.range_type.make_prefix(self.prefix, self.prefixlen)
+ if max_prefixlen is None:
+ max_prefixlen = prefixlen
+ assert max_prefixlen >= prefixlen, "Bad max_prefixlen: %d must not be shorter than %d" % (max_prefixlen, prefixlen)
+ self.prefix = prefix
+ self.prefixlen = prefixlen
+ self.max_prefixlen = max_prefixlen
- def min(self):
- """
- Return lowest address covered by prefix.
- """
- return self.prefix
+ def __cmp__(self, other):
+ """
+ Compare two ROA prefix objects. Comparison is based on prefix,
+ prefixlen, and max_prefixlen, in that order.
+ """
- def max(self):
- """
- Return highest address covered by prefix.
- """
- return self.prefix | ((1 << (self.prefix.bits - self.prefixlen)) - 1)
+ assert self.__class__ is other.__class__
+ return (cmp(self.prefix, other.prefix) or
+ cmp(self.prefixlen, other.prefixlen) or
+ cmp(self.max_prefixlen, other.max_prefixlen))
- def to_POW_roa_tuple(self):
- """
- Convert a resource_range_ip to rpki.POW.ROA.setPrefixes() format.
- """
- return self.prefix, self.prefixlen, self.max_prefixlen
+ def __str__(self):
+ """
+ Convert a ROA prefix to string format.
+ """
- @classmethod
- def parse_str(cls, x):
- """
- Parse ROA prefix from text (eg, an XML attribute).
- """
- r = re_prefix_with_maxlen.match(x)
- if r:
- return cls(rpki.POW.IPAddress(r.group(1)), int(r.group(2)), int(r.group(3)))
- r = re_prefix.match(x)
- if r:
- return cls(rpki.POW.IPAddress(r.group(1)), int(r.group(2)))
- raise rpki.exceptions.BadROAPrefix('Bad ROA prefix "%s"' % x)
+ if self.prefixlen == self.max_prefixlen:
+ return str(self.prefix) + "/" + str(self.prefixlen)
+ else:
+ return str(self.prefix) + "/" + str(self.prefixlen) + "-" + str(self.max_prefixlen)
-class roa_prefix_ipv4(roa_prefix):
- """
- IPv4 ROA prefix.
- """
+ def to_resource_range(self):
+ """
+ Convert this ROA prefix to the equivalent resource_range_ip
+ object. This is an irreversible transformation because it loses
+ the max_prefixlen attribute, nothing we can do about that.
+ """
- ## @var range_type
- # Type of corresponding resource_range_ip.
+ return self.range_type.make_prefix(self.prefix, self.prefixlen)
- range_type = resource_range_ipv4
+ def min(self):
+ """
+ Return lowest address covered by prefix.
+ """
-class roa_prefix_ipv6(roa_prefix):
- """
- IPv6 ROA prefix.
- """
+ return self.prefix
- ## @var range_type
- # Type of corresponding resource_range_ip.
+ def max(self):
+ """
+ Return highest address covered by prefix.
+ """
- range_type = resource_range_ipv6
+ return self.prefix | ((1 << (self.prefix.bits - self.prefixlen)) - 1)
-class roa_prefix_set(list):
- """
- Set of ROA prefixes, analogous to the resource_set_ip class.
- """
+ def to_POW_roa_tuple(self):
+ """
+ Convert a resource_range_ip to rpki.POW.ROA.setPrefixes() format.
+ """
- def __init__(self, ini = None):
- """
- Initialize a ROA prefix set.
- """
- list.__init__(self)
- if isinstance(ini, str) and len(ini):
- self.extend(self.parse_str(s) for s in ini.split(","))
- elif isinstance(ini, (list, tuple)):
- self.extend(ini)
- else:
- assert ini is None or ini == "", "Unexpected initializer: %s" % str(ini)
- self.sort()
+ return self.prefix, self.prefixlen, self.max_prefixlen
- def __str__(self):
- """
- Convert a ROA prefix set to string format.
- """
- return ",".join(str(x) for x in self)
+ @classmethod
+ def parse_str(cls, x):
+ """
+ Parse ROA prefix from text (eg, an XML attribute).
+ """
- @classmethod
- def parse_str(cls, s):
- """
- Parse ROA prefix from text (eg, an XML attribute).
- This method is a backwards compatability shim.
- """
- return cls.prefix_type.parse_str(s)
+ r = re_prefix_with_maxlen.match(x)
+ if r:
+ return cls(rpki.POW.IPAddress(r.group(1)), int(r.group(2)), int(r.group(3)))
+ r = re_prefix.match(x)
+ if r:
+ return cls(rpki.POW.IPAddress(r.group(1)), int(r.group(2)))
+ raise rpki.exceptions.BadROAPrefix('Bad ROA prefix "%s"' % x)
- def to_resource_set(self):
- """
- Convert a ROA prefix set to a resource set. This is an
- irreversable transformation. We have to compute a union here
- because ROA prefix sets can include overlaps, while RFC 3779
- resource sets cannot. This is ugly, and there is almost certainly
- a more efficient way to do this, but start by getting the output
- right before worrying about making it fast or pretty.
+class roa_prefix_ipv4(roa_prefix):
"""
- r = self.resource_set_type()
- s = self.resource_set_type()
- s.append(None)
- for p in self:
- s[0] = p.to_resource_range()
- r |= s
- return r
-
- @classmethod
- def from_sql(cls, sql, query, args = None):
+ IPv4 ROA prefix.
"""
- Create ROA prefix set from an SQL query.
-
- sql is an object that supports execute() and fetchall() methods
- like a DB API 2.0 cursor object.
- query is an SQL query that returns a sequence of (prefix,
- prefixlen, max_prefixlen) triples.
- """
+ ## @var range_type
+ # Type of corresponding resource_range_ip.
- sql.execute(query, args)
- return cls([cls.prefix_type(rpki.POW.IPAddress(x), int(y), int(z))
- for (x, y, z) in sql.fetchall()])
+ range_type = resource_range_ipv4
- @classmethod
- def from_django(cls, iterable):
+class roa_prefix_ipv6(roa_prefix):
"""
- Create ROA prefix set from a Django query.
-
- iterable is something which returns (prefix, prefixlen,
- max_prefixlen) triples.
+ IPv6 ROA prefix.
"""
- return cls([cls.prefix_type(rpki.POW.IPAddress(x), int(y), int(z))
- for (x, y, z) in iterable])
+ ## @var range_type
+ # Type of corresponding resource_range_ip.
- def to_POW_roa_tuple(self):
+ range_type = resource_range_ipv6
+
+class roa_prefix_set(list):
"""
- Convert ROA prefix set to form used by rpki.POW.ROA.setPrefixes().
+ Set of ROA prefixes, analogous to the resource_set_ip class.
"""
- if self:
- return tuple(a.to_POW_roa_tuple() for a in self)
- else:
- return None
+
+ # Give pylint a little help
+
+ prefix_type = roa_prefix
+ resource_set_type = resource_set_ip
+
+ def __init__(self, ini = None):
+ """
+ Initialize a ROA prefix set.
+ """
+
+ list.__init__(self)
+ if isinstance(ini, (str, unicode)) and len(ini):
+ self.extend(self.parse_str(s) for s in ini.split(","))
+ elif isinstance(ini, (list, tuple)):
+ self.extend(ini)
+ else:
+ assert ini is None or ini == "", "Unexpected initializer: %s" % str(ini)
+ self.sort()
+
+ def __str__(self):
+ """
+ Convert a ROA prefix set to string format.
+ """
+
+ return ",".join(str(x) for x in self)
+
+ @classmethod
+ def parse_str(cls, s):
+ """
+ Parse ROA prefix from text (eg, an XML attribute).
+ This method is a backwards compatability shim.
+ """
+
+ return cls.prefix_type.parse_str(s)
+
+ def to_resource_set(self):
+ """
+ Convert a ROA prefix set to a resource set. This is an
+ irreversable transformation. We have to compute a union here
+ because ROA prefix sets can include overlaps, while RFC 3779
+ resource sets cannot. This is ugly, and there is almost certainly
+ a more efficient way to do this, but start by getting the output
+ right before worrying about making it fast or pretty.
+ """
+
+ r = self.resource_set_type()
+ s = self.resource_set_type()
+ s.append(None)
+ for p in self:
+ s[0] = p.to_resource_range()
+ r |= s
+ return r
+
+ @classmethod
+ def from_sql(cls, sql, query, args = None):
+ """
+ Create ROA prefix set from an SQL query.
+
+ sql is an object that supports execute() and fetchall() methods
+ like a DB API 2.0 cursor object.
+
+ query is an SQL query that returns a sequence of (prefix,
+ prefixlen, max_prefixlen) triples.
+ """
+
+ sql.execute(query, args)
+ return cls([cls.prefix_type(rpki.POW.IPAddress(x), int(y), int(z))
+ for (x, y, z) in sql.fetchall()])
+
+ @classmethod
+ def from_django(cls, iterable):
+ """
+ Create ROA prefix set from a Django query.
+
+ iterable is something which returns (prefix, prefixlen,
+ max_prefixlen) triples.
+ """
+
+ return cls([cls.prefix_type(rpki.POW.IPAddress(x), int(y), int(z))
+ for (x, y, z) in iterable])
+
+ def to_POW_roa_tuple(self):
+ """
+ Convert ROA prefix set to form used by rpki.POW.ROA.setPrefixes().
+ """
+
+ if self:
+ return tuple(a.to_POW_roa_tuple() for a in self)
+ else:
+ return None
class roa_prefix_set_ipv4(roa_prefix_set):
- """
- Set of IPv4 ROA prefixes.
- """
+ """
+ Set of IPv4 ROA prefixes.
+ """
- ## @var prefix_type
- # Type of underlying roa_prefix.
+ ## @var prefix_type
+ # Type of underlying roa_prefix.
- prefix_type = roa_prefix_ipv4
+ prefix_type = roa_prefix_ipv4
- ## @var resource_set_type
- # Type of corresponding resource_set_ip class.
+ ## @var resource_set_type
+ # Type of corresponding resource_set_ip class.
- resource_set_type = resource_set_ipv4
+ resource_set_type = resource_set_ipv4
# Fix back link from resource_set to roa_prefix
resource_set_ipv4.roa_prefix_set_type = roa_prefix_set_ipv4
class roa_prefix_set_ipv6(roa_prefix_set):
- """
- Set of IPv6 ROA prefixes.
- """
+ """
+ Set of IPv6 ROA prefixes.
+ """
- ## @var prefix_type
- # Type of underlying roa_prefix.
+ ## @var prefix_type
+ # Type of underlying roa_prefix.
- prefix_type = roa_prefix_ipv6
+ prefix_type = roa_prefix_ipv6
- ## @var resource_set_type
- # Type of corresponding resource_set_ip class.
+ ## @var resource_set_type
+ # Type of corresponding resource_set_ip class.
- resource_set_type = resource_set_ipv6
+ resource_set_type = resource_set_ipv6
# Fix back link from resource_set to roa_prefix
resource_set_ipv6.roa_prefix_set_type = roa_prefix_set_ipv6
class roa_prefix_bag(object):
- """
- Container to simplify passing around the combination of an IPv4 ROA
- prefix set and an IPv6 ROA prefix set.
- """
+ """
+ Container to simplify passing around the combination of an IPv4 ROA
+ prefix set and an IPv6 ROA prefix set.
+ """
- ## @var v4
- # Set of IPv4 prefixes.
+ ## @var v4
+ # Set of IPv4 prefixes.
- ## @var v6
- # Set of IPv6 prefixes.
+ ## @var v6
+ # Set of IPv6 prefixes.
- def __init__(self, v4 = None, v6 = None):
- self.v4 = v4 or roa_prefix_set_ipv4()
- self.v6 = v6 or roa_prefix_set_ipv6()
+ def __init__(self, v4 = None, v6 = None):
+ self.v4 = v4 or roa_prefix_set_ipv4()
+ self.v6 = v6 or roa_prefix_set_ipv6()
- def __eq__(self, other):
- return self.v4 == other.v4 and self.v6 == other.v6
+ def __eq__(self, other):
+ return self.v4 == other.v4 and self.v6 == other.v6
- def __ne__(self, other):
- return not (self == other) # pylint: disable=C0325
+ def __ne__(self, other):
+ return not (self == other) # pylint: disable=C0325
# Test suite for set operations.
if __name__ == "__main__":
- def testprefix(v):
- return " (%s)" % v.to_roa_prefix_set() if isinstance(v, resource_set_ip) else ""
-
- def test1(t, s1, s2):
- if isinstance(s1, str) and isinstance(s2, str):
- print "x: ", s1
- print "y: ", s2
- r1 = t(s1)
- r2 = t(s2)
- print "x: ", r1, testprefix(r1)
- print "y: ", r2, testprefix(r2)
- v1 = r1._comm(r2)
- v2 = r2._comm(r1)
- assert v1[0] == v2[1] and v1[1] == v2[0] and v1[2] == v2[2]
- for i in r1: assert i in r1 and i.min in r1 and i.max in r1
- for i in r2: assert i in r2 and i.min in r2 and i.max in r2
- for i in v1[0]: assert i in r1 and i not in r2
- for i in v1[1]: assert i not in r1 and i in r2
- for i in v1[2]: assert i in r1 and i in r2
- v1 = r1 | r2
- v2 = r2 | r1
- assert v1 == v2
- print "x|y:", v1, testprefix(v1)
- v1 = r1 - r2
- v2 = r2 - r1
- print "x-y:", v1, testprefix(v1)
- print "y-x:", v2, testprefix(v2)
- v1 = r1 ^ r2
- v2 = r2 ^ r1
- assert v1 == v2
- print "x^y:", v1, testprefix(v1)
- v1 = r1 & r2
- v2 = r2 & r1
- assert v1 == v2
- print "x&y:", v1, testprefix(v1)
-
- def test2(t, s1, s2):
- print "x: ", s1
- print "y: ", s2
- r1 = t(s1)
- r2 = t(s2)
- print "x: ", r1
- print "y: ", r2
- print "x>y:", (r1 > r2)
- print "x<y:", (r1 < r2)
- test1(t.resource_set_type,
- r1.to_resource_set(),
- r2.to_resource_set())
-
- def test3(t, s1, s2):
- test1(t, s1, s2)
- r1 = t(s1).to_roa_prefix_set()
- r2 = t(s2).to_roa_prefix_set()
- print "x: ", r1
- print "y: ", r2
- print "x>y:", (r1 > r2)
- print "x<y:", (r1 < r2)
- test1(t.roa_prefix_set_type.resource_set_type,
- r1.to_resource_set(),
- r2.to_resource_set())
-
- print
- print "Testing set operations on resource sets"
- print
- test1(resource_set_as, "1,2,3,4,5,6,11,12,13,14,15", "1,2,3,4,5,6,111,121,131,141,151")
- print
- test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
- print
- test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.0.0.0/24")
- print
- test1(resource_set_ipv4, "10.0.0.0/24", "10.3.0.0/24,10.0.0.77/32")
- print
- test1(resource_set_ipv4, "10.0.0.0/24", "10.0.0.0/32,10.0.0.2/32,10.0.0.4/32")
- print
- print "Testing set operations on ROA prefixes"
- print
- test2(roa_prefix_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
- print
- test2(roa_prefix_set_ipv4, "10.0.0.0/24-32,10.6.0.0/24-32", "10.3.0.0/24,10.0.0.0/16-32")
- print
- test2(roa_prefix_set_ipv4, "10.3.0.0/24-24,10.0.0.0/16-32", "10.3.0.0/24,10.0.0.0/16-32")
- print
- test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
- print
- test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::7/128")
- print
- test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
- print
- test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120-128")
- print
- test3(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
- print
- test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
- print
- test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
+ def testprefix(v):
+ return " (%s)" % v.to_roa_prefix_set() if isinstance(v, resource_set_ip) else ""
+
+ def test1(t, s1, s2):
+ if isinstance(s1, (str, unicode)) and isinstance(s2, (str, unicode)):
+ print "x: ", s1
+ print "y: ", s2
+ r1 = t(s1)
+ r2 = t(s2)
+ print "x: ", r1, testprefix(r1)
+ print "y: ", r2, testprefix(r2)
+ v1 = r1._comm(r2) # pylint: disable=W0212
+ v2 = r2._comm(r1) # pylint: disable=W0212
+ assert v1[0] == v2[1] and v1[1] == v2[0] and v1[2] == v2[2]
+ assert all(i in r1 and i.min in r1 and i.max in r1 for i in r1)
+ assert all(i in r2 and i.min in r2 and i.max in r2 for i in r2)
+ assert all(i in r1 and i not in r2 for i in v1[0])
+ assert all(i not in r1 and i in r2 for i in v1[1])
+ assert all(i in r1 and i in r2 for i in v1[2])
+ v1 = r1 | r2
+ v2 = r2 | r1
+ assert v1 == v2
+ print "x|y:", v1, testprefix(v1)
+ v1 = r1 - r2
+ v2 = r2 - r1
+ print "x-y:", v1, testprefix(v1)
+ print "y-x:", v2, testprefix(v2)
+ v1 = r1 ^ r2
+ v2 = r2 ^ r1
+ assert v1 == v2
+ print "x^y:", v1, testprefix(v1)
+ v1 = r1 & r2
+ v2 = r2 & r1
+ assert v1 == v2
+ print "x&y:", v1, testprefix(v1)
+
+ def test2(t, s1, s2):
+ print "x: ", s1
+ print "y: ", s2
+ r1 = t(s1)
+ r2 = t(s2)
+ print "x: ", r1
+ print "y: ", r2
+ print "x>y:", (r1 > r2)
+ print "x<y:", (r1 < r2)
+ test1(t.resource_set_type,
+ r1.to_resource_set(),
+ r2.to_resource_set())
+
+ def test3(t, s1, s2):
+ test1(t, s1, s2)
+ r1 = t(s1).to_roa_prefix_set()
+ r2 = t(s2).to_roa_prefix_set()
+ print "x: ", r1
+ print "y: ", r2
+ print "x>y:", (r1 > r2)
+ print "x<y:", (r1 < r2)
+ test1(t.roa_prefix_set_type.resource_set_type,
+ r1.to_resource_set(),
+ r2.to_resource_set())
+
+ print
+ print "Testing set operations on resource sets"
+ print
+ test1(resource_set_as, "1,2,3,4,5,6,11,12,13,14,15", "1,2,3,4,5,6,111,121,131,141,151")
+ print
+ test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test1(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.0.0.0/24")
+ print
+ test1(resource_set_ipv4, "10.0.0.0/24", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test1(resource_set_ipv4, "10.0.0.0/24", "10.0.0.0/32,10.0.0.2/32,10.0.0.4/32")
+ print
+ print "Testing set operations on ROA prefixes"
+ print
+ test2(roa_prefix_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test2(roa_prefix_set_ipv4, "10.0.0.0/24-32,10.6.0.0/24-32", "10.3.0.0/24,10.0.0.0/16-32")
+ print
+ test2(roa_prefix_set_ipv4, "10.3.0.0/24-24,10.0.0.0/16-32", "10.3.0.0/24,10.0.0.0/16-32")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::7/128")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
+ print
+ test2(roa_prefix_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120-128")
+ print
+ test3(resource_set_ipv4, "10.0.0.44/32,10.6.0.2/32", "10.3.0.0/24,10.0.0.77/32")
+ print
+ test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::2/128")
+ print
+ test3(resource_set_ipv6, "2002:0a00:002c::1/128", "2002:0a00:002c::/120")
diff --git a/rpki/rootd.py b/rpki/rootd.py
index 78a71bba..dca60956 100644
--- a/rpki/rootd.py
+++ b/rpki/rootd.py
@@ -18,371 +18,444 @@
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
-Trivial RPKI up-down protocol root server. Not recommended for
-production use. Overrides a bunch of method definitions from the
-rpki.* classes in order to reuse as much code as possible.
+Trivial RPKI up-down protocol root server.
"""
import os
import time
import logging
import argparse
+
import rpki.resource_set
import rpki.up_down
-import rpki.left_right
import rpki.x509
-import rpki.http
+import rpki.http_simple
import rpki.config
import rpki.exceptions
import rpki.relaxng
import rpki.sundial
import rpki.log
import rpki.daemonize
+import rpki.publication
+
+from lxml.etree import Element, SubElement
logger = logging.getLogger(__name__)
-rootd = None
-
-class list_pdu(rpki.up_down.list_pdu):
- def serve_pdu(self, q_msg, r_msg, ignored, callback, errback):
- r_msg.payload = rpki.up_down.list_response_pdu()
- rootd.compose_response(r_msg)
- callback()
-
-class issue_pdu(rpki.up_down.issue_pdu):
- def serve_pdu(self, q_msg, r_msg, ignored, callback, errback):
- self.pkcs10.check_valid_request_ca()
- r_msg.payload = rpki.up_down.issue_response_pdu()
- rootd.compose_response(r_msg, self.pkcs10)
- callback()
-
-class revoke_pdu(rpki.up_down.revoke_pdu):
- def serve_pdu(self, q_msg, r_msg, ignored, callback, errback):
- logger.debug("Revocation requested for SKI %s", self.ski)
- subject_cert = rootd.get_subject_cert()
- if subject_cert is None:
- logger.debug("No subject certificate, nothing to revoke")
- raise rpki.exceptions.NotInDatabase
- if subject_cert.gSKI() != self.ski:
- logger.debug("Subject certificate has different SKI %s, not revoking", subject_cert.gSKI())
- raise rpki.exceptions.NotInDatabase
- logger.debug("Revoking certificate %s", self.ski)
- now = rpki.sundial.now()
- rootd.revoke_subject_cert(now)
- rootd.del_subject_cert()
- rootd.del_subject_pkcs10()
- rootd.generate_crl_and_manifest(now)
- r_msg.payload = rpki.up_down.revoke_response_pdu()
- r_msg.payload.class_name = self.class_name
- r_msg.payload.ski = self.ski
- callback()
-
-class error_response_pdu(rpki.up_down.error_response_pdu):
- exceptions = rpki.up_down.error_response_pdu.exceptions.copy()
- exceptions[rpki.exceptions.ClassNameUnknown, revoke_pdu] = 1301
- exceptions[rpki.exceptions.NotInDatabase, revoke_pdu] = 1302
-
-class message_pdu(rpki.up_down.message_pdu):
-
- name2type = {
- "list" : list_pdu,
- "list_response" : rpki.up_down.list_response_pdu,
- "issue" : issue_pdu,
- "issue_response" : rpki.up_down.issue_response_pdu,
- "revoke" : revoke_pdu,
- "revoke_response" : rpki.up_down.revoke_response_pdu,
- "error_response" : error_response_pdu }
-
- type2name = dict((v, k) for k, v in name2type.items())
-
- error_pdu_type = error_response_pdu
-
- def log_query(self, child):
+
+class ReplayTracker(object):
"""
- Log query we're handling.
+ Stash for replay protection timestamps.
"""
- logger.info("Serving %s query", self.type)
-class sax_handler(rpki.up_down.sax_handler):
- pdu = message_pdu
+ def __init__(self):
+ self.cms_timestamp = None
+
-class cms_msg(rpki.up_down.cms_msg):
- saxify = sax_handler.saxify
class main(object):
- def get_root_cert(self):
- logger.debug("Read root cert %s", self.rpki_root_cert_file)
- self.rpki_root_cert = rpki.x509.X509(Auto_file = self.rpki_root_cert_file)
-
- def root_newer_than_subject(self):
- return os.stat(self.rpki_root_cert_file).st_mtime > \
- os.stat(os.path.join(self.rpki_root_dir, self.rpki_subject_cert)).st_mtime
-
- def get_subject_cert(self):
- filename = os.path.join(self.rpki_root_dir, self.rpki_subject_cert)
- try:
- x = rpki.x509.X509(Auto_file = filename)
- logger.debug("Read subject cert %s", filename)
- return x
- except IOError:
- return None
-
- def set_subject_cert(self, cert):
- filename = os.path.join(self.rpki_root_dir, self.rpki_subject_cert)
- logger.debug("Writing subject cert %s, SKI %s", filename, cert.hSKI())
- f = open(filename, "wb")
- f.write(cert.get_DER())
- f.close()
-
- def del_subject_cert(self):
- filename = os.path.join(self.rpki_root_dir, self.rpki_subject_cert)
- logger.debug("Deleting subject cert %s", filename)
- os.remove(filename)
-
- def get_subject_pkcs10(self):
- try:
- x = rpki.x509.PKCS10(Auto_file = self.rpki_subject_pkcs10)
- logger.debug("Read subject PKCS #10 %s", self.rpki_subject_pkcs10)
- return x
- except IOError:
- return None
-
- def set_subject_pkcs10(self, pkcs10):
- logger.debug("Writing subject PKCS #10 %s", self.rpki_subject_pkcs10)
- f = open(self.rpki_subject_pkcs10, "wb")
- f.write(pkcs10.get_DER())
- f.close()
-
- def del_subject_pkcs10(self):
- logger.debug("Deleting subject PKCS #10 %s", self.rpki_subject_pkcs10)
- try:
- os.remove(self.rpki_subject_pkcs10)
- except OSError:
- pass
-
- def issue_subject_cert_maybe(self, new_pkcs10):
- now = rpki.sundial.now()
- subject_cert = self.get_subject_cert()
- old_pkcs10 = self.get_subject_pkcs10()
- if new_pkcs10 is not None and new_pkcs10 != old_pkcs10:
- self.set_subject_pkcs10(new_pkcs10)
- if subject_cert is not None:
- logger.debug("PKCS #10 changed, regenerating subject certificate")
+
+ def root_newer_than_subject(self):
+ return self.rpki_root_cert.mtime > os.stat(self.rpki_subject_cert_file).st_mtime
+
+
+ def get_subject_cert(self):
+ try:
+ x = rpki.x509.X509(Auto_file = self.rpki_subject_cert_file)
+ logger.debug("Read subject cert %s", self.rpki_subject_cert_file)
+ return x
+ except IOError:
+ return None
+
+
+ def set_subject_cert(self, cert):
+ logger.debug("Writing subject cert %s, SKI %s", self.rpki_subject_cert_file, cert.hSKI())
+ with open(self.rpki_subject_cert_file, "wb") as f:
+ f.write(cert.get_DER())
+
+
+ def del_subject_cert(self):
+ logger.debug("Deleting subject cert %s", self.rpki_subject_cert_file)
+ os.remove(self.rpki_subject_cert_file)
+
+
+ def get_subject_pkcs10(self):
+ try:
+ x = rpki.x509.PKCS10(Auto_file = self.rpki_subject_pkcs10)
+ logger.debug("Read subject PKCS #10 %s", self.rpki_subject_pkcs10)
+ return x
+ except IOError:
+ return None
+
+
+ def set_subject_pkcs10(self, pkcs10):
+ logger.debug("Writing subject PKCS #10 %s", self.rpki_subject_pkcs10)
+ with open(self.rpki_subject_pkcs10, "wb") as f:
+ f.write(pkcs10.get_DER())
+
+
+ def del_subject_pkcs10(self):
+ logger.debug("Deleting subject PKCS #10 %s", self.rpki_subject_pkcs10)
+ try:
+ os.remove(self.rpki_subject_pkcs10)
+ except OSError:
+ pass
+
+
+ def issue_subject_cert_maybe(self, new_pkcs10):
+ now = rpki.sundial.now()
+ subject_cert = self.get_subject_cert()
+ if subject_cert is None:
+ subject_cert_hash = None
+ else:
+ subject_cert_hash = rpki.x509.sha256(subject_cert.get_DER()).encode("hex")
+ old_pkcs10 = self.get_subject_pkcs10()
+ if new_pkcs10 is not None and new_pkcs10 != old_pkcs10:
+ self.set_subject_pkcs10(new_pkcs10)
+ if subject_cert is not None:
+ logger.debug("PKCS #10 changed, regenerating subject certificate")
+ self.revoke_subject_cert(now)
+ subject_cert = None
+ if subject_cert is not None and subject_cert.getNotAfter() <= now + self.rpki_subject_regen:
+ logger.debug("Subject certificate has reached expiration threshold, regenerating")
+ self.revoke_subject_cert(now)
+ subject_cert = None
+ if subject_cert is not None and self.root_newer_than_subject():
+ logger.debug("Root certificate has changed, regenerating subject")
+ self.revoke_subject_cert(now)
+ subject_cert = None
+ if subject_cert is not None:
+ return subject_cert, None
+ pkcs10 = old_pkcs10 if new_pkcs10 is None else new_pkcs10
+ if pkcs10 is None:
+ logger.debug("No PKCS #10 request, can't generate subject certificate yet")
+ return None, None
+ resources = self.rpki_root_cert.get_3779resources()
+ notAfter = now + self.rpki_subject_lifetime
+ logger.info("Generating subject cert %s with resources %s, expires %s",
+ self.rpki_subject_cert_uri, resources, notAfter)
+ req_key = pkcs10.getPublicKey()
+ req_sia = pkcs10.get_SIA()
+ self.next_serial_number()
+ subject_cert = self.rpki_root_cert.issue(
+ keypair = self.rpki_root_key,
+ subject_key = req_key,
+ serial = self.serial_number,
+ sia = req_sia,
+ aia = self.rpki_root_cert_uri,
+ crldp = self.rpki_root_crl_uri,
+ resources = resources,
+ notBefore = now,
+ notAfter = notAfter)
+ self.set_subject_cert(subject_cert)
+ pubd_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+ pdu = SubElement(pubd_msg, rpki.publication.tag_publish, uri = self.rpki_subject_cert_uri)
+ pdu.text = subject_cert.get_Base64()
+ if subject_cert_hash is not None:
+ pdu.set("hash", subject_cert_hash)
+ self.generate_crl_and_manifest(now, pubd_msg)
+ return subject_cert, pubd_msg
+
+
+ def generate_crl_and_manifest(self, now, pubd_msg):
+ subject_cert = self.get_subject_cert()
+ self.next_serial_number()
+ self.next_crl_number()
+ while self.revoked and self.revoked[0][1] + 2 * self.rpki_subject_regen < now:
+ del self.revoked[0]
+ crl = rpki.x509.CRL.generate(
+ keypair = self.rpki_root_key,
+ issuer = self.rpki_root_cert,
+ serial = self.crl_number,
+ thisUpdate = now,
+ nextUpdate = now + self.rpki_subject_regen,
+ revokedCertificates = self.revoked)
+ crl_hash = self.read_hash_maybe(self.rpki_root_crl_file)
+ logger.debug("Writing CRL %s", self.rpki_root_crl_file)
+ with open(self.rpki_root_crl_file, "wb") as f:
+ f.write(crl.get_DER())
+ pdu = SubElement(pubd_msg, rpki.publication.tag_publish, uri = self.rpki_root_crl_uri)
+ pdu.text = crl.get_Base64()
+ if crl_hash is not None:
+ pdu.set("hash", crl_hash)
+ manifest_content = [(os.path.basename(self.rpki_root_crl_uri), crl)]
+ if subject_cert is not None:
+ manifest_content.append((os.path.basename(self.rpki_subject_cert_uri), subject_cert))
+ manifest_resources = rpki.resource_set.resource_bag.from_inheritance()
+ manifest_keypair = rpki.x509.RSA.generate()
+ manifest_cert = self.rpki_root_cert.issue(
+ keypair = self.rpki_root_key,
+ subject_key = manifest_keypair.get_public(),
+ serial = self.serial_number,
+ sia = (None, None, self.rpki_root_manifest_uri, self.rrdp_notification_uri),
+ aia = self.rpki_root_cert_uri,
+ crldp = self.rpki_root_crl_uri,
+ resources = manifest_resources,
+ notBefore = now,
+ notAfter = now + self.rpki_subject_lifetime,
+ is_ca = False)
+ manifest = rpki.x509.SignedManifest.build(
+ serial = self.crl_number,
+ thisUpdate = now,
+ nextUpdate = now + self.rpki_subject_regen,
+ names_and_objs = manifest_content,
+ keypair = manifest_keypair,
+ certs = manifest_cert)
+ mft_hash = self.read_hash_maybe(self.rpki_root_manifest_file)
+ logger.debug("Writing manifest %s", self.rpki_root_manifest_file)
+ with open(self.rpki_root_manifest_file, "wb") as f:
+ f.write(manifest.get_DER())
+ pdu = SubElement(pubd_msg, rpki.publication.tag_publish, uri = self.rpki_root_manifest_uri)
+ pdu.text = manifest.get_Base64()
+ if mft_hash is not None:
+ pdu.set("hash", mft_hash)
+ cer_hash = rpki.x509.sha256(self.rpki_root_cert.get_DER()).encode("hex")
+ if cer_hash != self.rpki_root_cert_hash:
+ pdu = SubElement(pubd_msg, rpki.publication.tag_publish, uri = self.rpki_root_cert_uri)
+ pdu.text = self.rpki_root_cert.get_Base64()
+ if self.rpki_root_cert_hash is not None:
+ pdu.set("hash", self.rpki_root_cert_hash)
+ self.rpki_root_cert_hash = cer_hash
+
+
+ @staticmethod
+ def read_hash_maybe(fn):
+ try:
+ with open(fn, "rb") as f:
+ return rpki.x509.sha256(f.read()).encode("hex")
+ except IOError:
+ return None
+
+
+ def revoke_subject_cert(self, now):
+ self.revoked.append((self.get_subject_cert().getSerial(), now))
+
+
+ def publish(self, q_msg):
+ if q_msg is None:
+ return
+ assert len(q_msg) > 0
+
+ if not all(q_pdu.get("hash") is not None for q_pdu in q_msg):
+ logger.debug("Some publication PDUs are missing hashes, checking published data...")
+ q = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+ SubElement(q, rpki.publication.tag_list)
+ published_hash = dict((r.get("uri"), r.get("hash")) for r in self.call_pubd(q))
+ for q_pdu in q_msg:
+ q_uri = q_pdu.get("uri")
+ if q_pdu.get("hash") is None and published_hash.get(q_uri) is not None:
+ logger.debug("Updating hash of %s to %s from previously published data", q_uri, published_hash[q_uri])
+ q_pdu.set("hash", published_hash[q_uri])
+
+ r_msg = self.call_pubd(q_msg)
+ if len(q_msg) != len(r_msg):
+ raise rpki.exceptions.BadPublicationReply("Wrong number of response PDUs from pubd: sent %s, got %s" % (len(q_msg), len(r_msg)))
+
+
+ def call_pubd(self, q_msg):
+ for q_pdu in q_msg:
+ logger.info("Sending %s to pubd", q_pdu.get("uri"))
+ r_msg = rpki.http_simple.client(
+ proto_cms_msg = rpki.publication.cms_msg,
+ client_key = self.rootd_bpki_key,
+ client_cert = self.rootd_bpki_cert,
+ client_crl = self.rootd_bpki_crl,
+ server_ta = self.bpki_ta,
+ server_cert = self.pubd_bpki_cert,
+ url = self.pubd_url,
+ q_msg = q_msg,
+ replay_track = self.pubd_replay_tracker)
+ rpki.publication.raise_if_error(r_msg)
+ return r_msg
+
+
+ def compose_response(self, r_msg, pkcs10 = None):
+ subject_cert, pubd_msg = self.issue_subject_cert_maybe(pkcs10)
+ bag = self.rpki_root_cert.get_3779resources()
+ rc = SubElement(r_msg, rpki.up_down.tag_class,
+ class_name = self.rpki_class_name,
+ cert_url = str(rpki.up_down.multi_uri(self.rpki_root_cert_uri)),
+ resource_set_as = str(bag.asn),
+ resource_set_ipv4 = str(bag.v4),
+ resource_set_ipv6 = str(bag.v6),
+ resource_set_notafter = str(bag.valid_until))
+ if subject_cert is not None:
+ c = SubElement(rc, rpki.up_down.tag_certificate,
+ cert_url = str(rpki.up_down.multi_uri(self.rpki_subject_cert_uri)))
+ c.text = subject_cert.get_Base64()
+ SubElement(rc, rpki.up_down.tag_issuer).text = self.rpki_root_cert.get_Base64()
+ self.publish(pubd_msg)
+
+
+ def handle_list(self, q_msg, r_msg):
+ self.compose_response(r_msg)
+
+
+ def handle_issue(self, q_msg, r_msg):
+ # This is where we'd check q_msg[0].get("class_name") if this weren't rootd.
+ self.compose_response(r_msg, rpki.x509.PKCS10(Base64 = q_msg[0].text))
+
+
+ def handle_revoke(self, q_msg, r_msg):
+ class_name = q_msg[0].get("class_name")
+ ski = q_msg[0].get("ski")
+ logger.debug("Revocation requested for class %s SKI %s", class_name, ski)
+ subject_cert = self.get_subject_cert()
+ if subject_cert is None:
+ logger.debug("No subject certificate, nothing to revoke")
+ raise rpki.exceptions.NotInDatabase
+ if subject_cert.gSKI() != ski:
+ logger.debug("Subject certificate has different SKI %s, not revoking", subject_cert.gSKI())
+ raise rpki.exceptions.NotInDatabase
+ logger.debug("Revoking certificate %s", ski)
+ now = rpki.sundial.now()
+ pubd_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
self.revoke_subject_cert(now)
- subject_cert = None
- if subject_cert is not None and subject_cert.getNotAfter() <= now + self.rpki_subject_regen:
- logger.debug("Subject certificate has reached expiration threshold, regenerating")
- self.revoke_subject_cert(now)
- subject_cert = None
- if subject_cert is not None and self.root_newer_than_subject():
- logger.debug("Root certificate has changed, regenerating subject")
- self.revoke_subject_cert(now)
- subject_cert = None
- self.get_root_cert()
- if subject_cert is not None:
- return subject_cert
- pkcs10 = old_pkcs10 if new_pkcs10 is None else new_pkcs10
- if pkcs10 is None:
- logger.debug("No PKCS #10 request, can't generate subject certificate yet")
- return None
- resources = self.rpki_root_cert.get_3779resources()
- notAfter = now + self.rpki_subject_lifetime
- logger.info("Generating subject cert %s with resources %s, expires %s",
- self.rpki_base_uri + self.rpki_subject_cert, resources, notAfter)
- req_key = pkcs10.getPublicKey()
- req_sia = pkcs10.get_SIA()
- self.next_serial_number()
- subject_cert = self.rpki_root_cert.issue(
- keypair = self.rpki_root_key,
- subject_key = req_key,
- serial = self.serial_number,
- sia = req_sia,
- aia = self.rpki_root_cert_uri,
- crldp = self.rpki_base_uri + self.rpki_root_crl,
- resources = resources,
- notBefore = now,
- notAfter = notAfter)
- self.set_subject_cert(subject_cert)
- self.generate_crl_and_manifest(now)
- return subject_cert
-
- def generate_crl_and_manifest(self, now):
- subject_cert = self.get_subject_cert()
- self.next_serial_number()
- self.next_crl_number()
- while self.revoked and self.revoked[0][1] + 2 * self.rpki_subject_regen < now:
- del self.revoked[0]
- crl = rpki.x509.CRL.generate(
- keypair = self.rpki_root_key,
- issuer = self.rpki_root_cert,
- serial = self.crl_number,
- thisUpdate = now,
- nextUpdate = now + self.rpki_subject_regen,
- revokedCertificates = self.revoked)
- fn = os.path.join(self.rpki_root_dir, self.rpki_root_crl)
- logger.debug("Writing CRL %s", fn)
- f = open(fn, "wb")
- f.write(crl.get_DER())
- f.close()
- manifest_content = [(self.rpki_root_crl, crl)]
- if subject_cert is not None:
- manifest_content.append((self.rpki_subject_cert, subject_cert))
- manifest_resources = rpki.resource_set.resource_bag.from_inheritance()
- manifest_keypair = rpki.x509.RSA.generate()
- manifest_cert = self.rpki_root_cert.issue(
- keypair = self.rpki_root_key,
- subject_key = manifest_keypair.get_public(),
- serial = self.serial_number,
- sia = (None, None, self.rpki_base_uri + self.rpki_root_manifest),
- aia = self.rpki_root_cert_uri,
- crldp = self.rpki_base_uri + self.rpki_root_crl,
- resources = manifest_resources,
- notBefore = now,
- notAfter = now + self.rpki_subject_lifetime,
- is_ca = False)
- manifest = rpki.x509.SignedManifest.build(
- serial = self.crl_number,
- thisUpdate = now,
- nextUpdate = now + self.rpki_subject_regen,
- names_and_objs = manifest_content,
- keypair = manifest_keypair,
- certs = manifest_cert)
- fn = os.path.join(self.rpki_root_dir, self.rpki_root_manifest)
- logger.debug("Writing manifest %s", fn)
- f = open(fn, "wb")
- f.write(manifest.get_DER())
- f.close()
-
- def revoke_subject_cert(self, now):
- self.revoked.append((self.get_subject_cert().getSerial(), now))
-
- def compose_response(self, r_msg, pkcs10 = None):
- subject_cert = self.issue_subject_cert_maybe(pkcs10)
- rc = rpki.up_down.class_elt()
- rc.class_name = self.rpki_class_name
- rc.cert_url = rpki.up_down.multi_uri(self.rpki_root_cert_uri)
- rc.from_resource_bag(self.rpki_root_cert.get_3779resources())
- rc.issuer = self.rpki_root_cert
- r_msg.payload.classes.append(rc)
- if subject_cert is not None:
- rc.certs.append(rpki.up_down.certificate_elt())
- rc.certs[0].cert_url = rpki.up_down.multi_uri(self.rpki_base_uri + self.rpki_subject_cert)
- rc.certs[0].cert = subject_cert
-
- def up_down_handler(self, query, path, cb):
- try:
- q_cms = cms_msg(DER = query)
- q_msg = q_cms.unwrap((self.bpki_ta, self.child_bpki_cert))
- self.cms_timestamp = q_cms.check_replay(self.cms_timestamp, path)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Problem decoding PDU")
- return cb(400, reason = "Could not decode PDU: %s" % e)
-
- def done(r_msg):
- cb(200, body = cms_msg().wrap(
- r_msg, self.rootd_bpki_key, self.rootd_bpki_cert,
- self.rootd_bpki_crl if self.include_bpki_crl else None))
-
- try:
- q_msg.serve_top_level(None, done)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- try:
- logger.exception("Exception serving up-down request %r", q_msg)
- done(q_msg.serve_error(e))
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Exception while generating error report")
- cb(500, reason = "Could not process PDU: %s" % e)
-
-
- def next_crl_number(self):
- if self.crl_number is None:
- try:
- crl = rpki.x509.CRL(DER_file = os.path.join(self.rpki_root_dir, self.rpki_root_crl))
- self.crl_number = crl.getCRLNumber()
- except: # pylint: disable=W0702
- self.crl_number = 0
- self.crl_number += 1
- return self.crl_number
-
-
- def next_serial_number(self):
- if self.serial_number is None:
- subject_cert = self.get_subject_cert()
- if subject_cert is not None:
- self.serial_number = subject_cert.getSerial() + 1
- else:
- self.serial_number = 0
- self.serial_number += 1
- return self.serial_number
-
-
- def __init__(self):
-
- global rootd
- rootd = self # Gross, but simpler than what we'd have to do otherwise
-
- self.rpki_root_cert = None
- self.serial_number = None
- self.crl_number = None
- self.revoked = []
- self.cms_timestamp = None
-
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize")
- parser.add_argument("--pidfile",
- help = "override default location of pid file")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
-
- rpki.log.init("rootd", args)
-
- self.cfg = rpki.config.parser(args.config, "rootd")
- self.cfg.set_global_flags()
-
- if not args.foreground:
- rpki.daemonize.daemon(pidfile = args.pidfile)
-
- self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
- self.rootd_bpki_key = rpki.x509.RSA( Auto_update = self.cfg.get("rootd-bpki-key"))
- self.rootd_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("rootd-bpki-cert"))
- self.rootd_bpki_crl = rpki.x509.CRL( Auto_update = self.cfg.get("rootd-bpki-crl"))
- self.child_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("child-bpki-cert"))
-
- self.http_server_host = self.cfg.get("server-host", "")
- self.http_server_port = self.cfg.getint("server-port")
-
- self.rpki_class_name = self.cfg.get("rpki-class-name", "wombat")
-
- self.rpki_root_dir = self.cfg.get("rpki-root-dir")
- self.rpki_base_uri = self.cfg.get("rpki-base-uri", "rsync://" + self.rpki_class_name + ".invalid/")
-
- self.rpki_root_key = rpki.x509.RSA(Auto_update = self.cfg.get("rpki-root-key"))
- self.rpki_root_cert_file = self.cfg.get("rpki-root-cert")
- self.rpki_root_cert_uri = self.cfg.get("rpki-root-cert-uri", self.rpki_base_uri + "root.cer")
-
- self.rpki_root_manifest = self.cfg.get("rpki-root-manifest", "root.mft")
- self.rpki_root_crl = self.cfg.get("rpki-root-crl", "root.crl")
- self.rpki_subject_cert = self.cfg.get("rpki-subject-cert", "child.cer")
- self.rpki_subject_pkcs10 = self.cfg.get("rpki-subject-pkcs10", "child.pkcs10")
-
- self.rpki_subject_lifetime = rpki.sundial.timedelta.parse(self.cfg.get("rpki-subject-lifetime", "8w"))
- self.rpki_subject_regen = rpki.sundial.timedelta.parse(self.cfg.get("rpki-subject-regen", self.rpki_subject_lifetime.convert_to_seconds() / 2))
-
- self.include_bpki_crl = self.cfg.getboolean("include-bpki-crl", False)
-
- rpki.http.server(host = self.http_server_host,
- port = self.http_server_port,
- handlers = (("/", self.up_down_handler, rpki.up_down.allowed_content_types),))
+ self.del_subject_cert()
+ self.del_subject_pkcs10()
+ SubElement(r_msg, q_msg[0].tag, class_name = class_name, ski = ski)
+ self.generate_crl_and_manifest(now, pubd_msg)
+ self.publish(pubd_msg)
+
+
+ # Need to do something about mapping exceptions to up-down error
+ # codes, right now everything shows up as "internal error".
+ #
+ #exceptions = {
+ # rpki.exceptions.ClassNameUnknown : 1201,
+ # rpki.exceptions.NoActiveCA : 1202,
+ # (rpki.exceptions.ClassNameUnknown, revoke_pdu) : 1301,
+ # (rpki.exceptions.NotInDatabase, revoke_pdu) : 1302 }
+ #
+ # Might be that what we want here is a subclass of
+ # rpki.exceptions.RPKI_Exception which carries an extra data field
+ # for the up-down error code, so that we can add the correct code
+ # when we instantiate it.
+ #
+ # There are also a few that are also schema violations, which means
+ # we'd have to catch them before validating or pick them out of a
+ # message that failed validation or otherwise break current
+ # modularity. Maybe an optional pre-validation check method hook in
+ # rpki.x509.XML_CMS_object which we can use to intercept such things?
+
+
+ def handler(self, request, q_der):
+ try:
+ q_cms = rpki.up_down.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((self.bpki_ta, self.child_bpki_cert))
+ q_type = q_msg.get("type")
+ logger.info("Serving %s query", q_type)
+ r_msg = Element(rpki.up_down.tag_message, nsmap = rpki.up_down.nsmap,
+ version = rpki.up_down.version,
+ sender = q_msg.get("recipient"), recipient = q_msg.get("sender"),
+ type = q_type + "_response")
+ try:
+ self.rpkid_cms_timestamp = q_cms.check_replay(self.rpkid_cms_timestamp, request.path)
+ getattr(self, "handle_" + q_type)(q_msg, r_msg)
+ except Exception, e:
+ logger.exception("Exception processing up-down %s message", q_type)
+ rpki.up_down.generate_error_response_from_exception(r_msg, e, q_type)
+ request.send_cms_response(rpki.up_down.cms_msg().wrap(
+ r_msg, self.rootd_bpki_key, self.rootd_bpki_cert,
+ self.rootd_bpki_crl if self.include_bpki_crl else None))
+ except Exception, e:
+ logger.exception("Unhandled exception processing up-down message")
+ request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+
+
+ def next_crl_number(self):
+ if self.crl_number is None:
+ try:
+ crl = rpki.x509.CRL(DER_file = self.rpki_root_crl_file)
+ self.crl_number = crl.getCRLNumber()
+            except:                         # pylint: disable=W0702
+ self.crl_number = 0
+ self.crl_number += 1
+ return self.crl_number
+
+
+ def next_serial_number(self):
+ if self.serial_number is None:
+ subject_cert = self.get_subject_cert()
+ if subject_cert is not None:
+ self.serial_number = subject_cert.getSerial() + 1
+ else:
+ self.serial_number = 0
+ self.serial_number += 1
+ return self.serial_number
+
+
+ def __init__(self):
+ self.serial_number = None
+ self.crl_number = None
+ self.revoked = []
+ self.rpkid_cms_timestamp = None
+ self.pubd_replay_tracker = ReplayTracker()
+
+ os.environ["TZ"] = "UTC"
+ time.tzset()
+
+ self.cfg = rpki.config.argparser(section = "rootd", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground", default = False,
+ help = "do not daemonize")
+ self.cfg.add_argument("--pidfile",
+                              default = os.path.join(rpki.daemonize.default_pid_directory,
+                                                     "rootd.pid"),
+ help = "override default location of pid file")
+ self.cfg.add_logging_arguments()
+        args = self.cfg.argparser.parse_args()
+
+ self.cfg.configure_logging(args = args, ident = "rootd")
+
+ self.cfg.set_global_flags()
+
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
+
+ self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
+ self.rootd_bpki_key = rpki.x509.RSA( Auto_update = self.cfg.get("rootd-bpki-key"))
+ self.rootd_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("rootd-bpki-cert"))
+ self.rootd_bpki_crl = rpki.x509.CRL( Auto_update = self.cfg.get("rootd-bpki-crl"))
+ self.child_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("child-bpki-cert"))
+
+ if self.cfg.has_option("pubd-bpki-cert"):
+ self.pubd_bpki_cert = rpki.x509.X509(Auto_update = self.cfg.get("pubd-bpki-cert"))
+ else:
+ self.pubd_bpki_cert = None
+
+ self.http_server_host = self.cfg.get("server-host", "")
+ self.http_server_port = self.cfg.getint("server-port")
+
+ self.rpki_class_name = self.cfg.get("rpki-class-name")
+
+ self.rpki_root_key = rpki.x509.RSA( Auto_update = self.cfg.get("rpki-root-key-file"))
+ self.rpki_root_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpki-root-cert-file"))
+ self.rpki_root_cert_uri = self.cfg.get("rpki-root-cert-uri")
+ self.rpki_root_cert_hash = None
+
+ self.rpki_root_manifest_file = self.cfg.get("rpki-root-manifest-file")
+ self.rpki_root_manifest_uri = self.cfg.get("rpki-root-manifest-uri")
+
+ self.rpki_root_crl_file = self.cfg.get("rpki-root-crl-file")
+ self.rpki_root_crl_uri = self.cfg.get("rpki-root-crl-uri")
+
+ self.rpki_subject_cert_file = self.cfg.get("rpki-subject-cert-file")
+ self.rpki_subject_cert_uri = self.cfg.get("rpki-subject-cert-uri")
+ self.rpki_subject_pkcs10 = self.cfg.get("rpki-subject-pkcs10-file")
+ self.rpki_subject_lifetime = rpki.sundial.timedelta.parse(self.cfg.get("rpki-subject-lifetime", "8w"))
+ self.rpki_subject_regen = rpki.sundial.timedelta.parse(self.cfg.get("rpki-subject-regen",
+ self.rpki_subject_lifetime.convert_to_seconds() / 2))
+
+ self.include_bpki_crl = self.cfg.getboolean("include-bpki-crl", False)
+
+ self.pubd_url = self.cfg.get("pubd-contact-uri")
+
+ self.rrdp_notification_uri = self.cfg.get("rrdp-notification-uri")
+
+ rpki.http_simple.server(host = self.http_server_host,
+ port = self.http_server_port,
+ handlers = (("/", self.handler, rpki.up_down.allowed_content_types),))
diff --git a/rpki/rpkic.py b/rpki/rpkic.py
index 126ce828..5e0efe0f 100644
--- a/rpki/rpkic.py
+++ b/rpki/rpkic.py
@@ -24,864 +24,941 @@ an overview of the available commands; type "help foo" for (more) detailed help
on the "foo" command.
"""
-# NB: As of this writing, I'm trying really hard to avoid having this
-# program depend on a Django settings.py file. This may prove to be a
-# waste of time in the long run, but for for now, this means that one
-# has to be careful about exactly how and when one imports Django
-# modules, or anything that imports Django modules. Bottom line is
-# that we don't import such modules until we need them.
-
import os
-import argparse
import sys
+import pwd
import time
+import argparse
import rpki.config
import rpki.sundial
import rpki.log
-import rpki.http
import rpki.resource_set
import rpki.relaxng
import rpki.exceptions
import rpki.left_right
import rpki.x509
-import rpki.async
import rpki.version
-from rpki.cli import Cmd, parsecmd, cmdarg
+from lxml.etree import SubElement
-class BadPrefixSyntax(Exception): "Bad prefix syntax."
-class CouldntTalkToDaemon(Exception): "Couldn't talk to daemon."
-class BadXMLMessage(Exception): "Bad XML message."
-class PastExpiration(Exception): "Expiration date has already passed."
-class CantRunRootd(Exception): "Can't run rootd."
+from rpki.cli import Cmd, parsecmd, cmdarg
module_doc = __doc__
-class main(Cmd):
- prompt = "rpkic> "
-
- completedefault = Cmd.filename_complete
-
- # Top-level argparser, for stuff that one might want when starting
- # up the interactive command loop. Not sure -i belongs here, but
- # it's harmless so leave it here for the moment.
-
- top_argparser = argparse.ArgumentParser(add_help = False)
- top_argparser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- top_argparser.add_argument("-i", "--identity", "--handle",
- help = "set initial entity handdle")
- top_argparser.add_argument("--profile",
- help = "enable profiling, saving data to PROFILE")
-
- # Argparser for non-interactive commands (no command loop).
-
- full_argparser = argparse.ArgumentParser(parents = [top_argparser],
- description = module_doc)
- argsubparsers = full_argparser.add_subparsers(title = "Commands", metavar = "")
-
- def __init__(self):
-
- Cmd.__init__(self)
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- # Try parsing just the arguments that make sense if we're
- # going to be running an interactive command loop. If that
- # parses everything, we're interactive, otherwise, it's either
- # a non-interactive command or a parse error, so we let the full
- # parser sort that out for us.
-
- args, argv = self.top_argparser.parse_known_args()
- self.interactive = not argv
- if not self.interactive:
- args = self.full_argparser.parse_args()
-
- self.cfg_file = args.config
- self.handle = args.identity
-
- if args.profile:
- import cProfile
- prof = cProfile.Profile()
- try:
- prof.runcall(self.main, args)
- finally:
- prof.dump_stats(args.profile)
- print "Dumped profile data to %s" % args.profile
- else:
- self.main(args)
-
- def main(self, args):
- rpki.log.init("rpkic")
- self.read_config()
- if self.interactive:
- self.cmdloop_with_history()
- else:
- args.func(self, args)
-
- def read_config(self):
- global rpki # pylint: disable=W0602
-
- try:
- cfg = rpki.config.parser(self.cfg_file, "myrpki")
- cfg.set_global_flags()
- except IOError, e:
- sys.exit("%s: %s" % (e.strerror, e.filename))
-
- self.histfile = cfg.get("history_file", os.path.expanduser("~/.rpkic_history"))
- self.autosync = cfg.getboolean("autosync", True, section = "rpkic")
-
- import django
-
- from django.conf import settings
-
- settings.configure(
- DATABASES = { "default" : {
- "ENGINE" : "django.db.backends.mysql",
- "NAME" : cfg.get("sql-database", section = "irdbd"),
- "USER" : cfg.get("sql-username", section = "irdbd"),
- "PASSWORD" : cfg.get("sql-password", section = "irdbd"),
- "HOST" : "",
- "PORT" : "",
- "OPTIONS" : { "init_command": "SET storage_engine=INNODB",
- "charset" : "latin1" }}},
- INSTALLED_APPS = ("rpki.irdb",),
- MIDDLEWARE_CLASSES = (), # API change, feh
- )
-
- if django.VERSION >= (1, 7): # API change, feh
- from django.apps import apps
- apps.populate(settings.INSTALLED_APPS)
-
- import rpki.irdb # pylint: disable=W0621
-
- try:
- rpki.irdb.models.ca_certificate_lifetime = rpki.sundial.timedelta.parse(
- cfg.get("bpki_ca_certificate_lifetime", section = "rpkic"))
- except rpki.config.ConfigParser.Error:
- pass
-
- try:
- rpki.irdb.models.ee_certificate_lifetime = rpki.sundial.timedelta.parse(
- cfg.get("bpki_ee_certificate_lifetime", section = "rpkic"))
- except rpki.config.ConfigParser.Error:
- pass
-
- try:
- rpki.irdb.models.crl_interval = rpki.sundial.timedelta.parse(
- cfg.get("bpki_crl_interval", section = "rpkic"))
- except rpki.config.ConfigParser.Error:
- pass
-
- import django.core.management
- django.core.management.call_command("syncdb", verbosity = 0, load_initial_data = False)
-
- self.zoo = rpki.irdb.Zookeeper(cfg = cfg, handle = self.handle, logstream = sys.stdout)
-
-
- def do_help(self, arg):
+class swap_uids(object):
"""
- List available commands with "help" or detailed help with "help cmd".
+ Context manager to wrap os.setreuid() calls safely.
"""
- argv = arg.split()
-
- if not argv:
- #return self.full_argparser.print_help()
- return self.print_topics(
- self.doc_header,
- sorted(set(name[3:] for name in self.get_names()
- if name.startswith("do_")
- and getattr(self, name).__doc__)),
- 15, 80)
-
- try:
- return getattr(self, "help_" + argv[0])()
- except AttributeError:
- pass
-
- func = getattr(self, "do_" + argv[0], None)
-
- try:
- return func.argparser.print_help()
- except AttributeError:
- pass
+ def __init__(self):
+ self.uid = os.getuid()
+ self.euid = os.geteuid()
- try:
- return self.stdout.write(func.__doc__ + "\n")
- except AttributeError:
- pass
+ def __enter__(self):
+ os.setreuid(self.euid, self.uid)
+ return self
- self.stdout.write((self.nohelp + "\n") % arg)
+ def __exit__(self, _type, value, traceback):
+ os.setreuid(self.uid, self.euid)
+ return False
- def irdb_handle_complete(self, manager, text, line, begidx, endidx):
- return [obj.handle for obj in manager.all() if obj.handle and obj.handle.startswith(text)]
-
-
- @parsecmd(argsubparsers,
- cmdarg("handle", help = "new handle"))
- def do_select_identity(self, args):
- """
- Select an identity handle for use with later commands.
+def open_swapped_uids(*open_args):
"""
-
- self.zoo.reset_identity(args.handle)
-
- def complete_select_identity(self, *args):
- return self.irdb_handle_complete(rpki.irdb.ResourceHolderCA.objects, *args)
-
-
- @parsecmd(argsubparsers)
- def do_initialize(self, args):
- """
- Initialize an RPKI installation. DEPRECATED.
-
- This command reads the configuration file, creates the BPKI and
- EntityDB directories, generates the initial BPKI certificates, and
- creates an XML file describing the resource-holding aspect of this
- RPKI installation.
+ Open a file with UIDs swapped for the duration of the open() call.
"""
- rootd_case = self.zoo.run_rootd and self.zoo.handle == self.zoo.cfg.get("handle")
+ with swap_uids():
+ return open(*open_args)
- r = self.zoo.initialize()
- r.save("%s.identity.xml" % self.zoo.handle,
- None if rootd_case else sys.stdout)
- if rootd_case:
- r = self.zoo.configure_rootd()
- if r is not None:
- r.save("%s.%s.repository-request.xml" % (self.zoo.handle, self.zoo.handle), sys.stdout)
-
- self.zoo.write_bpki_files()
-
-
- @parsecmd(argsubparsers,
- cmdarg("handle", help = "handle of entity to create"))
- def do_create_identity(self, args):
- """
- Create a new resource-holding entity.
-
- Returns XML file describing the new resource holder.
-
- This command is idempotent: calling it for a resource holder which
- already exists returns the existing identity.
- """
-
- self.zoo.reset_identity(args.handle)
-
- r = self.zoo.initialize_resource_bpki()
- r.save("%s.identity.xml" % self.zoo.handle, sys.stdout)
-
-
- @parsecmd(argsubparsers)
- def do_initialize_server_bpki(self, args):
- """
- Initialize server BPKI portion of an RPKI installation.
-
- Reads server configuration from configuration file and creates the
- server BPKI objects needed to start daemons.
- """
-
- self.zoo.initialize_server_bpki()
- self.zoo.write_bpki_files()
-
-
- @parsecmd(argsubparsers)
- def do_update_bpki(self, args):
- """
- Update BPKI certificates. Assumes an existing RPKI installation.
-
- Basic plan here is to reissue all BPKI certificates we can, right
- now. In the long run we might want to be more clever about only
- touching ones that need maintenance, but this will do for a start.
-
- We also reissue CRLs for all CAs.
-
- Most likely this should be run under cron.
- """
-
- self.zoo.update_bpki()
- self.zoo.write_bpki_files()
- try:
- self.zoo.synchronize_bpki()
- except Exception, e:
- print "Couldn't push updated BPKI material into daemons: %s" % e
-
-
- @parsecmd(argsubparsers,
- cmdarg("--child_handle", help = "override default handle for new child"),
- cmdarg("--valid_until", help = "override default validity interval"),
- cmdarg("child_xml", help = "XML file containing child's identity"))
- def do_configure_child(self, args):
- """
- Configure a new child of this RPKI entity.
-
- This command extracts the child's data from an XML input file,
- cross-certifies the child's resource-holding BPKI certificate, and
- generates an XML output file describing the relationship between
- the child and this parent, including this parent's BPKI data and
- up-down protocol service URI.
- """
-
- r, child_handle = self.zoo.configure_child(args.child_xml, args.child_handle, args.valid_until)
- r.save("%s.%s.parent-response.xml" % (self.zoo.handle, child_handle), sys.stdout)
- self.zoo.synchronize_ca()
-
-
- @parsecmd(argsubparsers,
- cmdarg("child_handle", help = "handle of child to delete"))
- def do_delete_child(self, args):
- """
- Delete a child of this RPKI entity.
- """
-
- try:
- self.zoo.delete_child(args.child_handle)
- self.zoo.synchronize_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Child.DoesNotExist:
- print "No such child \"%s\"" % args.child_handle
-
- def complete_delete_child(self, *args):
- return self.irdb_handle_complete(self.zoo.resource_ca.children, *args)
-
-
- @parsecmd(argsubparsers,
- cmdarg("--parent_handle", help = "override default handle for new parent"),
- cmdarg("parent_xml", help = "XML file containing parent's response"))
- def do_configure_parent(self, args):
- """
- Configure a new parent of this RPKI entity.
-
- This command reads the parent's response XML, extracts the
- parent's BPKI and service URI information, cross-certifies the
- parent's BPKI data into this entity's BPKI, and checks for offers
- or referrals of publication service. If a publication offer or
- referral is present, we generate a request-for-service message to
- that repository, in case the user wants to avail herself of the
- referral or offer.
-
- We do NOT attempt automatic synchronization with rpkid at the
- completion of this command, because synchronization at this point
- will usually fail due to the repository not being set up yet. If
- you know what you are doing and for some reason really want to
- synchronize here, run the synchronize command yourself.
- """
-
- r, parent_handle = self.zoo.configure_parent(args.parent_xml, args.parent_handle)
- r.save("%s.%s.repository-request.xml" % (self.zoo.handle, parent_handle), sys.stdout)
-
-
- @parsecmd(argsubparsers,
- cmdarg("parent_handle", help = "handle of parent to delete"))
- def do_delete_parent(self, args):
- """
- Delete a parent of this RPKI entity.
- """
-
- try:
- self.zoo.delete_parent(args.parent_handle)
- self.zoo.synchronize_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Parent.DoesNotExist:
- print "No such parent \"%s\"" % args.parent_handle
+class main(Cmd):
- def complete_delete_parent(self, *args):
- return self.irdb_handle_complete(self.zoo.resource_ca.parents, *args)
+ prompt = "rpkic> "
+ completedefault = Cmd.filename_complete
+
+ # Top-level argparser, for stuff that one might want when starting
+ # up the interactive command loop. Not sure -i belongs here, but
+ # it's harmless so leave it here for the moment.
+
+ top_argparser = argparse.ArgumentParser(add_help = False)
+ top_argparser.add_argument("-c", "--config",
+ help = "override default location of configuration file")
+ top_argparser.add_argument("-i", "--identity", "--handle",
+                               help = "set initial entity handle")
+ top_argparser.add_argument("--profile",
+ help = "enable profiling, saving data to PROFILE")
+
+ # Argparser for non-interactive commands (no command loop).
+
+ full_argparser = argparse.ArgumentParser(parents = [top_argparser],
+ description = module_doc)
+ argsubparsers = full_argparser.add_subparsers(title = "Commands", metavar = "")
+
+ def __init__(self):
+ Cmd.__init__(self)
+ os.environ["TZ"] = "UTC"
+ time.tzset()
+
+ # Try parsing just the arguments that make sense if we're
+ # going to be running an interactive command loop. If that
+ # parses everything, we're interactive, otherwise, it's either
+ # a non-interactive command or a parse error, so we let the full
+ # parser sort that out for us.
+
+ args, argv = self.top_argparser.parse_known_args()
+ self.interactive = not argv
+ if not self.interactive:
+ args = self.full_argparser.parse_args()
+
+ self.cfg_file = args.config
+ self.handle = args.identity
+
+ if args.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(self.main, args)
+ finally:
+ prof.dump_stats(args.profile)
+ print "Dumped profile data to %s" % args.profile
+ else:
+ self.main(args)
+
+ def main(self, args):
+ self.read_config()
+ if self.interactive:
+ self.cmdloop_with_history()
+ else:
+ args.func(self, args)
+
+ def read_history(self):
+ """
+ UID-swapping wrapper for parent .read_history() method.
+ """
- @parsecmd(argsubparsers)
- def do_configure_root(self, args):
- """
- Configure the current resource holding identity as a root.
+ with swap_uids():
+ Cmd.read_history(self)
+
+ def save_history(self):
+ """
+ UID-swapping wrapper for parent .save_history() method.
+ """
- This configures rpkid to talk to rootd as (one of) its parent(s).
- Returns repository request XML file like configure_parent does.
- """
+ with swap_uids():
+ Cmd.save_history(self)
- r = self.zoo.configure_rootd()
- if r is not None:
- r.save("%s.%s.repository-request.xml" % (self.zoo.handle, self.zoo.handle), sys.stdout)
- self.zoo.write_bpki_files()
+ def read_config(self):
+ # pylint: disable=W0201,W0602,W0621
- @parsecmd(argsubparsers)
- def do_delete_root(self, args):
- """
- Delete local RPKI root as parent of the current entity.
+ global rpki
- This tells the current rpkid identity (<self/>) to stop talking to
- rootd.
- """
+ try:
+ cfg = rpki.config.parser(set_filename = self.cfg_file, section = "myrpki")
+ cfg.configure_logging(
+ args = argparse.Namespace(
+ log_destination = "stderr",
+ log_level = "warning"),
+ ident = "rpkic")
+ cfg.set_global_flags()
+ except IOError, e:
+ sys.exit("%s: %s" % (e.strerror, e.filename))
- try:
- self.zoo.delete_rootd()
- self.zoo.synchronize_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Rootd.DoesNotExist:
- print "No associated rootd"
+ self.histfile = cfg.get("history_file", os.path.expanduser("~/.rpkic_history"))
+ self.autosync = cfg.getboolean("autosync", True, section = "rpkic")
+ os.environ.update(DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb")
- @parsecmd(argsubparsers,
- cmdarg("--flat", help = "use flat publication scheme", action = "store_true"),
- cmdarg("--sia_base", help = "override SIA base value"),
- cmdarg("client_xml", help = "XML file containing client request"))
- def do_configure_publication_client(self, args):
- """
- Configure publication server to know about a new client.
-
- This command reads the client's request for service,
- cross-certifies the client's BPKI data, and generates a response
- message containing the repository's BPKI data and service URI.
- """
-
- r, client_handle = self.zoo.configure_publication_client(args.client_xml, args.sia_base, args.flat)
- r.save("%s.repository-response.xml" % client_handle.replace("/", "."), sys.stdout)
- try:
- self.zoo.synchronize_pubd()
- except rpki.irdb.Repository.DoesNotExist:
- pass
+ import django
+ django.setup()
+ import rpki.irdb
- @parsecmd(argsubparsers,
- cmdarg("client_handle", help = "handle of client to delete"))
- def do_delete_publication_client(self, args):
- """
- Delete a publication client of this RPKI entity.
- """
+ try:
+ rpki.irdb.models.ca_certificate_lifetime = rpki.sundial.timedelta.parse(
+ cfg.get("bpki_ca_certificate_lifetime", section = "rpkic"))
+ except rpki.config.ConfigParser.Error:
+ pass
- try:
- self.zoo.delete_publication_client(args.client_handle)
- self.zoo.synchronize_pubd()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Client.DoesNotExist:
- print "No such client \"%s\"" % args.client_handle
+ try:
+ rpki.irdb.models.ee_certificate_lifetime = rpki.sundial.timedelta.parse(
+ cfg.get("bpki_ee_certificate_lifetime", section = "rpkic"))
+ except rpki.config.ConfigParser.Error:
+ pass
- def complete_delete_publication_client(self, *args):
- return self.irdb_handle_complete(self.zoo.server_ca.clients, *args)
+ try:
+ rpki.irdb.models.crl_interval = rpki.sundial.timedelta.parse(
+ cfg.get("bpki_crl_interval", section = "rpkic"))
+ except rpki.config.ConfigParser.Error:
+ pass
+ self.zoo = rpki.irdb.Zookeeper(cfg = cfg, handle = self.handle, logstream = sys.stdout)
- @parsecmd(argsubparsers,
- cmdarg("--parent_handle", help = "override default parent handle"),
- cmdarg("repository_xml", help = "XML file containing repository response"))
- def do_configure_repository(self, args):
- """
- Configure a publication repository for this RPKI entity.
- This command reads the repository's response to this entity's
- request for publication service, extracts and cross-certifies the
- BPKI data and service URI, and links the repository data with the
- corresponding parent data in our local database.
- """
+ def do_help(self, arg):
+ """
+ List available commands with "help" or detailed help with "help cmd".
+ """
- self.zoo.configure_repository(args.repository_xml, args.parent_handle)
- self.zoo.synchronize_ca()
+ argv = arg.split()
+ if not argv:
+ #return self.full_argparser.print_help()
+ return self.print_topics(
+ self.doc_header,
+ sorted(set(name[3:] for name in self.get_names()
+ if name.startswith("do_")
+ and getattr(self, name).__doc__)),
+ 15, 80)
- @parsecmd(argsubparsers,
- cmdarg("repository_handle", help = "handle of repository to delete"))
- def do_delete_repository(self, args):
- """
- Delete a repository of this RPKI entity.
- """
+ try:
+ return getattr(self, "help_" + argv[0])()
+ except AttributeError:
+ pass
- try:
- self.zoo.delete_repository(args.repository_handle)
- self.zoo.synchronize_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.Repository.DoesNotExist:
- print "No such repository \"%s\"" % args.repository_handle
+ func = getattr(self, "do_" + argv[0], None)
- def complete_delete_repository(self, *args):
- return self.irdb_handle_complete(self.zoo.resource_ca.repositories, *args)
+ try:
+ return func.argparser.print_help()
+ except AttributeError:
+ pass
+ try:
+ return self.stdout.write(func.__doc__ + "\n")
+ except AttributeError:
+ pass
- @parsecmd(argsubparsers)
- def do_delete_identity(self, args):
- """
- Delete the current RPKI identity (rpkid <self/> object).
- """
+ self.stdout.write((self.nohelp + "\n") % arg)
- try:
- self.zoo.delete_self()
- self.zoo.synchronize_deleted_ca()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
+ def irdb_handle_complete(self, manager, text, line, begidx, endidx):
+ return [obj.handle for obj in manager.all() if obj.handle and obj.handle.startswith(text)]
- @parsecmd(argsubparsers,
- cmdarg("--valid_until", help = "override default new validity interval"),
- cmdarg("child_handle", help = "handle of child to renew"))
- def do_renew_child(self, args):
- """
- Update validity period for one child entity.
- """
- self.zoo.renew_children(args.child_handle, args.valid_until)
- self.zoo.synchronize_ca()
- if self.autosync:
- self.zoo.run_rpkid_now()
+ @parsecmd(argsubparsers,
+ cmdarg("handle", help = "new handle"))
+ def do_select_identity(self, args):
+ """
+ Select an identity handle for use with later commands.
+ """
- def complete_renew_child(self, *args):
- return self.irdb_handle_complete(self.zoo.resource_ca.children, *args)
+ self.zoo.reset_identity(args.handle)
+ def complete_select_identity(self, *args):
+ return self.irdb_handle_complete(rpki.irdb.models.ResourceHolderCA.objects, *args)
- @parsecmd(argsubparsers,
- cmdarg("--valid_until", help = "override default new validity interval"))
- def do_renew_all_children(self, args):
- """
- Update validity period for all child entities.
- """
- self.zoo.renew_children(None, args.valid_until)
- self.zoo.synchronize_ca()
- if self.autosync:
- self.zoo.run_rpkid_now()
+ @parsecmd(argsubparsers)
+ def do_initialize(self, args):
+ """
+ Initialize an RPKI installation. DEPRECATED.
+ This command reads the configuration file, creates the BPKI and
+ EntityDB directories, generates the initial BPKI certificates, and
+ creates an XML file describing the resource-holding aspect of this
+ RPKI installation.
+ """
- @parsecmd(argsubparsers,
- cmdarg("prefixes_csv", help = "CSV file listing prefixes"))
- def do_load_prefixes(self, args):
- """
- Load prefixes into IRDB from CSV file.
- """
+ r = self.zoo.initialize()
+ with swap_uids():
+ r.save("%s.identity.xml" % self.zoo.handle, sys.stdout)
- self.zoo.load_prefixes(args.prefixes_csv, True)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ self.zoo.write_bpki_files()
- @parsecmd(argsubparsers)
- def do_show_child_resources(self, args):
- """
- Show resources assigned to children.
- """
+ @parsecmd(argsubparsers,
+ cmdarg("handle", help = "handle of entity to create"))
+ def do_create_identity(self, args):
+ """
+ Create a new resource-holding entity.
- for child in self.zoo.resource_ca.children.all():
- resources = child.resource_bag
- print "Child:", child.handle
- if resources.asn:
- print " ASN:", resources.asn
- if resources.v4:
- print " IPv4:", resources.v4
- if resources.v6:
- print " IPv6:", resources.v6
+ Returns XML file describing the new resource holder.
+ This command is idempotent: calling it for a resource holder which
+ already exists returns the existing identity.
+ """
- @parsecmd(argsubparsers)
- def do_show_roa_requests(self, args):
- """
- Show ROA requests.
- """
+ self.zoo.reset_identity(args.handle)
- for roa_request in self.zoo.resource_ca.roa_requests.all():
- prefixes = roa_request.roa_prefix_bag
- print "ASN: ", roa_request.asn
- if prefixes.v4:
- print " IPv4:", prefixes.v4
- if prefixes.v6:
- print " IPv6:", prefixes.v6
+ r = self.zoo.initialize_resource_bpki()
+ with swap_uids():
+ r.save("%s.identity.xml" % self.zoo.handle, sys.stdout)
- @parsecmd(argsubparsers)
- def do_show_ghostbuster_requests(self, args):
- """
- Show Ghostbuster requests.
- """
+ @parsecmd(argsubparsers)
+ def do_initialize_server_bpki(self, args):
+ """
+ Initialize server BPKI portion of an RPKI installation.
- for ghostbuster_request in self.zoo.resource_ca.ghostbuster_requests.all():
- print "Parent:", ghostbuster_request.parent or "*"
- print ghostbuster_request.vcard
+ Reads server configuration from configuration file and creates the
+ server BPKI objects needed to start daemons.
+ """
+ self.zoo.initialize_server_bpki()
+ self.zoo.write_bpki_files()
- @parsecmd(argsubparsers)
- def do_show_received_resources(self, args):
- """
- Show resources received by this entity from its parent(s).
- """
- for pdu in self.zoo.call_rpkid(
- rpki.left_right.list_received_resources_elt.make_pdu(self_handle = self.zoo.handle)):
+ @parsecmd(argsubparsers)
+ def do_update_bpki(self, args):
+ """
+ Update BPKI certificates. Assumes an existing RPKI installation.
+
+ Basic plan here is to reissue all BPKI certificates we can, right
+ now. In the long run we might want to be more clever about only
+ touching ones that need maintenance, but this will do for a start.
- print "Parent: ", pdu.parent_handle
- print " notBefore:", pdu.notBefore
- print " notAfter: ", pdu.notAfter
- print " URI: ", pdu.uri
- print " SIA URI: ", pdu.sia_uri
- print " AIA URI: ", pdu.aia_uri
- print " ASN: ", pdu.asn
- print " IPv4: ", pdu.ipv4
- print " IPv6: ", pdu.ipv6
+ We also reissue CRLs for all CAs.
+ Most likely this should be run under cron.
+ """
+
+ self.zoo.update_bpki()
+ self.zoo.write_bpki_files()
+ try:
+ self.zoo.synchronize_bpki()
+ except Exception, e:
+ print "Couldn't push updated BPKI material into daemons: %s" % e
- @parsecmd(argsubparsers)
- def do_show_published_objects(self, args):
- """
- Show published objects.
- """
- for pdu in self.zoo.call_rpkid(
- rpki.left_right.list_published_objects_elt.make_pdu(self_handle = self.zoo.handle)):
+ @parsecmd(argsubparsers,
+ cmdarg("--child_handle", help = "override default handle for new child"),
+ cmdarg("--valid_until", help = "override default validity interval"),
+ cmdarg("child_xml", help = "XML file containing child's identity"))
+ def do_configure_child(self, args):
+ """
+ Configure a new child of this RPKI entity.
- track = rpki.x509.uri_dispatch(pdu.uri)(Base64 = pdu.obj).tracking_data(pdu.uri)
- child = pdu.child_handle
+ This command extracts the child's data from an XML input file,
+ cross-certifies the child's resource-holding BPKI certificate, and
+ generates an XML output file describing the relationship between
+ the child and this parent, including this parent's BPKI data and
+ up-down protocol service URI.
+ """
+
+ with open_swapped_uids(args.child_xml) as f:
+ r, child_handle = self.zoo.configure_child(f, args.child_handle, args.valid_until)
+ with swap_uids():
+ r.save("%s.%s.parent-response.xml" % (self.zoo.handle, child_handle), sys.stdout)
+ self.zoo.synchronize_ca()
- if child is None:
- print track
- else:
- print track, child
+ @parsecmd(argsubparsers,
+ cmdarg("child_handle", help = "handle of child to delete"))
+ def do_delete_child(self, args):
+ """
+ Delete a child of this RPKI entity.
+ """
+
+ try:
+ self.zoo.delete_child(args.child_handle)
+ self.zoo.synchronize_ca()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.Child.DoesNotExist:
+ print "No such child \"%s\"" % args.child_handle
+
+ def complete_delete_child(self, *args):
+ return self.irdb_handle_complete(self.zoo.resource_ca.children, *args)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--parent_handle", help = "override default handle for new parent"),
+ cmdarg("parent_xml", help = "XML file containing parent's response"))
+ def do_configure_parent(self, args):
+ """
+ Configure a new parent of this RPKI entity.
+
+ This command reads the parent's response XML, extracts the
+ parent's BPKI and service URI information, cross-certifies the
+ parent's BPKI data into this entity's BPKI, and checks for offers
+ or referrals of publication service. If a publication offer or
+ referral is present, we generate a request-for-service message to
+ that repository, in case the user wants to avail herself of the
+ referral or offer.
+
+ We do NOT attempt automatic synchronization with rpkid at the
+ completion of this command, because synchronization at this point
+ will usually fail due to the repository not being set up yet. If
+ you know what you are doing and for some reason really want to
+ synchronize here, run the synchronize command yourself.
+ """
+
+ with open_swapped_uids(args.parent_xml) as f:
+ r, parent_handle = self.zoo.configure_parent(f, args.parent_handle)
+ with swap_uids():
+ r.save("%s.%s.repository-request.xml" % (self.zoo.handle, parent_handle), sys.stdout)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("parent_handle", help = "handle of parent to delete"))
+ def do_delete_parent(self, args):
+ """
+ Delete a parent of this RPKI entity.
+ """
+
+ try:
+ self.zoo.delete_parent(args.parent_handle)
+ self.zoo.synchronize_ca()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.Parent.DoesNotExist:
+ print "No such parent \"%s\"" % args.parent_handle
+
+ def complete_delete_parent(self, *args):
+ return self.irdb_handle_complete(self.zoo.resource_ca.parents, *args)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--resources", help = "restrict root to specified resources",
+ type = rpki.resource_set.resource_bag.from_str,
+ default = "0.0.0.0/0,::/0,0-4294967295"),
+ cmdarg("--root_handle", help = "override default handle for new root"))
+ def do_configure_root(self, args):
+ """
+ Configure the current resource holding identity as a root.
+
+ Returns repository request XML file like configure_parent does.
+ """
+
+ print "Generating root for resources {!s}".format(args.resources) # XXX
+
+ r = self.zoo.configure_root(args.root_handle, args.resources)
+ if r is not None:
+ with swap_uids():
+ r.save("%s.%s.repository-request.xml" % (self.zoo.handle, self.zoo.handle), sys.stdout)
+ self.zoo.write_bpki_files()
+
+
+ @parsecmd(argsubparsers)
+ def do_delete_root(self, args):
+ """
+ Delete local RPKI root as parent of the current entity.
+ """
+
+ raise NotImplementedError
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--root_handle", help = "override default handle"),
+ cmdarg("--output_file", help = "override default output filename"))
+ def do_extract_root_certificate(self, args):
+ """
+ Extract self-signed RPKI certificate from a root object.
+ """
+
+ cert, uris = self.zoo.extract_root_certificate_and_uris(args.root_handle)
+ if cert is None:
+ print "No certificate currently available"
+ else:
+ fn = args.output_file or (cert.gSKI() + ".cer")
+ with open_swapped_uids(fn, "wb") as f:
+ print "Writing", f.name
+ f.write(cert.get_DER())
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--root_handle", help = "override default handle"),
+ cmdarg("--output_file", help = "override default output filename"))
+ def do_extract_root_tal(self, args):
+ """
+ Extract self-signed RPKI certificate from a root object.
+ """
+
+ cert, uris = self.zoo.extract_root_certificate_and_uris(args.root_handle)
+ if cert is None:
+ print "No certificate currently available"
+ else:
+ fn = args.output_file or (cert.gSKI() + ".tal")
+ with open_swapped_uids(fn, "w") as f:
+ print "Writing", f.name
+ for uri in uris:
+ f.write(uri + "\n")
+ f.write("\n")
+ f.write(cert.getPublicKey().get_Base64())
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--flat", help = "use flat publication scheme", action = "store_true"),
+ cmdarg("--sia_base", help = "override SIA base value"),
+ cmdarg("client_xml", help = "XML file containing client request"))
+ def do_configure_publication_client(self, args):
+ """
+ Configure publication server to know about a new client.
+
+ This command reads the client's request for service,
+ cross-certifies the client's BPKI data, and generates a response
+ message containing the repository's BPKI data and service URI.
+ """
+
+ with open_swapped_uids(args.client_xml) as f:
+ r, client_handle = self.zoo.configure_publication_client(f, args.sia_base, args.flat)
+ with swap_uids():
+ r.save("%s.repository-response.xml" % client_handle.replace("/", "."), sys.stdout)
+ try:
+ self.zoo.synchronize_pubd()
+ except rpki.irdb.models.Repository.DoesNotExist:
+ pass
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("client_handle", help = "handle of client to delete"))
+ def do_delete_publication_client(self, args):
+ """
+ Delete a publication client of this RPKI entity.
+ """
+
+ try:
+ self.zoo.delete_publication_client(args.client_handle)
+ self.zoo.synchronize_pubd()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.Client.DoesNotExist:
+ print "No such client \"%s\"" % args.client_handle
+
+ def complete_delete_publication_client(self, *args):
+ return self.irdb_handle_complete(self.zoo.server_ca.clients, *args)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--parent_handle", help = "override default parent handle"),
+ cmdarg("repository_xml", help = "XML file containing repository response"))
+ def do_configure_repository(self, args):
+ """
+ Configure a publication repository for this RPKI entity.
+
+ This command reads the repository's response to this entity's
+ request for publication service, extracts and cross-certifies the
+ BPKI data and service URI, and links the repository data with the
+ corresponding parent data in our local database.
+ """
+
+ with open_swapped_uids(args.repository_xml) as f:
+ self.zoo.configure_repository(f, args.parent_handle)
+ self.zoo.synchronize_ca()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("repository_handle", help = "handle of repository to delete"))
+ def do_delete_repository(self, args):
+ """
+ Delete a repository of this RPKI entity.
+ """
+
+ try:
+ self.zoo.delete_repository(args.repository_handle)
+ self.zoo.synchronize_ca()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.Repository.DoesNotExist:
+ print "No such repository \"%s\"" % args.repository_handle
+
+ def complete_delete_repository(self, *args):
+ return self.irdb_handle_complete(self.zoo.resource_ca.repositories, *args)
+
+
+ @parsecmd(argsubparsers)
+ def do_delete_identity(self, args):
+ """
+ Delete the current RPKI identity (rpkid <tenant/> object).
+ """
+
+ try:
+ self.zoo.delete_tenant()
+ self.zoo.synchronize_deleted_ca()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--valid_until", help = "override default new validity interval"),
+ cmdarg("child_handle", help = "handle of child to renew"))
+ def do_renew_child(self, args):
+ """
+ Update validity period for one child entity.
+ """
+
+ self.zoo.renew_children(args.child_handle, args.valid_until)
+ self.zoo.synchronize_ca()
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+ def complete_renew_child(self, *args):
+ return self.irdb_handle_complete(self.zoo.resource_ca.children, *args)
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--valid_until", help = "override default new validity interval"))
+ def do_renew_all_children(self, args):
+ """
+ Update validity period for all child entities.
+ """
+
+ self.zoo.renew_children(None, args.valid_until)
+ self.zoo.synchronize_ca()
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("prefixes_csv", help = "CSV file listing prefixes"))
+ def do_load_prefixes(self, args):
+ """
+ Load prefixes into IRDB from CSV file.
+ """
+
+ with open_swapped_uids(args.prefixes_csv) as f:
+ self.zoo.load_prefixes(f, True)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers)
+ def do_show_child_resources(self, args):
+ """
+ Show resources assigned to children.
+ """
+
+ for child in self.zoo.resource_ca.children.all():
+ resources = child.resource_bag
+ print "Child:", child.handle
+ if resources.asn:
+ print " ASN:", resources.asn
+ if resources.v4:
+ print " IPv4:", resources.v4
+ if resources.v6:
+ print " IPv6:", resources.v6
+
+
+ @parsecmd(argsubparsers)
+ def do_show_roa_requests(self, args):
+ """
+ Show ROA requests.
+ """
+
+ for roa_request in self.zoo.resource_ca.roa_requests.all():
+ prefixes = roa_request.roa_prefix_bag
+ print "ASN: ", roa_request.asn
+ if prefixes.v4:
+ print " IPv4:", prefixes.v4
+ if prefixes.v6:
+ print " IPv6:", prefixes.v6
+
+
+ @parsecmd(argsubparsers)
+ def do_show_ghostbuster_requests(self, args):
+ """
+ Show Ghostbuster requests.
+ """
+
+ for ghostbuster_request in self.zoo.resource_ca.ghostbuster_requests.all():
+ print "Parent:", ghostbuster_request.parent or "*"
+ print ghostbuster_request.vcard
+
+
+ @parsecmd(argsubparsers)
+ def do_show_received_resources(self, args):
+ """
+ Show resources received by this entity from its parent(s).
+ """
+
+ q_msg = self.zoo.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_list_received_resources, tenant_handle = self.zoo.handle)
+
+ for r_pdu in self.zoo.call_rpkid(q_msg):
+
+ print "Parent: ", r_pdu.get("parent_handle")
+ print " notBefore:", r_pdu.get("notBefore")
+ print " notAfter: ", r_pdu.get("notAfter")
+ print " URI: ", r_pdu.get("uri")
+ print " SIA URI: ", r_pdu.get("sia_uri")
+ print " AIA URI: ", r_pdu.get("aia_uri")
+ print " ASN: ", r_pdu.get("asn")
+ print " IPv4: ", r_pdu.get("ipv4")
+ print " IPv6: ", r_pdu.get("ipv6")
+
+
+ @parsecmd(argsubparsers)
+ def do_show_published_objects(self, args):
+ """
+ Show published objects.
+ """
+
+ q_msg = self.zoo.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_list_published_objects, tenant_handle = self.zoo.handle)
+
+ for r_pdu in self.zoo.call_rpkid(q_msg):
+ uri = r_pdu.get("uri")
+ track = rpki.x509.uri_dispatch(uri)(Base64 = r_pdu.text).tracking_data(uri)
+ child_handle = r_pdu.get("child_handle")
+
+ if child_handle is None:
+ print track
+ else:
+ print track, child_handle
+
+
+ @parsecmd(argsubparsers)
+ def do_show_bpki(self, args):
+ """
+ Show this entity's BPKI objects.
+ """
+
+ print "Self: ", self.zoo.resource_ca.handle
+ print " notBefore:", self.zoo.resource_ca.certificate.getNotBefore()
+ print " notAfter: ", self.zoo.resource_ca.certificate.getNotAfter()
+ print " Subject: ", self.zoo.resource_ca.certificate.getSubject()
+ print " SKI: ", self.zoo.resource_ca.certificate.hSKI()
+ for bsc in self.zoo.resource_ca.bscs.all():
+ print "BSC: ", bsc.handle
+ print " notBefore:", bsc.certificate.getNotBefore()
+ print " notAfter: ", bsc.certificate.getNotAfter()
+ print " Subject: ", bsc.certificate.getSubject()
+ print " SKI: ", bsc.certificate.hSKI()
+ for parent in self.zoo.resource_ca.parents.all():
+ print "Parent: ", parent.handle
+ print " notBefore:", parent.certificate.getNotBefore()
+ print " notAfter: ", parent.certificate.getNotAfter()
+ print " Subject: ", parent.certificate.getSubject()
+ print " SKI: ", parent.certificate.hSKI()
+ print " URL: ", parent.service_uri
+ for child in self.zoo.resource_ca.children.all():
+ print "Child: ", child.handle
+ print " notBefore:", child.certificate.getNotBefore()
+ print " notAfter: ", child.certificate.getNotAfter()
+ print " Subject: ", child.certificate.getSubject()
+ print " SKI: ", child.certificate.hSKI()
+ for repository in self.zoo.resource_ca.repositories.all():
+ print "Repository: ", repository.handle
+ print " notBefore:", repository.certificate.getNotBefore()
+ print " notAfter: ", repository.certificate.getNotAfter()
+ print " Subject: ", repository.certificate.getSubject()
+ print " SKI: ", repository.certificate.hSKI()
+ print " URL: ", repository.service_uri
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("asns_csv", help = "CSV file listing ASNs"))
+ def do_load_asns(self, args):
+ """
+ Load ASNs into IRDB from CSV file.
+ """
+
+ with open_swapped_uids(args.asns_csv) as f:
+ self.zoo.load_asns(f, True)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("roa_requests_csv", help = "CSV file listing ROA requests"))
+ def do_load_roa_requests(self, args):
+ """
+ Load ROA requests into IRDB from CSV file.
+ """
+
+ with open_swapped_uids(args.roa_requests_csv) as f:
+ self.zoo.load_roa_requests(f)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("ghostbuster_requests", help = "file listing Ghostbuster requests as a sequence of VCards"))
+ def do_load_ghostbuster_requests(self, args):
+ """
+ Load Ghostbuster requests into IRDB from file.
+ """
+
+ with open_swapped_uids(args.ghostbuster_requests) as f:
+ self.zoo.load_ghostbuster_requests(f)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+
+ @parsecmd(argsubparsers,
+ cmdarg("--valid_until", help = "override default validity interval"),
+ cmdarg("router_certificate_request_xml", help = "file containing XML router certificate request"))
+ def do_add_router_certificate_request(self, args):
+ """
+ Load router certificate request(s) into IRDB from XML file.
+ """
+
+ with open_swapped_uids(args.router_certificate_request_xml) as f:
+ self.zoo.add_router_certificate_request(f, args.valid_until)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+
+ @parsecmd(argsubparsers,
+ cmdarg("gski", help = "g(SKI) of router certificate request to delete"))
+ def do_delete_router_certificate_request(self, args):
+ """
+ Delete a router certificate request from the IRDB.
+ """
+
+ try:
+ self.zoo.delete_router_certificate_request(args.gski)
+ if self.autosync:
+ self.zoo.run_rpkid_now()
+ except rpki.irdb.models.ResourceHolderCA.DoesNotExist:
+ print "No such resource holder \"%s\"" % self.zoo.handle
+ except rpki.irdb.models.EECertificateRequest.DoesNotExist:
+ print "No certificate request matching g(SKI) \"%s\"" % args.gski
+
+ def complete_delete_router_certificate_request(self, text, line, begidx, endidx):
+ return [obj.gski for obj in self.zoo.resource_ca.ee_certificate_requests.all()
+ if obj.gski and obj.gski.startswith(text)]
+
+
+ @parsecmd(argsubparsers)
+ def do_show_router_certificate_requests(self, args):
+ """
+ Show this entity's router certificate requests.
+ """
+
+ for req in self.zoo.resource_ca.ee_certificate_requests.all():
+ print "%s %s %s %s" % (req.gski, req.valid_until, req.cn, req.sn)
+
+
+ # What about updates? Validity interval, change router-id, change
+ # ASNs. Not sure what this looks like yet, blunder ahead with the
+ # core code while mulling over the UI.
+
+
+ @parsecmd(argsubparsers)
+ def do_synchronize(self, args):
+ """
+ Whack daemons to match IRDB.
- @parsecmd(argsubparsers)
- def do_show_bpki(self, args):
- """
- Show this entity's BPKI objects.
- """
+ This command may be replaced by implicit synchronization embedded
+ in of other commands, haven't decided yet.
+ """
- print "Self: ", self.zoo.resource_ca.handle
- print " notBefore:", self.zoo.resource_ca.certificate.getNotBefore()
- print " notAfter: ", self.zoo.resource_ca.certificate.getNotAfter()
- print " Subject: ", self.zoo.resource_ca.certificate.getSubject()
- print " SKI: ", self.zoo.resource_ca.certificate.hSKI()
- for bsc in self.zoo.resource_ca.bscs.all():
- print "BSC: ", bsc.handle
- print " notBefore:", bsc.certificate.getNotBefore()
- print " notAfter: ", bsc.certificate.getNotAfter()
- print " Subject: ", bsc.certificate.getSubject()
- print " SKI: ", bsc.certificate.hSKI()
- for parent in self.zoo.resource_ca.parents.all():
- print "Parent: ", parent.handle
- print " notBefore:", parent.certificate.getNotBefore()
- print " notAfter: ", parent.certificate.getNotAfter()
- print " Subject: ", parent.certificate.getSubject()
- print " SKI: ", parent.certificate.hSKI()
- print " URL: ", parent.service_uri
- for child in self.zoo.resource_ca.children.all():
- print "Child: ", child.handle
- print " notBefore:", child.certificate.getNotBefore()
- print " notAfter: ", child.certificate.getNotAfter()
- print " Subject: ", child.certificate.getSubject()
- print " SKI: ", child.certificate.hSKI()
- for repository in self.zoo.resource_ca.repositories.all():
- print "Repository: ", repository.handle
- print " notBefore:", repository.certificate.getNotBefore()
- print " notAfter: ", repository.certificate.getNotAfter()
- print " Subject: ", repository.certificate.getSubject()
- print " SKI: ", repository.certificate.hSKI()
- print " URL: ", repository.service_uri
-
-
- @parsecmd(argsubparsers,
- cmdarg("asns_csv", help = "CSV file listing ASNs"))
- def do_load_asns(self, args):
- """
- Load ASNs into IRDB from CSV file.
- """
+ self.zoo.synchronize()
- self.zoo.load_asns(args.asns_csv, True)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ @parsecmd(argsubparsers)
+ def do_force_publication(self, args):
+ """
+ Whack rpkid to force (re)publication of everything.
- @parsecmd(argsubparsers,
- cmdarg("roa_requests_csv", help = "CSV file listing ROA requests"))
- def do_load_roa_requests(self, args):
- """
- Load ROA requests into IRDB from CSV file.
- """
+ This is not usually necessary, as rpkid automatically publishes
+ changes it makes, but this command can be useful occasionally when
+ a fault or configuration error has left rpkid holding data which
+ it has not been able to publish.
+ """
- self.zoo.load_roa_requests(args.roa_requests_csv)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ self.zoo.publish_world_now()
- @parsecmd(argsubparsers,
- cmdarg("ghostbuster_requests", help = "file listing Ghostbuster requests as a sequence of VCards"))
- def do_load_ghostbuster_requests(self, args):
- """
- Load Ghostbuster requests into IRDB from file.
- """
+ @parsecmd(argsubparsers)
+ def do_force_reissue(self, args):
+ """
+ Whack rpkid to force reissuance of everything.
- self.zoo.load_ghostbuster_requests(args.ghostbuster_requests)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ This is not usually necessary, as rpkid reissues automatically
+ objects automatically as needed, but this command can be useful
+ occasionally when a fault or configuration error has prevented
+ rpkid from reissuing when it should have.
+ """
+ self.zoo.reissue()
- @parsecmd(argsubparsers,
- cmdarg("--valid_until", help = "override default validity interval"),
- cmdarg("router_certificate_request_xml", help = "file containing XML router certificate request"))
- def do_add_router_certificate_request(self, args):
- """
- Load router certificate request(s) into IRDB from XML file.
- """
- self.zoo.add_router_certificate_request(args.router_certificate_request_xml, args.valid_until)
- if self.autosync:
- self.zoo.run_rpkid_now()
+ @parsecmd(argsubparsers)
+ def do_force_run_now(self, args):
+ """
+ Force rpkid to run periodic tasks for this Tenant immediately.
- @parsecmd(argsubparsers,
- cmdarg("gski", help = "g(SKI) of router certificate request to delete"))
- def do_delete_router_certificate_request(self, args):
- """
- Delete a router certificate request from the IRDB.
- """
+ This is not usually necessary, as rpkid runs all of these
+ tasks on a regular schedule, but this command can be useful
+ occasionally when configuration change is taking a long time
+ to percolate through a series of parent/child exchanges.
+ """
- try:
- self.zoo.delete_router_certificate_request(args.gski)
- if self.autosync:
self.zoo.run_rpkid_now()
- except rpki.irdb.ResourceHolderCA.DoesNotExist:
- print "No such resource holder \"%s\"" % self.zoo.handle
- except rpki.irdb.EECertificateRequest.DoesNotExist:
- print "No certificate request matching g(SKI) \"%s\"" % args.gski
- def complete_delete_router_certificate_request(self, text, line, begidx, endidx):
- return [obj.gski for obj in self.zoo.resource_ca.ee_certificate_requests.all()
- if obj.gski and obj.gski.startswith(text)]
+ @parsecmd(argsubparsers)
+ def do_up_down_rekey(self, args):
+ """
+ Initiate a "rekey" operation.
- @parsecmd(argsubparsers)
- def do_show_router_certificate_requests(self, args):
- """
- Show this entity's router certificate requests.
- """
-
- for req in self.zoo.resource_ca.ee_certificate_requests.all():
- print "%s %s %s %s" % (req.gski, req.valid_until, req.cn, req.sn)
-
-
- # What about updates? Validity interval, change router-id, change
- # ASNs. Not sure what this looks like yet, blunder ahead with the
- # core code while mulling over the UI.
-
-
- @parsecmd(argsubparsers)
- def do_synchronize(self, args):
- """
- Whack daemons to match IRDB.
-
- This command may be replaced by implicit synchronization embedded
- in of other commands, haven't decided yet.
- """
-
- self.zoo.synchronize()
-
-
- @parsecmd(argsubparsers)
- def do_force_publication(self, args):
- """
- Whack rpkid to force (re)publication of everything.
-
- This is not usually necessary, as rpkid automatically publishes
- changes it makes, but this command can be useful occasionally when
- a fault or configuration error has left rpkid holding data which
- it has not been able to publish.
- """
-
- self.zoo.publish_world_now()
-
-
- @parsecmd(argsubparsers)
- def do_force_reissue(self, args):
- """
- Whack rpkid to force reissuance of everything.
+ This tells rpkid to generate new keys for each certificate issued
+ to it via the up-down protocol.
- This is not usually necessary, as rpkid reissues automatically
- objects automatically as needed, but this command can be useful
- occasionally when a fault or configuration error has prevented
- rpkid from reissuing when it should have.
- """
+ Rekeying is the first stage of a key rollover operation. You will
+ need to follow it up later with a "revoke" operation to clean up
+ the old keys
+ """
- self.zoo.reissue()
+ self.zoo.rekey()
- @parsecmd(argsubparsers)
- def do_up_down_rekey(self, args):
- """
- Initiate a "rekey" operation.
+ @parsecmd(argsubparsers)
+ def do_up_down_revoke(self, args):
+ """
+ Initiate a "revoke" operation.
- This tells rpkid to generate new keys for each certificate issued
- to it via the up-down protocol.
-
- Rekeying is the first stage of a key rollover operation. You will
- need to follow it up later with a "revoke" operation to clean up
- the old keys
- """
-
- self.zoo.rekey()
-
-
- @parsecmd(argsubparsers)
- def do_up_down_revoke(self, args):
- """
- Initiate a "revoke" operation.
+ This tells rpkid to clean up old keys formerly used by
+ certificates issued to it via the up-down protocol.
- This tells rpkid to clean up old keys formerly used by
- certificates issued to it via the up-down protocol.
+ This is the cleanup stage of a key rollover operation.
+ """
- This is the cleanup stage of a key rollover operation.
- """
+ self.zoo.revoke()
- self.zoo.revoke()
+ @parsecmd(argsubparsers)
+ def do_revoke_forgotten(self, args):
+ """
+ Initiate a "revoke_forgotten" operation.
- @parsecmd(argsubparsers)
- def do_revoke_forgotten(self, args):
- """
- Initiate a "revoke_forgotten" operation.
-
- This tells rpkid to ask its parent to revoke certificates for
- which rpkid does not know the private keys.
-
- This should never happen during ordinary operation, but can happen
- if rpkid is misconfigured or its database has been damaged, so we
- need a way to resynchronize rpkid with its parent in such cases.
- We could do this automatically, but as we don't know the precise
- cause of the failure we don't know if it's recoverable locally
- (eg, from an SQL backup), so we require a manual trigger before
- discarding possibly-useful certificates.
- """
+ This tells rpkid to ask its parent to revoke certificates for
+ which rpkid does not know the private keys.
- self.zoo.revoke_forgotten()
+ This should never happen during ordinary operation, but can happen
+ if rpkid is misconfigured or its database has been damaged, so we
+ need a way to resynchronize rpkid with its parent in such cases.
+ We could do this automatically, but as we don't know the precise
+ cause of the failure we don't know if it's recoverable locally
+ (eg, from an SQL backup), so we require a manual trigger before
+ discarding possibly-useful certificates.
+ """
+ self.zoo.revoke_forgotten()
- @parsecmd(argsubparsers)
- def do_clear_all_sql_cms_replay_protection(self, args):
- """
- Tell rpkid and pubd to clear replay protection.
- This clears the replay protection timestamps stored in SQL for all
- entities known to rpkid and pubd. This is a fairly blunt
- instrument, but as we don't expect this to be necessary except in
- the case of gross misconfiguration, it should suffice
- """
+ @parsecmd(argsubparsers)
+ def do_clear_all_sql_cms_replay_protection(self, args):
+ """
+ Tell rpkid and pubd to clear replay protection.
- self.zoo.clear_all_sql_cms_replay_protection()
+ This clears the replay protection timestamps stored in SQL for all
+ entities known to rpkid and pubd. This is a fairly blunt
+ instrument, but as we don't expect this to be necessary except in
+ the case of gross misconfiguration, it should suffice
+ """
+ self.zoo.clear_all_sql_cms_replay_protection()
- @parsecmd(argsubparsers)
- def do_version(self, args):
- """
- Show current software version number.
- """
- print rpki.version.VERSION
+ @parsecmd(argsubparsers)
+ def do_version(self, args):
+ """
+ Show current software version number.
+ """
+ print rpki.version.VERSION
- @parsecmd(argsubparsers)
- def do_list_self_handles(self, args):
- """
- List all <self/> handles in this rpkid instance.
- """
- for ca in rpki.irdb.ResourceHolderCA.objects.all():
- print ca.handle
+ @parsecmd(argsubparsers)
+ def do_list_tenant_handles(self, args):
+ """
+ List all <tenant/> handles in this rpkid instance.
+ """
+ for ca in rpki.irdb.models.ResourceHolderCA.objects.all():
+ print ca.handle
diff --git a/rpki/rpkid.py b/rpki/rpkid.py
index 628209af..4b2333d2 100644
--- a/rpki/rpkid.py
+++ b/rpki/rpkid.py
@@ -22,2470 +22,768 @@ RPKI CA engine.
"""
import os
-import re
import time
import random
-import base64
import logging
+import weakref
import argparse
+import urlparse
+
+import tornado.gen
+import tornado.web
+import tornado.locks
+import tornado.ioloop
+import tornado.queues
+import tornado.httputil
+import tornado.httpclient
+import tornado.httpserver
+
+from lxml.etree import Element, SubElement, tostring as ElementToString
+
import rpki.resource_set
import rpki.up_down
import rpki.left_right
import rpki.x509
-import rpki.sql
-import rpki.http
import rpki.config
import rpki.exceptions
import rpki.relaxng
import rpki.log
-import rpki.async
import rpki.daemonize
-import rpki.rpkid_tasks
-
-logger = logging.getLogger(__name__)
-
-class main(object):
- """
- Main program for rpkid.
- """
-
- def __init__(self):
-
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- self.irdbd_cms_timestamp = None
- self.irbe_cms_timestamp = None
- self.task_current = None
- self.task_queue = []
-
- parser = argparse.ArgumentParser(description = __doc__)
- parser.add_argument("-c", "--config",
- help = "override default location of configuration file")
- parser.add_argument("-f", "--foreground", action = "store_true",
- help = "do not daemonize")
- parser.add_argument("--pidfile",
- help = "override default location of pid file")
- parser.add_argument("--profile",
- help = "enable profiling, saving data to PROFILE")
- rpki.log.argparse_setup(parser)
- args = parser.parse_args()
-
- self.profile = args.profile
-
- rpki.log.init("rpkid", args)
-
- self.cfg = rpki.config.parser(args.config, "rpkid")
- self.cfg.set_global_flags()
-
- if not args.foreground:
- rpki.daemonize.daemon(pidfile = args.pidfile)
-
- if self.profile:
- import cProfile
- prof = cProfile.Profile()
- try:
- prof.runcall(self.main)
- finally:
- prof.dump_stats(self.profile)
- logger.info("Dumped profile data to %s", self.profile)
- else:
- self.main()
-
- def main(self):
-
- startup_msg = self.cfg.get("startup-message", "")
- if startup_msg:
- logger.info(startup_msg)
-
- if self.profile:
- logger.info("Running in profile mode with output to %s", self.profile)
-
- self.sql = rpki.sql.session(self.cfg)
-
- self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
- self.irdb_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdb-cert"))
- self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert"))
- self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert"))
- self.rpkid_key = rpki.x509.RSA( Auto_update = self.cfg.get("rpkid-key"))
-
- self.irdb_url = self.cfg.get("irdb-url")
-
- self.http_server_host = self.cfg.get("server-host", "")
- self.http_server_port = self.cfg.getint("server-port")
-
- self.publication_kludge_base = self.cfg.get("publication-kludge-base", "publication/")
-
- # Icky hack to let Iain do some testing quickly, should go away
- # once we sort out whether we can make this change permanent.
- #
- # OK, the stuff to add router certificate support makes enough
- # other changes that we're going to need a migration program in
- # any case, so might as well throw the switch here too, or at
- # least find out if it (still) works as expected.
-
- self.merge_publication_directories = self.cfg.getboolean("merge_publication_directories",
- True)
-
- self.use_internal_cron = self.cfg.getboolean("use-internal-cron", True)
-
- self.initial_delay = random.randint(self.cfg.getint("initial-delay-min", 10),
- self.cfg.getint("initial-delay-max", 120))
-
- # Should be much longer in production
- self.cron_period = rpki.sundial.timedelta(seconds = self.cfg.getint("cron-period", 120))
- self.cron_keepalive = rpki.sundial.timedelta(seconds = self.cfg.getint("cron-keepalive", 0))
- if not self.cron_keepalive:
- self.cron_keepalive = self.cron_period * 4
- self.cron_timeout = None
-
- self.start_cron()
-
- rpki.http.server(
- host = self.http_server_host,
- port = self.http_server_port,
- handlers = (("/left-right", self.left_right_handler),
- ("/up-down/", self.up_down_handler, rpki.up_down.allowed_content_types),
- ("/cronjob", self.cronjob_handler)))
-
- def start_cron(self):
- """
- Start clock for rpkid's internal cron process.
- """
-
- if self.use_internal_cron:
- self.cron_timer = rpki.async.timer(handler = self.cron)
- when = rpki.sundial.now() + rpki.sundial.timedelta(seconds = self.initial_delay)
- logger.debug("Scheduling initial cron pass at %s", when)
- self.cron_timer.set(when)
- else:
- logger.debug("Not using internal clock, start_cron() call ignored")
-
- def irdb_query(self, callback, errback, *q_pdus, **kwargs):
- """
- Perform an IRDB callback query.
- """
-
- try:
- q_types = tuple(type(q_pdu) for q_pdu in q_pdus)
-
- expected_pdu_count = kwargs.pop("expected_pdu_count", None)
- assert len(kwargs) == 0
-
- q_msg = rpki.left_right.msg.query()
- q_msg.extend(q_pdus)
- q_der = rpki.left_right.cms_msg().wrap(q_msg, self.rpkid_key, self.rpkid_cert)
-
- def unwrap(r_der):
- try:
- r_cms = rpki.left_right.cms_msg(DER = r_der)
- r_msg = r_cms.unwrap((self.bpki_ta, self.irdb_cert))
- self.irdbd_cms_timestamp = r_cms.check_replay(self.irdbd_cms_timestamp, self.irdb_url)
- if not r_msg.is_reply() or not all(type(r_pdu) in q_types for r_pdu in r_msg):
- raise rpki.exceptions.BadIRDBReply(
- "Unexpected response to IRDB query: %s" % r_cms.pretty_print_content())
- if expected_pdu_count is not None and len(r_msg) != expected_pdu_count:
- assert isinstance(expected_pdu_count, (int, long))
- raise rpki.exceptions.BadIRDBReply(
- "Expected exactly %d PDU%s from IRDB: %s" % (
- expected_pdu_count, "" if expected_pdu_count == 1 else "s",
- r_cms.pretty_print_content()))
- callback(r_msg)
- except Exception, e:
- errback(e)
-
- rpki.http.client(
- url = self.irdb_url,
- msg = q_der,
- callback = unwrap,
- errback = errback)
-
- except Exception, e:
- errback(e)
-
-
- def irdb_query_child_resources(self, self_handle, child_handle, callback, errback):
- """
- Ask IRDB about a child's resources.
- """
-
- q_pdu = rpki.left_right.list_resources_elt()
- q_pdu.self_handle = self_handle
- q_pdu.child_handle = child_handle
-
- def done(r_msg):
- callback(rpki.resource_set.resource_bag(
- asn = r_msg[0].asn,
- v4 = r_msg[0].ipv4,
- v6 = r_msg[0].ipv6,
- valid_until = r_msg[0].valid_until))
-
- self.irdb_query(done, errback, q_pdu, expected_pdu_count = 1)
-
- def irdb_query_roa_requests(self, self_handle, callback, errback):
- """
- Ask IRDB about self's ROA requests.
- """
-
- q_pdu = rpki.left_right.list_roa_requests_elt()
- q_pdu.self_handle = self_handle
-
- self.irdb_query(callback, errback, q_pdu)
-
- def irdb_query_ghostbuster_requests(self, self_handle, parent_handles, callback, errback):
- """
- Ask IRDB about self's ghostbuster record requests.
- """
-
- q_pdus = []
-
- for parent_handle in parent_handles:
- q_pdu = rpki.left_right.list_ghostbuster_requests_elt()
- q_pdu.self_handle = self_handle
- q_pdu.parent_handle = parent_handle
- q_pdus.append(q_pdu)
-
- self.irdb_query(callback, errback, *q_pdus)
-
- def irdb_query_ee_certificate_requests(self, self_handle, callback, errback):
- """
- Ask IRDB about self's EE certificate requests.
- """
-
- q_pdu = rpki.left_right.list_ee_certificate_requests_elt()
- q_pdu.self_handle = self_handle
-
- self.irdb_query(callback, errback, q_pdu)
-
- def left_right_handler(self, query, path, cb):
- """
- Process one left-right PDU.
- """
-
- def done(r_msg):
- reply = rpki.left_right.cms_msg().wrap(r_msg, self.rpkid_key, self.rpkid_cert)
- self.sql.sweep()
- cb(200, body = reply)
-
- try:
- q_cms = rpki.left_right.cms_msg(DER = query)
- q_msg = q_cms.unwrap((self.bpki_ta, self.irbe_cert))
- self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, path)
- if not q_msg.is_query():
- raise rpki.exceptions.BadQuery("Message type is not query")
- q_msg.serve_top_level(self, done)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- logger.exception("Unhandled exception serving left-right request")
- cb(500, reason = "Unhandled exception %s: %s" % (e.__class__.__name__, e))
-
- up_down_url_regexp = re.compile("/up-down/([-A-Z0-9_]+)/([-A-Z0-9_]+)$", re.I)
-
- def up_down_handler(self, query, path, cb):
- """
- Process one up-down PDU.
- """
-
- def done(reply):
- self.sql.sweep()
- cb(200, body = reply)
-
- try:
- match = self.up_down_url_regexp.search(path)
- if match is None:
- raise rpki.exceptions.BadContactURL("Bad URL path received in up_down_handler(): %s" % path)
- self_handle, child_handle = match.groups()
- child = rpki.left_right.child_elt.sql_fetch_where1(self,
- "self.self_handle = %s AND child.child_handle = %s AND child.self_id = self.self_id",
- (self_handle, child_handle),
- "self")
- if child is None:
- raise rpki.exceptions.ChildNotFound("Could not find child %s of self %s in up_down_handler()" % (child_handle, self_handle))
- child.serve_up_down(query, done)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except (rpki.exceptions.ChildNotFound, rpki.exceptions.BadContactURL), e:
- logger.warning(str(e))
- cb(400, reason = str(e))
- except Exception, e:
- logger.exception("Unhandled exception processing up-down request")
- cb(400, reason = "Could not process PDU: %s" % e)
-
- def checkpoint(self, force = False):
- """
- Record that we were still alive when we got here, by resetting
- keepalive timer.
- """
- if force or self.cron_timeout is not None:
- self.cron_timeout = rpki.sundial.now() + self.cron_keepalive
-
- def task_add(self, task):
- """
- Add a task to the scheduler task queue, unless it's already queued.
- """
- if task not in self.task_queue:
- logger.debug("Adding %r to task queue", task)
- self.task_queue.append(task)
- return True
- else:
- logger.debug("Task %r was already in the task queue", task)
- return False
-
- def task_next(self):
- """
- Pull next task from the task queue and put it the deferred event
- queue (we don't want to run it directly, as that could eventually
- blow out our call stack).
- """
- try:
- self.task_current = self.task_queue.pop(0)
- except IndexError:
- self.task_current = None
- else:
- rpki.async.event_defer(self.task_current)
-
- def task_run(self):
- """
- Run first task on the task queue, unless one is running already.
- """
- if self.task_current is None:
- self.task_next()
-
- def cron(self, cb = None):
- """
- Periodic tasks.
- """
-
- now = rpki.sundial.now()
-
- logger.debug("Starting cron run")
-
- def done():
- self.sql.sweep()
- self.cron_timeout = None
- logger.info("Finished cron run started at %s", now)
- if cb is not None:
- cb()
-
- completion = rpki.rpkid_tasks.CompletionHandler(done)
- try:
- selves = rpki.left_right.self_elt.sql_fetch_all(self)
- except Exception:
- logger.exception("Error pulling self_elts from SQL, maybe SQL server is down?")
- else:
- for s in selves:
- s.schedule_cron_tasks(completion)
- nothing_queued = completion.count == 0
-
- assert self.use_internal_cron or self.cron_timeout is None
-
- if self.cron_timeout is not None and self.cron_timeout < now:
- logger.warning("cron keepalive threshold %s has expired, breaking lock", self.cron_timeout)
- self.cron_timeout = None
-
- if self.use_internal_cron:
- when = now + self.cron_period
- logger.debug("Scheduling next cron run at %s", when)
- self.cron_timer.set(when)
-
- if self.cron_timeout is None:
- self.checkpoint(self.use_internal_cron)
- self.task_run()
-
- elif self.use_internal_cron:
- logger.warning("cron already running, keepalive will expire at %s", self.cron_timeout)
-
- if nothing_queued:
- done()
-
- def cronjob_handler(self, query, path, cb):
- """
- External trigger for periodic tasks. This is somewhat obsolete
- now that we have internal timers, but the test framework still
- uses it.
- """
-
- def done():
- cb(200, body = "OK")
-
- if self.use_internal_cron:
- cb(500, reason = "Running cron internally")
- else:
- logger.debug("Starting externally triggered cron")
- self.cron(done)
-
-class ca_obj(rpki.sql.sql_persistent):
- """
- Internal CA object.
- """
-
- sql_template = rpki.sql.template(
- "ca",
- "ca_id",
- "last_crl_sn",
- ("next_crl_update", rpki.sundial.datetime),
- "last_issued_sn",
- "last_manifest_sn",
- ("next_manifest_update", rpki.sundial.datetime),
- "sia_uri",
- "parent_id",
- "parent_resource_class")
-
- last_crl_sn = 0
- last_issued_sn = 0
- last_manifest_sn = 0
-
- def __repr__(self):
- return rpki.log.log_repr(self, repr(self.parent), self.parent_resource_class)
-
- @property
- @rpki.sql.cache_reference
- def parent(self):
- """
- Fetch parent object to which this CA object links.
- """
- return rpki.left_right.parent_elt.sql_fetch(self.gctx, self.parent_id)
-
- @property
- def ca_details(self):
- """
- Fetch all ca_detail objects that link to this CA object.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s", (self.ca_id,))
-
- @property
- def pending_ca_details(self):
- """
- Fetch the pending ca_details for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'pending'", (self.ca_id,))
-
- @property
- def active_ca_detail(self):
- """
- Fetch the active ca_detail for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where1(self.gctx, "ca_id = %s AND state = 'active'", (self.ca_id,))
-
- @property
- def deprecated_ca_details(self):
- """
- Fetch deprecated ca_details for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'deprecated'", (self.ca_id,))
-
- @property
- def active_or_deprecated_ca_details(self):
- """
- Fetch active and deprecated ca_details for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND (state = 'active' OR state = 'deprecated')", (self.ca_id,))
-
- @property
- def revoked_ca_details(self):
- """
- Fetch revoked ca_details for this CA, if any.
- """
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state = 'revoked'", (self.ca_id,))
-
- @property
- def issue_response_candidate_ca_details(self):
- """
- Fetch ca_details which are candidates for consideration when
- processing an up-down issue_response PDU.
- """
- #return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND latest_ca_cert IS NOT NULL AND state != 'revoked'", (self.ca_id,))
- return ca_detail_obj.sql_fetch_where(self.gctx, "ca_id = %s AND state != 'revoked'", (self.ca_id,))
-
- def construct_sia_uri(self, parent, rc):
- """
- Construct the sia_uri value for this CA given configured
- information and the parent's up-down protocol list_response PDU.
- """
-
- sia_uri = rc.suggested_sia_head and rc.suggested_sia_head.rsync()
- if not sia_uri or not sia_uri.startswith(parent.sia_base):
- sia_uri = parent.sia_base
- if not sia_uri.endswith("/"):
- raise rpki.exceptions.BadURISyntax("SIA URI must end with a slash: %s" % sia_uri)
- # With luck this can go away sometime soon.
- if self.gctx.merge_publication_directories:
- return sia_uri
- else:
- return sia_uri + str(self.ca_id) + "/"
-
- def check_for_updates(self, parent, rc, cb, eb):
- """
- Parent has signaled continued existance of a resource class we
- already knew about, so we need to check for an updated
- certificate, changes in resource coverage, revocation and reissue
- with the same key, etc.
- """
-
- sia_uri = self.construct_sia_uri(parent, rc)
- sia_uri_changed = self.sia_uri != sia_uri
- if sia_uri_changed:
- logger.debug("SIA changed: was %s now %s", self.sia_uri, sia_uri)
- self.sia_uri = sia_uri
- self.sql_mark_dirty()
-
- rc_resources = rc.to_resource_bag()
- cert_map = dict((c.cert.get_SKI(), c) for c in rc.certs)
-
- def loop(iterator, ca_detail):
-
- self.gctx.checkpoint()
-
- rc_cert = cert_map.pop(ca_detail.public_key.get_SKI(), None)
-
- if rc_cert is None:
-
- logger.warning("SKI %s in resource class %s is in database but missing from list_response to %s from %s, maybe parent certificate went away?",
- ca_detail.public_key.gSKI(), rc.class_name, parent.self.self_handle, parent.parent_handle)
- publisher = publication_queue()
- ca_detail.delete(ca = ca_detail.ca, publisher = publisher)
- return publisher.call_pubd(iterator, eb)
-
- else:
-
- if ca_detail.state == "active" and ca_detail.ca_cert_uri != rc_cert.cert_url.rsync():
- logger.debug("AIA changed: was %s now %s", ca_detail.ca_cert_uri, rc_cert.cert_url.rsync())
- ca_detail.ca_cert_uri = rc_cert.cert_url.rsync()
- ca_detail.sql_mark_dirty()
-
- if ca_detail.state in ("pending", "active"):
-
- if ca_detail.state == "pending":
- current_resources = rpki.resource_set.resource_bag()
- else:
- current_resources = ca_detail.latest_ca_cert.get_3779resources()
-
- if (ca_detail.state == "pending" or
- sia_uri_changed or
- ca_detail.latest_ca_cert != rc_cert.cert or
- ca_detail.latest_ca_cert.getNotAfter() != rc_resources.valid_until or
- current_resources.undersized(rc_resources) or
- current_resources.oversized(rc_resources)):
- return ca_detail.update(
- parent = parent,
- ca = self,
- rc = rc,
- sia_uri_changed = sia_uri_changed,
- old_resources = current_resources,
- callback = iterator,
- errback = eb)
-
- iterator()
-
- def done():
- if cert_map:
- logger.warning("Unknown certificate SKI%s %s in resource class %s in list_response to %s from %s, maybe you want to \"revoke_forgotten\"?",
- "" if len(cert_map) == 1 else "s",
- ", ".join(c.cert.gSKI() for c in cert_map.values()),
- rc.class_name, parent.self.self_handle, parent.parent_handle)
- self.gctx.sql.sweep()
- self.gctx.checkpoint()
- cb()
-
- ca_details = self.issue_response_candidate_ca_details
-
- if True:
- skis_parent = set(x.cert.gSKI()
- for x in cert_map.itervalues())
- skis_me = set(x.latest_ca_cert.gSKI()
- for x in ca_details
- if x.latest_ca_cert is not None)
- for ski in skis_parent & skis_me:
- logger.debug("Parent %s agrees that %s has SKI %s in resource class %s",
- parent.parent_handle, parent.self.self_handle, ski, rc.class_name)
- for ski in skis_parent - skis_me:
- logger.debug("Parent %s thinks %s has SKI %s in resource class %s but I don't think so",
- parent.parent_handle, parent.self.self_handle, ski, rc.class_name)
- for ski in skis_me - skis_parent:
- logger.debug("I think %s has SKI %s in resource class %s but parent %s doesn't think so",
- parent.self.self_handle, ski, rc.class_name, parent.parent_handle)
-
- if ca_details:
- rpki.async.iterator(ca_details, loop, done)
- else:
- logger.warning("Existing resource class %s to %s from %s with no certificates, rekeying",
- rc.class_name, parent.self.self_handle, parent.parent_handle)
- self.gctx.checkpoint()
- self.rekey(cb, eb)
-
- @classmethod
- def create(cls, parent, rc, cb, eb):
- """
- Parent has signaled existance of a new resource class, so we need
- to create and set up a corresponding CA object.
- """
-
- self = cls()
- self.gctx = parent.gctx
- self.parent_id = parent.parent_id
- self.parent_resource_class = rc.class_name
- self.sql_store()
- try:
- self.sia_uri = self.construct_sia_uri(parent, rc)
- except rpki.exceptions.BadURISyntax:
- self.sql_delete()
- raise
- ca_detail = ca_detail_obj.create(self)
-
- def done(issue_response):
- c = issue_response.payload.classes[0].certs[0]
- logger.debug("CA %r received certificate %s", self, c.cert_url)
- ca_detail.activate(
- ca = self,
- cert = c.cert,
- uri = c.cert_url,
- callback = cb,
- errback = eb)
-
- logger.debug("Sending issue request to %r from %r", parent, self.create)
- rpki.up_down.issue_pdu.query(parent, self, ca_detail, done, eb)
-
- def delete(self, parent, callback):
- """
- The list of current resource classes received from parent does not
- include the class corresponding to this CA, so we need to delete
- it (and its little dog too...).
-
- All certs published by this CA are now invalid, so need to
- withdraw them, the CRL, and the manifest from the repository,
- delete all child_cert and ca_detail records associated with this
- CA, then finally delete this CA itself.
- """
-
- def lose(e):
- logger.exception("Could not delete CA %r, skipping", self)
- callback()
-
- def done():
- logger.debug("Deleting %r", self)
- self.sql_delete()
- callback()
-
- publisher = publication_queue()
- for ca_detail in self.ca_details:
- ca_detail.delete(ca = self, publisher = publisher, allow_failure = True)
- publisher.call_pubd(done, lose)
-
- def next_serial_number(self):
- """
- Allocate a certificate serial number.
- """
- self.last_issued_sn += 1
- self.sql_mark_dirty()
- return self.last_issued_sn
-
- def next_manifest_number(self):
- """
- Allocate a manifest serial number.
- """
- self.last_manifest_sn += 1
- self.sql_mark_dirty()
- return self.last_manifest_sn
-
- def next_crl_number(self):
- """
- Allocate a CRL serial number.
- """
- self.last_crl_sn += 1
- self.sql_mark_dirty()
- return self.last_crl_sn
-
- def rekey(self, cb, eb):
- """
- Initiate a rekey operation for this ca. Generate a new keypair.
- Request cert from parent using new keypair. Mark result as our
- active ca_detail. Reissue all child certs issued by this ca using
- the new ca_detail.
- """
-
- parent = self.parent
- old_detail = self.active_ca_detail
- new_detail = ca_detail_obj.create(self)
-
- def done(issue_response):
- c = issue_response.payload.classes[0].certs[0]
- logger.debug("CA %r received certificate %s", self, c.cert_url)
- new_detail.activate(
- ca = self,
- cert = c.cert,
- uri = c.cert_url,
- predecessor = old_detail,
- callback = cb,
- errback = eb)
-
- logger.debug("Sending issue request to %r from %r", parent, self.rekey)
- rpki.up_down.issue_pdu.query(parent, self, new_detail, done, eb)
-
- def revoke(self, cb, eb, revoke_all = False):
- """
- Revoke deprecated ca_detail objects associated with this CA, or
- all ca_details associated with this CA if revoke_all is set.
- """
-
- def loop(iterator, ca_detail):
- ca_detail.revoke(cb = iterator, eb = eb)
-
- ca_details = self.ca_details if revoke_all else self.deprecated_ca_details
-
- rpki.async.iterator(ca_details, loop, cb)
-
- def reissue(self, cb, eb):
- """
- Reissue all current certificates issued by this CA.
- """
-
- ca_detail = self.active_ca_detail
- if ca_detail:
- ca_detail.reissue(cb, eb)
- else:
- cb()
-
-class ca_detail_obj(rpki.sql.sql_persistent):
- """
- Internal CA detail object.
- """
-
- sql_template = rpki.sql.template(
- "ca_detail",
- "ca_detail_id",
- ("private_key_id", rpki.x509.RSA),
- ("public_key", rpki.x509.PublicKey),
- ("latest_ca_cert", rpki.x509.X509),
- ("manifest_private_key_id", rpki.x509.RSA),
- ("manifest_public_key", rpki.x509.PublicKey),
- ("latest_manifest_cert", rpki.x509.X509),
- ("latest_manifest", rpki.x509.SignedManifest),
- ("latest_crl", rpki.x509.CRL),
- ("crl_published", rpki.sundial.datetime),
- ("manifest_published", rpki.sundial.datetime),
- "state",
- "ca_cert_uri",
- "ca_id")
-
- crl_published = None
- manifest_published = None
- latest_ca_cert = None
- latest_crl = None
- latest_manifest = None
- ca_cert_uri = None
-
- def __repr__(self):
- return rpki.log.log_repr(self, repr(self.ca), self.state, self.ca_cert_uri)
-
- def sql_decode(self, vals):
- """
- Extra assertions for SQL decode of a ca_detail_obj.
- """
- rpki.sql.sql_persistent.sql_decode(self, vals)
- assert self.public_key is None or self.private_key_id is None or self.public_key.get_DER() == self.private_key_id.get_public_DER()
- assert self.manifest_public_key is None or self.manifest_private_key_id is None or self.manifest_public_key.get_DER() == self.manifest_private_key_id.get_public_DER()
-
- @property
- @rpki.sql.cache_reference
- def ca(self):
- """
- Fetch CA object to which this ca_detail links.
- """
- return ca_obj.sql_fetch(self.gctx, self.ca_id)
-
- def fetch_child_certs(self, child = None, ski = None, unique = False, unpublished = None):
- """
- Fetch all child_cert objects that link to this ca_detail.
- """
- return rpki.rpkid.child_cert_obj.fetch(self.gctx, child, self, ski, unique, unpublished)
-
- @property
- def child_certs(self):
- """
- Fetch all child_cert objects that link to this ca_detail.
- """
- return self.fetch_child_certs()
-
- def unpublished_child_certs(self, when):
- """
- Fetch all unpublished child_cert objects linked to this ca_detail
- with attempted publication dates older than when.
- """
- return self.fetch_child_certs(unpublished = when)
-
- @property
- def revoked_certs(self):
- """
- Fetch all revoked_cert objects that link to this ca_detail.
- """
- return revoked_cert_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
-
- @property
- def roas(self):
- """
- Fetch all ROA objects that link to this ca_detail.
- """
- return rpki.rpkid.roa_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
-
- def unpublished_roas(self, when):
- """
- Fetch all unpublished ROA objects linked to this ca_detail with
- attempted publication dates older than when.
- """
- return rpki.rpkid.roa_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s AND published IS NOT NULL and published < %s", (self.ca_detail_id, when))
-
- @property
- def ghostbusters(self):
- """
- Fetch all Ghostbuster objects that link to this ca_detail.
- """
- return rpki.rpkid.ghostbuster_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
-
- @property
- def ee_certificates(self):
- """
- Fetch all EE certificate objects that link to this ca_detail.
- """
- return rpki.rpkid.ee_cert_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s", (self.ca_detail_id,))
-
- def unpublished_ghostbusters(self, when):
- """
- Fetch all unpublished Ghostbusters objects linked to this
- ca_detail with attempted publication dates older than when.
- """
- return rpki.rpkid.ghostbuster_obj.sql_fetch_where(self.gctx, "ca_detail_id = %s AND published IS NOT NULL and published < %s", (self.ca_detail_id, when))
-
- @property
- def crl_uri(self):
- """
- Return publication URI for this ca_detail's CRL.
- """
- return self.ca.sia_uri + self.crl_uri_tail
-
- @property
- def crl_uri_tail(self):
- """
- Return tail (filename portion) of publication URI for this ca_detail's CRL.
- """
- return self.public_key.gSKI() + ".crl"
-
- @property
- def manifest_uri(self):
- """
- Return publication URI for this ca_detail's manifest.
- """
- return self.ca.sia_uri + self.public_key.gSKI() + ".mft"
-
- def has_expired(self):
- """
- Return whether this ca_detail's certificate has expired.
- """
- return self.latest_ca_cert.getNotAfter() <= rpki.sundial.now()
-
- def covers(self, target):
- """
- Test whether this ca-detail covers a given set of resources.
- """
-
- assert not target.asn.inherit and not target.v4.inherit and not target.v6.inherit
- me = self.latest_ca_cert.get_3779resources()
- return target.asn <= me.asn and target.v4 <= me.v4 and target.v6 <= me.v6
-
- def activate(self, ca, cert, uri, callback, errback, predecessor = None):
- """
- Activate this ca_detail.
- """
-
- publisher = publication_queue()
-
- self.latest_ca_cert = cert
- self.ca_cert_uri = uri.rsync()
- self.generate_manifest_cert()
- self.state = "active"
- self.generate_crl(publisher = publisher)
- self.generate_manifest(publisher = publisher)
- self.sql_store()
-
- if predecessor is not None:
- predecessor.state = "deprecated"
- predecessor.sql_store()
- for child_cert in predecessor.child_certs:
- child_cert.reissue(ca_detail = self, publisher = publisher)
- for roa in predecessor.roas:
- roa.regenerate(publisher = publisher)
- for ghostbuster in predecessor.ghostbusters:
- ghostbuster.regenerate(publisher = publisher)
- predecessor.generate_crl(publisher = publisher)
- predecessor.generate_manifest(publisher = publisher)
-
- publisher.call_pubd(callback, errback)
-
- def delete(self, ca, publisher, allow_failure = False):
- """
- Delete this ca_detail and all of the certs it issued.
-
- If allow_failure is true, we clean up as much as we can but don't
- raise an exception.
- """
-
- repository = ca.parent.repository
- handler = False if allow_failure else None
- for child_cert in self.child_certs:
- publisher.withdraw(cls = rpki.publication.certificate_elt,
- uri = child_cert.uri,
- obj = child_cert.cert,
- repository = repository,
- handler = handler)
- child_cert.sql_mark_deleted()
- for roa in self.roas:
- roa.revoke(publisher = publisher, allow_failure = allow_failure, fast = True)
- for ghostbuster in self.ghostbusters:
- ghostbuster.revoke(publisher = publisher, allow_failure = allow_failure, fast = True)
- try:
- latest_manifest = self.latest_manifest
- except AttributeError:
- latest_manifest = None
- if latest_manifest is not None:
- publisher.withdraw(cls = rpki.publication.manifest_elt,
- uri = self.manifest_uri,
- obj = self.latest_manifest,
- repository = repository,
- handler = handler)
- try:
- latest_crl = self.latest_crl
- except AttributeError:
- latest_crl = None
- if latest_crl is not None:
- publisher.withdraw(cls = rpki.publication.crl_elt,
- uri = self.crl_uri,
- obj = self.latest_crl,
- repository = repository,
- handler = handler)
- self.gctx.sql.sweep()
- for cert in self.revoked_certs: # + self.child_certs
- logger.debug("Deleting %r", cert)
- cert.sql_delete()
- logger.debug("Deleting %r", self)
- self.sql_delete()
-
- def revoke(self, cb, eb):
- """
- Request revocation of all certificates whose SKI matches the key
- for this ca_detail.
-
- Tasks:
-
- - Request revocation of old keypair by parent.
-
- - Revoke all child certs issued by the old keypair.
-
- - Generate a final CRL, signed with the old keypair, listing all
- the revoked certs, with a next CRL time after the last cert or
- CRL signed by the old keypair will have expired.
-
- - Generate a corresponding final manifest.
-
- - Destroy old keypairs.
-
- - Leave final CRL and manifest in place until their nextupdate
- time has passed.
- """
-
- ca = self.ca
- parent = ca.parent
-
- def parent_revoked(r_msg):
-
- if r_msg.payload.ski != self.latest_ca_cert.gSKI():
- raise rpki.exceptions.SKIMismatch
-
- logger.debug("Parent revoked %s, starting cleanup", self.latest_ca_cert.gSKI())
-
- crl_interval = rpki.sundial.timedelta(seconds = parent.self.crl_interval)
-
- nextUpdate = rpki.sundial.now()
-
- if self.latest_manifest is not None:
- self.latest_manifest.extract_if_needed()
- nextUpdate = nextUpdate.later(self.latest_manifest.getNextUpdate())
-
- if self.latest_crl is not None:
- nextUpdate = nextUpdate.later(self.latest_crl.getNextUpdate())
-
- publisher = publication_queue()
-
- for child_cert in self.child_certs:
- nextUpdate = nextUpdate.later(child_cert.cert.getNotAfter())
- child_cert.revoke(publisher = publisher)
-
- for roa in self.roas:
- nextUpdate = nextUpdate.later(roa.cert.getNotAfter())
- roa.revoke(publisher = publisher)
-
- for ghostbuster in self.ghostbusters:
- nextUpdate = nextUpdate.later(ghostbuster.cert.getNotAfter())
- ghostbuster.revoke(publisher = publisher)
-
- nextUpdate += crl_interval
- self.generate_crl(publisher = publisher, nextUpdate = nextUpdate)
- self.generate_manifest(publisher = publisher, nextUpdate = nextUpdate)
- self.private_key_id = None
- self.manifest_private_key_id = None
- self.manifest_public_key = None
- self.latest_manifest_cert = None
- self.state = "revoked"
- self.sql_mark_dirty()
- publisher.call_pubd(cb, eb)
-
- logger.debug("Asking parent to revoke CA certificate %s", self.latest_ca_cert.gSKI())
- rpki.up_down.revoke_pdu.query(ca, self.latest_ca_cert.gSKI(), parent_revoked, eb)
- def update(self, parent, ca, rc, sia_uri_changed, old_resources, callback, errback):
- """
- Need to get a new certificate for this ca_detail and perhaps frob
- children of this ca_detail.
- """
-
- def issued(issue_response):
- c = issue_response.payload.classes[0].certs[0]
- logger.debug("CA %r received certificate %s", self, c.cert_url)
-
- if self.state == "pending":
- return self.activate(
- ca = ca,
- cert = c.cert,
- uri = c.cert_url,
- callback = callback,
- errback = errback)
-
- validity_changed = self.latest_ca_cert is None or self.latest_ca_cert.getNotAfter() != c.cert.getNotAfter()
-
- publisher = publication_queue()
-
- if self.latest_ca_cert != c.cert:
- self.latest_ca_cert = c.cert
- self.sql_mark_dirty()
- self.generate_manifest_cert()
- self.generate_crl(publisher = publisher)
- self.generate_manifest(publisher = publisher)
-
- new_resources = self.latest_ca_cert.get_3779resources()
-
- if sia_uri_changed or old_resources.oversized(new_resources):
- for child_cert in self.child_certs:
- child_resources = child_cert.cert.get_3779resources()
- if sia_uri_changed or child_resources.oversized(new_resources):
- child_cert.reissue(
- ca_detail = self,
- resources = child_resources & new_resources,
- publisher = publisher)
-
- if sia_uri_changed or validity_changed or old_resources.oversized(new_resources):
- for roa in self.roas:
- roa.update(publisher = publisher, fast = True)
-
- if sia_uri_changed or validity_changed:
- for ghostbuster in self.ghostbusters:
- ghostbuster.update(publisher = publisher, fast = True)
-
- publisher.call_pubd(callback, errback)
-
- logger.debug("Sending issue request to %r from %r", parent, self.update)
- rpki.up_down.issue_pdu.query(parent, ca, self, issued, errback)
-
- @classmethod
- def create(cls, ca):
- """
- Create a new ca_detail object for a specified CA.
- """
- self = cls()
- self.gctx = ca.gctx
- self.ca_id = ca.ca_id
- self.state = "pending"
-
- self.private_key_id = rpki.x509.RSA.generate()
- self.public_key = self.private_key_id.get_public()
-
- self.manifest_private_key_id = rpki.x509.RSA.generate()
- self.manifest_public_key = self.manifest_private_key_id.get_public()
-
- self.sql_store()
- return self
-
- def issue_ee(self, ca, resources, subject_key, sia,
- cn = None, sn = None, notAfter = None, eku = None):
- """
- Issue a new EE certificate.
- """
-
- if notAfter is None:
- notAfter = self.latest_ca_cert.getNotAfter()
-
- return self.latest_ca_cert.issue(
- keypair = self.private_key_id,
- subject_key = subject_key,
- serial = ca.next_serial_number(),
- sia = sia,
- aia = self.ca_cert_uri,
- crldp = self.crl_uri,
- resources = resources,
- notAfter = notAfter,
- is_ca = False,
- cn = cn,
- sn = sn,
- eku = eku)
-
- def generate_manifest_cert(self):
- """
- Generate a new manifest certificate for this ca_detail.
- """
-
- resources = rpki.resource_set.resource_bag.from_inheritance()
- self.latest_manifest_cert = self.issue_ee(
- ca = self.ca,
- resources = resources,
- subject_key = self.manifest_public_key,
- sia = (None, None, self.manifest_uri))
-
- def issue(self, ca, child, subject_key, sia, resources, publisher, child_cert = None):
- """
- Issue a new certificate to a child. Optional child_cert argument
- specifies an existing child_cert object to update in place; if not
- specified, we create a new one. Returns the child_cert object
- containing the newly issued cert.
- """
-
- self.check_failed_publication(publisher)
-
- assert child_cert is None or child_cert.child_id == child.child_id
-
- cert = self.latest_ca_cert.issue(
- keypair = self.private_key_id,
- subject_key = subject_key,
- serial = ca.next_serial_number(),
- aia = self.ca_cert_uri,
- crldp = self.crl_uri,
- sia = sia,
- resources = resources,
- notAfter = resources.valid_until)
-
- if child_cert is None:
- child_cert = rpki.rpkid.child_cert_obj(
- gctx = child.gctx,
- child_id = child.child_id,
- ca_detail_id = self.ca_detail_id,
- cert = cert)
- logger.debug("Created new child_cert %r", child_cert)
- else:
- child_cert.cert = cert
- del child_cert.ca_detail
- child_cert.ca_detail_id = self.ca_detail_id
- logger.debug("Reusing existing child_cert %r", child_cert)
-
- child_cert.ski = cert.get_SKI()
- child_cert.published = rpki.sundial.now()
- child_cert.sql_store()
- publisher.publish(
- cls = rpki.publication.certificate_elt,
- uri = child_cert.uri,
- obj = child_cert.cert,
- repository = ca.parent.repository,
- handler = child_cert.published_callback)
- self.generate_manifest(publisher = publisher)
- return child_cert
-
- def generate_crl(self, publisher, nextUpdate = None):
- """
- Generate a new CRL for this ca_detail. At the moment this is
- unconditional, that is, it is up to the caller to decide whether a
- new CRL is needed.
- """
-
- self.check_failed_publication(publisher)
-
- ca = self.ca
- parent = ca.parent
- crl_interval = rpki.sundial.timedelta(seconds = parent.self.crl_interval)
- now = rpki.sundial.now()
-
- if nextUpdate is None:
- nextUpdate = now + crl_interval
-
- certlist = []
- for revoked_cert in self.revoked_certs:
- if now > revoked_cert.expires + crl_interval:
- revoked_cert.sql_delete()
- else:
- certlist.append((revoked_cert.serial, revoked_cert.revoked))
- certlist.sort()
-
- self.latest_crl = rpki.x509.CRL.generate(
- keypair = self.private_key_id,
- issuer = self.latest_ca_cert,
- serial = ca.next_crl_number(),
- thisUpdate = now,
- nextUpdate = nextUpdate,
- revokedCertificates = certlist)
-
- self.crl_published = rpki.sundial.now()
- self.sql_mark_dirty()
- publisher.publish(
- cls = rpki.publication.crl_elt,
- uri = self.crl_uri,
- obj = self.latest_crl,
- repository = parent.repository,
- handler = self.crl_published_callback)
-
- def crl_published_callback(self, pdu):
- """
- Check result of CRL publication.
- """
- pdu.raise_if_error()
- self.crl_published = None
- self.sql_mark_dirty()
-
- def generate_manifest(self, publisher, nextUpdate = None):
- """
- Generate a new manifest for this ca_detail.
- """
-
- self.check_failed_publication(publisher)
-
- ca = self.ca
- parent = ca.parent
- crl_interval = rpki.sundial.timedelta(seconds = parent.self.crl_interval)
- now = rpki.sundial.now()
- uri = self.manifest_uri
-
- if nextUpdate is None:
- nextUpdate = now + crl_interval
-
- if (self.latest_manifest_cert is None or
- (self.latest_manifest_cert.getNotAfter() < nextUpdate and
- self.latest_manifest_cert.getNotAfter() < self.latest_ca_cert.getNotAfter())):
- logger.debug("Generating EE certificate for %s", uri)
- self.generate_manifest_cert()
- logger.debug("Latest CA cert notAfter %s, new %s EE notAfter %s",
- self.latest_ca_cert.getNotAfter(), uri, self.latest_manifest_cert.getNotAfter())
-
- logger.debug("Constructing manifest object list for %s", uri)
- objs = [(self.crl_uri_tail, self.latest_crl)]
- objs.extend((c.uri_tail, c.cert) for c in self.child_certs)
- objs.extend((r.uri_tail, r.roa) for r in self.roas if r.roa is not None)
- objs.extend((g.uri_tail, g.ghostbuster) for g in self.ghostbusters)
- objs.extend((e.uri_tail, e.cert) for e in self.ee_certificates)
-
- logger.debug("Building manifest object %s", uri)
- self.latest_manifest = rpki.x509.SignedManifest.build(
- serial = ca.next_manifest_number(),
- thisUpdate = now,
- nextUpdate = nextUpdate,
- names_and_objs = objs,
- keypair = self.manifest_private_key_id,
- certs = self.latest_manifest_cert)
-
- logger.debug("Manifest generation took %s", rpki.sundial.now() - now)
-
- self.manifest_published = rpki.sundial.now()
- self.sql_mark_dirty()
- publisher.publish(cls = rpki.publication.manifest_elt,
- uri = uri,
- obj = self.latest_manifest,
- repository = parent.repository,
- handler = self.manifest_published_callback)
-
- def manifest_published_callback(self, pdu):
- """
- Check result of manifest publication.
- """
- pdu.raise_if_error()
- self.manifest_published = None
- self.sql_mark_dirty()
-
- def reissue(self, cb, eb):
- """
- Reissue all current certificates issued by this ca_detail.
- """
+import rpki.rpkid_tasks
- publisher = publication_queue()
- self.check_failed_publication(publisher)
- for roa in self.roas:
- roa.regenerate(publisher, fast = True)
- for ghostbuster in self.ghostbusters:
- ghostbuster.regenerate(publisher, fast = True)
- for ee_certificate in self.ee_certificates:
- ee_certificate.reissue(publisher, force = True)
- for child_cert in self.child_certs:
- child_cert.reissue(self, publisher, force = True)
- self.gctx.sql.sweep()
- self.generate_manifest_cert()
- self.sql_mark_dirty()
- self.generate_crl(publisher = publisher)
- self.generate_manifest(publisher = publisher)
- self.gctx.sql.sweep()
- publisher.call_pubd(cb, eb)
-
- def check_failed_publication(self, publisher, check_all = True):
- """
- Check for failed publication of objects issued by this ca_detail.
-
- All publishable objects have timestamp fields recording time of
- last attempted publication, and callback methods which clear these
- timestamps once publication has succeeded. Our task here is to
- look for objects issued by this ca_detail which have timestamps
- set (indicating that they have not been published) and for which
- the timestamps are not very recent (for some definition of very
- recent -- intent is to allow a bit of slack in case pubd is just
- being slow). In such cases, we want to retry publication.
-
- As an optimization, we can probably skip checking other products
- if manifest and CRL have been published, thus saving ourselves
- several complex SQL queries. Not sure yet whether this
- optimization is worthwhile.
-
- For the moment we check everything without optimization, because
- it simplifies testing.
-
- For the moment our definition of staleness is hardwired; this
- should become configurable.
- """
- logger.debug("Checking for failed publication for %r", self)
-
- stale = rpki.sundial.now() - rpki.sundial.timedelta(seconds = 60)
- repository = self.ca.parent.repository
-
- if self.latest_crl is not None and \
- self.crl_published is not None and \
- self.crl_published < stale:
- logger.debug("Retrying publication for %s", self.crl_uri)
- publisher.publish(cls = rpki.publication.crl_elt,
- uri = self.crl_uri,
- obj = self.latest_crl,
- repository = repository,
- handler = self.crl_published_callback)
-
- if self.latest_manifest is not None and \
- self.manifest_published is not None and \
- self.manifest_published < stale:
- logger.debug("Retrying publication for %s", self.manifest_uri)
- publisher.publish(cls = rpki.publication.manifest_elt,
- uri = self.manifest_uri,
- obj = self.latest_manifest,
- repository = repository,
- handler = self.manifest_published_callback)
-
- if not check_all:
- return
-
- # Might also be able to return here if manifest and CRL are up to
- # date, but let's avoid premature optimization
-
- for child_cert in self.unpublished_child_certs(stale):
- logger.debug("Retrying publication for %s", child_cert)
- publisher.publish(
- cls = rpki.publication.certificate_elt,
- uri = child_cert.uri,
- obj = child_cert.cert,
- repository = repository,
- handler = child_cert.published_callback)
-
- for roa in self.unpublished_roas(stale):
- logger.debug("Retrying publication for %s", roa)
- publisher.publish(
- cls = rpki.publication.roa_elt,
- uri = roa.uri,
- obj = roa.roa,
- repository = repository,
- handler = roa.published_callback)
-
- for ghostbuster in self.unpublished_ghostbusters(stale):
- logger.debug("Retrying publication for %s", ghostbuster)
- publisher.publish(
- cls = rpki.publication.ghostbuster_elt,
- uri = ghostbuster.uri,
- obj = ghostbuster.ghostbuster,
- repository = repository,
- handler = ghostbuster.published_callback)
-
-class child_cert_obj(rpki.sql.sql_persistent):
- """
- Certificate that has been issued to a child.
- """
-
- sql_template = rpki.sql.template(
- "child_cert",
- "child_cert_id",
- ("cert", rpki.x509.X509),
- "child_id",
- "ca_detail_id",
- "ski",
- ("published", rpki.sundial.datetime))
-
- def __repr__(self):
- args = [self]
- try:
- args.append(self.uri)
- except: # pylint: disable=W0702
- pass
- return rpki.log.log_repr(*args)
-
- def __init__(self, gctx = None, child_id = None, ca_detail_id = None, cert = None):
- """
- Initialize a child_cert_obj.
- """
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.child_id = child_id
- self.ca_detail_id = ca_detail_id
- self.cert = cert
- self.published = None
- if child_id or ca_detail_id or cert:
- self.sql_mark_dirty()
-
- @property
- @rpki.sql.cache_reference
- def child(self):
- """
- Fetch child object to which this child_cert object links.
- """
- return rpki.left_right.child_elt.sql_fetch(self.gctx, self.child_id)
-
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this child_cert object links.
- """
- return ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+logger = logging.getLogger(__name__)
- @ca_detail.deleter
- def ca_detail(self):
- try:
- del self._ca_detail
- except AttributeError:
- pass
- @property
- def uri_tail(self):
+class main(object):
"""
- Return the tail (filename) portion of the URI for this child_cert.
+ Main program for rpkid.
"""
- return self.cert.gSKI() + ".cer"
- @property
- def uri(self):
- """
- Return the publication URI for this child_cert.
- """
- return self.ca_detail.ca.sia_uri + self.uri_tail
+ def __init__(self):
- def revoke(self, publisher, generate_crl_and_manifest = True):
- """
- Revoke a child cert.
- """
+ os.environ.update(TZ = "UTC",
+ DJANGO_SETTINGS_MODULE = "rpki.django_settings.rpkid")
+ time.tzset()
- ca_detail = self.ca_detail
- ca = ca_detail.ca
- logger.debug("Revoking %r %r", self, self.uri)
- revoked_cert_obj.revoke(cert = self.cert, ca_detail = ca_detail)
- publisher.withdraw(
- cls = rpki.publication.certificate_elt,
- uri = self.uri,
- obj = self.cert,
- repository = ca.parent.repository)
- self.gctx.sql.sweep()
- self.sql_delete()
- if generate_crl_and_manifest:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
-
- def reissue(self, ca_detail, publisher, resources = None, sia = None, force = False):
- """
- Reissue an existing child cert, reusing the public key. If the
- child cert we would generate is identical to the one we already
- have, we just return the one we already have. If we have to
- revoke the old child cert when generating the new one, we have to
- generate a new child_cert_obj, so calling code that needs the
- updated child_cert_obj must use the return value from this method.
- """
+ self.irdbd_cms_timestamp = None
+ self.irbe_cms_timestamp = None
- ca = ca_detail.ca
- child = self.child
+ self.task_queue = tornado.queues.Queue()
+ self.task_ready = set()
- old_resources = self.cert.get_3779resources()
- old_sia = self.cert.get_SIA()
- old_aia = self.cert.get_AIA()[0]
- old_ca_detail = self.ca_detail
+ self.http_client_serialize = weakref.WeakValueDictionary()
- needed = False
+ self.cfg = rpki.config.argparser(section = "rpkid", doc = __doc__)
+ self.cfg.add_boolean_argument("--foreground",
+ default = False,
+ help = "whether to daemonize")
+ self.cfg.add_argument("--pidfile",
+ default = os.path.join(rpki.daemonize.default_pid_directory,
+ "rpkid.pid"),
+ help = "override default location of pid file")
+ self.cfg.add_argument("--profile",
+ default = "",
+ help = "enable profiling, saving data to PROFILE")
+ self.cfg.add_logging_arguments()
+ args = self.cfg.argparser.parse_args()
- if resources is None:
- resources = old_resources
+ self.cfg.configure_logging(args = args, ident = "rpkid")
- if sia is None:
- sia = old_sia
+ self.profile = args.profile
- assert resources.valid_until is not None and old_resources.valid_until is not None
+ try:
+ self.cfg.set_global_flags()
- if resources.asn != old_resources.asn or resources.v4 != old_resources.v4 or resources.v6 != old_resources.v6:
- logger.debug("Resources changed for %r: old %s new %s", self, old_resources, resources)
- needed = True
+ if not args.foreground:
+ rpki.daemonize.daemon(pidfile = args.pidfile)
- if resources.valid_until != old_resources.valid_until:
- logger.debug("Validity changed for %r: old %s new %s",
- self, old_resources.valid_until, resources.valid_until)
- needed = True
+ if self.profile:
+ import cProfile
+ prof = cProfile.Profile()
+ try:
+ prof.runcall(self.main)
+ finally:
+ prof.dump_stats(self.profile)
+ logger.info("Dumped profile data to %s", self.profile)
+ else:
+ self.main()
+ except:
+ logger.exception("Unhandled exception in rpki.rpkid.main()")
+ sys.exit(1)
- if sia != old_sia:
- logger.debug("SIA changed for %r: old %r new %r", self, old_sia, sia)
- needed = True
- if ca_detail != old_ca_detail:
- logger.debug("Issuer changed for %r: old %r new %r", self, old_ca_detail, ca_detail)
- needed = True
+ def main(self):
- if ca_detail.ca_cert_uri != old_aia:
- logger.debug("AIA changed for %r: old %r new %r", self, old_aia, ca_detail.ca_cert_uri)
- needed = True
+ startup_msg = self.cfg.get("startup-message", "")
+ if startup_msg:
+ logger.info(startup_msg)
- must_revoke = old_resources.oversized(resources) or old_resources.valid_until > resources.valid_until
- if must_revoke:
- logger.debug("Must revoke any existing cert(s) for %r", self)
- needed = True
+ if self.profile:
+ logger.info("Running in profile mode with output to %s", self.profile)
- if not needed and force:
- logger.debug("No change needed for %r, forcing reissuance anyway", self)
- needed = True
+ logger.debug("Initializing Django")
+ import django
+ django.setup()
- if not needed:
- logger.debug("No change to %r", self)
- return self
+ logger.debug("Initializing rpkidb...")
+ global rpki # pylint: disable=W0602
+ import rpki.rpkidb # pylint: disable=W0621
- if must_revoke:
- for x in child.fetch_child_certs(ca_detail = ca_detail, ski = self.ski):
- logger.debug("Revoking child_cert %r", x)
- x.revoke(publisher = publisher)
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
+ logger.debug("Initializing rpkidb...done")
- child_cert = ca_detail.issue(
- ca = ca,
- child = child,
- subject_key = self.cert.getPublicKey(),
- sia = sia,
- resources = resources,
- child_cert = None if must_revoke else self,
- publisher = publisher)
+ self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta"))
+ self.irdb_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdb-cert"))
+ self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert"))
+ self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert"))
+ self.rpkid_key = rpki.x509.RSA( Auto_update = self.cfg.get("rpkid-key"))
- logger.debug("New child_cert %r uri %s", child_cert, child_cert.uri)
+ self.irdb_url = self.cfg.get("irdb-url")
- return child_cert
+ self.http_server_host = self.cfg.get("server-host", "")
+ self.http_server_port = self.cfg.getint("server-port")
- @classmethod
- def fetch(cls, gctx = None, child = None, ca_detail = None, ski = None, unique = False, unpublished = None):
- """
- Fetch all child_cert objects matching a particular set of
- parameters. This is a wrapper to consolidate various queries that
- would otherwise be inline SQL WHERE expressions. In most cases
- code calls this indirectly, through methods in other classes.
- """
+ self.http_client_timeout = self.cfg.getint("http-client-timeout", 900)
- args = []
- where = []
+ self.use_internal_cron = self.cfg.getboolean("use-internal-cron", True)
- if child:
- where.append("child_id = %s")
- args.append(child.child_id)
+ self.initial_delay = random.randint(self.cfg.getint("initial-delay-min", 10),
+ self.cfg.getint("initial-delay-max", 120))
- if ca_detail:
- where.append("ca_detail_id = %s")
- args.append(ca_detail.ca_detail_id)
+ self.cron_period = self.cfg.getint("cron-period", 1800)
- if ski:
- where.append("ski = %s")
- args.append(ski)
+ if self.use_internal_cron:
+ logger.debug("Scheduling initial cron pass in %s seconds", self.initial_delay)
+ tornado.ioloop.IOLoop.current().spawn_callback(self.cron_loop)
- if unpublished is not None:
- where.append("published IS NOT NULL AND published < %s")
- args.append(unpublished)
+ logger.debug("Scheduling task loop")
+ tornado.ioloop.IOLoop.current().spawn_callback(self.task_loop)
- where = " AND ".join(where)
+ rpkid = self
- gctx = gctx or (child and child.gctx) or (ca_detail and ca_detail.gctx) or None
+ class LeftRightHandler(tornado.web.RequestHandler): # pylint: disable=W0223
+ @tornado.gen.coroutine
+ def post(self):
+ yield rpkid.left_right_handler(self)
- if unique:
- return cls.sql_fetch_where1(gctx, where, args)
- else:
- return cls.sql_fetch_where(gctx, where, args)
+ class UpDownHandler(tornado.web.RequestHandler): # pylint: disable=W0223
+ @tornado.gen.coroutine
+ def post(self, tenant_handle, child_handle): # pylint: disable=W0221
+ yield rpkid.up_down_handler(self, tenant_handle, child_handle)
- def published_callback(self, pdu):
- """
- Publication callback: check result and mark published.
- """
- pdu.raise_if_error()
- self.published = None
- self.sql_mark_dirty()
-
-class revoked_cert_obj(rpki.sql.sql_persistent):
- """
- Tombstone for a revoked certificate.
- """
-
- sql_template = rpki.sql.template(
- "revoked_cert",
- "revoked_cert_id",
- "serial",
- "ca_detail_id",
- ("revoked", rpki.sundial.datetime),
- ("expires", rpki.sundial.datetime))
-
- def __repr__(self):
- return rpki.log.log_repr(self, repr(self.ca_detail), self.serial, self.revoked)
-
- def __init__(self, gctx = None, serial = None, revoked = None, expires = None, ca_detail_id = None):
- """
- Initialize a revoked_cert_obj.
- """
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.serial = serial
- self.revoked = revoked
- self.expires = expires
- self.ca_detail_id = ca_detail_id
- if serial or revoked or expires or ca_detail_id:
- self.sql_mark_dirty()
-
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this revoked_cert_obj links.
- """
- return ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+ class CronjobHandler(tornado.web.RequestHandler): # pylint: disable=W0223
+ @tornado.gen.coroutine
+ def post(self):
+ yield rpkid.cronjob_handler(self)
- @classmethod
- def revoke(cls, cert, ca_detail):
- """
- Revoke a certificate.
- """
- return cls(
- serial = cert.getSerial(),
- expires = cert.getNotAfter(),
- revoked = rpki.sundial.now(),
- gctx = ca_detail.gctx,
- ca_detail_id = ca_detail.ca_detail_id)
-
-class roa_obj(rpki.sql.sql_persistent):
- """
- Route Origin Authorization.
- """
-
- sql_template = rpki.sql.template(
- "roa",
- "roa_id",
- "ca_detail_id",
- "self_id",
- "asn",
- ("roa", rpki.x509.ROA),
- ("cert", rpki.x509.X509),
- ("published", rpki.sundial.datetime))
-
- ca_detail_id = None
- cert = None
- roa = None
- published = None
-
- @property
- @rpki.sql.cache_reference
- def self(self):
- """
- Fetch self object to which this roa_obj links.
- """
- return rpki.left_right.self_elt.sql_fetch(self.gctx, self.self_id)
+ application = tornado.web.Application((
+ (r"/left-right", LeftRightHandler),
+ (r"/up-down/([-a-zA-Z0-9_]+)/([-a-zA-Z0-9_]+)", UpDownHandler),
+ (r"/cronjob", CronjobHandler)))
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this roa_obj links.
- """
- return rpki.rpkid.ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+ application.listen(
+ address = self.http_server_host,
+ port = self.http_server_port)
- @ca_detail.deleter
- def ca_detail(self):
- try:
- del self._ca_detail
- except AttributeError:
- pass
+ tornado.ioloop.IOLoop.current().start()
- def sql_fetch_hook(self):
- """
- Extra SQL fetch actions for roa_obj -- handle prefix lists.
- """
- for version, datatype, attribute in ((4, rpki.resource_set.roa_prefix_set_ipv4, "ipv4"),
- (6, rpki.resource_set.roa_prefix_set_ipv6, "ipv6")):
- setattr(self, attribute, datatype.from_sql(
- self.gctx.sql,
+ def task_add(self, *tasks):
+ """
+ Add tasks to the task queue.
"""
- SELECT prefix, prefixlen, max_prefixlen FROM roa_prefix
- WHERE roa_id = %s AND version = %s
- """,
- (self.roa_id, version)))
-
- def sql_insert_hook(self):
- """
- Extra SQL insert actions for roa_obj -- handle prefix lists.
- """
- for version, prefix_set in ((4, self.ipv4), (6, self.ipv6)):
- if prefix_set:
- self.gctx.sql.executemany(
- """
- INSERT roa_prefix (roa_id, prefix, prefixlen, max_prefixlen, version)
- VALUES (%s, %s, %s, %s, %s)
- """,
- ((self.roa_id, x.prefix, x.prefixlen, x.max_prefixlen, version)
- for x in prefix_set))
-
- def sql_delete_hook(self):
- """
- Extra SQL delete actions for roa_obj -- handle prefix lists.
- """
- self.gctx.sql.execute("DELETE FROM roa_prefix WHERE roa_id = %s", (self.roa_id,))
-
- def __repr__(self):
- args = [self, self.asn, self.ipv4, self.ipv6]
- try:
- args.append(self.uri)
- except: # pylint: disable=W0702
- pass
- return rpki.log.log_repr(*args)
-
- def __init__(self, gctx = None, self_id = None, asn = None, ipv4 = None, ipv6 = None):
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.self_id = self_id
- self.asn = asn
- self.ipv4 = ipv4
- self.ipv6 = ipv6
-
- # Defer marking new ROA as dirty until .generate() has a chance to
- # finish setup, otherwise we get SQL consistency errors.
- #
- #if self_id or asn or ipv4 or ipv6: self.sql_mark_dirty()
-
- def update(self, publisher, fast = False):
- """
- Bring this roa_obj's ROA up to date if necesssary.
- """
-
- v4 = self.ipv4.to_resource_set() if self.ipv4 is not None else rpki.resource_set.resource_set_ipv4()
- v6 = self.ipv6.to_resource_set() if self.ipv6 is not None else rpki.resource_set.resource_set_ipv6()
-
- if self.roa is None:
- logger.debug("%r doesn't exist, generating", self)
- return self.generate(publisher = publisher, fast = fast)
-
- ca_detail = self.ca_detail
-
- if ca_detail is None:
- logger.debug("%r has no associated ca_detail, generating", self)
- return self.generate(publisher = publisher, fast = fast)
-
- if ca_detail.state != "active":
- logger.debug("ca_detail associated with %r not active (state %s), regenerating", self, ca_detail.state)
- return self.regenerate(publisher = publisher, fast = fast)
-
- now = rpki.sundial.now()
- regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.self.regen_margin)
-
- if now > regen_time and self.cert.getNotAfter() < ca_detail.latest_ca_cert.getNotAfter():
- logger.debug("%r past threshold %s, regenerating", self, regen_time)
- return self.regenerate(publisher = publisher, fast = fast)
-
- if now > regen_time:
- logger.warning("%r is past threshold %s but so is issuer %r, can't regenerate", self, regen_time, ca_detail)
-
- ca_resources = ca_detail.latest_ca_cert.get_3779resources()
- ee_resources = self.cert.get_3779resources()
-
- if ee_resources.oversized(ca_resources):
- logger.debug("%r oversized with respect to CA, regenerating", self)
- return self.regenerate(publisher = publisher, fast = fast)
-
- if ee_resources.v4 != v4 or ee_resources.v6 != v6:
- logger.debug("%r resources do not match EE, regenerating", self)
- return self.regenerate(publisher = publisher, fast = fast)
-
- if self.cert.get_AIA()[0] != ca_detail.ca_cert_uri:
- logger.debug("%r AIA changed, regenerating", self)
- return self.regenerate(publisher = publisher, fast = fast)
-
- def generate(self, publisher, fast = False):
- """
- Generate a ROA.
-
- At present we have no way of performing a direct lookup from a
- desired set of resources to a covering certificate, so we have to
- search. This could be quite slow if we have a lot of active
- ca_detail objects. Punt on the issue for now, revisit if
- profiling shows this as a hotspot.
-
- Once we have the right covering certificate, we generate the ROA
- payload, generate a new EE certificate, use the EE certificate to
- sign the ROA payload, publish the result, then throw away the
- private key for the EE cert, all per the ROA specification. This
- implies that generating a lot of ROAs will tend to thrash
- /dev/random, but there is not much we can do about that.
-
- If fast is set, we leave generating the new manifest for our
- caller to handle, presumably at the end of a bulk operation.
- """
-
- if self.ipv4 is None and self.ipv6 is None:
- raise rpki.exceptions.EmptyROAPrefixList
-
- # Ugly and expensive search for covering ca_detail, there has to
- # be a better way, but it would require the ability to test for
- # resource subsets in SQL.
-
- v4 = self.ipv4.to_resource_set() if self.ipv4 is not None else rpki.resource_set.resource_set_ipv4()
- v6 = self.ipv6.to_resource_set() if self.ipv6 is not None else rpki.resource_set.resource_set_ipv6()
-
- ca_detail = self.ca_detail
- if ca_detail is None or ca_detail.state != "active" or ca_detail.has_expired():
- logger.debug("Searching for new ca_detail for ROA %r", self)
- ca_detail = None
- for parent in self.self.parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- assert ca_detail is None or ca_detail.state == "active"
- if ca_detail is not None and not ca_detail.has_expired():
- resources = ca_detail.latest_ca_cert.get_3779resources()
- if v4.issubset(resources.v4) and v6.issubset(resources.v6):
- break
- ca_detail = None
- if ca_detail is not None:
- break
- else:
- logger.debug("Keeping old ca_detail for ROA %r", self)
-
- if ca_detail is None:
- raise rpki.exceptions.NoCoveringCertForROA("Could not find a certificate covering %r" % self)
-
- logger.debug("Using new ca_detail %r for ROA %r, ca_detail_state %s",
- ca_detail, self, ca_detail.state)
-
- ca = ca_detail.ca
- resources = rpki.resource_set.resource_bag(v4 = v4, v6 = v6)
- keypair = rpki.x509.RSA.generate()
-
- del self.ca_detail
- self.ca_detail_id = ca_detail.ca_detail_id
- self.cert = ca_detail.issue_ee(
- ca = ca,
- resources = resources,
- subject_key = keypair.get_public(),
- sia = (None, None, self.uri_from_key(keypair)))
- self.roa = rpki.x509.ROA.build(self.asn, self.ipv4, self.ipv6, keypair, (self.cert,))
- self.published = rpki.sundial.now()
- self.sql_store()
-
- logger.debug("Generating %r URI %s", self, self.uri)
- publisher.publish(
- cls = rpki.publication.roa_elt,
- uri = self.uri,
- obj = self.roa,
- repository = ca.parent.repository,
- handler = self.published_callback)
- if not fast:
- ca_detail.generate_manifest(publisher = publisher)
-
-
- def published_callback(self, pdu):
- """
- Check publication result.
- """
- pdu.raise_if_error()
- self.published = None
- self.sql_mark_dirty()
-
- def revoke(self, publisher, regenerate = False, allow_failure = False, fast = False):
- """
- Withdraw ROA associated with this roa_obj.
-
- In order to preserve make-before-break properties without
- duplicating code, this method also handles generating a
- replacement ROA when requested.
-
- If allow_failure is set, failing to withdraw the ROA will not be
- considered an error.
-
- If fast is set, SQL actions will be deferred, on the assumption
- that our caller will handle regenerating CRL and manifest and
- flushing the SQL cache.
- """
-
- ca_detail = self.ca_detail
- cert = self.cert
- roa = self.roa
- uri = self.uri
-
- logger.debug("%s %r, ca_detail %r state is %s",
- "Regenerating" if regenerate else "Not regenerating",
- self, ca_detail, ca_detail.state)
-
- if regenerate:
- self.generate(publisher = publisher, fast = fast)
-
- logger.debug("Withdrawing %r %s and revoking its EE cert", self, uri)
- rpki.rpkid.revoked_cert_obj.revoke(cert = cert, ca_detail = ca_detail)
- publisher.withdraw(cls = rpki.publication.roa_elt, uri = uri, obj = roa,
- repository = ca_detail.ca.parent.repository,
- handler = False if allow_failure else None)
-
- if not regenerate:
- self.sql_mark_deleted()
-
- if not fast:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
- self.gctx.sql.sweep()
-
- def regenerate(self, publisher, fast = False):
- """
- Reissue ROA associated with this roa_obj.
- """
- if self.ca_detail is None:
- self.generate(publisher = publisher, fast = fast)
- else:
- self.revoke(publisher = publisher, regenerate = True, fast = fast)
-
- def uri_from_key(self, key):
- """
- Return publication URI for a public key.
- """
- return self.ca_detail.ca.sia_uri + key.gSKI() + ".roa"
-
- @property
- def uri(self):
- """
- Return the publication URI for this roa_obj's ROA.
- """
- return self.ca_detail.ca.sia_uri + self.uri_tail
-
- @property
- def uri_tail(self):
- """
- Return the tail (filename portion) of the publication URI for this
- roa_obj's ROA.
- """
- return self.cert.gSKI() + ".roa"
-
-
-class ghostbuster_obj(rpki.sql.sql_persistent):
- """
- Ghostbusters record.
- """
-
- sql_template = rpki.sql.template(
- "ghostbuster",
- "ghostbuster_id",
- "ca_detail_id",
- "self_id",
- "vcard",
- ("ghostbuster", rpki.x509.Ghostbuster),
- ("cert", rpki.x509.X509),
- ("published", rpki.sundial.datetime))
-
- ca_detail_id = None
- cert = None
- ghostbuster = None
- published = None
- vcard = None
-
- def __repr__(self):
- args = [self]
- try:
- args.extend(self.vcard.splitlines()[2:-1])
- except: # pylint: disable=W0702
- pass
- try:
- args.append(self.uri)
- except: # pylint: disable=W0702
- pass
- return rpki.log.log_repr(*args)
-
- @property
- @rpki.sql.cache_reference
- def self(self):
- """
- Fetch self object to which this ghostbuster_obj links.
- """
- return rpki.left_right.self_elt.sql_fetch(self.gctx, self.self_id)
-
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this ghostbuster_obj links.
- """
- return rpki.rpkid.ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
-
- def __init__(self, gctx = None, self_id = None, ca_detail_id = None, vcard = None):
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.self_id = self_id
- self.ca_detail_id = ca_detail_id
- self.vcard = vcard
-
- # Defer marking new ghostbuster as dirty until .generate() has a chance to
- # finish setup, otherwise we get SQL consistency errors.
-
- def update(self, publisher, fast = False):
- """
- Bring this ghostbuster_obj up to date if necesssary.
- """
-
- if self.ghostbuster is None:
- logger.debug("Ghostbuster record doesn't exist, generating")
- return self.generate(publisher = publisher, fast = fast)
- now = rpki.sundial.now()
- regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.self.regen_margin)
+ for task in tasks:
+ if task in self.task_ready:
+ logger.debug("Task %r already queued", task)
+ else:
+ logger.debug("Adding %r to task queue", task)
+ self.task_queue.put(task)
+ self.task_ready.add(task)
- if now > regen_time and self.cert.getNotAfter() < self.ca_detail.latest_ca_cert.getNotAfter():
- logger.debug("%r past threshold %s, regenerating", self, regen_time)
- return self.regenerate(publisher = publisher, fast = fast)
+ @tornado.gen.coroutine
+ def task_loop(self):
+ """
+ Asynchronous infinite loop to run background tasks.
+ """
- if now > regen_time:
- logger.warning("%r is past threshold %s but so is issuer %r, can't regenerate", self, regen_time, self.ca_detail)
+ logger.debug("Starting task loop")
- if self.cert.get_AIA()[0] != self.ca_detail.ca_cert_uri:
- logger.debug("%r AIA changed, regenerating", self)
- return self.regenerate(publisher = publisher, fast = fast)
+ while True:
+ task = None
+ try:
+ task = yield self.task_queue.get()
+ self.task_ready.discard(task)
+ yield task.start()
+ except:
+ logger.exception("Unhandled exception from %r", task)
- def generate(self, publisher, fast = False):
- """
- Generate a Ghostbuster record
+ @tornado.gen.coroutine
+ def cron_loop(self):
+ """
+ Asynchronous infinite loop to drive internal cron cycle.
+ """
- Once we have the right covering certificate, we generate the
- ghostbuster payload, generate a new EE certificate, use the EE
- certificate to sign the ghostbuster payload, publish the result,
- then throw away the private key for the EE cert. This is modeled
- after the way we handle ROAs.
+ logger.debug("cron_loop(): Starting")
+ assert self.use_internal_cron
+ logger.debug("cron_loop(): Startup delay %d seconds", self.initial_delay)
+ yield tornado.gen.sleep(self.initial_delay)
+ while True:
+ logger.debug("cron_loop(): Running")
+ try:
+ self.cron_run()
+ except:
+ logger.exception("Error queuing cron tasks")
+ logger.debug("cron_loop(): Sleeping %d seconds", self.cron_period)
+ yield tornado.gen.sleep(self.cron_period)
+
+ def cron_run(self):
+ """
+ Schedule periodic tasks.
+ """
- If fast is set, we leave generating the new manifest for our
- caller to handle, presumably at the end of a bulk operation.
- """
+ for tenant in rpki.rpkidb.models.Tenant.objects.all():
+ self.task_add(*tenant.cron_tasks(self))
- ca_detail = self.ca_detail
- ca = ca_detail.ca
-
- resources = rpki.resource_set.resource_bag.from_inheritance()
- keypair = rpki.x509.RSA.generate()
-
- self.cert = ca_detail.issue_ee(
- ca = ca,
- resources = resources,
- subject_key = keypair.get_public(),
- sia = (None, None, self.uri_from_key(keypair)))
- self.ghostbuster = rpki.x509.Ghostbuster.build(self.vcard, keypair, (self.cert,))
- self.published = rpki.sundial.now()
- self.sql_store()
-
- logger.debug("Generating Ghostbuster record %r", self.uri)
- publisher.publish(
- cls = rpki.publication.ghostbuster_elt,
- uri = self.uri,
- obj = self.ghostbuster,
- repository = ca.parent.repository,
- handler = self.published_callback)
- if not fast:
- ca_detail.generate_manifest(publisher = publisher)
-
- def published_callback(self, pdu):
- """
- Check publication result.
- """
- pdu.raise_if_error()
- self.published = None
- self.sql_mark_dirty()
-
- def revoke(self, publisher, regenerate = False, allow_failure = False, fast = False):
- """
- Withdraw Ghostbuster associated with this ghostbuster_obj.
+ @tornado.gen.coroutine
+ def cronjob_handler(self, handler):
+ """
+ External trigger to schedule periodic tasks. Obsolete for
+ production use, but portions of the test framework still use this.
+ """
- In order to preserve make-before-break properties without
- duplicating code, this method also handles generating a
- replacement ghostbuster when requested.
+ if self.use_internal_cron:
+ handler.set_status(500, "Running cron internally")
+ else:
+ logger.debug("Starting externally triggered cron")
+ self.cron_run()
+ handler.set_status(200)
+ handler.finish()
- If allow_failure is set, failing to withdraw the ghostbuster will not be
- considered an error.
+ @tornado.gen.coroutine
+ def http_fetch(self, request, serialize_on_full_url = False):
+ """
+ Wrapper around tornado.httpclient.AsyncHTTPClient() which
+ serializes requests to any particular HTTP server, to avoid
+ spurious CMS replay errors.
+ """
- If fast is set, SQL actions will be deferred, on the assumption
- that our caller will handle regenerating CRL and manifest and
- flushing the SQL cache.
- """
+ # The current definition of "particular HTTP server" is based only
+ # on the "netloc" portion of the URL, which could in theory could
+ # cause deadlocks in a loopback scenario; no such deadlocks have
+ # shown up in testing, but if such a thing were to occur, it would
+ # look like an otherwise inexplicable HTTP timeout. The solution,
+ # should this occur, would be to use the entire URL as the lookup
+ # key, perhaps only for certain protocols.
+ #
+ # The reason for the current scheme is that at least one protocol
+ # (publication) uses RESTful URLs but has a single service-wide
+ # CMS replay detection database, which translates to meaning that
+ # we need to serialize all requests for that service, not just
+ # requests to a particular URL.
+
+ if serialize_on_full_url:
+ netlock = request.url
+ else:
+ netlock = urlparse.urlparse(request.url).netloc
- ca_detail = self.ca_detail
- cert = self.cert
- ghostbuster = self.ghostbuster
- uri = self.uri
+ try:
+ lock = self.http_client_serialize[netlock]
+ except KeyError:
+ lock = self.http_client_serialize[netlock] = tornado.locks.Lock()
- logger.debug("%s %r, ca_detail %r state is %s",
- "Regenerating" if regenerate else "Not regenerating",
- self, ca_detail, ca_detail.state)
+ http_client = tornado.httpclient.AsyncHTTPClient()
- if regenerate:
- self.generate(publisher = publisher, fast = fast)
+ with (yield lock.acquire()):
+ try:
+ started = time.time()
+ response = yield http_client.fetch(request)
+ except tornado.httpclient.HTTPError as e:
- logger.debug("Withdrawing %r %s and revoking its EE cert", self, uri)
- rpki.rpkid.revoked_cert_obj.revoke(cert = cert, ca_detail = ca_detail)
- publisher.withdraw(cls = rpki.publication.ghostbuster_elt, uri = uri, obj = ghostbuster,
- repository = ca_detail.ca.parent.repository,
- handler = False if allow_failure else None)
+ # XXX This is not a solution, just an attempt to
+ # gather data on whether the timeout arguments are
+ # working as expected.
- if not regenerate:
- self.sql_mark_deleted()
+ logger.warning("%r: HTTP error contacting %r: %s", self, request, e)
+ if e.code == 599:
+ logger.warning("%r: HTTP timeout after time %s seconds", self, time.time() - started)
+ raise
- if not fast:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
- self.gctx.sql.sweep()
+ raise tornado.gen.Return(response)
- def regenerate(self, publisher, fast = False):
- """
- Reissue Ghostbuster associated with this ghostbuster_obj.
- """
- if self.ghostbuster is None:
- self.generate(publisher = publisher, fast = fast)
- else:
- self.revoke(publisher = publisher, regenerate = True, fast = fast)
+ @staticmethod
+ def compose_left_right_query():
+ """
+ Compose top level element of a left-right query to irdbd.
+ """
- def uri_from_key(self, key):
- """
- Return publication URI for a public key.
- """
- return self.ca_detail.ca.sia_uri + key.gSKI() + ".gbr"
+ return Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "query", version = rpki.left_right.version)
- @property
- def uri(self):
- """
- Return the publication URI for this ghostbuster_obj's ghostbuster.
- """
- return self.ca_detail.ca.sia_uri + self.uri_tail
+ @tornado.gen.coroutine
+ def irdb_query(self, q_msg):
+ """
+ Perform an IRDB callback query.
+ """
- @property
- def uri_tail(self):
- """
- Return the tail (filename portion) of the publication URI for this
- ghostbuster_obj's ghostbuster.
- """
- return self.cert.gSKI() + ".gbr"
-
-
-class ee_cert_obj(rpki.sql.sql_persistent):
- """
- EE certificate (router certificate or generic).
- """
-
- sql_template = rpki.sql.template(
- "ee_cert",
- "ee_cert_id",
- "self_id",
- "ca_detail_id",
- "ski",
- ("cert", rpki.x509.X509),
- ("published", rpki.sundial.datetime))
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.cert.getSubject(), self.uri)
-
- def __init__(self, gctx = None, self_id = None, ca_detail_id = None, cert = None):
- rpki.sql.sql_persistent.__init__(self)
- self.gctx = gctx
- self.self_id = self_id
- self.ca_detail_id = ca_detail_id
- self.cert = cert
- self.ski = None if cert is None else cert.get_SKI()
- self.published = None
- if self_id or ca_detail_id or cert:
- self.sql_mark_dirty()
-
- @property
- @rpki.sql.cache_reference
- def self(self):
- """
- Fetch self object to which this ee_cert_obj links.
- """
- return rpki.left_right.self_elt.sql_fetch(self.gctx, self.self_id)
+ q_tags = set(q_pdu.tag for q_pdu in q_msg)
- @property
- @rpki.sql.cache_reference
- def ca_detail(self):
- """
- Fetch ca_detail object to which this ee_cert_obj links.
- """
- return rpki.rpkid.ca_detail_obj.sql_fetch(self.gctx, self.ca_detail_id)
+ q_der = rpki.left_right.cms_msg().wrap(q_msg, self.rpkid_key, self.rpkid_cert)
- @ca_detail.deleter
- def ca_detail(self):
- try:
- del self._ca_detail
- except AttributeError:
- pass
+ http_request = tornado.httpclient.HTTPRequest(
+ url = self.irdb_url,
+ method = "POST",
+ body = q_der,
+ headers = { "Content-Type" : rpki.left_right.content_type },
+ connect_timeout = self.http_client_timeout,
+ request_timeout = self.http_client_timeout)
- @property
- def gski(self):
- """
- Calculate g(SKI), for ease of comparison with XML.
+ http_response = yield self.http_fetch(http_request)
- Although, really, one has to ask why we don't just store g(SKI)
- in rpkid.sql instead of ski....
- """
- return base64.urlsafe_b64encode(self.ski).rstrip("=")
+ # Tornado already checked http_response.code for us
- @gski.setter
- def gski(self, val):
- self.ski = base64.urlsafe_b64decode(val + ("=" * ((4 - len(val)) % 4)))
+ content_type = http_response.headers.get("Content-Type")
- @property
- def uri(self):
- """
- Return the publication URI for this ee_cert_obj.
- """
- return self.ca_detail.ca.sia_uri + self.uri_tail
+ if content_type not in rpki.left_right.allowed_content_types:
+ raise rpki.exceptions.BadContentType("HTTP Content-Type %r, expected %r" % (rpki.left_right.content_type, content_type))
- @property
- def uri_tail(self):
- """
- Return the tail (filename portion) of the publication URI for this
- ee_cert_obj.
- """
- return self.cert.gSKI() + ".cer"
+ r_der = http_response.body
- @classmethod
- def create(cls, ca_detail, subject_name, subject_key, resources, publisher, eku = None):
- """
- Generate a new certificate and stuff it in a new ee_cert_obj.
- """
+ r_cms = rpki.left_right.cms_msg(DER = r_der)
+ r_msg = r_cms.unwrap((self.bpki_ta, self.irdb_cert))
- cn, sn = subject_name.extract_cn_and_sn()
- ca = ca_detail.ca
+ self.irdbd_cms_timestamp = r_cms.check_replay(self.irdbd_cms_timestamp, self.irdb_url)
- cert = ca_detail.issue_ee(
- ca = ca,
- subject_key = subject_key,
- sia = None,
- resources = resources,
- notAfter = resources.valid_until,
- cn = cn,
- sn = sn,
- eku = eku)
+ #rpki.left_right.check_response(r_msg)
- self = cls(
- gctx = ca_detail.gctx,
- self_id = ca.parent.self.self_id,
- ca_detail_id = ca_detail.ca_detail_id,
- cert = cert)
+ if r_msg.get("type") != "reply" or not all(r_pdu.tag in q_tags for r_pdu in r_msg):
+ raise rpki.exceptions.BadIRDBReply("Unexpected response to IRDB query: %s" % r_cms.pretty_print_content())
- publisher.publish(
- cls = rpki.publication.certificate_elt,
- uri = self.uri,
- obj = self.cert,
- repository = ca.parent.repository,
- handler = self.published_callback)
+ raise tornado.gen.Return(r_msg)
- self.sql_store()
+ @tornado.gen.coroutine
+ def irdb_query_children_resources(self, tenant_handle, child_handles):
+ """
+ Ask IRDB about resources for one or more children.
+ """
- ca_detail.generate_manifest(publisher = publisher)
+ q_msg = self.compose_left_right_query()
+ for child_handle in child_handles:
+ SubElement(q_msg, rpki.left_right.tag_list_resources, tenant_handle = tenant_handle, child_handle = child_handle)
- logger.debug("New ee_cert %r", self)
+ r_msg = yield self.irdb_query(q_msg)
- return self
+ if len(r_msg) != len(q_msg):
+ raise rpki.exceptions.BadIRDBReply("Expected IRDB response to be same length as query: %s" % r_msg.pretty_print_content())
- def revoke(self, publisher, generate_crl_and_manifest = True):
- """
- Revoke and withdraw an EE certificate.
- """
+ bags = [rpki.resource_set.resource_bag(asn = r_pdu.get("asn"),
+ v4 = r_pdu.get("ipv4"),
+ v6 = r_pdu.get("ipv6"),
+ valid_until = r_pdu.get("valid_until"))
+ for r_pdu in r_msg]
- ca_detail = self.ca_detail
- ca = ca_detail.ca
- logger.debug("Revoking %r %r", self, self.uri)
- revoked_cert_obj.revoke(cert = self.cert, ca_detail = ca_detail)
- publisher.withdraw(cls = rpki.publication.certificate_elt,
- uri = self.uri,
- obj = self.cert,
- repository = ca.parent.repository)
- self.gctx.sql.sweep()
- self.sql_delete()
- if generate_crl_and_manifest:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
-
- def reissue(self, publisher, ca_detail = None, resources = None, force = False):
- """
- Reissue an existing EE cert, reusing the public key. If the EE
- cert we would generate is identical to the one we already have, we
- just return; if we need to reissue, we reuse this ee_cert_obj and
- just update its contents, as the publication URI will not have
- changed.
- """
+ raise tornado.gen.Return(bags)
- needed = False
+ @tornado.gen.coroutine
+ def irdb_query_child_resources(self, tenant_handle, child_handle):
+ """
+ Ask IRDB about a single child's resources.
+ """
- old_cert = self.cert
+ bags = yield self.irdb_query_children_resources(tenant_handle, (child_handle,))
+ raise tornado.gen.Return(bags[0])
- old_ca_detail = self.ca_detail
- if ca_detail is None:
- ca_detail = old_ca_detail
+ @tornado.gen.coroutine
+ def irdb_query_roa_requests(self, tenant_handle):
+ """
+ Ask IRDB about self's ROA requests.
+ """
- assert ca_detail.ca is old_ca_detail.ca
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_list_roa_requests, tenant_handle = tenant_handle)
+ r_msg = yield self.irdb_query(q_msg)
+ raise tornado.gen.Return(r_msg)
- old_resources = old_cert.get_3779resources()
- if resources is None:
- resources = old_resources
+ @tornado.gen.coroutine
+ def irdb_query_ghostbuster_requests(self, tenant_handle, parent_handles):
+ """
+ Ask IRDB about self's ghostbuster record requests.
+ """
- assert resources.valid_until is not None and old_resources.valid_until is not None
+ q_msg = self.compose_left_right_query()
+ for parent_handle in parent_handles:
+ SubElement(q_msg, rpki.left_right.tag_list_ghostbuster_requests,
+ tenant_handle = tenant_handle, parent_handle = parent_handle)
+ r_msg = yield self.irdb_query(q_msg)
+ raise tornado.gen.Return(r_msg)
- assert ca_detail.covers(resources)
+ @tornado.gen.coroutine
+ def irdb_query_ee_certificate_requests(self, tenant_handle):
+ """
+ Ask IRDB about self's EE certificate requests.
+ """
- if ca_detail != self.ca_detail:
- logger.debug("ca_detail changed for %r: old %r new %r",
- self, self.ca_detail, ca_detail)
- needed = True
+ q_msg = self.compose_left_right_query()
+ SubElement(q_msg, rpki.left_right.tag_list_ee_certificate_requests, tenant_handle = tenant_handle)
+ r_msg = yield self.irdb_query(q_msg)
+ raise tornado.gen.Return(r_msg)
- if ca_detail.ca_cert_uri != old_cert.get_AIA()[0]:
- logger.debug("AIA changed for %r: old %s new %s",
- self, old_cert.get_AIA()[0], ca_detail.ca_cert_uri)
- needed = True
+ @property
+ def left_right_models(self):
+ """
+ Map element tag to rpkidb model.
+ """
- if resources.valid_until != old_resources.valid_until:
- logger.debug("Validity changed for %r: old %s new %s",
- self, old_resources.valid_until, resources.valid_until)
- needed = True
+ # pylint: disable=W0621,W0201
- if resources.asn != old_resources.asn or resources.v4 != old_resources.v4 or resources.v6 != old_resources.v6:
- logger.debug("Resources changed for %r: old %s new %s",
- self, old_resources, resources)
- needed = True
+ try:
+ return self._left_right_models
+ except AttributeError:
+ import rpki.rpkidb.models
+ self._left_right_models = {
+ rpki.left_right.tag_tenant : rpki.rpkidb.models.Tenant,
+ rpki.left_right.tag_bsc : rpki.rpkidb.models.BSC,
+ rpki.left_right.tag_parent : rpki.rpkidb.models.Parent,
+ rpki.left_right.tag_child : rpki.rpkidb.models.Child,
+ rpki.left_right.tag_repository : rpki.rpkidb.models.Repository }
+ return self._left_right_models
+
+ @property
+ def left_right_trivial_handlers(self):
+ """
+ Map element tag to bound handler methods for trivial PDU types.
+ """
- must_revoke = (old_resources.oversized(resources) or
- old_resources.valid_until > resources.valid_until)
- if must_revoke:
- logger.debug("Must revoke existing cert(s) for %r", self)
- needed = True
+ # pylint: disable=W0201
- if not needed and force:
- logger.debug("No change needed for %r, forcing reissuance anyway", self)
- needed = True
+ try:
+ return self._left_right_trivial_handlers
+ except AttributeError:
+ self._left_right_trivial_handlers = {
+ rpki.left_right.tag_list_published_objects : self.handle_list_published_objects,
+ rpki.left_right.tag_list_received_resources : self.handle_list_received_resources }
+ return self._left_right_trivial_handlers
+
+ def handle_list_published_objects(self, q_pdu, r_msg):
+ """
+ <list_published_objects/> server.
+ """
- if not needed:
- logger.debug("No change to %r", self)
- return
+ tenant_handle = q_pdu.get("tenant_handle")
+ msg_tag = q_pdu.get("tag")
+
+ kw = dict(tenant_handle = tenant_handle)
+ if msg_tag is not None:
+ kw.update(tag = msg_tag)
+
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(ca__parent__tenant__tenant_handle = tenant_handle, state = "active"):
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = ca_detail.crl_uri, **kw).text = ca_detail.latest_crl.get_Base64()
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = ca_detail.manifest_uri, **kw).text = ca_detail.latest_manifest.get_Base64()
+ for c in ca_detail.child_certs.all():
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = c.uri, child_handle = c.child.child_handle, **kw).text = c.cert.get_Base64()
+ for r in ca_detail.roas.filter(roa__isnull = False):
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = r.uri, **kw).text = r.roa.get_Base64()
+ for g in ca_detail.ghostbusters.all():
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = g.uri, **kw).text = g.ghostbuster.get_Base64()
+ for c in ca_detail.ee_certificates.all():
+ SubElement(r_msg, rpki.left_right.tag_list_published_objects,
+ uri = c.uri, **kw).text = c.cert.get_Base64()
+
+ def handle_list_received_resources(self, q_pdu, r_msg):
+ """
+ <list_received_resources/> server.
+ """
- cn, sn = self.cert.getSubject().extract_cn_and_sn()
+ logger.debug(".handle_list_received_resources() %s", ElementToString(q_pdu))
+ tenant_handle = q_pdu.get("tenant_handle")
+ msg_tag = q_pdu.get("tag")
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(ca__parent__tenant__tenant_handle = tenant_handle,
+ state = "active", latest_ca_cert__isnull = False):
+ cert = ca_detail.latest_ca_cert
+ resources = cert.get_3779resources()
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_list_received_resources,
+ tenant_handle = tenant_handle,
+ parent_handle = ca_detail.ca.parent.parent_handle,
+ uri = ca_detail.ca_cert_uri,
+ notBefore = str(cert.getNotBefore()),
+ notAfter = str(cert.getNotAfter()),
+ sia_uri = cert.get_sia_directory_uri(),
+ aia_uri = cert.get_aia_uri() or "",
+ asn = str(resources.asn),
+ ipv4 = str(resources.v4),
+ ipv6 = str(resources.v6))
+ if msg_tag is not None:
+ r_pdu.set("tag", msg_tag)
+
+ @tornado.gen.coroutine
+ def left_right_handler(self, handler):
+ """
+ Process one left-right message.
+ """
- self.cert = ca_detail.issue_ee(
- ca = ca_detail.ca,
- subject_key = self.cert.getPublicKey(),
- eku = self.cert.get_EKU(),
- sia = None,
- resources = resources,
- notAfter = resources.valid_until,
- cn = cn,
- sn = sn)
+ content_type = handler.request.headers["Content-Type"]
+ if content_type not in rpki.left_right.allowed_content_types:
+ handler.set_status(415, "No handler for Content-Type %s" % content_type)
+ handler.finish()
+ return
- self.sql_mark_dirty()
+ handler.set_header("Content-Type", rpki.left_right.content_type)
- publisher.publish(
- cls = rpki.publication.certificate_elt,
- uri = self.uri,
- obj = self.cert,
- repository = ca_detail.ca.parent.repository,
- handler = self.published_callback)
+ try:
+ q_cms = rpki.left_right.cms_msg(DER = handler.request.body)
+ q_msg = q_cms.unwrap((self.bpki_ta, self.irbe_cert))
+ r_msg = Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap,
+ type = "reply", version = rpki.left_right.version)
+ self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, handler.request.path)
+
+ assert q_msg.tag.startswith(rpki.left_right.xmlns)
+ assert all(q_pdu.tag.startswith(rpki.left_right.xmlns) for q_pdu in q_msg)
+
+ if q_msg.get("version") != rpki.left_right.version:
+ raise rpki.exceptions.BadQuery("Unrecognized protocol version")
+
+ if q_msg.get("type") != "query":
+ raise rpki.exceptions.BadQuery("Message type is not query")
+
+ for q_pdu in q_msg:
+
+ try:
+ action = q_pdu.get("action")
+ model = self.left_right_models.get(q_pdu.tag)
+
+ if q_pdu.tag in self.left_right_trivial_handlers:
+ self.left_right_trivial_handlers[q_pdu.tag](q_pdu, r_msg)
+
+ elif action in ("get", "list"):
+ for obj in model.objects.xml_list(q_pdu):
+ obj.xml_template.encode(obj, q_pdu, r_msg)
+
+ elif action == "destroy":
+ obj = model.objects.xml_get_for_delete(q_pdu)
+ yield obj.xml_pre_delete_hook(self)
+ obj.delete()
+ obj.xml_template.acknowledge(obj, q_pdu, r_msg)
+
+ elif action in ("create", "set"):
+ obj = model.objects.xml_get_or_create(q_pdu)
+ obj.xml_template.decode(obj, q_pdu)
+ obj.xml_pre_save_hook(q_pdu)
+ obj.save()
+ yield obj.xml_post_save_hook(self, q_pdu)
+ obj.xml_template.acknowledge(obj, q_pdu, r_msg)
+
+ else:
+ raise rpki.exceptions.BadQuery("Unrecognized action %r" % action)
+
+ except Exception, e:
+ if not isinstance(e, rpki.exceptions.NotFound):
+ logger.exception("Unhandled exception serving left-right PDU %r", q_pdu)
+ error_tenant_handle = q_pdu.get("tenant_handle")
+ error_tag = q_pdu.get("tag")
+ r_pdu = SubElement(r_msg, rpki.left_right.tag_report_error, error_code = e.__class__.__name__)
+ r_pdu.text = str(e)
+ if error_tag is not None:
+ r_pdu.set("tag", error_tag)
+ if error_tenant_handle is not None:
+ r_pdu.set("tenant_handle", error_tenant_handle)
+ break
+
+ handler.set_status(200)
+ handler.finish(rpki.left_right.cms_msg().wrap(r_msg, self.rpkid_key, self.rpkid_cert))
- if must_revoke:
- revoked_cert_obj.revoke(cert = old_cert.cert, ca_detail = old_ca_detail)
+ except Exception, e:
+ logger.exception("Unhandled exception serving left-right request")
+ handler.set_status(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e))
+ handler.finish()
- self.gctx.sql.sweep()
+ @tornado.gen.coroutine
+ def up_down_handler(self, handler, tenant_handle, child_handle):
+ """
+ Process one up-down PDU.
+ """
- if must_revoke:
- ca_detail.generate_crl(publisher = publisher)
- self.gctx.sql.sweep()
+ content_type = handler.request.headers["Content-Type"]
+ if content_type not in rpki.up_down.allowed_content_types:
+ handler.set_status(415, "No handler for Content-Type %s" % content_type)
+ handler.finish()
+ return
- ca_detail.generate_manifest(publisher = publisher)
+ try:
+ child = rpki.rpkidb.models.Child.objects.get(tenant__tenant_handle = tenant_handle, child_handle = child_handle)
+ q_der = handler.request.body
+ r_der = yield child.serve_up_down(self, q_der)
+ handler.set_header("Content-Type", rpki.up_down.content_type)
+ handler.set_status(200)
+ handler.finish(r_der)
+
+ except rpki.rpkidb.models.Child.DoesNotExist:
+ logger.info("Child %r of tenant %r not found", child_handle, tenant_handle)
+ handler.set_status(400, "Child %r not found" % child_handle)
+ handler.finish()
- def published_callback(self, pdu):
- """
- Publication callback: check result and mark published.
- """
- pdu.raise_if_error()
- self.published = None
- self.sql_mark_dirty()
+ except Exception, e:
+ logger.exception("Unhandled exception processing up-down request")
+ handler.set_status(400, "Could not process PDU: %s" % e)
+ handler.finish()
class publication_queue(object):
- """
- Utility to simplify publication from within rpkid.
-
- General idea here is to accumulate a collection of objects to be
- published, in one or more repositories, each potentially with its
- own completion callback. Eventually we want to publish everything
- we've accumulated, at which point we need to iterate over the
- collection and do repository.call_pubd() for each repository.
- """
-
- replace = True
-
- def __init__(self):
- self.clear()
-
- def clear(self):
- self.repositories = {}
- self.msgs = {}
- self.handlers = {}
- if self.replace:
- self.uris = {}
-
- def _add(self, uri, obj, repository, handler, make_pdu):
- rid = id(repository)
- if rid not in self.repositories:
- self.repositories[rid] = repository
- self.msgs[rid] = rpki.publication.msg.query()
- if self.replace and uri in self.uris:
- logger.debug("Removing publication duplicate <%s %r %r>",
- self.uris[uri].action, self.uris[uri].uri, self.uris[uri].payload)
- self.msgs[rid].remove(self.uris.pop(uri))
- pdu = make_pdu(uri = uri, obj = obj)
- if handler is not None:
- self.handlers[id(pdu)] = handler
- pdu.tag = id(pdu)
- self.msgs[rid].append(pdu)
- if self.replace:
- self.uris[uri] = pdu
-
- def publish(self, cls, uri, obj, repository, handler = None):
- return self._add( uri, obj, repository, handler, cls.make_publish)
-
- def withdraw(self, cls, uri, obj, repository, handler = None):
- return self._add( uri, obj, repository, handler, cls.make_withdraw)
-
- def call_pubd(self, cb, eb):
- def loop(iterator, rid):
- logger.debug("Calling pubd[%r]", self.repositories[rid])
- self.repositories[rid].call_pubd(iterator, eb, self.msgs[rid], self.handlers)
- def done():
- self.clear()
- cb()
- rpki.async.iterator(self.repositories, loop, done)
-
- @property
- def size(self):
- return sum(len(self.msgs[rid]) for rid in self.repositories)
-
- def empty(self):
- assert (not self.msgs) == (self.size == 0)
- return not self.msgs
+ """
+ Utility to simplify publication from within rpkid.
+
+ General idea here is to accumulate a collection of objects to be
+ published, in one or more repositories, each potentially with its
+ own completion callback. Eventually we want to publish everything
+ we've accumulated, at which point we need to iterate over the
+ collection and do repository.call_pubd() for each repository.
+ """
+
+ # At present, ._inplay and .inplay() are debugging tools only. If
+ # there turns out to be a real race condition here, this might
+ # evolve into the hook for some kind of Condition()-based
+ # mechanism.
+
+ _inplay = weakref.WeakValueDictionary()
+
+ def __init__(self, rpkid):
+ self.rpkid = rpkid
+ self.clear()
+
+ def clear(self):
+ self.repositories = {}
+ self.msgs = {}
+ self.handlers = {}
+ self.uris = {}
+
+ def inplay(self, uri):
+ who = self._inplay.get(uri, self)
+ return who is not self and uri in who.uris
+
+ def queue(self, uri, repository, handler = None,
+ old_obj = None, new_obj = None, old_hash = None):
+
+ assert old_obj is not None or new_obj is not None or old_hash is not None
+ assert old_obj is None or old_hash is None
+ assert old_obj is None or isinstance(old_obj, rpki.x509.uri_dispatch(uri))
+ assert new_obj is None or isinstance(new_obj, rpki.x509.uri_dispatch(uri))
+
+ logger.debug("Queuing publication action: uri %s, old %r, new %r, hash %s",
+ uri, old_obj, new_obj, old_hash)
+
+ if self.inplay(uri):
+ logger.warning("%s is already in play", uri)
+
+ rid = repository.peer_contact_uri
+ if rid not in self.repositories:
+ self.repositories[rid] = repository
+ self.msgs[rid] = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+
+ if uri in self.uris:
+ logger.debug("Removing publication duplicate %r %s hash %s",
+ self.uris[uri], uri, self.uris[uri].get("hash"))
+ old_pdu = self.uris.pop(uri)
+ self.msgs[rid].remove(old_pdu)
+ pdu_hash = old_pdu.get("hash")
+ if pdu_hash is None and new_obj is None:
+ logger.debug("Withdrawing object %r which was never published simplifies to no-op",
+ old_pdu)
+ return
+ elif old_hash is not None:
+ logger.debug("Old hash supplied") # XXX Debug log
+ pdu_hash = old_hash
+ elif old_obj is None:
+ logger.debug("No old object present") # XXX Debug log
+ pdu_hash = None
+ else:
+ logger.debug("Calculating hash of old object") # XXX Debug log
+ pdu_hash = rpki.x509.sha256(old_obj.get_DER()).encode("hex")
+
+ logger.debug("uri %s old hash %s new hash %s", uri, pdu_hash, # XXX Debug log
+ None if new_obj is None else rpki.x509.sha256(new_obj.get_DER()).encode("hex"))
+
+ if new_obj is None:
+ pdu = SubElement(self.msgs[rid], rpki.publication.tag_withdraw, uri = uri, hash = pdu_hash)
+ else:
+ pdu = SubElement(self.msgs[rid], rpki.publication.tag_publish, uri = uri)
+ pdu.text = new_obj.get_Base64()
+ if pdu_hash is not None:
+ pdu.set("hash", pdu_hash)
+
+ if handler is not None:
+ self.handlers[uri] = handler
+
+ self.uris[uri] = pdu
+ self._inplay[uri] = self
+
+ @tornado.gen.coroutine
+ def call_pubd(self):
+ for rid in self.repositories:
+ logger.debug("Calling pubd[%r]", self.repositories[rid])
+ try:
+ yield self.repositories[rid].call_pubd(self.rpkid, self.msgs[rid], self.handlers)
+ except (rpki.exceptions.ExistingObjectAtURI,
+ rpki.exceptions.DifferentObjectAtURI,
+ rpki.exceptions.NoObjectAtURI) as e:
+ logger.warn("Lost synchronization with %r: %s", self.repositories[rid], e)
+ yield self.resync(self.repositories[rid])
+ for k in self.uris.iterkeys():
+ if self._inplay.get(k) is self:
+ del self._inplay[k]
+ self.clear()
+
+ @tornado.gen.coroutine
+ def resync(self, repository):
+ logger.info("Attempting resynchronization with %r", repository)
+
+ # A lot of this is copy and paste from .serve_publish_world_now().
+ # Refactor when we have more of a clue about how this should work.
+
+ q_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+ SubElement(q_msg, rpki.publication.tag_list, tag = "list")
+ r_msg = yield repository.call_pubd(self.rpkid, q_msg, length_check = False)
+
+ if not all(r_pdu.tag == rpki.publication.tag_list for r_pdu in r_msg):
+ raise rpki.exceptions.BadPublicationReply("Unexpected XML tag in publication response")
+
+ pubd_objs = dict((r_pdu.get("uri"), r_pdu.get("hash")) for r_pdu in r_msg)
+
+ our_objs = []
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(
+ ca__parent__tenant = repository.tenant, state = "active"):
+ our_objs = [(ca_detail.crl_uri, ca_detail.latest_crl),
+ (ca_detail.manifest_uri, ca_detail.latest_manifest)]
+ our_objs.extend((c.uri, c.cert) for c in ca_detail.child_certs.all())
+ our_objs.extend((r.uri, r.roa) for r in ca_detail.roas.filter(roa__isnull = False))
+ our_objs.extend((g.uri, g.ghostbuster) for g in ca_detail.ghostbusters.all())
+ our_objs.extend((c.uri, c.cert) for c in ca_detail.ee_certificates.all())
+
+ q_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
+ type = "query", version = rpki.publication.version)
+
+ for uri, obj in our_objs:
+ if uri not in pubd_objs:
+ SubElement(q_msg, rpki.publication.tag_publish, uri = uri).text = obj.get_Base64()
+ else:
+ h = pubd_objs.pop(uri)
+ if h != rpki.x509.sha256(obj.get_DER()).encode("hex"):
+ SubElement(q_msg, rpki.publication.tag_publish,
+ uri = uri, hash = h).text = obj.get_Base64()
+
+ for uri, h in pubd_objs.iteritems():
+ SubElement(q_msg, rpki.publication.tag_withdraw, uri = uri, hash = h)
+
+ yield repository.call_pubd(self.rpkid, q_msg)
+
+ @property
+ def size(self):
+ return sum(len(self.msgs[rid]) for rid in self.repositories)
+
+ def empty(self):
+ return not self.msgs
diff --git a/rpki/rpkid_tasks.py b/rpki/rpkid_tasks.py
index 58b4bcfe..ee4f90d3 100644
--- a/rpki/rpkid_tasks.py
+++ b/rpki/rpkid_tasks.py
@@ -22,9 +22,18 @@ because interactions with rpkid scheduler were getting too complicated.
"""
import logging
+import random
+
+import tornado.gen
+import tornado.web
+import tornado.locks
+import tornado.ioloop
+import tornado.httputil
+import tornado.httpclient
+import tornado.httpserver
+
import rpki.log
import rpki.rpkid
-import rpki.async
import rpki.up_down
import rpki.sundial
import rpki.publication
@@ -35,700 +44,634 @@ logger = logging.getLogger(__name__)
task_classes = ()
def queue_task(cls):
- """
- Class decorator to add a new task class to task_classes.
- """
-
- global task_classes
- task_classes += (cls,)
- return cls
-
-
-class CompletionHandler(object):
- """
- Track one or more scheduled rpkid tasks and execute a callback when
- the last of them terminates.
- """
-
- ## @var debug
- # Debug logging.
-
- debug = False
-
- def __init__(self, cb):
- self.cb = cb
- self.tasks = set()
-
- def register(self, task):
- if self.debug:
- logger.debug("Completion handler %r registering task %r", self, task)
- self.tasks.add(task)
- task.register_completion(self.done)
-
- def done(self, task):
- try:
- self.tasks.remove(task)
- except KeyError:
- logger.warning("Completion handler %r called with unregistered task %r, blundering onwards", self, task)
- else:
- if self.debug:
- logger.debug("Completion handler %r called with registered task %r", self, task)
- if not self.tasks:
- if self.debug:
- logger.debug("Completion handler %r finished, calling %r", self, self.cb)
- self.cb()
-
- @property
- def count(self):
- return len(self.tasks)
+ """
+ Class decorator to add a new task class to task_classes.
+ """
+
+ global task_classes # pylint: disable=W0603
+ task_classes += (cls,)
+ return cls
+
+
+class PostponeTask(Exception):
+ """
+ Exit a task without finishing it. We use this to signal that a
+ long-running task wants to yield to the task loop but hasn't yet
+ run to completion.
+ """
class AbstractTask(object):
- """
- Abstract base class for rpkid scheduler task objects. This just
- handles the scheduler hooks, real work starts in self.start.
-
- NB: This assumes that the rpki.rpkid.rpkid.task_* methods have been
- rewritten to expect instances of subclasses of this class, rather
- than expecting thunks to be wrapped up in the older version of this
- class. Rewrite, rewrite, remove this comment when done, OK!
- """
-
- ## @var timeslice
- # How long before a task really should consider yielding the CPU to
- # let something else run.
-
- timeslice = rpki.sundial.timedelta(seconds = 15)
-
- def __init__(self, s, description = None):
- self.self = s
- self.description = description
- self.completions = []
- self.continuation = None
- self.due_date = None
- self.clear()
-
- def __repr__(self):
- return rpki.log.log_repr(self, self.description)
-
- def register_completion(self, completion):
- self.completions.append(completion)
-
- def exit(self):
- self.self.gctx.sql.sweep()
- while self.completions:
- self.completions.pop(0)(self)
- self.clear()
- self.due_date = None
- self.self.gctx.task_next()
-
- def postpone(self, continuation):
- self.self.gctx.sql.sweep()
- self.continuation = continuation
- self.due_date = None
- self.self.gctx.task_add(self)
- self.self.gctx.task_next()
-
- def __call__(self):
- self.due_date = rpki.sundial.now() + self.timeslice
- if self.continuation is None:
- logger.debug("Running task %r", self)
- self.clear()
- self.start()
- else:
- logger.debug("Restarting task %r at %r", self, self.continuation)
- continuation = self.continuation
- self.continuation = None
- continuation()
-
- @property
- def overdue(self):
- return rpki.sundial.now() > self.due_date
-
- def __getattr__(self, name):
- return getattr(self.self, name)
-
- def start(self):
- raise NotImplementedError
-
- def clear(self):
- pass
+ """
+ Abstract base class for rpkid scheduler task objects.
+ """
+
+ ## @var timeslice
+ # How long before a task really should consider yielding the CPU
+ # to let something else run. Should this be something we can
+ # configure from rpki.conf?
+
+ #timeslice = rpki.sundial.timedelta(seconds = 15)
+ timeslice = rpki.sundial.timedelta(seconds = 120)
+
+ def __init__(self, rpkid, tenant, description = None):
+ self.rpkid = rpkid
+ self.tenant = tenant
+ self.description = description
+ self.done_this = None
+ self.done_next = None
+ self.due_date = None
+ self.started = False
+ self.postponed = False
+ self.clear()
+
+ def __repr__(self):
+ return rpki.log.log_repr(self, self.description)
+
+ @tornado.gen.coroutine
+ def start(self):
+ try:
+ logger.debug("%r: Starting", self)
+ self.due_date = rpki.sundial.now() + self.timeslice
+ self.clear()
+ self.started = True
+ self.postponed = False
+ yield self.main()
+ except PostponeTask:
+ self.postponed = True
+ except:
+ logger.exception("%r: Unhandled exception", self)
+ finally:
+ self.due_date = None
+ self.started = False
+ self.clear()
+ if self.postponed:
+ logger.debug("%r: Postponing", self)
+ self.rpkid.task_add(self)
+ else:
+ logger.debug("%r: Exiting", self)
+ if self.done_this is not None:
+ self.done_this.notify_all()
+ self.done_this = self.done_next
+ self.done_next = None
+
+ def wait(self):
+ done = "done_next" if self.started else "done_this"
+ condition = getattr(self, done)
+ if condition is None:
+ condition = tornado.locks.Condition()
+ setattr(self, done, condition)
+ future = condition.wait()
+ return future
+
+ def waiting(self):
+ return self.done_this is not None
+
+ @tornado.gen.coroutine
+ def overdue(self):
+ yield tornado.gen.moment
+ raise tornado.gen.Return(rpki.sundial.now() > self.due_date and
+ any(not task.postponed for task in self.rpkid.task_ready))
+
+ @tornado.gen.coroutine
+ def main(self):
+ raise NotImplementedError
+
+ def clear(self):
+ pass
@queue_task
class PollParentTask(AbstractTask):
- """
- Run the regular client poll cycle with each of this self's
- parents, in turn.
- """
-
- def clear(self):
- self.parent_iterator = None
- self.parent = None
- self.ca_map = None
- self.class_iterator = None
-
- def start(self):
- self.gctx.checkpoint()
- logger.debug("Self %s[%d] polling parents", self.self_handle, self.self_id)
- rpki.async.iterator(self.parents, self.parent_loop, self.exit)
-
- def parent_loop(self, parent_iterator, parent):
- self.parent_iterator = parent_iterator
- self.parent = parent
- rpki.up_down.list_pdu.query(parent, self.got_list, self.list_failed)
-
- def got_list(self, r_msg):
- self.ca_map = dict((ca.parent_resource_class, ca) for ca in self.parent.cas)
- self.gctx.checkpoint()
- rpki.async.iterator(r_msg.payload.classes, self.class_loop, self.class_done)
-
- def list_failed(self, e):
- logger.exception("Couldn't get resource class list from parent %r, skipping", self.parent)
- self.parent_iterator()
-
- def class_loop(self, class_iterator, rc):
- self.gctx.checkpoint()
- self.class_iterator = class_iterator
- try:
- ca = self.ca_map.pop(rc.class_name)
- except KeyError:
- rpki.rpkid.ca_obj.create(self.parent, rc, class_iterator, self.class_create_failed)
- else:
- ca.check_for_updates(self.parent, rc, class_iterator, self.class_update_failed)
-
- def class_update_failed(self, e):
- logger.exception("Couldn't update class, skipping")
- self.class_iterator()
-
- def class_create_failed(self, e):
- logger.exception("Couldn't create class, skipping")
- self.class_iterator()
-
- def class_done(self):
- rpki.async.iterator(self.ca_map.values(), self.ca_loop, self.ca_done)
-
- def ca_loop(self, iterator, ca):
- self.gctx.checkpoint()
- ca.delete(self.parent, iterator)
-
- def ca_done(self):
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- self.parent_iterator()
+ """
+ Run the regular client poll cycle with each of this tenant's
+ parents, in turn.
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Polling parents", self)
+
+ for parent in rpki.rpkidb.models.Parent.objects.filter(tenant = self.tenant):
+ try:
+ logger.debug("%r: Executing list query", self)
+ list_r_msg = yield parent.up_down_list_query(rpkid = self.rpkid)
+ except:
+ logger.exception("%r: Couldn't get resource class list from %r, skipping", self, parent)
+ continue
+
+ logger.debug("%r: Parsing list response", self)
+
+ ca_map = dict((ca.parent_resource_class, ca) for ca in parent.cas.all())
+
+ for rc in list_r_msg.getiterator(rpki.up_down.tag_class):
+ try:
+ class_name = rc.get("class_name")
+ ca = ca_map.pop(class_name, None)
+ if ca is None:
+ yield self.create(parent = parent, rc = rc, class_name = class_name)
+ else:
+ yield self.update(parent = parent, rc = rc, class_name = class_name, ca = ca)
+ except:
+ logger.exception("Couldn't update resource class %r, skipping", class_name)
+
+ for class_name, ca in ca_map.iteritems():
+ logger.debug("%r: Destroying orphaned %r for resource class %r", self, ca, class_name)
+ yield ca.destroy(rpkid = self.rpkid, parent = parent)
+
+ @tornado.gen.coroutine
+ def create(self, parent, rc, class_name):
+ logger.debug("%r: Creating new CA for resource class %r", self, class_name)
+ ca = rpki.rpkidb.models.CA.objects.create(
+ parent = parent,
+ parent_resource_class = class_name,
+ sia_uri = parent.construct_sia_uri(rc))
+ ca_detail = ca.create_detail()
+ r_msg = yield parent.up_down_issue_query(rpkid = self.rpkid, ca = ca, ca_detail = ca_detail)
+ elt = r_msg.find(rpki.up_down.tag_class).find(rpki.up_down.tag_certificate)
+ uri = elt.get("cert_url")
+ cert = rpki.x509.X509(Base64 = elt.text)
+ logger.debug("%r: %r received certificate %s", self, ca, uri)
+ yield ca_detail.activate(rpkid = self.rpkid, ca = ca, cert = cert, uri = uri)
+
+ @tornado.gen.coroutine
+ def update(self, parent, rc, class_name, ca):
+
+ # pylint: disable=C0330
+
+ logger.debug("%r: Checking updates for %r", self, ca)
+
+ sia_uri = parent.construct_sia_uri(rc)
+ sia_uri_changed = ca.sia_uri != sia_uri
+
+ if sia_uri_changed:
+ logger.debug("SIA changed: was %s now %s", ca.sia_uri, sia_uri)
+ ca.sia_uri = sia_uri
+
+ rc_resources = rpki.resource_set.resource_bag(
+ asn = rc.get("resource_set_as"),
+ v4 = rc.get("resource_set_ipv4"),
+ v6 = rc.get("resource_set_ipv6"),
+ valid_until = rc.get("resource_set_notafter"))
+
+ cert_map = {}
+
+ for c in rc.getiterator(rpki.up_down.tag_certificate):
+ x = rpki.x509.X509(Base64 = c.text)
+ u = rpki.up_down.multi_uri(c.get("cert_url")).rsync()
+ cert_map[x.gSKI()] = (x, u)
+
+ ca_details = ca.ca_details.exclude(state = "revoked")
+
+ if not ca_details:
+ logger.warning("Existing resource class %s to %s from %s with no certificates, rekeying",
+ class_name, parent.tenant.tenant_handle, parent.parent_handle)
+ yield ca.rekey(rpkid = self.rpkid)
+ return
+
+ for ca_detail in ca_details:
+
+ rc_cert, rc_cert_uri = cert_map.pop(ca_detail.public_key.gSKI(), (None, None))
+
+ if rc_cert is None:
+ logger.warning("g(SKI) %s in resource class %s is in database but missing from list_response to %s from %s, "
+ "maybe parent certificate went away?",
+ ca_detail.public_key.gSKI(), class_name, parent.tenant.tenant_handle, parent.parent_handle)
+ publisher = rpki.rpkid.publication_queue(rpkid = self.rpkid)
+ ca_detail.destroy(publisher = publisher)
+ yield publisher.call_pubd()
+ continue
+
+ if ca_detail.state == "active" and ca_detail.ca_cert_uri != rc_cert_uri:
+ logger.debug("AIA changed: was %s now %s", ca_detail.ca_cert_uri, rc_cert_uri)
+ ca_detail.ca_cert_uri = rc_cert_uri
+ ca_detail.save()
+
+ if ca_detail.state not in ("pending", "active"):
+ continue
+
+ if ca_detail.state == "pending":
+ current_resources = rpki.resource_set.resource_bag()
+ else:
+ current_resources = ca_detail.latest_ca_cert.get_3779resources()
+
+ if (ca_detail.state == "pending" or
+ sia_uri_changed or
+ ca_detail.latest_ca_cert != rc_cert or
+ ca_detail.latest_ca_cert.getNotAfter() != rc_resources.valid_until or
+ current_resources.undersized(rc_resources) or
+ current_resources.oversized(rc_resources)):
+
+ yield ca_detail.update(
+ rpkid = self.rpkid,
+ parent = parent,
+ ca = ca,
+ rc = rc,
+ sia_uri_changed = sia_uri_changed,
+ old_resources = current_resources)
+
+ if cert_map:
+ logger.warning("Unknown certificate g(SKI)%s %s in resource class %s in list_response to %s from %s, maybe you want to \"revoke_forgotten\"?",
+ "" if len(cert_map) == 1 else "s", ", ".join(cert_map), class_name, parent.tenant.tenant_handle, parent.parent_handle)
@queue_task
class UpdateChildrenTask(AbstractTask):
- """
- Check for updated IRDB data for all of this self's children and
- issue new certs as necessary. Must handle changes both in
- resources and in expiration date.
- """
-
- def clear(self):
- self.now = None
- self.rsn = None
- self.publisher = None
- self.iterator = None
- self.child = None
- self.child_certs = None
-
- def start(self):
- self.gctx.checkpoint()
- logger.debug("Self %s[%d] updating children", self.self_handle, self.self_id)
- self.now = rpki.sundial.now()
- self.rsn = self.now + rpki.sundial.timedelta(seconds = self.regen_margin)
- self.publisher = rpki.rpkid.publication_queue()
- rpki.async.iterator(self.children, self.loop, self.done)
-
- def loop(self, iterator, child):
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- self.iterator = iterator
- self.child = child
- self.child_certs = child.child_certs
- if self.overdue:
- self.publisher.call_pubd(lambda: self.postpone(self.do_child), self.publication_failed)
- else:
- self.do_child()
-
- def do_child(self):
- if self.child_certs:
- self.gctx.irdb_query_child_resources(self.child.self.self_handle, self.child.child_handle,
- self.got_resources, self.lose)
- else:
- self.iterator()
-
- def lose(self, e):
- logger.exception("Couldn't update child %r, skipping", self.child)
- self.iterator()
-
- def got_resources(self, irdb_resources):
- try:
- for child_cert in self.child_certs:
- ca_detail = child_cert.ca_detail
- ca = ca_detail.ca
- if ca_detail.state == "active":
- old_resources = child_cert.cert.get_3779resources()
- new_resources = old_resources & irdb_resources & ca_detail.latest_ca_cert.get_3779resources()
- old_aia = child_cert.cert.get_AIA()[0]
- new_aia = ca_detail.ca_cert_uri
-
- if new_resources.empty():
- logger.debug("Resources shrank to the null set, revoking and withdrawing child %s certificate SKI %s",
- self.child.child_handle, child_cert.cert.gSKI())
- child_cert.revoke(publisher = self.publisher)
- ca_detail.generate_crl(publisher = self.publisher)
- ca_detail.generate_manifest(publisher = self.publisher)
-
- elif (old_resources != new_resources or
- old_aia != new_aia or
- (old_resources.valid_until < self.rsn and
- irdb_resources.valid_until > self.now and
- old_resources.valid_until != irdb_resources.valid_until)):
-
- logger.debug("Need to reissue child %s certificate SKI %s",
- self.child.child_handle, child_cert.cert.gSKI())
- if old_resources != new_resources:
- logger.debug("Child %s SKI %s resources changed: old %s new %s",
- self.child.child_handle, child_cert.cert.gSKI(),
- old_resources, new_resources)
- if old_resources.valid_until != irdb_resources.valid_until:
- logger.debug("Child %s SKI %s validity changed: old %s new %s",
- self.child.child_handle, child_cert.cert.gSKI(),
- old_resources.valid_until, irdb_resources.valid_until)
-
- new_resources.valid_until = irdb_resources.valid_until
- child_cert.reissue(
- ca_detail = ca_detail,
- resources = new_resources,
- publisher = self.publisher)
-
- elif old_resources.valid_until < self.now:
- logger.debug("Child %s certificate SKI %s has expired: cert.valid_until %s, irdb.valid_until %s",
- self.child.child_handle, child_cert.cert.gSKI(),
- old_resources.valid_until, irdb_resources.valid_until)
- child_cert.sql_delete()
- self.publisher.withdraw(
- cls = rpki.publication.certificate_elt,
- uri = child_cert.uri,
- obj = child_cert.cert,
- repository = ca.parent.repository)
- ca_detail.generate_manifest(publisher = self.publisher)
-
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception, e:
- self.gctx.checkpoint()
- self.lose(e)
- else:
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- self.iterator()
-
- def done(self):
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- self.publisher.call_pubd(self.exit, self.publication_failed)
-
- def publication_failed(self, e):
- logger.exception("Couldn't publish for %s, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
+ """
+ Check for updated IRDB data for all of this tenant's children and
+ issue new certs as necessary. Must handle changes both in
+ resources and in expiration date.
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Updating children", self)
+ now = rpki.sundial.now()
+ rsn = now + rpki.sundial.timedelta(seconds = self.tenant.regen_margin)
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+ postponing = False
+
+ child_certs = rpki.rpkidb.models.ChildCert.objects.filter(child__tenant = self.tenant, ca_detail__state = "active")
+ child_handles = sorted(set(child_cert.child.child_handle for child_cert in child_certs))
+ irdb_resources = dict(zip(child_handles, (yield self.rpkid.irdb_query_children_resources(self.tenant.tenant_handle, child_handles))))
+
+ for child_cert in child_certs:
+ try:
+ ca_detail = child_cert.ca_detail
+ child_handle = child_cert.child.child_handle
+ old_resources = child_cert.cert.get_3779resources()
+ new_resources = old_resources & irdb_resources[child_handle] & ca_detail.latest_ca_cert.get_3779resources()
+ old_aia = child_cert.cert.get_AIA()[0]
+ new_aia = ca_detail.ca_cert_uri
+
+ assert child_cert.gski == child_cert.cert.gSKI()
+
+ if new_resources.empty():
+ logger.debug("Resources shrank to null set, revoking and withdrawing child %s g(SKI) %s",
+ child_handle, child_cert.gski)
+ child_cert.revoke(publisher = publisher)
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+
+ elif (old_resources != new_resources or old_aia != new_aia or
+ (old_resources.valid_until < rsn and
+ irdb_resources[child_handle].valid_until > now and
+ old_resources.valid_until != irdb_resources[child_handle].valid_until)):
+ logger.debug("Need to reissue child %s certificate g(SKI) %s", child_handle,
+ child_cert.gski)
+ if old_resources != new_resources:
+ logger.debug("Child %s g(SKI) %s resources changed: old %s new %s",
+ child_handle, child_cert.gski, old_resources, new_resources)
+ if old_resources.valid_until != irdb_resources[child_handle].valid_until:
+ logger.debug("Child %s g(SKI) %s validity changed: old %s new %s",
+ child_handle, child_cert.gski, old_resources.valid_until,
+ irdb_resources[child_handle].valid_until)
+
+ new_resources.valid_until = irdb_resources[child_handle].valid_until
+ child_cert.reissue(ca_detail = ca_detail, resources = new_resources, publisher = publisher)
+
+ elif old_resources.valid_until < now:
+ logger.debug("Child %s certificate g(SKI) %s has expired: cert.valid_until %s, irdb.valid_until %s",
+ child_handle, child_cert.gski, old_resources.valid_until,
+ irdb_resources[child_handle].valid_until)
+ child_cert.delete()
+ publisher.queue(uri = child_cert.uri,
+ old_obj = child_cert.cert,
+ repository = ca_detail.ca.parent.repository)
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+
+ except:
+ logger.exception("%r: Couldn't update %r, skipping", self, child_cert)
+
+ finally:
+ if (yield self.overdue()):
+ postponing = True
+ break
+ try:
+ yield publisher.call_pubd()
+ except:
+ logger.exception("%r: Couldn't publish, skipping", self)
-@queue_task
-class UpdateROAsTask(AbstractTask):
- """
- Generate or update ROAs for this self.
- """
-
- def clear(self):
- self.orphans = None
- self.updates = None
- self.publisher = None
- self.ca_details = None
- self.count = None
-
- def start(self):
- self.gctx.checkpoint()
- self.gctx.sql.sweep()
- logger.debug("Self %s[%d] updating ROAs", self.self_handle, self.self_id)
-
- logger.debug("Issuing query for ROA requests")
- self.gctx.irdb_query_roa_requests(self.self_handle, self.got_roa_requests, self.roa_requests_failed)
-
- def got_roa_requests(self, roa_requests):
- self.gctx.checkpoint()
- logger.debug("Received response to query for ROA requests")
-
- if self.gctx.sql.dirty:
- logger.warning("Unexpected dirty SQL cache, flushing")
- self.gctx.sql.sweep()
-
- roas = {}
- seen = set()
- self.orphans = []
- self.updates = []
- self.publisher = rpki.rpkid.publication_queue()
- self.ca_details = set()
-
- for roa in self.roas:
- k = (roa.asn, str(roa.ipv4), str(roa.ipv6))
- if k not in roas:
- roas[k] = roa
- elif (roa.roa is not None and roa.cert is not None and roa.ca_detail is not None and roa.ca_detail.state == "active" and
- (roas[k].roa is None or roas[k].cert is None or roas[k].ca_detail is None or roas[k].ca_detail.state != "active")):
- self.orphans.append(roas[k])
- roas[k] = roa
- else:
- self.orphans.append(roa)
-
- for roa_request in roa_requests:
- k = (roa_request.asn, str(roa_request.ipv4), str(roa_request.ipv6))
- if k in seen:
- logger.warning("Skipping duplicate ROA request %r", roa_request)
- else:
- seen.add(k)
- roa = roas.pop(k, None)
- if roa is None:
- roa = rpki.rpkid.roa_obj(self.gctx, self.self_id, roa_request.asn, roa_request.ipv4, roa_request.ipv6)
- logger.debug("Created new %r", roa)
- else:
- logger.debug("Found existing %r", roa)
- self.updates.append(roa)
-
- self.orphans.extend(roas.itervalues())
-
- if self.overdue:
- self.postpone(self.begin_loop)
- else:
- self.begin_loop()
-
- def begin_loop(self):
- self.count = 0
- rpki.async.iterator(self.updates, self.loop, self.done, pop_list = True)
-
- def loop(self, iterator, roa):
- self.gctx.checkpoint()
- try:
- roa.update(publisher = self.publisher, fast = True)
- self.ca_details.add(roa.ca_detail)
- self.gctx.sql.sweep()
- except (SystemExit, rpki.async.ExitNow):
- raise
- except rpki.exceptions.NoCoveringCertForROA:
- logger.warning("No covering certificate for %r, skipping", roa)
- except Exception:
- logger.exception("Could not update %r, skipping", roa)
- self.count += 1
- if self.overdue:
- self.publish(lambda: self.postpone(iterator))
- else:
- iterator()
-
- def publish(self, done):
- if not self.publisher.empty():
- for ca_detail in self.ca_details:
- logger.debug("Generating new CRL for %r", ca_detail)
- ca_detail.generate_crl(publisher = self.publisher)
- logger.debug("Generating new manifest for %r", ca_detail)
- ca_detail.generate_manifest(publisher = self.publisher)
- self.ca_details.clear()
- self.gctx.sql.sweep()
- self.gctx.checkpoint()
- self.publisher.call_pubd(done, self.publication_failed)
-
- def publication_failed(self, e):
- logger.exception("Couldn't publish for %s, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
-
- def done(self):
- for roa in self.orphans:
- try:
- self.ca_details.add(roa.ca_detail)
- roa.revoke(publisher = self.publisher, fast = True)
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception:
- logger.exception("Could not revoke %r", roa)
- self.gctx.sql.sweep()
- self.gctx.checkpoint()
- self.publish(self.exit)
-
- def roa_requests_failed(self, e):
- logger.exception("Could not fetch ROA requests for %s, skipping", self.self_handle)
- self.exit()
+ if postponing:
+ raise PostponeTask
@queue_task
-class UpdateGhostbustersTask(AbstractTask):
- """
- Generate or update Ghostbuster records for this self.
-
- This was originally based on the ROA update code. It's possible
- that both could benefit from refactoring, but at this point the
- potential scaling issues for ROAs completely dominate structure of
- the ROA code, and aren't relevant here unless someone is being
- exceptionally silly.
- """
-
- def start(self):
- self.gctx.checkpoint()
- logger.debug("Self %s[%d] updating Ghostbuster records",
- self.self_handle, self.self_id)
-
- self.gctx.irdb_query_ghostbuster_requests(self.self_handle,
- (p.parent_handle for p in self.parents),
- self.got_ghostbuster_requests,
- self.ghostbuster_requests_failed)
-
- def got_ghostbuster_requests(self, ghostbuster_requests):
-
- try:
- self.gctx.checkpoint()
- if self.gctx.sql.dirty:
- logger.warning("Unexpected dirty SQL cache, flushing")
- self.gctx.sql.sweep()
-
- ghostbusters = {}
- orphans = []
- publisher = rpki.rpkid.publication_queue()
- ca_details = set()
- seen = set()
-
- parents = dict((p.parent_handle, p) for p in self.parents)
-
- for ghostbuster in self.ghostbusters:
- k = (ghostbuster.ca_detail_id, ghostbuster.vcard)
- if ghostbuster.ca_detail.state != "active" or k in ghostbusters:
- orphans.append(ghostbuster)
- else:
- ghostbusters[k] = ghostbuster
-
- for ghostbuster_request in ghostbuster_requests:
- if ghostbuster_request.parent_handle not in parents:
- logger.warning("Unknown parent_handle %r in Ghostbuster request, skipping", ghostbuster_request.parent_handle)
- continue
- k = (ghostbuster_request.parent_handle, ghostbuster_request.vcard)
- if k in seen:
- logger.warning("Skipping duplicate Ghostbuster request %r", ghostbuster_request)
- continue
- seen.add(k)
- for ca in parents[ghostbuster_request.parent_handle].cas:
- ca_detail = ca.active_ca_detail
- if ca_detail is not None:
- ghostbuster = ghostbusters.pop((ca_detail.ca_detail_id, ghostbuster_request.vcard), None)
- if ghostbuster is None:
- ghostbuster = rpki.rpkid.ghostbuster_obj(self.gctx, self.self_id, ca_detail.ca_detail_id, ghostbuster_request.vcard)
- logger.debug("Created new %r for %r", ghostbuster, ghostbuster_request.parent_handle)
+class UpdateROAsTask(AbstractTask):
+ """
+ Generate or update ROAs for this tenant.
+ """
+
+ # XXX This might need rewriting to avoid race conditions.
+ #
+ # There's a theoretical race condition here if we're chugging away
+ # and something else needs to update the manifest or CRL, or if
+ # some back-end operation generates or destroys ROAs. The risk is
+ # fairly low given that we defer CRL and manifest generation until
+ # we're ready to publish, but it's theoretically present.
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Updating ROAs", self)
+
+ try:
+ r_msg = yield self.rpkid.irdb_query_roa_requests(self.tenant.tenant_handle)
+ except:
+ logger.exception("Could not fetch ROA requests for %s, skipping", self.tenant.tenant_handle)
+ return
+
+ logger.debug("%r: Received response to query for ROA requests: %r", self, r_msg)
+
+ roas = {}
+ seen = set()
+ orphans = []
+ creates = []
+ updates = []
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+ ca_details = set()
+
+ for roa in self.tenant.roas.all():
+ k = "{!s} {!s} {!s}".format(roa.asn, roa.ipv4, roa.ipv6)
+ if k not in roas:
+ roas[k] = roa
+ elif roa.roa is not None and roa.cert is not None and roa.ca_detail is not None and roa.ca_detail.state == "active" and (roas[k].roa is None or roas[k].cert is None or roas[k].ca_detail is None or roas[k].ca_detail.state != "active"):
+ orphans.append(roas[k])
+ roas[k] = roa
+ else:
+ orphans.append(roa)
+
+ for r_pdu in r_msg:
+ k = "{!s} {!s} {!s}".format(r_pdu.get("asn"), r_pdu.get("ipv4"), r_pdu.get("ipv6"))
+ if k in seen:
+ logger.warning("%r: Skipping duplicate ROA request %r", self, r_pdu)
+ continue
+ seen.add(k)
+ roa = roas.pop(k, None)
+ if roa is None:
+ roa = rpki.rpkidb.models.ROA(tenant = self.tenant, asn = long(r_pdu.get("asn")), ipv4 = r_pdu.get("ipv4"), ipv6 = r_pdu.get("ipv6"))
+ logger.debug("%r: Try to create %r", self, roa)
+ creates.append(roa)
else:
- logger.debug("Found existing %r for %s", ghostbuster, ghostbuster_request.parent_handle)
- ghostbuster.update(publisher = publisher, fast = True)
- ca_details.add(ca_detail)
+ logger.debug("%r: Found existing %r", self, roa)
+ updates.append(roa)
+
+ orphans.extend(roas.itervalues())
+
+ roas = creates + updates
+
+ r_msg = seen = creates = updates = None
- orphans.extend(ghostbusters.itervalues())
- for ghostbuster in orphans:
- ca_details.add(ghostbuster.ca_detail)
- ghostbuster.revoke(publisher = publisher, fast = True)
+ postponing = False
- for ca_detail in ca_details:
- ca_detail.generate_crl(publisher = publisher)
- ca_detail.generate_manifest(publisher = publisher)
+ while roas and not postponing:
+ if (yield self.overdue()):
+ postponing = True
+ break
+ roa = roas.pop(0)
+ try:
+ roa.update(publisher = publisher)
+ ca_details.add(roa.ca_detail)
+ except rpki.exceptions.NoCoveringCertForROA:
+ logger.warning("%r: No covering certificate for %r, skipping", self, roa)
+ except:
+ logger.exception("%r: Could not update %r, skipping", self, roa)
- self.gctx.sql.sweep()
+ if not postponing:
+ for roa in orphans:
+ try:
+ ca_details.add(roa.ca_detail)
+ roa.revoke(publisher = publisher)
+ except:
+ logger.exception("%r: Could not revoke %r", self, roa)
- self.gctx.checkpoint()
- publisher.call_pubd(self.exit, self.publication_failed)
+ if not publisher.empty():
+ for ca_detail in ca_details:
+ logger.debug("%r: Generating new CRL and manifest for %r", self, ca_detail)
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+ yield publisher.call_pubd()
- except (SystemExit, rpki.async.ExitNow):
- raise
- except Exception:
- logger.exception("Could not update Ghostbuster records for %s, skipping", self.self_handle)
- self.exit()
+ if postponing:
+ raise PostponeTask
- def publication_failed(self, e):
- logger.exception("Couldn't publish Ghostbuster updates for %s, skipping", self.self_handle)
- self.gctx.checkpoint()
- self.exit()
- def ghostbuster_requests_failed(self, e):
- logger.exception("Could not fetch Ghostbuster record requests for %s, skipping", self.self_handle)
- self.exit()
+@queue_task
+class UpdateGhostbustersTask(AbstractTask):
+ """
+ Generate or update Ghostbuster records for this tenant.
+
+ This was originally based on the ROA update code. It's possible
+ that both could benefit from refactoring, but at this point the
+ potential scaling issues for ROAs completely dominate structure of
+ the ROA code, and aren't relevant here unless someone is being
+ exceptionally silly.
+ """
+
+ @tornado.gen.coroutine
+ def main(self):
+ logger.debug("%r: Updating Ghostbuster records", self)
+ parent_handles = set(p.parent_handle for p in rpki.rpkidb.models.Parent.objects.filter(tenant = self.tenant))
+
+ try:
+ r_msg = yield self.rpkid.irdb_query_ghostbuster_requests(self.tenant.tenant_handle, parent_handles)
+
+ ghostbusters = {}
+ orphans = []
+ publisher = rpki.rpkid.publication_queue(self.rpkid)
+ ca_details = set()
+ seen = set()
+
+ for ghostbuster in self.tenant.ghostbusters.all():
+ k = (ghostbuster.ca_detail.pk, ghostbuster.vcard)
+ if ghostbuster.ca_detail.state != "active" or k in ghostbusters:
+ orphans.append(ghostbuster)
+ else:
+ ghostbusters[k] = ghostbuster
+
+ for r_pdu in r_msg:
+ if not rpki.rpkidb.models.Parent.objects.filter(tenant = self.tenant, parent_handle = r_pdu.get("parent_handle")).exists():
+ logger.warning("%r: Unknown parent_handle %r in Ghostbuster request, skipping", self, r_pdu.get("parent_handle"))
+ continue
+ k = (r_pdu.get("parent_handle"), r_pdu.text)
+ if k in seen:
+ logger.warning("%r: Skipping duplicate Ghostbuster request %r", self, r_pdu)
+ continue
+ seen.add(k)
+ for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(ca__parent__parent_handle = r_pdu.get("parent_handle"),
+ ca__parent__tenant = self.tenant,
+ state = "active"):
+ ghostbuster = ghostbusters.pop((ca_detail.pk, r_pdu.text), None)
+ if ghostbuster is None:
+ ghostbuster = rpki.rpkidb.models.Ghostbuster(tenant = self.tenant, ca_detail = ca_detail, vcard = r_pdu.text)
+ logger.debug("%r: Created new %r for %r", self, ghostbuster, r_pdu.get("parent_handle"))
+ else:
+ logger.debug("%r: Found existing %r for %r", self, ghostbuster, r_pdu.get("parent_handle"))
+ ghostbuster.update(publisher = publisher)
+ ca_details.add(ca_detail)
+
+ orphans.extend(ghostbusters.itervalues())
+ for ghostbuster in orphans:
+ ca_details.add(ghostbuster.ca_detail)
+ ghostbuster.revoke(publisher = publisher)
+
+ for ca_detail in ca_details:
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+
+ yield publisher.call_pubd()
+
+ except:
+ logger.exception("Could not update Ghostbuster records for %s, skipping", self.tenant.tenant_handle)
@queue_task
class UpdateEECertificatesTask(AbstractTask):
    """
    Generate or update EE certificates for this tenant.

    Not yet sure what kind of scaling constraints this task might have,
    so keeping it simple for initial version, we can optimize later.
    """

    @tornado.gen.coroutine
    def main(self):
        logger.debug("%r: Updating EE certificates", self)

        try:
            r_msg = yield self.rpkid.irdb_query_ee_certificate_requests(self.tenant.tenant_handle)

            publisher = rpki.rpkid.publication_queue(self.rpkid)

            logger.debug("%r: Examining EE certificate requests", self)

            # Index the EE certificates we already hold by gSKI so that we
            # can match them against the incoming requests.
            existing = dict()
            for ee in self.tenant.ee_certificates.all():
                existing.setdefault(ee.gski, set()).add(ee)

            # CADetails whose CRL and manifest need regenerating afterwards.
            ca_details = set()

            for r_pdu in r_msg:
                gski = r_pdu.get("gski")
                ees = existing.pop(gski, ())

                resources = rpki.resource_set.resource_bag(
                    asn         = r_pdu.get("asn"),
                    v4          = r_pdu.get("ipv4"),
                    v6          = r_pdu.get("ipv6"),
                    valid_until = r_pdu.get("valid_until"))
                covering = self.tenant.find_covering_ca_details(resources)
                ca_details.update(covering)

                for ee in ees:
                    if ee.ca_detail in covering:
                        logger.debug("%r: Updating %r for %s %s", self, ee, gski, resources)
                        ee.reissue(resources = resources, publisher = publisher)
                        covering.remove(ee.ca_detail)
                    else:
                        # This probably never happens, as the most likely cause would be a CA certificate
                        # being revoked, which should trigger automatic clean up of issued certificates.
                        logger.debug("%r: %r for %s %s is no longer covered", self, ee, gski, resources)
                        ca_details.add(ee.ca_detail)
                        ee.revoke(publisher = publisher)

                # The PKCS #10 request rides as the PDU's first (Base64) sub-element.
                subject_name = rpki.x509.X501DN.from_cn(r_pdu.get("cn"), r_pdu.get("sn"))
                subject_key  = rpki.x509.PKCS10(Base64 = r_pdu[0].text).getPublicKey()

                # An absent or empty eku attribute means "no EKU".  The old
                # expression r_pdu.get("eku", "").split(",") evaluated to [""]
                # (truthy) for an empty string, so "or None" never fired and we
                # issued certificates with a bogus empty-string EKU.
                eku = r_pdu.get("eku")
                eku = eku.split(",") if eku else None

                for ca_detail in covering:
                    logger.debug("%r: No existing EE certificate for %s %s", self, gski, resources)
                    cn, sn = subject_name.extract_cn_and_sn()
                    cert = ca_detail.issue_ee(
                        ca          = ca_detail.ca,
                        subject_key = subject_key,
                        sia         = None,
                        resources   = resources,
                        notAfter    = resources.valid_until,
                        cn          = cn,
                        sn          = sn,
                        eku         = eku)
                    ee = rpki.rpkidb.models.EECertificate.objects.create(
                        tenant    = ca_detail.ca.parent.tenant,
                        ca_detail = ca_detail,
                        cert      = cert,
                        gski      = subject_key.gSKI())
                    publisher.queue(
                        uri        = ee.uri,
                        new_obj    = cert,
                        repository = ca_detail.ca.parent.repository,
                        handler    = ee.published_callback)

            # Anything left in the index had no matching request: it's an
            # orphan, so revoke it.
            for ees in existing.values():
                for ee in ees:
                    ca_details.add(ee.ca_detail)
                    ee.revoke(publisher = publisher)

            for ca_detail in ca_details:
                ca_detail.generate_crl_and_manifest(publisher = publisher)

            yield publisher.call_pubd()

        # Catch Exception, not BaseException: a bare "except:" inside a
        # coroutine would also swallow GeneratorExit and SystemExit, which
        # the pre-Tornado version of this code explicitly re-raised.
        except Exception:
            logger.exception("%r: Could not update EE certificates, skipping", self)
@queue_task
class RegenerateCRLsAndManifestsTask(AbstractTask):
    """
    Generate new CRLs and manifests as necessary for all of this tenant's
    CAs.  Extracting nextUpdate from a manifest is hard at the moment
    due to implementation silliness, so for now we generate a new
    manifest whenever we generate a new CRL.

    This code also cleans up tombstones left behind by revoked ca_detail
    objects, since we're walking through the relevant portions of the
    database anyway.
    """

    @tornado.gen.coroutine
    def main(self):
        logger.debug("%r: Regenerating CRLs and manifests", self)

        try:
            publisher = rpki.rpkid.publication_queue(self.rpkid)
            now = rpki.sundial.now()

            # Regenerate anything that would expire before we can expect to
            # run again, with a safety margin: a quarter of the tenant's CRL
            # interval or twice the cron period, whichever is larger.
            regen_margin = max(rpki.sundial.timedelta(seconds = self.tenant.crl_interval) / 4,
                               rpki.sundial.timedelta(seconds = self.rpkid.cron_period)    * 2)

            ca_details = rpki.rpkidb.models.CADetail.objects.filter(ca__parent__tenant = self.tenant,
                                                                    next_crl_manifest_update__isnull = False)

            # Tombstone cleanup: revoked ca_details whose final CRL has expired.
            for ca_detail in ca_details.filter(next_crl_manifest_update__lt = now,
                                               state = "revoked"):
                ca_detail.destroy(publisher = publisher)

            for ca_detail in ca_details.filter(state__in = ("active", "deprecated"),
                                               next_crl_manifest_update__lt = now + regen_margin):
                ca_detail.generate_crl_and_manifest(publisher = publisher)

            yield publisher.call_pubd()

        # Catch Exception, not BaseException: a bare "except:" inside a
        # coroutine would also swallow GeneratorExit and SystemExit.
        except Exception:
            logger.exception("%r: Couldn't publish updated CRLs and manifests, skipping", self)
@queue_task
class CheckFailedPublication(AbstractTask):
    """
    Periodic check for objects we tried to publish but failed (eg, due
    to pubd being down or unreachable).
    """

    @tornado.gen.coroutine
    def main(self):
        logger.debug("%r: Checking for failed publication actions", self)

        try:
            publisher = rpki.rpkid.publication_queue(self.rpkid)
            # Only active ca_details can have pending publication state worth retrying.
            for ca_detail in rpki.rpkidb.models.CADetail.objects.filter(ca__parent__tenant = self.tenant,
                                                                        state = "active"):
                ca_detail.check_failed_publication(publisher)
            yield publisher.call_pubd()

        # Catch Exception, not BaseException: a bare "except:" inside a
        # coroutine would also swallow GeneratorExit and SystemExit.
        except Exception:
            logger.exception("%r: Couldn't run failed publications, skipping", self)
diff --git a/rpki/rpkidb/__init__.py b/rpki/rpkidb/__init__.py
new file mode 100644
index 00000000..7764913c
--- /dev/null
+++ b/rpki/rpkidb/__init__.py
@@ -0,0 +1,3 @@
+# $Id$
+#
+# Placeholder for rpkidb Django models not yet written.
diff --git a/rpki/rpkidb/migrations/0001_initial.py b/rpki/rpkidb/migrations/0001_initial.py
new file mode 100644
index 00000000..274775e3
--- /dev/null
+++ b/rpki/rpkidb/migrations/0001_initial.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import rpki.fields
+
+
class Migration(migrations.Migration):
    """
    Initial schema for the rpkidb application.

    Creates the per-tenant engine state tables (Tenant, BSC, Parent,
    Child, Repository, CA, CADetail) and the issued-object tables
    (ChildCert, EECertificate, Ghostbuster, ROA, RevokedCert).
    """

    # First migration for this app: nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='BSC',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('bsc_handle', models.SlugField(max_length=255)),
                ('private_key_id', rpki.fields.RSAPrivateKeyField()),
                ('pkcs10_request', rpki.fields.PKCS10Field()),
                ('hash_alg', rpki.fields.EnumField(default='sha256', choices=[(1, 'sha256')])),
                ('signing_cert', rpki.fields.CertificateField(null=True)),
                ('signing_cert_crl', rpki.fields.CRLField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='CA',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('last_crl_manifest_number', models.BigIntegerField(default=1)),
                ('last_issued_sn', models.BigIntegerField(default=1)),
                ('sia_uri', models.TextField(null=True)),
                ('parent_resource_class', models.TextField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='CADetail',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('public_key', rpki.fields.PublicKeyField(null=True)),
                ('private_key_id', rpki.fields.RSAPrivateKeyField(null=True)),
                ('latest_crl', rpki.fields.CRLField(null=True)),
                ('crl_published', rpki.fields.SundialField(null=True)),
                ('latest_ca_cert', rpki.fields.CertificateField(null=True)),
                ('manifest_private_key_id', rpki.fields.RSAPrivateKeyField(null=True)),
                ('manifest_public_key', rpki.fields.PublicKeyField(null=True)),
                ('latest_manifest', rpki.fields.ManifestField(null=True)),
                ('manifest_published', rpki.fields.SundialField(null=True)),
                ('next_crl_manifest_update', rpki.fields.SundialField(null=True)),
                ('state', rpki.fields.EnumField(choices=[(1, 'pending'), (2, 'active'), (3, 'deprecated'), (4, 'revoked')])),
                ('ca_cert_uri', models.TextField(null=True)),
                ('ca', models.ForeignKey(related_name='ca_details', to='rpkidb.CA')),
            ],
        ),
        migrations.CreateModel(
            name='Child',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('child_handle', models.SlugField(max_length=255)),
                ('bpki_cert', rpki.fields.CertificateField(null=True)),
                ('bpki_glue', rpki.fields.CertificateField(null=True)),
                ('last_cms_timestamp', rpki.fields.SundialField(null=True)),
                ('bsc', models.ForeignKey(related_name='children', to='rpkidb.BSC')),
            ],
        ),
        migrations.CreateModel(
            name='ChildCert',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('cert', rpki.fields.CertificateField()),
                ('published', rpki.fields.SundialField(null=True)),
                ('gski', models.CharField(max_length=27)),
                ('ca_detail', models.ForeignKey(related_name='child_certs', to='rpkidb.CADetail')),
                ('child', models.ForeignKey(related_name='child_certs', to='rpkidb.Child')),
            ],
        ),
        migrations.CreateModel(
            name='EECertificate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('gski', models.CharField(max_length=27)),
                ('cert', rpki.fields.CertificateField()),
                ('published', rpki.fields.SundialField(null=True)),
                ('ca_detail', models.ForeignKey(related_name='ee_certificates', to='rpkidb.CADetail')),
            ],
        ),
        migrations.CreateModel(
            name='Ghostbuster',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('vcard', models.TextField()),
                ('cert', rpki.fields.CertificateField()),
                ('ghostbuster', rpki.fields.GhostbusterField()),
                ('published', rpki.fields.SundialField(null=True)),
                ('ca_detail', models.ForeignKey(related_name='ghostbusters', to='rpkidb.CADetail')),
            ],
        ),
        migrations.CreateModel(
            name='Parent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('parent_handle', models.SlugField(max_length=255)),
                ('bpki_cert', rpki.fields.CertificateField(null=True)),
                ('bpki_glue', rpki.fields.CertificateField(null=True)),
                ('peer_contact_uri', models.TextField(null=True)),
                ('sia_base', models.TextField(null=True)),
                ('sender_name', models.TextField(null=True)),
                ('recipient_name', models.TextField(null=True)),
                ('last_cms_timestamp', rpki.fields.SundialField(null=True)),
                ('bsc', models.ForeignKey(related_name='parents', to='rpkidb.BSC')),
            ],
        ),
        migrations.CreateModel(
            name='Repository',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('repository_handle', models.SlugField(max_length=255)),
                ('peer_contact_uri', models.TextField(null=True)),
                ('rrdp_notification_uri', models.TextField(null=True)),
                ('bpki_cert', rpki.fields.CertificateField(null=True)),
                ('bpki_glue', rpki.fields.CertificateField(null=True)),
                ('last_cms_timestamp', rpki.fields.SundialField(null=True)),
                ('bsc', models.ForeignKey(related_name='repositories', to='rpkidb.BSC')),
            ],
        ),
        migrations.CreateModel(
            name='RevokedCert',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('serial', models.BigIntegerField()),
                ('revoked', rpki.fields.SundialField()),
                ('expires', rpki.fields.SundialField()),
                ('ca_detail', models.ForeignKey(related_name='revoked_certs', to='rpkidb.CADetail')),
            ],
        ),
        migrations.CreateModel(
            name='ROA',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('asn', models.BigIntegerField()),
                ('ipv4', models.TextField(null=True)),
                ('ipv6', models.TextField(null=True)),
                ('cert', rpki.fields.CertificateField()),
                ('roa', rpki.fields.ROAField()),
                ('published', rpki.fields.SundialField(null=True)),
                ('ca_detail', models.ForeignKey(related_name='roas', to='rpkidb.CADetail')),
            ],
        ),
        migrations.CreateModel(
            name='Tenant',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('tenant_handle', models.SlugField(max_length=255)),
                ('use_hsm', models.BooleanField(default=False)),
                ('crl_interval', models.BigIntegerField(null=True)),
                ('regen_margin', models.BigIntegerField(null=True)),
                ('bpki_cert', rpki.fields.CertificateField(null=True)),
                ('bpki_glue', rpki.fields.CertificateField(null=True)),
            ],
        ),
        # Foreign keys added after CreateModel to avoid forward references
        # between models declared above.
        migrations.AddField(
            model_name='roa',
            name='tenant',
            field=models.ForeignKey(related_name='roas', to='rpkidb.Tenant'),
        ),
        migrations.AddField(
            model_name='repository',
            name='tenant',
            field=models.ForeignKey(related_name='repositories', to='rpkidb.Tenant'),
        ),
        migrations.AddField(
            model_name='parent',
            name='repository',
            field=models.ForeignKey(related_name='parents', to='rpkidb.Repository'),
        ),
        migrations.AddField(
            model_name='parent',
            name='tenant',
            field=models.ForeignKey(related_name='parents', to='rpkidb.Tenant'),
        ),
        migrations.AddField(
            model_name='ghostbuster',
            name='tenant',
            field=models.ForeignKey(related_name='ghostbusters', to='rpkidb.Tenant'),
        ),
        migrations.AddField(
            model_name='eecertificate',
            name='tenant',
            field=models.ForeignKey(related_name='ee_certificates', to='rpkidb.Tenant'),
        ),
        migrations.AddField(
            model_name='child',
            name='tenant',
            field=models.ForeignKey(related_name='children', to='rpkidb.Tenant'),
        ),
        migrations.AddField(
            model_name='ca',
            name='parent',
            field=models.ForeignKey(related_name='cas', to='rpkidb.Parent'),
        ),
        migrations.AddField(
            model_name='bsc',
            name='tenant',
            field=models.ForeignKey(related_name='bscs', to='rpkidb.Tenant'),
        ),
        # Handles are unique per tenant, not globally.
        migrations.AlterUniqueTogether(
            name='repository',
            unique_together=set([('tenant', 'repository_handle')]),
        ),
        migrations.AlterUniqueTogether(
            name='parent',
            unique_together=set([('tenant', 'parent_handle')]),
        ),
        migrations.AlterUniqueTogether(
            name='child',
            unique_together=set([('tenant', 'child_handle')]),
        ),
        migrations.AlterUniqueTogether(
            name='bsc',
            unique_together=set([('tenant', 'bsc_handle')]),
        ),
    ]
diff --git a/rpki/rpkidb/migrations/0002_root.py b/rpki/rpkidb/migrations/0002_root.py
new file mode 100644
index 00000000..de2b95dd
--- /dev/null
+++ b/rpki/rpkidb/migrations/0002_root.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
class Migration(migrations.Migration):
    """
    Add root_asn_resources, root_ipv4_resources and root_ipv6_resources
    text fields to the Parent model, defaulting to the empty string.
    """

    dependencies = [
        ('rpkidb', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='parent',
            name='root_asn_resources',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='parent',
            name='root_ipv4_resources',
            field=models.TextField(default=''),
        ),
        migrations.AddField(
            model_name='parent',
            name='root_ipv6_resources',
            field=models.TextField(default=''),
        ),
    ]
diff --git a/rpki/rpkidb/migrations/__init__.py b/rpki/rpkidb/migrations/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rpki/rpkidb/migrations/__init__.py
diff --git a/rpki/rpkidb/models.py b/rpki/rpkidb/models.py
new file mode 100644
index 00000000..3021a0d4
--- /dev/null
+++ b/rpki/rpkidb/models.py
@@ -0,0 +1,2466 @@
+"""
+Django ORM models for rpkid.
+"""
+
+from __future__ import unicode_literals
+
+import logging
+
+import tornado.gen
+import tornado.web
+import tornado.ioloop
+import tornado.httputil
+import tornado.httpclient
+import tornado.httpserver
+
+from django.db import models
+
+import rpki.left_right
+import rpki.sundial
+
+from rpki.fields import (EnumField, SundialField,
+ CertificateField, RSAPrivateKeyField,
+ PublicKeyField, CRLField, PKCS10Field,
+ ManifestField, ROAField, GhostbusterField)
+
+from lxml.etree import Element, SubElement, tostring as ElementToString
+
+logger = logging.getLogger(__name__)
+
+# pylint: disable=W5101
+
+
+# XXX Temporary hack to help trace call chains so we can clear some of
+# the historical clutter out of this module.
+
def trace_call_chain():
    """
    Debugging aid: log which function called which.

    Hard-wired off; flip the guard below to enable it.
    """

    enabled = False
    if enabled:
        from traceback import extract_stack
        frames = extract_stack(None, 3)
        (caller_file, caller_line, caller_name), \
            (callee_file, callee_line, callee_name) = (f[:3] for f in frames[:2])
        logger.debug("<Call trace> %s() at %s:%s called by %s() at %s:%s",
                     callee_name, callee_file, callee_line,
                     caller_name, caller_file, caller_line)
+
+
+# The objects available via the left-right protocol allow NULL values
+# in places we wouldn't otherwise (eg, bpki_cert fields), to support
+# existing protocol which allows back-end to build up objects
+# gradually. We may want to rethink this eventually, but that yak can
+# wait for its shave, particularly since disallowing null should be a
+# very simple change given migrations.
+
class XMLTemplate(object):
    """
    Encapsulate all the voodoo for transcoding between lxml and ORM.

    Each model class using this declares an xml_template instance
    describing which of its fields appear in the left-right XML as
    handles, plain attributes, yes/no booleans, or Base64 sub-elements.
    """

    # Whether to drop XML into the log

    debug = False

    # Type map to simplify declaration of Base64 sub-elements.

    element_type = dict(bpki_cert        = rpki.x509.X509,
                        bpki_glue        = rpki.x509.X509,
                        rpki_root_cert   = rpki.x509.X509,
                        pkcs10_request   = rpki.x509.PKCS10,
                        signing_cert     = rpki.x509.X509,
                        signing_cert_crl = rpki.x509.CRL)


    def __init__(self, name, attributes = (), booleans = (), elements = (), readonly = (), handles = ()):
        # name:       XML element name, also the prefix of the model's handle field.
        # handles:    related model classes whose handles appear as <name>_handle attributes.
        # attributes: scalar XML attributes copied to/from same-named fields.
        # booleans:   attributes serialized as "yes"/absent.
        # elements:   Base64 sub-elements, decoded via element_type above.
        # readonly:   Base64 sub-elements encoded in responses but never decoded.
        self.name = name
        self.handles = handles
        self.attributes = attributes
        self.booleans = booleans
        self.elements = elements
        self.readonly = readonly


    def encode(self, obj, q_pdu, r_msg):
        """
        Encode an ORM object as XML, appending the new PDU to r_msg.

        The action and tag attributes are echoed from the query PDU q_pdu.
        """

        r_pdu = SubElement(r_msg, rpki.left_right.xmlns + self.name, nsmap = rpki.left_right.nsmap, action = q_pdu.get("action"))
        # Every object except <tenant/> itself is qualified by its tenant's handle.
        if self.name != "tenant":
            r_pdu.set("tenant_handle", obj.tenant.tenant_handle)
        r_pdu.set(self.name + "_handle", getattr(obj, self.name + "_handle"))
        if q_pdu.get("tag"):
            r_pdu.set("tag", q_pdu.get("tag"))
        for h in self.handles:
            k = h.xml_template.name
            v = getattr(obj, k)
            if v is not None:
                r_pdu.set(k + "_handle", getattr(v, k + "_handle"))
        for k in self.attributes:
            v = getattr(obj, k)
            if v is not None:
                r_pdu.set(k, str(v))
        for k in self.booleans:
            if getattr(obj, k):
                r_pdu.set(k, "yes")
        # Base64 payloads: skip fields that are NULL or empty.
        for k in self.elements + self.readonly:
            v = getattr(obj, k)
            if v is not None and not v.empty():
                SubElement(r_pdu, rpki.left_right.xmlns + k).text = v.get_Base64()
        if self.debug:
            logger.debug("XMLTemplate.encode(): %s", ElementToString(r_pdu))


    def acknowledge(self, obj, q_pdu, r_msg):
        """
        Add an acknowledgement PDU in response to a create, set, or
        destroy action.

        Like encode(), but only the readonly Base64 fields are included,
        and nothing at all for a destroy action.
        """

        assert q_pdu.tag == rpki.left_right.xmlns + self.name
        action = q_pdu.get("action")
        r_pdu = SubElement(r_msg, rpki.left_right.xmlns + self.name, nsmap = rpki.left_right.nsmap, action = action)
        if self.name != "tenant":
            r_pdu.set("tenant_handle", obj.tenant.tenant_handle)
        r_pdu.set(self.name + "_handle", getattr(obj, self.name + "_handle"))
        if q_pdu.get("tag"):
            r_pdu.set("tag", q_pdu.get("tag"))
        if action != "destroy":
            for k in self.readonly:
                v = getattr(obj, k)
                if v is not None and not v.empty():
                    SubElement(r_pdu, rpki.left_right.xmlns + k).text = v.get_Base64()
        if self.debug:
            logger.debug("XMLTemplate.acknowledge(): %s", ElementToString(r_pdu))


    def decode(self, obj, q_pdu):
        """
        Decode XML into an ORM object, setting only the fields present
        in the query PDU.
        """

        if self.debug:
            logger.debug("XMLTemplate.decode(): %r %s", obj, ElementToString(q_pdu))
        assert q_pdu.tag == rpki.left_right.xmlns + self.name
        for h in self.handles:
            k = h.xml_template.name
            v = q_pdu.get(k + "_handle")
            if v is not None:
                # Handle lookups are scoped to the object's tenant.
                setattr(obj, k, h.objects.get(**{k + "_handle" : v, "tenant" : obj.tenant}))
        for k in self.attributes:
            v = q_pdu.get(k)
            if v is not None:
                # NOTE(review): the result of encode() is discarded here;
                # presumably this is an ASCII sanity check (it raises
                # UnicodeEncodeError on non-ASCII input) rather than a
                # conversion -- confirm intent.
                v.encode("ascii")
                # All-digit attribute values become integers.
                if v.isdigit():
                    v = long(v)
                setattr(obj, k, v)
        for k in self.booleans:
            v = q_pdu.get(k)
            if v is not None:
                setattr(obj, k, v == "yes")
        for k in self.elements:
            v = q_pdu.findtext(rpki.left_right.xmlns + k)
            if v and v.strip():
                setattr(obj, k, self.element_type[k](Base64 = v))
+
+
class XMLManager(models.Manager):
    """
    Add a few methods which locate or create an object or objects
    corresponding to the handles in an XML element, as appropriate.

    This assumes that models which use it have an "xml_template"
    class attribute holding an XMLTemplate object (above).
    """

    # Whether to blather about what we're doing

    debug = False

    # pylint: disable=E1101

    def xml_get_or_create(self, xml):
        # For a "create" action, build a new (unsaved) model instance;
        # for "set", fetch the existing one by handle.
        name = self.model.xml_template.name
        action = xml.get("action")
        assert xml.tag == rpki.left_right.xmlns + name and action in ("create", "set")
        d = { name + "_handle" : xml.get(name + "_handle") }
        # Non-tenant lookups are additionally scoped by tenant handle; on
        # "create" the tenant is attached to the new instance afterwards.
        if name != "tenant" and action != "create":
            d["tenant__tenant_handle"] = xml.get("tenant_handle")
        if self.debug:
            logger.debug("XMLManager.xml_get_or_create(): name %s action %s filter %r", name, action, d)
        result = self.model(**d) if action == "create" else self.get(**d)
        if name != "tenant" and action == "create":
            result.tenant = Tenant.objects.get(tenant_handle = xml.get("tenant_handle"))
        if self.debug:
            logger.debug("XMLManager.xml_get_or_create(): name %s action %s filter %r result %r", name, action, d, result)
        return result

    def xml_list(self, xml):
        # "get" returns the single object named by its handle; "list"
        # returns everything visible to the requesting tenant.
        name = self.model.xml_template.name
        action = xml.get("action")
        assert xml.tag == rpki.left_right.xmlns + name and action in ("get", "list")
        d = {}
        if action == "get":
            d[name + "_handle"] = xml.get(name + "_handle")
        if name != "tenant":
            d["tenant__tenant_handle"] = xml.get("tenant_handle")
        if self.debug:
            logger.debug("XMLManager.xml_list(): name %s action %s filter %r", name, action, d)
        result = self.filter(**d) if d else self.all()
        if self.debug:
            logger.debug("XMLManager.xml_list(): name %s action %s filter %r result %r", name, action, d, result)
        return result

    def xml_get_for_delete(self, xml):
        # Fetch the single object named by a "destroy" PDU, scoped by
        # tenant handle for non-tenant models.
        name = self.model.xml_template.name
        action = xml.get("action")
        assert xml.tag == rpki.left_right.xmlns + name and action == "destroy"
        d = { name + "_handle" : xml.get(name + "_handle") }
        if name != "tenant":
            d["tenant__tenant_handle"] = xml.get("tenant_handle")
        if self.debug:
            logger.debug("XMLManager.xml_get_for_delete(): name %s action %s filter %r", name, action, d)
        result = self.get(**d)
        if self.debug:
            logger.debug("XMLManager.xml_get_for_delete(): name %s action %s filter %r result %r", name, action, d, result)
        return result
+
+
def xml_hooks(cls):
    """
    Class decorator adding default XML hooks.

    Supplies do-nothing default implementations of the pre-save,
    post-save and pre-delete XML hook methods for decorated model
    classes that don't define their own.
    """

    def default_xml_pre_save_hook(self, q_pdu):
        pass

    @tornado.gen.coroutine
    def default_xml_post_save_hook(self, rpkid, q_pdu):
        pass

    @tornado.gen.coroutine
    def default_xml_pre_delete_hook(self, rpkid):
        pass

    hook_defaults = (("xml_pre_save_hook",   default_xml_pre_save_hook),
                     ("xml_post_save_hook",  default_xml_post_save_hook),
                     ("xml_pre_delete_hook", default_xml_pre_delete_hook))

    # Only fill in hooks the class hasn't defined itself.
    for hook_name, hook_default in hook_defaults:
        if hasattr(cls, hook_name):
            continue
        setattr(cls, hook_name, hook_default)

    return cls
+
+
+# Models.
+#
+# There's far too much random code hanging off of model methods, relic
+# of the earlier implementation. Clean up as time permits.
+
@xml_hooks
class Tenant(models.Model):
    """
    Per-tenant state: the top-level object in the rpkid database, which
    the other rpkidb models reference directly or indirectly.
    """

    tenant_handle = models.SlugField(max_length = 255)
    use_hsm = models.BooleanField(default = False)
    crl_interval = models.BigIntegerField(null = True)
    regen_margin = models.BigIntegerField(null = True)
    bpki_cert = CertificateField(null = True)
    bpki_glue = CertificateField(null = True)
    objects = XMLManager()

    xml_template = XMLTemplate(
        name = "tenant",
        attributes = ("crl_interval", "regen_margin"),
        booleans = ("use_hsm",),
        elements = ("bpki_cert", "bpki_glue"))

    def __repr__(self):
        try:
            return "<Tenant: {}>".format(self.tenant_handle)
        except:
            return "<Tenant: Tenant object>"

    @tornado.gen.coroutine
    def xml_pre_delete_hook(self, rpkid):
        # Destroying a tenant destroys all of its parents first, in parallel.
        trace_call_chain()
        yield [parent.destroy(rpkid = rpkid) for parent in self.parents.all()]

    @tornado.gen.coroutine
    def xml_post_save_hook(self, rpkid, q_pdu):
        """
        Act on the operational flags carried by a tenant create/set PDU:
        clear_replay_protection, rekey, revoke, reissue, revoke_forgotten,
        publish_world_now and run_now.
        """

        trace_call_chain()

        rekey = q_pdu.get("rekey")
        revoke = q_pdu.get("revoke")
        reissue = q_pdu.get("reissue")
        revoke_forgotten = q_pdu.get("revoke_forgotten")

        # Replay protection is cleared synchronously, before the async work.
        if q_pdu.get("clear_replay_protection"):
            for parent in self.parents.all():
                parent.clear_replay_protection()
            for child in self.children.all():
                child.clear_replay_protection()
            for repository in self.repositories.all():
                repository.clear_replay_protection()

        # Collect coroutine futures and run them all concurrently.
        futures = []

        if rekey or revoke or reissue or revoke_forgotten:
            for parent in self.parents.all():
                if rekey:
                    futures.append(parent.serve_rekey(rpkid = rpkid))
                if revoke:
                    futures.append(parent.serve_revoke(rpkid = rpkid))
                if reissue:
                    futures.append(parent.serve_reissue(rpkid = rpkid))
                if revoke_forgotten:
                    futures.append(parent.serve_revoke_forgotten(rpkid = rpkid))

        if q_pdu.get("publish_world_now"):
            futures.append(self.serve_publish_world_now(rpkid = rpkid))
        if q_pdu.get("run_now"):
            futures.append(self.serve_run_now(rpkid = rpkid))

        yield futures


    @tornado.gen.coroutine
    def serve_publish_world_now(self, rpkid):
        """
        Republish everything this tenant should be publishing, and
        withdraw anything pubd is holding for us that we no longer know
        about.
        """

        trace_call_chain()

        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
        objects = dict()

        # First ask each repository what it currently holds for us:
        # uri -> (hash, repository).
        for repository in self.repositories.all():
            q_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap,
                            type = "query", version = rpki.publication.version)
            SubElement(q_msg, rpki.publication.tag_list, tag = "list")
            r_msg = yield repository.call_pubd(rpkid, q_msg, length_check = False)
            if not all(r_pdu.tag == rpki.publication.tag_list for r_pdu in r_msg):
                raise rpki.exceptions.BadPublicationReply("Unexpected XML tag in publication response")
            objs = dict((r_pdu.get("uri"), (r_pdu.get("hash"), repository))
                        for r_pdu in r_msg if r_pdu.tag == rpki.publication.tag_list)
            # A URI should live in exactly one repository; warn on collisions.
            if any(uri in objects for uri in objs):
                for uri in sorted(set(objects) & set(objs)):
                    logger.warning("Duplicated publication URI %s between %r and %r, this should not happen",
                                   uri, objects[uri][1], objs[uri][1])
            objects.update(objs)

        # Then queue a publish for everything we believe should be
        # published, reusing pubd's hash when it already holds the URI.
        for ca_detail in CADetail.objects.filter(ca__parent__tenant = self, state = "active"):
            repository = ca_detail.ca.parent.repository
            objs = [(ca_detail.crl_uri, ca_detail.latest_crl),
                    (ca_detail.manifest_uri, ca_detail.latest_manifest)]
            objs.extend((c.uri, c.cert) for c in ca_detail.child_certs.all())
            objs.extend((r.uri, r.roa) for r in ca_detail.roas.filter(roa__isnull = False))
            objs.extend((g.uri, g.ghostbuster) for g in ca_detail.ghostbusters.all())
            objs.extend((c.uri, c.cert) for c in ca_detail.ee_certificates.all())
            for uri, obj in objs:
                h, r = objects.get(uri, (None, None))
                if uri in objects and r == repository:
                    publisher.queue(uri = uri, new_obj = obj, repository = repository, old_hash = h)
                    del objects[uri]
                else:
                    publisher.queue(uri = uri, new_obj = obj, repository = repository)

        # Anything pubd still holds that we didn't queue above gets withdrawn.
        for u in objects:
            h, r = objects[u]
            publisher.queue(uri = u, old_hash = h, repository = r)

        yield publisher.call_pubd()


    @tornado.gen.coroutine
    def serve_run_now(self, rpkid):
        # Queue this tenant's periodic tasks immediately and wait for
        # them all to complete.
        trace_call_chain()
        logger.debug("Forced immediate run of periodic actions for %r", self)
        tasks = self.cron_tasks(rpkid = rpkid)
        rpkid.task_add(*tasks)
        yield [task.wait() for task in tasks]


    def cron_tasks(self, rpkid):
        # Lazily instantiate (and cache) one task object per registered
        # task class for this tenant.
        trace_call_chain()
        # pylint: disable=W0201
        try:
            return self._cron_tasks
        except AttributeError:
            self._cron_tasks = tuple(task(rpkid, self) for task in rpki.rpkid_tasks.task_classes)
            return self._cron_tasks


    def find_covering_ca_details(self, resources):
        """
        Return all active CADetails for this <tenant/> which cover a
        particular set of resources.

        If we expected there to be a large number of CADetails, we
        could add index tables and write fancy SQL query to do this, but
        for the expected common case where there are only one or two
        active CADetails per <tenant/>, it's probably not worth it. In
        any case, this is an optimization we can leave for later.
        """

        trace_call_chain()
        return set(ca_detail
                   for ca_detail in CADetail.objects.filter(ca__parent__tenant = self, state = "active")
                   if ca_detail.covers(resources))
+
+
@xml_hooks
class BSC(models.Model):
    """
    Business Signing Context: the key and BPKI certificates a tenant uses
    to CMS-sign protocol messages (see Repository.call_pubd() and
    Parent.query_up_down(), which wrap outgoing messages with
    private_key_id / signing_cert / signing_cert_crl).
    """

    bsc_handle = models.SlugField(max_length = 255)
    private_key_id = RSAPrivateKeyField()       # signing key
    pkcs10_request = PKCS10Field()              # request for the back end to certify
    hash_alg = EnumField(choices = ("sha256",), default = "sha256")
    signing_cert = CertificateField(null = True)
    signing_cert_crl = CRLField(null = True)
    tenant = models.ForeignKey(Tenant, related_name = "bscs")
    objects = XMLManager()

    class Meta:
        unique_together = ("tenant", "bsc_handle")

    xml_template = XMLTemplate(
        name = "bsc",
        elements = ("signing_cert", "signing_cert_crl"),
        readonly = ("pkcs10_request",))

    def __repr__(self):
        try:
            return "<BSC: {}.{}>".format(self.tenant.tenant_handle, self.bsc_handle)
        except:                         # __repr__ must never raise
            return "<BSC: BSC object>"

    def xml_pre_save_hook(self, q_pdu):
        # Handle key generation, only supports RSA with SHA-256 for now.
        if q_pdu.get("generate_keypair"):
            assert q_pdu.get("key_type") in (None, "rsa") and q_pdu.get("hash_alg") in (None, "sha256")
            self.private_key_id = rpki.x509.RSA.generate(keylength = int(q_pdu.get("key_length", 2048)))
            self.pkcs10_request = rpki.x509.PKCS10.create(keypair = self.private_key_id)
+
+
@xml_hooks
class Repository(models.Model):
    """
    A publication repository (pubd instance) this tenant publishes to,
    reached at peer_contact_uri with CMS messages signed via our BSC.
    """

    repository_handle = models.SlugField(max_length = 255)
    peer_contact_uri = models.TextField(null = True)
    rrdp_notification_uri = models.TextField(null = True)
    bpki_cert = CertificateField(null = True)
    bpki_glue = CertificateField(null = True)
    last_cms_timestamp = SundialField(null = True)      # CMS replay protection
    bsc = models.ForeignKey(BSC, related_name = "repositories")
    tenant = models.ForeignKey(Tenant, related_name = "repositories")
    objects = XMLManager()

    class Meta:
        unique_together = ("tenant", "repository_handle")

    xml_template = XMLTemplate(
        name = "repository",
        handles = (BSC,),
        attributes = ("peer_contact_uri", "rrdp_notification_uri"),
        elements = ("bpki_cert", "bpki_glue"))

    def __repr__(self):
        try:
            uri = " " + self.peer_contact_uri
        except:
            uri = ""
        try:
            return "<Repository: {}.{}{}>".format(self.tenant.tenant_handle, self.repository_handle, uri)
        except:                         # __repr__ must never raise
            return "<Repository: Repository object>"

    @tornado.gen.coroutine
    def xml_post_save_hook(self, rpkid, q_pdu):
        """
        Process action flags accompanying a left-right update of this
        repository object.
        """

        trace_call_chain()
        if q_pdu.get("clear_replay_protection"):
            self.clear_replay_protection()

    def clear_replay_protection(self):
        """
        Reset the CMS replay-protection timestamp for this repository.
        """

        trace_call_chain()
        self.last_cms_timestamp = None
        self.save()

    @tornado.gen.coroutine
    def call_pubd(self, rpkid, q_msg, handlers = None, length_check = True):
        """
        Send a message to publication daemon and return the response.

        As a convenience, attempting to send an empty message returns
        immediate success without sending anything.

        handlers is a dict of handler functions to process the
        response PDUs. If the uri value in the response PDU appears
        in the dict, the associated handler is called to process the
        PDU; otherwise, a default handler is called to check for
        errors. A handler value of False suppresses calling of the
        default handler.
        """

        trace_call_chain()
        if len(q_msg) == 0:
            return
        if handlers is None:
            handlers = {}
        for q_pdu in q_msg:
            logger.info("Sending %r hash = %s uri = %s to pubd", q_pdu, q_pdu.get("hash"), q_pdu.get("uri"))
        http_request = tornado.httpclient.HTTPRequest(
            url = self.peer_contact_uri,
            method = "POST",
            body = rpki.publication.cms_msg().wrap(q_msg, self.bsc.private_key_id,
                                                   self.bsc.signing_cert, self.bsc.signing_cert_crl),
            headers = { "Content-Type" : rpki.publication.content_type },
            connect_timeout = rpkid.http_client_timeout,
            request_timeout = rpkid.http_client_timeout)
        http_response = yield rpkid.http_fetch(http_request)
        if http_response.headers.get("Content-Type") not in rpki.publication.allowed_content_types:
            # Bug fix: the received and expected Content-Type values were
            # swapped in this error message; report received first.
            raise rpki.exceptions.BadContentType("HTTP Content-Type %r, expected %r" % (
                http_response.headers.get("Content-Type"), rpki.publication.content_type))
        r_cms = rpki.publication.cms_msg(DER = http_response.body)
        r_msg = r_cms.unwrap((rpkid.bpki_ta, self.tenant.bpki_cert, self.tenant.bpki_glue, self.bpki_cert, self.bpki_glue))
        r_cms.check_replay_sql(self, self.peer_contact_uri)
        for r_pdu in r_msg:
            logger.info("Received %r hash = %s uri = %s from pubd", r_pdu, r_pdu.get("hash"), r_pdu.get("uri"))
            handler = handlers.get(r_pdu.get("uri"), rpki.publication.raise_if_error)
            if handler:
                logger.debug("Calling pubd handler %r", handler)
                handler(r_pdu)
        if length_check and len(q_msg) != len(r_msg):
            raise rpki.exceptions.BadPublicationReply("Wrong number of response PDUs from pubd: sent %r, got %r" % (q_msg, r_msg))
        raise tornado.gen.Return(r_msg)
+
+
@xml_hooks
class Parent(models.Model):
    """
    An RPKI parent of this tenant, spoken to via the up-down protocol.

    When any of the root_*_resources fields is non-empty this "parent" is
    really a locally-hosted RPKI root and up-down queries are answered
    internally by query_up_down_root() instead of going over HTTP.
    """

    parent_handle = models.SlugField(max_length = 255)
    tenant = models.ForeignKey(Tenant, related_name = "parents")
    repository = models.ForeignKey(Repository, related_name = "parents")
    bpki_cert = CertificateField(null = True)
    bpki_glue = CertificateField(null = True)
    peer_contact_uri = models.TextField(null = True)
    sia_base = models.TextField(null = True)
    sender_name = models.TextField(null = True)         # up-down <message/> sender
    recipient_name = models.TextField(null = True)      # up-down <message/> recipient
    last_cms_timestamp = SundialField(null = True)      # CMS replay protection
    bsc = models.ForeignKey(BSC, related_name = "parents")
    root_asn_resources = models.TextField(default = "")
    root_ipv4_resources = models.TextField(default = "")
    root_ipv6_resources = models.TextField(default = "")
    objects = XMLManager()

    class Meta:
        unique_together = ("tenant", "parent_handle")

    xml_template = XMLTemplate(
        name = "parent",
        handles = (BSC, Repository),
        attributes = ("peer_contact_uri", "sia_base", "sender_name", "recipient_name",
                      "root_asn_resources", "root_ipv4_resources", "root_ipv6_resources"),
        elements = ("bpki_cert", "bpki_glue"),
        readonly = ("rpki_root_cert",))


    def __repr__(self):
        try:
            uri = " " + self.peer_contact_uri
        except:
            uri = ""
        try:
            return "<Parent: {}.{}{}>".format(self.tenant.tenant_handle, self.parent_handle, uri)
        except:                         # __repr__ must never raise
            return "<Parent: Parent object>"
+
+ @property
+ def rpki_root_cert(self):
+ if self.root_asn_resources or self.root_ipv4_resources or self.root_ipv6_resources:
+ logger.debug("%r checking for rpki_root_cert", self)
+ try:
+ return CADetail.objects.get(ca__parent = self, state = "active").latest_ca_cert
+ except CADetail.DoesNotExist:
+ pass
+ return None
+
    @tornado.gen.coroutine
    def xml_pre_delete_hook(self, rpkid):
        """
        Tear down all CA state under this parent before the left-right
        machinery deletes the parent row itself (hence
        delete_parent = False).
        """

        trace_call_chain()
        yield self.destroy(rpkid = rpkid, delete_parent = False)
+
+ @tornado.gen.coroutine
+ def xml_post_save_hook(self, rpkid, q_pdu):
+ trace_call_chain()
+ if q_pdu.get("clear_replay_protection"):
+ self.clear_replay_protection()
+ futures = []
+ if q_pdu.get("rekey"):
+ futures.append(self.serve_rekey(rpkid = rpkid))
+ if q_pdu.get("revoke"):
+ futures.append(self.serve_revoke(rpkid = rpkid))
+ if q_pdu.get("reissue"):
+ futures.append(self.serve_reissue(rpkid = rpkid))
+ if q_pdu.get("revoke_forgotten"):
+ futures.append(self.serve_revoke_forgotten(rpkid = rpkid))
+ yield futures
+
    @tornado.gen.coroutine
    def serve_rekey(self, rpkid):
        """
        Rekey every CA under this parent, in parallel.
        """

        trace_call_chain()
        yield [ca.rekey(rpkid = rpkid) for ca in self.cas.all()]
+
    @tornado.gen.coroutine
    def serve_revoke(self, rpkid):
        """
        Revoke (deprecated keys of) every CA under this parent, in parallel.
        """

        trace_call_chain()
        yield [ca.revoke(rpkid = rpkid) for ca in self.cas.all()]
+
    @tornado.gen.coroutine
    def serve_reissue(self, rpkid):
        """
        Reissue certificates for every CA under this parent, in parallel.
        """

        trace_call_chain()
        yield [ca.reissue(rpkid = rpkid) for ca in self.cas.all()]
+
    def clear_replay_protection(self):
        """
        Reset the CMS replay-protection timestamp for this parent.
        """

        trace_call_chain()
        self.last_cms_timestamp = None
        self.save()
+
+
+ @tornado.gen.coroutine
+ def get_skis(self, rpkid):
+ """
+ Fetch SKIs that this parent thinks we have. In theory this should
+ agree with our own database, but in practice stuff can happen, so
+ sometimes we need to know what our parent thinks.
+
+ Result is a dictionary with the resource class name as key and a
+ set of SKIs as value.
+
+ This, like everything else dealing with SKIs in the up-down
+ protocol, is mis-named: we're really dealing with g(SKI) values,
+ not raw SKI values. Sorry.
+ """
+
+ trace_call_chain()
+ r_msg = yield self.up_down_list_query(rpkid = rpkid)
+ ski_map = {}
+ for rc in r_msg.getiterator(rpki.up_down.tag_class):
+ skis = set()
+ for c in rc.getiterator(rpki.up_down.tag_certificate):
+ skis.add(rpki.x509.X509(Base64 = c.text).gSKI())
+ ski_map[rc.get("class_name")] = skis
+ raise tornado.gen.Return(ski_map)
+
+
    @tornado.gen.coroutine
    def revoke_skis(self, rpkid, rc_name, skis_to_revoke):
        """
        Revoke a set of SKIs within a particular resource class.

        Sends one up-down "revoke" query per g(SKI), sequentially.
        """

        trace_call_chain()
        for ski in skis_to_revoke:
            logger.debug("Asking parent %r to revoke class %r, g(SKI) %s", self, rc_name, ski)
            yield self.up_down_revoke_query(rpkid = rpkid, class_name = rc_name, ski = ski)
+
+
    @tornado.gen.coroutine
    def serve_revoke_forgotten(self, rpkid):
        """
        Handle a left-right revoke_forgotten action for this parent.

        This is a bit fiddly: we have to compare the result of an up-down
        list query with what we have locally and identify the SKIs of any
        certificates that have gone missing. This should never happen in
        ordinary operation, but can arise if we have somehow lost a
        private key, in which case there is nothing more we can do with
        the issued cert, so we have to clear it. As this really is not
        supposed to happen, we don't clear it automatically, instead we
        require an explicit trigger.
        """

        trace_call_chain()
        skis_from_parent = yield self.get_skis(rpkid = rpkid)
        for rc_name, skis_to_revoke in skis_from_parent.iteritems():
            # Keep every g(SKI) we still know about; only leftovers get revoked.
            for ca_detail in CADetail.objects.filter(ca__parent = self).exclude(state = "revoked"):
                skis_to_revoke.discard(ca_detail.latest_ca_cert.gSKI())
            yield self.revoke_skis(rpkid, rc_name, skis_to_revoke)
+
+
+ @tornado.gen.coroutine
+ def destroy(self, rpkid, delete_parent = True):
+ """
+ Delete all the CA stuff under this parent, and perhaps the parent
+ itself.
+ """
+
+ trace_call_chain()
+ yield self.serve_revoke_forgotten(rpkid = rpkid)
+ yield [ca.destroy(rpkid = rpkid, parent = self)
+ for ca in self.cas().all()]
+ if delete_parent:
+ self.delete()
+
+
+ def _compose_up_down_query(self, query_type):
+ return Element(rpki.up_down.tag_message, nsmap = rpki.up_down.nsmap,
+ version = rpki.up_down.version, type = query_type,
+ sender = self.sender_name, recipient = self.recipient_name)
+
+
    @tornado.gen.coroutine
    def up_down_list_query(self, rpkid):
        """
        Send an up-down "list" query to this parent, returning the
        response message.
        """

        trace_call_chain()
        q_msg = self._compose_up_down_query("list")
        r_msg = yield self.query_up_down(rpkid, q_msg)
        raise tornado.gen.Return(r_msg)
+
+
+ @tornado.gen.coroutine
+ def up_down_issue_query(self, rpkid, ca, ca_detail):
+ trace_call_chain()
+ logger.debug("Parent.up_down_issue_query(): caRepository %r rpkiManifest %r rpkiNotify %r",
+ ca.sia_uri, ca_detail.manifest_uri, ca.parent.repository.rrdp_notification_uri)
+ pkcs10 = rpki.x509.PKCS10.create(
+ keypair = ca_detail.private_key_id,
+ is_ca = True,
+ caRepository = ca.sia_uri,
+ rpkiManifest = ca_detail.manifest_uri,
+ rpkiNotify = ca.parent.repository.rrdp_notification_uri)
+ q_msg = self._compose_up_down_query("issue")
+ q_pdu = SubElement(q_msg, rpki.up_down.tag_request, class_name = ca.parent_resource_class)
+ q_pdu.text = pkcs10.get_Base64()
+ r_msg = yield self.query_up_down(rpkid, q_msg)
+ raise tornado.gen.Return(r_msg)
+
+
    @tornado.gen.coroutine
    def up_down_revoke_query(self, rpkid, class_name, ski):
        """
        Send an up-down "revoke" query to this parent for one g(SKI)
        within one resource class, returning the response message.
        """

        trace_call_chain()
        q_msg = self._compose_up_down_query("revoke")
        SubElement(q_msg, rpki.up_down.tag_key, class_name = class_name, ski = ski)
        r_msg = yield self.query_up_down(rpkid, q_msg)
        raise tornado.gen.Return(r_msg)
+
+
+ @tornado.gen.coroutine
+ def query_up_down(self, rpkid, q_msg):
+ trace_call_chain()
+ #logger.debug("%r query_up_down(): %s", self, ElementToString(q_msg))
+ if self.root_asn_resources or self.root_ipv4_resources or self.root_ipv6_resources:
+ r_msg = yield self.query_up_down_root(rpkid, q_msg)
+ elif self.bsc is None:
+ raise rpki.exceptions.BSCNotFound("Could not find BSC")
+ elif self.bsc.signing_cert is None:
+ raise rpki.exceptions.BSCNotReady("%r is not yet usable" % self.bsc)
+ else:
+ http_request = tornado.httpclient.HTTPRequest(
+ url = self.peer_contact_uri,
+ method = "POST",
+ body = rpki.up_down.cms_msg().wrap(q_msg, self.bsc.private_key_id,
+ self.bsc.signing_cert,
+ self.bsc.signing_cert_crl),
+ headers = { "Content-Type" : rpki.up_down.content_type },
+ connect_timeout = rpkid.http_client_timeout,
+ request_timeout = rpkid.http_client_timeout)
+ http_response = yield rpkid.http_fetch(http_request)
+ if http_response.headers.get("Content-Type") not in rpki.up_down.allowed_content_types:
+ raise rpki.exceptions.BadContentType("HTTP Content-Type %r, expected %r" % (
+ rpki.up_down.content_type, http_response.headers.get("Content-Type")))
+ r_cms = rpki.up_down.cms_msg(DER = http_response.body)
+ r_msg = r_cms.unwrap((rpkid.bpki_ta,
+ self.tenant.bpki_cert, self.tenant.bpki_glue,
+ self.bpki_cert, self.bpki_glue))
+ r_cms.check_replay_sql(self, self.peer_contact_uri)
+ #logger.debug("%r query_up_down(): %s", self, ElementToString(r_msg))
+ rpki.up_down.check_response(r_msg, q_msg.get("type"))
+ raise tornado.gen.Return(r_msg)
+
+
    @tornado.gen.coroutine
    def query_up_down_root(self, rpkid, q_msg):
        """
        Internal RPKI root, diverged from the normal up_down client.

        While it looks a bit silly, the simplest way to drop this in
        without rewriting all of the up-down client code is to
        implement a minimal version of the server side of the up-down
        protocol here, XML and all. This has the additional advantage
        of using a well-defined protocol, one with a formal schema,
        even. Yes, there's a bit of XML overhead, but we'd be paying
        that in any case for an external root, so it's just a minor
        optimization we've chosen not to take.

        We do skip the CMS wrapper, though, since this is all internal
        not just to a single Tenant but to a single Parent.
        """

        trace_call_chain()
        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)

        # Response skeleton with sender/recipient swapped: we answer as
        # the "parent" side of the conversation.
        r_msg = Element(rpki.up_down.tag_message,
                        nsmap = rpki.up_down.nsmap,
                        version = rpki.up_down.version,
                        sender = self.recipient_name,
                        recipient = self.sender_name)

        try:

            if q_msg.get("type") == "revoke":
                # Revocation: withdraw the matching root CA certificate
                # from publication and acknowledge.
                ca_detail = CADetail.objects.get(
                    ca__parent = self,
                    state__in = ("active", "deprecated"),
                    ca__parent_resource_class = q_msg[0].get("class_name"),
                    ca_cert_uri__endswith = q_msg[0].get("ski") + ".cer")
                publisher.queue(
                    uri = ca_detail.ca_cert_uri,
                    old_obj = ca_detail.latest_ca_cert.certificate,
                    repository = self.repository)
                yield publisher.call_pubd()
                r_msg.set("type", "revoke_response")
                SubElement(r_msg, rpki.up_down.tag_key,
                           class_name = q_msg[0].get("class_name"),
                           ski = q_msg[0].get("ski"))

            else: # Not revocation

                notAfter = rpki.sundial.now() + rpki.sundial.timedelta.parse(
                    rpkid.cfg.get("rpki-root-certificate-lifetime", "1y"))

                bag = rpki.resource_set.resource_bag(
                    asn = self.root_asn_resources,
                    v4 = self.root_ipv4_resources,
                    v6 = self.root_ipv6_resources,
                    valid_until = notAfter)

                # Single resource class named after this parent handle.
                rc = SubElement(
                    r_msg, rpki.up_down.tag_class,
                    class_name = self.parent_handle,
                    cert_url = self.sia_base + "root.cer",
                    resource_set_as = str(bag.asn),
                    resource_set_ipv4 = str(bag.v4),
                    resource_set_ipv6 = str(bag.v6),
                    resource_set_notafter = str(bag.valid_until))

                if q_msg.get("type") == "list":
                    r_msg.set("type", "list_response")
                    for ca_detail in CADetail.objects.filter(
                            ca__parent = self,
                            state__in = ("active", "deprecated"),
                            ca__parent_resource_class = self.parent_handle):
                        uri = self.sia_base + ca_detail.latest_ca_cert.gSKI() + ".cer"
                        SubElement(rc, rpki.up_down.tag_certificate,
                                   cert_url = uri).text = ca_detail.latest_ca_cert.get_Base64()

                else:
                    assert q_msg.get("type") == "issue"
                    r_msg.set("type", "issue_response")
                    pkcs10 = rpki.x509.PKCS10(Base64 = q_msg[0].text)
                    pkcs10_key = pkcs10.getPublicKey()
                    pkcs10_sia = pkcs10.get_SIA()
                    pkcs10_gski = pkcs10_key.gSKI()

                    uri = self.sia_base + pkcs10_gski + ".cer"

                    # Match the request key against our pending/active details.
                    ca_details = dict(
                        (ca_detail.public_key.gSKI(), ca_detail)
                        for ca_detail in CADetail.objects.filter(
                            ca__parent = self,
                            ca__parent_resource_class = q_msg[0].get("class_name"),
                            state__in = ("pending", "active")))

                    ca_detail = ca_details[pkcs10_gski]

                    threshold = rpki.sundial.now() + rpki.sundial.timedelta(
                        seconds = self.tenant.regen_margin)

                    # Only self-certify when something material changed or
                    # the existing cert is close to expiry.
                    need_to_issue = (
                        ca_detail.state == "pending" or
                        ca_detail.public_key != pkcs10_key or
                        ca_detail.latest_ca_cert.get_SIA() != pkcs10_sia or
                        ca_detail.latest_ca_cert.getNotAfter() < threshold)

                    if need_to_issue:
                        cert = rpki.x509.X509.self_certify(
                            keypair = ca_detail.private_key_id,
                            subject_key = pkcs10_key,
                            serial = ca_detail.ca.next_serial_number(),
                            sia = pkcs10_sia,
                            notAfter = bag.valid_until,
                            resources = bag)
                        publisher.queue(
                            uri = uri,
                            new_obj = cert,
                            repository = self.repository)
                        yield publisher.call_pubd()
                        logger.debug("%r Internal root issued, old CADetail %r, new cert %r",
                                     self, ca_detail, cert)
                    else:
                        cert = ca_detail.latest_ca_cert

                    SubElement(rc, rpki.up_down.tag_certificate,
                               cert_url = uri).text = cert.get_Base64()

                SubElement(rc, rpki.up_down.tag_issuer)

        except tornado.gen.Return:
            raise

        except:
            # Convert any internal failure into an up-down error_response
            # (status 2001) rather than letting it propagate.
            logger.exception("%r Up-down %s query to internal root failed:",
                             self, q_msg.get("type"))
            del r_msg[:]
            r_msg.set("type", "error_response")
            SubElement(r_msg, rpki.up_down.tag_status).text = "2001"

        raise tornado.gen.Return(r_msg)
+
+
+ def construct_sia_uri(self, rc):
+ """
+ Construct the sia_uri value for a CA under this parent given
+ configured information and the parent's up-down protocol
+ list_response PDU.
+ """
+
+ trace_call_chain()
+ sia_uri = rc.get("suggested_sia_head", "")
+ if not sia_uri.startswith("rsync://") or not sia_uri.startswith(self.sia_base):
+ sia_uri = self.sia_base
+ if not sia_uri.endswith("/"):
+ raise rpki.exceptions.BadURISyntax("SIA URI must end with a slash: %s" % sia_uri)
+ return sia_uri
+
+
class CA(models.Model):
    """
    A CA under one parent, corresponding to a single up-down resource
    class (parent_resource_class).
    """

    last_crl_manifest_number = models.BigIntegerField(default = 1)
    last_issued_sn = models.BigIntegerField(default = 1)
    sia_uri = models.TextField(null = True)
    parent_resource_class = models.TextField(null = True) # Not sure this should allow NULL
    parent = models.ForeignKey(Parent, related_name = "cas")

    # So it turns out that there's always a 1:1 mapping between the
    # class_name we receive from our parent and the class_name we issue
    # to our children: in spite of the obfuscated way that we used to
    # handle class names, we never actually added a way for the back-end
    # to create new classes. Not clear we want to encourage this, but
    # if we wanted to support it, simple approach would probably be an
    # optional class_name attribute in the left-right <list_resources/>
    # response; if not present, we'd use parent's class_name as now,
    # otherwise we'd use the supplied class_name.


    def __repr__(self):
        try:
            return "<CA: {}.{} class {}>".format(self.parent.tenant.tenant_handle,
                                                 self.parent.parent_handle,
                                                 self.parent_resource_class)
        except:                         # __repr__ must never raise
            return "<CA: CA object>"
+
+
    @tornado.gen.coroutine
    def destroy(self, rpkid, parent):
        """
        The list of current resource classes received from parent does not
        include the class corresponding to this CA, so we need to delete
        it (and its little dog too...).

        All certs published by this CA are now invalid, so need to
        withdraw them, the CRL, and the manifest from the repository,
        delete all child_cert and ca_detail records associated with this
        CA, then finally delete this CA itself.
        """

        trace_call_chain()
        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
        for ca_detail in self.ca_details.all():
            ca_detail.destroy(publisher = publisher, allow_failure = True)
        try:
            yield publisher.call_pubd()
        except:
            # Deliberate best-effort: if withdrawal fails, log and keep
            # the row so a later run can retry.
            logger.exception("Could not destroy %r, skipping", self)
        else:
            logger.debug("Destroying %r", self)
            self.delete()
+
+
+ def next_serial_number(self):
+ """
+ Allocate a certificate serial number.
+ """
+
+ trace_call_chain()
+ self.last_issued_sn += 1
+ self.save()
+ return self.last_issued_sn
+
+
+ def next_crl_manifest_number(self):
+ """
+ Allocate a CRL/Manifest number.
+ """
+
+ trace_call_chain()
+ self.last_crl_manifest_number += 1
+ self.save()
+ return self.last_crl_manifest_number
+
+
+ def create_detail(self):
+ """
+ Create a new CADetail object for this CA.
+ """
+
+ trace_call_chain()
+ cer_keypair = rpki.x509.RSA.generate()
+ mft_keypair = rpki.x509.RSA.generate()
+ return CADetail.objects.create(
+ ca = self,
+ state = "pending",
+ private_key_id = cer_keypair,
+ public_key = cer_keypair.get_public(),
+ manifest_private_key_id = mft_keypair,
+ manifest_public_key = mft_keypair.get_public())
+
+
    @tornado.gen.coroutine
    def rekey(self, rpkid):
        """
        Initiate a rekey operation for this CA. Generate a new keypair.
        Request cert from parent using new keypair. Mark result as our
        active ca_detail. Reissue all child certs issued by this CA using
        the new ca_detail.
        """

        trace_call_chain()
        try:
            old_detail = self.ca_details.get(state = "active")
        except CADetail.DoesNotExist:
            old_detail = None
        new_detail = self.create_detail()
        logger.debug("Sending issue request to %r from %r", self.parent, self.rekey)
        r_msg = yield self.parent.up_down_issue_query(rpkid = rpkid, ca = self, ca_detail = new_detail)
        # r_msg[0][0]: first PDU of the first class in the issue response
        # (the newly issued certificate).
        c = r_msg[0][0]
        logger.debug("%r received certificate %s", self, c.get("cert_url"))
        yield new_detail.activate(
            rpkid = rpkid,
            ca = self,
            cert = rpki.x509.X509(Base64 = c.text),
            uri = c.get("cert_url"),
            predecessor = old_detail)
+
+
    @tornado.gen.coroutine
    def revoke(self, rpkid, revoke_all = False):
        """
        Revoke deprecated ca_detail objects associated with this CA, or
        all ca_details associated with this CA if revoke_all is set.

        For each CADetail, this involves: requesting revocation of the
        keypair by parent; revoking all issued certificates;
        generating final CRL and manifest covering the period one CRL
        cycle past the time that the last certificate would have
        expired; and destroying the keypair. We leave final CRL and
        manifest in place until their nextupdate time has passed.
        """

        trace_call_chain()

        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)

        if revoke_all:
            ca_details = self.ca_details.all()
        else:
            ca_details = self.ca_details.filter(state = "deprecated")

        for ca_detail in ca_details:

            gski = ca_detail.latest_ca_cert.gSKI()
            logger.debug("Asking parent to revoke CA certificate matching g(SKI) = %s", gski)
            r_msg = yield self.parent.up_down_revoke_query(rpkid = rpkid, class_name = self.parent_resource_class, ski = gski)
            # Sanity-check that the parent revoked what we asked for.
            if r_msg[0].get("class_name") != self.parent_resource_class:
                raise rpki.exceptions.ResourceClassMismatch
            if r_msg[0].get("ski") != gski:
                raise rpki.exceptions.SKIMismatch
            logger.debug("Parent revoked g(SKI) %s, starting cleanup", gski)

            # Final CRL/manifest must outlive the latest-expiring object
            # plus one CRL cycle; track the latest relevant time as we
            # revoke everything this ca_detail issued.
            nextUpdate = rpki.sundial.now()
            if ca_detail.latest_manifest is not None:
                ca_detail.latest_manifest.extract_if_needed()
                nextUpdate = nextUpdate.later(ca_detail.latest_manifest.getNextUpdate())
            if ca_detail.latest_crl is not None:
                nextUpdate = nextUpdate.later(ca_detail.latest_crl.getNextUpdate())
            for child_cert in ca_detail.child_certs.all():
                nextUpdate = nextUpdate.later(child_cert.cert.getNotAfter())
                child_cert.revoke(publisher = publisher)
            for roa in ca_detail.roas.all():
                nextUpdate = nextUpdate.later(roa.cert.getNotAfter())
                roa.revoke(publisher = publisher)
            for ghostbuster in ca_detail.ghostbusters.all():
                nextUpdate = nextUpdate.later(ghostbuster.cert.getNotAfter())
                ghostbuster.revoke(publisher = publisher)
            for eecert in ca_detail.ee_certificates.all():
                nextUpdate = nextUpdate.later(eecert.cert.getNotAfter())
                eecert.revoke(publisher = publisher)
            nextUpdate += rpki.sundial.timedelta(seconds = self.parent.tenant.crl_interval)

            ca_detail.generate_crl_and_manifest(publisher = publisher, nextUpdate = nextUpdate)
            # Destroy the keypair; final CRL/manifest stay published.
            ca_detail.private_key_id = None
            ca_detail.manifest_private_key_id = None
            ca_detail.manifest_public_key = None
            ca_detail.state = "revoked"
            ca_detail.save()

        yield publisher.call_pubd()
+
+
+ @tornado.gen.coroutine
+ def reissue(self, rpkid):
+ """
+ Reissue all current certificates issued by this CA.
+ """
+
+ trace_call_chain()
+ ca_detail = self.ca_details.get(state = "active")
+ if ca_detail:
+ yield ca_detail.reissue(rpkid = rpkid)
+
+
class CADetail(models.Model):
    """
    One keypair-and-certificate state of a CA: a CA cycles through
    pending -> active -> deprecated -> revoked details as it is rekeyed.
    """

    public_key = PublicKeyField(null = True)
    private_key_id = RSAPrivateKeyField(null = True)        # cleared on revocation
    latest_crl = CRLField(null = True)
    crl_published = SundialField(null = True)
    latest_ca_cert = CertificateField(null = True)
    manifest_private_key_id = RSAPrivateKeyField(null = True)
    manifest_public_key = PublicKeyField(null = True)
    latest_manifest = ManifestField(null = True)
    manifest_published = SundialField(null = True)
    next_crl_manifest_update = SundialField(null = True)
    state = EnumField(choices = ("pending", "active", "deprecated", "revoked"))
    ca_cert_uri = models.TextField(null = True)
    ca = models.ForeignKey(CA, related_name = "ca_details") # pylint: disable=C0103

    def __repr__(self):
        try:
            return "<CADetail: {}.{} class {} {} {}>".format(self.ca.parent.tenant.tenant_handle,
                                                             self.ca.parent.parent_handle,
                                                             self.ca.parent_resource_class,
                                                             self.state,
                                                             self.ca_cert_uri)
        except:                         # __repr__ must never raise
            return "<CADetail: CADetail object>"
+
+
    @property
    def crl_uri(self):
        """
        Return publication URI for this ca_detail's CRL.
        """

        return self.ca.sia_uri + self.crl_uri_tail
+
+
    @property
    def crl_uri_tail(self):
        """
        Return tail (filename portion) of publication URI for this
        ca_detail's CRL: g(SKI) of the CA key plus ".crl".
        """

        # pylint: disable=E1101
        return self.public_key.gSKI() + ".crl"
+
+
    @property
    def manifest_uri(self):
        """
        Return publication URI for this ca_detail's manifest.
        """

        # pylint: disable=E1101
        return self.ca.sia_uri + self.public_key.gSKI() + ".mft"
+
+
+ def has_expired(self):
+ """
+ Return whether this ca_detail's certificate has expired.
+ """
+
+ return self.latest_ca_cert.getNotAfter() <= rpki.sundial.now()
+
+
+ def covers(self, target):
+ """
+ Test whether this ca-detail covers a given set of resources.
+ """
+
+ assert not target.asn.inherit and not target.v4.inherit and not target.v6.inherit
+ me = self.latest_ca_cert.get_3779resources()
+ return target.asn <= me.asn and target.v4 <= me.v4 and target.v6 <= me.v6
+
+
    @tornado.gen.coroutine
    def activate(self, rpkid, ca, cert, uri, predecessor = None):
        """
        Activate this ca_detail: record the newly issued CA certificate
        and its URI, mark this detail active, and publish a fresh
        CRL/manifest.  If a predecessor detail is given (rekey case),
        deprecate it and migrate/regenerate everything it issued onto
        this detail.
        """

        trace_call_chain()
        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
        self.latest_ca_cert = cert
        self.ca_cert_uri = uri
        self.state = "active"
        self.generate_crl_and_manifest(publisher = publisher)
        self.save()

        if predecessor is not None:
            predecessor.state = "deprecated"
            predecessor.save()
            # Move all issued objects onto the new detail...
            for child_cert in predecessor.child_certs.all():
                child_cert.reissue(ca_detail = self, publisher = publisher)
            for roa in predecessor.roas.all():
                roa.regenerate(publisher = publisher)
            for ghostbuster in predecessor.ghostbusters.all():
                ghostbuster.regenerate(publisher = publisher)
            for eecert in predecessor.ee_certificates.all():
                eecert.reissue(publisher = publisher, ca_detail = self)
            # ...then publish the predecessor's final CRL/manifest.
            predecessor.generate_crl_and_manifest(publisher = publisher)

        yield publisher.call_pubd()
+
+
    def destroy(self, publisher, allow_failure = False):
        """
        Delete this ca_detail and all of the certs it issued.

        If allow_failure is true, we clean up as much as we can but don't
        raise an exception.
        """

        trace_call_chain()
        repository = self.ca.parent.repository
        # handler = False suppresses the publication error handler, so a
        # failed withdrawal doesn't abort the cleanup (see call_pubd()).
        handler = False if allow_failure else None
        for child_cert in self.child_certs.all():
            publisher.queue(uri = child_cert.uri, old_obj = child_cert.cert, repository = repository, handler = handler)
            child_cert.delete()
        for roa in self.roas.all():
            roa.revoke(publisher = publisher, allow_failure = allow_failure)
        for ghostbuster in self.ghostbusters.all():
            ghostbuster.revoke(publisher = publisher, allow_failure = allow_failure)
        for eecert in self.ee_certificates.all():
            eecert.revoke(publisher = publisher)
        if self.latest_manifest is not None:
            publisher.queue(uri = self.manifest_uri, old_obj = self.latest_manifest, repository = repository, handler = handler)
        if self.latest_crl is not None:
            publisher.queue(uri = self.crl_uri, old_obj = self.latest_crl, repository = repository, handler = handler)
        for cert in self.revoked_certs.all(): # + self.child_certs.all()
            logger.debug("Deleting %r", cert)
            cert.delete()
        logger.debug("Deleting %r", self)
        self.delete()
+
+
    @tornado.gen.coroutine
    def update(self, rpkid, parent, ca, rc, sia_uri_changed, old_resources):
        """
        Need to get a new certificate for this ca_detail and perhaps frob
        children of this ca_detail.
        """

        trace_call_chain()

        logger.debug("Sending issue request to %r from %r", parent, self.update)

        r_msg = yield parent.up_down_issue_query(rpkid = rpkid, ca = ca, ca_detail = self)

        # r_msg[0][0]: first PDU of the first class in the issue response
        # (the newly issued certificate).
        c = r_msg[0][0]

        cert = rpki.x509.X509(Base64 = c.text)
        cert_url = c.get("cert_url")

        logger.debug("%r received certificate %s", self, cert_url)

        # A pending detail just becomes active; nothing to frob yet.
        if self.state == "pending":
            yield self.activate(rpkid = rpkid, ca = ca, cert = cert, uri = cert_url)
            return

        validity_changed = self.latest_ca_cert is None or self.latest_ca_cert.getNotAfter() != cert.getNotAfter()

        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)

        if self.latest_ca_cert != cert:
            self.latest_ca_cert = cert
            self.save()
            self.generate_crl_and_manifest(publisher = publisher)

        new_resources = self.latest_ca_cert.get_3779resources()

        # Shrink any child certs that no longer fit the new resource set
        # (or re-home them when our SIA moved).
        if sia_uri_changed or old_resources.oversized(new_resources):
            for child_cert in self.child_certs.all():
                child_resources = child_cert.cert.get_3779resources()
                if sia_uri_changed or child_resources.oversized(new_resources):
                    child_cert.reissue(ca_detail = self, resources = child_resources & new_resources, publisher = publisher)

        if sia_uri_changed or validity_changed or old_resources.oversized(new_resources):
            for roa in self.roas.all():
                roa.update(publisher = publisher)

        if sia_uri_changed or validity_changed:
            for ghostbuster in self.ghostbusters.all():
                ghostbuster.update(publisher = publisher)

        yield publisher.call_pubd()
+
+
+ def issue_ee(self, ca, resources, subject_key, sia,
+ cn = None, sn = None, notAfter = None, eku = None, notBefore = None):
+ """
+ Issue a new EE certificate.
+ """
+
+ trace_call_chain()
+ if notAfter is None:
+ notAfter = self.latest_ca_cert.getNotAfter()
+ return self.latest_ca_cert.issue(
+ keypair = self.private_key_id,
+ subject_key = subject_key,
+ serial = ca.next_serial_number(),
+ sia = sia,
+ aia = self.ca_cert_uri,
+ crldp = self.crl_uri,
+ resources = resources,
+ notBefore = notBefore,
+ notAfter = notAfter,
+ is_ca = False,
+ cn = cn,
+ sn = sn,
+ eku = eku)
+
+
    def issue(self, ca, child, subject_key, sia, resources, publisher, child_cert = None):
        """
        Issue a new certificate to a child.  Optional child_cert argument
        specifies an existing child_cert object to update in place; if not
        specified, we create a new one.  Returns the child_cert object
        containing the newly issued cert.
        """

        trace_call_chain()
        # Retry any stale unpublished objects before queueing more work.
        self.check_failed_publication(publisher)
        cert = self.latest_ca_cert.issue(
            keypair = self.private_key_id,
            subject_key = subject_key,
            serial = ca.next_serial_number(),
            aia = self.ca_cert_uri,
            crldp = self.crl_uri,
            sia = sia,
            resources = resources,
            notAfter = resources.valid_until)
        if child_cert is None:
            old_cert = None
            child_cert = ChildCert(child = child, ca_detail = self, cert = cert)
            logger.debug("Created new child_cert %r", child_cert)
        else:
            # Updating in place: remember the old cert so pubd can replace it.
            old_cert = child_cert.cert
            child_cert.cert = cert
            child_cert.ca_detail = self
            logger.debug("Reusing existing child_cert %r", child_cert)
        child_cert.gski = cert.gSKI()
        # Timestamp marks publication as pending; published_callback clears it.
        child_cert.published = rpki.sundial.now()
        child_cert.save()
        publisher.queue(
            uri = child_cert.uri,
            old_obj = old_cert,
            new_obj = child_cert.cert,
            repository = ca.parent.repository,
            handler = child_cert.published_callback)
        # New object on the manifest, so CRL/manifest must be regenerated.
        self.generate_crl_and_manifest(publisher = publisher)
        return child_cert
+
+
    def generate_crl_and_manifest(self, publisher, nextUpdate = None):
        """
        Generate a new CRL and a new manifest for this ca_detail.

        At the moment this is unconditional, that is, it is up to the
        caller to decide whether a new CRL is needed.

        We used to handle CRL and manifest as two separate operations,
        but there's no real point, and it's simpler to do them at once.
        """

        trace_call_chain()

        # Retry any stale unpublished objects before adding new ones.
        self.check_failed_publication(publisher)

        crl_interval = rpki.sundial.timedelta(seconds = self.ca.parent.tenant.crl_interval)
        now = rpki.sundial.now()
        if nextUpdate is None:
            nextUpdate = now + crl_interval

        # Snapshot old objects and URIs so pubd can replace them atomically.
        old_crl = self.latest_crl
        old_manifest = self.latest_manifest
        crl_uri = self.crl_uri
        manifest_uri = self.manifest_uri

        # CRL and manifest share one monotonically increasing serial number.
        crl_manifest_number = self.ca.next_crl_manifest_number()

        # EE certificate which will sign the manifest; inherits all resources.
        manifest_cert = self.issue_ee(
            ca = self.ca,
            resources = rpki.resource_set.resource_bag.from_inheritance(),
            subject_key = self.manifest_public_key,
            sia = (None, None, manifest_uri, self.ca.parent.repository.rrdp_notification_uri),
            notBefore = now)

        # Build the revocation list, purging entries which expired more than
        # one CRL interval ago.
        certlist = []
        for revoked_cert in self.revoked_certs.all():
            if now > revoked_cert.expires + crl_interval:
                revoked_cert.delete()
            else:
                certlist.append((revoked_cert.serial, revoked_cert.revoked))
        certlist.sort()

        self.latest_crl = rpki.x509.CRL.generate(
            keypair = self.private_key_id,
            issuer = self.latest_ca_cert,
            serial = crl_manifest_number,
            thisUpdate = now,
            nextUpdate = nextUpdate,
            revokedCertificates = certlist)

        # XXX
        logger.debug("%r Generating manifest, child_certs_all(): %r", self, self.child_certs.all())

        # Manifest lists everything we currently publish: the CRL plus all
        # issued objects, keyed by URI tail (filename).
        objs = [(self.crl_uri_tail, self.latest_crl)]
        objs.extend((c.uri_tail, c.cert) for c in self.child_certs.all())
        objs.extend((r.uri_tail, r.roa) for r in self.roas.filter(roa__isnull = False))
        objs.extend((g.uri_tail, g.ghostbuster) for g in self.ghostbusters.all())
        objs.extend((e.uri_tail, e.cert) for e in self.ee_certificates.all())

        # XXX
        logger.debug("%r Generating manifest, objs: %r", self, objs)

        self.latest_manifest = rpki.x509.SignedManifest.build(
            serial = crl_manifest_number,
            thisUpdate = now,
            nextUpdate = nextUpdate,
            names_and_objs = objs,
            keypair = self.manifest_private_key_id,
            certs = manifest_cert)

        # Timestamps mark publication as pending; the *_published_callback
        # handlers clear them on success.
        self.crl_published = now
        self.manifest_published = now
        self.next_crl_manifest_update = nextUpdate
        self.save()

        publisher.queue(
            uri = crl_uri,
            old_obj = old_crl,
            new_obj = self.latest_crl,
            repository = self.ca.parent.repository,
            handler = self.crl_published_callback)

        publisher.queue(
            uri = manifest_uri,
            old_obj = old_manifest,
            new_obj = self.latest_manifest,
            repository = self.ca.parent.repository,
            handler = self.manifest_published_callback)
+
+
    def crl_published_callback(self, pdu):
        """
        Check result of CRL publication.

        Raises on pubd-reported error; on success clears the pending
        crl_published timestamp set by generate_crl_and_manifest().
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.crl_published = None
        self.save()
+
    def manifest_published_callback(self, pdu):
        """
        Check result of manifest publication.

        Raises on pubd-reported error; on success clears the pending
        manifest_published timestamp set by generate_crl_and_manifest().
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.manifest_published = None
        self.save()
+
+
    @tornado.gen.coroutine
    def reissue(self, rpkid):
        """
        Reissue all current certificates issued by this ca_detail.

        Forces regeneration of every ROA, ghostbuster, EE certificate, and
        child certificate, then regenerates CRL and manifest and pushes the
        whole batch to pubd in one publication call.
        """

        trace_call_chain()
        publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
        self.check_failed_publication(publisher)
        for roa in self.roas.all():
            roa.regenerate(publisher)
        for ghostbuster in self.ghostbusters.all():
            ghostbuster.regenerate(publisher)
        for ee_certificate in self.ee_certificates.all():
            ee_certificate.reissue(publisher, force = True)
        for child_cert in self.child_certs.all():
            child_cert.reissue(self, publisher, force = True)
        self.generate_crl_and_manifest(publisher = publisher)
        self.save()
        yield publisher.call_pubd()
+
+
    def check_failed_publication(self, publisher, check_all = True):
        """
        Check for failed publication of objects issued by this ca_detail.

        All publishable objects have timestamp fields recording time of
        last attempted publication, and callback methods which clear these
        timestamps once publication has succeeded.  Our task here is to
        look for objects issued by this ca_detail which have timestamps
        set (indicating that they have not been published) and for which
        the timestamps are not very recent (for some definition of very
        recent -- intent is to allow a bit of slack in case pubd is just
        being slow).  In such cases, we want to retry publication.

        As an optimization, we can probably skip checking other products
        if manifest and CRL have been published, thus saving ourselves
        several complex SQL queries.  Not sure yet whether this
        optimization is worthwhile.

        For the moment we check everything without optimization, because
        it simplifies testing.

        For the moment our definition of staleness is hardwired; this
        should become configurable.
        """

        trace_call_chain()

        logger.debug("Checking for failed publication for %r", self)

        # Anything still unpublished after 60 seconds counts as stale.
        stale = rpki.sundial.now() - rpki.sundial.timedelta(seconds = 60)
        repository = self.ca.parent.repository
        if self.latest_crl is not None and self.crl_published is not None and self.crl_published < stale:
            logger.debug("Retrying publication for %s", self.crl_uri)
            publisher.queue(uri = self.crl_uri,
                            new_obj = self.latest_crl,
                            repository = repository,
                            handler = self.crl_published_callback)
        if self.latest_manifest is not None and self.manifest_published is not None and self.manifest_published < stale:
            logger.debug("Retrying publication for %s", self.manifest_uri)
            publisher.queue(uri = self.manifest_uri,
                            new_obj = self.latest_manifest,
                            repository = repository,
                            handler = self.manifest_published_callback)
        if not check_all:
            return
        # Retry every product whose pending-publication timestamp is stale.
        for child_cert in self.child_certs.filter(published__isnull = False, published__lt = stale):
            logger.debug("Retrying publication for %s", child_cert)
            publisher.queue(
                uri = child_cert.uri,
                new_obj = child_cert.cert,
                repository = repository,
                handler = child_cert.published_callback)
        for roa in self.roas.filter(published__isnull = False, published__lt = stale):
            logger.debug("Retrying publication for %s", roa)
            publisher.queue(
                uri = roa.uri,
                new_obj = roa.roa,
                repository = repository,
                handler = roa.published_callback)
        for ghostbuster in self.ghostbusters.filter(published__isnull = False, published__lt = stale):
            logger.debug("Retrying publication for %s", ghostbuster)
            publisher.queue(
                uri = ghostbuster.uri,
                new_obj = ghostbuster.ghostbuster,
                repository = repository,
                handler = ghostbuster.published_callback)
        for ee_cert in self.ee_certificates.filter(published__isnull = False, published__lt = stale):
            logger.debug("Retrying publication for %s", ee_cert)
            publisher.queue(
                uri = ee_cert.uri,
                new_obj = ee_cert.cert,
                repository = repository,
                handler = ee_cert.published_callback)
+
+
+@xml_hooks
+class Child(models.Model):
+ child_handle = models.SlugField(max_length = 255)
+ bpki_cert = CertificateField(null = True)
+ bpki_glue = CertificateField(null = True)
+ last_cms_timestamp = SundialField(null = True)
+ tenant = models.ForeignKey(Tenant, related_name = "children")
+ bsc = models.ForeignKey(BSC, related_name = "children")
+ objects = XMLManager()
+
+ class Meta:
+ unique_together = ("tenant", "child_handle")
+
+ xml_template = XMLTemplate(
+ name = "child",
+ handles = (BSC,),
+ elements = ("bpki_cert", "bpki_glue"))
+
+ def __repr__(self):
+ try:
+ return "<Child: {}.{}>".format(self.tenant.tenant_handle, self.child_handle)
+ except:
+ return "<Child: Child object>"
+
+
+ @tornado.gen.coroutine
+ def xml_pre_delete_hook(self, rpkid):
+ trace_call_chain()
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+ ca_details = set()
+ for child_cert in self.child_certs.all():
+ ca_details.add(child_cert.ca_detail)
+ child_cert.revoke(publisher = publisher)
+ for ca_detail in ca_details:
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+ yield publisher.call_pubd()
+
+
+ @tornado.gen.coroutine
+ def xml_post_save_hook(self, rpkid, q_pdu):
+ trace_call_chain()
+ if q_pdu.get("clear_replay_protection"):
+ self.clear_replay_protection()
+ if q_pdu.get("reissue"):
+ yield self.serve_reissue(rpkid = rpkid)
+
+
+ def serve_reissue(self, rpkid):
+ trace_call_chain()
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+ for child_cert in self.child_certs.all():
+ child_cert.reissue(child_cert.ca_detail, publisher, force = True)
+ yield publisher.call_pubd()
+
+
+ def clear_replay_protection(self):
+ trace_call_chain()
+ self.last_cms_timestamp = None
+ self.save()
+
+
+ @tornado.gen.coroutine
+ def up_down_handle_list(self, rpkid, q_msg, r_msg):
+
+ trace_call_chain()
+ irdb_resources = yield rpkid.irdb_query_child_resources(self.tenant.tenant_handle, self.child_handle)
+ if irdb_resources.valid_until < rpki.sundial.now():
+ logger.debug("Child %s's resources expired %s", self.child_handle, irdb_resources.valid_until)
+ else:
+ for ca_detail in CADetail.objects.filter(ca__parent__tenant = self.tenant, state = "active"):
+ resources = ca_detail.latest_ca_cert.get_3779resources() & irdb_resources
+ if resources.empty():
+ logger.debug("No overlap between received resources and what child %s should get ([%s], [%s])",
+ self.child_handle, ca_detail.latest_ca_cert.get_3779resources(), irdb_resources)
+ continue
+ rc = SubElement(r_msg, rpki.up_down.tag_class,
+ class_name = ca_detail.ca.parent_resource_class,
+ cert_url = ca_detail.ca_cert_uri,
+ resource_set_as = str(resources.asn),
+ resource_set_ipv4 = str(resources.v4),
+ resource_set_ipv6 = str(resources.v6),
+ resource_set_notafter = str(resources.valid_until))
+ for child_cert in self.child_certs.filter(ca_detail = ca_detail):
+ c = SubElement(rc, rpki.up_down.tag_certificate, cert_url = child_cert.uri)
+ c.text = child_cert.cert.get_Base64()
+ SubElement(rc, rpki.up_down.tag_issuer).text = ca_detail.latest_ca_cert.get_Base64()
+
+
+ @tornado.gen.coroutine
+ def up_down_handle_issue(self, rpkid, q_msg, r_msg):
+
+ trace_call_chain()
+
+ req = q_msg[0]
+ assert req.tag == rpki.up_down.tag_request
+
+ # Subsetting not yet implemented, this is the one place where
+ # we have to handle it, by reporting that we're lame.
+
+ if any(req.get(a) for a in ("req_resource_set_as",
+ "req_resource_set_ipv4", "req_resource_set_ipv6")):
+ raise rpki.exceptions.NotImplementedYet("req_* attributes not implemented yet, sorry")
+
+ class_name = req.get("class_name")
+ pkcs10 = rpki.x509.PKCS10(Base64 = req.text)
+ pkcs10.check_valid_request_ca()
+ ca_detail = CADetail.objects.get(ca__parent__tenant = self.tenant,
+ ca__parent_resource_class = class_name,
+ state = "active")
+
+ irdb_resources = yield rpkid.irdb_query_child_resources(self.tenant.tenant_handle,
+ self.child_handle)
+
+ if irdb_resources.valid_until < rpki.sundial.now():
+ raise rpki.exceptions.IRDBExpired("IRDB entry for child %s expired %s" % (
+ self.child_handle, irdb_resources.valid_until))
+
+ resources = irdb_resources & ca_detail.latest_ca_cert.get_3779resources()
+ resources.valid_until = irdb_resources.valid_until
+ req_key = pkcs10.getPublicKey()
+ req_sia = pkcs10.get_SIA()
+
+ # Generate new cert or regenerate old one if necessary
+
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+
+ try:
+ child_cert = self.child_certs.get(ca_detail = ca_detail, gski = req_key.gSKI())
+
+ except ChildCert.DoesNotExist:
+ child_cert = ca_detail.issue(
+ ca = ca_detail.ca,
+ child = self,
+ subject_key = req_key,
+ sia = req_sia,
+ resources = resources,
+ publisher = publisher)
+
+ else:
+ child_cert = child_cert.reissue(
+ ca_detail = ca_detail,
+ sia = req_sia,
+ resources = resources,
+ publisher = publisher)
+
+ yield publisher.call_pubd()
+
+ rc = SubElement(r_msg, rpki.up_down.tag_class,
+ class_name = class_name,
+ cert_url = ca_detail.ca_cert_uri,
+ resource_set_as = str(resources.asn),
+ resource_set_ipv4 = str(resources.v4),
+ resource_set_ipv6 = str(resources.v6),
+ resource_set_notafter = str(resources.valid_until))
+ c = SubElement(rc, rpki.up_down.tag_certificate, cert_url = child_cert.uri)
+ c.text = child_cert.cert.get_Base64()
+ SubElement(rc, rpki.up_down.tag_issuer).text = ca_detail.latest_ca_cert.get_Base64()
+
+
+ @tornado.gen.coroutine
+ def up_down_handle_revoke(self, rpkid, q_msg, r_msg):
+ trace_call_chain()
+ key = q_msg[0]
+ assert key.tag == rpki.up_down.tag_key
+ class_name = key.get("class_name")
+ publisher = rpki.rpkid.publication_queue(rpkid = rpkid)
+ ca_details = set()
+ for child_cert in ChildCert.objects.filter(ca_detail__ca__parent__tenant = self.tenant,
+ ca_detail__ca__parent_resource_class = class_name,
+ gski = key.get("ski")):
+ ca_details.add(child_cert.ca_detail)
+ child_cert.revoke(publisher = publisher)
+ for ca_detail in ca_details:
+ ca_detail.generate_crl_and_manifest(publisher = publisher)
+ yield publisher.call_pubd()
+ SubElement(r_msg, key.tag, class_name = class_name, ski = key.get("ski"))
+
+
+ @tornado.gen.coroutine
+ def serve_up_down(self, rpkid, q_der):
+ """
+ Outer layer of server handling for one up-down PDU from this child.
+ """
+
+ trace_call_chain()
+
+ if self.bsc is None:
+ raise rpki.exceptions.BSCNotFound("Could not find BSC")
+
+ q_cms = rpki.up_down.cms_msg(DER = q_der)
+ q_msg = q_cms.unwrap((rpkid.bpki_ta, self.tenant.bpki_cert, self.tenant.bpki_glue, self.bpki_cert, self.bpki_glue))
+ q_cms.check_replay_sql(self, "child", self.child_handle)
+ q_type = q_msg.get("type")
+
+ logger.info("Serving %s query from child %s [sender %s, recipient %s]",
+ q_type, self.child_handle, q_msg.get("sender"), q_msg.get("recipient"))
+
+ if rpki.up_down.enforce_strict_up_down_xml_sender and q_msg.get("sender") != self.child_handle:
+ raise rpki.exceptions.BadSender("Unexpected XML sender %s" % q_msg.get("sender"))
+
+ r_msg = Element(rpki.up_down.tag_message, nsmap = rpki.up_down.nsmap, version = rpki.up_down.version,
+ sender = q_msg.get("recipient"), recipient = q_msg.get("sender"), type = q_type + "_response")
+
+ try:
+ yield getattr(self, "up_down_handle_" + q_type)(rpkid, q_msg, r_msg)
+
+ except Exception, e:
+ logger.exception("Unhandled exception serving child %r", self)
+ rpki.up_down.generate_error_response_from_exception(r_msg, e, q_type)
+
+ r_der = rpki.up_down.cms_msg().wrap(r_msg, self.bsc.private_key_id, self.bsc.signing_cert, self.bsc.signing_cert_crl)
+ raise tornado.gen.Return(r_der)
+
class ChildCert(models.Model):
    """
    A certificate we issued to one of our children.
    """

    cert = CertificateField()
    # Pending-publication timestamp; cleared by published_callback().
    published = SundialField(null = True)
    gski = models.CharField(max_length = 27) # Assumes SHA-1 -- SHA-256 would be 43, SHA-512 would be 86, etc.
    child = models.ForeignKey(Child, related_name = "child_certs")
    ca_detail = models.ForeignKey(CADetail, related_name = "child_certs")

    def __repr__(self):
        try:
            return "<ChildCert: {}.{} {}>".format(self.child.tenant.tenant_handle,
                                                  self.child.child_handle,
                                                  self.uri)
        except:
            return "<ChildCert: ChildCert object>"


    @property
    def uri_tail(self):
        """
        Return the tail (filename) portion of the URI for this child_cert.
        """

        return self.gski + ".cer"


    @property
    def uri(self):
        """
        Return the publication URI for this child_cert.
        """

        return self.ca_detail.ca.sia_uri + self.uri_tail


    def revoke(self, publisher):
        """
        Revoke a child cert.

        Records a RevokedCert entry for the next CRL, queues withdrawal of
        the published object, and deletes this database row.  Caller is
        responsible for regenerating CRL/manifest afterwards.
        """

        trace_call_chain()
        ca_detail = self.ca_detail
        logger.debug("Revoking %r", self)
        RevokedCert.revoke(cert = self.cert, ca_detail = ca_detail)
        publisher.queue(uri = self.uri, old_obj = self.cert, repository = ca_detail.ca.parent.repository)
        self.delete()


    def reissue(self, ca_detail, publisher, resources = None, sia = None, force = False):
        """
        Reissue an existing child cert, reusing the public key.  If
        the child cert we would generate is identical to the one we
        already have, we just return the one we already have.  If we
        have to revoke the old child cert when generating the new one,
        we have to generate a new ChildCert, so calling code that
        needs the updated ChildCert must use the return value from
        this method.
        """

        trace_call_chain()
        # pylint: disable=E1101
        ca = ca_detail.ca
        child = self.child
        old_resources = self.cert.get_3779resources()
        old_sia = self.cert.get_SIA()
        old_aia = self.cert.get_AIA()[0]
        old_ca_detail = self.ca_detail
        needed = False
        if resources is None:
            resources = old_resources
        if sia is None:
            sia = old_sia
        # Make sure the SIA includes an RRDP notification URI slot.
        if len(sia) < 4 or not sia[3]:
            sia = (sia[0], sia[1], sia[2], ca_detail.ca.parent.repository.rrdp_notification_uri)
        assert resources.valid_until is not None and old_resources.valid_until is not None
        # Determine whether anything that matters has changed.
        if resources.asn != old_resources.asn or resources.v4 != old_resources.v4 or resources.v6 != old_resources.v6:
            logger.debug("Resources changed for %r: old %s new %s", self, old_resources, resources)
            needed = True
        if resources.valid_until != old_resources.valid_until:
            logger.debug("Validity changed for %r: old %s new %s",
                         self, old_resources.valid_until, resources.valid_until)
            needed = True
        if sia != old_sia:
            logger.debug("SIA changed for %r: old %r new %r", self, old_sia, sia)
            needed = True
        if ca_detail != old_ca_detail:
            logger.debug("Issuer changed for %r: old %r new %r", self, old_ca_detail, ca_detail)
            needed = True
        if ca_detail.ca_cert_uri != old_aia:
            logger.debug("AIA changed for %r: old %r new %r", self, old_aia, ca_detail.ca_cert_uri)
            needed = True
        # Shrinking resources or shortening validity requires revocation of
        # the old cert(s), not just replacement.
        must_revoke = old_resources.oversized(resources) or old_resources.valid_until > resources.valid_until
        if must_revoke:
            logger.debug("Must revoke any existing cert(s) for %r", self)
            needed = True
        if not needed and force:
            logger.debug("No change needed for %r, forcing reissuance anyway", self)
            needed = True
        if not needed:
            logger.debug("No change to %r", self)
            return self
        if must_revoke:
            for child_cert in child.child_certs.filter(ca_detail = ca_detail, gski = self.gski):
                logger.debug("Revoking %r", child_cert)
                child_cert.revoke(publisher = publisher)
            ca_detail.generate_crl_and_manifest(publisher = publisher)
        # If we revoked, self was deleted, so issue() must create a new row.
        child_cert = ca_detail.issue(
            ca = ca,
            child = child,
            subject_key = self.cert.getPublicKey(),
            sia = sia,
            resources = resources,
            child_cert = None if must_revoke else self,
            publisher = publisher)
        logger.debug("New %r", child_cert)
        return child_cert


    def published_callback(self, pdu):
        """
        Publication callback: check result and mark published.
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.published = None
        self.save()
+
+
class EECertificate(models.Model):
    """
    An end-entity certificate issued at a tenant's request, with the
    private key held by the tenant rather than by this engine.
    """

    gski = models.CharField(max_length = 27) # Assumes SHA-1 -- SHA-256 would be 43, SHA-512 would be 86, etc.
    cert = CertificateField()
    # Pending-publication timestamp; cleared by published_callback().
    published = SundialField(null = True)
    tenant = models.ForeignKey(Tenant, related_name = "ee_certificates")
    ca_detail = models.ForeignKey(CADetail, related_name = "ee_certificates")

    def __repr__(self):
        try:
            return "<EECertificate: {} {}>".format(self.tenant.tenant_handle,
                                                   self.uri)
        except:
            return "<EECertificate: EECertificate object>"


    @property
    def uri(self):
        """
        Return the publication URI for this EECertificate.
        """

        return self.ca_detail.ca.sia_uri + self.uri_tail


    @property
    def uri_tail(self):
        """
        Return the tail (filename portion) of the publication URI for this
        EECertificate.
        """

        return self.gski + ".cer"


    def revoke(self, publisher):
        """
        Revoke and withdraw an EE certificate.
        """

        trace_call_chain()
        ca_detail = self.ca_detail
        logger.debug("Revoking %r", self)
        RevokedCert.revoke(cert = self.cert, ca_detail = ca_detail)
        publisher.queue(uri = self.uri, old_obj = self.cert, repository = ca_detail.ca.parent.repository)
        self.delete()


    def reissue(self, publisher, ca_detail = None, resources = None, force = False):
        """
        Reissue an existing EE cert, reusing the public key.  If the EE
        cert we would generate is identical to the one we already have, we
        just return; if we need to reissue, we reuse this EECertificate and
        just update its contents, as the publication URI will not have
        changed.
        """

        trace_call_chain()
        needed = False
        old_cert = self.cert
        old_ca_detail = self.ca_detail
        if ca_detail is None:
            ca_detail = old_ca_detail
        # Moving to a different CA would change the publication URI.
        assert ca_detail.ca is old_ca_detail.ca
        old_resources = old_cert.get_3779resources()
        if resources is None:
            resources = old_resources
        assert resources.valid_until is not None and old_resources.valid_until is not None
        assert ca_detail.covers(resources)
        # Determine whether anything that matters has changed.
        if ca_detail != self.ca_detail:
            logger.debug("ca_detail changed for %r: old %r new %r", self, self.ca_detail, ca_detail)
            needed = True
        if ca_detail.ca_cert_uri != old_cert.get_AIA()[0]:
            logger.debug("AIA changed for %r: old %s new %s", self, old_cert.get_AIA()[0], ca_detail.ca_cert_uri)
            needed = True
        if resources.valid_until != old_resources.valid_until:
            logger.debug("Validity changed for %r: old %s new %s", self, old_resources.valid_until, resources.valid_until)
            needed = True
        if resources.asn != old_resources.asn or resources.v4 != old_resources.v4 or resources.v6 != old_resources.v6:
            logger.debug("Resources changed for %r: old %s new %s", self, old_resources, resources)
            needed = True
        # Shrinking resources or shortening validity requires revoking the
        # old certificate, not just replacing it.
        must_revoke = old_resources.oversized(resources) or old_resources.valid_until > resources.valid_until
        if must_revoke:
            logger.debug("Must revoke existing cert(s) for %r", self)
            needed = True
        if not needed and force:
            logger.debug("No change needed for %r, forcing reissuance anyway", self)
            needed = True
        if not needed:
            logger.debug("No change to %r", self)
            return
        cn, sn = self.cert.getSubject().extract_cn_and_sn()
        self.cert = ca_detail.issue_ee(
            ca = ca_detail.ca,
            subject_key = self.cert.getPublicKey(),
            eku = self.cert.get_EKU(),
            sia = None,
            resources = resources,
            notAfter = resources.valid_until,
            cn = cn,
            sn = sn)
        self.save()
        publisher.queue(
            uri = self.uri,
            old_obj = old_cert,
            new_obj = self.cert,
            repository = ca_detail.ca.parent.repository,
            handler = self.published_callback)
        if must_revoke:
            # Bug fix: old_cert is already the rpki.x509.X509 object taken
            # from self.cert, so pass it directly; the previous code passed
            # old_cert.cert, which would raise AttributeError at runtime
            # (compare ChildCert.revoke / Ghostbuster.revoke, which pass the
            # X509 itself to RevokedCert.revoke).
            RevokedCert.revoke(cert = old_cert, ca_detail = old_ca_detail)
            ca_detail.generate_crl_and_manifest(publisher = publisher)


    def published_callback(self, pdu):
        """
        Publication callback: check result and mark published.
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.published = None
        self.save()
+
+
+
class Ghostbuster(models.Model):
    """
    A Ghostbusters (RFC 6493) record: a vCard signed into an RPKI object.
    """

    vcard = models.TextField()
    cert = CertificateField()
    ghostbuster = GhostbusterField()
    # Pending-publication timestamp; cleared by published_callback().
    published = SundialField(null = True)
    tenant = models.ForeignKey(Tenant, related_name = "ghostbusters")
    ca_detail = models.ForeignKey(CADetail, related_name = "ghostbusters")

    def __repr__(self):
        try:
            uri = " " + self.uri
        except:
            uri = ""
        try:
            return "<Ghostbuster: {}{}>".format(self.tenant.tenant_handle, uri)
        except:
            return "<Ghostbuster: Ghostbuster object>"


    def update(self, publisher):
        """
        Bring this Ghostbuster up to date if necesssary.

        Generates the object if it does not yet exist, regenerates it when
        it approaches expiration (and the issuer can outlive it) or when
        the issuer's AIA has changed; otherwise does nothing.
        """

        trace_call_chain()

        if self.ghostbuster is None:
            logger.debug("Ghostbuster record doesn't exist, generating")
            return self.generate(publisher = publisher)

        now = rpki.sundial.now()
        regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.tenant.regen_margin)

        if now > regen_time and self.cert.getNotAfter() < self.ca_detail.latest_ca_cert.getNotAfter():
            logger.debug("%r past threshold %s, regenerating", self, regen_time)
            return self.regenerate(publisher = publisher)

        if now > regen_time:
            logger.warning("%r is past threshold %s but so is issuer %r, can't regenerate", self, regen_time, self.ca_detail)

        if self.cert.get_AIA()[0] != self.ca_detail.ca_cert_uri:
            logger.debug("%r AIA changed, regenerating", self)
            return self.regenerate(publisher = publisher)


    def generate(self, publisher):
        """
        Generate a Ghostbuster record

        As with ROAs, we generate a new keypair every time.
        """

        trace_call_chain()
        # Ghostbuster EE certs inherit all resources from the issuer.
        resources = rpki.resource_set.resource_bag.from_inheritance()
        keypair = rpki.x509.RSA.generate()
        self.cert = self.ca_detail.issue_ee(
            ca = self.ca_detail.ca,
            resources = resources,
            subject_key = keypair.get_public(),
            sia = (None, None, self.uri_from_key(keypair),
                   self.ca_detail.ca.parent.repository.rrdp_notification_uri))
        self.ghostbuster = rpki.x509.Ghostbuster.build(self.vcard, keypair, (self.cert,))
        # Timestamp marks publication as pending.
        self.published = rpki.sundial.now()
        self.save()
        logger.debug("Generating %r", self)
        publisher.queue(
            uri = self.uri,
            new_obj = self.ghostbuster,
            repository = self.ca_detail.ca.parent.repository,
            handler = self.published_callback)


    def published_callback(self, pdu):
        """
        Check publication result.
        """

        trace_call_chain()
        rpki.publication.raise_if_error(pdu)
        self.published = None
        self.save()


    def revoke(self, publisher, regenerate = False, allow_failure = False):
        """
        Withdraw Ghostbuster associated with this Ghostbuster.

        In order to preserve make-before-break properties without
        duplicating code, this method also handles generating a
        replacement ghostbuster when requested.

        If allow_failure is set, failing to withdraw the ghostbuster will not be
        considered an error.
        """

        trace_call_chain()
        logger.debug("%s %r", "Regenerating" if regenerate else "Not regenerating", self)
        # Capture old state before generate() overwrites self.cert et al.
        old_ca_detail = self.ca_detail
        old_obj = self.ghostbuster
        old_cer = self.cert
        old_uri = self.uri
        if regenerate:
            # Make before break: publish the replacement first.
            self.generate(publisher = publisher)
        logger.debug("Withdrawing %r and revoking its EE cert", self)
        RevokedCert.revoke(cert = old_cer, ca_detail = old_ca_detail)
        publisher.queue(
            uri = old_uri,
            old_obj = old_obj,
            repository = old_ca_detail.ca.parent.repository,
            handler = False if allow_failure else None)
        if not regenerate:
            self.delete()


    def regenerate(self, publisher):
        """
        Reissue Ghostbuster associated with this Ghostbuster.
        """

        trace_call_chain()
        if self.ghostbuster is None:
            self.generate(publisher = publisher)
        else:
            self.revoke(publisher = publisher, regenerate = True)


    def uri_from_key(self, key):
        """
        Return publication URI for a public key.
        """

        trace_call_chain()
        return self.ca_detail.ca.sia_uri + key.gSKI() + ".gbr"


    @property
    def uri(self):
        """
        Return the publication URI for this Ghostbuster.
        """

        return self.ca_detail.ca.sia_uri + self.uri_tail


    @property
    def uri_tail(self):
        """
        Return the tail (filename portion) of the publication URI for this
        Ghostbuster.
        """

        return self.cert.gSKI() + ".gbr"
+
+
class RevokedCert(models.Model):
    """
    One entry destined for a CRL: a certificate we revoked, remembered
    until it has expired by more than one CRL interval.
    """

    serial = models.BigIntegerField()
    revoked = SundialField()
    expires = SundialField()
    ca_detail = models.ForeignKey(CADetail, related_name = "revoked_certs")

    def __repr__(self):
        try:
            fields = (self.ca_detail.ca.parent.tenant.tenant_handle,
                      self.ca_detail.ca.parent.parent_handle,
                      self.ca_detail.ca.parent_resource_class,
                      self.ca_detail.crl_uri,
                      self.serial,
                      self.revoked,
                      self.expires)
            return "<RevokedCert: {}.{} class {} {} serial {} revoked {} expires {}>".format(*fields)
        except:
            return "<RevokedCert: RevokedCert object>"


    @classmethod
    def revoke(cls, cert, ca_detail):
        """
        Record revocation of a certificate.

        Creates and returns the database row which will feed this
        certificate's serial number into the issuer's next CRL.
        """

        trace_call_chain()
        timestamp = rpki.sundial.now()
        entry = cls.objects.create(
            ca_detail = ca_detail,
            serial = cert.getSerial(),
            revoked = timestamp,
            expires = cert.getNotAfter())
        return entry
+
+
class ROA(models.Model):
    """
    A Route Origin Authorization (RFC 6482) object.
    """

    asn = models.BigIntegerField()
    # Prefix lists stored as their string representations; either may be null.
    ipv4 = models.TextField(null = True)
    ipv6 = models.TextField(null = True)
    cert = CertificateField()
    roa = ROAField()
    # Pending-publication timestamp; cleared by published_callback().
    published = SundialField(null = True)
    tenant = models.ForeignKey(Tenant, related_name = "roas")
    ca_detail = models.ForeignKey(CADetail, related_name = "roas")

    def __repr__(self):
        try:
            resources = " {} {}".format(self.asn, ",".join(str(ip) for ip in (self.ipv4, self.ipv6) if ip is not None))
        except:
            resources = ""
        try:
            uri = " " + self.uri
        except:
            uri = ""
        try:
            return "<ROA: {}{}{}>".format(self.tenant.tenant_handle, resources, uri)
        except:
            return "<ROA: ROA object>"


    def update(self, publisher):
        """
        Bring ROA up to date if necesssary.

        Generates the ROA if it does not exist or has no usable issuer;
        regenerates it when the issuer is inactive, the EE cert nears
        expiration (and the issuer can outlive it), the EE cert's resources
        no longer match, or the issuer's AIA changed; otherwise no-op.
        """

        trace_call_chain()

        if self.roa is None:
            logger.debug("%r doesn't exist, generating", self)
            return self.generate(publisher = publisher)

        if self.ca_detail is None:
            logger.debug("%r has no associated ca_detail, generating", self)
            return self.generate(publisher = publisher)

        if self.ca_detail.state != "active":
            logger.debug("ca_detail associated with %r not active (state %s), regenerating", self, self.ca_detail.state)
            return self.regenerate(publisher = publisher)

        now = rpki.sundial.now()
        regen_time = self.cert.getNotAfter() - rpki.sundial.timedelta(seconds = self.tenant.regen_margin)

        if now > regen_time and self.cert.getNotAfter() < self.ca_detail.latest_ca_cert.getNotAfter():
            logger.debug("%r past threshold %s, regenerating", self, regen_time)
            return self.regenerate(publisher = publisher)

        if now > regen_time:
            logger.warning("%r is past threshold %s but so is issuer %r, can't regenerate", self, regen_time, self.ca_detail)

        ca_resources = self.ca_detail.latest_ca_cert.get_3779resources()
        ee_resources = self.cert.get_3779resources()

        if ee_resources.oversized(ca_resources):
            logger.debug("%r oversized with respect to CA, regenerating", self)
            return self.regenerate(publisher = publisher)

        v4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
        v6 = rpki.resource_set.resource_set_ipv6(self.ipv6)

        if ee_resources.v4 != v4 or ee_resources.v6 != v6:
            logger.debug("%r resources do not match EE, regenerating", self)
            return self.regenerate(publisher = publisher)

        if self.cert.get_AIA()[0] != self.ca_detail.ca_cert_uri:
            logger.debug("%r AIA changed, regenerating", self)
            return self.regenerate(publisher = publisher)
+
+
+ def generate(self, publisher):
+ """
+ Generate a ROA.
+
+ At present we have no way of performing a direct lookup from a
+ desired set of resources to a covering certificate, so we have to
+ search. This could be quite slow if we have a lot of active
+ ca_detail objects. Punt on the issue for now, revisit if
+ profiling shows this as a hotspot.
+
+ Once we have the right covering certificate, we generate the ROA
+ payload, generate a new EE certificate, use the EE certificate to
+ sign the ROA payload, publish the result, then throw away the
+ private key for the EE cert, all per the ROA specification. This
+ implies that generating a lot of ROAs will tend to thrash
+ /dev/random, but there is not much we can do about that.
+ """
+
+ trace_call_chain()
+
+ if self.ipv4 is None and self.ipv6 is None:
+ raise rpki.exceptions.EmptyROAPrefixList
+
+ v4 = rpki.resource_set.resource_set_ipv4(self.ipv4)
+ v6 = rpki.resource_set.resource_set_ipv6(self.ipv6)
+
+ # http://stackoverflow.com/questions/26270042/how-do-you-catch-this-exception
+ # "Django is amazing when its not terrifying."
+ try:
+ ca_detail = self.ca_detail
+ except CADetail.DoesNotExist:
+ ca_detail = None
+
+ if ca_detail is not None and ca_detail.state == "active" and not ca_detail.has_expired():
+ logger.debug("Keeping old ca_detail %r for ROA %r", ca_detail, self)
+ else:
+ logger.debug("Searching for new ca_detail for ROA %r", self)
+ for ca_detail in CADetail.objects.filter(ca__parent__tenant = self.tenant, state = "active"):
+ resources = ca_detail.latest_ca_cert.get_3779resources()
+ if not ca_detail.has_expired() and v4.issubset(resources.v4) and v6.issubset(resources.v6):
+ logger.debug("Using %r for ROA %r", ca_detail, self)
+ self.ca_detail = ca_detail
+ break
+ else:
+ raise rpki.exceptions.NoCoveringCertForROA("Could not find a certificate covering %r" % self)
+
+ resources = rpki.resource_set.resource_bag(v4 = v4, v6 = v6)
+ keypair = rpki.x509.RSA.generate()
+
+ self.cert = self.ca_detail.issue_ee(
+ ca = self.ca_detail.ca,
+ resources = resources,
+ subject_key = keypair.get_public(),
+ sia = (None, None, self.uri_from_key(keypair),
+ self.ca_detail.ca.parent.repository.rrdp_notification_uri))
+ self.roa = rpki.x509.ROA.build(self.asn,
+ rpki.resource_set.roa_prefix_set_ipv4(self.ipv4),
+ rpki.resource_set.roa_prefix_set_ipv6(self.ipv6),
+ keypair,
+ (self.cert,))
+ self.published = rpki.sundial.now()
+ self.save()
+
+ logger.debug("Generating %r", self)
+ publisher.queue(uri = self.uri, new_obj = self.roa,
+ repository = self.ca_detail.ca.parent.repository,
+ handler = self.published_callback)
+
+
+ def published_callback(self, pdu):
+ """
+ Check publication result.
+ """
+
+ trace_call_chain()
+ rpki.publication.raise_if_error(pdu)
+ self.published = None
+ self.save()
+
+
+ def revoke(self, publisher, regenerate = False, allow_failure = False):
+ """
+ Withdraw this ROA.
+
+ In order to preserve make-before-break properties without
+ duplicating code, this method also handles generating a
+ replacement ROA when requested.
+
+ If allow_failure is set, failing to withdraw the ROA will not be
+ considered an error.
+ """
+
+ trace_call_chain()
+ logger.debug("%s %r", "Regenerating" if regenerate else "Not regenerating", self)
+ old_ca_detail = self.ca_detail
+ old_obj = self.roa
+ old_cer = self.cert
+ old_uri = self.uri
+ if regenerate:
+ self.generate(publisher = publisher)
+ logger.debug("Withdrawing %r and revoking its EE cert", self)
+ RevokedCert.revoke(cert = old_cer, ca_detail = old_ca_detail)
+ publisher.queue(
+ uri = old_uri,
+ old_obj = old_obj,
+ repository = old_ca_detail.ca.parent.repository,
+ handler = False if allow_failure else None)
+ if not regenerate:
+ self.delete()
+
+
+ def regenerate(self, publisher):
+ """
+ Reissue this ROA.
+ """
+
+ trace_call_chain()
+ if self.ca_detail is None:
+ self.generate(publisher = publisher)
+ else:
+ self.revoke(publisher = publisher, regenerate = True)
+
+
+ def uri_from_key(self, key):
+ """
+ Return publication URI for a public key.
+ """
+
+ trace_call_chain()
+ return self.ca_detail.ca.sia_uri + key.gSKI() + ".roa"
+
+
+ @property
+ def uri(self):
+ """
+ Return the publication URI for this ROA.
+ """
+
+ return self.ca_detail.ca.sia_uri + self.uri_tail
+
+
+ @property
+ def uri_tail(self):
+ """
+ Return the tail (filename portion) of the publication URI for this
+ ROA.
+ """
+
+ return self.cert.gSKI() + ".roa"
diff --git a/rpki/rtr/bgpdump.py b/rpki/rtr/bgpdump.py
index fc3ae9df..22ac0d83 100755
--- a/rpki/rtr/bgpdump.py
+++ b/rpki/rtr/bgpdump.py
@@ -39,292 +39,295 @@ from rpki.rtr.channels import Timestamp
class IgnoreThisRecord(Exception):
- pass
+ pass
class PrefixPDU(rpki.rtr.generator.PrefixPDU):
- @staticmethod
- def from_bgpdump(line, rib_dump):
- try:
- assert isinstance(rib_dump, bool)
- fields = line.split("|")
-
- # Parse prefix, including figuring out IP protocol version
- cls = rpki.rtr.generator.IPv6PrefixPDU if ":" in fields[5] else rpki.rtr.generator.IPv4PrefixPDU
- self = cls()
- self.timestamp = Timestamp(fields[1])
- p, l = fields[5].split("/")
- self.prefix = rpki.POW.IPAddress(p)
- self.prefixlen = self.max_prefixlen = int(l)
-
- # Withdrawals don't have AS paths, so be careful
- assert fields[2] == "B" if rib_dump else fields[2] in ("A", "W")
- if fields[2] == "W":
- self.asn = 0
- self.announce = 0
- else:
- self.announce = 1
- if not fields[6] or "{" in fields[6] or "(" in fields[6]:
- raise IgnoreThisRecord
- a = fields[6].split()[-1]
- if "." in a:
- a = [int(s) for s in a.split(".")]
- if len(a) != 2 or a[0] < 0 or a[0] > 65535 or a[1] < 0 or a[1] > 65535:
- logging.warn("Bad dotted ASNum %r, ignoring record", fields[6])
+ @staticmethod
+ def from_bgpdump(line, rib_dump):
+ try:
+ assert isinstance(rib_dump, bool)
+ fields = line.split("|")
+
+ # Parse prefix, including figuring out IP protocol version
+ cls = rpki.rtr.generator.IPv6PrefixPDU if ":" in fields[5] else rpki.rtr.generator.IPv4PrefixPDU
+ self = cls(version = min(rpki.rtr.pdus.PDU.version_map))
+ self.timestamp = Timestamp(fields[1])
+ p, l = fields[5].split("/")
+ self.prefix = rpki.POW.IPAddress(p)
+ self.prefixlen = self.max_prefixlen = int(l)
+
+ # Withdrawals don't have AS paths, so be careful
+ assert fields[2] == "B" if rib_dump else fields[2] in ("A", "W")
+ if fields[2] == "W":
+ self.asn = 0
+ self.announce = 0
+ else:
+ self.announce = 1
+ if not fields[6] or "{" in fields[6] or "(" in fields[6]:
+ raise IgnoreThisRecord
+ a = fields[6].split()[-1]
+ if "." in a:
+ a = [int(s) for s in a.split(".")]
+ if len(a) != 2 or a[0] < 0 or a[0] > 65535 or a[1] < 0 or a[1] > 65535:
+ logging.warn("Bad dotted ASNum %r, ignoring record", fields[6])
+ raise IgnoreThisRecord
+ a = (a[0] << 16) | a[1]
+ else:
+ a = int(a)
+ self.asn = a
+
+ self.check()
+ return self
+
+ except IgnoreThisRecord:
+ raise
+
+ except Exception, e:
+ logging.warn("Ignoring line %r: %s", line, e)
raise IgnoreThisRecord
- a = (a[0] << 16) | a[1]
- else:
- a = int(a)
- self.asn = a
- self.check()
- return self
- except IgnoreThisRecord:
- raise
+class AXFRSet(rpki.rtr.generator.AXFRSet):
- except Exception, e:
- logging.warn("Ignoring line %r: %s", line, e)
- raise IgnoreThisRecord
+ serial = None
+
+ @staticmethod
+ def read_bgpdump(filename):
+ assert filename.endswith(".bz2")
+ logging.debug("Reading %s", filename)
+ bunzip2 = subprocess.Popen(("bzip2", "-c", "-d", filename), stdout = subprocess.PIPE)
+ bgpdump = subprocess.Popen(("bgpdump", "-m", "-"), stdin = bunzip2.stdout, stdout = subprocess.PIPE)
+ return bgpdump.stdout
+
+ @classmethod
+ def parse_bgpdump_rib_dump(cls, filename):
+ # pylint: disable=W0201
+ assert os.path.basename(filename).startswith("ribs.")
+ self = cls(version = min(rpki.rtr.pdus.PDU.version_map))
+ self.serial = None
+ for line in cls.read_bgpdump(filename):
+ try:
+ pfx = PrefixPDU.from_bgpdump(line, rib_dump = True)
+ except IgnoreThisRecord:
+ continue
+ self.append(pfx)
+ self.serial = pfx.timestamp
+ if self.serial is None:
+ sys.exit("Failed to parse anything useful from %s" % filename)
+ self.sort()
+ for i in xrange(len(self) - 2, -1, -1):
+ if self[i] == self[i + 1]:
+ del self[i + 1]
+ return self
+
+ def parse_bgpdump_update(self, filename):
+ assert os.path.basename(filename).startswith("updates.")
+ for line in self.read_bgpdump(filename):
+ try:
+ pfx = PrefixPDU.from_bgpdump(line, rib_dump = False)
+ except IgnoreThisRecord:
+ continue
+ announce = pfx.announce
+ pfx.announce = 1
+ i = bisect.bisect_left(self, pfx)
+ if announce:
+ if i >= len(self) or pfx != self[i]:
+ self.insert(i, pfx)
+ else:
+ while i < len(self) and pfx.prefix == self[i].prefix and pfx.prefixlen == self[i].prefixlen:
+ del self[i]
+ self.serial = pfx.timestamp
-class AXFRSet(rpki.rtr.generator.AXFRSet):
+def bgpdump_convert_main(args):
+ """
+ * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
+ Simulate route origin data from a set of BGP dump files.
+ argv is an ordered list of filenames. Each file must be a BGP RIB
+ dumps, a BGP UPDATE dumps, or an AXFR dump in the format written by
+ this program's --cronjob command. The first file must be a RIB dump
+ or AXFR dump, it cannot be an UPDATE dump. Output will be a set of
+ AXFR and IXFR files with timestamps derived from the BGP dumps,
+ which can be used as input to this program's --server command for
+ test purposes. SUCH DATA PROVIDE NO SECURITY AT ALL.
+ * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
+ """
+
+ first = True
+ db = None
+ axfrs = []
+ version = max(rpki.rtr.pdus.PDU.version_map.iterkeys())
+
+ for filename in args.files:
+
+ if ".ax.v" in filename:
+ logging.debug("Reading %s", filename)
+ db = AXFRSet.load(filename)
+
+ elif os.path.basename(filename).startswith("ribs."):
+ db = AXFRSet.parse_bgpdump_rib_dump(filename)
+ db.save_axfr()
+
+ elif not first:
+ assert db is not None
+ db.parse_bgpdump_update(filename)
+ db.save_axfr()
- @staticmethod
- def read_bgpdump(filename):
- assert filename.endswith(".bz2")
- logging.debug("Reading %s", filename)
- bunzip2 = subprocess.Popen(("bzip2", "-c", "-d", filename), stdout = subprocess.PIPE)
- bgpdump = subprocess.Popen(("bgpdump", "-m", "-"), stdin = bunzip2.stdout, stdout = subprocess.PIPE)
- return bgpdump.stdout
-
- @classmethod
- def parse_bgpdump_rib_dump(cls, filename):
- assert os.path.basename(filename).startswith("ribs.")
- self = cls()
- self.serial = None
- for line in cls.read_bgpdump(filename):
- try:
- pfx = PrefixPDU.from_bgpdump(line, rib_dump = True)
- except IgnoreThisRecord:
- continue
- self.append(pfx)
- self.serial = pfx.timestamp
- if self.serial is None:
- sys.exit("Failed to parse anything useful from %s" % filename)
- self.sort()
- for i in xrange(len(self) - 2, -1, -1):
- if self[i] == self[i + 1]:
- del self[i + 1]
- return self
-
- def parse_bgpdump_update(self, filename):
- assert os.path.basename(filename).startswith("updates.")
- for line in self.read_bgpdump(filename):
- try:
- pfx = PrefixPDU.from_bgpdump(line, rib_dump = False)
- except IgnoreThisRecord:
- continue
- announce = pfx.announce
- pfx.announce = 1
- i = bisect.bisect_left(self, pfx)
- if announce:
- if i >= len(self) or pfx != self[i]:
- self.insert(i, pfx)
- else:
- while i < len(self) and pfx.prefix == self[i].prefix and pfx.prefixlen == self[i].prefixlen:
- del self[i]
- self.serial = pfx.timestamp
+ else:
+ sys.exit("First argument must be a RIB dump or .ax file, don't know what to do with %s" % filename)
+ logging.debug("DB serial now %d (%s)", db.serial, db.serial)
+ if first and rpki.rtr.server.read_current(version) == (None, None):
+ db.mark_current()
+ first = False
-def bgpdump_convert_main(args):
- """
- * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
- Simulate route origin data from a set of BGP dump files.
- argv is an ordered list of filenames. Each file must be a BGP RIB
- dumps, a BGP UPDATE dumps, or an AXFR dump in the format written by
- this program's --cronjob command. The first file must be a RIB dump
- or AXFR dump, it cannot be an UPDATE dump. Output will be a set of
- AXFR and IXFR files with timestamps derived from the BGP dumps,
- which can be used as input to this program's --server command for
- test purposes. SUCH DATA PROVIDE NO SECURITY AT ALL.
- * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
- """
-
- first = True
- db = None
- axfrs = []
- version = max(rpki.rtr.pdus.PDU.version_map.iterkeys())
-
- for filename in args.files:
-
- if ".ax.v" in filename:
- logging.debug("Reading %s", filename)
- db = AXFRSet.load(filename)
-
- elif os.path.basename(filename).startswith("ribs."):
- db = AXFRSet.parse_bgpdump_rib_dump(filename)
- db.save_axfr()
-
- elif not first:
- assert db is not None
- db.parse_bgpdump_update(filename)
- db.save_axfr()
-
- else:
- sys.exit("First argument must be a RIB dump or .ax file, don't know what to do with %s" % filename)
-
- logging.debug("DB serial now %d (%s)", db.serial, db.serial)
- if first and rpki.rtr.server.read_current(version) == (None, None):
- db.mark_current()
- first = False
-
- for axfr in axfrs:
- logging.debug("Loading %s", axfr)
- ax = AXFRSet.load(axfr)
- logging.debug("Computing changes from %d (%s) to %d (%s)", ax.serial, ax.serial, db.serial, db.serial)
- db.save_ixfr(ax)
- del ax
-
- axfrs.append(db.filename())
+ for axfr in axfrs:
+ logging.debug("Loading %s", axfr)
+ ax = AXFRSet.load(axfr)
+ logging.debug("Computing changes from %d (%s) to %d (%s)", ax.serial, ax.serial, db.serial, db.serial)
+ db.save_ixfr(ax)
+ del ax
+
+ axfrs.append(db.filename())
def bgpdump_select_main(args):
- """
- * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
- Simulate route origin data from a set of BGP dump files.
- Set current serial number to correspond to an .ax file created by
- converting BGP dump files. SUCH DATA PROVIDE NO SECURITY AT ALL.
- * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
- """
+ """
+ * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
+ Simulate route origin data from a set of BGP dump files.
+ Set current serial number to correspond to an .ax file created by
+ converting BGP dump files. SUCH DATA PROVIDE NO SECURITY AT ALL.
+ * DANGER WILL ROBINSON! * DEBUGGING AND TEST USE ONLY! *
+ """
- head, sep, tail = os.path.basename(args.ax_file).partition(".")
- if not head.isdigit() or sep != "." or not tail.startswith("ax.v") or not tail[4:].isdigit():
- sys.exit("Argument must be name of a .ax file")
+ head, sep, tail = os.path.basename(args.ax_file).partition(".")
+ if not head.isdigit() or sep != "." or not tail.startswith("ax.v") or not tail[4:].isdigit():
+ sys.exit("Argument must be name of a .ax file")
- serial = Timestamp(head)
- version = int(tail[4:])
+ serial = Timestamp(head)
+ version = int(tail[4:])
- if version not in rpki.rtr.pdus.PDU.version_map:
- sys.exit("Unknown protocol version %d" % version)
+ if version not in rpki.rtr.pdus.PDU.version_map:
+ sys.exit("Unknown protocol version %d" % version)
- nonce = rpki.rtr.server.read_current(version)[1]
- if nonce is None:
- nonce = rpki.rtr.generator.new_nonce()
+ nonce = rpki.rtr.server.read_current(version)[1]
+ if nonce is None:
+ nonce = rpki.rtr.generator.AXFRSet.new_nonce(force_zero_nonce = False)
- rpki.rtr.server.write_current(serial, nonce, version)
- rpki.rtr.generator.kick_all(serial)
+ rpki.rtr.server.write_current(serial, nonce, version)
+ rpki.rtr.generator.kick_all(serial)
class BGPDumpReplayClock(object):
- """
- Internal clock for replaying BGP dump files.
+ """
+ Internal clock for replaying BGP dump files.
- * DANGER WILL ROBINSON! *
- * DEBUGGING AND TEST USE ONLY! *
+ * DANGER WILL ROBINSON! *
+ * DEBUGGING AND TEST USE ONLY! *
- This class replaces the normal on-disk serial number mechanism with
- an in-memory version based on pre-computed data.
+ This class replaces the normal on-disk serial number mechanism with
+ an in-memory version based on pre-computed data.
- bgpdump_server_main() uses this hack to replay historical data for
- testing purposes. DO NOT USE THIS IN PRODUCTION.
+ bgpdump_server_main() uses this hack to replay historical data for
+ testing purposes. DO NOT USE THIS IN PRODUCTION.
- You have been warned.
- """
+ You have been warned.
+ """
- def __init__(self):
- self.timestamps = [Timestamp(int(f.split(".")[0])) for f in glob.iglob("*.ax.v*")]
- self.timestamps.sort()
- self.offset = self.timestamps[0] - int(time.time())
- self.nonce = rpki.rtr.generator.new_nonce()
+ def __init__(self):
+ self.timestamps = [Timestamp(int(f.split(".")[0])) for f in glob.iglob("*.ax.v*")]
+ self.timestamps.sort()
+ self.offset = self.timestamps[0] - int(time.time())
+ self.nonce = rpki.rtr.generator.AXFRSet.new_nonce(force_zero_nonce = False)
- def __nonzero__(self):
- return len(self.timestamps) > 0
+ def __nonzero__(self):
+ return len(self.timestamps) > 0
- def now(self):
- return Timestamp.now(self.offset)
+ def now(self):
+ return Timestamp.now(self.offset)
- def read_current(self, version):
- now = self.now()
- while len(self.timestamps) > 1 and now >= self.timestamps[1]:
- del self.timestamps[0]
- return self.timestamps[0], self.nonce
+ def read_current(self, version):
+ now = self.now()
+ while len(self.timestamps) > 1 and now >= self.timestamps[1]:
+ del self.timestamps[0]
+ return self.timestamps[0], self.nonce
- def siesta(self):
- now = self.now()
- if len(self.timestamps) <= 1:
- return None
- elif now < self.timestamps[1]:
- return self.timestamps[1] - now
- else:
- return 1
+ def siesta(self):
+ now = self.now()
+ if len(self.timestamps) <= 1:
+ return None
+ elif now < self.timestamps[1]:
+ return self.timestamps[1] - now
+ else:
+ return 1
def bgpdump_server_main(args):
- """
- Simulate route origin data from a set of BGP dump files.
+ """
+ Simulate route origin data from a set of BGP dump files.
+
+ * DANGER WILL ROBINSON! *
+ * DEBUGGING AND TEST USE ONLY! *
+
+ This is a clone of server_main() which replaces the external serial
+ number updates triggered via the kickme channel by cronjob_main with
+ an internal clocking mechanism to replay historical test data.
- * DANGER WILL ROBINSON! *
- * DEBUGGING AND TEST USE ONLY! *
+ DO NOT USE THIS IN PRODUCTION.
- This is a clone of server_main() which replaces the external serial
- number updates triggered via the kickme channel by cronjob_main with
- an internal clocking mechanism to replay historical test data.
+ You have been warned.
+ """
- DO NOT USE THIS IN PRODUCTION.
+ logger = logging.LoggerAdapter(logging.root, dict(connection = rpki.rtr.server.hostport_tag()))
- You have been warned.
- """
+ logger.debug("[Starting]")
- logger = logging.LoggerAdapter(logging.root, dict(connection = rpki.rtr.server._hostport_tag()))
+ if args.rpki_rtr_dir:
+ try:
+ os.chdir(args.rpki_rtr_dir)
+ except OSError, e:
+ sys.exit(e)
- logger.debug("[Starting]")
+ # Yes, this really does replace a global function defined in another
+ # module with a bound method to our clock object. Fun stuff, huh?
+ #
+ clock = BGPDumpReplayClock()
+ rpki.rtr.server.read_current = clock.read_current
- if args.rpki_rtr_dir:
try:
- os.chdir(args.rpki_rtr_dir)
- except OSError, e:
- sys.exit(e)
-
- # Yes, this really does replace a global function defined in another
- # module with a bound method to our clock object. Fun stuff, huh?
- #
- clock = BGPDumpReplayClock()
- rpki.rtr.server.read_current = clock.read_current
-
- try:
- server = rpki.rtr.server.ServerChannel(logger = logger)
- old_serial = server.get_serial()
- logger.debug("[Starting at serial %d (%s)]", old_serial, old_serial)
- while clock:
- new_serial = server.get_serial()
- if old_serial != new_serial:
- logger.debug("[Serial bumped from %d (%s) to %d (%s)]", old_serial, old_serial, new_serial, new_serial)
- server.notify()
- old_serial = new_serial
- asyncore.loop(timeout = clock.siesta(), count = 1)
- except KeyboardInterrupt:
- sys.exit(0)
+ server = rpki.rtr.server.ServerChannel(logger = logger, refresh = args.refresh, retry = args.retry, expire = args.expire)
+ old_serial = server.get_serial()
+ logger.debug("[Starting at serial %d (%s)]", old_serial, old_serial)
+ while clock:
+ new_serial = server.get_serial()
+ if old_serial != new_serial:
+ logger.debug("[Serial bumped from %d (%s) to %d (%s)]", old_serial, old_serial, new_serial, new_serial)
+ server.notify()
+ old_serial = new_serial
+ asyncore.loop(timeout = clock.siesta(), count = 1)
+ except KeyboardInterrupt:
+ sys.exit(0)
def argparse_setup(subparsers):
- """
- Set up argparse stuff for commands in this module.
- """
-
- subparser = subparsers.add_parser("bgpdump-convert", description = bgpdump_convert_main.__doc__,
- help = "Convert bgpdump to fake ROAs")
- subparser.set_defaults(func = bgpdump_convert_main, default_log_to = "syslog")
- subparser.add_argument("files", nargs = "+", help = "input files")
-
- subparser = subparsers.add_parser("bgpdump-select", description = bgpdump_select_main.__doc__,
- help = "Set current serial number for fake ROA data")
- subparser.set_defaults(func = bgpdump_select_main, default_log_to = "syslog")
- subparser.add_argument("ax_file", help = "name of the .ax to select")
-
- subparser = subparsers.add_parser("bgpdump-server", description = bgpdump_server_main.__doc__,
- help = "Replay fake ROAs generated from historical data")
- subparser.set_defaults(func = bgpdump_server_main, default_log_to = "syslog")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
+ """
+ Set up argparse stuff for commands in this module.
+ """
+
+ subparser = subparsers.add_parser("bgpdump-convert", description = bgpdump_convert_main.__doc__,
+ help = "Convert bgpdump to fake ROAs")
+ subparser.set_defaults(func = bgpdump_convert_main, default_log_destination = "syslog")
+ subparser.add_argument("files", nargs = "+", help = "input files")
+
+ subparser = subparsers.add_parser("bgpdump-select", description = bgpdump_select_main.__doc__,
+ help = "Set current serial number for fake ROA data")
+ subparser.set_defaults(func = bgpdump_select_main, default_log_destination = "syslog")
+ subparser.add_argument("ax_file", help = "name of the .ax to select")
+
+ subparser = subparsers.add_parser("bgpdump-server", description = bgpdump_server_main.__doc__,
+ help = "Replay fake ROAs generated from historical data")
+ subparser.set_defaults(func = bgpdump_server_main, default_log_destination = "syslog")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
diff --git a/rpki/rtr/channels.py b/rpki/rtr/channels.py
index d14c024d..a4dccbc1 100644
--- a/rpki/rtr/channels.py
+++ b/rpki/rtr/channels.py
@@ -32,215 +32,217 @@ import rpki.rtr.pdus
class Timestamp(int):
- """
- Wrapper around time module.
- """
-
- def __new__(cls, t):
- # __new__() is a static method, not a class method, hence the odd calling sequence.
- return super(Timestamp, cls).__new__(cls, t)
-
- @classmethod
- def now(cls, delta = 0):
- return cls(time.time() + delta)
-
- def __str__(self):
- return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self))
-
-
-class ReadBuffer(object):
- """
- Wrapper around synchronous/asynchronous read state.
-
- This also handles tracking the current protocol version,
- because it has to go somewhere and there's no better place.
- """
-
- def __init__(self):
- self.buffer = ""
- self.version = None
-
- def update(self, need, callback):
"""
- Update count of needed bytes and callback, then dispatch to callback.
+ Wrapper around time module.
"""
- self.need = need
- self.callback = callback
- return self.retry()
+ def __new__(cls, t):
+ # __new__() is a static method, not a class method, hence the odd calling sequence.
+ return super(Timestamp, cls).__new__(cls, t)
- def retry(self):
- """
- Try dispatching to the callback again.
- """
+ @classmethod
+ def now(cls, delta = 0):
+ return cls(time.time() + delta)
- return self.callback(self)
+ def __str__(self):
+ return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self))
- def available(self):
- """
- How much data do we have available in this buffer?
- """
- return len(self.buffer)
-
- def needed(self):
- """
- How much more data does this buffer need to become ready?
+class ReadBuffer(object):
"""
+ Wrapper around synchronous/asynchronous read state.
- return self.need - self.available()
-
- def ready(self):
- """
- Is this buffer ready to read yet?
+ This also handles tracking the current protocol version,
+ because it has to go somewhere and there's no better place.
"""
- return self.available() >= self.need
+ def __init__(self):
+ self.buffer = ""
+ self.version = None
+ self.need = None
+ self.callback = None
- def get(self, n):
- """
- Hand some data to the caller.
- """
+ def update(self, need, callback):
+ """
+ Update count of needed bytes and callback, then dispatch to callback.
+ """
- b = self.buffer[:n]
- self.buffer = self.buffer[n:]
- return b
+ self.need = need
+ self.callback = callback
+ return self.retry()
- def put(self, b):
- """
- Accumulate some data.
- """
+ def retry(self):
+ """
+ Try dispatching to the callback again.
+ """
- self.buffer += b
+ return self.callback(self)
- def check_version(self, version):
- """
- Track version number of PDUs read from this buffer.
- Once set, the version must not change.
- """
+ def available(self):
+ """
+ How much data do we have available in this buffer?
+ """
- if self.version is not None and version != self.version:
- raise rpki.rtr.pdus.CorruptData(
- "Received PDU version %d, expected %d" % (version, self.version))
- if self.version is None and version not in rpki.rtr.pdus.PDU.version_map:
- raise rpki.rtr.pdus.UnsupportedProtocolVersion(
- "Received PDU version %s, known versions %s" % (
- version, ", ".join(str(v) for v in rpki.rtr.pdus.PDU.version_map)))
- self.version = version
+ return len(self.buffer)
+ def needed(self):
+ """
+ How much more data does this buffer need to become ready?
+ """
-class PDUChannel(asynchat.async_chat, object):
- """
- asynchat subclass that understands our PDUs. This just handles
- network I/O. Specific engines (client, server) should be subclasses
- of this with methods that do something useful with the resulting
- PDUs.
- """
-
- def __init__(self, root_pdu_class, sock = None):
- asynchat.async_chat.__init__(self, sock) # Old-style class, can't use super()
- self.reader = ReadBuffer()
- assert issubclass(root_pdu_class, rpki.rtr.pdus.PDU)
- self.root_pdu_class = root_pdu_class
-
- @property
- def version(self):
- return self.reader.version
-
- @version.setter
- def version(self, version):
- self.reader.check_version(version)
-
- def start_new_pdu(self):
- """
- Start read of a new PDU.
- """
-
- try:
- p = self.root_pdu_class.read_pdu(self.reader)
- while p is not None:
- self.deliver_pdu(p)
- p = self.root_pdu_class.read_pdu(self.reader)
- except rpki.rtr.pdus.PDUException, e:
- self.push_pdu(e.make_error_report(version = self.version))
- self.close_when_done()
- else:
- assert not self.reader.ready()
- self.set_terminator(self.reader.needed())
-
- def collect_incoming_data(self, data):
- """
- Collect data into the read buffer.
- """
-
- self.reader.put(data)
-
- def found_terminator(self):
- """
- Got requested data, see if we now have a PDU. If so, pass it
- along, then restart cycle for a new PDU.
- """
-
- p = self.reader.retry()
- if p is None:
- self.set_terminator(self.reader.needed())
- else:
- self.deliver_pdu(p)
- self.start_new_pdu()
-
- def push_pdu(self, pdu):
- """
- Write PDU to stream.
- """
+ return self.need - self.available()
- try:
- self.push(pdu.to_pdu())
- except OSError, e:
- if e.errno != errno.EAGAIN:
- raise
+ def ready(self):
+ """
+ Is this buffer ready to read yet?
+ """
- def log(self, msg):
- """
- Intercept asyncore's logging.
- """
+ return self.available() >= self.need
- logging.info(msg)
+ def get(self, n):
+ """
+ Hand some data to the caller.
+ """
- def log_info(self, msg, tag = "info"):
- """
- Intercept asynchat's logging.
- """
+ b = self.buffer[:n]
+ self.buffer = self.buffer[n:]
+ return b
- logging.info("asynchat: %s: %s", tag, msg)
+ def put(self, b):
+ """
+ Accumulate some data.
+ """
- def handle_error(self):
- """
- Handle errors caught by asyncore main loop.
- """
+ self.buffer += b
- logging.exception("[Unhandled exception]")
- logging.critical("[Exiting after unhandled exception]")
- sys.exit(1)
+ def check_version(self, version):
+ """
+ Track version number of PDUs read from this buffer.
+ Once set, the version must not change.
+ """
- def init_file_dispatcher(self, fd):
- """
- Kludge to plug asyncore.file_dispatcher into asynchat. Call from
- subclass's __init__() method, after calling
- PDUChannel.__init__(), and don't read this on a full stomach.
- """
+ if self.version is not None and version != self.version:
+ raise rpki.rtr.pdus.CorruptData(
+ "Received PDU version %d, expected %d" % (version, self.version))
+ if self.version is None and version not in rpki.rtr.pdus.PDU.version_map:
+ raise rpki.rtr.pdus.UnsupportedProtocolVersion(
+ "Received PDU version %s, known versions %s" % (
+ version, ", ".join(str(v) for v in rpki.rtr.pdus.PDU.version_map)))
+ self.version = version
- self.connected = True
- self._fileno = fd
- self.socket = asyncore.file_wrapper(fd)
- self.add_channel()
- flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
- flags = flags | os.O_NONBLOCK
- fcntl.fcntl(fd, fcntl.F_SETFL, flags)
- def handle_close(self):
- """
- Exit when channel closed.
+class PDUChannel(asynchat.async_chat, object):
"""
-
- asynchat.async_chat.handle_close(self)
- sys.exit(0)
+ asynchat subclass that understands our PDUs. This just handles
+ network I/O. Specific engines (client, server) should be subclasses
+ of this with methods that do something useful with the resulting
+ PDUs.
+ """
+
+ def __init__(self, root_pdu_class, sock = None):
+ asynchat.async_chat.__init__(self, sock) # Old-style class, can't use super()
+ self.reader = ReadBuffer()
+ assert issubclass(root_pdu_class, rpki.rtr.pdus.PDU)
+ self.root_pdu_class = root_pdu_class
+
+ @property
+ def version(self):
+ return self.reader.version
+
+ @version.setter
+ def version(self, version):
+ self.reader.check_version(version)
+
+ def start_new_pdu(self):
+ """
+ Start read of a new PDU.
+ """
+
+ try:
+ p = self.root_pdu_class.read_pdu(self.reader)
+ while p is not None:
+ self.deliver_pdu(p)
+ p = self.root_pdu_class.read_pdu(self.reader)
+ except rpki.rtr.pdus.PDUException, e:
+ self.push_pdu(e.make_error_report(version = self.version))
+ self.close_when_done()
+ else:
+ assert not self.reader.ready()
+ self.set_terminator(self.reader.needed())
+
+ def collect_incoming_data(self, data):
+ """
+ Collect data into the read buffer.
+ """
+
+ self.reader.put(data)
+
+ def found_terminator(self):
+ """
+ Got requested data, see if we now have a PDU. If so, pass it
+ along, then restart cycle for a new PDU.
+ """
+
+ p = self.reader.retry()
+ if p is None:
+ self.set_terminator(self.reader.needed())
+ else:
+ self.deliver_pdu(p)
+ self.start_new_pdu()
+
+ def push_pdu(self, pdu):
+ """
+ Write PDU to stream.
+ """
+
+ try:
+ self.push(pdu.to_pdu())
+ except OSError, e:
+ if e.errno != errno.EAGAIN:
+ raise
+
+ def log(self, msg):
+ """
+ Intercept asyncore's logging.
+ """
+
+ logging.info(msg)
+
+ def log_info(self, msg, tag = "info"):
+ """
+ Intercept asynchat's logging.
+ """
+
+ logging.info("asynchat: %s: %s", tag, msg)
+
+ def handle_error(self):
+ """
+ Handle errors caught by asyncore main loop.
+ """
+
+ logging.exception("[Unhandled exception]")
+ logging.critical("[Exiting after unhandled exception]")
+ sys.exit(1)
+
+ def init_file_dispatcher(self, fd):
+ """
+ Kludge to plug asyncore.file_dispatcher into asynchat. Call from
+ subclass's __init__() method, after calling
+ PDUChannel.__init__(), and don't read this on a full stomach.
+ """
+
+ self.connected = True
+ self._fileno = fd
+ self.socket = asyncore.file_wrapper(fd)
+ self.add_channel()
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
+ flags = flags | os.O_NONBLOCK
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+ def handle_close(self):
+ """
+ Exit when channel closed.
+ """
+
+ asynchat.async_chat.handle_close(self)
+ sys.exit(0)
diff --git a/rpki/rtr/client.py b/rpki/rtr/client.py
index a35ab81d..a8348087 100644
--- a/rpki/rtr/client.py
+++ b/rpki/rtr/client.py
@@ -37,13 +37,13 @@ from rpki.rtr.channels import Timestamp
class PDU(rpki.rtr.pdus.PDU):
- def consume(self, client):
- """
- Handle results in test client. Default behavior is just to print
- out the PDU; data PDU subclasses may override this.
- """
+ def consume(self, client):
+ """
+ Handle results in test client. Default behavior is just to print
+ out the PDU; data PDU subclasses may override this.
+ """
- logging.debug(self)
+ logging.debug(self)
clone_pdu = rpki.rtr.pdus.clone_pdu_root(PDU)
@@ -52,407 +52,407 @@ clone_pdu = rpki.rtr.pdus.clone_pdu_root(PDU)
@clone_pdu
class SerialNotifyPDU(rpki.rtr.pdus.SerialNotifyPDU):
- def consume(self, client):
- """
- Respond to a SerialNotifyPDU with either a SerialQueryPDU or a
- ResetQueryPDU, depending on what we already know.
- """
+ def consume(self, client):
+ """
+ Respond to a SerialNotifyPDU with either a SerialQueryPDU or a
+ ResetQueryPDU, depending on what we already know.
+ """
- logging.debug(self)
- if client.serial is None or client.nonce != self.nonce:
- client.push_pdu(ResetQueryPDU(version = client.version))
- elif self.serial != client.serial:
- client.push_pdu(SerialQueryPDU(version = client.version,
- serial = client.serial,
- nonce = client.nonce))
- else:
- logging.debug("[Notify did not change serial number, ignoring]")
+ logging.debug(self)
+ if client.serial is None or client.nonce != self.nonce:
+ client.push_pdu(ResetQueryPDU(version = client.version))
+ elif self.serial != client.serial:
+ client.push_pdu(SerialQueryPDU(version = client.version,
+ serial = client.serial,
+ nonce = client.nonce))
+ else:
+ logging.debug("[Notify did not change serial number, ignoring]")
@clone_pdu
class CacheResponsePDU(rpki.rtr.pdus.CacheResponsePDU):
- def consume(self, client):
- """
- Handle CacheResponsePDU.
- """
+ def consume(self, client):
+ """
+ Handle CacheResponsePDU.
+ """
- logging.debug(self)
- if self.nonce != client.nonce:
- logging.debug("[Nonce changed, resetting]")
- client.cache_reset()
+ logging.debug(self)
+ if self.nonce != client.nonce:
+ logging.debug("[Nonce changed, resetting]")
+ client.cache_reset()
@clone_pdu
class EndOfDataPDUv0(rpki.rtr.pdus.EndOfDataPDUv0):
- def consume(self, client):
- """
- Handle EndOfDataPDU response.
- """
+ def consume(self, client):
+ """
+ Handle EndOfDataPDU response.
+ """
- logging.debug(self)
- client.end_of_data(self.version, self.serial, self.nonce, self.refresh, self.retry, self.expire)
+ logging.debug(self)
+ client.end_of_data(self.version, self.serial, self.nonce, self.refresh, self.retry, self.expire)
@clone_pdu
class EndOfDataPDUv1(rpki.rtr.pdus.EndOfDataPDUv1):
- def consume(self, client):
- """
- Handle EndOfDataPDU response.
- """
+ def consume(self, client):
+ """
+ Handle EndOfDataPDU response.
+ """
- logging.debug(self)
- client.end_of_data(self.version, self.serial, self.nonce, self.refresh, self.retry, self.expire)
+ logging.debug(self)
+ client.end_of_data(self.version, self.serial, self.nonce, self.refresh, self.retry, self.expire)
@clone_pdu
class CacheResetPDU(rpki.rtr.pdus.CacheResetPDU):
- def consume(self, client):
- """
- Handle CacheResetPDU response, by issuing a ResetQueryPDU.
- """
+ def consume(self, client):
+ """
+ Handle CacheResetPDU response, by issuing a ResetQueryPDU.
+ """
- logging.debug(self)
- client.cache_reset()
- client.push_pdu(ResetQueryPDU(version = client.version))
+ logging.debug(self)
+ client.cache_reset()
+ client.push_pdu(ResetQueryPDU(version = client.version))
class PrefixPDU(rpki.rtr.pdus.PrefixPDU):
- """
- Object representing one prefix. This corresponds closely to one PDU
- in the rpki-router protocol, so closely that we use lexical ordering
- of the wire format of the PDU as the ordering for this class.
-
- This is a virtual class, but the .from_text() constructor
- instantiates the correct concrete subclass (IPv4PrefixPDU or
- IPv6PrefixPDU) depending on the syntax of its input text.
- """
-
- def consume(self, client):
"""
- Handle one incoming prefix PDU
+ Object representing one prefix. This corresponds closely to one PDU
+ in the rpki-router protocol, so closely that we use lexical ordering
+ of the wire format of the PDU as the ordering for this class.
+
+ This is a virtual class, but the .from_text() constructor
+ instantiates the correct concrete subclass (IPv4PrefixPDU or
+ IPv6PrefixPDU) depending on the syntax of its input text.
"""
- logging.debug(self)
- client.consume_prefix(self)
+ def consume(self, client):
+ """
+ Handle one incoming prefix PDU
+ """
+
+ logging.debug(self)
+ client.consume_prefix(self)
@clone_pdu
class IPv4PrefixPDU(PrefixPDU, rpki.rtr.pdus.IPv4PrefixPDU):
- pass
+ pass
@clone_pdu
class IPv6PrefixPDU(PrefixPDU, rpki.rtr.pdus.IPv6PrefixPDU):
- pass
+ pass
@clone_pdu
class ErrorReportPDU(PDU, rpki.rtr.pdus.ErrorReportPDU):
- pass
+ pass
@clone_pdu
class RouterKeyPDU(rpki.rtr.pdus.RouterKeyPDU):
- """
- Router Key PDU.
- """
-
- def consume(self, client):
"""
- Handle one incoming Router Key PDU
+ Router Key PDU.
"""
- logging.debug(self)
- client.consume_routerkey(self)
+ def consume(self, client):
+ """
+ Handle one incoming Router Key PDU
+ """
+ logging.debug(self)
+ client.consume_routerkey(self)
-class ClientChannel(rpki.rtr.channels.PDUChannel):
- """
- Client protocol engine, handles upcalls from PDUChannel.
- """
-
- serial = None
- nonce = None
- sql = None
- host = None
- port = None
- cache_id = None
- refresh = rpki.rtr.pdus.default_refresh
- retry = rpki.rtr.pdus.default_retry
- expire = rpki.rtr.pdus.default_expire
- updated = Timestamp(0)
-
- def __init__(self, sock, proc, killsig, args, host = None, port = None):
- self.killsig = killsig
- self.proc = proc
- self.args = args
- self.host = args.host if host is None else host
- self.port = args.port if port is None else port
- super(ClientChannel, self).__init__(sock = sock, root_pdu_class = PDU)
- if args.force_version is not None:
- self.version = args.force_version
- self.start_new_pdu()
- if args.sql_database:
- self.setup_sql()
-
- @classmethod
- def ssh(cls, args):
- """
- Set up ssh connection and start listening for first PDU.
- """
- if args.port is None:
- argv = ("ssh", "-s", args.host, "rpki-rtr")
- else:
- argv = ("ssh", "-p", args.port, "-s", args.host, "rpki-rtr")
- logging.debug("[Running ssh: %s]", " ".join(argv))
- s = socket.socketpair()
- return cls(sock = s[1],
- proc = subprocess.Popen(argv, executable = "/usr/bin/ssh",
- stdin = s[0], stdout = s[0], close_fds = True),
- killsig = signal.SIGKILL, args = args)
-
- @classmethod
- def tcp(cls, args):
- """
- Set up TCP connection and start listening for first PDU.
+class ClientChannel(rpki.rtr.channels.PDUChannel):
"""
-
- logging.debug("[Starting raw TCP connection to %s:%s]", args.host, args.port)
- try:
- addrinfo = socket.getaddrinfo(args.host, args.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
- except socket.error, e:
- logging.debug("[socket.getaddrinfo() failed: %s]", e)
- else:
- for ai in addrinfo:
- af, socktype, proto, cn, sa = ai # pylint: disable=W0612
- logging.debug("[Trying addr %s port %s]", sa[0], sa[1])
+ Client protocol engine, handles upcalls from PDUChannel.
+ """
+
+ serial = None
+ nonce = None
+ sql = None
+ host = None
+ port = None
+ cache_id = None
+ refresh = rpki.rtr.pdus.default_refresh
+ retry = rpki.rtr.pdus.default_retry
+ expire = rpki.rtr.pdus.default_expire
+ updated = Timestamp(0)
+
+ def __init__(self, sock, proc, killsig, args, host = None, port = None):
+ self.killsig = killsig
+ self.proc = proc
+ self.args = args
+ self.host = args.host if host is None else host
+ self.port = args.port if port is None else port
+ super(ClientChannel, self).__init__(sock = sock, root_pdu_class = PDU)
+ if args.force_version is not None:
+ self.version = args.force_version
+ self.start_new_pdu()
+ if args.sql_database:
+ self.setup_sql()
+
+ @classmethod
+ def ssh(cls, args):
+ """
+ Set up ssh connection and start listening for first PDU.
+ """
+
+ if args.port is None:
+ argv = ("ssh", "-s", args.host, "rpki-rtr")
+ else:
+ argv = ("ssh", "-p", args.port, "-s", args.host, "rpki-rtr")
+ logging.debug("[Running ssh: %s]", " ".join(argv))
+ s = socket.socketpair()
+ return cls(sock = s[1],
+ proc = subprocess.Popen(argv, executable = "/usr/bin/ssh",
+ stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGKILL, args = args)
+
+ @classmethod
+ def tcp(cls, args):
+ """
+ Set up TCP connection and start listening for first PDU.
+ """
+
+ logging.debug("[Starting raw TCP connection to %s:%s]", args.host, args.port)
try:
- s = socket.socket(af, socktype, proto)
+ addrinfo = socket.getaddrinfo(args.host, args.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
except socket.error, e:
- logging.debug("[socket.socket() failed: %s]", e)
- continue
+ logging.debug("[socket.getaddrinfo() failed: %s]", e)
+ else:
+ for ai in addrinfo:
+ af, socktype, proto, cn, sa = ai # pylint: disable=W0612
+ logging.debug("[Trying addr %s port %s]", sa[0], sa[1])
+ try:
+ s = socket.socket(af, socktype, proto)
+ except socket.error, e:
+ logging.debug("[socket.socket() failed: %s]", e)
+ continue
+ try:
+ s.connect(sa)
+ except socket.error, e:
+ logging.exception("[socket.connect() failed: %s]", e)
+ s.close()
+ continue
+ return cls(sock = s, proc = None, killsig = None, args = args)
+ sys.exit(1)
+
+ @classmethod
+ def loopback(cls, args):
+ """
+ Set up loopback connection and start listening for first PDU.
+ """
+
+ s = socket.socketpair()
+ logging.debug("[Using direct subprocess kludge for testing]")
+ argv = (sys.executable, sys.argv[0], "server")
+ return cls(sock = s[1],
+ proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGINT, args = args,
+ host = args.host or "none", port = args.port or "none")
+
+ @classmethod
+ def tls(cls, args):
+ """
+ Set up TLS connection and start listening for first PDU.
+
+ NB: This uses OpenSSL's "s_client" command, which does not
+ check server certificates properly, so this is not suitable for
+ production use. Fixing this would be a trivial change, it just
+ requires using a client program which does check certificates
+ properly (eg, gnutls-cli, or stunnel's client mode if that works
+ for such purposes this week).
+ """
+
+ argv = ("openssl", "s_client", "-tls1", "-quiet", "-connect", "%s:%s" % (args.host, args.port))
+ logging.debug("[Running: %s]", " ".join(argv))
+ s = socket.socketpair()
+ return cls(sock = s[1],
+ proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGKILL, args = args)
+
+ def setup_sql(self):
+ """
+ Set up an SQLite database to contain the table we receive. If
+ necessary, we will create the database.
+ """
+
+ import sqlite3
+ missing = not os.path.exists(self.args.sql_database)
+ self.sql = sqlite3.connect(self.args.sql_database, detect_types = sqlite3.PARSE_DECLTYPES)
+ self.sql.text_factory = str
+ cur = self.sql.cursor()
+ cur.execute("PRAGMA foreign_keys = on")
+ if missing:
+ cur.execute('''
+ CREATE TABLE cache (
+ cache_id INTEGER PRIMARY KEY NOT NULL,
+ host TEXT NOT NULL,
+ port TEXT NOT NULL,
+ version INTEGER,
+ nonce INTEGER,
+ serial INTEGER,
+ updated INTEGER,
+ refresh INTEGER,
+ retry INTEGER,
+ expire INTEGER,
+ UNIQUE (host, port))''')
+ cur.execute('''
+ CREATE TABLE prefix (
+ cache_id INTEGER NOT NULL
+ REFERENCES cache(cache_id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ asn INTEGER NOT NULL,
+ prefix TEXT NOT NULL,
+ prefixlen INTEGER NOT NULL,
+ max_prefixlen INTEGER NOT NULL,
+ UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''')
+ cur.execute('''
+ CREATE TABLE routerkey (
+ cache_id INTEGER NOT NULL
+ REFERENCES cache(cache_id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ asn INTEGER NOT NULL,
+ ski TEXT NOT NULL,
+ key TEXT NOT NULL,
+ UNIQUE (cache_id, asn, ski),
+ UNIQUE (cache_id, asn, key))''')
+ elif self.args.reset_session:
+ cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (self.host, self.port))
+ cur.execute("SELECT cache_id, version, nonce, serial, refresh, retry, expire, updated "
+ "FROM cache WHERE host = ? AND port = ?",
+ (self.host, self.port))
try:
- s.connect(sa)
- except socket.error, e:
- logging.exception("[socket.connect() failed: %s]", e)
- s.close()
- continue
- return cls(sock = s, proc = None, killsig = None, args = args)
- sys.exit(1)
-
- @classmethod
- def loopback(cls, args):
- """
- Set up loopback connection and start listening for first PDU.
- """
-
- s = socket.socketpair()
- logging.debug("[Using direct subprocess kludge for testing]")
- argv = (sys.executable, sys.argv[0], "server")
- return cls(sock = s[1],
- proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
- killsig = signal.SIGINT, args = args,
- host = args.host or "none", port = args.port or "none")
-
- @classmethod
- def tls(cls, args):
- """
- Set up TLS connection and start listening for first PDU.
-
- NB: This uses OpenSSL's "s_client" command, which does not
- check server certificates properly, so this is not suitable for
- production use. Fixing this would be a trivial change, it just
- requires using a client program which does check certificates
- properly (eg, gnutls-cli, or stunnel's client mode if that works
- for such purposes this week).
- """
-
- argv = ("openssl", "s_client", "-tls1", "-quiet", "-connect", "%s:%s" % (args.host, args.port))
- logging.debug("[Running: %s]", " ".join(argv))
- s = socket.socketpair()
- return cls(sock = s[1],
- proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
- killsig = signal.SIGKILL, args = args)
-
- def setup_sql(self):
- """
- Set up an SQLite database to contain the table we receive. If
- necessary, we will create the database.
- """
-
- import sqlite3
- missing = not os.path.exists(self.args.sql_database)
- self.sql = sqlite3.connect(self.args.sql_database, detect_types = sqlite3.PARSE_DECLTYPES)
- self.sql.text_factory = str
- cur = self.sql.cursor()
- cur.execute("PRAGMA foreign_keys = on")
- if missing:
- cur.execute('''
- CREATE TABLE cache (
- cache_id INTEGER PRIMARY KEY NOT NULL,
- host TEXT NOT NULL,
- port TEXT NOT NULL,
- version INTEGER,
- nonce INTEGER,
- serial INTEGER,
- updated INTEGER,
- refresh INTEGER,
- retry INTEGER,
- expire INTEGER,
- UNIQUE (host, port))''')
- cur.execute('''
- CREATE TABLE prefix (
- cache_id INTEGER NOT NULL
- REFERENCES cache(cache_id)
- ON DELETE CASCADE
- ON UPDATE CASCADE,
- asn INTEGER NOT NULL,
- prefix TEXT NOT NULL,
- prefixlen INTEGER NOT NULL,
- max_prefixlen INTEGER NOT NULL,
- UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''')
- cur.execute('''
- CREATE TABLE routerkey (
- cache_id INTEGER NOT NULL
- REFERENCES cache(cache_id)
- ON DELETE CASCADE
- ON UPDATE CASCADE,
- asn INTEGER NOT NULL,
- ski TEXT NOT NULL,
- key TEXT NOT NULL,
- UNIQUE (cache_id, asn, ski),
- UNIQUE (cache_id, asn, key))''')
- elif self.args.reset_session:
- cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (self.host, self.port))
- cur.execute("SELECT cache_id, version, nonce, serial, refresh, retry, expire, updated "
- "FROM cache WHERE host = ? AND port = ?",
- (self.host, self.port))
- try:
- self.cache_id, version, self.nonce, self.serial, refresh, retry, expire, updated = cur.fetchone()
- if version is not None and self.version is not None and version != self.version:
- cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (self.host, self.port))
- raise TypeError # Simulate lookup failure case
- if version is not None:
- self.version = version
- if refresh is not None:
+ self.cache_id, version, self.nonce, self.serial, refresh, retry, expire, updated = cur.fetchone()
+ if version is not None and self.version is not None and version != self.version:
+ cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (self.host, self.port))
+ raise TypeError # Simulate lookup failure case
+ if version is not None:
+ self.version = version
+ if refresh is not None:
+ self.refresh = refresh
+ if retry is not None:
+ self.retry = retry
+ if expire is not None:
+ self.expire = expire
+ if updated is not None:
+ self.updated = Timestamp(updated)
+ except TypeError:
+ cur.execute("INSERT INTO cache (host, port) VALUES (?, ?)", (self.host, self.port))
+ self.cache_id = cur.lastrowid
+ self.sql.commit()
+ logging.info("[Session %d version %s nonce %s serial %s refresh %s retry %s expire %s updated %s]",
+ self.cache_id, self.version, self.nonce,
+ self.serial, self.refresh, self.retry, self.expire, self.updated)
+
+ def cache_reset(self):
+ """
+ Handle CacheResetPDU actions.
+ """
+
+ self.serial = None
+ if self.sql:
+ cur = self.sql.cursor()
+ cur.execute("DELETE FROM prefix WHERE cache_id = ?", (self.cache_id,))
+ cur.execute("DELETE FROM routerkey WHERE cache_id = ?", (self.cache_id,))
+ cur.execute("UPDATE cache SET version = ?, serial = NULL WHERE cache_id = ?", (self.version, self.cache_id))
+ self.sql.commit()
+
+ def end_of_data(self, version, serial, nonce, refresh, retry, expire):
+ """
+ Handle EndOfDataPDU actions.
+ """
+
+ assert version == self.version
+ self.serial = serial
+ self.nonce = nonce
self.refresh = refresh
- if retry is not None:
- self.retry = retry
- if expire is not None:
- self.expire = expire
- if updated is not None:
- self.updated = Timestamp(updated)
- except TypeError:
- cur.execute("INSERT INTO cache (host, port) VALUES (?, ?)", (self.host, self.port))
- self.cache_id = cur.lastrowid
- self.sql.commit()
- logging.info("[Session %d version %s nonce %s serial %s refresh %s retry %s expire %s updated %s]",
- self.cache_id, self.version, self.nonce,
- self.serial, self.refresh, self.retry, self.expire, self.updated)
-
- def cache_reset(self):
- """
- Handle CacheResetPDU actions.
- """
-
- self.serial = None
- if self.sql:
- cur = self.sql.cursor()
- cur.execute("DELETE FROM prefix WHERE cache_id = ?", (self.cache_id,))
- cur.execute("DELETE FROM routerkey WHERE cache_id = ?", (self.cache_id,))
- cur.execute("UPDATE cache SET version = ?, serial = NULL WHERE cache_id = ?", (self.version, self.cache_id))
- self.sql.commit()
-
- def end_of_data(self, version, serial, nonce, refresh, retry, expire):
- """
- Handle EndOfDataPDU actions.
- """
-
- assert version == self.version
- self.serial = serial
- self.nonce = nonce
- self.refresh = refresh
- self.retry = retry
- self.expire = expire
- self.updated = Timestamp.now()
- if self.sql:
- self.sql.execute("UPDATE cache SET"
- " version = ?, serial = ?, nonce = ?,"
- " refresh = ?, retry = ?, expire = ?,"
- " updated = ? "
- "WHERE cache_id = ?",
- (version, serial, nonce, refresh, retry, expire, int(self.updated), self.cache_id))
- self.sql.commit()
-
- def consume_prefix(self, prefix):
- """
- Handle one prefix PDU.
- """
-
- if self.sql:
- values = (self.cache_id, prefix.asn, str(prefix.prefix), prefix.prefixlen, prefix.max_prefixlen)
- if prefix.announce:
- self.sql.execute("INSERT INTO prefix (cache_id, asn, prefix, prefixlen, max_prefixlen) "
- "VALUES (?, ?, ?, ?, ?)",
- values)
- else:
- self.sql.execute("DELETE FROM prefix "
- "WHERE cache_id = ? AND asn = ? AND prefix = ? AND prefixlen = ? AND max_prefixlen = ?",
- values)
-
- def consume_routerkey(self, routerkey):
- """
- Handle one Router Key PDU.
- """
-
- if self.sql:
- values = (self.cache_id, routerkey.asn,
- base64.urlsafe_b64encode(routerkey.ski).rstrip("="),
- base64.b64encode(routerkey.key))
- if routerkey.announce:
- self.sql.execute("INSERT INTO routerkey (cache_id, asn, ski, key) "
- "VALUES (?, ?, ?, ?)",
- values)
- else:
- self.sql.execute("DELETE FROM routerkey "
- "WHERE cache_id = ? AND asn = ? AND (ski = ? OR key = ?)",
- values)
-
- def deliver_pdu(self, pdu):
- """
- Handle received PDU.
- """
-
- pdu.consume(self)
-
- def push_pdu(self, pdu):
- """
- Log outbound PDU then write it to stream.
- """
-
- logging.debug(pdu)
- super(ClientChannel, self).push_pdu(pdu)
-
- def cleanup(self):
- """
- Force clean up this client's child process. If everything goes
- well, child will have exited already before this method is called,
- but we may need to whack it with a stick if something breaks.
- """
-
- if self.proc is not None and self.proc.returncode is None:
- try:
- os.kill(self.proc.pid, self.killsig)
- except OSError:
- pass
-
- def handle_close(self):
- """
- Intercept close event so we can log it, then shut down.
- """
-
- logging.debug("Server closed channel")
- super(ClientChannel, self).handle_close()
+ self.retry = retry
+ self.expire = expire
+ self.updated = Timestamp.now()
+ if self.sql:
+ self.sql.execute("UPDATE cache SET"
+ " version = ?, serial = ?, nonce = ?,"
+ " refresh = ?, retry = ?, expire = ?,"
+ " updated = ? "
+ "WHERE cache_id = ?",
+ (version, serial, nonce, refresh, retry, expire, int(self.updated), self.cache_id))
+ self.sql.commit()
+
+ def consume_prefix(self, prefix):
+ """
+ Handle one prefix PDU.
+ """
+
+ if self.sql:
+ values = (self.cache_id, prefix.asn, str(prefix.prefix), prefix.prefixlen, prefix.max_prefixlen)
+ if prefix.announce:
+ self.sql.execute("INSERT INTO prefix (cache_id, asn, prefix, prefixlen, max_prefixlen) "
+ "VALUES (?, ?, ?, ?, ?)",
+ values)
+ else:
+ self.sql.execute("DELETE FROM prefix "
+ "WHERE cache_id = ? AND asn = ? AND prefix = ? AND prefixlen = ? AND max_prefixlen = ?",
+ values)
+
+ def consume_routerkey(self, routerkey):
+ """
+ Handle one Router Key PDU.
+ """
+
+ if self.sql:
+ values = (self.cache_id, routerkey.asn,
+ base64.urlsafe_b64encode(routerkey.ski).rstrip("="),
+ base64.b64encode(routerkey.key))
+ if routerkey.announce:
+ self.sql.execute("INSERT INTO routerkey (cache_id, asn, ski, key) "
+ "VALUES (?, ?, ?, ?)",
+ values)
+ else:
+ self.sql.execute("DELETE FROM routerkey "
+ "WHERE cache_id = ? AND asn = ? AND (ski = ? OR key = ?)",
+ values)
+
+ def deliver_pdu(self, pdu):
+ """
+ Handle received PDU.
+ """
+
+ pdu.consume(self)
+
+ def push_pdu(self, pdu):
+ """
+ Log outbound PDU then write it to stream.
+ """
+
+ logging.debug(pdu)
+ super(ClientChannel, self).push_pdu(pdu)
+
+ def cleanup(self):
+ """
+ Force clean up this client's child process. If everything goes
+ well, child will have exited already before this method is called,
+ but we may need to whack it with a stick if something breaks.
+ """
+
+ if self.proc is not None and self.proc.returncode is None:
+ try:
+ os.kill(self.proc.pid, self.killsig)
+ except OSError:
+ pass
+
+ def handle_close(self):
+ """
+ Intercept close event so we can log it, then shut down.
+ """
+
+ logging.debug("Server closed channel")
+ super(ClientChannel, self).handle_close()
# Hack to let us subclass this from scripts without needing to rewrite client_main().
@@ -460,73 +460,73 @@ class ClientChannel(rpki.rtr.channels.PDUChannel):
ClientChannelClass = ClientChannel
def client_main(args):
- """
- Test client, intended primarily for debugging.
- """
+ """
+ Test client, intended primarily for debugging.
+ """
- logging.debug("[Startup]")
+ logging.debug("[Startup]")
- assert issubclass(ClientChannelClass, ClientChannel)
- constructor = getattr(ClientChannelClass, args.protocol)
+ assert issubclass(ClientChannelClass, ClientChannel)
+ constructor = getattr(ClientChannelClass, args.protocol)
- client = None
- try:
- client = constructor(args)
+ client = None
+ try:
+ client = constructor(args)
- polled = client.updated
- wakeup = None
+ polled = client.updated
+ wakeup = None
- while True:
+ while True:
- now = Timestamp.now()
+ now = Timestamp.now()
- if client.serial is not None and now > client.updated + client.expire:
- logging.info("[Expiring client data: serial %s, last updated %s, expire %s]",
- client.serial, client.updated, client.expire)
- client.cache_reset()
+ if client.serial is not None and now > client.updated + client.expire:
+ logging.info("[Expiring client data: serial %s, last updated %s, expire %s]",
+ client.serial, client.updated, client.expire)
+ client.cache_reset()
- if client.serial is None or client.nonce is None:
- polled = now
- client.push_pdu(ResetQueryPDU(version = client.version))
+ if client.serial is None or client.nonce is None:
+ polled = now
+ client.push_pdu(ResetQueryPDU(version = client.version))
- elif now >= client.updated + client.refresh:
- polled = now
- client.push_pdu(SerialQueryPDU(version = client.version,
- serial = client.serial,
- nonce = client.nonce))
+ elif now >= client.updated + client.refresh:
+ polled = now
+ client.push_pdu(SerialQueryPDU(version = client.version,
+ serial = client.serial,
+ nonce = client.nonce))
- remaining = 1
+ remaining = 1
- while remaining > 0:
- now = Timestamp.now()
- timer = client.retry if (now >= client.updated + client.refresh) else client.refresh
- wokeup = wakeup
- wakeup = max(now, Timestamp(max(polled, client.updated) + timer))
- remaining = wakeup - now
- if wakeup != wokeup:
- logging.info("[Last client poll %s, next %s]", polled, wakeup)
- asyncore.loop(timeout = remaining, count = 1)
+ while remaining > 0:
+ now = Timestamp.now()
+ timer = client.retry if (now >= client.updated + client.refresh) else client.refresh
+ wokeup = wakeup
+ wakeup = max(now, Timestamp(max(polled, client.updated) + timer))
+ remaining = wakeup - now
+ if wakeup != wokeup:
+ logging.info("[Last client poll %s, next %s]", polled, wakeup)
+ asyncore.loop(timeout = remaining, count = 1)
- except KeyboardInterrupt:
- sys.exit(0)
+ except KeyboardInterrupt:
+ sys.exit(0)
- finally:
- if client is not None:
- client.cleanup()
+ finally:
+ if client is not None:
+ client.cleanup()
def argparse_setup(subparsers):
- """
- Set up argparse stuff for commands in this module.
- """
-
- subparser = subparsers.add_parser("client", description = client_main.__doc__,
- help = "Test client for RPKI-RTR protocol")
- subparser.set_defaults(func = client_main, default_log_to = "stderr")
- subparser.add_argument("--sql-database", help = "filename for sqlite3 database of client state")
- subparser.add_argument("--force-version", type = int, choices = PDU.version_map, help = "force specific protocol version")
- subparser.add_argument("--reset-session", action = "store_true", help = "reset any existing session found in sqlite3 database")
- subparser.add_argument("protocol", choices = ("loopback", "tcp", "ssh", "tls"), help = "connection protocol")
- subparser.add_argument("host", nargs = "?", help = "server host")
- subparser.add_argument("port", nargs = "?", help = "server port")
- return subparser
+ """
+ Set up argparse stuff for commands in this module.
+ """
+
+ subparser = subparsers.add_parser("client", description = client_main.__doc__,
+ help = "Test client for RPKI-RTR protocol")
+ subparser.set_defaults(func = client_main, default_log_destination = "stderr")
+ subparser.add_argument("--sql-database", help = "filename for sqlite3 database of client state")
+ subparser.add_argument("--force-version", type = int, choices = PDU.version_map, help = "force specific protocol version")
+ subparser.add_argument("--reset-session", action = "store_true", help = "reset any existing session found in sqlite3 database")
+ subparser.add_argument("protocol", choices = ("loopback", "tcp", "ssh", "tls"), help = "connection protocol")
+ subparser.add_argument("host", nargs = "?", help = "server host")
+ subparser.add_argument("port", nargs = "?", help = "server port")
+ return subparser
diff --git a/rpki/rtr/generator.py b/rpki/rtr/generator.py
index 26e25b6e..4536de30 100644
--- a/rpki/rtr/generator.py
+++ b/rpki/rtr/generator.py
@@ -36,540 +36,553 @@ import rpki.rtr.server
from rpki.rtr.channels import Timestamp
-class PrefixPDU(rpki.rtr.pdus.PrefixPDU):
- """
- Object representing one prefix. This corresponds closely to one PDU
- in the rpki-router protocol, so closely that we use lexical ordering
- of the wire format of the PDU as the ordering for this class.
-
- This is a virtual class, but the .from_text() constructor
- instantiates the correct concrete subclass (IPv4PrefixPDU or
- IPv6PrefixPDU) depending on the syntax of its input text.
- """
-
- @staticmethod
- def from_text(version, asn, addr):
- """
- Construct a prefix from its text form.
- """
+from rpki.rcynicdb.iterator import authenticated_objects
- cls = IPv6PrefixPDU if ":" in addr else IPv4PrefixPDU
- self = cls(version = version)
- self.asn = long(asn)
- p, l = addr.split("/")
- self.prefix = rpki.POW.IPAddress(p)
- if "-" in l:
- self.prefixlen, self.max_prefixlen = tuple(int(i) for i in l.split("-"))
- else:
- self.prefixlen = self.max_prefixlen = int(l)
- self.announce = 1
- self.check()
- return self
-
- @staticmethod
- def from_roa(version, asn, prefix_tuple):
- """
- Construct a prefix from a ROA.
+class PrefixPDU(rpki.rtr.pdus.PrefixPDU):
"""
-
- address, length, maxlength = prefix_tuple
- cls = IPv6PrefixPDU if address.version == 6 else IPv4PrefixPDU
- self = cls(version = version)
- self.asn = asn
- self.prefix = address
- self.prefixlen = length
- self.max_prefixlen = length if maxlength is None else maxlength
- self.announce = 1
- self.check()
- return self
+ Object representing one prefix. This corresponds closely to one PDU
+ in the rpki-router protocol, so closely that we use lexical ordering
+ of the wire format of the PDU as the ordering for this class.
+
+ This is a virtual class, but the .from_text() constructor
+ instantiates the correct concrete subclass (IPv4PrefixPDU or
+ IPv6PrefixPDU) depending on the syntax of its input text.
+ """
+
+ @staticmethod
+ def from_text(version, asn, addr):
+ """
+ Construct a prefix from its text form.
+ """
+
+ cls = IPv6PrefixPDU if ":" in addr else IPv4PrefixPDU
+ self = cls(version = version)
+ self.asn = long(asn)
+ p, l = addr.split("/")
+ self.prefix = rpki.POW.IPAddress(p)
+ if "-" in l:
+ self.prefixlen, self.max_prefixlen = tuple(int(i) for i in l.split("-"))
+ else:
+ self.prefixlen = self.max_prefixlen = int(l)
+ self.announce = 1
+ self.check()
+ return self
+
+ @staticmethod
+ def from_roa(version, asn, prefix_tuple):
+ """
+ Construct a prefix from a ROA.
+ """
+
+ address, length, maxlength = prefix_tuple
+ cls = IPv6PrefixPDU if address.version == 6 else IPv4PrefixPDU
+ self = cls(version = version)
+ self.asn = asn
+ self.prefix = address
+ self.prefixlen = length
+ self.max_prefixlen = length if maxlength is None else maxlength
+ self.announce = 1
+ self.check()
+ return self
class IPv4PrefixPDU(PrefixPDU):
- """
- IPv4 flavor of a prefix.
- """
+ """
+ IPv4 flavor of a prefix.
+ """
- pdu_type = 4
- address_byte_count = 4
+ pdu_type = 4
+ address_byte_count = 4
class IPv6PrefixPDU(PrefixPDU):
- """
- IPv6 flavor of a prefix.
- """
-
- pdu_type = 6
- address_byte_count = 16
-
-class RouterKeyPDU(rpki.rtr.pdus.RouterKeyPDU):
- """
- Router Key PDU.
- """
-
- @classmethod
- def from_text(cls, version, asn, gski, key):
"""
- Construct a router key from its text form.
+ IPv6 flavor of a prefix.
"""
- self = cls(version = version)
- self.asn = long(asn)
- self.ski = base64.urlsafe_b64decode(gski + "=")
- self.key = base64.b64decode(key)
- self.announce = 1
- self.check()
- return self
+ pdu_type = 6
+ address_byte_count = 16
- @classmethod
- def from_certificate(cls, version, asn, ski, key):
+class RouterKeyPDU(rpki.rtr.pdus.RouterKeyPDU):
"""
- Construct a router key from a certificate.
+ Router Key PDU.
"""
- self = cls(version = version)
- self.asn = asn
- self.ski = ski
- self.key = key
- self.announce = 1
- self.check()
- return self
+ announce = None
+ ski = None
+ asn = None
+ key = None
+ @classmethod
+ def from_text(cls, version, asn, gski, key):
+ """
+ Construct a router key from its text form.
+ """
-class ROA(rpki.POW.ROA): # pylint: disable=W0232
- """
- Minor additions to rpki.POW.ROA.
- """
-
- @classmethod
- def derReadFile(cls, fn): # pylint: disable=E1002
- self = super(ROA, cls).derReadFile(fn)
- self.extractWithoutVerifying()
- return self
-
- @property
- def prefixes(self):
- v4, v6 = self.getPrefixes()
- if v4 is not None:
- for p in v4:
- yield p
- if v6 is not None:
- for p in v6:
- yield p
+ self = cls(version = version)
+ self.asn = long(asn)
+ self.ski = base64.urlsafe_b64decode(gski + "=")
+ self.key = base64.b64decode(key)
+ self.announce = 1
+ self.check()
+ return self
-class X509(rpki.POW.X509): # pylint: disable=W0232
- """
- Minor additions to rpki.POW.X509.
- """
+ @classmethod
+ def from_certificate(cls, version, asn, ski, key):
+ """
+ Construct a router key from a certificate.
+ """
- @property
- def asns(self):
- resources = self.getRFC3779()
- if resources is not None and resources[0] is not None:
- for min_asn, max_asn in resources[0]:
- for asn in xrange(min_asn, max_asn + 1):
- yield asn
+ self = cls(version = version)
+ self.asn = asn
+ self.ski = ski
+ self.key = key
+ self.announce = 1
+ self.check()
+ return self
-class PDUSet(list):
- """
- Object representing a set of PDUs, that is, one versioned and
- (theoretically) consistant set of prefixes and router keys extracted
- from rcynic's output.
- """
-
- def __init__(self, version):
- assert version in rpki.rtr.pdus.PDU.version_map
- super(PDUSet, self).__init__()
- self.version = version
-
- @classmethod
- def _load_file(cls, filename, version):
+class ROA(rpki.POW.ROA): # pylint: disable=W0232
"""
- Low-level method to read PDUSet from a file.
+ Minor additions to rpki.POW.ROA.
"""
- self = cls(version = version)
- f = open(filename, "rb")
- r = rpki.rtr.channels.ReadBuffer()
- while True:
- p = rpki.rtr.pdus.PDU.read_pdu(r)
- while p is None:
- b = f.read(r.needed())
- if b == "":
- assert r.available() == 0
- return self
- r.put(b)
- p = r.retry()
- assert p.version == self.version
- self.append(p)
-
- @staticmethod
- def seq_ge(a, b):
- return ((a - b) % (1 << 32)) < (1 << 31)
+ @classmethod
+ def derReadFile(cls, fn):
+ # pylint: disable=E1002
+ self = super(ROA, cls).derReadFile(fn)
+ self.extractWithoutVerifying()
+ return self
+ @property
+ def prefixes(self):
+ v4, v6 = self.getPrefixes() # pylint: disable=E1101
+ if v4 is not None:
+ for p in v4:
+ yield p
+ if v6 is not None:
+ for p in v6:
+ yield p
-class AXFRSet(PDUSet):
- """
- Object representing a complete set of PDUs, that is, one versioned
- and (theoretically) consistant set of prefixes and router
- certificates extracted from rcynic's output, all with the announce
- field set.
- """
-
- @classmethod
- def parse_rcynic(cls, rcynic_dir, version, scan_roas = None, scan_routercerts = None):
+class X509(rpki.POW.X509): # pylint: disable=W0232
"""
- Parse ROAS and router certificates fetched (and validated!) by
- rcynic to create a new AXFRSet.
-
- In normal operation, we use os.walk() and the rpki.POW library to
- parse these data directly, but we can, if so instructed, use
- external programs instead, for testing, simulation, or to provide
- a way to inject local data.
-
- At some point the ability to parse these data from external
- programs may move to a separate constructor function, so that we
- can make this one a bit simpler and faster.
+ Minor additions to rpki.POW.X509.
"""
- self = cls(version = version)
- self.serial = rpki.rtr.channels.Timestamp.now()
-
- include_routercerts = RouterKeyPDU.pdu_type in rpki.rtr.pdus.PDU.version_map[version]
-
- if scan_roas is None or (scan_routercerts is None and include_routercerts):
- for root, dirs, files in os.walk(rcynic_dir): # pylint: disable=W0612
- for fn in files:
- if scan_roas is None and fn.endswith(".roa"):
- roa = ROA.derReadFile(os.path.join(root, fn))
- asn = roa.getASID()
- self.extend(PrefixPDU.from_roa(version = version, asn = asn, prefix_tuple = prefix_tuple)
- for prefix_tuple in roa.prefixes)
- if include_routercerts and scan_routercerts is None and fn.endswith(".cer"):
- x = X509.derReadFile(os.path.join(root, fn))
- eku = x.getEKU()
- if eku is not None and rpki.oids.id_kp_bgpsec_router in eku:
- ski = x.getSKI()
- key = x.getPublicKey().derWritePublic()
- self.extend(RouterKeyPDU.from_certificate(version = version, asn = asn, ski = ski, key = key)
- for asn in x.asns)
-
- if scan_roas is not None:
- try:
- p = subprocess.Popen((scan_roas, rcynic_dir), stdout = subprocess.PIPE)
- for line in p.stdout:
- line = line.split()
- asn = line[1]
- self.extend(PrefixPDU.from_text(version = version, asn = asn, addr = addr)
- for addr in line[2:])
- except OSError, e:
- sys.exit("Could not run %s: %s" % (scan_roas, e))
-
- if include_routercerts and scan_routercerts is not None:
- try:
- p = subprocess.Popen((scan_routercerts, rcynic_dir), stdout = subprocess.PIPE)
- for line in p.stdout:
- line = line.split()
- gski = line[0]
- key = line[-1]
- self.extend(RouterKeyPDU.from_text(version = version, asn = asn, gski = gski, key = key)
- for asn in line[1:-1])
- except OSError, e:
- sys.exit("Could not run %s: %s" % (scan_routercerts, e))
-
- self.sort()
- for i in xrange(len(self) - 2, -1, -1):
- if self[i] == self[i + 1]:
- del self[i + 1]
- return self
-
- @classmethod
- def load(cls, filename):
- """
- Load an AXFRSet from a file, parse filename to obtain version and serial.
- """
+ @property
+ def asns(self):
+ resources = self.getRFC3779() # pylint: disable=E1101
+ if resources is not None and resources[0] is not None:
+ for min_asn, max_asn in resources[0]:
+ for asn in xrange(min_asn, max_asn + 1):
+ yield asn
- fn1, fn2, fn3 = os.path.basename(filename).split(".")
- assert fn1.isdigit() and fn2 == "ax" and fn3.startswith("v") and fn3[1:].isdigit()
- version = int(fn3[1:])
- self = cls._load_file(filename, version)
- self.serial = rpki.rtr.channels.Timestamp(fn1)
- return self
- def filename(self):
- """
- Generate filename for this AXFRSet.
+class PDUSet(list):
"""
+ Object representing a set of PDUs, that is, one versioned and
+ (theoretically) consistent set of prefixes and router keys extracted
+ from rcynic's output.
+ """
+
+ def __init__(self, version):
+ assert version in rpki.rtr.pdus.PDU.version_map
+ super(PDUSet, self).__init__()
+ self.version = version
+
+ @classmethod
+ def _load_file(cls, filename, version):
+ """
+ Low-level method to read PDUSet from a file.
+ """
+
+ self = cls(version = version)
+ f = open(filename, "rb")
+ r = rpki.rtr.channels.ReadBuffer()
+ while True:
+ p = rpki.rtr.pdus.PDU.read_pdu(r)
+ while p is None:
+ b = f.read(r.needed())
+ if b == "":
+ assert r.available() == 0
+ return self
+ r.put(b)
+ p = r.retry()
+ assert p.version == self.version
+ self.append(p)
+
+ @staticmethod
+ def seq_ge(a, b):
+ return ((a - b) % (1 << 32)) < (1 << 31)
- return "%d.ax.v%d" % (self.serial, self.version)
- @classmethod
- def load_current(cls, version):
- """
- Load current AXFRSet. Return None if can't.
+class AXFRSet(PDUSet):
"""
+ Object representing a complete set of PDUs, that is, one versioned
+ and (theoretically) consistent set of prefixes and router
+ certificates extracted from rcynic's output, all with the announce
+ field set.
+ """
+
+ class_map = dict(cer = X509, roa = ROA)
+
+ serial = None
+
+ @classmethod
+ def parse_rcynic(cls, rcynic_dir, version, scan_roas = None, scan_routercerts = None):
+ """
+ Parse ROAs and router certificates fetched (and validated!) by
+ rcynic to create a new AXFRSet.
+
+ In normal operation, we parse these data directly from whatever rcynic is using
+ as a validator this week, but we can, if so instructed, use external programs
+ instead, for testing, simulation, or to provide a way to inject local data.
+
+ At some point the ability to parse these data from external
+ programs may move to a separate constructor function, so that we
+ can make this one a bit simpler and faster.
+ """
+
+ self = cls(version = version)
+ self.serial = rpki.rtr.channels.Timestamp.now()
+
+ include_routercerts = RouterKeyPDU.pdu_type in rpki.rtr.pdus.PDU.version_map[version]
+
+ if scan_roas is None:
+ for uri, roa in authenticated_objects(rcynic_dir, uri_suffix = ".roa", class_map = self.class_map):
+ roa.extractWithoutVerifying()
+ asn = roa.getASID()
+ self.extend(PrefixPDU.from_roa(version = version, asn = asn, prefix_tuple = prefix_tuple)
+ for prefix_tuple in roa.prefixes)
+
+ if scan_routercerts is None and include_routercerts:
+ for uri, cer in authenticated_objects(rcynic_dir, uri_suffix = ".cer", class_map = self.class_map):
+ eku = cer.getEKU()
+ if eku is not None and rpki.oids.id_kp_bgpsec_router in eku:
+ ski = cer.getSKI()
+ key = cer.getPublicKey().derWritePublic()
+ self.extend(RouterKeyPDU.from_certificate(version = version, asn = asn, ski = ski, key = key)
+ for asn in cer.asns)
+
+ if scan_roas is not None:
+ try:
+ p = subprocess.Popen((scan_roas, rcynic_dir), stdout = subprocess.PIPE)
+ for line in p.stdout:
+ line = line.split()
+ asn = line[1]
+ self.extend(PrefixPDU.from_text(version = version, asn = asn, addr = addr)
+ for addr in line[2:])
+ except OSError, e:
+ sys.exit("Could not run %s: %s" % (scan_roas, e))
+
+ if include_routercerts and scan_routercerts is not None:
+ try:
+ p = subprocess.Popen((scan_routercerts, rcynic_dir), stdout = subprocess.PIPE)
+ for line in p.stdout:
+ line = line.split()
+ gski = line[0]
+ key = line[-1]
+ self.extend(RouterKeyPDU.from_text(version = version, asn = asn, gski = gski, key = key)
+ for asn in line[1:-1])
+ except OSError, e:
+ sys.exit("Could not run %s: %s" % (scan_routercerts, e))
+
+ self.sort()
+ for i in xrange(len(self) - 2, -1, -1):
+ if self[i] == self[i + 1]:
+ del self[i + 1]
+ return self
+
+ @classmethod
+ def load(cls, filename):
+ """
+ Load an AXFRSet from a file, parse filename to obtain version and serial.
+ """
+
+ fn1, fn2, fn3 = os.path.basename(filename).split(".")
+ assert fn1.isdigit() and fn2 == "ax" and fn3.startswith("v") and fn3[1:].isdigit()
+ version = int(fn3[1:])
+ self = cls._load_file(filename, version)
+ self.serial = rpki.rtr.channels.Timestamp(fn1)
+ return self
+
+ def filename(self):
+ """
+ Generate filename for this AXFRSet.
+ """
+
+ return "%d.ax.v%d" % (self.serial, self.version)
+
+ @classmethod
+ def load_current(cls, version):
+ """
+ Load current AXFRSet. Return None if can't.
+ """
+
+ serial = rpki.rtr.server.read_current(version)[0]
+ if serial is None:
+ return None
+ try:
+ return cls.load("%d.ax.v%d" % (serial, version))
+ except IOError:
+ return None
+
+ def save_axfr(self):
+ """
+ Write AXFRSet to file with magic filename.
+ """
+
+ f = open(self.filename(), "wb")
+ for p in self:
+ f.write(p.to_pdu())
+ f.close()
+
+ def destroy_old_data(self):
+ """
+ Destroy old data files, presumably because our nonce changed and
+ the old serial numbers are no longer valid.
+ """
+
+ for i in glob.iglob("*.ix.*.v%d" % self.version):
+ os.unlink(i)
+ for i in glob.iglob("*.ax.v%d" % self.version):
+ if i != self.filename():
+ os.unlink(i)
+
+ @staticmethod
+ def new_nonce(force_zero_nonce):
+ """
+ Create and return a new nonce value.
+ """
+
+ if force_zero_nonce:
+ return 0
+ try:
+ return int(random.SystemRandom().getrandbits(16))
+ except NotImplementedError:
+ return int(random.getrandbits(16))
+
+ def mark_current(self, force_zero_nonce = False):
+ """
+ Save current serial number and nonce, creating new nonce if
+ necessary. Creating a new nonce triggers cleanup of old state, as
+ the new nonce invalidates all old serial numbers.
+ """
+
+ assert self.version in rpki.rtr.pdus.PDU.version_map
+ old_serial, nonce = rpki.rtr.server.read_current(self.version)
+ if old_serial is None or self.seq_ge(old_serial, self.serial):
+ logging.debug("Creating new nonce and deleting stale data")
+ nonce = self.new_nonce(force_zero_nonce)
+ self.destroy_old_data()
+ rpki.rtr.server.write_current(self.serial, nonce, self.version)
+
+ def save_ixfr(self, other):
+ """
+ Compare this AXFRSet with an older one and write the resulting
+ IXFRSet to file with magic filename. Since we store PDUSets
+ in sorted order, computing the difference is a trivial linear
+ comparison.
+ """
+
+ f = open("%d.ix.%d.v%d" % (self.serial, other.serial, self.version), "wb")
+ old = other
+ new = self
+ len_old = len(old)
+ len_new = len(new)
+ i_old = i_new = 0
+ while i_old < len_old and i_new < len_new:
+ if old[i_old] < new[i_new]:
+ f.write(old[i_old].to_pdu(announce = 0))
+ i_old += 1
+ elif old[i_old] > new[i_new]:
+ f.write(new[i_new].to_pdu(announce = 1))
+ i_new += 1
+ else:
+ i_old += 1
+ i_new += 1
+ for i in xrange(i_old, len_old):
+ f.write(old[i].to_pdu(announce = 0))
+ for i in xrange(i_new, len_new):
+ f.write(new[i].to_pdu(announce = 1))
+ f.close()
+
+ def show(self):
+ """
+ Print this AXFRSet.
+ """
+
+ logging.debug("# AXFR %d (%s) v%d", self.serial, self.serial, self.version)
+ for p in self:
+ logging.debug(p)
- serial = rpki.rtr.server.read_current(version)[0]
- if serial is None:
- return None
- try:
- return cls.load("%d.ax.v%d" % (serial, version))
- except IOError:
- return None
- def save_axfr(self):
+class IXFRSet(PDUSet):
"""
- Write AXFRSet to file with magic filename.
+ Object representing an incremental set of PDUs, that is, the
+ differences between one versioned and (theoretically) consistent set
+ of prefixes and router certificates extracted from rcynic's output
+ and another, with the announce fields set or cleared as necessary to
+ indicate the changes.
"""
- f = open(self.filename(), "wb")
- for p in self:
- f.write(p.to_pdu())
- f.close()
+ from_serial = None
+ to_serial = None
- def destroy_old_data(self):
- """
- Destroy old data files, presumably because our nonce changed and
- the old serial numbers are no longer valid.
- """
+ @classmethod
+ def load(cls, filename):
+ """
+ Load an IXFRSet from a file, parse filename to obtain version and serials.
+ """
- for i in glob.iglob("*.ix.*.v%d" % self.version):
- os.unlink(i)
- for i in glob.iglob("*.ax.v%d" % self.version):
- if i != self.filename():
- os.unlink(i)
+ fn1, fn2, fn3, fn4 = os.path.basename(filename).split(".")
+ assert fn1.isdigit() and fn2 == "ix" and fn3.isdigit() and fn4.startswith("v") and fn4[1:].isdigit()
+ version = int(fn4[1:])
+ self = cls._load_file(filename, version)
+ self.from_serial = rpki.rtr.channels.Timestamp(fn3)
+ self.to_serial = rpki.rtr.channels.Timestamp(fn1)
+ return self
- @staticmethod
- def new_nonce(force_zero_nonce):
- """
- Create and return a new nonce value.
- """
+ def filename(self):
+ """
+ Generate filename for this IXFRSet.
+ """
- if force_zero_nonce:
- return 0
- try:
- return int(random.SystemRandom().getrandbits(16))
- except NotImplementedError:
- return int(random.getrandbits(16))
+ return "%d.ix.%d.v%d" % (self.to_serial, self.from_serial, self.version)
- def mark_current(self, force_zero_nonce = False):
- """
- Save current serial number and nonce, creating new nonce if
- necessary. Creating a new nonce triggers cleanup of old state, as
- the new nonce invalidates all old serial numbers.
- """
+ def show(self):
+ """
+ Print this IXFRSet.
+ """
- assert self.version in rpki.rtr.pdus.PDU.version_map
- old_serial, nonce = rpki.rtr.server.read_current(self.version)
- if old_serial is None or self.seq_ge(old_serial, self.serial):
- logging.debug("Creating new nonce and deleting stale data")
- nonce = self.new_nonce(force_zero_nonce)
- self.destroy_old_data()
- rpki.rtr.server.write_current(self.serial, nonce, self.version)
+ logging.debug("# IXFR %d (%s) -> %d (%s) v%d",
+ self.from_serial, self.from_serial,
+ self.to_serial, self.to_serial,
+ self.version)
+ for p in self:
+ logging.debug(p)
- def save_ixfr(self, other):
- """
- Comparing this AXFRSet with an older one and write the resulting
- IXFRSet to file with magic filename. Since we store PDUSets
- in sorted order, computing the difference is a trivial linear
- comparison.
- """
- f = open("%d.ix.%d.v%d" % (self.serial, other.serial, self.version), "wb")
- old = other
- new = self
- len_old = len(old)
- len_new = len(new)
- i_old = i_new = 0
- while i_old < len_old and i_new < len_new:
- if old[i_old] < new[i_new]:
- f.write(old[i_old].to_pdu(announce = 0))
- i_old += 1
- elif old[i_old] > new[i_new]:
- f.write(new[i_new].to_pdu(announce = 1))
- i_new += 1
- else:
- i_old += 1
- i_new += 1
- for i in xrange(i_old, len_old):
- f.write(old[i].to_pdu(announce = 0))
- for i in xrange(i_new, len_new):
- f.write(new[i].to_pdu(announce = 1))
- f.close()
-
- def show(self):
+def kick_all(serial):
"""
- Print this AXFRSet.
+ Kick any existing server processes to wake them up.
"""
- logging.debug("# AXFR %d (%s) v%d", self.serial, self.serial, self.version)
- for p in self:
- logging.debug(p)
+ try:
+ os.stat(rpki.rtr.server.kickme_dir)
+ except OSError:
+ logging.debug('# Creating directory "%s"', rpki.rtr.server.kickme_dir)
+ os.makedirs(rpki.rtr.server.kickme_dir)
+
+ msg = "Good morning, serial %d is ready" % serial
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ for name in glob.iglob("%s.*" % rpki.rtr.server.kickme_base):
+ try:
+ logging.debug("# Kicking %s", name)
+ sock.sendto(msg, name)
+ except socket.error:
+ try:
+ logging.exception("# Failed to kick %s, probably dead socket, attempting cleanup", name)
+ os.unlink(name)
+ except Exception, e:
+ logging.exception("# Couldn't unlink suspected dead socket %s: %s", name, e)
+ except Exception, e:
+ logging.warning("# Failed to kick %s and don't understand why: %s", name, e)
+ sock.close()
-class IXFRSet(PDUSet):
- """
- Object representing an incremental set of PDUs, that is, the
- differences between one versioned and (theoretically) consistant set
- of prefixes and router certificates extracted from rcynic's output
- and another, with the announce fields set or cleared as necessary to
- indicate the changes.
- """
-
- @classmethod
- def load(cls, filename):
- """
- Load an IXFRSet from a file, parse filename to obtain version and serials.
+def cronjob_main(args):
"""
+ Run this right after running rcynic to wade through the ROAs and
+ router certificates that rcynic collects and translate that data
+ into the form used in the rpki-router protocol. Output is an
+ updated database containing both full dumps (AXFR) and incremental
+ dumps against a specific prior version (IXFR). After updating the
+ database, kicks any active servers, so that they can notify their
+ clients that a new version is available.
+ """
+
+ if args.rpki_rtr_dir:
+ try:
+ if not os.path.isdir(args.rpki_rtr_dir):
+ os.makedirs(args.rpki_rtr_dir)
+ os.chdir(args.rpki_rtr_dir)
+ except OSError, e:
+ logging.critical(str(e))
+ sys.exit(1)
+
+ for version in sorted(rpki.rtr.server.PDU.version_map.iterkeys(), reverse = True):
+
+ logging.debug("# Generating updates for protocol version %d", version)
+
+ old_ixfrs = glob.glob("*.ix.*.v%d" % version)
+
+ current = rpki.rtr.server.read_current(version)[0]
+ cutoff = Timestamp.now(-(24 * 60 * 60))
+ for f in glob.iglob("*.ax.v%d" % version):
+ t = Timestamp(int(f.split(".")[0]))
+ if t < cutoff and t != current:
+ logging.debug("# Deleting old file %s, timestamp %s", f, t)
+ os.unlink(f)
+
+ pdus = rpki.rtr.generator.AXFRSet.parse_rcynic(args.rcynic_dir, version, args.scan_roas, args.scan_routercerts)
+ if pdus == rpki.rtr.generator.AXFRSet.load_current(version):
+ logging.debug("# No change, new serial not needed")
+ continue
+ pdus.save_axfr()
+ for axfr in glob.iglob("*.ax.v%d" % version):
+ if axfr != pdus.filename():
+ pdus.save_ixfr(rpki.rtr.generator.AXFRSet.load(axfr))
+ pdus.mark_current(args.force_zero_nonce)
+
+ logging.debug("# New serial is %d (%s)", pdus.serial, pdus.serial)
+
+ rpki.rtr.generator.kick_all(pdus.serial)
+
+ old_ixfrs.sort()
+ for ixfr in old_ixfrs:
+ try:
+ logging.debug("# Deleting old file %s", ixfr)
+ os.unlink(ixfr)
+ except OSError:
+ pass
- fn1, fn2, fn3, fn4 = os.path.basename(filename).split(".")
- assert fn1.isdigit() and fn2 == "ix" and fn3.isdigit() and fn4.startswith("v") and fn4[1:].isdigit()
- version = int(fn4[1:])
- self = cls._load_file(filename, version)
- self.from_serial = rpki.rtr.channels.Timestamp(fn3)
- self.to_serial = rpki.rtr.channels.Timestamp(fn1)
- return self
- def filename(self):
+def show_main(args):
"""
- Generate filename for this IXFRSet.
+ Display current rpki-rtr server database in textual form.
"""
- return "%d.ix.%d.v%d" % (self.to_serial, self.from_serial, self.version)
+ if args.rpki_rtr_dir:
+ try:
+ os.chdir(args.rpki_rtr_dir)
+ except OSError, e:
+ sys.exit(e)
- def show(self):
- """
- Print this IXFRSet.
- """
+ g = glob.glob("*.ax.v*")
+ g.sort()
+ for f in g:
+ rpki.rtr.generator.AXFRSet.load(f).show()
- logging.debug("# IXFR %d (%s) -> %d (%s) v%d",
- self.from_serial, self.from_serial,
- self.to_serial, self.to_serial,
- self.version)
- for p in self:
- logging.debug(p)
+ g = glob.glob("*.ix.*.v*")
+ g.sort()
+ for f in g:
+ rpki.rtr.generator.IXFRSet.load(f).show()
+def argparse_setup(subparsers):
+ """
+ Set up argparse stuff for commands in this module.
+ """
-def kick_all(serial):
- """
- Kick any existing server processes to wake them up.
- """
-
- try:
- os.stat(rpki.rtr.server.kickme_dir)
- except OSError:
- logging.debug('# Creating directory "%s"', rpki.rtr.server.kickme_dir)
- os.makedirs(rpki.rtr.server.kickme_dir)
-
- msg = "Good morning, serial %d is ready" % serial
- sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- for name in glob.iglob("%s.*" % rpki.rtr.server.kickme_base):
- try:
- logging.debug("# Kicking %s", name)
- sock.sendto(msg, name)
- except socket.error:
- try:
- logging.exception("# Failed to kick %s, probably dead socket, attempting cleanup", name)
- os.unlink(name)
- except Exception, e:
- logging.exception("# Couldn't unlink suspected dead socket %s: %s", name, e)
- except Exception, e:
- logging.warning("# Failed to kick %s and don't understand why: %s", name, e)
- sock.close()
-
-
-def cronjob_main(args):
- """
- Run this right after running rcynic to wade through the ROAs and
- router certificates that rcynic collects and translate that data
- into the form used in the rpki-router protocol. Output is an
- updated database containing both full dumps (AXFR) and incremental
- dumps against a specific prior version (IXFR). After updating the
- database, kicks any active servers, so that they can notify their
- clients that a new version is available.
- """
-
- if args.rpki_rtr_dir:
- try:
- if not os.path.isdir(args.rpki_rtr_dir):
- os.makedirs(args.rpki_rtr_dir)
- os.chdir(args.rpki_rtr_dir)
- except OSError, e:
- logging.critical(str(e))
- sys.exit(1)
-
- for version in sorted(rpki.rtr.server.PDU.version_map.iterkeys(), reverse = True):
-
- logging.debug("# Generating updates for protocol version %d", version)
-
- old_ixfrs = glob.glob("*.ix.*.v%d" % version)
-
- current = rpki.rtr.server.read_current(version)[0]
- cutoff = Timestamp.now(-(24 * 60 * 60))
- for f in glob.iglob("*.ax.v%d" % version):
- t = Timestamp(int(f.split(".")[0]))
- if t < cutoff and t != current:
- logging.debug("# Deleting old file %s, timestamp %s", f, t)
- os.unlink(f)
-
- pdus = rpki.rtr.generator.AXFRSet.parse_rcynic(args.rcynic_dir, version, args.scan_roas, args.scan_routercerts)
- if pdus == rpki.rtr.generator.AXFRSet.load_current(version):
- logging.debug("# No change, new serial not needed")
- continue
- pdus.save_axfr()
- for axfr in glob.iglob("*.ax.v%d" % version):
- if axfr != pdus.filename():
- pdus.save_ixfr(rpki.rtr.generator.AXFRSet.load(axfr))
- pdus.mark_current(args.force_zero_nonce)
-
- logging.debug("# New serial is %d (%s)", pdus.serial, pdus.serial)
-
- rpki.rtr.generator.kick_all(pdus.serial)
-
- old_ixfrs.sort()
- for ixfr in old_ixfrs:
- try:
- logging.debug("# Deleting old file %s", ixfr)
- os.unlink(ixfr)
- except OSError:
- pass
-
-
-def show_main(args):
- """
- Display current rpki-rtr server database in textual form.
- """
-
- if args.rpki_rtr_dir:
- try:
- os.chdir(args.rpki_rtr_dir)
- except OSError, e:
- sys.exit(e)
-
- g = glob.glob("*.ax.v*")
- g.sort()
- for f in g:
- rpki.rtr.generator.AXFRSet.load(f).show()
-
- g = glob.glob("*.ix.*.v*")
- g.sort()
- for f in g:
- rpki.rtr.generator.IXFRSet.load(f).show()
+ subparser = subparsers.add_parser("cronjob", description = cronjob_main.__doc__,
+ help = "Generate RPKI-RTR database from rcynic output")
+ subparser.set_defaults(func = cronjob_main, default_log_destination = "syslog")
+ subparser.add_argument("--scan-roas", help = "specify an external scan_roas program")
+ subparser.add_argument("--scan-routercerts", help = "specify an external scan_routercerts program")
+ subparser.add_argument("--force_zero_nonce", action = "store_true", help = "force nonce value of zero")
+ subparser.add_argument("rcynic_dir", nargs = "?", help = "directory containing validated rcynic output tree")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
-def argparse_setup(subparsers):
- """
- Set up argparse stuff for commands in this module.
- """
-
- subparser = subparsers.add_parser("cronjob", description = cronjob_main.__doc__,
- help = "Generate RPKI-RTR database from rcynic output")
- subparser.set_defaults(func = cronjob_main, default_log_to = "syslog")
- subparser.add_argument("--scan-roas", help = "specify an external scan_roas program")
- subparser.add_argument("--scan-routercerts", help = "specify an external scan_routercerts program")
- subparser.add_argument("--force_zero_nonce", action = "store_true", help = "force nonce value of zero")
- subparser.add_argument("rcynic_dir", help = "directory containing validated rcynic output tree")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
-
- subparser = subparsers.add_parser("show", description = show_main.__doc__,
- help = "Display content of RPKI-RTR database")
- subparser.set_defaults(func = show_main, default_log_to = "stderr")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
+ subparser = subparsers.add_parser("show", description = show_main.__doc__,
+ help = "Display content of RPKI-RTR database")
+ subparser.set_defaults(func = show_main, default_log_destination = "stderr")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
diff --git a/rpki/rtr/main.py b/rpki/rtr/main.py
index 12de30cc..b915f809 100644
--- a/rpki/rtr/main.py
+++ b/rpki/rtr/main.py
@@ -25,70 +25,35 @@ import os
import sys
import time
import logging
-import logging.handlers
-import argparse
+import rpki.config
-class Formatter(logging.Formatter):
-
- converter = time.gmtime
-
- def __init__(self, debug, fmt, datefmt):
- self.debug = debug
- super(Formatter, self).__init__(fmt, datefmt)
-
- def format(self, record):
- if getattr(record, "connection", None) is None:
- record.connection = ""
- return super(Formatter, self).format(record)
-
- def formatException(self, ei):
- if self.debug:
- return super(Formatter, self).formatException(ei)
- else:
- return str(ei[1])
def main():
- os.environ["TZ"] = "UTC"
- time.tzset()
-
- from rpki.rtr.server import argparse_setup as argparse_setup_server
- from rpki.rtr.client import argparse_setup as argparse_setup_client
- from rpki.rtr.generator import argparse_setup as argparse_setup_generator
+ os.environ["TZ"] = "UTC"
+ time.tzset()
- if "rpki.rtr.bgpdump" in sys.modules:
- from rpki.rtr.bgpdump import argparse_setup as argparse_setup_bgpdump
- else:
- def argparse_setup_bgpdump(ignored):
- pass
+ from rpki.rtr.server import argparse_setup as argparse_setup_server
+ from rpki.rtr.client import argparse_setup as argparse_setup_client
+ from rpki.rtr.generator import argparse_setup as argparse_setup_generator
- argparser = argparse.ArgumentParser(description = __doc__)
- argparser.add_argument("--debug", action = "store_true", help = "debugging mode")
- argparser.add_argument("--log-level", default = "debug",
- choices = ("debug", "info", "warning", "error", "critical"),
- type = lambda s: s.lower())
- argparser.add_argument("--log-to",
- choices = ("syslog", "stderr"))
- subparsers = argparser.add_subparsers(title = "Commands", metavar = "", dest = "mode")
- argparse_setup_server(subparsers)
- argparse_setup_client(subparsers)
- argparse_setup_generator(subparsers)
- argparse_setup_bgpdump(subparsers)
- args = argparser.parse_args()
-
- fmt = "rpki-rtr/" + args.mode + "%(connection)s[%(process)d] %(message)s"
-
- if (args.log_to or args.default_log_to) == "stderr":
- handler = logging.StreamHandler()
- fmt = "%(asctime)s " + fmt
- elif os.path.exists("/dev/log"):
- handler = logging.handlers.SysLogHandler("/dev/log")
- else:
- handler = logging.handlers.SysLogHandler()
-
- handler.setFormatter(Formatter(args.debug, fmt, "%Y-%m-%dT%H:%M:%SZ"))
- logging.root.addHandler(handler)
- logging.root.setLevel(int(getattr(logging, args.log_level.upper())))
-
- return args.func(args)
+ if "rpki.rtr.bgpdump" in sys.modules:
+ from rpki.rtr.bgpdump import argparse_setup as argparse_setup_bgpdump
+ else:
+ def argparse_setup_bgpdump(ignored):
+ pass
+
+ cfg = rpki.config.argparser(section = "rpki-rtr", doc = __doc__)
+ cfg.argparser.add_argument("--debug", action = "store_true", help = "debugging mode")
+ cfg.add_logging_arguments()
+ subparsers = cfg.argparser.add_subparsers(title = "Commands", metavar = "", dest = "mode")
+ argparse_setup_server(subparsers)
+ argparse_setup_client(subparsers)
+ argparse_setup_generator(subparsers)
+ argparse_setup_bgpdump(subparsers)
+ args = cfg.argparser.parse_args()
+
+ cfg.configure_logging(args = args, ident = "rpki-rtr/" + args.mode)
+
+ return args.func(args)
diff --git a/rpki/rtr/pdus.py b/rpki/rtr/pdus.py
index 0d2e5928..3fb7457d 100644
--- a/rpki/rtr/pdus.py
+++ b/rpki/rtr/pdus.py
@@ -28,292 +28,300 @@ import rpki.POW
# Exceptions
class PDUException(Exception):
- """
- Parent exception type for exceptions that signal particular protocol
- errors. String value of exception instance will be the message to
- put in the ErrorReportPDU, error_report_code value of exception
- will be the numeric code to use.
- """
-
- def __init__(self, msg = None, pdu = None):
- super(PDUException, self).__init__()
- assert msg is None or isinstance(msg, (str, unicode))
- self.error_report_msg = msg
- self.error_report_pdu = pdu
-
- def __str__(self):
- return self.error_report_msg or self.__class__.__name__
-
- def make_error_report(self, version):
- return ErrorReportPDU(version = version,
- errno = self.error_report_code,
- errmsg = self.error_report_msg,
- errpdu = self.error_report_pdu)
+ """
+ Parent exception type for exceptions that signal particular protocol
+ errors. String value of exception instance will be the message to
+ put in the ErrorReportPDU, error_report_code value of exception
+ will be the numeric code to use.
+ """
+
+ def __init__(self, msg = None, pdu = None):
+ super(PDUException, self).__init__()
+ assert msg is None or isinstance(msg, (str, unicode))
+ self.error_report_msg = msg
+ self.error_report_pdu = pdu
+
+ def __str__(self):
+ return self.error_report_msg or self.__class__.__name__
+
+ def make_error_report(self, version):
+ return ErrorReportPDU(version = version,
+ errno = self.error_report_code,
+ errmsg = self.error_report_msg,
+ errpdu = self.error_report_pdu)
class UnsupportedProtocolVersion(PDUException):
- error_report_code = 4
+ error_report_code = 4
class UnsupportedPDUType(PDUException):
- error_report_code = 5
+ error_report_code = 5
class CorruptData(PDUException):
- error_report_code = 0
+ error_report_code = 0
# Decorators
def wire_pdu(cls, versions = None):
- """
- Class decorator to add a PDU class to the set of known PDUs
- for all supported protocol versions.
- """
+ """
+ Class decorator to add a PDU class to the set of known PDUs
+ for all supported protocol versions.
+ """
- for v in PDU.version_map.iterkeys() if versions is None else versions:
- assert cls.pdu_type not in PDU.version_map[v]
- PDU.version_map[v][cls.pdu_type] = cls
- return cls
+ for v in PDU.version_map.iterkeys() if versions is None else versions:
+ assert cls.pdu_type not in PDU.version_map[v]
+ PDU.version_map[v][cls.pdu_type] = cls
+ return cls
def wire_pdu_only(*versions):
- """
- Class decorator to add a PDU class to the set of known PDUs
- for specific protocol versions.
- """
+ """
+ Class decorator to add a PDU class to the set of known PDUs
+ for specific protocol versions.
+ """
- assert versions and all(v in PDU.version_map for v in versions)
- return lambda cls: wire_pdu(cls, versions)
+ assert versions and all(v in PDU.version_map for v in versions)
+ return lambda cls: wire_pdu(cls, versions)
def clone_pdu_root(root_pdu_class):
- """
- Replace a PDU root class's version_map with a two-level deep copy of itself,
- and return a class decorator which subclasses can use to replace their
- parent classes with themselves in the resulting cloned version map.
+ """
+ Replace a PDU root class's version_map with a two-level deep copy of itself,
+ and return a class decorator which subclasses can use to replace their
+ parent classes with themselves in the resulting cloned version map.
- This function is not itself a decorator, it returns one.
- """
+ This function is not itself a decorator, it returns one.
+ """
- root_pdu_class.version_map = dict((k, v.copy()) for k, v in root_pdu_class.version_map.iteritems())
+ root_pdu_class.version_map = dict((k, v.copy()) for k, v in root_pdu_class.version_map.iteritems())
- def decorator(cls):
- for pdu_map in root_pdu_class.version_map.itervalues():
- for pdu_type, pdu_class in pdu_map.items():
- if pdu_class in cls.__bases__:
- pdu_map[pdu_type] = cls
- return cls
+ def decorator(cls):
+ for pdu_map in root_pdu_class.version_map.itervalues():
+ for pdu_type, pdu_class in pdu_map.items():
+ if pdu_class in cls.__bases__:
+ pdu_map[pdu_type] = cls
+ return cls
- return decorator
+ return decorator
# PDUs
class PDU(object):
- """
- Base PDU. Real PDUs are subclasses of this class.
- """
-
- version_map = {0 : {}, 1 : {}} # Updated by @wire_pdu
-
- _pdu = None # Cached when first generated
+ """
+ Base PDU. Real PDUs are subclasses of this class.
+ """
- header_struct = struct.Struct("!BB2xL")
+ version_map = {0 : {}, 1 : {}} # Updated by @wire_pdu
- def __init__(self, version):
- assert version in self.version_map
- self.version = version
+ _pdu = None # Cached when first generated
- def __cmp__(self, other):
- return cmp(self.to_pdu(), other.to_pdu())
+ header_struct = struct.Struct("!BB2xL")
- @property
- def default_version(self):
- return max(self.version_map.iterkeys())
+ pdu_type = None
- def check(self):
- pass
+ def __init__(self, version):
+ assert version in self.version_map
+ self.version = version
- @classmethod
- def read_pdu(cls, reader):
- return reader.update(need = cls.header_struct.size, callback = cls.got_header)
+ def __cmp__(self, other):
+ return cmp(self.to_pdu(), other.to_pdu())
- @classmethod
- def got_header(cls, reader):
- if not reader.ready():
- return None
- assert reader.available() >= cls.header_struct.size
- version, pdu_type, length = cls.header_struct.unpack(reader.buffer[:cls.header_struct.size])
- reader.check_version(version)
- if pdu_type not in cls.version_map[version]:
- raise UnsupportedPDUType(
- "Received unsupported PDU type %d" % pdu_type)
- if length < 8:
- raise CorruptData(
- "Received PDU with length %d, which is too short to be valid" % length)
- self = cls.version_map[version][pdu_type](version = version)
- return reader.update(need = length, callback = self.got_pdu)
+ def to_pdu(self, announce = None):
+ return NotImplementedError
+ @property
+ def default_version(self):
+ return max(self.version_map.iterkeys())
-class PDUWithSerial(PDU):
- """
- Base class for PDUs consisting of just a serial number and nonce.
- """
+ def check(self):
+ pass
- header_struct = struct.Struct("!BBHLL")
+ @classmethod
+ def read_pdu(cls, reader):
+ return reader.update(need = cls.header_struct.size, callback = cls.got_header)
- def __init__(self, version, serial = None, nonce = None):
- super(PDUWithSerial, self).__init__(version)
- if serial is not None:
- assert isinstance(serial, int)
- self.serial = serial
- if nonce is not None:
- assert isinstance(nonce, int)
- self.nonce = nonce
+ @classmethod
+ def got_header(cls, reader):
+ if not reader.ready():
+ return None
+ assert reader.available() >= cls.header_struct.size
+ version, pdu_type, length = cls.header_struct.unpack(reader.buffer[:cls.header_struct.size])
+ reader.check_version(version)
+ if pdu_type not in cls.version_map[version]:
+ raise UnsupportedPDUType(
+ "Received unsupported PDU type %d" % pdu_type)
+ if length < 8:
+ raise CorruptData(
+ "Received PDU with length %d, which is too short to be valid" % length)
+ self = cls.version_map[version][pdu_type](version = version)
+ return reader.update(need = length, callback = self.got_pdu)
- def __str__(self):
- return "[%s, serial #%d nonce %d]" % (self.__class__.__name__, self.serial, self.nonce)
- def to_pdu(self):
+class PDUWithSerial(PDU):
"""
- Generate the wire format PDU.
+ Base class for PDUs consisting of just a serial number and nonce.
"""
- if self._pdu is None:
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
- self.header_struct.size, self.serial)
- return self._pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b = reader.get(self.header_struct.size)
- version, pdu_type, self.nonce, length, self.serial = self.header_struct.unpack(b)
- assert version == self.version and pdu_type == self.pdu_type
- if length != 12:
- raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
- assert b == self.to_pdu()
- return self
+ header_struct = struct.Struct("!BBHLL")
+
+ def __init__(self, version, serial = None, nonce = None):
+ super(PDUWithSerial, self).__init__(version)
+ if serial is not None:
+ assert isinstance(serial, int)
+ self.serial = serial
+ if nonce is not None:
+ assert isinstance(nonce, int)
+ self.nonce = nonce
+
+ def __str__(self):
+ return "[%s, serial #%d nonce %d]" % (self.__class__.__name__, self.serial, self.nonce)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU.
+ """
+
+ assert announce is None
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
+ self.header_struct.size, self.serial)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, self.nonce, length, self.serial = self.header_struct.unpack(b)
+ assert version == self.version and pdu_type == self.pdu_type
+ if length != 12:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
class PDUWithNonce(PDU):
- """
- Base class for PDUs consisting of just a nonce.
- """
-
- header_struct = struct.Struct("!BBHL")
-
- def __init__(self, version, nonce = None):
- super(PDUWithNonce, self).__init__(version)
- if nonce is not None:
- assert isinstance(nonce, int)
- self.nonce = nonce
-
- def __str__(self):
- return "[%s, nonce %d]" % (self.__class__.__name__, self.nonce)
-
- def to_pdu(self):
"""
- Generate the wire format PDU.
+ Base class for PDUs consisting of just a nonce.
"""
- if self._pdu is None:
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, self.header_struct.size)
- return self._pdu
+ header_struct = struct.Struct("!BBHL")
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b = reader.get(self.header_struct.size)
- version, pdu_type, self.nonce, length = self.header_struct.unpack(b)
- assert version == self.version and pdu_type == self.pdu_type
- if length != 8:
- raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
- assert b == self.to_pdu()
- return self
+ def __init__(self, version, nonce = None):
+ super(PDUWithNonce, self).__init__(version)
+ if nonce is not None:
+ assert isinstance(nonce, int)
+ self.nonce = nonce
+ def __str__(self):
+ return "[%s, nonce %d]" % (self.__class__.__name__, self.nonce)
-class PDUEmpty(PDU):
- """
- Base class for empty PDUs.
- """
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU.
+ """
- header_struct = struct.Struct("!BBHL")
+ assert announce is None
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, self.header_struct.size)
+ return self._pdu
- def __str__(self):
- return "[%s]" % self.__class__.__name__
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, self.nonce, length = self.header_struct.unpack(b)
+ assert version == self.version and pdu_type == self.pdu_type
+ if length != 8:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
- def to_pdu(self):
+
+class PDUEmpty(PDU):
"""
- Generate the wire format PDU for this prefix.
+ Base class for empty PDUs.
"""
- if self._pdu is None:
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, 0, self.header_struct.size)
- return self._pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b = reader.get(self.header_struct.size)
- version, pdu_type, zero, length = self.header_struct.unpack(b)
- assert version == self.version and pdu_type == self.pdu_type
- if zero != 0:
- raise CorruptData("Must-be-zero field isn't zero" % length, pdu = self)
- if length != 8:
- raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
- assert b == self.to_pdu()
- return self
+ header_struct = struct.Struct("!BBHL")
+
+ def __str__(self):
+ return "[%s]" % self.__class__.__name__
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU for this prefix.
+ """
+
+ assert announce is None
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, 0, self.header_struct.size)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, zero, length = self.header_struct.unpack(b)
+ assert version == self.version and pdu_type == self.pdu_type
+ if zero != 0:
+ raise CorruptData("Must-be-zero field isn't zero" % length, pdu = self)
+ if length != 8:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
@wire_pdu
class SerialNotifyPDU(PDUWithSerial):
- """
- Serial Notify PDU.
- """
+ """
+ Serial Notify PDU.
+ """
- pdu_type = 0
+ pdu_type = 0
@wire_pdu
class SerialQueryPDU(PDUWithSerial):
- """
- Serial Query PDU.
- """
+ """
+ Serial Query PDU.
+ """
- pdu_type = 1
+ pdu_type = 1
- def __init__(self, version, serial = None, nonce = None):
- super(SerialQueryPDU, self).__init__(self.default_version if version is None else version, serial, nonce)
+ def __init__(self, version, serial = None, nonce = None):
+ super(SerialQueryPDU, self).__init__(self.default_version if version is None else version, serial, nonce)
@wire_pdu
class ResetQueryPDU(PDUEmpty):
- """
- Reset Query PDU.
- """
+ """
+ Reset Query PDU.
+ """
- pdu_type = 2
+ pdu_type = 2
- def __init__(self, version):
- super(ResetQueryPDU, self).__init__(self.default_version if version is None else version)
+ def __init__(self, version):
+ super(ResetQueryPDU, self).__init__(self.default_version if version is None else version)
@wire_pdu
class CacheResponsePDU(PDUWithNonce):
- """
- Cache Response PDU.
- """
+ """
+ Cache Response PDU.
+ """
- pdu_type = 3
+ pdu_type = 3
def EndOfDataPDU(version, *args, **kwargs):
- """
- Factory for the EndOfDataPDU classes, which take different forms in
- different protocol versions.
- """
+ """
+ Factory for the EndOfDataPDU classes, which take different forms in
+ different protocol versions.
+ """
- if version == 0:
- return EndOfDataPDUv0(version, *args, **kwargs)
- if version == 1:
- return EndOfDataPDUv1(version, *args, **kwargs)
- raise NotImplementedError
+ if version == 0:
+ return EndOfDataPDUv0(version, *args, **kwargs)
+ if version == 1:
+ return EndOfDataPDUv1(version, *args, **kwargs)
+ raise NotImplementedError
# Min, max, and default values, from the current RFC 6810 bis I-D.
@@ -324,325 +332,345 @@ def EndOfDataPDU(version, *args, **kwargs):
default_refresh = 3600
def valid_refresh(refresh):
- if not isinstance(refresh, int) or refresh < 120 or refresh > 86400:
- raise ValueError
- return refresh
+ if not isinstance(refresh, int) or refresh < 120 or refresh > 86400:
+ raise ValueError
+ return refresh
default_retry = 600
def valid_retry(retry):
- if not isinstance(retry, int) or retry < 120 or retry > 7200:
- raise ValueError
- return retry
+ if not isinstance(retry, int) or retry < 120 or retry > 7200:
+ raise ValueError
+ return retry
default_expire = 7200
def valid_expire(expire):
- if not isinstance(expire, int) or expire < 600 or expire > 172800:
- raise ValueError
- return expire
+ if not isinstance(expire, int) or expire < 600 or expire > 172800:
+ raise ValueError
+ return expire
@wire_pdu_only(0)
class EndOfDataPDUv0(PDUWithSerial):
- """
- End of Data PDU, protocol version 0.
- """
+ """
+ End of Data PDU, protocol version 0.
+ """
- pdu_type = 7
+ pdu_type = 7
- def __init__(self, version, serial = None, nonce = None, refresh = None, retry = None, expire = None):
- super(EndOfDataPDUv0, self).__init__(version, serial, nonce)
- self.refresh = valid_refresh(default_refresh if refresh is None else refresh)
- self.retry = valid_retry( default_retry if retry is None else retry)
- self.expire = valid_expire( default_expire if expire is None else expire)
+ def __init__(self, version, serial = None, nonce = None, refresh = None, retry = None, expire = None):
+ super(EndOfDataPDUv0, self).__init__(version, serial, nonce)
+ self.refresh = valid_refresh(default_refresh if refresh is None else refresh)
+ self.retry = valid_retry( default_retry if retry is None else retry)
+ self.expire = valid_expire( default_expire if expire is None else expire)
@wire_pdu_only(1)
class EndOfDataPDUv1(EndOfDataPDUv0):
- """
- End of Data PDU, protocol version 1.
- """
-
- header_struct = struct.Struct("!BBHLLLLL")
-
- def __str__(self):
- return "[%s, serial #%d nonce %d refresh %d retry %d expire %d]" % (
- self.__class__.__name__, self.serial, self.nonce, self.refresh, self.retry, self.expire)
-
- def to_pdu(self):
"""
- Generate the wire format PDU.
+ End of Data PDU, protocol version 1.
"""
- if self._pdu is None:
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
- self.header_struct.size, self.serial,
- self.refresh, self.retry, self.expire)
- return self._pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b = reader.get(self.header_struct.size)
- version, pdu_type, self.nonce, length, self.serial, self.refresh, self.retry, self.expire \
- = self.header_struct.unpack(b)
- assert version == self.version and pdu_type == self.pdu_type
- if length != 24:
- raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
- assert b == self.to_pdu()
- return self
+ header_struct = struct.Struct("!BBHLLLLL")
+
+ def __str__(self):
+ return "[%s, serial #%d nonce %d refresh %d retry %d expire %d]" % (
+ self.__class__.__name__, self.serial, self.nonce, self.refresh, self.retry, self.expire)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU.
+ """
+
+ assert announce is None
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
+ self.header_struct.size, self.serial,
+ self.refresh, self.retry, self.expire)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, self.nonce, length, self.serial, self.refresh, self.retry, self.expire \
+ = self.header_struct.unpack(b)
+ assert version == self.version and pdu_type == self.pdu_type
+ if length != 24:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
@wire_pdu
class CacheResetPDU(PDUEmpty):
- """
- Cache reset PDU.
- """
+ """
+ Cache reset PDU.
+ """
- pdu_type = 8
+ pdu_type = 8
class PrefixPDU(PDU):
- """
- Object representing one prefix. This corresponds closely to one PDU
- in the rpki-router protocol, so closely that we use lexical ordering
- of the wire format of the PDU as the ordering for this class.
-
- This is a virtual class, but the .from_text() constructor
- instantiates the correct concrete subclass (IPv4PrefixPDU or
- IPv6PrefixPDU) depending on the syntax of its input text.
- """
-
- header_struct = struct.Struct("!BB2xLBBBx")
- asnum_struct = struct.Struct("!L")
-
- def __str__(self):
- plm = "%s/%s-%s" % (self.prefix, self.prefixlen, self.max_prefixlen)
- return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, plm,
- ":".join(("%02X" % ord(b) for b in self.to_pdu())))
-
- def show(self):
- logging.debug("# Class: %s", self.__class__.__name__)
- logging.debug("# ASN: %s", self.asn)
- logging.debug("# Prefix: %s", self.prefix)
- logging.debug("# Prefixlen: %s", self.prefixlen)
- logging.debug("# MaxPrefixlen: %s", self.max_prefixlen)
- logging.debug("# Announce: %s", self.announce)
-
- def check(self):
- """
- Check attributes to make sure they're within range.
- """
-
- if self.announce not in (0, 1):
- raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
- if self.prefix.bits != self.address_byte_count * 8:
- raise CorruptData("IP address length %d does not match expectation" % self.prefix.bits, pdu = self)
- if self.prefixlen < 0 or self.prefixlen > self.prefix.bits:
- raise CorruptData("Implausible prefix length %d" % self.prefixlen, pdu = self)
- if self.max_prefixlen < self.prefixlen or self.max_prefixlen > self.prefix.bits:
- raise CorruptData("Implausible max prefix length %d" % self.max_prefixlen, pdu = self)
- pdulen = self.header_struct.size + self.prefix.bits/8 + self.asnum_struct.size
- if len(self.to_pdu()) != pdulen:
- raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
-
- def to_pdu(self, announce = None):
- """
- Generate the wire format PDU for this prefix.
- """
-
- if announce is not None:
- assert announce in (0, 1)
- elif self._pdu is not None:
- return self._pdu
- pdulen = self.header_struct.size + self.prefix.bits/8 + self.asnum_struct.size
- pdu = (self.header_struct.pack(self.version, self.pdu_type, pdulen,
- announce if announce is not None else self.announce,
- self.prefixlen, self.max_prefixlen) +
- self.prefix.toBytes() +
- self.asnum_struct.pack(self.asn))
- if announce is None:
- assert self._pdu is None
- self._pdu = pdu
- return pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- b1 = reader.get(self.header_struct.size)
- b2 = reader.get(self.address_byte_count)
- b3 = reader.get(self.asnum_struct.size)
- version, pdu_type, length, self.announce, self.prefixlen, self.max_prefixlen = self.header_struct.unpack(b1)
- assert version == self.version and pdu_type == self.pdu_type
- if length != len(b1) + len(b2) + len(b3):
- raise CorruptData("Got PDU length %d, expected %d" % (length, len(b1) + len(b2) + len(b3)), pdu = self)
- self.prefix = rpki.POW.IPAddress.fromBytes(b2)
- self.asn = self.asnum_struct.unpack(b3)[0]
- assert b1 + b2 + b3 == self.to_pdu()
- return self
+ """
+ Object representing one prefix. This corresponds closely to one PDU
+ in the rpki-router protocol, so closely that we use lexical ordering
+ of the wire format of the PDU as the ordering for this class.
+
+ This is a virtual class, but the .from_text() constructor
+ instantiates the correct concrete subclass (IPv4PrefixPDU or
+ IPv6PrefixPDU) depending on the syntax of its input text.
+ """
+
+ header_struct = struct.Struct("!BB2xLBBBx")
+ asnum_struct = struct.Struct("!L")
+ address_byte_count = 0
+
+ def __init__(self, version):
+ super(PrefixPDU, self).__init__(version)
+ self.asn = None
+ self.prefix = None
+ self.prefixlen = None
+ self.max_prefixlen = None
+ self.announce = None
+
+ def __str__(self):
+ plm = "%s/%s-%s" % (self.prefix, self.prefixlen, self.max_prefixlen)
+ return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, plm,
+ ":".join(("%02X" % ord(b) for b in self.to_pdu())))
+
+ def show(self):
+ logging.debug("# Class: %s", self.__class__.__name__)
+ logging.debug("# ASN: %s", self.asn)
+ logging.debug("# Prefix: %s", self.prefix)
+ logging.debug("# Prefixlen: %s", self.prefixlen)
+ logging.debug("# MaxPrefixlen: %s", self.max_prefixlen)
+ logging.debug("# Announce: %s", self.announce)
+
+ def check(self):
+ """
+ Check attributes to make sure they're within range.
+ """
+
+ if self.announce not in (0, 1):
+ raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
+ if self.prefix.bits != self.address_byte_count * 8:
+ raise CorruptData("IP address length %d does not match expectation" % self.prefix.bits, pdu = self)
+ if self.prefixlen < 0 or self.prefixlen > self.prefix.bits:
+ raise CorruptData("Implausible prefix length %d" % self.prefixlen, pdu = self)
+ if self.max_prefixlen < self.prefixlen or self.max_prefixlen > self.prefix.bits:
+ raise CorruptData("Implausible max prefix length %d" % self.max_prefixlen, pdu = self)
+ pdulen = self.header_struct.size + self.prefix.bits/8 + self.asnum_struct.size
+ if len(self.to_pdu()) != pdulen:
+ raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU for this prefix.
+ """
+
+ if announce is not None:
+ assert announce in (0, 1)
+ elif self._pdu is not None:
+ return self._pdu
+ pdulen = self.header_struct.size + self.prefix.bits/8 + self.asnum_struct.size
+ pdu = (self.header_struct.pack(self.version, self.pdu_type, pdulen,
+ announce if announce is not None else self.announce,
+ self.prefixlen, self.max_prefixlen) +
+ self.prefix.toBytes() +
+ self.asnum_struct.pack(self.asn))
+ if announce is None:
+ assert self._pdu is None
+ self._pdu = pdu
+ return pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b1 = reader.get(self.header_struct.size)
+ b2 = reader.get(self.address_byte_count)
+ b3 = reader.get(self.asnum_struct.size)
+ version, pdu_type, length, self.announce, self.prefixlen, self.max_prefixlen = self.header_struct.unpack(b1)
+ assert version == self.version and pdu_type == self.pdu_type
+ if length != len(b1) + len(b2) + len(b3):
+ raise CorruptData("Got PDU length %d, expected %d" % (length, len(b1) + len(b2) + len(b3)), pdu = self)
+ self.prefix = rpki.POW.IPAddress.fromBytes(b2)
+ self.asn = self.asnum_struct.unpack(b3)[0]
+ assert b1 + b2 + b3 == self.to_pdu()
+ return self
@wire_pdu
class IPv4PrefixPDU(PrefixPDU):
- """
- IPv4 flavor of a prefix.
- """
+ """
+ IPv4 flavor of a prefix.
+ """
- pdu_type = 4
- address_byte_count = 4
+ pdu_type = 4
+ address_byte_count = 4
@wire_pdu
class IPv6PrefixPDU(PrefixPDU):
- """
- IPv6 flavor of a prefix.
- """
+ """
+ IPv6 flavor of a prefix.
+ """
- pdu_type = 6
- address_byte_count = 16
+ pdu_type = 6
+ address_byte_count = 16
@wire_pdu_only(1)
class RouterKeyPDU(PDU):
- """
- Router Key PDU.
- """
-
- pdu_type = 9
-
- header_struct = struct.Struct("!BBBxL20sL")
-
- def __str__(self):
- return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn,
- base64.urlsafe_b64encode(self.ski).rstrip("="),
- ":".join(("%02X" % ord(b) for b in self.to_pdu())))
-
- def check(self):
- """
- Check attributes to make sure they're within range.
- """
-
- if self.announce not in (0, 1):
- raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
- if len(self.ski) != 20:
- raise CorruptData("Implausible SKI length %d" % len(self.ski), pdu = self)
- pdulen = self.header_struct.size + len(self.key)
- if len(self.to_pdu()) != pdulen:
- raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
-
- def to_pdu(self, announce = None):
- if announce is not None:
- assert announce in (0, 1)
- elif self._pdu is not None:
- return self._pdu
- pdulen = self.header_struct.size + len(self.key)
- pdu = (self.header_struct.pack(self.version,
- self.pdu_type,
- announce if announce is not None else self.announce,
- pdulen,
- self.ski,
- self.asn)
- + self.key)
- if announce is None:
- assert self._pdu is None
- self._pdu = pdu
- return pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- header = reader.get(self.header_struct.size)
- version, pdu_type, self.announce, length, self.ski, self.asn = self.header_struct.unpack(header)
- assert version == self.version and pdu_type == self.pdu_type
- remaining = length - self.header_struct.size
- if remaining <= 0:
- raise CorruptData("Got PDU length %d, minimum is %d" % (length, self.header_struct.size + 1), pdu = self)
- self.key = reader.get(remaining)
- assert header + self.key == self.to_pdu()
- return self
+ """
+ Router Key PDU.
+ """
+
+ pdu_type = 9
+
+ header_struct = struct.Struct("!BBBxL20sL")
+
+ def __init__(self, version):
+ super(RouterKeyPDU, self).__init__(version)
+ self.announce = None
+ self.ski = None
+ self.asn = None
+ self.key = None
+
+ def __str__(self):
+ return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn,
+ base64.urlsafe_b64encode(self.ski).rstrip("="),
+ ":".join(("%02X" % ord(b) for b in self.to_pdu())))
+
+ def check(self):
+ """
+ Check attributes to make sure they're within range.
+ """
+
+ if self.announce not in (0, 1):
+ raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
+ if len(self.ski) != 20:
+ raise CorruptData("Implausible SKI length %d" % len(self.ski), pdu = self)
+ pdulen = self.header_struct.size + len(self.key)
+ if len(self.to_pdu()) != pdulen:
+ raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
+
+ def to_pdu(self, announce = None):
+ if announce is not None:
+ assert announce in (0, 1)
+ elif self._pdu is not None:
+ return self._pdu
+ pdulen = self.header_struct.size + len(self.key)
+ pdu = (self.header_struct.pack(self.version,
+ self.pdu_type,
+ announce if announce is not None else self.announce,
+ pdulen,
+ self.ski,
+ self.asn)
+ + self.key)
+ if announce is None:
+ assert self._pdu is None
+ self._pdu = pdu
+ return pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ header = reader.get(self.header_struct.size)
+ version, pdu_type, self.announce, length, self.ski, self.asn = self.header_struct.unpack(header)
+ assert version == self.version and pdu_type == self.pdu_type
+ remaining = length - self.header_struct.size
+ if remaining <= 0:
+ raise CorruptData("Got PDU length %d, minimum is %d" % (length, self.header_struct.size + 1), pdu = self)
+ self.key = reader.get(remaining)
+ assert header + self.key == self.to_pdu()
+ return self
@wire_pdu
class ErrorReportPDU(PDU):
- """
- Error Report PDU.
- """
-
- pdu_type = 10
-
- header_struct = struct.Struct("!BBHL")
- string_struct = struct.Struct("!L")
-
- errors = {
- 2 : "No Data Available" }
-
- fatal = {
- 0 : "Corrupt Data",
- 1 : "Internal Error",
- 3 : "Invalid Request",
- 4 : "Unsupported Protocol Version",
- 5 : "Unsupported PDU Type",
- 6 : "Withdrawal of Unknown Record",
- 7 : "Duplicate Announcement Received" }
-
- assert set(errors) & set(fatal) == set()
-
- errors.update(fatal)
-
- codes = dict((v, k) for k, v in errors.items())
-
- def __init__(self, version, errno = None, errpdu = None, errmsg = None):
- super(ErrorReportPDU, self).__init__(version)
- assert errno is None or errno in self.errors
- self.errno = errno
- self.errpdu = errpdu
- self.errmsg = errmsg if errmsg is not None or errno is None else self.errors[errno]
-
- def __str__(self):
- return "[%s, error #%s: %r]" % (self.__class__.__name__, self.errno, self.errmsg)
-
- def to_counted_string(self, s):
- return self.string_struct.pack(len(s)) + s
-
- def read_counted_string(self, reader, remaining):
- assert remaining >= self.string_struct.size
- n = self.string_struct.unpack(reader.get(self.string_struct.size))[0]
- assert remaining >= self.string_struct.size + n
- return n, reader.get(n), (remaining - self.string_struct.size - n)
-
- def to_pdu(self):
- """
- Generate the wire format PDU for this error report.
- """
-
- if self._pdu is None:
- assert isinstance(self.errno, int)
- assert not isinstance(self.errpdu, ErrorReportPDU)
- p = self.errpdu
- if p is None:
- p = ""
- elif isinstance(p, PDU):
- p = p.to_pdu()
- assert isinstance(p, str)
- pdulen = self.header_struct.size + self.string_struct.size * 2 + len(p) + len(self.errmsg)
- self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.errno, pdulen)
- self._pdu += self.to_counted_string(p)
- self._pdu += self.to_counted_string(self.errmsg.encode("utf8"))
- return self._pdu
-
- def got_pdu(self, reader):
- if not reader.ready():
- return None
- header = reader.get(self.header_struct.size)
- version, pdu_type, self.errno, length = self.header_struct.unpack(header)
- assert version == self.version and pdu_type == self.pdu_type
- remaining = length - self.header_struct.size
- self.pdulen, self.errpdu, remaining = self.read_counted_string(reader, remaining)
- self.errlen, self.errmsg, remaining = self.read_counted_string(reader, remaining)
- if length != self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen:
- raise CorruptData("Got PDU length %d, expected %d" % (
- length, self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen))
- assert (header
- + self.to_counted_string(self.errpdu)
- + self.to_counted_string(self.errmsg.encode("utf8"))
- == self.to_pdu())
- return self
+ """
+ Error Report PDU.
+ """
+
+ pdu_type = 10
+
+ header_struct = struct.Struct("!BBHL")
+ string_struct = struct.Struct("!L")
+
+ errors = {
+ 2 : "No Data Available" }
+
+ fatal = {
+ 0 : "Corrupt Data",
+ 1 : "Internal Error",
+ 3 : "Invalid Request",
+ 4 : "Unsupported Protocol Version",
+ 5 : "Unsupported PDU Type",
+ 6 : "Withdrawal of Unknown Record",
+ 7 : "Duplicate Announcement Received" }
+
+ assert set(errors) & set(fatal) == set()
+
+ errors.update(fatal)
+
+ codes = dict((v, k) for k, v in errors.items())
+
+ def __init__(self, version, errno = None, errpdu = None, errmsg = None):
+ super(ErrorReportPDU, self).__init__(version)
+ assert errno is None or errno in self.errors
+ self.errno = errno
+ self.errpdu = errpdu
+ self.errmsg = errmsg if errmsg is not None or errno is None else self.errors[errno]
+ self.pdulen = None
+ self.errlen = None
+
+ def __str__(self):
+ return "[%s, error #%s: %r]" % (self.__class__.__name__, self.errno, self.errmsg)
+
+ def to_counted_string(self, s):
+ return self.string_struct.pack(len(s)) + s
+
+ def read_counted_string(self, reader, remaining):
+ assert remaining >= self.string_struct.size
+ n = self.string_struct.unpack(reader.get(self.string_struct.size))[0]
+ assert remaining >= self.string_struct.size + n
+ return n, reader.get(n), (remaining - self.string_struct.size - n)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU for this error report.
+ """
+
+ assert announce is None
+ if self._pdu is None:
+ assert isinstance(self.errno, int)
+ assert not isinstance(self.errpdu, ErrorReportPDU)
+ p = self.errpdu
+ if p is None:
+ p = ""
+ elif isinstance(p, PDU):
+ p = p.to_pdu()
+ assert isinstance(p, str)
+ pdulen = self.header_struct.size + self.string_struct.size * 2 + len(p) + len(self.errmsg)
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.errno, pdulen)
+ self._pdu += self.to_counted_string(p)
+ self._pdu += self.to_counted_string(self.errmsg.encode("utf8"))
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ header = reader.get(self.header_struct.size)
+ version, pdu_type, self.errno, length = self.header_struct.unpack(header)
+ assert version == self.version and pdu_type == self.pdu_type
+ remaining = length - self.header_struct.size
+ self.pdulen, self.errpdu, remaining = self.read_counted_string(reader, remaining)
+ self.errlen, self.errmsg, remaining = self.read_counted_string(reader, remaining)
+ if length != self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen:
+ raise CorruptData("Got PDU length %d, expected %d" % (
+ length, self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen))
+ assert (header
+ + self.to_counted_string(self.errpdu)
+ + self.to_counted_string(self.errmsg.encode("utf8"))
+ == self.to_pdu())
+ return self
diff --git a/rpki/rtr/server.py b/rpki/rtr/server.py
index 2ea3a040..c08320fc 100644
--- a/rpki/rtr/server.py
+++ b/rpki/rtr/server.py
@@ -44,37 +44,37 @@ kickme_base = os.path.join(kickme_dir, "kickme")
class PDU(rpki.rtr.pdus.PDU):
- """
- Generic server PDU.
- """
-
- def send_file(self, server, filename):
"""
- Send a content of a file as a cache response. Caller should catch IOError.
+ Generic server PDU.
"""
- fn2 = os.path.splitext(filename)[1]
- assert fn2.startswith(".v") and fn2[2:].isdigit() and int(fn2[2:]) == server.version
-
- f = open(filename, "rb")
- server.push_pdu(CacheResponsePDU(version = server.version,
- nonce = server.current_nonce))
- server.push_file(f)
- server.push_pdu(EndOfDataPDU(version = server.version,
- serial = server.current_serial,
- nonce = server.current_nonce,
- refresh = server.refresh,
- retry = server.retry,
- expire = server.expire))
-
- def send_nodata(self, server):
- """
- Send a nodata error.
- """
+ def send_file(self, server, filename):
+ """
+ Send the contents of a file as a cache response. Caller should catch IOError.
+ """
+
+ fn2 = os.path.splitext(filename)[1]
+ assert fn2.startswith(".v") and fn2[2:].isdigit() and int(fn2[2:]) == server.version
- server.push_pdu(ErrorReportPDU(version = server.version,
- errno = ErrorReportPDU.codes["No Data Available"],
- errpdu = self))
+ f = open(filename, "rb")
+ server.push_pdu(CacheResponsePDU(version = server.version,
+ nonce = server.current_nonce))
+ server.push_file(f)
+ server.push_pdu(EndOfDataPDU(version = server.version,
+ serial = server.current_serial,
+ nonce = server.current_nonce,
+ refresh = server.refresh,
+ retry = server.retry,
+ expire = server.expire))
+
+ def send_nodata(self, server):
+ """
+ Send a nodata error.
+ """
+
+ server.push_pdu(ErrorReportPDU(version = server.version,
+ errno = ErrorReportPDU.codes["No Data Available"],
+ errpdu = self))
clone_pdu = clone_pdu_root(PDU)
@@ -82,513 +82,513 @@ clone_pdu = clone_pdu_root(PDU)
@clone_pdu
class SerialQueryPDU(PDU, rpki.rtr.pdus.SerialQueryPDU):
- """
- Serial Query PDU.
- """
-
- def serve(self, server):
- """
- Received a serial query, send incremental transfer in response.
- If client is already up to date, just send an empty incremental
- transfer.
"""
-
- server.logger.debug(self)
- if server.get_serial() is None:
- self.send_nodata(server)
- elif server.current_nonce != self.nonce:
- server.logger.info("[Client requested wrong nonce, resetting client]")
- server.push_pdu(CacheResetPDU(version = server.version))
- elif server.current_serial == self.serial:
- server.logger.debug("[Client is already current, sending empty IXFR]")
- server.push_pdu(CacheResponsePDU(version = server.version,
- nonce = server.current_nonce))
- server.push_pdu(EndOfDataPDU(version = server.version,
- serial = server.current_serial,
- nonce = server.current_nonce,
- refresh = server.refresh,
- retry = server.retry,
- expire = server.expire))
- elif disable_incrementals:
- server.push_pdu(CacheResetPDU(version = server.version))
- else:
- try:
- self.send_file(server, "%d.ix.%d.v%d" % (server.current_serial, self.serial, server.version))
- except IOError:
- server.push_pdu(CacheResetPDU(version = server.version))
+ Serial Query PDU.
+ """
+
+ def serve(self, server):
+ """
+ Received a serial query, send incremental transfer in response.
+ If client is already up to date, just send an empty incremental
+ transfer.
+ """
+
+ server.logger.debug(self)
+ if server.get_serial() is None:
+ self.send_nodata(server)
+ elif server.current_nonce != self.nonce:
+ server.logger.info("[Client requested wrong nonce, resetting client]")
+ server.push_pdu(CacheResetPDU(version = server.version))
+ elif server.current_serial == self.serial:
+ server.logger.debug("[Client is already current, sending empty IXFR]")
+ server.push_pdu(CacheResponsePDU(version = server.version,
+ nonce = server.current_nonce))
+ server.push_pdu(EndOfDataPDU(version = server.version,
+ serial = server.current_serial,
+ nonce = server.current_nonce,
+ refresh = server.refresh,
+ retry = server.retry,
+ expire = server.expire))
+ elif disable_incrementals:
+ server.push_pdu(CacheResetPDU(version = server.version))
+ else:
+ try:
+ self.send_file(server, "%d.ix.%d.v%d" % (server.current_serial, self.serial, server.version))
+ except IOError:
+ server.push_pdu(CacheResetPDU(version = server.version))
@clone_pdu
class ResetQueryPDU(PDU, rpki.rtr.pdus.ResetQueryPDU):
- """
- Reset Query PDU.
- """
-
- def serve(self, server):
"""
- Received a reset query, send full current state in response.
+ Reset Query PDU.
"""
- server.logger.debug(self)
- if server.get_serial() is None:
- self.send_nodata(server)
- else:
- try:
- fn = "%d.ax.v%d" % (server.current_serial, server.version)
- self.send_file(server, fn)
- except IOError:
- server.push_pdu(ErrorReportPDU(version = server.version,
- errno = ErrorReportPDU.codes["Internal Error"],
- errpdu = self,
- errmsg = "Couldn't open %s" % fn))
+ def serve(self, server):
+ """
+ Received a reset query, send full current state in response.
+ """
+
+ server.logger.debug(self)
+ if server.get_serial() is None:
+ self.send_nodata(server)
+ else:
+ try:
+ fn = "%d.ax.v%d" % (server.current_serial, server.version)
+ self.send_file(server, fn)
+ except IOError:
+ server.push_pdu(ErrorReportPDU(version = server.version,
+ errno = ErrorReportPDU.codes["Internal Error"],
+ errpdu = self,
+ errmsg = "Couldn't open %s" % fn))
@clone_pdu
class ErrorReportPDU(rpki.rtr.pdus.ErrorReportPDU):
- """
- Error Report PDU.
- """
-
- def serve(self, server):
"""
- Received an ErrorReportPDU from client. Not much we can do beyond
- logging it, then killing the connection if error was fatal.
+ Error Report PDU.
"""
- server.logger.error(self)
- if self.errno in self.fatal:
- server.logger.error("[Shutting down due to reported fatal protocol error]")
- sys.exit(1)
+ def serve(self, server):
+ """
+ Received an ErrorReportPDU from client. Not much we can do beyond
+ logging it, then killing the connection if error was fatal.
+ """
+
+ server.logger.error(self)
+ if self.errno in self.fatal:
+ server.logger.error("[Shutting down due to reported fatal protocol error]")
+ sys.exit(1)
def read_current(version):
- """
- Read current serial number and nonce. Return None for both if
- serial and nonce not recorded. For backwards compatibility, treat
- file containing just a serial number as having a nonce of zero.
- """
-
- if version is None:
- return None, None
- try:
- with open("current.v%d" % version, "r") as f:
- values = tuple(int(s) for s in f.read().split())
- return values[0], values[1]
- except IndexError:
- return values[0], 0
- except IOError:
- return None, None
+ """
+ Read current serial number and nonce. Return None for both if
+ serial and nonce not recorded. For backwards compatibility, treat
+ file containing just a serial number as having a nonce of zero.
+ """
+
+ if version is None:
+ return None, None
+ try:
+ with open("current.v%d" % version, "r") as f:
+ values = tuple(int(s) for s in f.read().split())
+ return values[0], values[1]
+ except IndexError:
+ return values[0], 0
+ except IOError:
+ return None, None
def write_current(serial, nonce, version):
- """
- Write serial number and nonce.
- """
+ """
+ Write serial number and nonce.
+ """
- curfn = "current.v%d" % version
- tmpfn = curfn + "%d.tmp" % os.getpid()
- with open(tmpfn, "w") as f:
- f.write("%d %d\n" % (serial, nonce))
- os.rename(tmpfn, curfn)
+ curfn = "current.v%d" % version
+ tmpfn = curfn + "%d.tmp" % os.getpid()
+ with open(tmpfn, "w") as f:
+ f.write("%d %d\n" % (serial, nonce))
+ os.rename(tmpfn, curfn)
class FileProducer(object):
- """
- File-based producer object for asynchat.
- """
+ """
+ File-based producer object for asynchat.
+ """
- def __init__(self, handle, buffersize):
- self.handle = handle
- self.buffersize = buffersize
+ def __init__(self, handle, buffersize):
+ self.handle = handle
+ self.buffersize = buffersize
- def more(self):
- return self.handle.read(self.buffersize)
+ def more(self):
+ return self.handle.read(self.buffersize)
class ServerWriteChannel(rpki.rtr.channels.PDUChannel):
- """
- Kludge to deal with ssh's habit of sometimes (compile time option)
- invoking us with two unidirectional pipes instead of one
- bidirectional socketpair. All the server logic is in the
- ServerChannel class, this class just deals with sending the
- server's output to a different file descriptor.
- """
-
- def __init__(self):
"""
- Set up stdout.
+ Kludge to deal with ssh's habit of sometimes (compile time option)
+ invoking us with two unidirectional pipes instead of one
+ bidirectional socketpair. All the server logic is in the
+ ServerChannel class, this class just deals with sending the
+ server's output to a different file descriptor.
"""
- super(ServerWriteChannel, self).__init__(root_pdu_class = PDU)
- self.init_file_dispatcher(sys.stdout.fileno())
+ def __init__(self):
+ """
+ Set up stdout.
+ """
- def readable(self):
- """
- This channel is never readable.
- """
+ super(ServerWriteChannel, self).__init__(root_pdu_class = PDU)
+ self.init_file_dispatcher(sys.stdout.fileno())
- return False
+ def readable(self):
+ """
+ This channel is never readable.
+ """
- def push_file(self, f):
- """
- Write content of a file to stream.
- """
+ return False
- try:
- self.push_with_producer(FileProducer(f, self.ac_out_buffer_size))
- except OSError, e:
- if e.errno != errno.EAGAIN:
- raise
+ def push_file(self, f):
+ """
+ Write content of a file to stream.
+ """
+ try:
+ self.push_with_producer(FileProducer(f, self.ac_out_buffer_size))
+ except OSError, e:
+ if e.errno != errno.EAGAIN:
+ raise
-class ServerChannel(rpki.rtr.channels.PDUChannel):
- """
- Server protocol engine, handles upcalls from PDUChannel to
- implement protocol logic.
- """
- def __init__(self, logger, refresh, retry, expire):
+class ServerChannel(rpki.rtr.channels.PDUChannel):
"""
- Set up stdin and stdout as connection and start listening for
- first PDU.
+ Server protocol engine, handles upcalls from PDUChannel to
+ implement protocol logic.
"""
- super(ServerChannel, self).__init__(root_pdu_class = PDU)
- self.init_file_dispatcher(sys.stdin.fileno())
- self.writer = ServerWriteChannel()
- self.logger = logger
- self.refresh = refresh
- self.retry = retry
- self.expire = expire
- self.get_serial()
- self.start_new_pdu()
-
- def writable(self):
- """
- This channel is never writable.
- """
+ def __init__(self, logger, refresh, retry, expire):
+ """
+ Set up stdin and stdout as connection and start listening for
+ first PDU.
+ """
- return False
+ super(ServerChannel, self).__init__(root_pdu_class = PDU)
+ self.init_file_dispatcher(sys.stdin.fileno())
+ self.writer = ServerWriteChannel()
+ self.logger = logger
+ self.refresh = refresh
+ self.retry = retry
+ self.expire = expire
+ self.get_serial()
+ self.start_new_pdu()
- def push(self, data):
- """
- Redirect to writer channel.
- """
+ def writable(self):
+ """
+ This channel is never writable.
+ """
- return self.writer.push(data)
+ return False
- def push_with_producer(self, producer):
- """
- Redirect to writer channel.
- """
+ def push(self, data):
+ """
+ Redirect to writer channel.
+ """
- return self.writer.push_with_producer(producer)
+ return self.writer.push(data)
- def push_pdu(self, pdu):
- """
- Redirect to writer channel.
- """
+ def push_with_producer(self, producer):
+ """
+ Redirect to writer channel.
+ """
- return self.writer.push_pdu(pdu)
+ return self.writer.push_with_producer(producer)
- def push_file(self, f):
- """
- Redirect to writer channel.
- """
+ def push_pdu(self, pdu):
+ """
+ Redirect to writer channel.
+ """
- return self.writer.push_file(f)
+ return self.writer.push_pdu(pdu)
- def deliver_pdu(self, pdu):
- """
- Handle received PDU.
- """
+ def push_file(self, f):
+ """
+ Redirect to writer channel.
+ """
- pdu.serve(self)
+ return self.writer.push_file(f)
- def get_serial(self):
- """
- Read, cache, and return current serial number, or None if we can't
- find the serial number file. The latter condition should never
- happen, but maybe we got started in server mode while the cronjob
- mode instance is still building its database.
- """
+ def deliver_pdu(self, pdu):
+ """
+ Handle received PDU.
+ """
- self.current_serial, self.current_nonce = read_current(self.version)
- return self.current_serial
+ pdu.serve(self)
- def check_serial(self):
- """
- Check for a new serial number.
- """
+ def get_serial(self):
+ """
+ Read, cache, and return current serial number, or None if we can't
+ find the serial number file. The latter condition should never
+ happen, but maybe we got started in server mode while the cronjob
+ mode instance is still building its database.
+ """
- old_serial = self.current_serial
- return old_serial != self.get_serial()
+ self.current_serial, self.current_nonce = read_current(self.version)
+ return self.current_serial
- def notify(self, data = None, force = False):
- """
- Cronjob instance kicked us: check whether our serial number has
- changed, and send a notify message if so.
+ def check_serial(self):
+ """
+ Check for a new serial number.
+ """
- We have to check rather than just blindly notifying when kicked
- because the cronjob instance has no good way of knowing which
- protocol version we're running, thus has no good way of knowing
- whether we care about a particular change set or not.
- """
+ old_serial = self.current_serial
+ return old_serial != self.get_serial()
- if force or self.check_serial():
- self.push_pdu(SerialNotifyPDU(version = self.version,
- serial = self.current_serial,
- nonce = self.current_nonce))
- else:
- self.logger.debug("Cronjob kicked me but I see no serial change, ignoring")
+ def notify(self, data = None, force = False):
+ """
+ Cronjob instance kicked us: check whether our serial number has
+ changed, and send a notify message if so.
+
+ We have to check rather than just blindly notifying when kicked
+ because the cronjob instance has no good way of knowing which
+ protocol version we're running, thus has no good way of knowing
+ whether we care about a particular change set or not.
+ """
+
+ if force or self.check_serial():
+ self.push_pdu(SerialNotifyPDU(version = self.version,
+ serial = self.current_serial,
+ nonce = self.current_nonce))
+ else:
+ self.logger.debug("Cronjob kicked me but I see no serial change, ignoring")
class KickmeChannel(asyncore.dispatcher, object):
- """
- asyncore dispatcher for the PF_UNIX socket that cronjob mode uses to
- kick servers when it's time to send notify PDUs to clients.
- """
-
- def __init__(self, server):
- asyncore.dispatcher.__init__(self) # Old-style class
- self.server = server
- self.sockname = "%s.%d" % (kickme_base, os.getpid())
- self.create_socket(socket.AF_UNIX, socket.SOCK_DGRAM)
- try:
- self.bind(self.sockname)
- os.chmod(self.sockname, 0660)
- except socket.error, e:
- self.server.logger.exception("Couldn't bind() kickme socket: %r", e)
- self.close()
- except OSError, e:
- self.server.logger.exception("Couldn't chmod() kickme socket: %r", e)
-
- def writable(self):
"""
- This socket is read-only, never writable.
+ asyncore dispatcher for the PF_UNIX socket that cronjob mode uses to
+ kick servers when it's time to send notify PDUs to clients.
"""
- return False
+ def __init__(self, server):
+ asyncore.dispatcher.__init__(self) # Old-style class
+ self.server = server
+ self.sockname = "%s.%d" % (kickme_base, os.getpid())
+ self.create_socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ try:
+ self.bind(self.sockname)
+ os.chmod(self.sockname, 0660)
+ except socket.error, e:
+ self.server.logger.exception("Couldn't bind() kickme socket: %r", e)
+ self.close()
+ except OSError, e:
+ self.server.logger.exception("Couldn't chmod() kickme socket: %r", e)
+
+ def writable(self):
+ """
+ This socket is read-only, never writable.
+ """
+
+ return False
+
+ def handle_connect(self):
+ """
+ Ignore connect events (not very useful on datagram socket).
+ """
+
+ pass
+
+ def handle_read(self):
+ """
+ Handle receipt of a datagram.
+ """
+
+ data = self.recv(512)
+ self.server.notify(data)
+
+ def cleanup(self):
+ """
+ Clean up this dispatcher's socket.
+ """
+
+ self.close()
+ try:
+ os.unlink(self.sockname)
+ except:
+ pass
- def handle_connect(self):
- """
- Ignore connect events (not very useful on datagram socket).
- """
+ def log(self, msg):
+ """
+ Intercept asyncore's logging.
+ """
- pass
+ self.server.logger.info(msg)
- def handle_read(self):
- """
- Handle receipt of a datagram.
- """
+ def log_info(self, msg, tag = "info"):
+ """
+ Intercept asyncore's logging.
+ """
- data = self.recv(512)
- self.server.notify(data)
+ self.server.logger.info("asyncore: %s: %s", tag, msg)
- def cleanup(self):
- """
- Clean up this dispatcher's socket.
- """
+ def handle_error(self):
+ """
+ Handle errors caught by asyncore main loop.
+ """
+
+ self.server.logger.exception("[Unhandled exception]")
+ self.server.logger.critical("[Exiting after unhandled exception]")
+ sys.exit(1)
- self.close()
- try:
- os.unlink(self.sockname)
- except: # pylint: disable=W0702
- pass
- def log(self, msg):
+def hostport_tag():
"""
- Intercept asyncore's logging.
+ Construct hostname/address + port when we're running under a
+ protocol we understand well enough to do that. This is all
+ kludgery. Just grit your teeth, or perhaps just close your eyes.
"""
- self.server.logger.info(msg)
+ proto = None
- def log_info(self, msg, tag = "info"):
- """
- Intercept asyncore's logging.
- """
+ if proto is None:
+ try:
+ host, port = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM).getpeername()
+ proto = "tcp"
+ except:
+ pass
+
+ if proto is None:
+ try:
+ host, port = socket.fromfd(0, socket.AF_INET6, socket.SOCK_STREAM).getpeername()[0:2]
+ proto = "tcp"
+ except:
+ pass
+
+ if proto is None:
+ try:
+ host, port = os.environ["SSH_CONNECTION"].split()[0:2]
+ proto = "ssh"
+ except:
+ pass
+
+ if proto is None:
+ try:
+ host, port = os.environ["REMOTE_HOST"], os.getenv("REMOTE_PORT")
+ proto = "ssl"
+ except:
+ pass
+
+ if proto is None:
+ return ""
+ elif not port:
+ return "/%s/%s" % (proto, host)
+ elif ":" in host:
+ return "/%s/%s.%s" % (proto, host, port)
+ else:
+ return "/%s/%s:%s" % (proto, host, port)
- self.server.logger.info("asyncore: %s: %s", tag, msg)
- def handle_error(self):
+def server_main(args):
"""
- Handle errors caught by asyncore main loop.
+ Implement the server side of the rpki-router protocol. Other than
+ one PF_UNIX socket inode, this doesn't write anything to disk, so it
+ can be run with minimal privileges. Most of the work has already
+ been done by the database generator, so all this server has to do is
+ pass the results along to a client.
"""
- self.server.logger.exception("[Unhandled exception]")
- self.server.logger.critical("[Exiting after unhandled exception]")
- sys.exit(1)
-
+ logger = logging.LoggerAdapter(logging.root, dict(connection = hostport_tag()))
-def _hostport_tag():
- """
- Construct hostname/address + port when we're running under a
- protocol we understand well enough to do that. This is all
- kludgery. Just grit your teeth, or perhaps just close your eyes.
- """
+ logger.debug("[Starting]")
- proto = None
+ if args.rpki_rtr_dir:
+ try:
+ os.chdir(args.rpki_rtr_dir)
+ except OSError, e:
+ logger.error("[Couldn't chdir(%r), exiting: %s]", args.rpki_rtr_dir, e)
+ sys.exit(1)
- if proto is None:
+ kickme = None
try:
- host, port = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM).getpeername()
- proto = "tcp"
- except: # pylint: disable=W0702
- pass
+ server = rpki.rtr.server.ServerChannel(logger = logger, refresh = args.refresh, retry = args.retry, expire = args.expire)
+ kickme = rpki.rtr.server.KickmeChannel(server = server)
+ asyncore.loop(timeout = None)
+ signal.signal(signal.SIGINT, signal.SIG_IGN) # Theorized race condition
+ except KeyboardInterrupt:
+ sys.exit(0)
+ finally:
+ signal.signal(signal.SIGINT, signal.SIG_IGN) # Observed race condition
+ if kickme is not None:
+ kickme.cleanup()
- if proto is None:
- try:
- host, port = socket.fromfd(0, socket.AF_INET6, socket.SOCK_STREAM).getpeername()[0:2]
- proto = "tcp"
- except: # pylint: disable=W0702
- pass
- if proto is None:
- try:
- host, port = os.environ["SSH_CONNECTION"].split()[0:2]
- proto = "ssh"
- except: # pylint: disable=W0702
- pass
+def listener_main(args):
+ """
+ Totally insecure TCP listener for rpki-rtr protocol. We only
+ implement this because it's all that the routers currently support.
+ In theory, we will all be running TCP-AO in the future, at which
+ point this listener will go away or become a TCP-AO listener.
+ """
- if proto is None:
- try:
- host, port = os.environ["REMOTE_HOST"], os.getenv("REMOTE_PORT")
- proto = "ssl"
- except: # pylint: disable=W0702
- pass
+ # Perhaps we should daemonize? Deal with that later.
- if proto is None:
- return ""
- elif not port:
- return "/%s/%s" % (proto, host)
- elif ":" in host:
- return "/%s/%s.%s" % (proto, host, port)
- else:
- return "/%s/%s:%s" % (proto, host, port)
+ # server_main() handles args.rpki_rtr_dir.
+ listener = None
+ try:
+ listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ listener.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
+ except:
+ if listener is not None:
+ listener.close()
+ listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ try:
+ listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ except AttributeError:
+ pass
+ listener.bind(("", args.port))
+ listener.listen(5)
+ logging.debug("[Listening on port %s]", args.port)
+ while True:
+ try:
+ s, ai = listener.accept()
+ except KeyboardInterrupt:
+ sys.exit(0)
+ logging.debug("[Received connection from %r]", ai)
+ pid = os.fork()
+ if pid == 0:
+ os.dup2(s.fileno(), 0) # pylint: disable=E1101
+ os.dup2(s.fileno(), 1) # pylint: disable=E1101
+ s.close()
+ #os.closerange(3, os.sysconf("SC_OPEN_MAX"))
+ server_main(args)
+ sys.exit()
+ else:
+ logging.debug("[Spawned server %d]", pid)
+ while True:
+ try:
+ pid, status = os.waitpid(0, os.WNOHANG)
+ if pid:
+ logging.debug("[Server %s exited with status 0x%x]", pid, status)
+ continue
+ except:
+ pass
+ break
-def server_main(args):
- """
- Implement the server side of the rpkk-router protocol. Other than
- one PF_UNIX socket inode, this doesn't write anything to disk, so it
- can be run with minimal privileges. Most of the work has already
- been done by the database generator, so all this server has to do is
- pass the results along to a client.
- """
- logger = logging.LoggerAdapter(logging.root, dict(connection = _hostport_tag()))
+def argparse_setup(subparsers):
+ """
+ Set up argparse stuff for commands in this module.
+ """
- logger.debug("[Starting]")
+ # These could have been lambdas, but doing it this way results in
+ # more useful error messages on argparse failures.
- if args.rpki_rtr_dir:
- try:
- os.chdir(args.rpki_rtr_dir)
- except OSError, e:
- logger.error("[Couldn't chdir(%r), exiting: %s]", args.rpki_rtr_dir, e)
- sys.exit(1)
-
- kickme = None
- try:
- server = rpki.rtr.server.ServerChannel(logger = logger, refresh = args.refresh, retry = args.retry, expire = args.expire)
- kickme = rpki.rtr.server.KickmeChannel(server = server)
- asyncore.loop(timeout = None)
- signal.signal(signal.SIGINT, signal.SIG_IGN) # Theorized race condition
- except KeyboardInterrupt:
- sys.exit(0)
- finally:
- signal.signal(signal.SIGINT, signal.SIG_IGN) # Observed race condition
- if kickme is not None:
- kickme.cleanup()
+ def refresh(v):
+ return rpki.rtr.pdus.valid_refresh(int(v))
+ def retry(v):
+ return rpki.rtr.pdus.valid_retry(int(v))
-def listener_main(args):
- """
- Totally insecure TCP listener for rpki-rtr protocol. We only
- implement this because it's all that the routers currently support.
- In theory, we will all be running TCP-AO in the future, at which
- point this listener will go away or become a TCP-AO listener.
- """
-
- # Perhaps we should daemonize? Deal with that later.
-
- # server_main() handles args.rpki_rtr_dir.
-
- listener = None
- try:
- listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
- listener.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
- except: # pylint: disable=W0702
- if listener is not None:
- listener.close()
- listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- try:
- listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
- except AttributeError:
- pass
- listener.bind(("", args.port))
- listener.listen(5)
- logging.debug("[Listening on port %s]", args.port)
- while True:
- try:
- s, ai = listener.accept()
- except KeyboardInterrupt:
- sys.exit(0)
- logging.debug("[Received connection from %r]", ai)
- pid = os.fork()
- if pid == 0:
- os.dup2(s.fileno(), 0) # pylint: disable=E1103
- os.dup2(s.fileno(), 1) # pylint: disable=E1103
- s.close()
- #os.closerange(3, os.sysconf("SC_OPEN_MAX"))
- server_main(args)
- sys.exit()
- else:
- logging.debug("[Spawned server %d]", pid)
- while True:
- try:
- pid, status = os.waitpid(0, os.WNOHANG) # pylint: disable=W0612
- if pid:
- logging.debug("[Server %s exited]", pid)
- continue
- except: # pylint: disable=W0702
- pass
- break
+ def expire(v):
+ return rpki.rtr.pdus.valid_expire(int(v))
+ # Some duplication of arguments here, not enough to be worth huge
+ # effort to clean up, worry about it later in any case.
-def argparse_setup(subparsers):
- """
- Set up argparse stuff for commands in this module.
- """
-
- # These could have been lambdas, but doing it this way results in
- # more useful error messages on argparse failures.
-
- def refresh(v):
- return rpki.rtr.pdus.valid_refresh(int(v))
-
- def retry(v):
- return rpki.rtr.pdus.valid_retry(int(v))
-
- def expire(v):
- return rpki.rtr.pdus.valid_expire(int(v))
-
- # Some duplication of arguments here, not enough to be worth huge
- # effort to clean up, worry about it later in any case.
-
- subparser = subparsers.add_parser("server", description = server_main.__doc__,
- help = "RPKI-RTR protocol server")
- subparser.set_defaults(func = server_main, default_log_to = "syslog")
- subparser.add_argument("--refresh", type = refresh, help = "override default refresh timer")
- subparser.add_argument("--retry", type = retry, help = "override default retry timer")
- subparser.add_argument("--expire", type = expire, help = "override default expire timer")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
-
- subparser = subparsers.add_parser("listener", description = listener_main.__doc__,
- help = "TCP listener for RPKI-RTR protocol server")
- subparser.set_defaults(func = listener_main, default_log_to = "syslog")
- subparser.add_argument("--refresh", type = refresh, help = "override default refresh timer")
- subparser.add_argument("--retry", type = retry, help = "override default retry timer")
- subparser.add_argument("--expire", type = expire, help = "override default expire timer")
- subparser.add_argument("port", type = int, help = "TCP port on which to listen")
- subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
+ subparser = subparsers.add_parser("server", description = server_main.__doc__,
+ help = "RPKI-RTR protocol server")
+ subparser.set_defaults(func = server_main, default_log_destination = "syslog")
+ subparser.add_argument("--refresh", type = refresh, help = "override default refresh timer")
+ subparser.add_argument("--retry", type = retry, help = "override default retry timer")
+ subparser.add_argument("--expire", type = expire, help = "override default expire timer")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
+
+ subparser = subparsers.add_parser("listener", description = listener_main.__doc__,
+ help = "TCP listener for RPKI-RTR protocol server")
+ subparser.set_defaults(func = listener_main, default_log_destination = "syslog")
+ subparser.add_argument("--refresh", type = refresh, help = "override default refresh timer")
+ subparser.add_argument("--retry", type = retry, help = "override default retry timer")
+ subparser.add_argument("--expire", type = expire, help = "override default expire timer")
+ subparser.add_argument("port", type = int, help = "TCP port on which to listen")
+ subparser.add_argument("rpki_rtr_dir", nargs = "?", help = "directory containing RPKI-RTR database")
diff --git a/rpki/sql.py b/rpki/sql.py
deleted file mode 100644
index 96c8d086..00000000
--- a/rpki/sql.py
+++ /dev/null
@@ -1,437 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2009-2013 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-"""
-SQL interface code.
-"""
-
-import logging
-import weakref
-
-from rpki.mysql_import import (MySQLdb, _mysql_exceptions)
-
-import rpki.x509
-import rpki.resource_set
-import rpki.sundial
-import rpki.log
-
-logger = logging.getLogger(__name__)
-
-class session(object):
- """
- SQL session layer.
- """
-
- ## @var ping_threshold
- # Timeout after which we should issue a ping command before the real
- # one. Intent is to keep the MySQL connection alive without pinging
- # before every single command.
-
- ping_threshold = rpki.sundial.timedelta(seconds = 60)
-
- def __init__(self, cfg):
-
- self.username = cfg.get("sql-username")
- self.database = cfg.get("sql-database")
- self.password = cfg.get("sql-password")
-
- self.conv = MySQLdb.converters.conversions.copy()
- self.conv.update({
- rpki.sundial.datetime : MySQLdb.converters.DateTime2literal,
- MySQLdb.converters.FIELD_TYPE.DATETIME : rpki.sundial.datetime.DateTime_or_None })
-
- self.cache = weakref.WeakValueDictionary()
- self.dirty = set()
-
- self.connect()
-
- def connect(self):
- self.db = MySQLdb.connect(user = self.username,
- db = self.database,
- passwd = self.password,
- conv = self.conv)
- self.cur = self.db.cursor()
- self.db.autocommit(True)
- self.timestamp = rpki.sundial.now()
-
- # Try this as a workaround for MySQL 5.6 UTF8 characterset
- # braindamage, in which MySQL starts rejecting ASN.1 DER because
- # it's not valid UTF-8. Twits.
- #
- # Except that it breaks MySQL 5.5, so wrap it and ignore errors. Twits ** 2.
- try:
- self.execute("charset = latin1")
- except:
- logger.info("Whacking charset to Latin1 to save MySQL 5.6 from its own confusion failed, blundering onwards")
-
- def close(self):
- if self.cur:
- self.cur.close()
- self.cur = None
- if self.db:
- self.db.close()
- self.db = None
-
- def _wrap_execute(self, func, query, args):
- try:
- now = rpki.sundial.now()
- if now > self.timestamp + self.ping_threshold:
- self.db.ping(True)
- self.timestamp = now
- return func(query, args)
- except _mysql_exceptions.MySQLError:
- if self.dirty:
- logger.warning("MySQL exception with dirty objects in SQL cache!")
- raise
-
- def execute(self, query, args = None):
- return self._wrap_execute(self.cur.execute, query, args)
-
- def executemany(self, query, args):
- return self._wrap_execute(self.cur.executemany, query, args)
-
- def fetchall(self):
- return self.cur.fetchall()
-
- def lastrowid(self):
- return self.cur.lastrowid
-
- def cache_clear(self):
- """
- Clear the SQL object cache. Shouldn't be necessary now that the
- cache uses weak references, but should be harmless.
- """
- logger.debug("Clearing SQL cache")
- self.assert_pristine()
- self.cache.clear()
-
- def assert_pristine(self):
- """
- Assert that there are no dirty objects in the cache.
- """
- assert not self.dirty, "Dirty objects in SQL cache: %s" % self.dirty
-
- def sweep(self):
- """
- Write any dirty objects out to SQL.
- """
- for s in self.dirty.copy():
- #if s.sql_cache_debug:
- logger.debug("Sweeping (%s) %r", "deleting" if s.sql_deleted else "storing", s)
- if s.sql_deleted:
- s.sql_delete()
- else:
- s.sql_store()
- self.assert_pristine()
-
-class template(object):
- """
- SQL template generator.
- """
-
- def __init__(self, table_name, index_column, *data_columns):
- """
- Build a SQL template.
- """
- type_map = dict((x[0], x[1]) for x in data_columns if isinstance(x, tuple))
- data_columns = tuple(isinstance(x, tuple) and x[0] or x for x in data_columns)
- columns = (index_column,) + data_columns
- self.table = table_name
- self.index = index_column
- self.columns = columns
- self.map = type_map
- self.select = "SELECT %s FROM %s" % (", ".join("%s.%s" % (table_name, c) for c in columns), table_name)
- self.insert = "INSERT %s (%s) VALUES (%s)" % (table_name,
- ", ".join(data_columns),
- ", ".join("%(" + s + ")s" for s in data_columns))
- self.update = "UPDATE %s SET %s WHERE %s = %%(%s)s" % (table_name,
- ", ".join(s + " = %(" + s + ")s" for s in data_columns),
- index_column,
- index_column)
- self.delete = "DELETE FROM %s WHERE %s = %%s" % (table_name, index_column)
-
-class sql_persistent(object):
- """
- Mixin for persistent class that needs to be stored in SQL.
- """
-
- ## @var sql_in_db
- # Whether this object is already in SQL or not.
-
- sql_in_db = False
-
- ## @var sql_deleted
- # Whether our cached copy of this object has been deleted.
-
- sql_deleted = False
-
- ## @var sql_debug
- # Enable logging of SQL actions
-
- sql_debug = False
-
- ## @var sql_cache_debug
- # Enable debugging of SQL cache actions
-
- sql_cache_debug = False
-
- @classmethod
- def sql_fetch(cls, gctx, id): # pylint: disable=W0622
- """
- Fetch one object from SQL, based on its primary key.
-
- Since in this one case we know that the primary index is also the
- cache key, we check for a cache hit directly in the hope of
- bypassing the SQL lookup entirely.
-
- This method is usually called via a one-line class-specific
- wrapper. As a convenience, we also accept an id of None, and just
- return None in this case.
- """
-
- if id is None:
- return None
- assert isinstance(id, (int, long)), "id should be an integer, was %r" % type(id)
- key = (cls, id)
- if key in gctx.sql.cache:
- return gctx.sql.cache[key]
- else:
- return cls.sql_fetch_where1(gctx, "%s = %%s" % cls.sql_template.index, (id,))
-
- @classmethod
- def sql_fetch_where1(cls, gctx, where, args = None, also_from = None):
- """
- Fetch one object from SQL, based on an arbitrary SQL WHERE expression.
- """
- results = cls.sql_fetch_where(gctx, where, args, also_from)
- if len(results) == 0:
- return None
- elif len(results) == 1:
- return results[0]
- else:
- raise rpki.exceptions.DBConsistancyError(
- "Database contained multiple matches for %s where %s: %r" %
- (cls.__name__, where % tuple(repr(a) for a in args), results))
-
- @classmethod
- def sql_fetch_all(cls, gctx):
- """
- Fetch all objects of this type from SQL.
- """
- return cls.sql_fetch_where(gctx, None)
-
- @classmethod
- def sql_fetch_where(cls, gctx, where, args = None, also_from = None):
- """
- Fetch objects of this type matching an arbitrary SQL WHERE expression.
- """
- if where is None:
- assert args is None and also_from is None
- if cls.sql_debug:
- logger.debug("sql_fetch_where(%r)", cls.sql_template.select)
- gctx.sql.execute(cls.sql_template.select)
- else:
- query = cls.sql_template.select
- if also_from is not None:
- query += "," + also_from
- query += " WHERE " + where
- if cls.sql_debug:
- logger.debug("sql_fetch_where(%r, %r)", query, args)
- gctx.sql.execute(query, args)
- results = []
- for row in gctx.sql.fetchall():
- key = (cls, row[0])
- if key in gctx.sql.cache:
- results.append(gctx.sql.cache[key])
- else:
- results.append(cls.sql_init(gctx, row, key))
- return results
-
- @classmethod
- def sql_init(cls, gctx, row, key):
- """
- Initialize one Python object from the result of a SQL query.
- """
- self = cls()
- self.gctx = gctx
- self.sql_decode(dict(zip(cls.sql_template.columns, row)))
- gctx.sql.cache[key] = self
- self.sql_in_db = True
- self.sql_fetch_hook()
- return self
-
- def sql_mark_dirty(self):
- """
- Mark this object as needing to be written back to SQL.
- """
- if self.sql_cache_debug and not self.sql_is_dirty:
- logger.debug("Marking %r SQL dirty", self)
- self.gctx.sql.dirty.add(self)
-
- def sql_mark_clean(self):
- """
- Mark this object as not needing to be written back to SQL.
- """
- if self.sql_cache_debug and self.sql_is_dirty:
- logger.debug("Marking %r SQL clean", self)
- self.gctx.sql.dirty.discard(self)
-
- @property
- def sql_is_dirty(self):
- """
- Query whether this object needs to be written back to SQL.
- """
- return self in self.gctx.sql.dirty
-
- def sql_mark_deleted(self):
- """
- Mark this object as needing to be deleted in SQL.
- """
- self.sql_deleted = True
- self.sql_mark_dirty()
-
- def sql_store(self):
- """
- Store this object to SQL.
- """
- args = self.sql_encode()
- if not self.sql_in_db:
- if self.sql_debug:
- logger.debug("sql_store(%r, %r)", self.sql_template.insert, args)
- self.gctx.sql.execute(self.sql_template.insert, args)
- setattr(self, self.sql_template.index, self.gctx.sql.lastrowid())
- self.gctx.sql.cache[(self.__class__, self.gctx.sql.lastrowid())] = self
- self.sql_insert_hook()
- else:
- if self.sql_debug:
- logger.debug("sql_store(%r, %r)", self.sql_template.update, args)
- self.gctx.sql.execute(self.sql_template.update, args)
- self.sql_update_hook()
- key = (self.__class__, getattr(self, self.sql_template.index))
- assert key in self.gctx.sql.cache and self.gctx.sql.cache[key] == self
- self.sql_mark_clean()
- self.sql_in_db = True
-
- def sql_delete(self):
- """
- Delete this object from SQL.
- """
- if self.sql_in_db:
- id = getattr(self, self.sql_template.index) # pylint: disable=W0622
- if self.sql_debug:
- logger.debug("sql_delete(%r, %r)", self.sql_template.delete, id)
- self.sql_delete_hook()
- self.gctx.sql.execute(self.sql_template.delete, (id,))
- key = (self.__class__, id)
- if self.gctx.sql.cache.get(key) == self:
- del self.gctx.sql.cache[key]
- self.sql_in_db = False
- self.sql_mark_clean()
-
- def sql_encode(self):
- """
- Convert object attributes into a dict for use with canned SQL
- queries. This is a default version that assumes a one-to-one
- mapping between column names in SQL and attribute names in Python.
- If you need something fancier, override this.
- """
- d = dict((a, getattr(self, a, None)) for a in self.sql_template.columns)
- for i in self.sql_template.map:
- if d.get(i) is not None:
- d[i] = self.sql_template.map[i].to_sql(d[i])
- return d
-
- def sql_decode(self, vals):
- """
- Initialize an object with values returned by self.sql_fetch().
- This is a default version that assumes a one-to-one mapping
- between column names in SQL and attribute names in Python. If you
- need something fancier, override this.
- """
- for a in self.sql_template.columns:
- if vals.get(a) is not None and a in self.sql_template.map:
- setattr(self, a, self.sql_template.map[a].from_sql(vals[a]))
- else:
- setattr(self, a, vals[a])
-
- def sql_fetch_hook(self):
- """
- Customization hook.
- """
- pass
-
- def sql_insert_hook(self):
- """
- Customization hook.
- """
- pass
-
- def sql_update_hook(self):
- """
- Customization hook.
- """
- self.sql_delete_hook()
- self.sql_insert_hook()
-
- def sql_delete_hook(self):
- """
- Customization hook.
- """
- pass
-
-
-def cache_reference(func):
- """
- Decorator for use with property methods which just do an SQL lookup based on an ID.
- Check for an existing reference to the object, just return that if we find it,
- otherwise perform the SQL lookup.
-
- Not 100% certain this is a good idea, but I //think// it should work well with the
- current weak reference SQL cache, so long as we create no circular references.
- So don't do that.
- """
-
- attr_name = "_" + func.__name__
-
- def wrapped(self):
- try:
- value = getattr(self, attr_name)
- assert value is not None
- except AttributeError:
- value = func(self)
- if value is not None:
- setattr(self, attr_name, value)
- return value
-
- wrapped.__name__ = func.__name__
- wrapped.__doc__ = func.__doc__
- wrapped.__dict__.update(func.__dict__)
-
- return wrapped
diff --git a/rpki/sql_schemas.py b/rpki/sql_schemas.py
deleted file mode 100644
index 07037970..00000000
--- a/rpki/sql_schemas.py
+++ /dev/null
@@ -1,319 +0,0 @@
-# Automatically generated, do not edit.
-
-## @var rpkid
-## SQL schema rpkid
-rpkid = '''-- $Id: rpkid.sql 5845 2014-05-29 22:31:15Z sra $
-
--- Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- Copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- SQL objects needed by the RPKI engine (rpkid.py).
-
--- DROP TABLE commands must be in correct (reverse dependency) order
--- to satisfy FOREIGN KEY constraints.
-
-DROP TABLE IF EXISTS ee_cert;
-DROP TABLE IF EXISTS ghostbuster;
-DROP TABLE IF EXISTS roa_prefix;
-DROP TABLE IF EXISTS roa;
-DROP TABLE IF EXISTS revoked_cert;
-DROP TABLE IF EXISTS child_cert;
-DROP TABLE IF EXISTS child;
-DROP TABLE IF EXISTS ca_detail;
-DROP TABLE IF EXISTS ca;
-DROP TABLE IF EXISTS parent;
-DROP TABLE IF EXISTS repository;
-DROP TABLE IF EXISTS bsc;
-DROP TABLE IF EXISTS self;
-
-CREATE TABLE self (
- self_id SERIAL NOT NULL,
- self_handle VARCHAR(255) NOT NULL,
- use_hsm BOOLEAN NOT NULL DEFAULT FALSE,
- crl_interval BIGINT UNSIGNED,
- regen_margin BIGINT UNSIGNED,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- PRIMARY KEY (self_id),
- UNIQUE (self_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE bsc (
- bsc_id SERIAL NOT NULL,
- bsc_handle VARCHAR(255) NOT NULL,
- private_key_id LONGBLOB,
- pkcs10_request LONGBLOB,
- hash_alg ENUM ('sha256'),
- signing_cert LONGBLOB,
- signing_cert_crl LONGBLOB,
- self_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (bsc_id),
- CONSTRAINT bsc_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, bsc_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE repository (
- repository_id SERIAL NOT NULL,
- repository_handle VARCHAR(255) NOT NULL,
- peer_contact_uri TEXT,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- bsc_id BIGINT UNSIGNED NOT NULL,
- self_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (repository_id),
- CONSTRAINT repository_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT repository_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- UNIQUE (self_id, repository_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE parent (
- parent_id SERIAL NOT NULL,
- parent_handle VARCHAR(255) NOT NULL,
- bpki_cms_cert LONGBLOB,
- bpki_cms_glue LONGBLOB,
- peer_contact_uri TEXT,
- sia_base TEXT,
- sender_name TEXT,
- recipient_name TEXT,
- last_cms_timestamp DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- bsc_id BIGINT UNSIGNED NOT NULL,
- repository_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (parent_id),
- CONSTRAINT parent_repository_id
- FOREIGN KEY (repository_id) REFERENCES repository (repository_id) ON DELETE CASCADE,
- CONSTRAINT parent_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- CONSTRAINT parent_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, parent_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE ca (
- ca_id SERIAL NOT NULL,
- last_crl_sn BIGINT UNSIGNED NOT NULL,
- last_manifest_sn BIGINT UNSIGNED NOT NULL,
- next_manifest_update DATETIME,
- next_crl_update DATETIME,
- last_issued_sn BIGINT UNSIGNED NOT NULL,
- sia_uri TEXT,
- parent_resource_class TEXT,
- parent_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ca_id),
- CONSTRAINT ca_parent_id
- FOREIGN KEY (parent_id) REFERENCES parent (parent_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ca_detail (
- ca_detail_id SERIAL NOT NULL,
- public_key LONGBLOB,
- private_key_id LONGBLOB,
- latest_crl LONGBLOB,
- crl_published DATETIME,
- latest_ca_cert LONGBLOB,
- manifest_private_key_id LONGBLOB,
- manifest_public_key LONGBLOB,
- latest_manifest_cert LONGBLOB,
- latest_manifest LONGBLOB,
- manifest_published DATETIME,
- state ENUM ('pending', 'active', 'deprecated', 'revoked') NOT NULL,
- ca_cert_uri TEXT,
- ca_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ca_detail_id),
- CONSTRAINT ca_detail_ca_id
- FOREIGN KEY (ca_id) REFERENCES ca (ca_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE child (
- child_id SERIAL NOT NULL,
- child_handle VARCHAR(255) NOT NULL,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- bsc_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (child_id),
- CONSTRAINT child_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- CONSTRAINT child_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, child_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE child_cert (
- child_cert_id SERIAL NOT NULL,
- cert LONGBLOB NOT NULL,
- published DATETIME,
- ski TINYBLOB NOT NULL,
- child_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (child_cert_id),
- CONSTRAINT child_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE,
- CONSTRAINT child_cert_child_id
- FOREIGN KEY (child_id) REFERENCES child (child_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE revoked_cert (
- revoked_cert_id SERIAL NOT NULL,
- serial BIGINT UNSIGNED NOT NULL,
- revoked DATETIME NOT NULL,
- expires DATETIME NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (revoked_cert_id),
- CONSTRAINT revoked_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE roa (
- roa_id SERIAL NOT NULL,
- asn BIGINT UNSIGNED NOT NULL,
- cert LONGBLOB NOT NULL,
- roa LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (roa_id),
- CONSTRAINT roa_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT roa_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE roa_prefix (
- prefix VARCHAR(40) NOT NULL,
- prefixlen TINYINT UNSIGNED NOT NULL,
- max_prefixlen TINYINT UNSIGNED NOT NULL,
- version TINYINT UNSIGNED NOT NULL,
- roa_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (roa_id, prefix, prefixlen, max_prefixlen),
- CONSTRAINT roa_prefix_roa_id
- FOREIGN KEY (roa_id) REFERENCES roa (roa_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ghostbuster (
- ghostbuster_id SERIAL NOT NULL,
- vcard LONGBLOB NOT NULL,
- cert LONGBLOB NOT NULL,
- ghostbuster LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ghostbuster_id),
- CONSTRAINT ghostbuster_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT ghostbuster_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ee_cert (
- ee_cert_id SERIAL NOT NULL,
- ski BINARY(20) NOT NULL,
- cert LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ee_cert_id),
- CONSTRAINT ee_cert_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT ee_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
--- Local Variables:
--- indent-tabs-mode: nil
--- End:
-'''
-
-## @var pubd
-## SQL schema pubd
-pubd = '''-- $Id: pubd.sql 5757 2014-04-05 22:42:12Z sra $
-
--- Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- Copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- SQL objects needed by pubd.py.
-
--- The config table is weird because we're really only using it
--- to store one BPKI CRL, but putting this here lets us use a lot of
--- existing machinery and the alternatives are whacky in other ways.
-
-DROP TABLE IF EXISTS client;
-DROP TABLE IF EXISTS config;
-
-CREATE TABLE config (
- config_id SERIAL NOT NULL,
- bpki_crl LONGBLOB,
- PRIMARY KEY (config_id)
-) ENGINE=InnoDB;
-
-CREATE TABLE client (
- client_id SERIAL NOT NULL,
- client_handle VARCHAR(255) NOT NULL,
- base_uri TEXT,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- PRIMARY KEY (client_id),
- UNIQUE (client_handle)
-) ENGINE=InnoDB;
-
--- Local Variables:
--- indent-tabs-mode: nil
--- End:
-'''
-
diff --git a/rpki/sundial.py b/rpki/sundial.py
index 7be122c8..b788940d 100644
--- a/rpki/sundial.py
+++ b/rpki/sundial.py
@@ -48,242 +48,289 @@ import datetime as pydatetime
import re
def now():
- """
- Get current timestamp.
- """
- return datetime.utcnow()
-
-class ParseFailure(Exception):
- """
- Parse failure constructing timedelta.
- """
-
-class datetime(pydatetime.datetime):
- """
- RPKI extensions to standard datetime.datetime class. All work here
- is in UTC, so we use naive datetime objects.
- """
-
- def totimestamp(self):
"""
- Convert to seconds from epoch (like time.time()). Conversion
- method is a bit silly, but avoids time module timezone whackiness.
+ Get current timestamp.
"""
- return int(self.strftime("%s"))
- @classmethod
- def fromXMLtime(cls, x):
+ return datetime.utcnow()
+
+class ParseFailure(Exception):
"""
- Convert from XML time representation.
+ Parse failure constructing timedelta.
"""
- if x is None:
- return None
- else:
- return cls.strptime(x, "%Y-%m-%dT%H:%M:%SZ")
- def toXMLtime(self):
+class datetime(pydatetime.datetime):
"""
- Convert to XML time representation.
+ RPKI extensions to standard datetime.datetime class. All work here
+ is in UTC, so we use naive datetime objects.
"""
- return self.strftime("%Y-%m-%dT%H:%M:%SZ")
- def __str__(self):
- return self.toXMLtime()
+ def totimestamp(self):
+ """
+ Convert to seconds from epoch (like time.time()). Conversion
+ method is a bit silly, but avoids time module timezone whackiness.
+ """
- @classmethod
- def from_datetime(cls, x):
- """
- Convert a datetime.datetime object into this subclass. This is
- whacky due to the weird constructors for datetime.
- """
- return cls.combine(x.date(), x.time())
+ return int(self.strftime("%s"))
- def to_datetime(self):
- """
- Convert to a datetime.datetime object. In most cases this
- shouldn't be necessary, but convincing SQL interfaces to use
- subclasses of datetime can be hard.
- """
- return pydatetime.datetime(year = self.year, month = self.month, day = self.day,
- hour = self.hour, minute = self.minute, second = self.second,
- microsecond = 0, tzinfo = None)
+ @classmethod
+ def fromXMLtime(cls, x):
+ """
+ Convert from XML time representation.
+ """
+ if x is None:
+ return None
+ else:
+ return cls.strptime(x, "%Y-%m-%dT%H:%M:%SZ")
+
+ def toXMLtime(self):
+ """
+ Convert to XML time representation.
+ """
- @classmethod
- def fromOpenSSL(cls, x):
- """
- Convert from the format OpenSSL's command line tool uses into this
- subclass. May require rewriting if we run into locale problems.
- """
- if x.startswith("notBefore=") or x.startswith("notAfter="):
- x = x.partition("=")[2]
- return cls.strptime(x, "%b %d %H:%M:%S %Y GMT")
+ return self.strftime("%Y-%m-%dT%H:%M:%SZ")
- @classmethod
- def from_sql(cls, x):
- """
- Convert from SQL storage format.
- """
- return cls.from_datetime(x)
+ def __str__(self):
+ return self.toXMLtime()
- def to_sql(self):
- """
- Convert to SQL storage format.
- """
- return self.to_datetime()
+ @classmethod
+ def from_datetime(cls, x):
+ """
+ Convert a datetime.datetime object into this subclass. This is
+ whacky due to the weird constructors for datetime.
+ """
- def later(self, other):
- """
- Return the later of two timestamps.
- """
- return other if other > self else self
+ return cls.combine(x.date(), x.time())
- def earlier(self, other):
- """
- Return the earlier of two timestamps.
- """
- return other if other < self else self
+ def to_datetime(self):
+ """
+ Convert to a datetime.datetime object. In most cases this
+ shouldn't be necessary, but convincing SQL interfaces to use
+ subclasses of datetime can be hard.
+ """
- def __add__(self, y): return _cast(pydatetime.datetime.__add__(self, y))
- def __radd__(self, y): return _cast(pydatetime.datetime.__radd__(self, y))
- def __rsub__(self, y): return _cast(pydatetime.datetime.__rsub__(self, y))
- def __sub__(self, y): return _cast(pydatetime.datetime.__sub__(self, y))
+ return pydatetime.datetime(year = self.year, month = self.month, day = self.day,
+ hour = self.hour, minute = self.minute, second = self.second,
+ microsecond = 0, tzinfo = None)
- @classmethod
- def DateTime_or_None(cls, s):
- """
- MySQLdb converter. Parse as this class if we can, let the default
- MySQLdb DateTime_or_None() converter deal with failure cases.
- """
- for sep in " T":
- d, _, t = s.partition(sep) # pylint: disable=W0612
- if t:
- try:
- return cls(*[int(x) for x in d.split("-") + t.split(":")])
- except: # pylint: disable=W0702
- break
+ @classmethod
+ def fromOpenSSL(cls, x):
+ """
+ Convert from the format OpenSSL's command line tool uses into this
+ subclass. May require rewriting if we run into locale problems.
+ """
+
+ if x.startswith("notBefore=") or x.startswith("notAfter="):
+ x = x.partition("=")[2]
+ return cls.strptime(x, "%b %d %H:%M:%S %Y GMT")
+
+ @classmethod
+ def from_sql(cls, x):
+ """
+ Convert from SQL storage format.
+ """
+
+ return cls.from_datetime(x)
+
+ def to_sql(self):
+ """
+ Convert to SQL storage format.
+ """
+
+ return self.to_datetime()
+
+ def later(self, other):
+ """
+ Return the later of two timestamps.
+ """
+
+ return other if other > self else self
+
+ def earlier(self, other):
+ """
+ Return the earlier of two timestamps.
+ """
+
+ return other if other < self else self
+
+ def __add__(self, y):
+ return _cast(pydatetime.datetime.__add__(self, y))
+
+ def __radd__(self, y):
+ return _cast(pydatetime.datetime.__radd__(self, y))
+
+ def __rsub__(self, y):
+ return _cast(pydatetime.datetime.__rsub__(self, y))
- from rpki.mysql_import import MySQLdb
- return MySQLdb.times.DateTime_or_None(s)
+ def __sub__(self, y):
+ return _cast(pydatetime.datetime.__sub__(self, y))
+
+ @classmethod
+ def DateTime_or_None(cls, s):
+ """
+ MySQLdb converter. Parse as this class if we can, let the default
+ MySQLdb DateTime_or_None() converter deal with failure cases.
+ """
+
+ for sep in " T":
+ d, _, t = s.partition(sep) # pylint: disable=W0612
+ if t:
+ try:
+ return cls(*[int(x) for x in d.split("-") + t.split(":")])
+ except:
+ break
+
+ from rpki.mysql_import import MySQLdb
+ return MySQLdb.times.DateTime_or_None(s)
class timedelta(pydatetime.timedelta):
- """
- Timedelta with text parsing. This accepts two input formats:
-
- - A simple integer, indicating a number of seconds.
-
- - A string of the form "uY vW wD xH yM zS" where u, v, w, x, y, and z
- are integers and Y, W, D, H, M, and S indicate years, weeks, days,
- hours, minutes, and seconds. All of the fields are optional, but
- at least one must be specified. Eg,"3D4H" means "three days plus
- four hours".
-
- There is no "months" format, because the definition of a month is too
- fuzzy to be useful (what day is six months from August 30th?)
-
- Similarly, the "years" conversion may produce surprising results, as
- "one year" in conventional English does not refer to a fixed interval
- but rather a fixed (and in some cases undefined) offset within the
- Gregorian calendar (what day is one year from February 29th?) 1Y as
- implemented by this code refers to a specific number of seconds.
- If you mean 365 days or 52 weeks, say that instead.
- """
-
- ## @var regexp
- # Hideously ugly regular expression to parse the complex text form.
- # Tags are intended for use with re.MatchObject.groupdict() and map
- # directly to the keywords expected by the timedelta constructor.
-
- regexp = re.compile("\\s*".join(("^",
- "(?:(?P<years>\\d+)Y)?",
- "(?:(?P<weeks>\\d+)W)?",
- "(?:(?P<days>\\d+)D)?",
- "(?:(?P<hours>\\d+)H)?",
- "(?:(?P<minutes>\\d+)M)?",
- "(?:(?P<seconds>\\d+)S)?",
- "$")),
- re.I)
-
- ## @var years_to_seconds
- # Conversion factor from years to seconds (value furnished by the
- # "units" program).
-
- years_to_seconds = 31556926
-
- @classmethod
- def parse(cls, arg):
- """
- Parse text into a timedelta object.
"""
- if not isinstance(arg, str):
- return cls(seconds = arg)
- elif arg.isdigit():
- return cls(seconds = int(arg))
- else:
- match = cls.regexp.match(arg)
- if match:
- #return cls(**dict((k, int(v)) for (k, v) in match.groupdict().items() if v is not None))
- d = match.groupdict("0")
- for k, v in d.iteritems():
- d[k] = int(v)
- d["days"] += d.pop("weeks") * 7
- d["seconds"] += d.pop("years") * cls.years_to_seconds
- return cls(**d)
- else:
- raise ParseFailure("Couldn't parse timedelta %r" % (arg,))
-
- def convert_to_seconds(self):
- """
- Convert a timedelta interval to seconds.
+ Timedelta with text parsing. This accepts two input formats:
+
+ - A simple integer, indicating a number of seconds.
+
+ - A string of the form "uY vW wD xH yM zS" where u, v, w, x, y, and z
+ are integers and Y, W, D, H, M, and S indicate years, weeks, days,
+ hours, minutes, and seconds. All of the fields are optional, but
+ at least one must be specified. Eg,"3D4H" means "three days plus
+ four hours".
+
+ There is no "months" format, because the definition of a month is too
+ fuzzy to be useful (what day is six months from August 30th?)
+
+ Similarly, the "years" conversion may produce surprising results, as
+ "one year" in conventional English does not refer to a fixed interval
+ but rather a fixed (and in some cases undefined) offset within the
+ Gregorian calendar (what day is one year from February 29th?) 1Y as
+ implemented by this code refers to a specific number of seconds.
+ If you mean 365 days or 52 weeks, say that instead.
"""
- return self.days * 24 * 60 * 60 + self.seconds
- @classmethod
- def fromtimedelta(cls, x):
+ ## @var regexp
+ # Hideously ugly regular expression to parse the complex text form.
+ # Tags are intended for use with re.MatchObject.groupdict() and map
+ # directly to the keywords expected by the timedelta constructor.
+
+ regexp = re.compile("\\s*".join(("^",
+ "(?:(?P<years>\\d+)Y)?",
+ "(?:(?P<weeks>\\d+)W)?",
+ "(?:(?P<days>\\d+)D)?",
+ "(?:(?P<hours>\\d+)H)?",
+ "(?:(?P<minutes>\\d+)M)?",
+ "(?:(?P<seconds>\\d+)S)?",
+ "$")),
+ re.I)
+
+ ## @var years_to_seconds
+ # Conversion factor from years to seconds (value furnished by the
+ # "units" program).
+
+ years_to_seconds = 31556926
+
+ @classmethod
+ def parse(cls, arg):
+ """
+ Parse text into a timedelta object.
+ """
+
+ if not isinstance(arg, (str, unicode)):
+ return cls(seconds = arg)
+ elif arg.isdigit():
+ return cls(seconds = int(arg))
+ else:
+ match = cls.regexp.match(arg)
+ if match:
+ #return cls(**dict((k, int(v)) for (k, v) in match.groupdict().items() if v is not None))
+ d = match.groupdict("0")
+ for k, v in d.iteritems():
+ d[k] = int(v)
+ d["days"] += d.pop("weeks") * 7
+ d["seconds"] += d.pop("years") * cls.years_to_seconds
+ return cls(**d)
+ else:
+ raise ParseFailure("Couldn't parse timedelta %r" % (arg,))
+
+ def convert_to_seconds(self):
+ """
+ Convert a timedelta interval to seconds.
+ """
+
+ return self.days * 24 * 60 * 60 + self.seconds
+
+ @classmethod
+ def fromtimedelta(cls, x):
+ """
+ Convert a datetime.timedelta object into this subclass.
+ """
+
+ return cls(days = x.days, seconds = x.seconds, microseconds = x.microseconds)
+
+ def __abs__(self):
+ return _cast(pydatetime.timedelta.__abs__(self))
+
+ def __add__(self, x):
+ return _cast(pydatetime.timedelta.__add__(self, x))
+
+ def __div__(self, x):
+ return _cast(pydatetime.timedelta.__div__(self, x))
+
+ def __floordiv__(self, x):
+ return _cast(pydatetime.timedelta.__floordiv__(self, x))
+
+ def __mul__(self, x):
+ return _cast(pydatetime.timedelta.__mul__(self, x))
+
+ def __neg__(self):
+ return _cast(pydatetime.timedelta.__neg__(self))
+
+ def __pos__(self):
+ return _cast(pydatetime.timedelta.__pos__(self))
+
+ def __radd__(self, x):
+ return _cast(pydatetime.timedelta.__radd__(self, x))
+
+ def __rdiv__(self, x):
+ return _cast(pydatetime.timedelta.__rdiv__(self, x))
+
+ def __rfloordiv__(self, x):
+ return _cast(pydatetime.timedelta.__rfloordiv__(self, x))
+
+ def __rmul__(self, x):
+ return _cast(pydatetime.timedelta.__rmul__(self, x))
+
+ def __rsub__(self, x):
+ return _cast(pydatetime.timedelta.__rsub__(self, x))
+
+ def __sub__(self, x):
+ return _cast(pydatetime.timedelta.__sub__(self, x))
+
+def _cast(x):
"""
- Convert a datetime.timedelta object into this subclass.
+ Cast result of arithmetic operations back into correct subtype.
"""
- return cls(days = x.days, seconds = x.seconds, microseconds = x.microseconds)
-
- def __abs__(self): return _cast(pydatetime.timedelta.__abs__(self))
- def __add__(self, x): return _cast(pydatetime.timedelta.__add__(self, x))
- def __div__(self, x): return _cast(pydatetime.timedelta.__div__(self, x))
- def __floordiv__(self, x): return _cast(pydatetime.timedelta.__floordiv__(self, x))
- def __mul__(self, x): return _cast(pydatetime.timedelta.__mul__(self, x))
- def __neg__(self): return _cast(pydatetime.timedelta.__neg__(self))
- def __pos__(self): return _cast(pydatetime.timedelta.__pos__(self))
- def __radd__(self, x): return _cast(pydatetime.timedelta.__radd__(self, x))
- def __rdiv__(self, x): return _cast(pydatetime.timedelta.__rdiv__(self, x))
- def __rfloordiv__(self, x): return _cast(pydatetime.timedelta.__rfloordiv__(self, x))
- def __rmul__(self, x): return _cast(pydatetime.timedelta.__rmul__(self, x))
- def __rsub__(self, x): return _cast(pydatetime.timedelta.__rsub__(self, x))
- def __sub__(self, x): return _cast(pydatetime.timedelta.__sub__(self, x))
-def _cast(x):
- """
- Cast result of arithmetic operations back into correct subtype.
- """
- if isinstance(x, pydatetime.datetime):
- return datetime.from_datetime(x)
- if isinstance(x, pydatetime.timedelta):
- return timedelta.fromtimedelta(x)
- return x
+ if isinstance(x, pydatetime.datetime):
+ return datetime.from_datetime(x)
+ if isinstance(x, pydatetime.timedelta):
+ return timedelta.fromtimedelta(x)
+ return x
if __name__ == "__main__":
- def test(t):
- print
- print "str: ", t
- print "repr: ", repr(t)
- print "seconds since epoch:", t.strftime("%s")
- print "XMLtime: ", t.toXMLtime()
- print
+ def test(t):
+ print
+ print "str: ", t
+ print "repr: ", repr(t)
+ print "seconds since epoch:", t.strftime("%s")
+ print "XMLtime: ", t.toXMLtime()
+ print
- print
- print "Testing time conversion routines"
- test(now())
- test(now() + timedelta(days = 30))
- test(now() + timedelta.parse("3d5s"))
- test(now() + timedelta.parse(" 3d 5s "))
- test(now() + timedelta.parse("1y3d5h"))
+ print
+ print "Testing time conversion routines"
+ test(now())
+ test(now() + timedelta(days = 30))
+ test(now() + timedelta.parse("3d5s"))
+ test(now() + timedelta.parse(" 3d 5s "))
+ test(now() + timedelta.parse("1y3d5h"))
diff --git a/rpki/up_down.py b/rpki/up_down.py
index 5339e9a7..e2292efb 100644
--- a/rpki/up_down.py
+++ b/rpki/up_down.py
@@ -21,520 +21,78 @@
RPKI "up-down" protocol.
"""
-import base64
import logging
-import lxml.etree
import rpki.resource_set
import rpki.x509
import rpki.exceptions
import rpki.log
-import rpki.xml_utils
import rpki.relaxng
+from lxml.etree import SubElement, tostring as ElementToString
+
logger = logging.getLogger(__name__)
-xmlns = rpki.relaxng.up_down.xmlns
-nsmap = rpki.relaxng.up_down.nsmap
+xmlns = rpki.relaxng.up_down.xmlns
+nsmap = rpki.relaxng.up_down.nsmap
+version = "1"
## @var content_type
# MIME content type to use when sending up-down queries.
-#content_type = "application/rpki-updown"
-content_type = "application/x-rpki"
+content_type = "application/rpki-updown"
+#content_type = "application/x-rpki"
## @var allowed_content_types
# MIME content types which we consider acceptable for incoming up-down
# queries.
allowed_content_types = ("application/rpki-updown", "application/x-rpki")
-class base_elt(object):
- """
- Generic PDU object.
-
- Virtual class, just provides some default methods.
- """
-
- def startElement(self, stack, name, attrs):
- """
- Ignore startElement() if there's no specific handler.
-
- Some elements have no attributes and we only care about their
- text content.
- """
- pass
-
- def endElement(self, stack, name, text):
- """
- Ignore endElement() if there's no specific handler.
-
- If we don't need to do anything else, just pop the stack.
- """
- stack.pop()
+## @var enforce_strict_up_down_xml_sender
+# Enforce strict checking of XML "sender" field in up-down protocol
- def make_elt(self, name, *attrs):
- """
- Construct a element, copying over a set of attributes.
- """
- elt = lxml.etree.Element(xmlns + name, nsmap = nsmap)
- for key in attrs:
- val = getattr(self, key, None)
- if val is not None:
- elt.set(key, str(val))
- return elt
-
- def make_b64elt(self, elt, name, value):
- """
- Construct a sub-element with Base64 text content.
- """
- if value is not None and not value.empty():
- lxml.etree.SubElement(elt, xmlns + name, nsmap = nsmap).text = value.get_Base64()
+enforce_strict_up_down_xml_sender = False
- def serve_pdu(self, q_msg, r_msg, child, callback, errback):
- """
- Default PDU handler to catch unexpected types.
- """
- raise rpki.exceptions.BadQuery("Unexpected query type %s" % q_msg.type)
+tag_certificate = xmlns + "certificate"
+tag_class = xmlns + "class"
+tag_description = xmlns + "description"
+tag_issuer = xmlns + "issuer"
+tag_key = xmlns + "key"
+tag_message = xmlns + "message"
+tag_request = xmlns + "request"
+tag_status = xmlns + "status"
- def check_response(self):
- """
- Placeholder for response checking.
- """
- pass
class multi_uri(list):
- """
- Container for a set of URIs.
- """
-
- def __init__(self, ini):
- """
- Initialize a set of URIs, which includes basic some syntax checking.
- """
- list.__init__(self)
- if isinstance(ini, (list, tuple)):
- self[:] = ini
- elif isinstance(ini, str):
- self[:] = ini.split(",")
- for s in self:
- if s.strip() != s or "://" not in s:
- raise rpki.exceptions.BadURISyntax("Bad URI \"%s\"" % s)
- else:
- raise TypeError
-
- def __str__(self):
"""
- Convert a multi_uri back to a string representation.
+ Container for a set of URIs. This probably could be simplified.
"""
- return ",".join(self)
- def rsync(self):
- """
- Find first rsync://... URI in self.
- """
- for s in self:
- if s.startswith("rsync://"):
- return s
- return None
+ def __init__(self, ini):
+ list.__init__(self)
+ if isinstance(ini, (list, tuple)):
+ self[:] = ini
+ elif isinstance(ini, str):
+ self[:] = ini.split(",")
+ for s in self:
+ if s.strip() != s or "://" not in s:
+ raise rpki.exceptions.BadURISyntax("Bad URI \"%s\"" % s)
+ else:
+ raise TypeError
-class certificate_elt(base_elt):
- """
- Up-Down protocol representation of an issued certificate.
- """
+ def __str__(self):
+ return ",".join(self)
- def startElement(self, stack, name, attrs):
- """
- Handle attributes of <certificate/> element.
- """
- assert name == "certificate", "Unexpected name %s, stack %s" % (name, stack)
- self.cert_url = multi_uri(attrs["cert_url"])
- self.req_resource_set_as = rpki.resource_set.resource_set_as(attrs.get("req_resource_set_as"))
- self.req_resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs.get("req_resource_set_ipv4"))
- self.req_resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs.get("req_resource_set_ipv6"))
+ def rsync(self):
+ """
+ Find first rsync://... URI in self.
+ """
- def endElement(self, stack, name, text):
- """
- Handle text content of a <certificate/> element.
- """
- assert name == "certificate", "Unexpected name %s, stack %s" % (name, stack)
- self.cert = rpki.x509.X509(Base64 = text)
- stack.pop()
+ for s in self:
+ if s.startswith("rsync://"):
+ return s
+ return None
- def toXML(self):
- """
- Generate a <certificate/> element.
- """
- elt = self.make_elt("certificate", "cert_url",
- "req_resource_set_as", "req_resource_set_ipv4", "req_resource_set_ipv6")
- elt.text = self.cert.get_Base64()
- return elt
-
-class class_elt(base_elt):
- """
- Up-Down protocol representation of a resource class.
- """
-
- issuer = None
-
- def __init__(self):
- """
- Initialize class_elt.
- """
- base_elt.__init__(self)
- self.certs = []
-
- def startElement(self, stack, name, attrs):
- """
- Handle <class/> elements and their children.
- """
- if name == "certificate":
- cert = certificate_elt()
- self.certs.append(cert)
- stack.append(cert)
- cert.startElement(stack, name, attrs)
- elif name != "issuer":
- assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
- self.class_name = attrs["class_name"]
- self.cert_url = multi_uri(attrs["cert_url"])
- self.suggested_sia_head = attrs.get("suggested_sia_head")
- self.resource_set_as = rpki.resource_set.resource_set_as(attrs["resource_set_as"])
- self.resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs["resource_set_ipv4"])
- self.resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs["resource_set_ipv6"])
- self.resource_set_notafter = rpki.sundial.datetime.fromXMLtime(attrs.get("resource_set_notafter"))
-
- def endElement(self, stack, name, text):
- """
- Handle <class/> elements and their children.
- """
- if name == "issuer":
- self.issuer = rpki.x509.X509(Base64 = text)
- else:
- assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
- def toXML(self):
- """
- Generate a <class/> element.
- """
- elt = self.make_elt("class", "class_name", "cert_url", "resource_set_as",
- "resource_set_ipv4", "resource_set_ipv6",
- "resource_set_notafter", "suggested_sia_head")
- elt.extend([i.toXML() for i in self.certs])
- self.make_b64elt(elt, "issuer", self.issuer)
- return elt
-
- def to_resource_bag(self):
- """
- Build a resource_bag from from this <class/> element.
- """
- return rpki.resource_set.resource_bag(self.resource_set_as,
- self.resource_set_ipv4,
- self.resource_set_ipv6,
- self.resource_set_notafter)
-
- def from_resource_bag(self, bag):
- """
- Set resources of this class element from a resource_bag.
- """
- self.resource_set_as = bag.asn
- self.resource_set_ipv4 = bag.v4
- self.resource_set_ipv6 = bag.v6
- self.resource_set_notafter = bag.valid_until
-
-class list_pdu(base_elt):
- """
- Up-Down protocol "list" PDU.
- """
-
- def toXML(self):
- """Generate (empty) payload of "list" PDU."""
- return []
-
- def serve_pdu(self, q_msg, r_msg, child, callback, errback):
- """
- Serve one "list" PDU.
- """
-
- def handle(irdb_resources):
-
- r_msg.payload = list_response_pdu()
-
- if irdb_resources.valid_until < rpki.sundial.now():
- logger.debug("Child %s's resources expired %s", child.child_handle, irdb_resources.valid_until)
- else:
- for parent in child.parents:
- for ca in parent.cas:
- ca_detail = ca.active_ca_detail
- if not ca_detail:
- logger.debug("No active ca_detail, can't issue to %s", child.child_handle)
- continue
- resources = ca_detail.latest_ca_cert.get_3779resources() & irdb_resources
- if resources.empty():
- logger.debug("No overlap between received resources and what child %s should get ([%s], [%s])",
- child.child_handle, ca_detail.latest_ca_cert.get_3779resources(), irdb_resources)
- continue
- rc = class_elt()
- rc.class_name = str(ca.ca_id)
- rc.cert_url = multi_uri(ca_detail.ca_cert_uri)
- rc.from_resource_bag(resources)
- for child_cert in child.fetch_child_certs(ca_detail = ca_detail):
- c = certificate_elt()
- c.cert_url = multi_uri(child_cert.uri)
- c.cert = child_cert.cert
- rc.certs.append(c)
- rc.issuer = ca_detail.latest_ca_cert
- r_msg.payload.classes.append(rc)
-
- callback()
-
- self.gctx.irdb_query_child_resources(child.self.self_handle, child.child_handle, handle, errback)
-
- @classmethod
- def query(cls, parent, cb, eb):
- """
- Send a "list" query to parent.
- """
- try:
- logger.info('Sending "list" request to parent %s', parent.parent_handle)
- parent.query_up_down(cls(), cb, eb)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- eb(e)
-
-class class_response_syntax(base_elt):
- """
- Syntax for Up-Down protocol "list_response" and "issue_response" PDUs.
- """
-
- def __init__(self):
- """
- Initialize class_response_syntax.
- """
- base_elt.__init__(self)
- self.classes = []
-
- def startElement(self, stack, name, attrs):
- """
- Handle "list_response" and "issue_response" PDUs.
- """
- assert name == "class", "Unexpected name %s, stack %s" % (name, stack)
- c = class_elt()
- self.classes.append(c)
- stack.append(c)
- c.startElement(stack, name, attrs)
-
- def toXML(self):
- """Generate payload of "list_response" and "issue_response" PDUs."""
- return [c.toXML() for c in self.classes]
-
-class list_response_pdu(class_response_syntax):
- """
- Up-Down protocol "list_response" PDU.
- """
- pass
-
-class issue_pdu(base_elt):
- """
- Up-Down protocol "issue" PDU.
- """
-
- def startElement(self, stack, name, attrs):
- """
- Handle "issue" PDU.
- """
- assert name == "request", "Unexpected name %s, stack %s" % (name, stack)
- self.class_name = attrs["class_name"]
- self.req_resource_set_as = rpki.resource_set.resource_set_as(attrs.get("req_resource_set_as"))
- self.req_resource_set_ipv4 = rpki.resource_set.resource_set_ipv4(attrs.get("req_resource_set_ipv4"))
- self.req_resource_set_ipv6 = rpki.resource_set.resource_set_ipv6(attrs.get("req_resource_set_ipv6"))
-
- def endElement(self, stack, name, text):
- """
- Handle "issue" PDU.
- """
- assert name == "request", "Unexpected name %s, stack %s" % (name, stack)
- self.pkcs10 = rpki.x509.PKCS10(Base64 = text)
- stack.pop()
-
- def toXML(self):
- """
- Generate payload of "issue" PDU.
- """
- elt = self.make_elt("request", "class_name", "req_resource_set_as",
- "req_resource_set_ipv4", "req_resource_set_ipv6")
- elt.text = self.pkcs10.get_Base64()
- return [elt]
-
- def serve_pdu(self, q_msg, r_msg, child, callback, errback):
- """
- Serve one issue request PDU.
- """
-
- # Subsetting not yet implemented, this is the one place where we
- # have to handle it, by reporting that we're lame.
-
- if self.req_resource_set_as or \
- self.req_resource_set_ipv4 or \
- self.req_resource_set_ipv6:
- raise rpki.exceptions.NotImplementedYet("req_* attributes not implemented yet, sorry")
-
- # Check the request
- self.pkcs10.check_valid_request_ca()
- ca = child.ca_from_class_name(self.class_name)
- ca_detail = ca.active_ca_detail
- if ca_detail is None:
- raise rpki.exceptions.NoActiveCA("No active CA for class %r" % self.class_name)
-
- # Check current cert, if any
-
- def got_resources(irdb_resources):
-
- if irdb_resources.valid_until < rpki.sundial.now():
- raise rpki.exceptions.IRDBExpired("IRDB entry for child %s expired %s" % (
- child.child_handle, irdb_resources.valid_until))
-
- resources = irdb_resources & ca_detail.latest_ca_cert.get_3779resources()
- resources.valid_until = irdb_resources.valid_until
- req_key = self.pkcs10.getPublicKey()
- req_sia = self.pkcs10.get_SIA()
- child_cert = child.fetch_child_certs(ca_detail = ca_detail, ski = req_key.get_SKI(), unique = True)
-
- # Generate new cert or regenerate old one if necessary
-
- publisher = rpki.rpkid.publication_queue()
-
- if child_cert is None:
- child_cert = ca_detail.issue(
- ca = ca,
- child = child,
- subject_key = req_key,
- sia = req_sia,
- resources = resources,
- publisher = publisher)
- else:
- child_cert = child_cert.reissue(
- ca_detail = ca_detail,
- sia = req_sia,
- resources = resources,
- publisher = publisher)
-
- def done():
- c = certificate_elt()
- c.cert_url = multi_uri(child_cert.uri)
- c.cert = child_cert.cert
- rc = class_elt()
- rc.class_name = self.class_name
- rc.cert_url = multi_uri(ca_detail.ca_cert_uri)
- rc.from_resource_bag(resources)
- rc.certs.append(c)
- rc.issuer = ca_detail.latest_ca_cert
- r_msg.payload = issue_response_pdu()
- r_msg.payload.classes.append(rc)
- callback()
-
- self.gctx.sql.sweep()
- assert child_cert and child_cert.sql_in_db
- publisher.call_pubd(done, errback)
-
- self.gctx.irdb_query_child_resources(child.self.self_handle, child.child_handle, got_resources, errback)
-
- @classmethod
- def query(cls, parent, ca, ca_detail, callback, errback):
- """
- Send an "issue" request to parent associated with ca.
- """
- assert ca_detail is not None and ca_detail.state in ("pending", "active")
- self = cls()
- self.class_name = ca.parent_resource_class
- self.pkcs10 = rpki.x509.PKCS10.create(
- keypair = ca_detail.private_key_id,
- is_ca = True,
- caRepository = ca.sia_uri,
- rpkiManifest = ca_detail.manifest_uri)
- logger.info('Sending "issue" request to parent %s', parent.parent_handle)
- parent.query_up_down(self, callback, errback)
-
-class issue_response_pdu(class_response_syntax):
- """
- Up-Down protocol "issue_response" PDU.
- """
-
- def check_response(self):
- """
- Check whether this looks like a reasonable issue_response PDU.
- XML schema should be tighter for this response.
- """
- if len(self.classes) != 1 or len(self.classes[0].certs) != 1:
- raise rpki.exceptions.BadIssueResponse
-
-class revoke_syntax(base_elt):
- """
- Syntax for Up-Down protocol "revoke" and "revoke_response" PDUs.
- """
-
- def startElement(self, stack, name, attrs):
- """Handle "revoke" PDU."""
- self.class_name = attrs["class_name"]
- self.ski = attrs["ski"]
-
- def toXML(self):
- """Generate payload of "revoke" PDU."""
- return [self.make_elt("key", "class_name", "ski")]
-
-class revoke_pdu(revoke_syntax):
- """
- Up-Down protocol "revoke" PDU.
- """
-
- def get_SKI(self):
- """
- Convert g(SKI) encoding from PDU back to raw SKI.
- """
- return base64.urlsafe_b64decode(self.ski + "=")
-
- def serve_pdu(self, q_msg, r_msg, child, cb, eb):
- """
- Serve one revoke request PDU.
- """
-
- def done():
- r_msg.payload = revoke_response_pdu()
- r_msg.payload.class_name = self.class_name
- r_msg.payload.ski = self.ski
- cb()
-
- ca = child.ca_from_class_name(self.class_name)
- publisher = rpki.rpkid.publication_queue()
- for ca_detail in ca.ca_details:
- for child_cert in child.fetch_child_certs(ca_detail = ca_detail, ski = self.get_SKI()):
- child_cert.revoke(publisher = publisher)
- self.gctx.sql.sweep()
- publisher.call_pubd(done, eb)
-
- @classmethod
- def query(cls, ca, gski, cb, eb):
- """
- Send a "revoke" request for certificate(s) named by gski to parent associated with ca.
- """
- parent = ca.parent
- self = cls()
- self.class_name = ca.parent_resource_class
- self.ski = gski
- logger.info('Sending "revoke" request for SKI %s to parent %s', gski, parent.parent_handle)
- parent.query_up_down(self, cb, eb)
-
-class revoke_response_pdu(revoke_syntax):
- """
- Up-Down protocol "revoke_response" PDU.
- """
-
- pass
-
-class error_response_pdu(base_elt):
- """
- Up-Down protocol "error_response" PDU.
- """
-
- codes = {
+error_response_codes = {
1101 : "Already processing request",
1102 : "Version number error",
1103 : "Unrecognised request type",
@@ -545,200 +103,71 @@ class error_response_pdu(base_elt):
1302 : "Revoke - no such key",
2001 : "Internal Server Error - Request not performed" }
- exceptions = {
- rpki.exceptions.NoActiveCA : 1202,
- (rpki.exceptions.ClassNameUnknown, revoke_pdu) : 1301,
- rpki.exceptions.ClassNameUnknown : 1201,
- (rpki.exceptions.NotInDatabase, revoke_pdu) : 1302 }
- def __init__(self, exception = None, request_payload = None):
- """
- Initialize an error_response PDU from an exception object.
- """
- base_elt.__init__(self)
- if exception is not None:
- logger.debug("Constructing up-down error response from exception %s", exception)
- exception_type = type(exception)
- request_type = None if request_payload is None else type(request_payload)
- logger.debug("Constructing up-down error response: exception_type %s, request_type %s",
- exception_type, request_type)
- if False:
- self.status = self.exceptions.get((exception_type, request_type),
- self.exceptions.get(exception_type, 2001))
- else:
- self.status = self.exceptions.get((exception_type, request_type))
- if self.status is None:
- logger.debug("No request-type-specific match, trying exception match")
- self.status = self.exceptions.get(exception_type)
- if self.status is None:
- logger.debug("No exception match either, defaulting")
- self.status = 2001
- self.description = str(exception)
- logger.debug("Chosen status code: %s", self.status)
-
- def endElement(self, stack, name, text):
- """
- Handle "error_response" PDU.
- """
- if name == "status":
- code = int(text)
- if code not in self.codes:
- raise rpki.exceptions.BadStatusCode("%s is not a known status code" % code)
- self.status = code
- elif name == "description":
- self.description = text
- else:
- assert name == "message", "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
- stack[-1].endElement(stack, name, text)
-
- def toXML(self):
- """
- Generate payload of "error_response" PDU.
- """
- assert self.status in self.codes
- elt = self.make_elt("status")
- elt.text = str(self.status)
- payload = [elt]
- if self.description:
- elt = self.make_elt("description")
- elt.text = str(self.description)
- elt.set("{http://www.w3.org/XML/1998/namespace}lang", "en-US")
- payload.append(elt)
- return payload
-
- def check_response(self):
+exception_map = {
+ rpki.exceptions.NoActiveCA : 1202,
+ (rpki.exceptions.ClassNameUnknown, "revoke") : 1301,
+ rpki.exceptions.ClassNameUnknown : 1201,
+ (rpki.exceptions.NotInDatabase, "revoke") : 1302 }
+
+
+def check_response(r_msg, q_type):
"""
- Handle an error response. For now, just raise an exception,
- perhaps figure out something more clever to do later.
+ Additional checks beyond the XML schema for whether this looks like
+ a reasonable up-down response message.
"""
- raise rpki.exceptions.UpstreamError(self.codes[self.status])
-class message_pdu(base_elt):
- """
- Up-Down protocol message wrapper PDU.
- """
+ r_type = r_msg.get("type")
- version = 1
+ if r_type == "error_response":
+ raise rpki.exceptions.UpstreamError(error_response_codes[int(r_msg.findtext(tag_status))])
- name2type = {
- "list" : list_pdu,
- "list_response" : list_response_pdu,
- "issue" : issue_pdu,
- "issue_response" : issue_response_pdu,
- "revoke" : revoke_pdu,
- "revoke_response" : revoke_response_pdu,
- "error_response" : error_response_pdu }
+ if r_type != q_type + "_response":
+ raise rpki.exceptions.UnexpectedUpDownResponse
- type2name = dict((v, k) for k, v in name2type.items())
+ if r_type == "issue_response" and (len(r_msg) != 1 or len(r_msg[0]) != 2):
+ logger.debug("Weird issue_response %r: len(r_msg) %s len(r_msg[0]) %s",
+ r_msg, len(r_msg), len(r_msg[0]) if len(r_msg) else None)
+ logger.debug("Offending message\n%s", ElementToString(r_msg))
+ raise rpki.exceptions.BadIssueResponse
- error_pdu_type = error_response_pdu
- def toXML(self):
+def generate_error_response(r_msg, status = 2001, description = None):
"""
- Generate payload of message PDU.
+ Generate an error response. If status is given, it specifies the
+ numeric code to use, otherwise we default to "internal error".
+ If description is specified, we use it as the description, otherwise
+ we just use the default string associated with status.
"""
- elt = self.make_elt("message", "version", "sender", "recipient", "type")
- elt.extend(self.payload.toXML())
- return elt
- def startElement(self, stack, name, attrs):
- """
- Handle message PDU.
+ assert status in error_response_codes
+ del r_msg[:]
+ r_msg.set("type", "error_response")
+ SubElement(r_msg, tag_status).text = str(status)
+ se = SubElement(r_msg, tag_description)
+ se.set("{http://www.w3.org/XML/1998/namespace}lang", "en-US")
+ se.text = str(description or error_response_codes[status])
- Payload of the <message/> element varies depending on the "type"
- attribute, so after some basic checks we have to instantiate the
- right class object to handle whatever kind of PDU this is.
- """
- assert name == "message", "Unexpected name %s, stack %s" % (name, stack)
- assert self.version == int(attrs["version"])
- self.sender = attrs["sender"]
- self.recipient = attrs["recipient"]
- self.type = attrs["type"]
- self.payload = self.name2type[attrs["type"]]()
- stack.append(self.payload)
-
- def __str__(self):
- """
- Convert a message PDU to a string.
- """
- return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "UTF-8")
- def serve_top_level(self, child, callback):
+def generate_error_response_from_exception(r_msg, e, q_type):
"""
- Serve one message request PDU.
+ Construct an error response from an exception. q_type
+ specifies the kind of query to which this is a response, since the
+ same exception can generate different codes in response to different
+ queries.
"""
- r_msg = message_pdu()
- r_msg.sender = self.recipient
- r_msg.recipient = self.sender
+ t = type(e)
+ code = (exception_map.get((t, q_type)) or exception_map.get(t) or 2001)
+ generate_error_response(r_msg, code, e)
- def done():
- r_msg.type = self.type2name[type(r_msg.payload)]
- callback(r_msg)
- def lose(e):
- logger.exception("Unhandled exception serving child %r", child)
- callback(self.serve_error(e))
-
- try:
- self.log_query(child)
- self.payload.serve_pdu(self, r_msg, child, done, lose)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception, e:
- lose(e)
-
- def log_query(self, child):
- """
- Log query we're handling. Separate method so rootd can override.
- """
- logger.info("Serving %s query from child %s [sender %s, recipient %s]", self.type, child.child_handle, self.sender, self.recipient)
-
- def serve_error(self, exception):
- """
- Generate an error_response message PDU.
- """
- r_msg = message_pdu()
- r_msg.sender = self.recipient
- r_msg.recipient = self.sender
- r_msg.payload = self.error_pdu_type(exception, self.payload)
- r_msg.type = self.type2name[type(r_msg.payload)]
- return r_msg
-
- @classmethod
- def make_query(cls, payload, sender, recipient):
+class cms_msg(rpki.x509.XML_CMS_object):
"""
- Construct one message PDU.
+ CMS-signed up-down PDU.
"""
- assert not cls.type2name[type(payload)].endswith("_response")
- if sender is None:
- sender = "tweedledee"
- if recipient is None:
- recipient = "tweedledum"
- self = cls()
- self.sender = sender
- self.recipient = recipient
- self.payload = payload
- self.type = self.type2name[type(payload)]
- return self
-
-class sax_handler(rpki.xml_utils.sax_handler):
- """
- SAX handler for Up-Down protocol.
- """
-
- pdu = message_pdu
- name = "message"
- version = "1"
-class cms_msg(rpki.x509.XML_CMS_object):
- """
- Class to hold a CMS-signed up-down PDU.
- """
-
- encoding = "UTF-8"
- schema = rpki.relaxng.up_down
- saxify = sax_handler.saxify
- allow_extra_certs = True
- allow_extra_crls = True
+ encoding = "UTF-8"
+ schema = rpki.relaxng.up_down
+ allow_extra_certs = True
+ allow_extra_crls = True
diff --git a/rpki/x509.py b/rpki/x509.py
index a7e4d17a..0acb3859 100644
--- a/rpki/x509.py
+++ b/rpki/x509.py
@@ -47,1065 +47,1185 @@ import rpki.resource_set
import rpki.oids
import rpki.sundial
import rpki.log
-import rpki.async
import rpki.relaxng
logger = logging.getLogger(__name__)
def base64_with_linebreaks(der):
- """
- Encode DER (really, anything) as Base64 text, with linebreaks to
- keep the result (sort of) readable.
- """
- b = base64.b64encode(der)
- n = len(b)
- return "\n" + "\n".join(b[i : min(i + 64, n)] for i in xrange(0, n, 64)) + "\n"
-
-def looks_like_PEM(text):
- """
- Guess whether text looks like a PEM encoding.
- """
-
- i = text.find("-----BEGIN ")
- return i >= 0 and text.find("\n-----END ", i) > i
-
-def first_rsync_uri(xia):
- """
- Find first rsync URI in a sequence of AIA or SIA URIs.
- Returns the URI if found, otherwise None.
- """
-
- if xia is not None:
- for uri in xia:
- if uri.startswith("rsync://"):
- return uri
- return None
-
-class X501DN(object):
- """
- Class to hold an X.501 Distinguished Name.
-
- This is nothing like a complete implementation, just enough for our
- purposes. See RFC 5280 4.1.2.4 for the ASN.1 details. In brief:
-
- - A DN is a SEQUENCE OF RDNs.
-
- - A RDN is a SET OF AttributeAndValues; in practice, multi-value
- RDNs are rare, so an RDN is almost always a set with a single
- element.
-
- - An AttributeAndValue is a SEQUENCE consisting of a OID and a
- value, where a whole bunch of things including both syntax and
- semantics of the value are determined by the OID.
-
- - The value is some kind of ASN.1 string; there are far too many
- encoding options options, most of which are either strongly
- discouraged or outright forbidden by the PKIX profile, but which
- persist for historical reasons. The only ones PKIX actually
- likes are PrintableString and UTF8String, but there are nuances
- and special cases where some of the others are required.
-
- The RPKI profile further restricts DNs to a single mandatory
- CommonName attribute with a single optional SerialNumber attribute
- (not to be confused with the certificate serial number).
-
- BPKI certificates should (we hope) follow the general PKIX guideline
- but the ones we construct ourselves are likely to be relatively
- simple.
- """
-
- def __str__(self):
- return "".join("/" + "+".join("%s=%s" % (rpki.oids.oid2name(a[0]), a[1])
- for a in rdn)
- for rdn in self.dn)
-
- def __cmp__(self, other):
- return cmp(self.dn, other.dn)
-
- def __repr__(self):
- return rpki.log.log_repr(self, str(self))
-
- def _debug(self):
- logger.debug("++ %r %r", self, self.dn)
-
- @classmethod
- def from_cn(cls, cn, sn = None):
- assert isinstance(cn, (str, unicode))
- if isinstance(sn, (int, long)):
- sn = "%08X" % sn
- elif isinstance(sn, (str, unicode)):
- assert all(c in "0123456789abcdefABCDEF" for c in sn)
- sn = str(sn)
- self = cls()
- if sn is not None:
- self.dn = (((rpki.oids.commonName, cn),), ((rpki.oids.serialNumber, sn),))
- else:
- self.dn = (((rpki.oids.commonName, cn),),)
- return self
-
- @classmethod
- def from_POW(cls, t):
- assert isinstance(t, tuple)
- self = cls()
- self.dn = t
- return self
-
- def get_POW(self):
- return self.dn
-
- def extract_cn_and_sn(self):
- cn = None
- sn = None
-
- for rdn in self.dn:
- if len(rdn) == 1 and len(rdn[0]) == 2:
- oid = rdn[0][0]
- val = rdn[0][1]
- if oid == rpki.oids.commonName and cn is None:
- cn = val
- continue
- if oid == rpki.oids.serialNumber and sn is None:
- sn = val
- continue
- raise rpki.exceptions.BadX510DN("Bad subject name: %s" % (self.dn,))
-
- if cn is None:
- raise rpki.exceptions.BadX510DN("Subject name is missing CN: %s" % (self.dn,))
-
- return cn, sn
-
-
-class DER_object(object):
- """
- Virtual class to hold a generic DER object.
- """
-
- ## @var formats
- # Formats supported in this object. This is kind of redundant now
- # that we're down to a single ASN.1 package and everything supports
- # the same DER and POW formats, it's mostly historical baggage from
- # the days when we had three different ASN.1 encoders, each with its
- # own low-level Python object format. Clean up, some day.
- formats = ("DER", "POW")
-
- ## @var POW_class
- # Class of underlying POW object. Concrete subclasses must supply this.
- POW_class = None
-
- ## Other attributes that self.clear() should whack.
- other_clear = ()
-
- ## @var DER
- # DER value of this object
- DER = None
-
- ## @var failure_threshold
- # Rate-limiting interval between whines about Auto_update objects.
- failure_threshold = rpki.sundial.timedelta(minutes = 5)
-
- def empty(self):
"""
- Test whether this object is empty.
+ Encode DER (really, anything) as Base64 text, with linebreaks to
+ keep the result (sort of) readable.
"""
- return all(getattr(self, a, None) is None for a in self.formats)
- def clear(self):
- """
- Make this object empty.
- """
- for a in self.formats + self.other_clear:
- setattr(self, a, None)
- self.filename = None
- self.timestamp = None
- self.lastfail = None
+ b = base64.b64encode(der)
+ n = len(b)
+ return "\n" + "\n".join(b[i : min(i + 64, n)] for i in xrange(0, n, 64)) + "\n"
- def __init__(self, **kw):
+def looks_like_PEM(text):
"""
- Initialize a DER_object.
+ Guess whether text looks like a PEM encoding.
"""
- self.clear()
- if len(kw):
- self.set(**kw)
- def set(self, **kw):
- """
- Set this object by setting one of its known formats.
+ i = text.find("-----BEGIN ")
+ return i >= 0 and text.find("\n-----END ", i) > i
- This method only allows one to set one format at a time.
- Subsequent calls will clear the object first. The point of all
- this is to let the object's internal converters handle mustering
- the object into whatever format you need at the moment.
+def first_uri_matching_prefix(xia, prefix):
+ """
+ Find first URI in a sequence of AIA or SIA URIs which matches a
+ particular prefix string. Returns the URI if found, otherwise None.
"""
- if len(kw) == 1:
- name = kw.keys()[0]
- if name in self.formats:
- self.clear()
- setattr(self, name, kw[name])
- return
- if name == "PEM":
- self.clear()
- self._set_PEM(kw[name])
- return
- if name == "Base64":
- self.clear()
- self.DER = base64.b64decode(kw[name])
- return
- if name == "Auto_update":
- self.filename = kw[name]
- self.check_auto_update()
- return
- if name in ("PEM_file", "DER_file", "Auto_file"):
- f = open(kw[name], "rb")
- value = f.read()
- f.close()
- self.clear()
- if name == "PEM_file" or (name == "Auto_file" and looks_like_PEM(value)):
- self._set_PEM(value)
- else:
- self.DER = value
- return
- raise rpki.exceptions.DERObjectConversionError("Can't honor conversion request %r" % (kw,))
+ if xia is not None:
+ for uri in xia:
+ if uri.startswith(prefix):
+ return uri
+ return None
- def check_auto_update(self):
- """
- Check for updates to a DER object that auto-updates from a file.
- """
- if self.filename is None:
- return
- try:
- filename = self.filename
- timestamp = os.stat(self.filename).st_mtime
- if self.timestamp is None or self.timestamp < timestamp:
- logger.debug("Updating %s, timestamp %s",
- filename, rpki.sundial.datetime.fromtimestamp(timestamp))
- f = open(filename, "rb")
- value = f.read()
- f.close()
- self.clear()
- if looks_like_PEM(value):
- self._set_PEM(value)
- else:
- self.DER = value
- self.filename = filename
- self.timestamp = timestamp
- except (IOError, OSError), e:
- now = rpki.sundial.now()
- if self.lastfail is None or now > self.lastfail + self.failure_threshold:
- logger.warning("Could not auto_update %r (last failure %s): %s", self, self.lastfail, e)
- self.lastfail = now
- else:
- self.lastfail = None
-
- def check(self):
+def first_rsync_uri(xia):
"""
- Perform basic checks on a DER object.
+ Find first rsync URI in a sequence of AIA or SIA URIs.
+ Returns the URI if found, otherwise None.
"""
- self.check_auto_update()
- assert not self.empty()
- def _set_PEM(self, pem):
- """
- Set the POW value of this object based on a PEM input value.
- Subclasses may need to override this.
- """
- assert self.empty()
- self.POW = self.POW_class.pemRead(pem)
+ return first_uri_matching_prefix(xia, "rsync://")
- def get_DER(self):
- """
- Get the DER value of this object.
- Subclasses may need to override this method.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWrite()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
+def first_http_uri(xia):
"""
- Get the rpki.POW value of this object.
- Subclasses may need to override this method.
+ Find first HTTP URI in a sequence of AIA or SIA URIs.
+ Returns the URI if found, otherwise None.
"""
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = self.POW_class.derRead(self.get_DER())
- return self.POW
- def get_Base64(self):
- """
- Get the Base64 encoding of the DER value of this object.
- """
- return base64_with_linebreaks(self.get_DER())
+ return first_uri_matching_prefix(xia, "http://")
- def get_PEM(self):
+def first_https_uri(xia):
"""
- Get the PEM representation of this object.
+ Find first HTTPS URI in a sequence of AIA or SIA URIs.
+ Returns the URI if found, otherwise None.
"""
- return self.get_POW().pemWrite()
- def __cmp__(self, other):
- """
- Compare two DER-encoded objects.
- """
- if self is None and other is None:
- return 0
- elif self is None:
- return -1
- elif other is None:
- return 1
- elif isinstance(other, str):
- return cmp(self.get_DER(), other)
- else:
- return cmp(self.get_DER(), other.get_DER())
-
- def hSKI(self):
- """
- Return hexadecimal string representation of SKI for this object.
- Only work for subclasses that implement get_SKI().
- """
- ski = self.get_SKI()
- return ":".join(("%02X" % ord(i) for i in ski)) if ski else ""
+ return first_uri_matching_prefix(xia, "https://")
- def gSKI(self):
+def sha1(data):
"""
- Calculate g(SKI) for this object. Only work for subclasses
- that implement get_SKI().
+ Calculate SHA-1 digest of some data.
+ Convenience wrapper around rpki.POW.Digest class.
"""
- return base64.urlsafe_b64encode(self.get_SKI()).rstrip("=")
- def hAKI(self):
- """
- Return hexadecimal string representation of AKI for this
- object. Only work for subclasses that implement get_AKI().
- """
- aki = self.get_AKI()
- return ":".join(("%02X" % ord(i) for i in aki)) if aki else ""
+ d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)
+ d.update(data)
+ return d.digest()
- def gAKI(self):
+def sha256(data):
"""
- Calculate g(AKI) for this object. Only work for subclasses
- that implement get_AKI().
+ Calculate SHA-256 digest of some data.
+ Convenience wrapper around rpki.POW.Digest class.
"""
- return base64.urlsafe_b64encode(self.get_AKI()).rstrip("=")
- def get_AKI(self):
- """
- Get the AKI extension from this object, if supported.
- """
- return self.get_POW().getAKI()
+ d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
+ d.update(data)
+ return d.digest()
- def get_SKI(self):
- """
- Get the SKI extension from this object, if supported.
- """
- return self.get_POW().getSKI()
- def get_EKU(self):
- """
- Get the Extended Key Usage extension from this object, if supported.
+class X501DN(object):
"""
- return self.get_POW().getEKU()
+ Class to hold an X.501 Distinguished Name.
- def get_SIA(self):
- """
- Get the SIA extension from this object. Only works for subclasses
- that support getSIA().
- """
- return self.get_POW().getSIA()
+ This is nothing like a complete implementation, just enough for our
+ purposes. See RFC 5280 4.1.2.4 for the ASN.1 details. In brief:
- def get_sia_directory_uri(self):
- """
- Get SIA directory (id-ad-caRepository) URI from this object.
- Only works for subclasses that support getSIA().
- """
- sia = self.get_POW().getSIA()
- return None if sia is None else first_rsync_uri(sia[0])
+ - A DN is a SEQUENCE OF RDNs.
- def get_sia_manifest_uri(self):
- """
- Get SIA manifest (id-ad-rpkiManifest) URI from this object.
- Only works for subclasses that support getSIA().
- """
- sia = self.get_POW().getSIA()
- return None if sia is None else first_rsync_uri(sia[1])
+ - A RDN is a SET OF AttributeAndValues; in practice, multi-value
+ RDNs are rare, so an RDN is almost always a set with a single
+ element.
- def get_sia_object_uri(self):
- """
- Get SIA object (id-ad-signedObject) URI from this object.
- Only works for subclasses that support getSIA().
- """
- sia = self.get_POW().getSIA()
- return None if sia is None else first_rsync_uri(sia[2])
+ - An AttributeAndValue is a SEQUENCE consisting of a OID and a
+ value, where a whole bunch of things including both syntax and
+ semantics of the value are determined by the OID.
- def get_AIA(self):
- """
- Get the SIA extension from this object. Only works for subclasses
- that support getAIA().
- """
- return self.get_POW().getAIA()
+ - The value is some kind of ASN.1 string; there are far too many
+ encoding options options, most of which are either strongly
+ discouraged or outright forbidden by the PKIX profile, but which
+ persist for historical reasons. The only ones PKIX actually
+ likes are PrintableString and UTF8String, but there are nuances
+ and special cases where some of the others are required.
- def get_aia_uri(self):
- """
- Get AIA (id-ad-caIssuers) URI from this object.
- Only works for subclasses that support getAIA().
- """
- return first_rsync_uri(self.get_POW().getAIA())
+ The RPKI profile further restricts DNs to a single mandatory
+ CommonName attribute with a single optional SerialNumber attribute
+ (not to be confused with the certificate serial number).
- def get_basicConstraints(self):
- """
- Get the basicConstraints extension from this object. Only works
- for subclasses that support getExtension().
+ BPKI certificates should (we hope) follow the general PKIX guideline
+ but the ones we construct ourselves are likely to be relatively
+ simple.
"""
- return self.get_POW().getBasicConstraints()
- def is_CA(self):
- """
- Return True if and only if object has the basicConstraints
- extension and its cA value is true.
- """
- basicConstraints = self.get_basicConstraints()
- return basicConstraints is not None and basicConstraints[0]
+ def __init__(self, dn):
+ assert isinstance(dn, tuple)
+ self.dn = dn
- def get_3779resources(self):
- """
- Get RFC 3779 resources as rpki.resource_set objects.
- """
- resources = rpki.resource_set.resource_bag.from_POW_rfc3779(self.get_POW().getRFC3779())
- try:
- resources.valid_until = self.getNotAfter()
- except AttributeError:
- pass
- return resources
-
- @classmethod
- def from_sql(cls, x):
- """
- Convert from SQL storage format.
- """
- return cls(DER = x)
+ def __str__(self):
+ return "".join("/" + "+".join("%s=%s" % (rpki.oids.oid2name(a[0]), a[1])
+ for a in rdn)
+ for rdn in self.dn)
- def to_sql(self):
- """
- Convert to SQL storage format.
- """
- return self.get_DER()
+ def __cmp__(self, other):
+ return cmp(self.dn, other.dn)
- def dumpasn1(self):
- """
- Pretty print an ASN.1 DER object using cryptlib dumpasn1 tool.
- Use a temporary file rather than popen4() because dumpasn1 uses
- seek() when decoding ASN.1 content nested in OCTET STRING values.
- """
+ def __repr__(self):
+ return rpki.log.log_repr(self, str(self))
- ret = None
- fn = "dumpasn1.%d.tmp" % os.getpid()
- try:
- f = open(fn, "wb")
- f.write(self.get_DER())
- f.close()
- p = subprocess.Popen(("dumpasn1", "-a", fn), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
- ret = "\n".join(x for x in p.communicate()[0].splitlines() if x.startswith(" "))
- except Exception, e:
- ret = "[Could not run dumpasn1: %s]" % e
- finally:
- os.unlink(fn)
- return ret
-
- def tracking_data(self, uri):
- """
- Return a string containing data we want to log when tracking how
- objects move through the RPKI system. Subclasses may wrap this to
- provide more information, but should make sure to include at least
- this information at the start of the tracking line.
- """
- try:
- d = rpki.POW.Digest(rpki.POW.SHA1_DIGEST)
- d.update(self.get_DER())
- return "%s %s %s" % (uri, self.creation_timestamp,
- "".join(("%02X" % ord(b) for b in d.digest())))
- except: # pylint: disable=W0702
- return uri
-
- def __getstate__(self):
- """
- Pickling protocol -- pickle the DER encoding.
- """
- return self.get_DER()
+ def _debug(self):
+ logger.debug("++ %r %r", self, self.dn)
- def __setstate__(self, state):
- """
- Pickling protocol -- unpickle the DER encoding.
- """
- self.set(DER = state)
+ @classmethod
+ def from_cn(cls, cn, sn = None):
+ assert isinstance(cn, (str, unicode))
+ if isinstance(sn, (int, long)):
+ sn = "%08X" % sn
+ elif isinstance(sn, (str, unicode)):
+ assert all(c in "0123456789abcdefABCDEF" for c in sn)
+ sn = str(sn)
+ if sn is not None:
+ dn = (((rpki.oids.commonName, cn),), ((rpki.oids.serialNumber, sn),))
+ else:
+ dn = (((rpki.oids.commonName, cn),),)
+ return cls(dn)
-class X509(DER_object):
- """
- X.509 certificates.
+ @classmethod
+ def from_POW(cls, dn):
+ return cls(dn)
- This class is designed to hold all the different representations of
- X.509 certs we're using and convert between them. X.509 support in
- Python a nasty maze of half-cooked stuff (except perhaps for
- cryptlib, which is just different). Users of this module should not
- have to care about this implementation nightmare.
- """
+ def get_POW(self):
+ return self.dn
- POW_class = rpki.POW.X509
+ def extract_cn_and_sn(self):
+ cn = None
+ sn = None
- def getIssuer(self):
- """
- Get the issuer of this certificate.
- """
- return X501DN.from_POW(self.get_POW().getIssuer())
+ for rdn in self.dn:
+ if len(rdn) == 1 and len(rdn[0]) == 2:
+ oid = rdn[0][0]
+ val = rdn[0][1]
+ if oid == rpki.oids.commonName and cn is None:
+ cn = val
+ continue
+ if oid == rpki.oids.serialNumber and sn is None:
+ sn = val
+ continue
+ raise rpki.exceptions.BadX510DN("Bad subject name: %s" % (self.dn,))
- def getSubject(self):
- """
- Get the subject of this certificate.
- """
- return X501DN.from_POW(self.get_POW().getSubject())
+ if cn is None:
+ raise rpki.exceptions.BadX510DN("Subject name is missing CN: %s" % (self.dn,))
- def getNotBefore(self):
- """
- Get the inception time of this certificate.
- """
- return self.get_POW().getNotBefore()
+ return cn, sn
- def getNotAfter(self):
- """
- Get the expiration time of this certificate.
- """
- return self.get_POW().getNotAfter()
- def getSerial(self):
+class DER_object(object):
"""
- Get the serial number of this certificate.
+ Virtual class to hold a generic DER object.
"""
- return self.get_POW().getSerial()
- def getPublicKey(self):
- """
- Extract the public key from this certificate.
- """
- return PublicKey(POW = self.get_POW().getPublicKey())
+ ## @var formats
+ # Formats supported in this object. This is kind of redundant now
+ # that we're down to a single ASN.1 package and everything supports
+ # the same DER and POW formats, it's mostly historical baggage from
+ # the days when we had three different ASN.1 encoders, each with its
+ # own low-level Python object format. Clean up, some day.
+ formats = ("DER", "POW")
- def get_SKI(self):
- """
- Get the SKI extension from this object.
- """
- return self.get_POW().getSKI()
+ ## @var POW_class
+ # Class of underlying POW object. Concrete subclasses must supply this.
+ POW_class = None
- def expired(self):
- """
- Test whether this certificate has expired.
- """
- return self.getNotAfter() <= rpki.sundial.now()
+ ## Other attributes that self.clear() should whack.
+ other_clear = ()
- def issue(self, keypair, subject_key, serial, sia, aia, crldp, notAfter,
- cn = None, resources = None, is_ca = True, notBefore = None,
- sn = None, eku = None):
- """
- Issue an RPKI certificate.
- """
+ ## @var DER
+ # DER value of this object
+ DER = None
- assert aia is not None and crldp is not None
-
- assert eku is None or not is_ca
-
- return self._issue(
- keypair = keypair,
- subject_key = subject_key,
- serial = serial,
- sia = sia,
- aia = aia,
- crldp = crldp,
- notBefore = notBefore,
- notAfter = notAfter,
- cn = cn,
- sn = sn,
- resources = resources,
- is_ca = is_ca,
- aki = self.get_SKI(),
- issuer_name = self.getSubject(),
- eku = eku)
-
-
- @classmethod
- def self_certify(cls, keypair, subject_key, serial, sia, notAfter,
- cn = None, resources = None, notBefore = None,
- sn = None):
- """
- Generate a self-certified RPKI certificate.
- """
+ ## @var failure_threshold
+ # Rate-limiting interval between whines about Auto_update objects.
+ failure_threshold = rpki.sundial.timedelta(minutes = 5)
- ski = subject_key.get_SKI()
-
- if cn is None:
- cn = "".join(("%02X" % ord(i) for i in ski))
-
- return cls._issue(
- keypair = keypair,
- subject_key = subject_key,
- serial = serial,
- sia = sia,
- aia = None,
- crldp = None,
- notBefore = notBefore,
- notAfter = notAfter,
- cn = cn,
- sn = sn,
- resources = resources,
- is_ca = True,
- aki = ski,
- issuer_name = X501DN.from_cn(cn, sn),
- eku = None)
-
-
- @classmethod
- def _issue(cls, keypair, subject_key, serial, sia, aia, crldp, notAfter,
- cn, sn, resources, is_ca, aki, issuer_name, notBefore, eku):
- """
- Common code to issue an RPKI certificate.
- """
+ def empty(self):
+ """
+ Test whether this object is empty.
+ """
- now = rpki.sundial.now()
- ski = subject_key.get_SKI()
+ return all(getattr(self, a, None) is None for a in self.formats)
- if notBefore is None:
- notBefore = now
+ def clear(self):
+ """
+ Make this object empty.
+ """
- if cn is None:
- cn = "".join(("%02X" % ord(i) for i in ski))
+ for a in self.formats + self.other_clear:
+ setattr(self, a, None)
+ self.filename = None
+ self.timestamp = None
+ self.lastfail = None
- if now >= notAfter:
- raise rpki.exceptions.PastNotAfter("notAfter value %s is already in the past" % notAfter)
+ def __init__(self, **kw):
+ """
+ Initialize a DER_object.
+ """
- if notBefore >= notAfter:
- raise rpki.exceptions.NullValidityInterval("notAfter value %s predates notBefore value %s" %
- (notAfter, notBefore))
+ self.clear()
+ if len(kw):
+ self.set(**kw)
+
+ def set(self, **kw):
+ """
+ Set this object by setting one of its known formats.
+
+ This method only allows one to set one format at a time.
+ Subsequent calls will clear the object first. The point of all
+ this is to let the object's internal converters handle mustering
+ the object into whatever format you need at the moment.
+ """
+
+ if len(kw) == 1:
+ name = kw.keys()[0]
+ if name in self.formats:
+ self.clear()
+ setattr(self, name, kw[name])
+ return
+ if name == "PEM":
+ self.clear()
+ self._set_PEM(kw[name])
+ return
+ if name == "Base64":
+ self.clear()
+ self.DER = base64.b64decode(kw[name])
+ return
+ if name == "Auto_update":
+ self.filename = kw[name]
+ self.check_auto_update()
+ return
+ if name in ("PEM_file", "DER_file", "Auto_file"):
+ f = open(kw[name], "rb")
+ value = f.read()
+ f.close()
+ self.clear()
+ if name == "PEM_file" or (name == "Auto_file" and looks_like_PEM(value)):
+ self._set_PEM(value)
+ else:
+ self.DER = value
+ return
+ raise rpki.exceptions.DERObjectConversionError("Can't honor conversion request %r" % (kw,))
+
+ def check_auto_update(self):
+ """
+ Check for updates to a DER object that auto-updates from a file.
+ """
+
+ # pylint: disable=W0201
+
+ if self.filename is None:
+ return
+ try:
+ filename = self.filename
+ timestamp = os.stat(self.filename).st_mtime
+ if self.timestamp is None or self.timestamp < timestamp:
+ logger.debug("Updating %s, timestamp %s",
+ filename, rpki.sundial.datetime.fromtimestamp(timestamp))
+ f = open(filename, "rb")
+ value = f.read()
+ f.close()
+ self.clear()
+ if looks_like_PEM(value):
+ self._set_PEM(value)
+ else:
+ self.DER = value
+ self.filename = filename
+ self.timestamp = timestamp
+ except (IOError, OSError), e:
+ now = rpki.sundial.now()
+ if self.lastfail is None or now > self.lastfail + self.failure_threshold:
+ logger.warning("Could not auto_update %r (last failure %s): %s", self, self.lastfail, e)
+ self.lastfail = now
+ else:
+ self.lastfail = None
- cert = rpki.POW.X509()
+ @property
+ def mtime(self):
+ """
+ Retrieve os.stat().st_mtime for auto-update files.
+ """
- cert.setVersion(2)
- cert.setSerial(serial)
- cert.setIssuer(issuer_name.get_POW())
- cert.setSubject(X501DN.from_cn(cn, sn).get_POW())
- cert.setNotBefore(notBefore)
- cert.setNotAfter(notAfter)
- cert.setPublicKey(subject_key.get_POW())
- cert.setSKI(ski)
- cert.setAKI(aki)
- cert.setCertificatePolicies((rpki.oids.id_cp_ipAddr_asNumber,))
+ return os.stat(self.filename).st_mtime
- if crldp is not None:
- cert.setCRLDP((crldp,))
+ def check(self):
+ """
+ Perform basic checks on a DER object.
+ """
- if aia is not None:
- cert.setAIA((aia,))
+ self.check_auto_update()
+ assert not self.empty()
+
+ def _set_PEM(self, pem):
+ """
+ Set the POW value of this object based on a PEM input value.
+ Subclasses may need to override this.
+ """
+
+ # pylint: disable=W0201
+ assert self.empty()
+ self.POW = self.POW_class.pemRead(pem)
+
+ def get_DER(self):
+ """
+ Get the DER value of this object.
+ Subclasses may need to override this method.
+ """
+
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
+
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this object.
+ Subclasses may need to override this method.
+ """
+
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = self.POW_class.derRead(self.get_DER())
+ return self.POW
+
+ def get_Base64(self):
+ """
+ Get the Base64 encoding of the DER value of this object.
+ """
+
+ return base64_with_linebreaks(self.get_DER())
+
+ def get_PEM(self):
+ """
+ Get the PEM representation of this object.
+ """
+
+ return self.get_POW().pemWrite()
+
+ def __cmp__(self, other):
+ """
+ Compare two DER-encoded objects.
+ """
+
+ if self is None and other is None:
+ return 0
+ elif self is None:
+ return -1
+ elif other is None:
+ return 1
+ elif isinstance(other, str):
+ return cmp(self.get_DER(), other)
+ else:
+ return cmp(self.get_DER(), other.get_DER())
- if is_ca:
- cert.setBasicConstraints(True, None)
- cert.setKeyUsage(frozenset(("keyCertSign", "cRLSign")))
+ def hSKI(self):
+ """
+ Return hexadecimal string representation of SKI for this object.
+ Only work for subclasses that implement get_SKI().
+ """
+
+ ski = self.get_SKI()
+ return ":".join(("%02X" % ord(i) for i in ski)) if ski else ""
- else:
- cert.setKeyUsage(frozenset(("digitalSignature",)))
+ def gSKI(self):
+ """
+ Calculate g(SKI) for this object. Only work for subclasses
+ that implement get_SKI().
+ """
+
+ return base64.urlsafe_b64encode(self.get_SKI()).rstrip("=")
- assert sia is not None or not is_ca
+ def hAKI(self):
+ """
+ Return hexadecimal string representation of AKI for this
+ object. Only work for subclasses that implement get_AKI().
+ """
+
+ aki = self.get_AKI()
+ return ":".join(("%02X" % ord(i) for i in aki)) if aki else ""
+
+ def gAKI(self):
+ """
+ Calculate g(AKI) for this object. Only work for subclasses
+ that implement get_AKI().
+ """
+
+ return base64.urlsafe_b64encode(self.get_AKI()).rstrip("=")
+
+ def get_AKI(self):
+ """
+ Get the AKI extension from this object, if supported.
+ """
+
+ return self.get_POW().getAKI()
+
+ def get_SKI(self):
+ """
+ Get the SKI extension from this object, if supported.
+ """
+
+ return self.get_POW().getSKI()
+
+ def get_EKU(self):
+ """
+ Get the Extended Key Usage extension from this object, if supported.
+ """
+
+ return self.get_POW().getEKU()
+
+ def get_SIA(self):
+ """
+ Get the SIA extension from this object. Only works for subclasses
+ that support getSIA().
+ """
+
+ return self.get_POW().getSIA()
+
+ def get_sia_directory_uri(self):
+ """
+ Get SIA directory (id-ad-caRepository) URI from this object.
+ Only works for subclasses that support getSIA().
+ """
+
+ sia = self.get_POW().getSIA()
+ return None if sia is None else first_rsync_uri(sia[0])
+
+ def get_sia_manifest_uri(self):
+ """
+ Get SIA manifest (id-ad-rpkiManifest) URI from this object.
+ Only works for subclasses that support getSIA().
+ """
+
+ sia = self.get_POW().getSIA()
+ return None if sia is None else first_rsync_uri(sia[1])
+
+ def get_sia_object_uri(self):
+ """
+ Get SIA object (id-ad-signedObject) URI from this object.
+ Only works for subclasses that support getSIA().
+ """
+
+ sia = self.get_POW().getSIA()
+ return None if sia is None else first_rsync_uri(sia[2])
+
+ def get_sia_rrdp_notify(self):
+ """
+ Get SIA RRDP (id-ad-rpkiNotify) URI from this object.
+ We prefer HTTPS over HTTP if both are present.
+ Only works for subclasses that support getSIA().
+ """
+
+ sia = self.get_POW().getSIA()
+ return None if sia is None else first_https_uri(sia[3]) or first_http_uri(sia[3])
+
+ def get_AIA(self):
+ """
+ Get the SIA extension from this object. Only works for subclasses
+ that support getAIA().
+ """
+
+ return self.get_POW().getAIA()
+
+ def get_aia_uri(self):
+ """
+ Get AIA (id-ad-caIssuers) URI from this object.
+ Only works for subclasses that support getAIA().
+ """
+
+ return first_rsync_uri(self.get_POW().getAIA())
+
+ def get_basicConstraints(self):
+ """
+ Get the basicConstraints extension from this object. Only works
+ for subclasses that support getExtension().
+ """
+
+ return self.get_POW().getBasicConstraints()
+
+ def is_CA(self):
+ """
+ Return True if and only if object has the basicConstraints
+ extension and its cA value is true.
+ """
+
+ basicConstraints = self.get_basicConstraints()
+ return basicConstraints is not None and basicConstraints[0]
+
+ def get_3779resources(self):
+ """
+ Get RFC 3779 resources as rpki.resource_set objects.
+ """
+
+ resources = rpki.resource_set.resource_bag.from_POW_rfc3779(self.get_POW().getRFC3779())
+ try:
+ resources.valid_until = self.getNotAfter() # pylint: disable=E1101
+ except AttributeError:
+ pass
+ return resources
+
+ @classmethod
+ def from_sql(cls, x):
+ """
+ Convert from SQL storage format.
+ """
+
+ return cls(DER = x)
+
+ def to_sql(self):
+ """
+ Convert to SQL storage format.
+ """
+
+ return self.get_DER()
+
+ def dumpasn1(self):
+ """
+ Pretty print an ASN.1 DER object using cryptlib dumpasn1 tool.
+ Use a temporary file rather than popen4() because dumpasn1 uses
+ seek() when decoding ASN.1 content nested in OCTET STRING values.
+ """
+
+ ret = None
+ fn = "dumpasn1.%d.tmp" % os.getpid()
+ try:
+ f = open(fn, "wb")
+ f.write(self.get_DER())
+ f.close()
+ p = subprocess.Popen(("dumpasn1", "-a", fn), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
+ ret = "\n".join(x for x in p.communicate()[0].splitlines() if x.startswith(" "))
+ except Exception, e:
+ ret = "[Could not run dumpasn1: %s]" % e
+ finally:
+ os.unlink(fn)
+ return ret
+
+ def tracking_data(self, uri):
+ """
+ Return a string containing data we want to log when tracking how
+ objects move through the RPKI system. Subclasses may wrap this to
+ provide more information, but should make sure to include at least
+ this information at the start of the tracking line.
+ """
+
+ # pylint: disable=E1101
+
+ try:
+ return "%s %s %s" % (uri, self.creation_timestamp, "".join(("%02X" % ord(b) for b in sha1(self.get_DER()))))
+ except:
+ return uri
+
+ def __getstate__(self):
+ """
+ Pickling protocol -- pickle the DER encoding.
+ """
+
+ return self.get_DER()
+
+ def __setstate__(self, state):
+ """
+ Pickling protocol -- unpickle the DER encoding.
+ """
+
+ self.set(DER = state)
- if sia is not None:
- caRepository, rpkiManifest, signedObject = sia
- cert.setSIA(
- (caRepository,) if isinstance(caRepository, str) else caRepository,
- (rpkiManifest,) if isinstance(rpkiManifest, str) else rpkiManifest,
- (signedObject,) if isinstance(signedObject, str) else signedObject)
+class X509(DER_object):
+ """
+ X.509 certificates.
- if resources is not None:
- cert.setRFC3779(
- asn = ("inherit" if resources.asn.inherit else
- ((r.min, r.max) for r in resources.asn)),
- ipv4 = ("inherit" if resources.v4.inherit else
- ((r.min, r.max) for r in resources.v4)),
- ipv6 = ("inherit" if resources.v6.inherit else
- ((r.min, r.max) for r in resources.v6)))
+ This class is designed to hold all the different representations of
+ X.509 certs we're using and convert between them. X.509 support in
+ Python is a nasty maze of half-cooked stuff (except perhaps for
+ cryptlib, which is just different). Users of this module should not
+ have to care about this implementation nightmare.
+ """
- if eku is not None:
- assert not is_ca
- cert.setEKU(eku)
+ POW_class = rpki.POW.X509
- cert.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)
+ def getIssuer(self):
+ """
+ Get the issuer of this certificate.
+ """
- return cls(POW = cert)
+ return X501DN.from_POW(self.get_POW().getIssuer())
- def bpki_cross_certify(self, keypair, source_cert, serial, notAfter,
- now = None, pathLenConstraint = 0):
- """
- Issue a BPKI certificate with values taking from an existing certificate.
- """
- return self.bpki_certify(
- keypair = keypair,
- subject_name = source_cert.getSubject(),
- subject_key = source_cert.getPublicKey(),
- serial = serial,
- notAfter = notAfter,
- now = now,
- pathLenConstraint = pathLenConstraint,
- is_ca = True)
-
- @classmethod
- def bpki_self_certify(cls, keypair, subject_name, serial, notAfter,
- now = None, pathLenConstraint = None):
- """
- Issue a self-signed BPKI CA certificate.
- """
- return cls._bpki_certify(
- keypair = keypair,
- issuer_name = subject_name,
- subject_name = subject_name,
- subject_key = keypair.get_public(),
- serial = serial,
- now = now,
- notAfter = notAfter,
- pathLenConstraint = pathLenConstraint,
- is_ca = True)
-
- def bpki_certify(self, keypair, subject_name, subject_key, serial, notAfter, is_ca,
- now = None, pathLenConstraint = None):
- """
- Issue a normal BPKI certificate.
- """
- assert keypair.get_public() == self.getPublicKey()
- return self._bpki_certify(
- keypair = keypair,
- issuer_name = self.getSubject(),
- subject_name = subject_name,
- subject_key = subject_key,
- serial = serial,
- now = now,
- notAfter = notAfter,
- pathLenConstraint = pathLenConstraint,
- is_ca = is_ca)
-
- @classmethod
- def _bpki_certify(cls, keypair, issuer_name, subject_name, subject_key,
- serial, now, notAfter, pathLenConstraint, is_ca):
- """
- Issue a BPKI certificate. This internal method does the real
- work, after one of the wrapper methods has extracted the relevant
- fields.
- """
+ def getSubject(self):
+ """
+ Get the subject of this certificate.
+ """
+
+ return X501DN.from_POW(self.get_POW().getSubject())
+
+ def getNotBefore(self):
+ """
+ Get the inception time of this certificate.
+ """
+
+ return self.get_POW().getNotBefore()
+
+ def getNotAfter(self):
+ """
+ Get the expiration time of this certificate.
+ """
+
+ return self.get_POW().getNotAfter()
+
+ def getSerial(self):
+ """
+ Get the serial number of this certificate.
+ """
+
+ return self.get_POW().getSerial()
+
+ def getPublicKey(self):
+ """
+ Extract the public key from this certificate.
+ """
+
+ return PublicKey(POW = self.get_POW().getPublicKey())
+
+ def get_SKI(self):
+ """
+ Get the SKI extension from this object.
+ """
- if now is None:
- now = rpki.sundial.now()
-
- issuer_key = keypair.get_public()
-
- assert (issuer_key == subject_key) == (issuer_name == subject_name)
- assert is_ca or issuer_name != subject_name
- assert is_ca or pathLenConstraint is None
- assert pathLenConstraint is None or (isinstance(pathLenConstraint, (int, long)) and
- pathLenConstraint >= 0)
-
- cert = rpki.POW.X509()
- cert.setVersion(2)
- cert.setSerial(serial)
- cert.setIssuer(issuer_name.get_POW())
- cert.setSubject(subject_name.get_POW())
- cert.setNotBefore(now)
- cert.setNotAfter(notAfter)
- cert.setPublicKey(subject_key.get_POW())
- cert.setSKI(subject_key.get_POW().calculateSKI())
- if issuer_key != subject_key:
- cert.setAKI(issuer_key.get_POW().calculateSKI())
- if is_ca:
- cert.setBasicConstraints(True, pathLenConstraint)
- cert.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)
- return cls(POW = cert)
-
- @classmethod
- def normalize_chain(cls, chain):
- """
- Normalize a chain of certificates into a tuple of X509 objects.
- Given all the glue certificates needed for BPKI cross
- certification, it's easiest to allow sloppy arguments to the CMS
- validation methods and provide a single method that normalizes the
- allowed cases. So this method allows X509, None, lists, and
- tuples, and returns a tuple of X509 objects.
- """
- if isinstance(chain, cls):
- chain = (chain,)
- return tuple(x for x in chain if x is not None)
+ return self.get_POW().getSKI()
+
+ def expired(self):
+ """
+ Test whether this certificate has expired.
+ """
+
+ return self.getNotAfter() <= rpki.sundial.now()
+
+ def issue(self, keypair, subject_key, serial, sia, aia, crldp, notAfter,
+ cn = None, resources = None, is_ca = True, notBefore = None,
+ sn = None, eku = None):
+ """
+ Issue an RPKI certificate.
+ """
+
+ assert aia is not None and crldp is not None
+
+ assert eku is None or not is_ca
+
+ return self._issue(
+ keypair = keypair,
+ subject_key = subject_key,
+ serial = serial,
+ sia = sia,
+ aia = aia,
+ crldp = crldp,
+ notBefore = notBefore,
+ notAfter = notAfter,
+ cn = cn,
+ sn = sn,
+ resources = resources,
+ is_ca = is_ca,
+ aki = self.get_SKI(),
+ issuer_name = self.getSubject(),
+ eku = eku)
+
+
+ @classmethod
+ def self_certify(cls, keypair, subject_key, serial, sia, notAfter,
+ cn = None, resources = None, notBefore = None,
+ sn = None):
+ """
+ Generate a self-certified RPKI certificate.
+ """
+
+ ski = subject_key.get_SKI()
+
+ if cn is None:
+ cn = "".join(("%02X" % ord(i) for i in ski))
+
+ return cls._issue(
+ keypair = keypair,
+ subject_key = subject_key,
+ serial = serial,
+ sia = sia,
+ aia = None,
+ crldp = None,
+ notBefore = notBefore,
+ notAfter = notAfter,
+ cn = cn,
+ sn = sn,
+ resources = resources,
+ is_ca = True,
+ aki = ski,
+ issuer_name = X501DN.from_cn(cn, sn),
+ eku = None)
+
+
+ @classmethod
+ def _issue(cls, keypair, subject_key, serial, sia, aia, crldp, notAfter,
+ cn, sn, resources, is_ca, aki, issuer_name, notBefore, eku):
+ """
+ Common code to issue an RPKI certificate.
+ """
+
+ if sia is not None:
+ assert len(sia) == 4 and sia[3]
+ sia = tuple((str(s),) if isinstance(s, (str, unicode)) else s for s in sia)
+
+ now = rpki.sundial.now()
+ ski = subject_key.get_SKI()
+
+ if notBefore is None:
+ notBefore = now
+
+ if cn is None:
+ cn = "".join(("%02X" % ord(i) for i in ski))
+
+ if now >= notAfter:
+ raise rpki.exceptions.PastNotAfter("notAfter value %s is already in the past" % notAfter)
+
+ if notBefore >= notAfter:
+ raise rpki.exceptions.NullValidityInterval("notAfter value %s predates notBefore value %s" %
+ (notAfter, notBefore))
+
+ cert = rpki.POW.X509()
+
+ cert.setVersion(2)
+ cert.setSerial(serial)
+ cert.setIssuer(issuer_name.get_POW())
+ cert.setSubject(X501DN.from_cn(cn, sn).get_POW())
+ cert.setNotBefore(notBefore)
+ cert.setNotAfter(notAfter)
+ cert.setPublicKey(subject_key.get_POW())
+ cert.setSKI(ski)
+ cert.setAKI(aki)
+ cert.setCertificatePolicies((rpki.oids.id_cp_ipAddr_asNumber,))
+
+ if crldp is not None:
+ cert.setCRLDP((crldp,))
+
+ if aia is not None:
+ cert.setAIA((aia,))
+
+ if is_ca:
+ cert.setBasicConstraints(True, None)
+ cert.setKeyUsage(frozenset(("keyCertSign", "cRLSign")))
+
+ else:
+ cert.setKeyUsage(frozenset(("digitalSignature",)))
+
+ assert sia is not None or not is_ca
+
+ if sia is not None:
+ caRepository, rpkiManifest, signedObject, rpkiNotify = sia
+ cert.setSIA(
+ (caRepository,) if isinstance(caRepository, str) else caRepository,
+ (rpkiManifest,) if isinstance(rpkiManifest, str) else rpkiManifest,
+ (signedObject,) if isinstance(signedObject, str) else signedObject,
+ (rpkiNotify,) if isinstance(rpkiNotify, str) else rpkiNotify)
+
+ if resources is not None:
+ cert.setRFC3779(
+ asn = ("inherit" if resources.asn.inherit else
+ ((r.min, r.max) for r in resources.asn)),
+ ipv4 = ("inherit" if resources.v4.inherit else
+ ((r.min, r.max) for r in resources.v4)),
+ ipv6 = ("inherit" if resources.v6.inherit else
+ ((r.min, r.max) for r in resources.v6)))
+
+ if eku is not None:
+ assert not is_ca
+ cert.setEKU(eku)
+
+ cert.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)
+
+ return cls(POW = cert)
+
+ def bpki_cross_certify(self, keypair, source_cert, serial, notAfter,
+ now = None, pathLenConstraint = 0):
+ """
+ Issue a BPKI certificate with values taken from an existing certificate.
+ """
+
+ return self.bpki_certify(
+ keypair = keypair,
+ subject_name = source_cert.getSubject(),
+ subject_key = source_cert.getPublicKey(),
+ serial = serial,
+ notAfter = notAfter,
+ now = now,
+ pathLenConstraint = pathLenConstraint,
+ is_ca = True)
+
+ @classmethod
+ def bpki_self_certify(cls, keypair, subject_name, serial, notAfter,
+ now = None, pathLenConstraint = None):
+ """
+ Issue a self-signed BPKI CA certificate.
+ """
+
+ return cls._bpki_certify(
+ keypair = keypair,
+ issuer_name = subject_name,
+ subject_name = subject_name,
+ subject_key = keypair.get_public(),
+ serial = serial,
+ now = now,
+ notAfter = notAfter,
+ pathLenConstraint = pathLenConstraint,
+ is_ca = True)
+
+ def bpki_certify(self, keypair, subject_name, subject_key, serial, notAfter, is_ca,
+ now = None, pathLenConstraint = None):
+ """
+ Issue a normal BPKI certificate.
+ """
+
+ assert keypair.get_public() == self.getPublicKey()
+ return self._bpki_certify(
+ keypair = keypair,
+ issuer_name = self.getSubject(),
+ subject_name = subject_name,
+ subject_key = subject_key,
+ serial = serial,
+ now = now,
+ notAfter = notAfter,
+ pathLenConstraint = pathLenConstraint,
+ is_ca = is_ca)
+
+ @classmethod
+ def _bpki_certify(cls, keypair, issuer_name, subject_name, subject_key,
+ serial, now, notAfter, pathLenConstraint, is_ca):
+ """
+ Issue a BPKI certificate. This internal method does the real
+ work, after one of the wrapper methods has extracted the relevant
+ fields.
+ """
+
+ if now is None:
+ now = rpki.sundial.now()
+
+ issuer_key = keypair.get_public()
+
+ assert (issuer_key == subject_key) == (issuer_name == subject_name)
+ assert is_ca or issuer_name != subject_name
+ assert is_ca or pathLenConstraint is None
+ assert pathLenConstraint is None or (isinstance(pathLenConstraint, (int, long)) and
+ pathLenConstraint >= 0)
+
+ cert = rpki.POW.X509()
+ cert.setVersion(2)
+ cert.setSerial(serial)
+ cert.setIssuer(issuer_name.get_POW())
+ cert.setSubject(subject_name.get_POW())
+ cert.setNotBefore(now)
+ cert.setNotAfter(notAfter)
+ cert.setPublicKey(subject_key.get_POW())
+ cert.setSKI(subject_key.get_POW().calculateSKI())
+ if issuer_key != subject_key:
+ cert.setAKI(issuer_key.get_POW().calculateSKI())
+ if is_ca:
+ cert.setBasicConstraints(True, pathLenConstraint)
+ cert.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)
+ return cls(POW = cert)
+
+ @classmethod
+ def normalize_chain(cls, chain):
+ """
+ Normalize a chain of certificates into a tuple of X509 objects.
+ Given all the glue certificates needed for BPKI cross
+ certification, it's easiest to allow sloppy arguments to the CMS
+ validation methods and provide a single method that normalizes the
+ allowed cases. So this method allows X509, None, lists, and
+ tuples, and returns a tuple of X509 objects.
+ """
+
+ if isinstance(chain, cls):
+ chain = (chain,)
+ return tuple(x for x in chain if x is not None)
+
+ @property
+ def creation_timestamp(self):
+ """
+ Time at which this object was created.
+ """
+
+ return self.getNotBefore()
- @property
- def creation_timestamp(self):
+class PKCS10(DER_object):
"""
- Time at which this object was created.
+ Class to hold a PKCS #10 request.
"""
- return self.getNotBefore()
-class PKCS10(DER_object):
- """
- Class to hold a PKCS #10 request.
- """
+ POW_class = rpki.POW.PKCS10
- POW_class = rpki.POW.PKCS10
+ ## @var expected_ca_keyUsage
+ # KeyUsage extension flags expected for CA requests.
- ## @var expected_ca_keyUsage
- # KeyUsage extension flags expected for CA requests.
+ expected_ca_keyUsage = frozenset(("keyCertSign", "cRLSign"))
- expected_ca_keyUsage = frozenset(("keyCertSign", "cRLSign"))
+ ## @var allowed_extensions
+ # Extensions allowed by RPKI profile.
- ## @var allowed_extensions
- # Extensions allowed by RPKI profile.
+ allowed_extensions = frozenset((rpki.oids.basicConstraints,
+ rpki.oids.keyUsage,
+ rpki.oids.subjectInfoAccess,
+ rpki.oids.extendedKeyUsage))
- allowed_extensions = frozenset((rpki.oids.basicConstraints,
- rpki.oids.keyUsage,
- rpki.oids.subjectInfoAccess,
- rpki.oids.extendedKeyUsage))
+ def get_DER(self):
+ """
+ Get the DER value of this certification request.
+ """
- def get_DER(self):
- """
- Get the DER value of this certification request.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWrite()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
- """
- Get the rpki.POW value of this certification request.
- """
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = rpki.POW.PKCS10.derRead(self.get_DER())
- return self.POW
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
- def getSubject(self):
- """
- Extract the subject name from this certification request.
- """
- return X501DN.from_POW(self.get_POW().getSubject())
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this certification request.
+ """
- def getPublicKey(self):
- """
- Extract the public key from this certification request.
- """
- return PublicKey(POW = self.get_POW().getPublicKey())
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = rpki.POW.PKCS10.derRead(self.get_DER())
+ return self.POW
- def get_SKI(self):
- """
- Compute SKI for public key from this certification request.
- """
- return self.getPublicKey().get_SKI()
+ def getSubject(self):
+ """
+ Extract the subject name from this certification request.
+ """
+ return X501DN.from_POW(self.get_POW().getSubject())
- def check_valid_request_common(self):
- """
- Common code for checking this certification requests to see
- whether they conform to the RPKI certificate profile.
+ def getPublicKey(self):
+ """
+ Extract the public key from this certification request.
+ """
- Throws an exception if the request isn't valid, so if this method
- returns at all, the request is ok.
+ return PublicKey(POW = self.get_POW().getPublicKey())
- You probably don't want to call this directly, as it only performs
- the checks that are common to all RPKI certificates.
- """
+ def get_SKI(self):
+ """
+ Compute SKI for public key from this certification request.
+ """
- if not self.get_POW().verify():
- raise rpki.exceptions.BadPKCS10("PKCS #10 signature check failed")
+ return self.getPublicKey().get_SKI()
- ver = self.get_POW().getVersion()
- if ver != 0:
- raise rpki.exceptions.BadPKCS10("PKCS #10 request has bad version number %s" % ver)
+ def check_valid_request_common(self):
+ """
+ Common code for checking these certification requests to see
+ whether they conform to the RPKI certificate profile.
- ku = self.get_POW().getKeyUsage()
+ Throws an exception if the request isn't valid, so if this method
+ returns at all, the request is ok.
- if ku is not None and self.expected_ca_keyUsage != ku:
- raise rpki.exceptions.BadPKCS10("PKCS #10 keyUsage doesn't match profile: %r" % ku)
+ You probably don't want to call this directly, as it only performs
+ the checks that are common to all RPKI certificates.
+ """
- forbidden_extensions = self.get_POW().getExtensionOIDs() - self.allowed_extensions
+ if not self.get_POW().verify():
+ raise rpki.exceptions.BadPKCS10("PKCS #10 signature check failed")
- if forbidden_extensions:
- raise rpki.exceptions.BadExtension("Forbidden extension%s in PKCS #10 certificate request: %s" % (
- "" if len(forbidden_extensions) == 1 else "s",
- ", ".join(forbidden_extensions)))
+ ver = self.get_POW().getVersion()
+ if ver != 0:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 request has bad version number %s" % ver)
- def check_valid_request_ca(self):
- """
- Check this certification request to see whether it's a valid
- request for an RPKI CA certificate.
+ ku = self.get_POW().getKeyUsage()
- Throws an exception if the request isn't valid, so if this method
- returns at all, the request is ok.
- """
+ if ku is not None and self.expected_ca_keyUsage != ku:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 keyUsage doesn't match profile: %r" % ku)
- self.check_valid_request_common()
+ forbidden_extensions = self.get_POW().getExtensionOIDs() - self.allowed_extensions
- alg = self.get_POW().getSignatureAlgorithm()
- bc = self.get_POW().getBasicConstraints()
- eku = self.get_POW().getEKU()
- sias = self.get_POW().getSIA()
+ if forbidden_extensions:
+ raise rpki.exceptions.BadExtension("Forbidden extension%s in PKCS #10 certificate request: %s" % (
+ "" if len(forbidden_extensions) == 1 else "s",
+ ", ".join(forbidden_extensions)))
- if alg != rpki.oids.sha256WithRSAEncryption:
- raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for CA: %s" % alg)
- if bc is None or not bc[0] or bc[1] is not None:
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA bad basicConstraints")
+ def check_valid_request_ca(self):
+ """
+ Check this certification request to see whether it's a valid
+ request for an RPKI CA certificate.
- if eku is not None:
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA EKU not allowed")
+ Throws an exception if the request isn't valid, so if this method
+ returns at all, the request is ok.
+ """
- if sias is None:
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA missing")
+ self.check_valid_request_common()
- caRepository, rpkiManifest, signedObject = sias
+ alg = self.get_POW().getSignatureAlgorithm()
+ bc = self.get_POW().getBasicConstraints()
+ eku = self.get_POW().getEKU()
+ sia = self.get_POW().getSIA()
- if signedObject:
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must not have id-ad-signedObject")
+ if alg != rpki.oids.sha256WithRSAEncryption:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for CA: %s" % alg)
- if not caRepository:
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must have id-ad-caRepository")
+ if bc is None or not bc[0] or bc[1] is not None:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA bad basicConstraints")
- if not any(uri.startswith("rsync://") for uri in caRepository):
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-caRepository contains no rsync URIs")
+ if eku is not None:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA EKU not allowed")
- if any(uri.startswith("rsync://") and not uri.endswith("/") for uri in caRepository):
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-caRepository does not end with slash")
+ if sia is None:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA missing")
- if not rpkiManifest:
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must have id-ad-rpkiManifest")
+ caRepository, rpkiManifest, signedObject, rpkiNotify = sia
- if not any(uri.startswith("rsync://") for uri in rpkiManifest):
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-rpkiManifest contains no rsync URIs")
+ logger.debug("check_valid_request_ca(): sia: %r", sia)
- if any(uri.startswith("rsync://") and uri.endswith("/") for uri in rpkiManifest):
- raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-rpkiManifest ends with slash")
+ if signedObject:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must not have id-ad-signedObject")
+ if not caRepository:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must have id-ad-caRepository")
- def check_valid_request_ee(self):
- """
- Check this certification request to see whether it's a valid
- request for an RPKI EE certificate.
-
- Throws an exception if the request isn't valid, so if this method
- returns at all, the request is ok.
-
- We're a bit less strict here than we are for either CA
- certificates or BGPSEC router certificates, because the profile is
- less tightly nailed down for unspecified-use RPKI EE certificates.
- Future specific purposes may impose tighter constraints.
-
- Note that this method does NOT apply to so-called "infrastructure"
- EE certificates (eg, the EE certificates embedded in manifests and
- ROAs); those are constrained fairly tightly, but they're also
- generated internally so we don't need to check them as user or
- protocol input.
- """
+ if not any(uri.startswith("rsync://") for uri in caRepository):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-caRepository contains no rsync URIs")
- self.check_valid_request_common()
+ if any(uri.startswith("rsync://") and not uri.endswith("/") for uri in caRepository):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-caRepository does not end with slash")
- alg = self.get_POW().getSignatureAlgorithm()
- bc = self.get_POW().getBasicConstraints()
- sia = self.get_POW().getSIA()
+ if not rpkiManifest:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA must have id-ad-rpkiManifest")
- caRepository, rpkiManifest, signedObject = sia or (None, None, None)
+ if not any(uri.startswith("rsync://") for uri in rpkiManifest):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-rpkiManifest contains no rsync URIs")
- if alg not in (rpki.oids.sha256WithRSAEncryption, rpki.oids.ecdsa_with_SHA256):
- raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for EE: %s" % alg)
+ if any(uri.startswith("rsync://") and uri.endswith("/") for uri in rpkiManifest):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-rpkiManifest ends with slash")
- if bc is not None and (bc[0] or bc[1] is not None):
- raise rpki.exceptions.BadPKCS10("PKCS #10 EE has bad basicConstraints")
+ if any(not uri.startswith("http://") and not uri.startswith("https://") for uri in rpkiNotify):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 CA SIA id-ad-rpkiNotify neither HTTP nor HTTPS")
- if caRepository:
- raise rpki.exceptions.BadPKCS10("PKCS #10 EE must not have id-ad-caRepository")
+ def check_valid_request_ee(self):
+ """
+ Check this certification request to see whether it's a valid
+ request for an RPKI EE certificate.
- if rpkiManifest:
- raise rpki.exceptions.BadPKCS10("PKCS #10 EE must not have id-ad-rpkiManifest")
+ Throws an exception if the request isn't valid, so if this method
+ returns at all, the request is ok.
- if signedObject and not any(uri.startswith("rsync://") for uri in signedObject):
- raise rpki.exceptions.BadPKCS10("PKCS #10 EE SIA id-ad-signedObject contains no rsync URIs")
+ We're a bit less strict here than we are for either CA
+ certificates or BGPSEC router certificates, because the profile is
+ less tightly nailed down for unspecified-use RPKI EE certificates.
+ Future specific purposes may impose tighter constraints.
+ Note that this method does NOT apply to so-called "infrastructure"
+ EE certificates (eg, the EE certificates embedded in manifests and
+ ROAs); those are constrained fairly tightly, but they're also
+ generated internally so we don't need to check them as user or
+ protocol input.
+ """
- def check_valid_request_router(self):
- """
- Check this certification request to see whether it's a valid
- request for a BGPSEC router certificate.
+ self.check_valid_request_common()
- Throws an exception if the request isn't valid, so if this method
- returns at all, the request is ok.
+ alg = self.get_POW().getSignatureAlgorithm()
+ bc = self.get_POW().getBasicConstraints()
+ sia = self.get_POW().getSIA()
- draft-ietf-sidr-bgpsec-pki-profiles 3.2 says follow RFC 6487 3
- except where explicitly overriden, and does not override for SIA.
- But draft-ietf-sidr-bgpsec-pki-profiles also says that router
- certificates don't get SIA, while RFC 6487 requires SIA. So what
- do we do with SIA in PKCS #10 for router certificates?
+ logger.debug("check_valid_request_ee(): sia: %r", sia)
- For the moment, ignore it, but make sure we don't include it in
- the certificate when we get to the code that generates that.
- """
+ caRepository, rpkiManifest, signedObject, rpkiNotify = sia or (None, None, None, None)
- self.check_valid_request_ee()
+ if alg not in (rpki.oids.sha256WithRSAEncryption, rpki.oids.ecdsa_with_SHA256):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for EE: %s" % alg)
- alg = self.get_POW().getSignatureAlgorithm()
- eku = self.get_POW().getEKU()
+ if bc is not None and (bc[0] or bc[1] is not None):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 EE has bad basicConstraints")
- if alg != rpki.oids.ecdsa_with_SHA256:
- raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for router: %s" % alg)
+ if caRepository:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 EE must not have id-ad-caRepository")
- # Not really clear to me whether PKCS #10 should have EKU or not, so allow
- # either, but insist that it be the right one if present.
+ if rpkiManifest:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 EE must not have id-ad-rpkiManifest")
- if eku is not None and rpki.oids.id_kp_bgpsec_router not in eku:
- raise rpki.exceptions.BadPKCS10("PKCS #10 router must have EKU")
+ if signedObject and not any(uri.startswith("rsync://") for uri in signedObject):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 EE SIA id-ad-signedObject contains no rsync URIs")
+ if rpkiNotify and any(not uri.startswith("http://") and not uri.startswith("https://") for uri in rpkiNotify):
+ raise rpki.exceptions.BadPKCS10("PKCS #10 EE SIA id-ad-rpkiNotify neither HTTP nor HTTPS")
- @classmethod
- def create(cls, keypair, exts = None, is_ca = False,
- caRepository = None, rpkiManifest = None, signedObject = None,
- cn = None, sn = None, eku = None):
- """
- Create a new request for a given keypair.
- """
+ def check_valid_request_router(self):
+ """
+ Check this certification request to see whether it's a valid
+ request for a BGPSEC router certificate.
+
+ Throws an exception if the request isn't valid, so if this method
+ returns at all, the request is ok.
+
+ draft-ietf-sidr-bgpsec-pki-profiles 3.2 says follow RFC 6487 3
+ except where explicitly overridden, and does not override for SIA.
+ But draft-ietf-sidr-bgpsec-pki-profiles also says that router
+ certificates don't get SIA, while RFC 6487 requires SIA. So what
+ do we do with SIA in PKCS #10 for router certificates?
+
+ For the moment, ignore it, but make sure we don't include it in
+ the certificate when we get to the code that generates that.
+ """
- assert exts is None, "Old calling sequence to rpki.x509.PKCS10.create()"
+ self.check_valid_request_ee()
- if cn is None:
- cn = "".join(("%02X" % ord(i) for i in keypair.get_SKI()))
+ alg = self.get_POW().getSignatureAlgorithm()
+ eku = self.get_POW().getEKU()
- if isinstance(caRepository, str):
- caRepository = (caRepository,)
+ if alg != rpki.oids.ecdsa_with_SHA256:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 has bad signature algorithm for router: %s" % alg)
- if isinstance(rpkiManifest, str):
- rpkiManifest = (rpkiManifest,)
+ # Not really clear to me whether PKCS #10 should have EKU or not, so allow
+ # either, but insist that it be the right one if present.
- if isinstance(signedObject, str):
- signedObject = (signedObject,)
+ if eku is not None and rpki.oids.id_kp_bgpsec_router not in eku:
+ raise rpki.exceptions.BadPKCS10("PKCS #10 router must have EKU")
- req = rpki.POW.PKCS10()
- req.setVersion(0)
- req.setSubject(X501DN.from_cn(cn, sn).get_POW())
- req.setPublicKey(keypair.get_POW())
- if is_ca:
- req.setBasicConstraints(True, None)
- req.setKeyUsage(cls.expected_ca_keyUsage)
+ @classmethod
+ def create(cls, keypair, exts = None, is_ca = False,
+ caRepository = None, rpkiManifest = None, signedObject = None,
+ cn = None, sn = None, eku = None, rpkiNotify = None):
+ """
+ Create a new request for a given keypair.
+ """
- if caRepository or rpkiManifest or signedObject:
- req.setSIA(caRepository, rpkiManifest, signedObject)
+ if cn is None:
+ cn = "".join(("%02X" % ord(i) for i in keypair.get_SKI()))
- if eku:
- req.setEKU(eku)
+ req = rpki.POW.PKCS10()
+ req.setVersion(0)
+ req.setSubject(X501DN.from_cn(cn, sn).get_POW())
+ req.setPublicKey(keypair.get_POW())
- req.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)
- return cls(POW = req)
+ if is_ca:
+ req.setBasicConstraints(True, None)
+ req.setKeyUsage(cls.expected_ca_keyUsage)
+
+ sia = (caRepository, rpkiManifest, signedObject, rpkiNotify)
+ if not all(s is None for s in sia):
+ req.setSIA(*tuple([str(s)] if isinstance(s, (str, unicode)) else s for s in sia))
+
+ if eku:
+ req.setEKU(eku)
+
+ req.sign(keypair.get_POW(), rpki.POW.SHA256_DIGEST)
+ return cls(POW = req)
## @var generate_insecure_debug_only_rsa_key
# Debugging hack to let us save throwaway RSA keys from one debug
@@ -1115,913 +1235,981 @@ generate_insecure_debug_only_rsa_key = None
class insecure_debug_only_rsa_key_generator(object):
- def __init__(self, filename, keyno = 0):
- try:
- try:
- import gdbm as dbm_du_jour
- except ImportError:
- import dbm as dbm_du_jour
- self.keyno = long(keyno)
- self.filename = filename
- self.db = dbm_du_jour.open(filename, "c")
- except:
- logger.warning("insecure_debug_only_rsa_key_generator initialization FAILED, hack inoperative")
- raise
-
- def __call__(self):
- k = str(self.keyno)
- try:
- v = rpki.POW.Asymmetric.derReadPrivate(self.db[k])
- except KeyError:
- v = rpki.POW.Asymmetric.generateRSA(2048)
- self.db[k] = v.derWritePrivate()
- self.keyno += 1
- return v
+ def __init__(self, filename, keyno = 0):
+ try:
+ try:
+ import gdbm as dbm_du_jour
+ except ImportError:
+ import dbm as dbm_du_jour
+ self.keyno = long(keyno)
+ self.filename = filename
+ self.db = dbm_du_jour.open(filename, "c")
+ except:
+ logger.warning("insecure_debug_only_rsa_key_generator initialization FAILED, hack inoperative")
+ raise
+
+ def __call__(self):
+ k = str(self.keyno)
+ try:
+ v = rpki.POW.Asymmetric.derReadPrivate(self.db[k])
+ except KeyError:
+ v = rpki.POW.Asymmetric.generateRSA(2048)
+ self.db[k] = v.derWritePrivate()
+ self.keyno += 1
+ return v
class PrivateKey(DER_object):
- """
- Class to hold a Public/Private key pair.
- """
-
- POW_class = rpki.POW.Asymmetric
-
- def get_DER(self):
"""
- Get the DER value of this keypair.
+ Class to hold a Public/Private key pair.
"""
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWritePrivate()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
- """
- Get the rpki.POW value of this keypair.
- """
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = rpki.POW.Asymmetric.derReadPrivate(self.get_DER())
- return self.POW
- def get_PEM(self):
- """
- Get the PEM representation of this keypair.
- """
- return self.get_POW().pemWritePrivate()
+ POW_class = rpki.POW.Asymmetric
- def _set_PEM(self, pem):
- """
- Set the POW value of this keypair from a PEM string.
- """
- assert self.empty()
- self.POW = self.POW_class.pemReadPrivate(pem)
+ def get_DER(self):
+ """
+ Get the DER value of this keypair.
+ """
- def get_public_DER(self):
- """
- Get the DER encoding of the public key from this keypair.
- """
- return self.get_POW().derWritePublic()
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWritePrivate()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
- def get_SKI(self):
- """
- Calculate the SKI of this keypair.
- """
- return self.get_POW().calculateSKI()
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this keypair.
+ """
- def get_public(self):
- """
- Convert the public key of this keypair into a PublicKey object.
- """
- return PublicKey(DER = self.get_public_DER())
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = rpki.POW.Asymmetric.derReadPrivate(self.get_DER())
+ return self.POW
-class PublicKey(DER_object):
- """
- Class to hold a public key.
- """
+ def get_PEM(self):
+ """
+ Get the PEM representation of this keypair.
+ """
- POW_class = rpki.POW.Asymmetric
+ return self.get_POW().pemWritePrivate()
- def get_DER(self):
- """
- Get the DER value of this public key.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWritePublic()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
+ def _set_PEM(self, pem):
+ """
+ Set the POW value of this keypair from a PEM string.
+ """
+
+ # pylint: disable=W0201
+ assert self.empty()
+ self.POW = self.POW_class.pemReadPrivate(pem)
+
+ def get_public_DER(self):
+ """
+ Get the DER encoding of the public key from this keypair.
+ """
+
+ return self.get_POW().derWritePublic()
+
+ def get_SKI(self):
+ """
+ Calculate the SKI of this keypair.
+ """
+
+ return self.get_POW().calculateSKI()
+
+ def get_public(self):
+ """
+ Convert the public key of this keypair into a PublicKey object.
+ """
+
+ return PublicKey(DER = self.get_public_DER())
+
+class PublicKey(DER_object):
"""
- Get the rpki.POW value of this public key.
+ Class to hold a public key.
"""
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = rpki.POW.Asymmetric.derReadPublic(self.get_DER())
- return self.POW
- def get_PEM(self):
+ POW_class = rpki.POW.Asymmetric
+
+ def get_DER(self):
+ """
+ Get the DER value of this public key.
+ """
+
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWritePublic()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
+
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this public key.
+ """
+
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = rpki.POW.Asymmetric.derReadPublic(self.get_DER())
+ return self.POW
+
+ def get_PEM(self):
+ """
+ Get the PEM representation of this public key.
+ """
+
+ return self.get_POW().pemWritePublic()
+
+ def _set_PEM(self, pem):
+ """
+ Set the POW value of this public key from a PEM string.
+ """
+
+ # pylint: disable=W0201
+ assert self.empty()
+ self.POW = self.POW_class.pemReadPublic(pem)
+
+ def get_SKI(self):
+ """
+ Calculate the SKI of this public key.
+ """
+
+ return self.get_POW().calculateSKI()
+
+class KeyParams(DER_object):
"""
- Get the PEM representation of this public key.
+ Wrapper for OpenSSL's asymmetric key parameter classes.
"""
- return self.get_POW().pemWritePublic()
- def _set_PEM(self, pem):
+ POW_class = rpki.POW.AsymmetricParams
+
+ @classmethod
+ def generateEC(cls, curve = rpki.POW.EC_P256_CURVE):
+ return cls(POW = rpki.POW.AsymmetricParams.generateEC(curve = curve))
+
+class RSA(PrivateKey):
"""
- Set the POW value of this public key from a PEM string.
+ Class to hold an RSA key pair.
"""
- assert self.empty()
- self.POW = self.POW_class.pemReadPublic(pem)
- def get_SKI(self):
+ @classmethod
+ def generate(cls, keylength = 2048, quiet = False):
+ """
+ Generate a new keypair.
+ """
+
+ if not quiet:
+ logger.debug("Generating new %d-bit RSA key", keylength)
+ if generate_insecure_debug_only_rsa_key is not None:
+ return cls(POW = generate_insecure_debug_only_rsa_key())
+ else:
+ return cls(POW = rpki.POW.Asymmetric.generateRSA(keylength))
+
+class ECDSA(PrivateKey):
"""
- Calculate the SKI of this public key.
+ Class to hold an ECDSA key pair.
"""
- return self.get_POW().calculateSKI()
-class KeyParams(DER_object):
- """
- Wrapper for OpenSSL's asymmetric key parameter classes.
- """
+ @classmethod
+ def generate(cls, params = None, quiet = False):
+ """
+ Generate a new keypair.
+ """
- POW_class = rpki.POW.AsymmetricParams
+ if params is None:
+ if not quiet:
+ logger.debug("Generating new ECDSA key parameters")
+ params = KeyParams.generateEC()
- @classmethod
- def generateEC(cls, curve = rpki.POW.EC_P256_CURVE):
- return cls(POW = rpki.POW.AsymmetricParams.generateEC(curve = curve))
+ assert isinstance(params, KeyParams)
-class RSA(PrivateKey):
- """
- Class to hold an RSA key pair.
- """
+ if not quiet:
+ logger.debug("Generating new ECDSA key")
+
+ return cls(POW = rpki.POW.Asymmetric.generateFromParams(params.get_POW()))
- @classmethod
- def generate(cls, keylength = 2048, quiet = False):
+class CMS_object(DER_object):
"""
- Generate a new keypair.
+ Abstract class to hold a CMS object.
"""
- if not quiet:
- logger.debug("Generating new %d-bit RSA key", keylength)
- if generate_insecure_debug_only_rsa_key is not None:
- return cls(POW = generate_insecure_debug_only_rsa_key())
- else:
- return cls(POW = rpki.POW.Asymmetric.generateRSA(keylength))
-class ECDSA(PrivateKey):
- """
- Class to hold an ECDSA key pair.
- """
+ econtent_oid = rpki.oids.id_data
+ POW_class = rpki.POW.CMS
- @classmethod
- def generate(cls, params = None, quiet = False):
- """
- Generate a new keypair.
- """
+ ## @var dump_on_verify_failure
+ # Set this to True to get dumpasn1 dumps of ASN.1 on CMS verify failures.
- if params is None:
- if not quiet:
- logger.debug("Generating new ECDSA key parameters")
- params = KeyParams.generateEC()
+ dump_on_verify_failure = True
- assert isinstance(params, KeyParams)
+ ## @var debug_cms_certs
+ # Set this to True to log a lot of chatter about CMS certificates.
- if not quiet:
- logger.debug("Generating new ECDSA key")
+ debug_cms_certs = False
- return cls(POW = rpki.POW.Asymmetric.generateFromParams(params.get_POW()))
+ ## @var dump_using_dumpasn1
+ # Set this to use external dumpasn1 program, which is prettier and
+ # more informative than OpenSSL's CMS text dump, but which won't
+ # work if the dumpasn1 program isn't installed.
-class CMS_object(DER_object):
- """
- Abstract class to hold a CMS object.
- """
+ dump_using_dumpasn1 = False
- econtent_oid = rpki.oids.id_data
- POW_class = rpki.POW.CMS
+ ## @var require_crls
+ # Set this to False to make CMS CRLs optional in the cases where we
+ # would otherwise require them. Some day this option should go away
+    # and CRLs should be unconditionally mandatory in such cases.
- ## @var dump_on_verify_failure
- # Set this to True to get dumpasn1 dumps of ASN.1 on CMS verify failures.
+ require_crls = False
- dump_on_verify_failure = True
+ ## @var allow_extra_certs
+ # Set this to True to allow CMS messages to contain CA certificates.
- ## @var debug_cms_certs
- # Set this to True to log a lot of chatter about CMS certificates.
+ allow_extra_certs = False
- debug_cms_certs = False
+ ## @var allow_extra_crls
+ # Set this to True to allow CMS messages to contain multiple CRLs.
- ## @var dump_using_dumpasn1
- # Set this to use external dumpasn1 program, which is prettier and
- # more informative than OpenSSL's CMS text dump, but which won't
- # work if the dumpasn1 program isn't installed.
+ allow_extra_crls = False
- dump_using_dumpasn1 = False
+ ## @var print_on_der_error
+ # Set this to True to log alleged DER when we have trouble parsing
+ # it, in case it's really a Perl backtrace or something.
- ## @var require_crls
- # Set this to False to make CMS CRLs optional in the cases where we
- # would otherwise require them. Some day this option should go away
- # and CRLs should be uncondtionally mandatory in such cases.
+ print_on_der_error = True
- require_crls = False
+ def get_DER(self):
+ """
+ Get the DER value of this CMS_object.
+ """
- ## @var allow_extra_certs
- # Set this to True to allow CMS messages to contain CA certificates.
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
- allow_extra_certs = False
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this CMS_object.
+ """
- ## @var allow_extra_crls
- # Set this to True to allow CMS messages to contain multiple CRLs.
+ # pylint: disable=E0203,W0201
+ self.check()
+ if not self.POW:
+ self.POW = self.POW_class.derRead(self.get_DER())
+ return self.POW
- allow_extra_crls = False
+ def get_signingTime(self):
+ """
+ Extract signingTime from CMS signed attributes.
+ """
- ## @var print_on_der_error
- # Set this to True to log alleged DER when we have trouble parsing
- # it, in case it's really a Perl backtrace or something.
+ return self.get_POW().signingTime()
- print_on_der_error = True
+ def verify(self, ta):
+ """
+ Verify CMS wrapper and store inner content.
+ """
- def get_DER(self):
- """
- Get the DER value of this CMS_object.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWrite()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
- """
- Get the rpki.POW value of this CMS_object.
- """
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = self.POW_class.derRead(self.get_DER())
- return self.POW
+ try:
+ cms = self.get_POW()
+ except:
+ if self.print_on_der_error:
+ logger.debug("Problem parsing DER CMS message, might not really be DER: %r",
+ self.get_DER())
+ raise rpki.exceptions.UnparsableCMSDER
- def get_signingTime(self):
- """
- Extract signingTime from CMS signed attributes.
- """
- return self.get_POW().signingTime()
+ if cms.eContentType() != self.econtent_oid:
+ raise rpki.exceptions.WrongEContentType("Got CMS eContentType %s, expected %s" % (
+ cms.eContentType(), self.econtent_oid))
- def verify(self, ta):
- """
- Verify CMS wrapper and store inner content.
- """
+ certs = [X509(POW = x) for x in cms.certs()]
+ crls = [CRL(POW = c) for c in cms.crls()]
- try:
- cms = self.get_POW()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- if self.print_on_der_error:
- logger.debug("Problem parsing DER CMS message, might not really be DER: %r",
- self.get_DER())
- raise rpki.exceptions.UnparsableCMSDER
-
- if cms.eContentType() != self.econtent_oid:
- raise rpki.exceptions.WrongEContentType("Got CMS eContentType %s, expected %s" % (
- cms.eContentType(), self.econtent_oid))
-
- certs = [X509(POW = x) for x in cms.certs()]
- crls = [CRL(POW = c) for c in cms.crls()]
-
- if self.debug_cms_certs:
- for x in certs:
- logger.debug("Received CMS cert issuer %s subject %s SKI %s",
- x.getIssuer(), x.getSubject(), x.hSKI())
- for c in crls:
- logger.debug("Received CMS CRL issuer %r", c.getIssuer())
-
- store = rpki.POW.X509Store()
-
- now = rpki.sundial.now()
-
- trusted_ee = None
-
- for x in X509.normalize_chain(ta):
- if self.debug_cms_certs:
- logger.debug("CMS trusted cert issuer %s subject %s SKI %s",
- x.getIssuer(), x.getSubject(), x.hSKI())
- if x.getNotAfter() < now:
- raise rpki.exceptions.TrustedCMSCertHasExpired("Trusted CMS certificate has expired",
- "%s (%s)" % (x.getSubject(), x.hSKI()))
- if not x.is_CA():
- if trusted_ee is None:
- trusted_ee = x
- else:
- raise rpki.exceptions.MultipleCMSEECert("Multiple CMS EE certificates", *("%s (%s)" % (
- x.getSubject(), x.hSKI()) for x in ta if not x.is_CA()))
- store.addTrust(x.get_POW())
-
- if trusted_ee:
- if self.debug_cms_certs:
- logger.debug("Trusted CMS EE cert issuer %s subject %s SKI %s",
- trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI())
- if len(certs) > 1 or (len(certs) == 1 and
- (certs[0].getSubject() != trusted_ee.getSubject() or
- certs[0].getPublicKey() != trusted_ee.getPublicKey())):
- raise rpki.exceptions.UnexpectedCMSCerts("Unexpected CMS certificates", *("%s (%s)" % (
- x.getSubject(), x.hSKI()) for x in certs))
- if crls:
- raise rpki.exceptions.UnexpectedCMSCRLs("Unexpected CRLs", *("%s (%s)" % (
- c.getIssuer(), c.hAKI()) for c in crls))
-
- else:
- untrusted_ee = [x for x in certs if not x.is_CA()]
- if len(untrusted_ee) < 1:
- raise rpki.exceptions.MissingCMSEEcert
- if len(untrusted_ee) > 1 or (not self.allow_extra_certs and len(certs) > len(untrusted_ee)):
- raise rpki.exceptions.UnexpectedCMSCerts("Unexpected CMS certificates", *("%s (%s)" % (
- x.getSubject(), x.hSKI()) for x in certs))
- if len(crls) < 1:
- if self.require_crls:
- raise rpki.exceptions.MissingCMSCRL
- else:
- logger.warning("MISSING CMS CRL! Ignoring per self.require_crls setting")
- if len(crls) > 1 and not self.allow_extra_crls:
- raise rpki.exceptions.UnexpectedCMSCRLs("Unexpected CRLs", *("%s (%s)" % (
- c.getIssuer(), c.hAKI()) for c in crls))
-
- for x in certs:
- if x.getNotAfter() < now:
- raise rpki.exceptions.CMSCertHasExpired("CMS certificate has expired", "%s (%s)" % (
- x.getSubject(), x.hSKI()))
-
- for c in crls:
- if c.getNextUpdate() < now:
- logger.warning("Stale BPKI CMS CRL (%s %s %s)", c.getNextUpdate(), c.getIssuer(), c.hAKI())
-
- try:
- content = cms.verify(store)
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- if self.dump_on_verify_failure:
- if self.dump_using_dumpasn1:
- dbg = self.dumpasn1()
- else:
- dbg = cms.pprint()
- logger.warning("CMS verification failed, dumping ASN.1 (%d octets):", len(self.get_DER()))
- for line in dbg.splitlines():
- logger.warning(line)
- raise rpki.exceptions.CMSVerificationFailed("CMS verification failed")
+ if self.debug_cms_certs:
+ for x in certs:
+ logger.debug("Received CMS cert issuer %s subject %s SKI %s",
+ x.getIssuer(), x.getSubject(), x.hSKI())
+ for c in crls:
+ logger.debug("Received CMS CRL issuer %r", c.getIssuer())
+
+ now = rpki.sundial.now()
- return content
+ trusted_ee = None
+ trusted_ca = []
+ untrusted_ee = None
+
+ for x in X509.normalize_chain(ta):
+ if self.debug_cms_certs:
+ logger.debug("CMS trusted cert issuer %s subject %s SKI %s",
+ x.getIssuer(), x.getSubject(), x.hSKI())
+ if x.getNotAfter() < now:
+ raise rpki.exceptions.TrustedCMSCertHasExpired("Trusted CMS certificate has expired",
+ "%s (%s)" % (x.getSubject(), x.hSKI()))
+ if x.is_CA():
+ trusted_ca.append(x)
+ else:
+ if trusted_ee is None:
+ trusted_ee = x
+ else:
+ raise rpki.exceptions.MultipleCMSEECert("Multiple CMS EE certificates", *("%s (%s)" % (
+ x.getSubject(), x.hSKI()) for x in ta if not x.is_CA()))
+
+ if trusted_ee:
+ if self.debug_cms_certs:
+ logger.debug("Trusted CMS EE cert issuer %s subject %s SKI %s",
+ trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI())
+ if len(certs) > 1 or (len(certs) == 1 and
+ (certs[0].getSubject() != trusted_ee.getSubject() or
+ certs[0].getPublicKey() != trusted_ee.getPublicKey())):
+ raise rpki.exceptions.UnexpectedCMSCerts("Unexpected CMS certificates", *("%s (%s)" % (
+ x.getSubject(), x.hSKI()) for x in certs))
+ if crls:
+ raise rpki.exceptions.UnexpectedCMSCRLs("Unexpected CRLs", *("%s (%s)" % (
+ c.getIssuer(), c.hAKI()) for c in crls))
- def extract(self):
- """
- Extract and store inner content from CMS wrapper without verifying
- the CMS.
+ else:
+ untrusted_ee = [x for x in certs if not x.is_CA()]
+ if len(untrusted_ee) < 1:
+ raise rpki.exceptions.MissingCMSEEcert
+ if len(untrusted_ee) > 1 or (not self.allow_extra_certs and len(certs) > len(untrusted_ee)):
+ raise rpki.exceptions.UnexpectedCMSCerts("Unexpected CMS certificates", *("%s (%s)" % (
+ x.getSubject(), x.hSKI()) for x in certs))
+ untrusted_ee = untrusted_ee[0]
+ if len(crls) < 1:
+ if self.require_crls:
+ raise rpki.exceptions.MissingCMSCRL
+ else:
+ logger.warning("MISSING CMS CRL! Ignoring per self.require_crls setting")
+ if len(crls) > 1 and not self.allow_extra_crls:
+ raise rpki.exceptions.UnexpectedCMSCRLs("Unexpected CRLs", *("%s (%s)" % (
+ c.getIssuer(), c.hAKI()) for c in crls))
+
+ for x in certs:
+ if x.getNotAfter() < now:
+ raise rpki.exceptions.CMSCertHasExpired("CMS certificate has expired", "%s (%s)" % (
+ x.getSubject(), x.hSKI()))
+
+ for c in crls:
+ if c.getNextUpdate() < now:
+ logger.warning("Stale BPKI CMS CRL (%s %s %s)", c.getNextUpdate(), c.getIssuer(), c.hAKI())
+
+ # XXX Verify certificate chain via X.509 machinery, not CMS
+ # machinery. Awful mess due to history, needs cleanup, but
+ # get it working again first.
+
+ cert = (trusted_ee or untrusted_ee).get_POW()
+
+ cert.verify(trusted = (x.get_POW() for x in trusted_ca),
+ crl = crls[0].get_POW() if untrusted_ee and crls else None)
+
+ try:
+            # XXX This isn't right yet, but let's test before getting more complicated
+ #
+ # Aside from all the type and exception abominations, the
+ # main problem here is that we're no longer verifying the
+ # certificate chain, just the CMS signature. Certificate
+            # verification is a separate step under the new scheme,
+ # and probably comes before this, but let's write down
+ # what the problem is before it gets lost...
+
+ content = cms.verify(certs = (x.get_POW() for x in X509.normalize_chain(ta)),
+ flags = rpki.POW.CMS_NO_SIGNER_CERT_VERIFY)
+ except:
+ if self.dump_on_verify_failure:
+ if self.dump_using_dumpasn1:
+ dbg = self.dumpasn1()
+ else:
+ dbg = cms.pprint()
+ logger.warning("CMS verification failed, dumping ASN.1 (%d octets):", len(self.get_DER()))
+ for line in dbg.splitlines():
+ logger.warning(line)
+
+ # XXX Old code replaced rpki.POW exception with this. For
+ # debugging I'd rather see what POW has to say; decide
+ # later whether to keep this change.
+ #
+ #raise rpki.exceptions.CMSVerificationFailed("CMS verification failed")
+ raise
+
+ return content
+
+ def extract(self):
+ """
+ Extract and store inner content from CMS wrapper without verifying
+ the CMS.
+
+ DANGER WILL ROBINSON!!!
+
+ Do not use this method on unvalidated data. Use the verify()
+ method instead.
+
+ If you don't understand this warning, don't use this method.
+ """
+
+ try:
+ cms = self.get_POW()
+ except:
+ raise rpki.exceptions.UnparsableCMSDER
+
+ if cms.eContentType() != self.econtent_oid:
+ raise rpki.exceptions.WrongEContentType("Got CMS eContentType %s, expected %s" % (
+ cms.eContentType(), self.econtent_oid))
+
+ return cms.verify(flags = (rpki.POW.CMS_NOCRL | rpki.POW.CMS_NO_SIGNER_CERT_VERIFY |
+ rpki.POW.CMS_NO_ATTR_VERIFY | rpki.POW.CMS_NO_CONTENT_VERIFY))
+
+
+ def sign(self, keypair, certs, crls = None, no_certs = False):
+ """
+ Sign and wrap inner content.
+ """
+
+ if isinstance(certs, X509):
+ cert = certs
+ certs = ()
+ else:
+ cert = certs[0]
+ certs = certs[1:]
- DANGER WILL ROBINSON!!!
+ if crls is None:
+ crls = ()
+ elif isinstance(crls, CRL):
+ crls = (crls,)
- Do not use this method on unvalidated data. Use the verify()
- method instead.
+ if self.debug_cms_certs:
+ logger.debug("Signing with cert issuer %s subject %s SKI %s",
+ cert.getIssuer(), cert.getSubject(), cert.hSKI())
+ for i, c in enumerate(certs):
+ logger.debug("Additional cert %d issuer %s subject %s SKI %s",
+ i, c.getIssuer(), c.getSubject(), c.hSKI())
- If you don't understand this warning, don't use this method.
- """
+ self._sign(cert.get_POW(),
+ keypair.get_POW(),
+ [x.get_POW() for x in certs],
+ [c.get_POW() for c in crls],
+ rpki.POW.CMS_NOCERTS if no_certs else 0)
- try:
- cms = self.get_POW()
- except (rpki.async.ExitNow, SystemExit):
- raise
- except Exception:
- raise rpki.exceptions.UnparsableCMSDER
+ def _sign(self, cert, keypair, certs, crls, flags):
+ raise NotImplementedError
- if cms.eContentType() != self.econtent_oid:
- raise rpki.exceptions.WrongEContentType("Got CMS eContentType %s, expected %s" % (
- cms.eContentType(), self.econtent_oid))
+ @property
+ def creation_timestamp(self):
+ """
+ Time at which this object was created.
+ """
- return cms.verify(rpki.POW.X509Store(), None,
- (rpki.POW.CMS_NOCRL | rpki.POW.CMS_NO_SIGNER_CERT_VERIFY |
- rpki.POW.CMS_NO_ATTR_VERIFY | rpki.POW.CMS_NO_CONTENT_VERIFY))
+ return self.get_signingTime()
- def sign(self, keypair, certs, crls = None, no_certs = False):
- """
- Sign and wrap inner content.
+class Wrapped_CMS_object(CMS_object):
"""
+ Abstract class to hold CMS objects wrapping non-DER content (eg, XML
+ or VCard).
- if isinstance(certs, X509):
- cert = certs
- certs = ()
- else:
- cert = certs[0]
- certs = certs[1:]
-
- if crls is None:
- crls = ()
- elif isinstance(crls, CRL):
- crls = (crls,)
-
- if self.debug_cms_certs:
- logger.debug("Signing with cert issuer %s subject %s SKI %s",
- cert.getIssuer(), cert.getSubject(), cert.hSKI())
- for i, c in enumerate(certs):
- logger.debug("Additional cert %d issuer %s subject %s SKI %s",
- i, c.getIssuer(), c.getSubject(), c.hSKI())
-
- self._sign(cert.get_POW(),
- keypair.get_POW(),
- [x.get_POW() for x in certs],
- [c.get_POW() for c in crls],
- rpki.POW.CMS_NOCERTS if no_certs else 0)
-
- @property
- def creation_timestamp(self):
- """
- Time at which this object was created.
+ CMS-wrapped objects are a little different from the other DER_object
+ types because the signed object is CMS wrapping some other kind of
+ inner content. A Wrapped_CMS_object is the outer CMS wrapped object
+ so that the usual DER and PEM operations do the obvious things, and
+    the inner content is handled via separate methods.
"""
- return self.get_signingTime()
+ other_clear = ("content",)
-class Wrapped_CMS_object(CMS_object):
- """
- Abstract class to hold CMS objects wrapping non-DER content (eg, XML
- or VCard).
+ def get_content(self):
+ """
+ Get the inner content of this Wrapped_CMS_object.
+ """
- CMS-wrapped objects are a little different from the other DER_object
- types because the signed object is CMS wrapping some other kind of
- inner content. A Wrapped_CMS_object is the outer CMS wrapped object
- so that the usual DER and PEM operations do the obvious things, and
- the inner content is handle via separate methods.
- """
+ if self.content is None:
+ raise rpki.exceptions.CMSContentNotSet("Inner content of CMS object %r is not set" % self)
+ return self.content
- other_clear = ("content",)
+ def set_content(self, content):
+ """
+ Set the (inner) content of this Wrapped_CMS_object, clearing the wrapper.
+ """
- def get_content(self):
- """
- Get the inner content of this Wrapped_CMS_object.
- """
- if self.content is None:
- raise rpki.exceptions.CMSContentNotSet("Inner content of CMS object %r is not set" % self)
- return self.content
+ # pylint: disable=W0201
+ self.clear()
+ self.content = content
- def set_content(self, content):
- """
- Set the (inner) content of this Wrapped_CMS_object, clearing the wrapper.
- """
- self.clear()
- self.content = content
+ def verify(self, ta):
+ """
+ Verify CMS wrapper and store inner content.
+ """
- def verify(self, ta):
- """
- Verify CMS wrapper and store inner content.
- """
+ self.decode(CMS_object.verify(self, ta))
+ return self.get_content()
- self.decode(CMS_object.verify(self, ta))
- return self.get_content()
+ def extract(self):
+ """
+ Extract and store inner content from CMS wrapper without verifying
+ the CMS.
- def extract(self):
- """
- Extract and store inner content from CMS wrapper without verifying
- the CMS.
+ DANGER WILL ROBINSON!!!
- DANGER WILL ROBINSON!!!
+ Do not use this method on unvalidated data. Use the verify()
+ method instead.
- Do not use this method on unvalidated data. Use the verify()
- method instead.
+ If you don't understand this warning, don't use this method.
+ """
- If you don't understand this warning, don't use this method.
- """
+ self.decode(CMS_object.extract(self))
+ return self.get_content()
- self.decode(CMS_object.extract(self))
- return self.get_content()
+ def extract_if_needed(self):
+ """
+ Extract inner content if needed. See caveats for .extract(), do
+ not use unless you really know what you are doing.
+ """
- def extract_if_needed(self):
- """
- Extract inner content if needed. See caveats for .extract(), do
- not use unless you really know what you are doing.
- """
+ if self.content is None:
+ self.extract()
+
+ def _sign(self, cert, keypair, certs, crls, flags):
+ """
+ Internal method to call POW to do CMS signature. This is split
+ out from the .sign() API method to handle differences in how
+ different CMS-based POW classes handle the inner content.
+ """
- if self.content is None:
- self.extract()
+ # pylint: disable=W0201
+ cms = self.POW_class()
+ cms.sign(cert, keypair, self.encode(), certs, crls, self.econtent_oid, flags)
+ self.POW = cms
- def _sign(self, cert, keypair, certs, crls, flags):
+ def decode(self, whatever):
+ raise NotImplementedError
+
+ def encode(self):
+ raise NotImplementedError
+
+
+class DER_CMS_object(CMS_object):
"""
- Internal method to call POW to do CMS signature. This is split
- out from the .sign() API method to handle differences in how
- different CMS-based POW classes handle the inner content.
+ Abstract class for CMS-based objects with DER-encoded content
+ handled by C-level subclasses of rpki.POW.CMS.
"""
- cms = self.POW_class()
- cms.sign(cert, keypair, self.encode(), certs, crls, self.econtent_oid, flags)
- self.POW = cms
+ def _sign(self, cert, keypair, certs, crls, flags):
+ self.get_POW().sign(cert, keypair, certs, crls, self.econtent_oid, flags)
-class DER_CMS_object(CMS_object):
- """
- Abstract class for CMS-based objects with DER-encoded content
- handled by C-level subclasses of rpki.POW.CMS.
- """
+ def extract_if_needed(self):
+ """
+ Extract inner content if needed. See caveats for .extract(), do
+ not use unless you really know what you are doing.
+ """
- def _sign(self, cert, keypair, certs, crls, flags):
- self.get_POW().sign(cert, keypair, certs, crls, self.econtent_oid, flags)
+ try:
+ self.get_POW().getVersion()
+ except rpki.POW.NotVerifiedError:
+ self.extract()
- def extract_if_needed(self):
+class SignedManifest(DER_CMS_object):
"""
- Extract inner content if needed. See caveats for .extract(), do
- not use unless you really know what you are doing.
+ Class to hold a signed manifest.
"""
- try:
- self.get_POW().getVersion()
- except rpki.POW.NotVerifiedError:
- self.extract()
+ econtent_oid = rpki.oids.id_ct_rpkiManifest
+ POW_class = rpki.POW.Manifest
+ def getThisUpdate(self):
+ """
+ Get thisUpdate value from this manifest.
+ """
-class SignedManifest(DER_CMS_object):
- """
- Class to hold a signed manifest.
- """
+ return self.get_POW().getThisUpdate()
- econtent_oid = rpki.oids.id_ct_rpkiManifest
- POW_class = rpki.POW.Manifest
+ def getNextUpdate(self):
+ """
+ Get nextUpdate value from this manifest.
+ """
- def getThisUpdate(self):
- """
- Get thisUpdate value from this manifest.
- """
- return self.get_POW().getThisUpdate()
+ return self.get_POW().getNextUpdate()
- def getNextUpdate(self):
+ @classmethod
+ def build(cls, serial, thisUpdate, nextUpdate, names_and_objs, keypair, certs, version = 0):
+ """
+ Build a signed manifest.
+ """
+
+ filelist = []
+ for name, obj in names_and_objs:
+ filelist.append((name.rpartition("/")[2], sha256(obj.get_DER())))
+ filelist.sort(key = lambda x: x[0])
+
+ obj = cls.POW_class()
+ obj.setVersion(version)
+ obj.setManifestNumber(serial)
+ obj.setThisUpdate(thisUpdate)
+ obj.setNextUpdate(nextUpdate)
+ obj.setAlgorithm(rpki.oids.id_sha256)
+ obj.addFiles(filelist)
+
+ self = cls(POW = obj)
+ self.sign(keypair, certs)
+ return self
+
+class ROA(DER_CMS_object):
"""
- Get nextUpdate value from this manifest.
+ Class to hold a signed ROA.
+ """
+
+ econtent_oid = rpki.oids.id_ct_routeOriginAttestation
+ POW_class = rpki.POW.ROA
+
+ @classmethod
+ def build(cls, asn, ipv4, ipv6, keypair, certs, version = 0):
+ """
+ Build a ROA.
+ """
+
+ ipv4 = ipv4.to_POW_roa_tuple() if ipv4 else None
+ ipv6 = ipv6.to_POW_roa_tuple() if ipv6 else None
+ obj = cls.POW_class()
+ obj.setVersion(version)
+ obj.setASID(asn)
+ obj.setPrefixes(ipv4 = ipv4, ipv6 = ipv6)
+ self = cls(POW = obj)
+ self.sign(keypair, certs)
+ return self
+
+ def tracking_data(self, uri):
+ """
+ Return a string containing data we want to log when tracking how
+ objects move through the RPKI system.
+ """
+
+ msg = DER_CMS_object.tracking_data(self, uri)
+ try:
+ self.extract_if_needed()
+ asn = self.get_POW().getASID()
+ text = []
+ for prefixes in self.get_POW().getPrefixes():
+ if prefixes is not None:
+ for prefix, prefixlen, maxprefixlen in prefixes:
+ if maxprefixlen is None or prefixlen == maxprefixlen:
+ text.append("%s/%s" % (prefix, prefixlen))
+ else:
+ text.append("%s/%s-%s" % (prefix, prefixlen, maxprefixlen))
+ text.sort()
+ msg = "%s %s %s" % (msg, asn, ",".join(text))
+ except:
+ pass
+ return msg
+
+class DeadDrop(object):
"""
- return self.get_POW().getNextUpdate()
+ Dead-drop utility for storing copies of CMS messages for debugging or
+ audit. At the moment this uses Maildir mailbox format, as it has
+ approximately the right properties and a number of useful tools for
+ manipulating it already exist.
+ """
+
+ def __init__(self, name):
+ self.name = name
+ self.pid = os.getpid()
+ self.maildir = mailbox.Maildir(name, factory = None, create = True)
+ self.warned = False
+
+ def dump(self, obj):
+ try:
+ now = time.time()
+ msg = email.mime.application.MIMEApplication(obj.get_DER(), "x-rpki")
+ msg["Date"] = email.utils.formatdate(now)
+ msg["Subject"] = "Process %s dump of %r" % (self.pid, obj)
+ msg["Message-ID"] = email.utils.make_msgid()
+ msg["X-RPKI-PID"] = str(self.pid)
+ msg["X-RPKI-Object"] = repr(obj)
+ msg["X-RPKI-Timestamp"] = "%f" % now
+ self.maildir.add(msg)
+ self.warned = False
+ except Exception, e:
+ if not self.warned:
+ logger.warning("Could not write to mailbox %s: %s", self.name, e)
+ self.warned = True
- @classmethod
- def build(cls, serial, thisUpdate, nextUpdate, names_and_objs, keypair, certs, version = 0):
+class XML_CMS_object(Wrapped_CMS_object):
"""
- Build a signed manifest.
+ Class to hold CMS-wrapped XML protocol data.
"""
- filelist = []
- for name, obj in names_and_objs:
- d = rpki.POW.Digest(rpki.POW.SHA256_DIGEST)
- d.update(obj.get_DER())
- filelist.append((name.rpartition("/")[2], d.digest()))
- filelist.sort(key = lambda x: x[0])
-
- obj = cls.POW_class()
- obj.setVersion(version)
- obj.setManifestNumber(serial)
- obj.setThisUpdate(thisUpdate)
- obj.setNextUpdate(nextUpdate)
- obj.setAlgorithm(rpki.oids.id_sha256)
- obj.addFiles(filelist)
-
- self = cls(POW = obj)
- self.sign(keypair, certs)
- return self
+ econtent_oid = rpki.oids.id_ct_xml
+ encoding = None
+ schema = None
-class ROA(DER_CMS_object):
- """
- Class to hold a signed ROA.
- """
+ ## @var dump_outbound_cms
+ # If set, we write all outbound XML-CMS PDUs to disk, for debugging.
+ # If set, value should be a DeadDrop object.
- econtent_oid = rpki.oids.id_ct_routeOriginAttestation
- POW_class = rpki.POW.ROA
+ dump_outbound_cms = None
- @classmethod
- def build(cls, asn, ipv4, ipv6, keypair, certs, version = 0):
- """
- Build a ROA.
- """
- ipv4 = ipv4.to_POW_roa_tuple() if ipv4 else None
- ipv6 = ipv6.to_POW_roa_tuple() if ipv6 else None
- obj = cls.POW_class()
- obj.setVersion(version)
- obj.setASID(asn)
- obj.setPrefixes(ipv4 = ipv4, ipv6 = ipv6)
- self = cls(POW = obj)
- self.sign(keypair, certs)
- return self
-
- def tracking_data(self, uri):
- """
- Return a string containing data we want to log when tracking how
- objects move through the RPKI system.
- """
- msg = DER_CMS_object.tracking_data(self, uri)
- try:
- self.extract_if_needed()
- asn = self.get_POW().getASID()
- text = []
- for prefixes in self.get_POW().getPrefixes():
- if prefixes is not None:
- for prefix, prefixlen, maxprefixlen in prefixes:
- if maxprefixlen is None or prefixlen == maxprefixlen:
- text.append("%s/%s" % (prefix, prefixlen))
- else:
- text.append("%s/%s-%s" % (prefix, prefixlen, maxprefixlen))
- text.sort()
- msg = "%s %s %s" % (msg, asn, ",".join(text))
- except: # pylint: disable=W0702
- pass
- return msg
+ ## @var dump_inbound_cms
+ # If set, we write all inbound XML-CMS PDUs to disk, for debugging.
+ # If set, value should be a DeadDrop object.
-class DeadDrop(object):
- """
- Dead-drop utility for storing copies of CMS messages for debugging or
- audit. At the moment this uses Maildir mailbox format, as it has
- approximately the right properties and a number of useful tools for
- manipulating it already exist.
- """
-
- def __init__(self, name):
- self.name = name
- self.pid = os.getpid()
- self.maildir = mailbox.Maildir(name, factory = None, create = True)
- self.warned = False
-
- def dump(self, obj):
- try:
- now = time.time()
- msg = email.mime.application.MIMEApplication(obj.get_DER(), "x-rpki")
- msg["Date"] = email.utils.formatdate(now)
- msg["Subject"] = "Process %s dump of %r" % (self.pid, obj)
- msg["Message-ID"] = email.utils.make_msgid()
- msg["X-RPKI-PID"] = str(self.pid)
- msg["X-RPKI-Object"] = repr(obj)
- msg["X-RPKI-Timestamp"] = "%f" % now
- self.maildir.add(msg)
- self.warned = False
- except Exception, e:
- if not self.warned:
- logger.warning("Could not write to mailbox %s: %s", self.name, e)
- self.warned = True
+ dump_inbound_cms = None
-class XML_CMS_object(Wrapped_CMS_object):
- """
- Class to hold CMS-wrapped XML protocol data.
- """
+ ## @var check_inbound_schema
+ # If set, perform RelaxNG schema check on inbound messages.
- econtent_oid = rpki.oids.id_ct_xml
+ check_inbound_schema = True
- ## @var dump_outbound_cms
- # If set, we write all outbound XML-CMS PDUs to disk, for debugging.
- # If set, value should be a DeadDrop object.
+ ## @var check_outbound_schema
+ # If set, perform RelaxNG schema check on outbound messages.
- dump_outbound_cms = None
+ check_outbound_schema = True
- ## @var dump_inbound_cms
- # If set, we write all inbound XML-CMS PDUs to disk, for debugging.
- # If set, value should be a DeadDrop object.
+ def encode(self):
+ """
+ Encode inner content for signing.
+ """
- dump_inbound_cms = None
+ return lxml.etree.tostring(self.get_content(),
+ pretty_print = True,
+ encoding = self.encoding,
+ xml_declaration = True)
- ## @var check_inbound_schema
- # If set, perform RelaxNG schema check on inbound messages.
+ def decode(self, xml):
+ """
+ Decode XML and set inner content.
+ """
- check_inbound_schema = True
+ # pylint: disable=W0201
+ self.content = lxml.etree.fromstring(xml)
- ## @var check_outbound_schema
- # If set, perform RelaxNG schema check on outbound messages.
+ def pretty_print_content(self):
+ """
+ Pretty print XML content of this message.
+ """
- check_outbound_schema = False
+ return lxml.etree.tostring(self.get_content(),
+ pretty_print = True,
+ encoding = self.encoding,
+ xml_declaration = True)
- def encode(self):
- """
- Encode inner content for signing.
- """
- return lxml.etree.tostring(self.get_content(),
- pretty_print = True,
- encoding = self.encoding,
- xml_declaration = True)
+ def schema_check(self):
+ """
+ Handle XML RelaxNG schema check.
+ """
- def decode(self, xml):
- """
- Decode XML and set inner content.
- """
- self.content = lxml.etree.fromstring(xml)
+ try:
+ self.schema.assertValid(self.get_content())
+ except lxml.etree.DocumentInvalid:
+ logger.error("PDU failed schema check")
+ for line in self.pretty_print_content().splitlines():
+ logger.warning(line)
+ raise
- def pretty_print_content(self):
- """
- Pretty print XML content of this message.
- """
- return lxml.etree.tostring(self.get_content(),
- pretty_print = True,
- encoding = self.encoding,
- xml_declaration = True)
+ def dump_to_disk(self, prefix):
+ """
+ Write DER of current message to disk, for debugging.
+ """
- def schema_check(self):
- """
- Handle XML RelaxNG schema check.
- """
- try:
- self.schema.assertValid(self.get_content())
- except lxml.etree.DocumentInvalid:
- logger.error("PDU failed schema check")
- for line in self.pretty_print_content().splitlines():
- logger.warning(line)
- raise
-
- def dump_to_disk(self, prefix):
- """
- Write DER of current message to disk, for debugging.
- """
- f = open(prefix + rpki.sundial.now().isoformat() + "Z.cms", "wb")
- f.write(self.get_DER())
- f.close()
+ f = open(prefix + rpki.sundial.now().isoformat() + "Z.cms", "wb")
+ f.write(self.get_DER())
+ f.close()
- def wrap(self, msg, keypair, certs, crls = None):
- """
- Wrap an XML PDU in CMS and return its DER encoding.
- """
- if self.saxify is None:
- self.set_content(msg)
- else:
- self.set_content(msg.toXML())
- if self.check_outbound_schema:
- self.schema_check()
- self.sign(keypair, certs, crls)
- if self.dump_outbound_cms:
- self.dump_outbound_cms.dump(self)
- return self.get_DER()
-
- def unwrap(self, ta):
- """
- Unwrap a CMS-wrapped XML PDU and return Python objects.
- """
- if self.dump_inbound_cms:
- self.dump_inbound_cms.dump(self)
- self.verify(ta)
- if self.check_inbound_schema:
- self.schema_check()
- if self.saxify is None:
- return self.get_content()
- else:
- return self.saxify(self.get_content()) # pylint: disable=E1102
-
- def check_replay(self, timestamp, *context):
- """
- Check CMS signing-time in this object against a recorded
- timestamp. Raises an exception if the recorded timestamp is more
- recent, otherwise returns the new timestamp.
- """
- new_timestamp = self.get_signingTime()
- if timestamp is not None and timestamp > new_timestamp:
- if context:
- context = " (" + " ".join(context) + ")"
- raise rpki.exceptions.CMSReplay(
- "CMS replay: last message %s, this message %s%s" % (
- timestamp, new_timestamp, context))
- return new_timestamp
-
- def check_replay_sql(self, obj, *context):
+ def wrap(self, msg, keypair, certs, crls = None):
+ """
+ Wrap an XML PDU in CMS and return its DER encoding.
+ """
+
+ self.set_content(msg)
+ if self.check_outbound_schema:
+ self.schema_check()
+ self.sign(keypair, certs, crls)
+ if self.dump_outbound_cms:
+ self.dump_outbound_cms.dump(self)
+ return self.get_DER()
+
+ def unwrap(self, ta):
+ """
+ Unwrap a CMS-wrapped XML PDU and return Python objects.
+ """
+
+ if self.dump_inbound_cms:
+ self.dump_inbound_cms.dump(self)
+ self.verify(ta)
+ if self.check_inbound_schema:
+ self.schema_check()
+ return self.get_content()
+
+ def check_replay(self, timestamp, *context):
+ """
+ Check CMS signing-time in this object against a recorded
+ timestamp. Raises an exception if the recorded timestamp is more
+ recent, otherwise returns the new timestamp.
+ """
+
+ new_timestamp = self.get_signingTime()
+ if timestamp is not None and timestamp > new_timestamp:
+ if context:
+ context = " (" + " ".join(context) + ")"
+ raise rpki.exceptions.CMSReplay(
+ "CMS replay: last message %s, this message %s%s" % (
+ timestamp, new_timestamp, context))
+ return new_timestamp
+
+ def check_replay_sql(self, obj, *context):
+ """
+ Like .check_replay() but gets recorded timestamp from
+ "last_cms_timestamp" field of an SQL object and stores the new
+ timestamp back in that same field.
+ """
+
+ obj.last_cms_timestamp = self.check_replay(obj.last_cms_timestamp, *context)
+ obj.save()
+
+class SignedReferral(XML_CMS_object):
+ encoding = "us-ascii"
+ schema = rpki.relaxng.oob_setup
+
+class Ghostbuster(Wrapped_CMS_object):
"""
- Like .check_replay() but gets recorded timestamp from
- "last_cms_timestamp" field of an SQL object and stores the new
- timestamp back in that same field.
+ Class to hold Ghostbusters record (CMS-wrapped VCard). This is
+ quite minimal because we treat the VCard as an opaque byte string
+ managed by the back-end.
"""
- obj.last_cms_timestamp = self.check_replay(obj.last_cms_timestamp, *context)
- obj.sql_mark_dirty()
- ## @var saxify
- # SAX handler hook. Subclasses can set this to a SAX handler, in
- # which case .unwrap() will call it and return the result.
- # Otherwise, .unwrap() just returns a verified element tree.
+ econtent_oid = rpki.oids.id_ct_rpkiGhostbusters
- saxify = None
+ def encode(self):
+ """
+ Encode inner content for signing. At the moment we're treating
+ the VCard as an opaque byte string, so no encoding needed here.
+ """
-class SignedReferral(XML_CMS_object):
- encoding = "us-ascii"
- schema = rpki.relaxng.myrpki
- saxify = None
+ return self.get_content()
-class Ghostbuster(Wrapped_CMS_object):
- """
- Class to hold Ghostbusters record (CMS-wrapped VCard). This is
- quite minimal because we treat the VCard as an opaque byte string
- managed by the back-end.
- """
+ def decode(self, vcard):
+ """
+ Decode XML and set inner content. At the moment we're treating
+ the VCard as an opaque byte string, so no encoding needed here.
+ """
- econtent_oid = rpki.oids.id_ct_rpkiGhostbusters
+ # pylint: disable=W0201
+ self.content = vcard
- def encode(self):
- """
- Encode inner content for signing. At the moment we're treating
- the VCard as an opaque byte string, so no encoding needed here.
- """
- return self.get_content()
+ @classmethod
+ def build(cls, vcard, keypair, certs):
+ """
+ Build a Ghostbuster record.
+ """
+
+ self = cls()
+ self.set_content(vcard)
+ self.sign(keypair, certs)
+ return self
- def decode(self, vcard):
- """
- Decode XML and set inner content. At the moment we're treating
- the VCard as an opaque byte string, so no encoding needed here.
- """
- self.content = vcard
- @classmethod
- def build(cls, vcard, keypair, certs):
+class CRL(DER_object):
"""
- Build a Ghostbuster record.
+ Class to hold a Certificate Revocation List.
"""
- self = cls()
- self.set_content(vcard)
- self.sign(keypair, certs)
- return self
+ POW_class = rpki.POW.CRL
-class CRL(DER_object):
- """
- Class to hold a Certificate Revocation List.
- """
+ def get_DER(self):
+ """
+ Get the DER value of this CRL.
+ """
- POW_class = rpki.POW.CRL
+ self.check()
+ if self.DER:
+ return self.DER
+ if self.POW:
+ self.DER = self.POW.derWrite()
+ return self.get_DER()
+ raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
- def get_DER(self):
- """
- Get the DER value of this CRL.
- """
- self.check()
- if self.DER:
- return self.DER
- if self.POW:
- self.DER = self.POW.derWrite()
- return self.get_DER()
- raise rpki.exceptions.DERObjectConversionError("No conversion path to DER available")
-
- def get_POW(self):
- """
- Get the rpki.POW value of this CRL.
- """
- self.check()
- if not self.POW: # pylint: disable=E0203
- self.POW = rpki.POW.CRL.derRead(self.get_DER())
- return self.POW
+ def get_POW(self):
+ """
+ Get the rpki.POW value of this CRL.
+ """
- def getThisUpdate(self):
- """
- Get thisUpdate value from this CRL.
- """
- return self.get_POW().getThisUpdate()
+ # pylint: disable=W0201,E0203
+ self.check()
+ if not self.POW:
+ self.POW = rpki.POW.CRL.derRead(self.get_DER())
+ return self.POW
- def getNextUpdate(self):
- """
- Get nextUpdate value from this CRL.
- """
- return self.get_POW().getNextUpdate()
+ def getThisUpdate(self):
+ """
+ Get thisUpdate value from this CRL.
+ """
- def getIssuer(self):
- """
- Get issuer value of this CRL.
- """
- return X501DN.from_POW(self.get_POW().getIssuer())
+ return self.get_POW().getThisUpdate()
- def getCRLNumber(self):
- """
- Get CRL Number value for this CRL.
- """
- return self.get_POW().getCRLNumber()
+ def getNextUpdate(self):
+ """
+ Get nextUpdate value from this CRL.
+ """
- @classmethod
- def generate(cls, keypair, issuer, serial, thisUpdate, nextUpdate, revokedCertificates, version = 1):
- """
- Generate a new CRL.
- """
- crl = rpki.POW.CRL()
- crl.setVersion(version)
- crl.setIssuer(issuer.getSubject().get_POW())
- crl.setThisUpdate(thisUpdate)
- crl.setNextUpdate(nextUpdate)
- crl.setAKI(issuer.get_SKI())
- crl.setCRLNumber(serial)
- crl.addRevocations(revokedCertificates)
- crl.sign(keypair.get_POW())
- return cls(POW = crl)
-
- @property
- def creation_timestamp(self):
- """
- Time at which this object was created.
- """
- return self.getThisUpdate()
+ return self.get_POW().getNextUpdate()
+
+ def getIssuer(self):
+ """
+ Get issuer value of this CRL.
+ """
+
+ return X501DN.from_POW(self.get_POW().getIssuer())
+
+ def getCRLNumber(self):
+ """
+ Get CRL Number value for this CRL.
+ """
+
+ return self.get_POW().getCRLNumber()
+
+ @classmethod
+ def generate(cls, keypair, issuer, serial, thisUpdate, nextUpdate, revokedCertificates, version = 1):
+ """
+ Generate a new CRL.
+ """
+
+ crl = rpki.POW.CRL()
+ crl.setVersion(version)
+ crl.setIssuer(issuer.getSubject().get_POW())
+ crl.setThisUpdate(thisUpdate)
+ crl.setNextUpdate(nextUpdate)
+ crl.setAKI(issuer.get_SKI())
+ crl.setCRLNumber(serial)
+ crl.addRevocations(revokedCertificates)
+ crl.sign(keypair.get_POW())
+ return cls(POW = crl)
+
+ @property
+ def creation_timestamp(self):
+ """
+ Time at which this object was created.
+ """
+
+ return self.getThisUpdate()
## @var uri_dispatch_map
# Map of known URI filename extensions and corresponding classes.
uri_dispatch_map = {
- ".cer" : X509,
- ".crl" : CRL,
- ".gbr" : Ghostbuster,
- ".mft" : SignedManifest,
- ".mnf" : SignedManifest,
- ".roa" : ROA,
- }
+ ".cer" : X509,
+ ".crl" : CRL,
+ ".gbr" : Ghostbuster,
+ ".mft" : SignedManifest,
+ ".mnf" : SignedManifest,
+ ".roa" : ROA }
def uri_dispatch(uri):
- """
- Return the Python class object corresponding to a given URI.
- """
- return uri_dispatch_map[os.path.splitext(uri)[1]]
+ """
+ Return the Python class object corresponding to a given URI.
+ """
+
+ return uri_dispatch_map[os.path.splitext(uri)[1]]
diff --git a/rpki/xml_utils.py b/rpki/xml_utils.py
deleted file mode 100644
index c276ce98..00000000
--- a/rpki/xml_utils.py
+++ /dev/null
@@ -1,494 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2009-2012 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-#
-# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-"""
-XML utilities.
-"""
-
-import xml.sax
-import lxml.sax
-import lxml.etree
-import rpki.exceptions
-
-class sax_handler(xml.sax.handler.ContentHandler):
- """
- SAX handler for RPKI protocols.
-
- This class provides some basic amenities for parsing protocol XML of
- the kind we use in the RPKI protocols, including whacking all the
- protocol element text into US-ASCII, simplifying accumulation of
- text fields, and hiding some of the fun relating to XML namespaces.
-
- General assumption: by the time this parsing code gets invoked, the
- XML has already passed RelaxNG validation, so we only have to check
- for errors that the schema can't catch, and we don't have to play as
- many XML namespace games.
- """
-
- def __init__(self):
- """
- Initialize SAX handler.
- """
- xml.sax.handler.ContentHandler.__init__(self)
- self.text = ""
- self.stack = []
-
- def startElementNS(self, name, qname, attrs):
- """
- Redirect startElementNS() events to startElement().
- """
- return self.startElement(name[1], attrs)
-
- def endElementNS(self, name, qname):
- """
- Redirect endElementNS() events to endElement().
- """
- return self.endElement(name[1])
-
- def characters(self, content):
- """
- Accumulate a chuck of element content (text).
- """
- self.text += content
-
- def startElement(self, name, attrs):
- """
- Handle startElement() events.
-
- We maintain a stack of nested elements under construction so that
- we can feed events directly to the current element rather than
- having to pass them through all the nesting elements.
-
- If the stack is empty, this event is for the outermost element, so
- we call a virtual method to create the corresponding object and
- that's the object we'll be returning as our final result.
- """
-
- a = dict()
- for k, v in attrs.items():
- if isinstance(k, tuple):
- if k == ("http://www.w3.org/XML/1998/namespace", "lang"):
- k = "xml:lang"
- else:
- assert k[0] is None
- k = k[1]
- a[k.encode("ascii")] = v.encode("ascii")
- if len(self.stack) == 0:
- assert not hasattr(self, "result")
- self.result = self.create_top_level(name, a)
- self.stack.append(self.result)
- self.stack[-1].startElement(self.stack, name, a)
-
- def endElement(self, name):
- """
- Handle endElement() events. Mostly this means handling any
- accumulated element text.
- """
- text = self.text.encode("ascii").strip()
- self.text = ""
- self.stack[-1].endElement(self.stack, name, text)
-
- @classmethod
- def saxify(cls, elt):
- """
- Create a one-off SAX parser, parse an ETree, return the result.
- """
- self = cls()
- lxml.sax.saxify(elt, self)
- return self.result
-
- def create_top_level(self, name, attrs):
- """
- Handle top-level PDU for this protocol.
- """
- assert name == self.name and attrs["version"] == self.version
- return self.pdu()
-
-class base_elt(object):
- """
- Virtual base class for XML message elements. The left-right and
- publication protocols use this. At least for now, the up-down
- protocol does not, due to different design assumptions.
- """
-
- ## @var attributes
- # XML attributes for this element.
- attributes = ()
-
- ## @var elements
- # XML elements contained by this element.
- elements = ()
-
- ## @var booleans
- # Boolean attributes (value "yes" or "no") for this element.
- booleans = ()
-
- def startElement(self, stack, name, attrs):
- """
- Default startElement() handler: just process attributes.
- """
- if name not in self.elements:
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- self.read_attrs(attrs)
-
- def endElement(self, stack, name, text):
- """
- Default endElement() handler: just pop the stack.
- """
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
-
- def toXML(self):
- """
- Default toXML() element generator.
- """
- return self.make_elt()
-
- def read_attrs(self, attrs):
- """
- Template-driven attribute reader.
- """
- for key in self.attributes:
- val = attrs.get(key, None)
- if isinstance(val, str) and val.isdigit() and not key.endswith("_handle"):
- val = long(val)
- setattr(self, key, val)
- for key in self.booleans:
- setattr(self, key, attrs.get(key, False))
-
- def make_elt(self):
- """
- XML element constructor.
- """
- elt = lxml.etree.Element(self.xmlns + self.element_name, nsmap = self.nsmap)
- for key in self.attributes:
- val = getattr(self, key, None)
- if val is not None:
- elt.set(key, str(val))
- for key in self.booleans:
- if getattr(self, key, False):
- elt.set(key, "yes")
- return elt
-
- def make_b64elt(self, elt, name, value):
- """
- Constructor for Base64-encoded subelement.
- """
- if value is not None and not value.empty():
- lxml.etree.SubElement(elt, self.xmlns + name, nsmap = self.nsmap).text = value.get_Base64()
-
- def __str__(self):
- """
- Convert a base_elt object to string format.
- """
- return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "us-ascii")
-
- @classmethod
- def make_pdu(cls, **kargs):
- """
- Generic PDU constructor.
- """
- self = cls()
- for k, v in kargs.items():
- if isinstance(v, bool):
- v = 1 if v else 0
- setattr(self, k, v)
- return self
-
-class text_elt(base_elt):
- """
- Virtual base class for XML message elements that contain text.
- """
-
- ## @var text_attribute
- # Name of the class attribute that holds the text value.
- text_attribute = None
-
- def endElement(self, stack, name, text):
- """
- Extract text from parsed XML.
- """
- base_elt.endElement(self, stack, name, text)
- setattr(self, self.text_attribute, text)
-
- def toXML(self):
- """
- Insert text into generated XML.
- """
- elt = self.make_elt()
- elt.text = getattr(self, self.text_attribute) or None
- return elt
-
-class data_elt(base_elt):
- """
- Virtual base class for PDUs that map to SQL objects. These objects
- all implement the create/set/get/list/destroy action attribute.
- """
-
- def endElement(self, stack, name, text):
- """
- Default endElement handler for SQL-based objects. This assumes
- that sub-elements are Base64-encoded using the sql_template
- mechanism.
- """
- if name in self.elements:
- elt_type = self.sql_template.map.get(name)
- assert elt_type is not None, "Couldn't find element type for %s, stack %s" % (name, stack)
- setattr(self, name, elt_type(Base64 = text))
- else:
- assert name == self.element_name, "Unexpected name %s, stack %s" % (name, stack)
- stack.pop()
-
- def toXML(self):
- """
- Default element generator for SQL-based objects. This assumes
- that sub-elements are Base64-encoded DER objects.
- """
- elt = self.make_elt()
- for i in self.elements:
- self.make_b64elt(elt, i, getattr(self, i, None))
- return elt
-
- def make_reply(self, r_pdu = None):
- """
- Construct a reply PDU.
- """
- if r_pdu is None:
- r_pdu = self.__class__()
- self.make_reply_clone_hook(r_pdu)
- handle_name = self.element_name + "_handle"
- setattr(r_pdu, handle_name, getattr(self, handle_name, None))
- else:
- self.make_reply_clone_hook(r_pdu)
- for b in r_pdu.booleans:
- setattr(r_pdu, b, False)
- r_pdu.action = self.action
- r_pdu.tag = self.tag
- return r_pdu
-
- def make_reply_clone_hook(self, r_pdu):
- """
- Overridable hook.
- """
- pass
-
- def serve_fetch_one(self):
- """
- Find the object on which a get, set, or destroy method should
- operate.
- """
- r = self.serve_fetch_one_maybe()
- if r is None:
- raise rpki.exceptions.NotFound
- return r
-
- def serve_pre_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Overridable hook.
- """
- cb()
-
- def serve_post_save_hook(self, q_pdu, r_pdu, cb, eb):
- """
- Overridable hook.
- """
- cb()
-
- def serve_create(self, r_msg, cb, eb):
- """
- Handle a create action.
- """
-
- r_pdu = self.make_reply()
-
- def one():
- self.sql_store()
- setattr(r_pdu, self.sql_template.index, getattr(self, self.sql_template.index))
- self.serve_post_save_hook(self, r_pdu, two, eb)
-
- def two():
- r_msg.append(r_pdu)
- cb()
-
- oops = self.serve_fetch_one_maybe()
- if oops is not None:
- raise rpki.exceptions.DuplicateObject("Object already exists: %r[%r] %r[%r]" % (self, getattr(self, self.element_name + "_handle"),
- oops, getattr(oops, oops.element_name + "_handle")))
-
- self.serve_pre_save_hook(self, r_pdu, one, eb)
-
- def serve_set(self, r_msg, cb, eb):
- """
- Handle a set action.
- """
-
- db_pdu = self.serve_fetch_one()
- r_pdu = self.make_reply()
- for a in db_pdu.sql_template.columns[1:]:
- v = getattr(self, a, None)
- if v is not None:
- setattr(db_pdu, a, v)
- db_pdu.sql_mark_dirty()
-
- def one():
- db_pdu.sql_store()
- db_pdu.serve_post_save_hook(self, r_pdu, two, eb)
-
- def two():
- r_msg.append(r_pdu)
- cb()
-
- db_pdu.serve_pre_save_hook(self, r_pdu, one, eb)
-
- def serve_get(self, r_msg, cb, eb):
- """
- Handle a get action.
- """
- r_pdu = self.serve_fetch_one()
- self.make_reply(r_pdu)
- r_msg.append(r_pdu)
- cb()
-
- def serve_list(self, r_msg, cb, eb):
- """
- Handle a list action for non-self objects.
- """
- for r_pdu in self.serve_fetch_all():
- self.make_reply(r_pdu)
- r_msg.append(r_pdu)
- cb()
-
- def serve_destroy_hook(self, cb, eb):
- """
- Overridable hook.
- """
- cb()
-
- def serve_destroy(self, r_msg, cb, eb):
- """
- Handle a destroy action.
- """
- def done():
- db_pdu.sql_delete()
- r_msg.append(self.make_reply())
- cb()
- db_pdu = self.serve_fetch_one()
- db_pdu.serve_destroy_hook(done, eb)
-
- def serve_dispatch(self, r_msg, cb, eb):
- """
- Action dispatch handler.
- """
- dispatch = { "create" : self.serve_create,
- "set" : self.serve_set,
- "get" : self.serve_get,
- "list" : self.serve_list,
- "destroy" : self.serve_destroy }
- if self.action not in dispatch:
- raise rpki.exceptions.BadQuery("Unexpected query: action %s" % self.action)
- dispatch[self.action](r_msg, cb, eb)
-
- def unimplemented_control(self, *controls):
- """
- Uniform handling for unimplemented control operations.
- """
- unimplemented = [x for x in controls if getattr(self, x, False)]
- if unimplemented:
- raise rpki.exceptions.NotImplementedYet("Unimplemented control %s" % ", ".join(unimplemented))
-
-class msg(list):
- """
- Generic top-level PDU.
- """
-
- def startElement(self, stack, name, attrs):
- """
- Handle top-level PDU.
- """
- if name == "msg":
- assert self.version == int(attrs["version"])
- self.type = attrs["type"]
- else:
- elt = self.pdus[name]()
- self.append(elt)
- stack.append(elt)
- elt.startElement(stack, name, attrs)
-
- def endElement(self, stack, name, text):
- """
- Handle top-level PDU.
- """
- assert name == "msg", "Unexpected name %s, stack %s" % (name, stack)
- assert len(stack) == 1
- stack.pop()
-
- def __str__(self):
- """
- Convert msg object to string.
- """
- return lxml.etree.tostring(self.toXML(), pretty_print = True, encoding = "us-ascii")
-
- def toXML(self):
- """
- Generate top-level PDU.
- """
- elt = lxml.etree.Element(self.xmlns + "msg", nsmap = self.nsmap, version = str(self.version), type = self.type)
- elt.extend([i.toXML() for i in self])
- return elt
-
- @classmethod
- def query(cls, *args):
- """
- Create a query PDU.
- """
- self = cls(args)
- self.type = "query"
- return self
-
- @classmethod
- def reply(cls, *args):
- """
- Create a reply PDU.
- """
- self = cls(args)
- self.type = "reply"
- return self
-
- def is_query(self):
- """
- Is this msg a query?
- """
- return self.type == "query"
-
- def is_reply(self):
- """
- Is this msg a reply?
- """
- return self.type == "reply"
diff --git a/schemas/relaxng/left-right-schema.rnc b/schemas/relaxng/left-right.rnc
index 201f8ff0..c2592c0f 100644
--- a/schemas/relaxng/left-right-schema.rnc
+++ b/schemas/relaxng/left-right.rnc
@@ -32,7 +32,7 @@ start = element msg {
}
# PDUs allowed in a query
-query_elt |= self_query
+query_elt |= tenant_query
query_elt |= bsc_query
query_elt |= parent_query
query_elt |= child_query
@@ -45,7 +45,7 @@ query_elt |= list_published_objects_query
query_elt |= list_received_resources_query
# PDUs allowed in a reply
-reply_elt |= self_reply
+reply_elt |= tenant_reply
reply_elt |= bsc_reply
reply_elt |= parent_reply
reply_elt |= child_reply
@@ -92,34 +92,34 @@ asn_list = xsd:string { maxLength="512000" pattern="[\-,0-9]*" }
ipv4_list = xsd:string { maxLength="512000" pattern="[\-,0-9/.]*" }
ipv6_list = xsd:string { maxLength="512000" pattern="[\-,0-9/:a-fA-F]*" }
-# <self/> element
-
-self_bool = (attribute rekey { "yes" }?,
- attribute reissue { "yes" }?,
- attribute revoke { "yes" }?,
- attribute run_now { "yes" }?,
- attribute publish_world_now { "yes" }?,
- attribute revoke_forgotten { "yes" }?,
- attribute clear_replay_protection { "yes" }?)
-
-self_payload = (attribute use_hsm { "yes" | "no" }?,
- attribute crl_interval { xsd:positiveInteger }?,
- attribute regen_margin { xsd:positiveInteger }?,
- element bpki_cert { base64 }?,
- element bpki_glue { base64 }?)
-
-self_handle = attribute self_handle { object_handle }
-
-self_query |= element self { ctl_create, self_handle, self_bool, self_payload }
-self_reply |= element self { ctl_create, self_handle }
-self_query |= element self { ctl_set, self_handle, self_bool, self_payload }
-self_reply |= element self { ctl_set, self_handle }
-self_query |= element self { ctl_get, self_handle }
-self_reply |= element self { ctl_get, self_handle, self_payload }
-self_query |= element self { ctl_list }
-self_reply |= element self { ctl_list, self_handle, self_payload }
-self_query |= element self { ctl_destroy, self_handle }
-self_reply |= element self { ctl_destroy, self_handle }
+# <tenant/> element
+
+tenant_bool = (attribute rekey { "yes" }?,
+ attribute reissue { "yes" }?,
+ attribute revoke { "yes" }?,
+ attribute run_now { "yes" }?,
+ attribute publish_world_now { "yes" }?,
+ attribute revoke_forgotten { "yes" }?,
+ attribute clear_replay_protection { "yes" }?)
+
+tenant_payload = (attribute use_hsm { "yes" | "no" }?,
+ attribute crl_interval { xsd:positiveInteger }?,
+ attribute regen_margin { xsd:positiveInteger }?,
+ element bpki_cert { base64 }?,
+ element bpki_glue { base64 }?)
+
+tenant_handle = attribute tenant_handle { object_handle }
+
+tenant_query |= element tenant { ctl_create, tenant_handle, tenant_bool, tenant_payload }
+tenant_reply |= element tenant { ctl_create, tenant_handle }
+tenant_query |= element tenant { ctl_set, tenant_handle, tenant_bool, tenant_payload }
+tenant_reply |= element tenant { ctl_set, tenant_handle }
+tenant_query |= element tenant { ctl_get, tenant_handle }
+tenant_reply |= element tenant { ctl_get, tenant_handle, tenant_payload }
+tenant_query |= element tenant { ctl_list }
+tenant_reply |= element tenant { ctl_list, tenant_handle, tenant_payload }
+tenant_query |= element tenant { ctl_destroy, tenant_handle }
+tenant_reply |= element tenant { ctl_destroy, tenant_handle }
# <bsc/> element. Key parameters hardwired for now.
@@ -135,16 +135,16 @@ bsc_payload = (element signing_cert { base64 }?,
bsc_readonly = element pkcs10_request { base64 }?
-bsc_query |= element bsc { ctl_create, self_handle, bsc_handle, bsc_bool, bsc_payload }
-bsc_reply |= element bsc { ctl_create, self_handle, bsc_handle, bsc_readonly }
-bsc_query |= element bsc { ctl_set, self_handle, bsc_handle, bsc_bool, bsc_payload }
-bsc_reply |= element bsc { ctl_set, self_handle, bsc_handle, bsc_readonly }
-bsc_query |= element bsc { ctl_get, self_handle, bsc_handle }
-bsc_reply |= element bsc { ctl_get, self_handle, bsc_handle, bsc_payload, bsc_readonly }
-bsc_query |= element bsc { ctl_list, self_handle }
-bsc_reply |= element bsc { ctl_list, self_handle, bsc_handle, bsc_payload, bsc_readonly }
-bsc_query |= element bsc { ctl_destroy, self_handle, bsc_handle }
-bsc_reply |= element bsc { ctl_destroy, self_handle, bsc_handle }
+bsc_query |= element bsc { ctl_create, tenant_handle, bsc_handle, bsc_bool, bsc_payload }
+bsc_reply |= element bsc { ctl_create, tenant_handle, bsc_handle, bsc_readonly }
+bsc_query |= element bsc { ctl_set, tenant_handle, bsc_handle, bsc_bool, bsc_payload }
+bsc_reply |= element bsc { ctl_set, tenant_handle, bsc_handle, bsc_readonly }
+bsc_query |= element bsc { ctl_get, tenant_handle, bsc_handle }
+bsc_reply |= element bsc { ctl_get, tenant_handle, bsc_handle, bsc_payload, bsc_readonly }
+bsc_query |= element bsc { ctl_list, tenant_handle }
+bsc_reply |= element bsc { ctl_list, tenant_handle, bsc_handle, bsc_payload, bsc_readonly }
+bsc_query |= element bsc { ctl_destroy, tenant_handle, bsc_handle }
+bsc_reply |= element bsc { ctl_destroy, tenant_handle, bsc_handle }
# <parent/> element
@@ -154,7 +154,7 @@ parent_bool = (attribute rekey { "yes" }?,
attribute reissue { "yes" }?,
attribute revoke { "yes" }?,
attribute revoke_forgotten { "yes" }?,
- attribute clear_replay_protection { "yes" }?)
+ attribute clear_replay_protection { "yes" }?)
parent_payload = (attribute peer_contact_uri { uri }?,
attribute sia_base { uri }?,
@@ -162,19 +162,24 @@ parent_payload = (attribute peer_contact_uri { uri }?,
repository_handle?,
attribute sender_name { up_down_name }?,
attribute recipient_name { up_down_name }?,
- element bpki_cms_cert { base64 }?,
- element bpki_cms_glue { base64 }?)
-
-parent_query |= element parent { ctl_create, self_handle, parent_handle, parent_bool, parent_payload }
-parent_reply |= element parent { ctl_create, self_handle, parent_handle }
-parent_query |= element parent { ctl_set, self_handle, parent_handle, parent_bool, parent_payload }
-parent_reply |= element parent { ctl_set, self_handle, parent_handle }
-parent_query |= element parent { ctl_get, self_handle, parent_handle }
-parent_reply |= element parent { ctl_get, self_handle, parent_handle, parent_payload }
-parent_query |= element parent { ctl_list, self_handle }
-parent_reply |= element parent { ctl_list, self_handle, parent_handle, parent_payload }
-parent_query |= element parent { ctl_destroy, self_handle, parent_handle }
-parent_reply |= element parent { ctl_destroy, self_handle, parent_handle }
+ attribute root_asn_resources { asn_list }?,
+ attribute root_ipv4_resources { ipv4_list }?,
+ attribute root_ipv6_resources { ipv6_list }?,
+ element bpki_cert { base64 }?,
+ element bpki_glue { base64 }?)
+
+parent_readonly = element rpki_root_cert { base64 }?
+
+parent_query |= element parent { ctl_create, tenant_handle, parent_handle, parent_bool, parent_payload }
+parent_reply |= element parent { ctl_create, tenant_handle, parent_handle, parent_readonly }
+parent_query |= element parent { ctl_set, tenant_handle, parent_handle, parent_bool, parent_payload }
+parent_reply |= element parent { ctl_set, tenant_handle, parent_handle, parent_readonly }
+parent_query |= element parent { ctl_get, tenant_handle, parent_handle }
+parent_reply |= element parent { ctl_get, tenant_handle, parent_handle, parent_payload, parent_readonly }
+parent_query |= element parent { ctl_list, tenant_handle }
+parent_reply |= element parent { ctl_list, tenant_handle, parent_handle, parent_payload, parent_readonly }
+parent_query |= element parent { ctl_destroy, tenant_handle, parent_handle }
+parent_reply |= element parent { ctl_destroy, tenant_handle, parent_handle }
# <child/> element
@@ -187,16 +192,16 @@ child_payload = (bsc_handle?,
element bpki_cert { base64 }?,
element bpki_glue { base64 }?)
-child_query |= element child { ctl_create, self_handle, child_handle, child_bool, child_payload }
-child_reply |= element child { ctl_create, self_handle, child_handle }
-child_query |= element child { ctl_set, self_handle, child_handle, child_bool, child_payload }
-child_reply |= element child { ctl_set, self_handle, child_handle }
-child_query |= element child { ctl_get, self_handle, child_handle }
-child_reply |= element child { ctl_get, self_handle, child_handle, child_payload }
-child_query |= element child { ctl_list, self_handle }
-child_reply |= element child { ctl_list, self_handle, child_handle, child_payload }
-child_query |= element child { ctl_destroy, self_handle, child_handle }
-child_reply |= element child { ctl_destroy, self_handle, child_handle }
+child_query |= element child { ctl_create, tenant_handle, child_handle, child_bool, child_payload }
+child_reply |= element child { ctl_create, tenant_handle, child_handle }
+child_query |= element child { ctl_set, tenant_handle, child_handle, child_bool, child_payload }
+child_reply |= element child { ctl_set, tenant_handle, child_handle }
+child_query |= element child { ctl_get, tenant_handle, child_handle }
+child_reply |= element child { ctl_get, tenant_handle, child_handle, child_payload }
+child_query |= element child { ctl_list, tenant_handle }
+child_reply |= element child { ctl_list, tenant_handle, child_handle, child_payload }
+child_query |= element child { ctl_destroy, tenant_handle, child_handle }
+child_reply |= element child { ctl_destroy, tenant_handle, child_handle }
# <repository/> element
@@ -206,28 +211,29 @@ repository_bool = attribute clear_replay_protection { "yes" }?
repository_payload = (attribute peer_contact_uri { uri }?,
bsc_handle?,
+ attribute rrdp_notification_uri { uri }?,
element bpki_cert { base64 }?,
element bpki_glue { base64 }?)
-repository_query |= element repository { ctl_create, self_handle, repository_handle, repository_bool, repository_payload }
-repository_reply |= element repository { ctl_create, self_handle, repository_handle }
-repository_query |= element repository { ctl_set, self_handle, repository_handle, repository_bool, repository_payload }
-repository_reply |= element repository { ctl_set, self_handle, repository_handle }
-repository_query |= element repository { ctl_get, self_handle, repository_handle }
-repository_reply |= element repository { ctl_get, self_handle, repository_handle, repository_payload }
-repository_query |= element repository { ctl_list, self_handle }
-repository_reply |= element repository { ctl_list, self_handle, repository_handle, repository_payload }
-repository_query |= element repository { ctl_destroy, self_handle, repository_handle }
-repository_reply |= element repository { ctl_destroy, self_handle, repository_handle }
+repository_query |= element repository { ctl_create, tenant_handle, repository_handle, repository_bool, repository_payload }
+repository_reply |= element repository { ctl_create, tenant_handle, repository_handle }
+repository_query |= element repository { ctl_set, tenant_handle, repository_handle, repository_bool, repository_payload }
+repository_reply |= element repository { ctl_set, tenant_handle, repository_handle }
+repository_query |= element repository { ctl_get, tenant_handle, repository_handle }
+repository_reply |= element repository { ctl_get, tenant_handle, repository_handle, repository_payload }
+repository_query |= element repository { ctl_list, tenant_handle }
+repository_reply |= element repository { ctl_list, tenant_handle, repository_handle, repository_payload }
+repository_query |= element repository { ctl_destroy, tenant_handle, repository_handle }
+repository_reply |= element repository { ctl_destroy, tenant_handle, repository_handle }
# <list_resources/> element
list_resources_query = element list_resources {
- tag, self_handle, child_handle
+ tag, tenant_handle, child_handle
}
list_resources_reply = element list_resources {
- tag, self_handle, child_handle,
+ tag, tenant_handle, child_handle,
attribute valid_until { xsd:dateTime { pattern=".*Z" } },
attribute asn { asn_list }?,
attribute ipv4 { ipv4_list }?,
@@ -237,11 +243,11 @@ list_resources_reply = element list_resources {
# <list_roa_requests/> element
list_roa_requests_query = element list_roa_requests {
- tag, self_handle
+ tag, tenant_handle
}
list_roa_requests_reply = element list_roa_requests {
- tag, self_handle,
+ tag, tenant_handle,
attribute asn { xsd:nonNegativeInteger },
attribute ipv4 { ipv4_list }?,
attribute ipv6 { ipv6_list }?
@@ -250,28 +256,28 @@ list_roa_requests_reply = element list_roa_requests {
# <list_ghostbuster_requests/> element
list_ghostbuster_requests_query = element list_ghostbuster_requests {
- tag, self_handle, parent_handle
+ tag, tenant_handle, parent_handle
}
list_ghostbuster_requests_reply = element list_ghostbuster_requests {
- tag, self_handle, parent_handle,
+ tag, tenant_handle, parent_handle,
xsd:string
}
# <list_ee_certificate_requests/> element
list_ee_certificate_requests_query = element list_ee_certificate_requests {
- tag, self_handle
+ tag, tenant_handle
}
list_ee_certificate_requests_reply = element list_ee_certificate_requests {
- tag, self_handle,
+ tag, tenant_handle,
attribute gski { xsd:token { minLength="27" maxLength="27" } },
attribute valid_until { xsd:dateTime { pattern=".*Z" } },
attribute asn { asn_list }?,
attribute ipv4 { ipv4_list }?,
attribute ipv6 { ipv6_list }?,
- attribute cn { xsd:string { maxLength="64" pattern="[\-0-9A-Za-z_ ]+" } }?,
+ attribute cn { xsd:string { maxLength="64" pattern="[\-0-9A-Za-z_ ]+" } },
attribute sn { xsd:string { maxLength="64" pattern="[0-9A-Fa-f]+" } }?,
attribute eku { xsd:string { maxLength="512000" pattern="[.,0-9]+" } }?,
element pkcs10 { base64 }
@@ -280,11 +286,11 @@ list_ee_certificate_requests_reply = element list_ee_certificate_requests {
# <list_published_objects/> element
list_published_objects_query = element list_published_objects {
- tag, self_handle
+ tag, tenant_handle
}
list_published_objects_reply = element list_published_objects {
- tag, self_handle,
+ tag, tenant_handle,
attribute uri { uri },
attribute child_handle { object_handle }?,
base64
@@ -293,11 +299,11 @@ list_published_objects_reply = element list_published_objects {
# <list_received_resources/> element
list_received_resources_query = element list_received_resources {
- tag, self_handle
+ tag, tenant_handle
}
list_received_resources_reply = element list_received_resources {
- tag, self_handle, parent_handle,
+ tag, tenant_handle, parent_handle,
attribute notBefore { xsd:dateTime { pattern=".*Z" } },
attribute notAfter { xsd:dateTime { pattern=".*Z" } },
attribute uri { uri },
@@ -313,7 +319,7 @@ list_received_resources_reply = element list_received_resources {
error = xsd:token { maxLength="1024" }
report_error_reply = element report_error {
- tag, self_handle?,
+ tag, tenant_handle?,
attribute error_code { error },
xsd:string { maxLength="512000" }?
}
diff --git a/schemas/relaxng/left-right-schema.rng b/schemas/relaxng/left-right.rng
index c5596a2f..cd8c1896 100644
--- a/schemas/relaxng/left-right-schema.rng
+++ b/schemas/relaxng/left-right.rng
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: left-right-schema.rnc 5902 2014-07-18 16:37:04Z sra $
+ $Id$
RelaxNG schema for RPKI left-right protocol.
@@ -55,7 +55,7 @@
</start>
<!-- PDUs allowed in a query -->
<define name="query_elt" combine="choice">
- <ref name="self_query"/>
+ <ref name="tenant_query"/>
</define>
<define name="query_elt" combine="choice">
<ref name="bsc_query"/>
@@ -89,7 +89,7 @@
</define>
<!-- PDUs allowed in a reply -->
<define name="reply_elt" combine="choice">
- <ref name="self_reply"/>
+ <ref name="tenant_reply"/>
</define>
<define name="reply_elt" combine="choice">
<ref name="bsc_reply"/>
@@ -221,8 +221,8 @@
<param name="pattern">[\-,0-9/:a-fA-F]*</param>
</data>
</define>
- <!-- <self/> element -->
- <define name="self_bool">
+ <!-- <tenant/> element -->
+ <define name="tenant_bool">
<optional>
<attribute name="rekey">
<value>yes</value>
@@ -259,7 +259,7 @@
</attribute>
</optional>
</define>
- <define name="self_payload">
+ <define name="tenant_payload">
<optional>
<attribute name="use_hsm">
<choice>
@@ -289,74 +289,74 @@
</element>
</optional>
</define>
- <define name="self_handle">
- <attribute name="self_handle">
+ <define name="tenant_handle">
+ <attribute name="tenant_handle">
<ref name="object_handle"/>
</attribute>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
- <ref name="self_bool"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_bool"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
- <ref name="self_bool"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_bool"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_list"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
- <ref name="self_payload"/>
+ <ref name="tenant_handle"/>
+ <ref name="tenant_payload"/>
</element>
</define>
- <define name="self_query" combine="choice">
- <element name="self">
+ <define name="tenant_query" combine="choice">
+ <element name="tenant">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
- <define name="self_reply" combine="choice">
- <element name="self">
+ <define name="tenant_reply" combine="choice">
+ <element name="tenant">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<!-- <bsc/> element. Key parameters hardwired for now. -->
@@ -409,7 +409,7 @@
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_bool"/>
<ref name="bsc_payload"/>
@@ -418,7 +418,7 @@
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_readonly"/>
</element>
@@ -426,7 +426,7 @@
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_bool"/>
<ref name="bsc_payload"/>
@@ -435,7 +435,7 @@
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_readonly"/>
</element>
@@ -443,14 +443,14 @@
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_payload"/>
<ref name="bsc_readonly"/>
@@ -459,13 +459,13 @@
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
<ref name="bsc_payload"/>
<ref name="bsc_readonly"/>
@@ -474,14 +474,14 @@
<define name="bsc_query" combine="choice">
<element name="bsc">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
<define name="bsc_reply" combine="choice">
<element name="bsc">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="bsc_handle"/>
</element>
</define>
@@ -546,12 +546,34 @@
</attribute>
</optional>
<optional>
- <element name="bpki_cms_cert">
+ <attribute name="root_asn_resources">
+ <ref name="asn_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="root_ipv4_resources">
+ <ref name="ipv4_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <attribute name="root_ipv6_resources">
+ <ref name="ipv6_list"/>
+ </attribute>
+ </optional>
+ <optional>
+ <element name="bpki_cert">
<ref name="base64"/>
</element>
</optional>
<optional>
- <element name="bpki_cms_glue">
+ <element name="bpki_glue">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="parent_readonly">
+ <optional>
+ <element name="rpki_root_cert">
<ref name="base64"/>
</element>
</optional>
@@ -559,7 +581,7 @@
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_bool"/>
<ref name="parent_payload"/>
@@ -568,14 +590,15 @@
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_bool"/>
<ref name="parent_payload"/>
@@ -584,50 +607,53 @@
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_payload"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<ref name="parent_payload"/>
+ <ref name="parent_readonly"/>
</element>
</define>
<define name="parent_query" combine="choice">
<element name="parent">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="parent_reply" combine="choice">
<element name="parent">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
@@ -667,7 +693,7 @@
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_bool"/>
<ref name="child_payload"/>
@@ -676,14 +702,14 @@
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_bool"/>
<ref name="child_payload"/>
@@ -692,21 +718,21 @@
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_payload"/>
</element>
@@ -714,13 +740,13 @@
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<ref name="child_payload"/>
</element>
@@ -728,14 +754,14 @@
<define name="child_query" combine="choice">
<element name="child">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="child_reply" combine="choice">
<element name="child">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
@@ -762,6 +788,11 @@
<ref name="bsc_handle"/>
</optional>
<optional>
+ <attribute name="rrdp_notification_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <optional>
<element name="bpki_cert">
<ref name="base64"/>
</element>
@@ -775,7 +806,7 @@
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_bool"/>
<ref name="repository_payload"/>
@@ -784,14 +815,14 @@
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_create"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_bool"/>
<ref name="repository_payload"/>
@@ -800,21 +831,21 @@
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_set"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_get"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_payload"/>
</element>
@@ -822,13 +853,13 @@
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_list"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
<ref name="repository_payload"/>
</element>
@@ -836,14 +867,14 @@
<define name="repository_query" combine="choice">
<element name="repository">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
<define name="repository_reply" combine="choice">
<element name="repository">
<ref name="ctl_destroy"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="repository_handle"/>
</element>
</define>
@@ -851,14 +882,14 @@
<define name="list_resources_query">
<element name="list_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
</element>
</define>
<define name="list_resources_reply">
<element name="list_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="child_handle"/>
<attribute name="valid_until">
<data type="dateTime">
@@ -886,13 +917,13 @@
<define name="list_roa_requests_query">
<element name="list_roa_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_roa_requests_reply">
<element name="list_roa_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="asn">
<data type="nonNegativeInteger"/>
</attribute>
@@ -912,14 +943,14 @@
<define name="list_ghostbuster_requests_query">
<element name="list_ghostbuster_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
</element>
</define>
<define name="list_ghostbuster_requests_reply">
<element name="list_ghostbuster_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<data type="string"/>
</element>
@@ -928,13 +959,13 @@
<define name="list_ee_certificate_requests_query">
<element name="list_ee_certificate_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_ee_certificate_requests_reply">
<element name="list_ee_certificate_requests">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="gski">
<data type="token">
<param name="minLength">27</param>
@@ -961,14 +992,12 @@
<ref name="ipv6_list"/>
</attribute>
</optional>
- <optional>
- <attribute name="cn">
- <data type="string">
- <param name="maxLength">64</param>
- <param name="pattern">[\-0-9A-Za-z_ ]+</param>
- </data>
- </attribute>
- </optional>
+ <attribute name="cn">
+ <data type="string">
+ <param name="maxLength">64</param>
+ <param name="pattern">[\-0-9A-Za-z_ ]+</param>
+ </data>
+ </attribute>
<optional>
<attribute name="sn">
<data type="string">
@@ -994,13 +1023,13 @@
<define name="list_published_objects_query">
<element name="list_published_objects">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_published_objects_reply">
<element name="list_published_objects">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<attribute name="uri">
<ref name="uri"/>
</attribute>
@@ -1016,13 +1045,13 @@
<define name="list_received_resources_query">
<element name="list_received_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</element>
</define>
<define name="list_received_resources_reply">
<element name="list_received_resources">
<ref name="tag"/>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
<ref name="parent_handle"/>
<attribute name="notBefore">
<data type="dateTime">
@@ -1070,7 +1099,7 @@
<element name="report_error">
<ref name="tag"/>
<optional>
- <ref name="self_handle"/>
+ <ref name="tenant_handle"/>
</optional>
<attribute name="error_code">
<ref name="error"/>
diff --git a/schemas/relaxng/myrpki.rng b/schemas/relaxng/myrpki.rng
index 8c7473eb..3beafe8f 100644
--- a/schemas/relaxng/myrpki.rng
+++ b/schemas/relaxng/myrpki.rng
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: myrpki.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: myrpki.rnc 5876 2014-06-26 19:00:12Z sra $
RelaxNG schema for MyRPKI XML messages.
diff --git a/schemas/relaxng/oob-setup.rnc b/schemas/relaxng/oob-setup.rnc
new file mode 100644
index 00000000..3bd7a652
--- /dev/null
+++ b/schemas/relaxng/oob-setup.rnc
@@ -0,0 +1,68 @@
+# $Id: rpki-setup.rnc 3429 2015-10-14 23:46:50Z sra $
+
+default namespace = "http://www.hactrn.net/uris/rpki/rpki-setup/"
+
+version = "1"
+
+base64 = xsd:base64Binary { maxLength="512000" }
+handle = xsd:string { maxLength="255" pattern="[\-_A-Za-z0-9/]*" }
+uri = xsd:anyURI { maxLength="4096" }
+any = element * { attribute * { text }*, ( any | text )* }
+
+authorization_token = base64
+bpki_ta = base64
+
+start |= element child_request {
+ attribute version { version },
+ attribute child_handle { handle },
+ element child_bpki_ta { bpki_ta }
+}
+
+start |= element parent_response {
+ attribute version { version },
+ attribute service_uri { uri },
+ attribute child_handle { handle },
+ attribute parent_handle { handle },
+ element parent_bpki_ta { bpki_ta },
+ element offer { empty }?,
+ element referral {
+ attribute referrer { handle },
+ attribute contact_uri { uri }?,
+ authorization_token
+ }*
+}
+
+start |= element publisher_request {
+ attribute version { version },
+ attribute publisher_handle { handle },
+ element publisher_bpki_ta { bpki_ta },
+ element referral {
+ attribute referrer { handle },
+ authorization_token
+ }*
+}
+
+start |= element repository_response {
+ attribute version { version },
+ attribute service_uri { uri },
+ attribute publisher_handle { handle },
+ attribute sia_base { uri },
+ attribute rrdp_notification_uri { uri }?,
+ element repository_bpki_ta { bpki_ta }
+}
+
+start |= element authorization {
+ attribute version { version },
+ attribute authorized_sia_base { uri },
+ bpki_ta
+}
+
+start |= element error {
+ attribute version { version },
+ attribute reason {
+ "syntax-error" |
+ "authentication-failure" |
+ "refused"
+ },
+ any?
+}
diff --git a/schemas/relaxng/oob-setup.rng b/schemas/relaxng/oob-setup.rng
new file mode 100644
index 00000000..00278047
--- /dev/null
+++ b/schemas/relaxng/oob-setup.rng
@@ -0,0 +1,168 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- $Id: rpki-setup.rnc 3429 2015-10-14 23:46:50Z sra $ -->
+<grammar ns="http://www.hactrn.net/uris/rpki/rpki-setup/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <define name="version">
+ <value>1</value>
+ </define>
+ <define name="base64">
+ <data type="base64Binary">
+ <param name="maxLength">512000</param>
+ </data>
+ </define>
+ <define name="handle">
+ <data type="string">
+ <param name="maxLength">255</param>
+ <param name="pattern">[\-_A-Za-z0-9/]*</param>
+ </data>
+ </define>
+ <define name="uri">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </define>
+ <define name="any">
+ <element>
+ <anyName/>
+ <zeroOrMore>
+ <attribute>
+ <anyName/>
+ </attribute>
+ </zeroOrMore>
+ <zeroOrMore>
+ <choice>
+ <ref name="any"/>
+ <text/>
+ </choice>
+ </zeroOrMore>
+ </element>
+ </define>
+ <define name="authorization_token">
+ <ref name="base64"/>
+ </define>
+ <define name="bpki_ta">
+ <ref name="base64"/>
+ </define>
+ <start combine="choice">
+ <element name="child_request">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="child_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="child_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="parent_response">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="service_uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="child_handle">
+ <ref name="handle"/>
+ </attribute>
+ <attribute name="parent_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="parent_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ <optional>
+ <element name="offer">
+ <empty/>
+ </element>
+ </optional>
+ <zeroOrMore>
+ <element name="referral">
+ <attribute name="referrer">
+ <ref name="handle"/>
+ </attribute>
+ <optional>
+ <attribute name="contact_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <ref name="authorization_token"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="publisher_request">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="publisher_handle">
+ <ref name="handle"/>
+ </attribute>
+ <element name="publisher_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ <zeroOrMore>
+ <element name="referral">
+ <attribute name="referrer">
+ <ref name="handle"/>
+ </attribute>
+ <ref name="authorization_token"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="repository_response">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="service_uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="publisher_handle">
+ <ref name="handle"/>
+ </attribute>
+ <attribute name="sia_base">
+ <ref name="uri"/>
+ </attribute>
+ <optional>
+ <attribute name="rrdp_notification_uri">
+ <ref name="uri"/>
+ </attribute>
+ </optional>
+ <element name="repository_bpki_ta">
+ <ref name="bpki_ta"/>
+ </element>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="authorization">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="authorized_sia_base">
+ <ref name="uri"/>
+ </attribute>
+ <ref name="bpki_ta"/>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="error">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="reason">
+ <choice>
+ <value>syntax-error</value>
+ <value>authentication-failure</value>
+ <value>refused</value>
+ </choice>
+ </attribute>
+ <optional>
+ <ref name="any"/>
+ </optional>
+ </element>
+ </start>
+</grammar>
diff --git a/schemas/relaxng/publication-schema.rnc b/schemas/relaxng/publication-control.rnc
index fdf38c9e..ac59c617 100644
--- a/schemas/relaxng/publication-schema.rnc
+++ b/schemas/relaxng/publication-control.rnc
@@ -19,7 +19,7 @@
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-default namespace = "http://www.hactrn.net/uris/rpki/publication-spec/"
+default namespace = "http://www.hactrn.net/uris/rpki/publication-control/"
version = "1"
@@ -32,12 +32,10 @@ start = element msg {
}
# PDUs allowed in a query
-query_elt = ( config_query | client_query | certificate_query | crl_query |
- manifest_query | roa_query | ghostbuster_query )
+query_elt = client_query
# PDUs allowed in a reply
-reply_elt = ( config_reply | client_reply | certificate_reply | crl_reply |
- manifest_reply | roa_reply | ghostbuster_reply | report_error_reply )
+reply_elt = ( client_reply | report_error_reply )
# Tag attributes for bulk operations
tag = attribute tag { xsd:token {maxLength="1024" } }
@@ -58,17 +56,7 @@ uri = attribute uri { uri_t }
# hierarchy delimiter.
object_handle = xsd:string { maxLength="255" pattern="[\-_A-Za-z0-9/]+" }
-# <config/> element (use restricted to repository operator)
-# config_handle attribute, create, list, and destroy commands omitted deliberately, see code for details
-
-config_payload = (element bpki_crl { base64 }?)
-
-config_query |= element config { attribute action { "set" }, tag?, config_payload }
-config_reply |= element config { attribute action { "set" }, tag? }
-config_query |= element config { attribute action { "get" }, tag? }
-config_reply |= element config { attribute action { "get" }, tag?, config_payload }
-
-# <client/> element (use restricted to repository operator)
+# <client/> element
client_handle = attribute client_handle { object_handle }
@@ -87,41 +75,6 @@ client_reply |= element client { attribute action { "list" }, tag?, client_ha
client_query |= element client { attribute action { "destroy" }, tag?, client_handle }
client_reply |= element client { attribute action { "destroy" }, tag?, client_handle }
-# <certificate/> element
-
-certificate_query |= element certificate { attribute action { "publish" }, tag?, uri, base64 }
-certificate_reply |= element certificate { attribute action { "publish" }, tag?, uri }
-certificate_query |= element certificate { attribute action { "withdraw" }, tag?, uri }
-certificate_reply |= element certificate { attribute action { "withdraw" }, tag?, uri }
-
-# <crl/> element
-
-crl_query |= element crl { attribute action { "publish" }, tag?, uri, base64 }
-crl_reply |= element crl { attribute action { "publish" }, tag?, uri }
-crl_query |= element crl { attribute action { "withdraw" }, tag?, uri }
-crl_reply |= element crl { attribute action { "withdraw" }, tag?, uri }
-
-# <manifest/> element
-
-manifest_query |= element manifest { attribute action { "publish" }, tag?, uri, base64 }
-manifest_reply |= element manifest { attribute action { "publish" }, tag?, uri }
-manifest_query |= element manifest { attribute action { "withdraw" }, tag?, uri }
-manifest_reply |= element manifest { attribute action { "withdraw" }, tag?, uri }
-
-# <roa/> element
-
-roa_query |= element roa { attribute action { "publish" }, tag?, uri, base64 }
-roa_reply |= element roa { attribute action { "publish" }, tag?, uri }
-roa_query |= element roa { attribute action { "withdraw" }, tag?, uri }
-roa_reply |= element roa { attribute action { "withdraw" }, tag?, uri }
-
-# <ghostbuster/> element
-
-ghostbuster_query |= element ghostbuster { attribute action { "publish" }, tag?, uri, base64 }
-ghostbuster_reply |= element ghostbuster { attribute action { "publish" }, tag?, uri }
-ghostbuster_query |= element ghostbuster { attribute action { "withdraw" }, tag?, uri }
-ghostbuster_reply |= element ghostbuster { attribute action { "withdraw" }, tag?, uri }
-
# <report_error/> element
error = xsd:token { maxLength="1024" }
diff --git a/schemas/relaxng/publication-control.rng b/schemas/relaxng/publication-control.rng
new file mode 100644
index 00000000..606deb53
--- /dev/null
+++ b/schemas/relaxng/publication-control.rng
@@ -0,0 +1,280 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: publication-control.rnc 5903 2014-07-18 17:08:13Z sra $
+
+ RelaxNG schema for RPKI publication protocol.
+
+ Copyright (C) 2012- -2014 Dragon Research Labs ("DRL")
+ Portions copyright (C) 2009- -2011 Internet Systems Consortium ("ISC")
+ Portions copyright (C) 2007- -2008 American Registry for Internet Numbers ("ARIN")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notices and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
+ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
+ ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+ OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+-->
+<grammar ns="http://www.hactrn.net/uris/rpki/publication-control/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <define name="version">
+ <value>1</value>
+ </define>
+ <!-- Top level PDU -->
+ <start>
+ <element name="msg">
+ <attribute name="version">
+ <data type="positiveInteger">
+ <param name="maxInclusive">1</param>
+ </data>
+ </attribute>
+ <choice>
+ <group>
+ <attribute name="type">
+ <value>query</value>
+ </attribute>
+ <zeroOrMore>
+ <ref name="query_elt"/>
+ </zeroOrMore>
+ </group>
+ <group>
+ <attribute name="type">
+ <value>reply</value>
+ </attribute>
+ <zeroOrMore>
+ <ref name="reply_elt"/>
+ </zeroOrMore>
+ </group>
+ </choice>
+ </element>
+ </start>
+ <!-- PDUs allowed in a query -->
+ <define name="query_elt">
+ <ref name="client_query"/>
+ </define>
+ <!-- PDUs allowed in a reply -->
+ <define name="reply_elt">
+ <choice>
+ <ref name="client_reply"/>
+ <ref name="report_error_reply"/>
+ </choice>
+ </define>
+ <!-- Tag attributes for bulk operations -->
+ <define name="tag">
+ <attribute name="tag">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ </define>
+ <!--
+ Base64 encoded DER stuff
+ base64 = xsd:base64Binary { maxLength="512000" }
+
+ Sadly, it turns out that CRLs can in fact get longer than this for an active CA.
+ Remove length limit for now, think about whether to put it back later.
+ -->
+ <define name="base64">
+ <data type="base64Binary"/>
+ </define>
+ <!-- Publication URLs -->
+ <define name="uri_t">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </define>
+ <define name="uri">
+ <attribute name="uri">
+ <ref name="uri_t"/>
+ </attribute>
+ </define>
+ <!--
+ Handles on remote objects (replaces passing raw SQL IDs). NB:
+ Unlike the up-down protocol, handles in this protocol allow "/" as a
+ hierarchy delimiter.
+ -->
+ <define name="object_handle">
+ <data type="string">
+ <param name="maxLength">255</param>
+ <param name="pattern">[\-_A-Za-z0-9/]+</param>
+ </data>
+ </define>
+ <!-- <client/> element -->
+ <define name="client_handle">
+ <attribute name="client_handle">
+ <ref name="object_handle"/>
+ </attribute>
+ </define>
+ <define name="client_bool">
+ <optional>
+ <attribute name="clear_replay_protection">
+ <value>yes</value>
+ </attribute>
+ </optional>
+ </define>
+ <define name="client_payload">
+ <optional>
+ <attribute name="base_uri">
+ <ref name="uri_t"/>
+ </attribute>
+ </optional>
+ <optional>
+ <element name="bpki_cert">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ <optional>
+ <element name="bpki_glue">
+ <ref name="base64"/>
+ </element>
+ </optional>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>create</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ <ref name="client_bool"/>
+ <ref name="client_payload"/>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>create</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>set</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ <ref name="client_bool"/>
+ <ref name="client_payload"/>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>set</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>get</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>get</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ <ref name="client_payload"/>
+ </element>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>list</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>list</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ <ref name="client_payload"/>
+ </element>
+ </define>
+ <define name="client_query" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>destroy</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <define name="client_reply" combine="choice">
+ <element name="client">
+ <attribute name="action">
+ <value>destroy</value>
+ </attribute>
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="client_handle"/>
+ </element>
+ </define>
+ <!-- <report_error/> element -->
+ <define name="error">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </define>
+ <define name="report_error_reply">
+ <element name="report_error">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <attribute name="error_code">
+ <ref name="error"/>
+ </attribute>
+ <optional>
+ <data type="string">
+ <param name="maxLength">512000</param>
+ </data>
+ </optional>
+ </element>
+ </define>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ comment-start: "# "
+ comment-start-skip: "#[ \t]*"
+ End:
+-->
diff --git a/schemas/relaxng/publication-schema.rng b/schemas/relaxng/publication-schema.rng
deleted file mode 100644
index 482fa477..00000000
--- a/schemas/relaxng/publication-schema.rng
+++ /dev/null
@@ -1,577 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- $Id: publication-schema.rnc 5902 2014-07-18 16:37:04Z sra $
-
- RelaxNG schema for RPKI publication protocol.
-
- Copyright (C) 2012- -2014 Dragon Research Labs ("DRL")
- Portions copyright (C) 2009- -2011 Internet Systems Consortium ("ISC")
- Portions copyright (C) 2007- -2008 American Registry for Internet Numbers ("ARIN")
-
- Permission to use, copy, modify, and distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
- copyright notices and this permission notice appear in all copies.
-
- THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
- WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
- ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
- CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
- OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
- NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
- WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
--->
-<grammar ns="http://www.hactrn.net/uris/rpki/publication-spec/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
- <define name="version">
- <value>1</value>
- </define>
- <!-- Top level PDU -->
- <start>
- <element name="msg">
- <attribute name="version">
- <data type="positiveInteger">
- <param name="maxInclusive">1</param>
- </data>
- </attribute>
- <choice>
- <group>
- <attribute name="type">
- <value>query</value>
- </attribute>
- <zeroOrMore>
- <ref name="query_elt"/>
- </zeroOrMore>
- </group>
- <group>
- <attribute name="type">
- <value>reply</value>
- </attribute>
- <zeroOrMore>
- <ref name="reply_elt"/>
- </zeroOrMore>
- </group>
- </choice>
- </element>
- </start>
- <!-- PDUs allowed in a query -->
- <define name="query_elt">
- <choice>
- <ref name="config_query"/>
- <ref name="client_query"/>
- <ref name="certificate_query"/>
- <ref name="crl_query"/>
- <ref name="manifest_query"/>
- <ref name="roa_query"/>
- <ref name="ghostbuster_query"/>
- </choice>
- </define>
- <!-- PDUs allowed in a reply -->
- <define name="reply_elt">
- <choice>
- <ref name="config_reply"/>
- <ref name="client_reply"/>
- <ref name="certificate_reply"/>
- <ref name="crl_reply"/>
- <ref name="manifest_reply"/>
- <ref name="roa_reply"/>
- <ref name="ghostbuster_reply"/>
- <ref name="report_error_reply"/>
- </choice>
- </define>
- <!-- Tag attributes for bulk operations -->
- <define name="tag">
- <attribute name="tag">
- <data type="token">
- <param name="maxLength">1024</param>
- </data>
- </attribute>
- </define>
- <!--
- Base64 encoded DER stuff
- base64 = xsd:base64Binary { maxLength="512000" }
-
- Sadly, it turns out that CRLs can in fact get longer than this for an active CA.
- Remove length limit for now, think about whether to put it back later.
- -->
- <define name="base64">
- <data type="base64Binary"/>
- </define>
- <!-- Publication URLs -->
- <define name="uri_t">
- <data type="anyURI">
- <param name="maxLength">4096</param>
- </data>
- </define>
- <define name="uri">
- <attribute name="uri">
- <ref name="uri_t"/>
- </attribute>
- </define>
- <!--
- Handles on remote objects (replaces passing raw SQL IDs). NB:
- Unlike the up-down protocol, handles in this protocol allow "/" as a
- hierarchy delimiter.
- -->
- <define name="object_handle">
- <data type="string">
- <param name="maxLength">255</param>
- <param name="pattern">[\-_A-Za-z0-9/]+</param>
- </data>
- </define>
- <!--
- <config/> element (use restricted to repository operator)
- config_handle attribute, create, list, and destroy commands omitted deliberately, see code for details
- -->
- <define name="config_payload">
- <optional>
- <element name="bpki_crl">
- <ref name="base64"/>
- </element>
- </optional>
- </define>
- <define name="config_query" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>set</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="config_payload"/>
- </element>
- </define>
- <define name="config_reply" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>set</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- </element>
- </define>
- <define name="config_query" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>get</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- </element>
- </define>
- <define name="config_reply" combine="choice">
- <element name="config">
- <attribute name="action">
- <value>get</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="config_payload"/>
- </element>
- </define>
- <!-- <client/> element (use restricted to repository operator) -->
- <define name="client_handle">
- <attribute name="client_handle">
- <ref name="object_handle"/>
- </attribute>
- </define>
- <define name="client_bool">
- <optional>
- <attribute name="clear_replay_protection">
- <value>yes</value>
- </attribute>
- </optional>
- </define>
- <define name="client_payload">
- <optional>
- <attribute name="base_uri">
- <ref name="uri_t"/>
- </attribute>
- </optional>
- <optional>
- <element name="bpki_cert">
- <ref name="base64"/>
- </element>
- </optional>
- <optional>
- <element name="bpki_glue">
- <ref name="base64"/>
- </element>
- </optional>
- </define>
- <define name="client_query" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>create</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- <ref name="client_bool"/>
- <ref name="client_payload"/>
- </element>
- </define>
- <define name="client_reply" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>create</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- </element>
- </define>
- <define name="client_query" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>set</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- <ref name="client_bool"/>
- <ref name="client_payload"/>
- </element>
- </define>
- <define name="client_reply" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>set</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- </element>
- </define>
- <define name="client_query" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>get</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- </element>
- </define>
- <define name="client_reply" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>get</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- <ref name="client_payload"/>
- </element>
- </define>
- <define name="client_query" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>list</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- </element>
- </define>
- <define name="client_reply" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>list</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- <ref name="client_payload"/>
- </element>
- </define>
- <define name="client_query" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>destroy</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- </element>
- </define>
- <define name="client_reply" combine="choice">
- <element name="client">
- <attribute name="action">
- <value>destroy</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="client_handle"/>
- </element>
- </define>
- <!-- <certificate/> element -->
- <define name="certificate_query" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
- </define>
- <define name="certificate_reply" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="certificate_query" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="certificate_reply" combine="choice">
- <element name="certificate">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <!-- <crl/> element -->
- <define name="crl_query" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
- </define>
- <define name="crl_reply" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="crl_query" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="crl_reply" combine="choice">
- <element name="crl">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <!-- <manifest/> element -->
- <define name="manifest_query" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
- </define>
- <define name="manifest_reply" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="manifest_query" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="manifest_reply" combine="choice">
- <element name="manifest">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <!-- <roa/> element -->
- <define name="roa_query" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
- </define>
- <define name="roa_reply" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="roa_query" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="roa_reply" combine="choice">
- <element name="roa">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <!-- <ghostbuster/> element -->
- <define name="ghostbuster_query" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- <ref name="base64"/>
- </element>
- </define>
- <define name="ghostbuster_reply" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>publish</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="ghostbuster_query" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <define name="ghostbuster_reply" combine="choice">
- <element name="ghostbuster">
- <attribute name="action">
- <value>withdraw</value>
- </attribute>
- <optional>
- <ref name="tag"/>
- </optional>
- <ref name="uri"/>
- </element>
- </define>
- <!-- <report_error/> element -->
- <define name="error">
- <data type="token">
- <param name="maxLength">1024</param>
- </data>
- </define>
- <define name="report_error_reply">
- <element name="report_error">
- <optional>
- <ref name="tag"/>
- </optional>
- <attribute name="error_code">
- <ref name="error"/>
- </attribute>
- <optional>
- <data type="string">
- <param name="maxLength">512000</param>
- </data>
- </optional>
- </element>
- </define>
-</grammar>
-<!--
- Local Variables:
- indent-tabs-mode: nil
- comment-start: "# "
- comment-start-skip: "#[ \t]*"
- End:
--->
diff --git a/schemas/relaxng/publication.rnc b/schemas/relaxng/publication.rnc
new file mode 100644
index 00000000..f3d1f94e
--- /dev/null
+++ b/schemas/relaxng/publication.rnc
@@ -0,0 +1,111 @@
+# $Id$
+#
+# RelaxNG schema for RPKI publication protocol, from current I-D.
+#
+# Copyright (c) 2014 IETF Trust and the persons identified as authors
+# of the code. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+#
+# * Neither the name of Internet Society, IETF or IETF Trust, nor the
+# names of specific contributors, may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+default namespace =
+ "http://www.hactrn.net/uris/rpki/publication-spec/"
+
+# This is version 3 of the protocol.
+
+version = "3"
+
+# Top level PDU is either a query or a reply.
+
+start |= element msg {
+ attribute version { version },
+ attribute type { "query" },
+ query_elt*
+}
+
+start |= element msg {
+ attribute version { version },
+ attribute type { "reply" },
+ reply_elt*
+}
+
+# PDUs allowed in queries and replies.
+
+query_elt = publish_query | withdraw_query | list_query
+reply_elt = publish_reply | withdraw_reply | list_reply | report_error_reply
+
+# Tag attributes for bulk operations.
+
+tag = attribute tag { xsd:token { maxLength="1024" } }
+
+# Base64 encoded DER stuff.
+
+base64 = xsd:base64Binary
+
+# Publication URIs.
+
+uri = attribute uri { xsd:anyURI { maxLength="4096" } }
+
+# Digest of objects being withdrawn
+
+hash = attribute hash { xsd:string { pattern = "[0-9a-fA-F]+" } }
+
+# Error codes.
+
+error = xsd:token { maxLength="1024" }
+
+# <publish/> element
+
+publish_query = element publish { tag?, uri, hash?, base64 }
+publish_reply = element publish { tag?, uri }
+
+# <withdraw/> element
+
+withdraw_query = element withdraw { tag?, uri, hash }
+withdraw_reply = element withdraw { tag?, uri }
+
+# <list/> element
+
+list_query = element list { tag? }
+list_reply = element list { tag?, uri, hash }
+
+# <report_error/> element
+
+report_error_reply = element report_error {
+ tag?,
+ attribute error_code { error },
+ xsd:string { maxLength="512000" }?
+}
+
+# Local Variables:
+# indent-tabs-mode: nil
+# comment-start: "# "
+# comment-start-skip: "#[ \t]*"
+# End:
diff --git a/schemas/relaxng/publication.rng b/schemas/relaxng/publication.rng
new file mode 100644
index 00000000..5e72407e
--- /dev/null
+++ b/schemas/relaxng/publication.rng
@@ -0,0 +1,201 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: publication.rnc 5896 2014-07-15 19:34:32Z sra $
+
+ RelaxNG schema for RPKI publication protocol, from current I-D.
+
+ Copyright (c) 2014 IETF Trust and the persons identified as authors
+ of the code. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of Internet Society, IETF or IETF Trust, nor the
+ names of specific contributors, may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+-->
+<grammar ns="http://www.hactrn.net/uris/rpki/publication-spec/" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <!-- This is version 3 of the protocol. -->
+ <define name="version">
+ <value>3</value>
+ </define>
+ <!-- Top level PDU is either a query or a reply. -->
+ <start combine="choice">
+ <element name="msg">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="type">
+ <value>query</value>
+ </attribute>
+ <zeroOrMore>
+ <ref name="query_elt"/>
+ </zeroOrMore>
+ </element>
+ </start>
+ <start combine="choice">
+ <element name="msg">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="type">
+ <value>reply</value>
+ </attribute>
+ <zeroOrMore>
+ <ref name="reply_elt"/>
+ </zeroOrMore>
+ </element>
+ </start>
+ <!-- PDUs allowed in queries and replies. -->
+ <define name="query_elt">
+ <choice>
+ <ref name="publish_query"/>
+ <ref name="withdraw_query"/>
+ <ref name="list_query"/>
+ </choice>
+ </define>
+ <define name="reply_elt">
+ <choice>
+ <ref name="publish_reply"/>
+ <ref name="withdraw_reply"/>
+ <ref name="list_reply"/>
+ <ref name="report_error_reply"/>
+ </choice>
+ </define>
+ <!-- Tag attributes for bulk operations. -->
+ <define name="tag">
+ <attribute name="tag">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </attribute>
+ </define>
+ <!-- Base64 encoded DER stuff. -->
+ <define name="base64">
+ <data type="base64Binary"/>
+ </define>
+ <!-- Publication URIs. -->
+ <define name="uri">
+ <attribute name="uri">
+ <data type="anyURI">
+ <param name="maxLength">4096</param>
+ </data>
+ </attribute>
+ </define>
+ <!-- Digest of objects being withdrawn -->
+ <define name="hash">
+ <attribute name="hash">
+ <data type="string">
+ <param name="pattern">[0-9a-fA-F]+</param>
+ </data>
+ </attribute>
+ </define>
+ <!-- Error codes. -->
+ <define name="error">
+ <data type="token">
+ <param name="maxLength">1024</param>
+ </data>
+ </define>
+ <!-- <publish/> element -->
+ <define name="publish_query">
+ <element name="publish">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ <optional>
+ <ref name="hash"/>
+ </optional>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <define name="publish_reply">
+ <element name="publish">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <!-- <withdraw/> element -->
+ <define name="withdraw_query">
+ <element name="withdraw">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ <ref name="hash"/>
+ </element>
+ </define>
+ <define name="withdraw_reply">
+ <element name="withdraw">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ </element>
+ </define>
+ <!-- <list/> element -->
+ <define name="list_query">
+ <element name="list">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ </element>
+ </define>
+ <define name="list_reply">
+ <element name="list">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <ref name="uri"/>
+ <ref name="hash"/>
+ </element>
+ </define>
+ <!-- <report_error/> element -->
+ <define name="report_error_reply">
+ <element name="report_error">
+ <optional>
+ <ref name="tag"/>
+ </optional>
+ <attribute name="error_code">
+ <ref name="error"/>
+ </attribute>
+ <optional>
+ <data type="string">
+ <param name="maxLength">512000</param>
+ </data>
+ </optional>
+ </element>
+ </define>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ comment-start: "# "
+ comment-start-skip: "#[ \t]*"
+ End:
+-->
diff --git a/schemas/relaxng/router-certificate-schema.rnc b/schemas/relaxng/router-certificate.rnc
index 8cc325ce..8cc325ce 100644
--- a/schemas/relaxng/router-certificate-schema.rnc
+++ b/schemas/relaxng/router-certificate.rnc
diff --git a/schemas/relaxng/router-certificate-schema.rng b/schemas/relaxng/router-certificate.rng
index 90b50107..9352ed76 100644
--- a/schemas/relaxng/router-certificate-schema.rng
+++ b/schemas/relaxng/router-certificate.rng
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: router-certificate-schema.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: router-certificate.rnc 5881 2014-07-03 16:55:02Z sra $
RelaxNG schema for BGPSEC router certificate interchange format.
diff --git a/schemas/relaxng/rrdp.rnc b/schemas/relaxng/rrdp.rnc
new file mode 100644
index 00000000..7809abdd
--- /dev/null
+++ b/schemas/relaxng/rrdp.rnc
@@ -0,0 +1,81 @@
+# $Id$
+#
+# RelaxNG schema for RPKI Repository Delta Protocol (RRDP).
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+default namespace = "http://www.ripe.net/rpki/rrdp"
+
+version = xsd:positiveInteger { maxInclusive="1" }
+serial = xsd:nonNegativeInteger
+uri = xsd:anyURI
+uuid = xsd:string { pattern = "[\-0-9a-fA-F]+" }
+hash = xsd:string { pattern = "[0-9a-fA-F]+" }
+base64 = xsd:base64Binary
+
+# Notification file: lists current snapshots and deltas
+
+start |= element notification {
+ attribute version { version },
+ attribute session_id { uuid },
+ attribute serial { serial },
+ element snapshot {
+ attribute uri { uri },
+ attribute hash { hash }
+ },
+ element delta {
+ attribute serial { serial },
+ attribute uri { uri },
+ attribute hash { hash }
+ }*
+}
+
+# Snapshot segment: think DNS AXFR.
+
+start |= element snapshot {
+ attribute version { version },
+ attribute session_id { uuid },
+ attribute serial { serial },
+ element publish {
+ attribute uri { uri },
+ base64
+ }*
+}
+
+# Delta segment: think DNS IXFR.
+
+start |= element delta {
+ attribute version { version },
+ attribute session_id { uuid },
+ attribute serial { serial },
+ delta_element+
+}
+
+delta_element |= element publish {
+ attribute uri { uri },
+ attribute hash { hash }?,
+ base64
+}
+
+delta_element |= element withdraw {
+ attribute uri { uri },
+ attribute hash { hash }
+}
+
+# Local Variables:
+# indent-tabs-mode: nil
+# comment-start: "# "
+# comment-start-skip: "#[ \t]*"
+# End:
diff --git a/schemas/relaxng/rrdp.rng b/schemas/relaxng/rrdp.rng
new file mode 100644
index 00000000..7d2fde9c
--- /dev/null
+++ b/schemas/relaxng/rrdp.rng
@@ -0,0 +1,150 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ $Id: rrdp.rnc 6010 2014-11-08 18:01:58Z sra $
+
+ RelaxNG schema for RPKI Repository Delta Protocol (RRDP).
+
+ Copyright (C) 2014 Dragon Research Labs ("DRL")
+
+ Permission to use, copy, modify, and distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ PERFORMANCE OF THIS SOFTWARE.
+-->
+<grammar ns="http://www.ripe.net/rpki/rrdp" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <define name="version">
+ <data type="positiveInteger">
+ <param name="maxInclusive">1</param>
+ </data>
+ </define>
+ <define name="serial">
+ <data type="nonNegativeInteger"/>
+ </define>
+ <define name="uri">
+ <data type="anyURI"/>
+ </define>
+ <define name="uuid">
+ <data type="string">
+ <param name="pattern">[\-0-9a-fA-F]+</param>
+ </data>
+ </define>
+ <define name="hash">
+ <data type="string">
+ <param name="pattern">[0-9a-fA-F]+</param>
+ </data>
+ </define>
+ <define name="base64">
+ <data type="base64Binary"/>
+ </define>
+ <!-- Notification file: lists current snapshots and deltas -->
+ <start combine="choice">
+ <element name="notification">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <element name="snapshot">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ <zeroOrMore>
+ <element name="delta">
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <!-- Snapshot segment: think DNS AXFR. -->
+ <start combine="choice">
+ <element name="snapshot">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <zeroOrMore>
+ <element name="publish">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <ref name="base64"/>
+ </element>
+ </zeroOrMore>
+ </element>
+ </start>
+ <!-- Delta segment: think DNS IXFR. -->
+ <start combine="choice">
+ <element name="delta">
+ <attribute name="version">
+ <ref name="version"/>
+ </attribute>
+ <attribute name="session_id">
+ <ref name="uuid"/>
+ </attribute>
+ <attribute name="serial">
+ <ref name="serial"/>
+ </attribute>
+ <oneOrMore>
+ <ref name="delta_element"/>
+ </oneOrMore>
+ </element>
+ </start>
+ <define name="delta_element" combine="choice">
+ <element name="publish">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <optional>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </optional>
+ <ref name="base64"/>
+ </element>
+ </define>
+ <define name="delta_element" combine="choice">
+ <element name="withdraw">
+ <attribute name="uri">
+ <ref name="uri"/>
+ </attribute>
+ <attribute name="hash">
+ <ref name="hash"/>
+ </attribute>
+ </element>
+ </define>
+</grammar>
+<!--
+ Local Variables:
+ indent-tabs-mode: nil
+ comment-start: "# "
+ comment-start-skip: "#[ \t]*"
+ End:
+-->
diff --git a/schemas/relaxng/up-down-schema.rnc b/schemas/relaxng/up-down.rnc
index a603b8fe..a603b8fe 100644
--- a/schemas/relaxng/up-down-schema.rnc
+++ b/schemas/relaxng/up-down.rnc
diff --git a/schemas/relaxng/up-down-schema.rng b/schemas/relaxng/up-down.rng
index 89235b7e..a0fc0514 100644
--- a/schemas/relaxng/up-down-schema.rng
+++ b/schemas/relaxng/up-down.rng
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
- $Id: up-down-schema.rnc 5757 2014-04-05 22:42:12Z sra $
+ $Id: up-down.rnc 5881 2014-07-03 16:55:02Z sra $
RelaxNG schema for the up-down protocol, extracted from RFC 6492.
diff --git a/schemas/sql/pubd.sql b/schemas/sql/pubd.sql
deleted file mode 100644
index 3a58ec00..00000000
--- a/schemas/sql/pubd.sql
+++ /dev/null
@@ -1,59 +0,0 @@
--- $Id$
-
--- Copyright (C) 2009--2010 Internet Systems Consortium ("ISC")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- Copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- SQL objects needed by pubd.py.
-
--- The config table is weird because we're really only using it
--- to store one BPKI CRL, but putting this here lets us use a lot of
--- existing machinery and the alternatives are whacky in other ways.
-
-DROP TABLE IF EXISTS client;
-DROP TABLE IF EXISTS config;
-
-CREATE TABLE config (
- config_id SERIAL NOT NULL,
- bpki_crl LONGBLOB,
- PRIMARY KEY (config_id)
-) ENGINE=InnoDB;
-
-CREATE TABLE client (
- client_id SERIAL NOT NULL,
- client_handle VARCHAR(255) NOT NULL,
- base_uri TEXT,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- PRIMARY KEY (client_id),
- UNIQUE (client_handle)
-) ENGINE=InnoDB;
-
--- Local Variables:
--- indent-tabs-mode: nil
--- End:
diff --git a/schemas/sql/rpkid.sql b/schemas/sql/rpkid.sql
deleted file mode 100644
index ad0c39b0..00000000
--- a/schemas/sql/rpkid.sql
+++ /dev/null
@@ -1,250 +0,0 @@
--- $Id$
-
--- Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- Copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN")
---
--- Permission to use, copy, modify, and distribute this software for any
--- purpose with or without fee is hereby granted, provided that the above
--- copyright notice and this permission notice appear in all copies.
---
--- THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
--- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
--- AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
--- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
--- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
--- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
--- PERFORMANCE OF THIS SOFTWARE.
-
--- SQL objects needed by the RPKI engine (rpkid.py).
-
--- DROP TABLE commands must be in correct (reverse dependency) order
--- to satisfy FOREIGN KEY constraints.
-
-DROP TABLE IF EXISTS ee_cert;
-DROP TABLE IF EXISTS ghostbuster;
-DROP TABLE IF EXISTS roa_prefix;
-DROP TABLE IF EXISTS roa;
-DROP TABLE IF EXISTS revoked_cert;
-DROP TABLE IF EXISTS child_cert;
-DROP TABLE IF EXISTS child;
-DROP TABLE IF EXISTS ca_detail;
-DROP TABLE IF EXISTS ca;
-DROP TABLE IF EXISTS parent;
-DROP TABLE IF EXISTS repository;
-DROP TABLE IF EXISTS bsc;
-DROP TABLE IF EXISTS self;
-
-CREATE TABLE self (
- self_id SERIAL NOT NULL,
- self_handle VARCHAR(255) NOT NULL,
- use_hsm BOOLEAN NOT NULL DEFAULT FALSE,
- crl_interval BIGINT UNSIGNED,
- regen_margin BIGINT UNSIGNED,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- PRIMARY KEY (self_id),
- UNIQUE (self_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE bsc (
- bsc_id SERIAL NOT NULL,
- bsc_handle VARCHAR(255) NOT NULL,
- private_key_id LONGBLOB,
- pkcs10_request LONGBLOB,
- hash_alg ENUM ('sha256'),
- signing_cert LONGBLOB,
- signing_cert_crl LONGBLOB,
- self_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (bsc_id),
- CONSTRAINT bsc_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, bsc_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE repository (
- repository_id SERIAL NOT NULL,
- repository_handle VARCHAR(255) NOT NULL,
- peer_contact_uri TEXT,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- bsc_id BIGINT UNSIGNED NOT NULL,
- self_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (repository_id),
- CONSTRAINT repository_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT repository_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- UNIQUE (self_id, repository_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE parent (
- parent_id SERIAL NOT NULL,
- parent_handle VARCHAR(255) NOT NULL,
- bpki_cms_cert LONGBLOB,
- bpki_cms_glue LONGBLOB,
- peer_contact_uri TEXT,
- sia_base TEXT,
- sender_name TEXT,
- recipient_name TEXT,
- last_cms_timestamp DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- bsc_id BIGINT UNSIGNED NOT NULL,
- repository_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (parent_id),
- CONSTRAINT parent_repository_id
- FOREIGN KEY (repository_id) REFERENCES repository (repository_id) ON DELETE CASCADE,
- CONSTRAINT parent_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- CONSTRAINT parent_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, parent_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE ca (
- ca_id SERIAL NOT NULL,
- last_crl_sn BIGINT UNSIGNED NOT NULL,
- last_manifest_sn BIGINT UNSIGNED NOT NULL,
- next_manifest_update DATETIME,
- next_crl_update DATETIME,
- last_issued_sn BIGINT UNSIGNED NOT NULL,
- sia_uri TEXT,
- parent_resource_class TEXT,
- parent_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ca_id),
- CONSTRAINT ca_parent_id
- FOREIGN KEY (parent_id) REFERENCES parent (parent_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ca_detail (
- ca_detail_id SERIAL NOT NULL,
- public_key LONGBLOB,
- private_key_id LONGBLOB,
- latest_crl LONGBLOB,
- crl_published DATETIME,
- latest_ca_cert LONGBLOB,
- manifest_private_key_id LONGBLOB,
- manifest_public_key LONGBLOB,
- latest_manifest_cert LONGBLOB,
- latest_manifest LONGBLOB,
- manifest_published DATETIME,
- state ENUM ('pending', 'active', 'deprecated', 'revoked') NOT NULL,
- ca_cert_uri TEXT,
- ca_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ca_detail_id),
- CONSTRAINT ca_detail_ca_id
- FOREIGN KEY (ca_id) REFERENCES ca (ca_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE child (
- child_id SERIAL NOT NULL,
- child_handle VARCHAR(255) NOT NULL,
- bpki_cert LONGBLOB,
- bpki_glue LONGBLOB,
- last_cms_timestamp DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- bsc_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (child_id),
- CONSTRAINT child_bsc_id
- FOREIGN KEY (bsc_id) REFERENCES bsc (bsc_id) ON DELETE CASCADE,
- CONSTRAINT child_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- UNIQUE (self_id, child_handle)
-) ENGINE=InnoDB;
-
-CREATE TABLE child_cert (
- child_cert_id SERIAL NOT NULL,
- cert LONGBLOB NOT NULL,
- published DATETIME,
- ski TINYBLOB NOT NULL,
- child_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (child_cert_id),
- CONSTRAINT child_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE,
- CONSTRAINT child_cert_child_id
- FOREIGN KEY (child_id) REFERENCES child (child_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE revoked_cert (
- revoked_cert_id SERIAL NOT NULL,
- serial BIGINT UNSIGNED NOT NULL,
- revoked DATETIME NOT NULL,
- expires DATETIME NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (revoked_cert_id),
- CONSTRAINT revoked_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE roa (
- roa_id SERIAL NOT NULL,
- asn BIGINT UNSIGNED NOT NULL,
- cert LONGBLOB NOT NULL,
- roa LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (roa_id),
- CONSTRAINT roa_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT roa_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE roa_prefix (
- prefix VARCHAR(40) NOT NULL,
- prefixlen TINYINT UNSIGNED NOT NULL,
- max_prefixlen TINYINT UNSIGNED NOT NULL,
- version TINYINT UNSIGNED NOT NULL,
- roa_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (roa_id, prefix, prefixlen, max_prefixlen),
- CONSTRAINT roa_prefix_roa_id
- FOREIGN KEY (roa_id) REFERENCES roa (roa_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ghostbuster (
- ghostbuster_id SERIAL NOT NULL,
- vcard LONGBLOB NOT NULL,
- cert LONGBLOB NOT NULL,
- ghostbuster LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ghostbuster_id),
- CONSTRAINT ghostbuster_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT ghostbuster_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
-CREATE TABLE ee_cert (
- ee_cert_id SERIAL NOT NULL,
- ski BINARY(20) NOT NULL,
- cert LONGBLOB NOT NULL,
- published DATETIME,
- self_id BIGINT UNSIGNED NOT NULL,
- ca_detail_id BIGINT UNSIGNED NOT NULL,
- PRIMARY KEY (ee_cert_id),
- CONSTRAINT ee_cert_self_id
- FOREIGN KEY (self_id) REFERENCES self (self_id) ON DELETE CASCADE,
- CONSTRAINT ee_cert_ca_detail_id
- FOREIGN KEY (ca_detail_id) REFERENCES ca_detail (ca_detail_id) ON DELETE CASCADE
-) ENGINE=InnoDB;
-
--- Local Variables:
--- indent-tabs-mode: nil
--- End:
diff --git a/setup.py b/setup.py
index 181e5a94..c655dbc8 100644
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,12 @@
# $Id$
-#
+#
# Copyright (C) 2014 Dragon Research Labs ("DRL")
# Portions copyright (C) 2011--2013 Internet Systems Consortium ("ISC")
-#
+#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notices and this permission notice appear in all copies.
-#
+#
# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
@@ -21,109 +21,128 @@ from glob import glob
import setup_extensions
try:
- import setup_autoconf as autoconf
+ import setup_autoconf as autoconf
except ImportError:
- class autoconf:
- "Fake autoconf object to let --help work without autoconf."
- sbindir = libexecdir = datarootdir = sysconfdir = ""
- CFLAGS = LDFLAGS = LIBS = CA_TARGET = RP_TARGET = ""
+ class autoconf:
+ "Fake autoconf object to let --help work without autoconf."
+ sbindir = libexecdir = datarootdir = sysconfdir = ""
+ CFLAGS = LDFLAGS = LIBS = CA_TARGET = RP_TARGET = ""
try:
- from rpki.version import VERSION
+ from rpki.version import VERSION
except ImportError:
- VERSION = "0.0"
+ VERSION = "0.0"
# pylint: disable=W0622
-setup_args = dict(
- name = "rpki",
- version = VERSION,
- description = "RPKI Toolkit",
- license = "BSD",
- url = "http://rpki.net/",
- cmdclass = {"build_scripts" : setup_extensions.build_scripts,
- "install_scripts" : setup_extensions.install_scripts})
-
-scripts = []
+scripts = []
+data_files = []
+packages = []
+package_data = {}
+ext_modules = []
# I keep forgetting to update the packages list here. Could we
# automate this by looking for __init__.py files in the rpki/ tree?
-# Might have to filter out some rpki.gui.app subdirs.
+# Might have to filter out some rpki.gui.app subdirs, or, rather,
+# list those as package_data instead.
if autoconf.RP_TARGET == "rp":
- setup_args.update(
- packages = ["rpki",
- "rpki.POW",
- "rpki.rtr",
- "rpki.irdb",
- "rpki.gui",
- "rpki.gui.app",
- "rpki.gui.cacheview",
- "rpki.gui.api",
- "rpki.gui.routeview"],
- ext_modules = [Extension("rpki.POW._POW", ["ext/POW.c"],
- extra_compile_args = autoconf.CFLAGS.split(),
- extra_link_args = (autoconf.LDFLAGS + " " +
- autoconf.LIBS).split())],
- package_data = {"rpki.gui.app" :
- ["migrations/*.py",
- "static/*/*",
- "templates/*.html",
- "templates/*/*.html",
- "templatetags/*.py"],
- "rpki.gui.cacheview" :
- ["templates/*/*.html"]})
-
- scripts += [(autoconf.bindir,
- ["rp/rcynic/rcynic-cron",
- "rp/rcynic/rcynic-html",
- "rp/rcynic/rcynic-svn",
- "rp/rcynic/rcynic-text",
- "rp/rcynic/validation_status",
- "rp/rpki-rtr/rpki-rtr",
- "rp/utils/find_roa",
- "rp/utils/hashdir",
- "rp/utils/print_roa",
- "rp/utils/print_rpki_manifest",
- "rp/utils/scan_roas",
- "rp/utils/scan_routercerts",
- "rp/utils/uri"])]
+
+ packages += ["rpki",
+ "rpki.POW",
+ "rpki.django_settings",
+ "rpki.rtr",
+ "rpki.irdb",
+ "rpki.pubdb",
+ "rpki.rpkidb",
+ "rpki.rcynicdb",
+ "rpki.gui",
+ "rpki.gui.app",
+ "rpki.gui.gui_rpki_cache",
+ "rpki.gui.api",
+ "rpki.gui.routeview"]
+
+ ext_modules += [Extension("rpki.POW._POW", ["ext/POW.c"],
+ include_dirs = [cflag[2:] for cflag in autoconf.CFLAGS.split() if cflag.startswith("-I")],
+ extra_compile_args = [cflag for cflag in autoconf.CFLAGS.split() if not cflag.startswith("-I")],
+ extra_link_args = autoconf.LDFLAGS.split() + autoconf.LIBS.split())]
+
+ for package in ("rpki.irdb", "rpki.pubdb", "rpki.rpkidb", "rpki.rcynicdb"):
+ package_data[package] = ["migrations/*.py"]
+
+ data_files += [(autoconf.sysconfdir + "/rpki",
+ ["rp/config/rpki-confgen.xml"])]
+
+ scripts += [(autoconf.bindir,
+ ["rp/rcynic/rcynic-cron",
+ "rp/rcynic/rcynic-html",
+ "rp/rcynic/rcynic-svn",
+ "rp/rcynic/rcynic-text",
+ "rp/rcynic/validation_status",
+ "rp/rpki-rtr/rpki-rtr",
+ "rp/utils/find_roa",
+ "rp/utils/hashdir",
+ "rp/utils/print_roa",
+ "rp/utils/print_rpki_manifest",
+ "rp/utils/scan_roas",
+ "rp/utils/scan_routercerts",
+ "rp/utils/uri"]),
+ (autoconf.sbindir,
+ ["rp/config/rpki-confgen",
+ "rp/config/rpki-sql-backup",
+ "rp/config/rpki-sql-setup",
+ "rp/config/rpki-manage",
+ "rp/config/rpki-generate-root-certificate"])]
if autoconf.CA_TARGET == "ca":
- setup_args.update(
- data_files = [(autoconf.sysconfdir + "/rpki",
- ["ca/rpki-confgen.xml"]),
- (autoconf.datarootdir + "/rpki/wsgi",
+
+ package_data["rpki.gui.app"] = ["migrations/*.py",
+ "static/*/*",
+ "templates/*.html",
+ "templates/*/*.html",
+ "templatetags/*.py"]
+
+ package_data["rpki.gui.gui_rpki_cache"] = ["migrations/*.py"]
+
+
+ data_files += [(autoconf.datarootdir + "/rpki/wsgi",
["ca/rpki.wsgi"]),
(autoconf.datarootdir + "/rpki/media/css",
glob("rpki/gui/app/static/css/*")),
(autoconf.datarootdir + "/rpki/media/js",
glob("rpki/gui/app/static/js/*")),
(autoconf.datarootdir + "/rpki/media/img",
- glob("rpki/gui/app/static/img/*")),
- (autoconf.datarootdir + "/rpki/upgrade-scripts",
- glob("ca/upgrade-scripts/*"))])
-
- scripts += [(autoconf.sbindir,
- ["ca/rpkic",
- "ca/rpki-confgen",
- "ca/rpki-start-servers",
- "ca/rpki-sql-backup",
- "ca/rpki-sql-setup",
- "ca/rpki-manage",
- "ca/rpkigui-query-routes",
- "ca/irbe_cli"]),
- (autoconf.libexecdir,
- ["ca/irdbd",
- "ca/pubd",
- "ca/rootd",
- "ca/rpkid",
- "ca/rpkigui-import-routes",
- "ca/rpkigui-check-expired",
- "ca/rpkigui-rcynic",
- "ca/rpkigui-apache-conf-gen"])]
-
-setup_args.update(scripts = scripts)
+ glob("rpki/gui/app/static/img/*"))]
+
+ scripts += [(autoconf.sbindir,
+ ["ca/rpkic",
+ "ca/rpkigui-query-routes",
+ "ca/irbe_cli"]),
+ (autoconf.libexecdir,
+ ["ca/irdbd",
+ "ca/pubd",
+ "ca/rootd",
+ "ca/rpkid",
+ "ca/rpki-nanny",
+ "ca/rpkigui-import-routes",
+ "ca/rpkigui-check-expired",
+ "ca/rpkigui-rcynic",
+ "ca/rpkigui-apache-conf-gen"])]
+
+setup_args = dict(
+ name = "rpki",
+ version = VERSION,
+ description = "RPKI Toolkit",
+ license = "BSD",
+ url = "http://rpki.net/",
+ cmdclass = {"build_scripts" : setup_extensions.build_scripts,
+ "install_scripts" : setup_extensions.install_scripts})
+
+for name in ("scripts", "data_files", "packages", "package_data", "ext_modules"):
+ val = globals().get(name)
+ if val:
+ setup_args[name] = val
+
setup(**setup_args)
diff --git a/setup_extensions.py b/setup_extensions.py
index 12b123aa..7294f317 100644
--- a/setup_extensions.py
+++ b/setup_extensions.py
@@ -16,79 +16,79 @@ from stat import S_IMODE
import os
class build_scripts(_build_scripts):
- """
- Hacked version of distutils.build_scripts, designed to support
- multiple target installation directories like install_data does.
+ """
+ Hacked version of distutils.build_scripts, designed to support
+ multiple target installation directories like install_data does.
- [(target_directory, [list_of_source_scripts]), ...]
+ [(target_directory, [list_of_source_scripts]), ...]
- Most of the real work is in the companion hacked install_scripts,
- but we need to tweak the list of source files that build_scripts
- pulls out of the Distribution object.
- """
+ Most of the real work is in the companion hacked install_scripts,
+ but we need to tweak the list of source files that build_scripts
+ pulls out of the Distribution object.
+ """
- def finalize_options(self):
- _build_scripts.finalize_options(self)
- self.scripts = []
- for script in self.distribution.scripts:
- if isinstance(script, str):
- self.scripts.append(script)
- else:
- self.scripts.extend(script[1])
+ def finalize_options(self):
+ _build_scripts.finalize_options(self)
+ self.scripts = []
+ for script in self.distribution.scripts:
+ if isinstance(script, str):
+ self.scripts.append(script)
+ else:
+ self.scripts.extend(script[1])
class install_scripts(_install_scripts):
- """
- Hacked version of distutils.install_scripts, designed to support
- multiple target installation directories like install_data does.
+ """
+ Hacked version of distutils.install_scripts, designed to support
+ multiple target installation directories like install_data does.
- [(target_directory, [list_of_source_scripts]), ...]
+ [(target_directory, [list_of_source_scripts]), ...]
- The code here is a tweaked combination of what the stock
- install_scripts and install_data classes do.
- """
+ The code here is a tweaked combination of what the stock
+ install_scripts and install_data classes do.
+ """
- user_options = _install_scripts.user_options + [
- ("root=", None, "install everything relative to this alternate root directory")]
+ user_options = _install_scripts.user_options + [
+ ("root=", None, "install everything relative to this alternate root directory")]
- def initialize_options(self):
- _install_scripts.initialize_options(self)
- self.outfiles = []
- self.root = None
+ def initialize_options(self):
+ _install_scripts.initialize_options(self)
+ self.outfiles = []
+ self.root = None
- def finalize_options (self):
- self.set_undefined_options("build",
- ("build_scripts", "build_dir"))
- self.set_undefined_options("install",
- ("install_scripts", "install_dir"),
- ("root", "root"),
- ("force", "force"),
- ("skip_build", "skip_build"))
+ def finalize_options (self):
+ self.set_undefined_options("build",
+ ("build_scripts", "build_dir"))
+ self.set_undefined_options("install",
+ ("install_scripts", "install_dir"),
+ ("root", "root"),
+ ("force", "force"),
+ ("skip_build", "skip_build"))
- def run(self):
- if not self.skip_build:
- self.run_command("build_scripts")
- for script in self.distribution.scripts:
- if isinstance(script, str):
- fn = os.path.join(self.build_dir, os.path.basename(convert_path(script)))
- out, _ = self.copy_file(fn, self.install_dir)
- self.outfiles.append(out)
- else:
- dn = convert_path(script[0])
- if not os.path.isabs(dn):
- dn = os.path.join(self.install_dir, dn)
- elif self.root:
- dn = change_root(self.root, dn)
- self.mkpath(dn)
- if not script[1]:
- self.outfiles.append(dn)
- else:
- for s in script[1]:
- fn = os.path.join(self.build_dir, os.path.basename(convert_path(s)))
- out, _ = self.copy_file(fn, dn)
- self.outfiles.append(out)
- if os.name == "posix":
- for fn in self.get_outputs():
- mode = S_IMODE(os.stat(fn).st_mode) | 0555
- log.info("changing mode of %s to %o", fn, mode)
- if not self.dry_run:
- os.chmod(fn, mode)
+ def run(self):
+ if not self.skip_build:
+ self.run_command("build_scripts")
+ for script in self.distribution.scripts:
+ if isinstance(script, str):
+ fn = os.path.join(self.build_dir, os.path.basename(convert_path(script)))
+ out, _ = self.copy_file(fn, self.install_dir)
+ self.outfiles.append(out)
+ else:
+ dn = convert_path(script[0])
+ if not os.path.isabs(dn):
+ dn = os.path.join(self.install_dir, dn)
+ elif self.root:
+ dn = change_root(self.root, dn)
+ self.mkpath(dn)
+ if not script[1]:
+ self.outfiles.append(dn)
+ else:
+ for s in script[1]:
+ fn = os.path.join(self.build_dir, os.path.basename(convert_path(s)))
+ out, _ = self.copy_file(fn, dn)
+ self.outfiles.append(out)
+ if os.name == "posix":
+ for fn in self.get_outputs():
+ mode = S_IMODE(os.stat(fn).st_mode) | 0555
+ log.info("changing mode of %s to %o", fn, mode)
+ if not self.dry_run:
+ os.chmod(fn, mode)