From 440687c9d13a28b902bd9594f88eea6f42be686e Mon Sep 17 00:00:00 2001 From: Rob Austein Date: Mon, 7 Apr 2014 21:25:15 +0000 Subject: Whack test code to work with new tree, fix relaxng.py. svn path=/branches/tk685/; revision=5765 --- Makefile.in | 9 + ca/Makefile.in | 64 +- ca/doc/README | 16 - ca/doc/mainpage.py | 71 -- ca/doc/pubd-bpki.dot | 42 - ca/doc/rpkid-bpki.dot | 76 -- ca/tests/revoke.yaml | 60 +- ca/tests/smoketest.1.yaml | 4 +- ca/tests/smoketest.3.yaml | 12 +- ca/tests/smoketest.7.yaml | 4 +- ca/tests/smoketest.py | 8 +- ca/tests/yamltest-test-all.sh | 6 +- configure | 12 +- configure.ac | 10 +- doc/pubd-bpki.dot | 42 + doc/rpkid-bpki.dot | 76 ++ rp/Makefile.in | 2 +- rp/rpki-rtr/Makefile.in | 51 + rp/rpki-rtr/README | 11 + rp/rpki-rtr/rtr-origin | 2278 ++++++++++++++++++++++++++++++++++++++++ rp/rpki-rtr/rules.darwin.mk | 9 + rp/rpki-rtr/rules.freebsd.mk | 37 + rp/rpki-rtr/rules.linux.mk | 29 + rp/rpki-rtr/rules.unknown.mk | 8 + rp/rpki-rtr/server.sh | 17 + rp/rpki-rtr/sshd.conf | 23 + rp/rtr-origin/Makefile.in | 51 - rp/rtr-origin/README | 11 - rp/rtr-origin/rtr-origin | 2278 ---------------------------------------- rp/rtr-origin/rules.darwin.mk | 9 - rp/rtr-origin/rules.freebsd.mk | 37 - rp/rtr-origin/rules.linux.mk | 29 - rp/rtr-origin/rules.unknown.mk | 8 - rp/rtr-origin/server.sh | 17 - rp/rtr-origin/sshd.conf | 23 - rpki/relaxng.py | 1764 +++++++++++++++---------------- schemas/Makefile.in | 2 +- 37 files changed, 3539 insertions(+), 3667 deletions(-) delete mode 100644 ca/doc/README delete mode 100644 ca/doc/mainpage.py delete mode 100644 ca/doc/pubd-bpki.dot delete mode 100644 ca/doc/rpkid-bpki.dot create mode 100644 doc/pubd-bpki.dot create mode 100644 doc/rpkid-bpki.dot create mode 100644 rp/rpki-rtr/Makefile.in create mode 100644 rp/rpki-rtr/README create mode 100755 rp/rpki-rtr/rtr-origin create mode 100644 rp/rpki-rtr/rules.darwin.mk create mode 100644 rp/rpki-rtr/rules.freebsd.mk create mode 100644 rp/rpki-rtr/rules.linux.mk create mode 
100644 rp/rpki-rtr/rules.unknown.mk create mode 100755 rp/rpki-rtr/server.sh create mode 100644 rp/rpki-rtr/sshd.conf delete mode 100644 rp/rtr-origin/Makefile.in delete mode 100644 rp/rtr-origin/README delete mode 100755 rp/rtr-origin/rtr-origin delete mode 100644 rp/rtr-origin/rules.darwin.mk delete mode 100644 rp/rtr-origin/rules.freebsd.mk delete mode 100644 rp/rtr-origin/rules.linux.mk delete mode 100644 rp/rtr-origin/rules.unknown.mk delete mode 100755 rp/rtr-origin/server.sh delete mode 100644 rp/rtr-origin/sshd.conf diff --git a/Makefile.in b/Makefile.in index 71dab019..4465ef9e 100644 --- a/Makefile.in +++ b/Makefile.in @@ -97,3 +97,12 @@ ${POW_SO}: .FORCE setup_autoconf.py build/stamp: .FORCE setup_autoconf.py ${PYTHON} setup.py build touch $@ + +lint: + find rpki -name '*.py' | xargs pylint --rcfile ${abs_top_srcdir}/buildtools/pylint.rc + +tags: Makefile + find rpki rp ca schemas -type f \ + \( -name '*.[ch] -o -name '*.py' -o -name '*.sql' -o -name '*.rnc' \) \ + ! -name relaxng.py ! 
-name sql_schemas.py -print | \ + etags - diff --git a/ca/Makefile.in b/ca/Makefile.in index 0764edc1..317b8395 100644 --- a/ca/Makefile.in +++ b/ca/Makefile.in @@ -55,73 +55,23 @@ uninstall deinstall:: distclean:: rm -f installed -dont-run-trang: - touch *.rng - -relaxng: left-right-schema.rng up-down-schema.rng publication-schema.rng +relaxng: cd tests; $(MAKE) protocol-samples - xmllint --noout --relaxng left-right-schema.rng tests/left-right-protocol-samples/*.xml - xmllint --noout --relaxng up-down-schema.rng tests/up-down-protocol-samples/*.xml - xmllint --noout --relaxng publication-schema.rng tests/publication-protocol-samples/*.xml + xmllint --noout --relaxng ../schemas/relaxng/left-right-schema.rng tests/left-right-protocol-samples/*.xml + xmllint --noout --relaxng ../schemas/relaxng/up-down-schema.rng tests/up-down-protocol-samples/*.xml + xmllint --noout --relaxng ../schemas/relaxng/publication-schema.rng tests/publication-protocol-samples/*.xml unit-tests: all PWD=`pwd`; for i in rpki/*.py; do echo "[$$i]"; PYTHONPATH=$$PWD ${PYTHON} $$i; done -all-tests:: unit-tests - -all-tests:: relaxng +all-tests:: unit-tests relaxng test all-tests parse-test profile yamltest yamlconf:: all cd tests; $(MAKE) $@ -tags: Makefile - find . -type d -name build -prune -o -type f \( -name '*.py' -o -name '*.sql' -o -name '*.rnc' -o -name '*.py.in' \) ! -name relaxng.py ! -name sql_schemas.py -print | etags - - -lint: - pylint --rcfile ${abs_top_srcdir}/buildtools/pylint.rc rpki/*.py rpki/irdb/*.py *.py tests/*.py - -# Documentation - -doc/pubd.dot: pubd.sql - sh ${abs_top_srcdir}/buildtools/graphviz-sql.sh $? >$@ - -doc/rpkid.dot: rpkid.sql - sh ${abs_top_srcdir}/buildtools/graphviz-sql.sh $? >$@ - -.SUFFIXES: .dot .png .pdf .eps - -.dot.pdf: - dot -Tps2 $? | ps2pdf - $@ - -.dot.eps: - dot -o $@ -Teps $? - -.dot.png: - dot -o $@ -Tpng $? 
- -dot: doc/pubd.dot doc/rpkid.dot - -eps: doc/pubd.eps doc/rpkid.eps doc/rpkid-bpki.eps doc/pubd-bpki.eps - -png: doc/pubd.png doc/rpkid.png doc/rpkid-bpki.png doc/pubd-bpki.png - -pdf: doc/pubd.pdf doc/rpkid.pdf doc/rpkid-bpki.pdf doc/pubd-bpki.pdf - -docclean: - rm -rf doc/html doc/latex doc/xml - rm -f doc/*.eps doc/*.pdf doc/*.png - rm -f doc/pubd.dot doc/rpkid.dot - -html: dot eps png - TZ='' IMAGE_PATH=${abs_builddir}/doc doxygen - -docs: dot eps png html pdf - -## - -distclean:: clean docclean +distclean:: clean cd tests; ${MAKE} $@ - rm -f TAGS Makefile + rm -f Makefile all:: examples/rpki.conf diff --git a/ca/doc/README b/ca/doc/README deleted file mode 100644 index 33902d7e..00000000 --- a/ca/doc/README +++ /dev/null @@ -1,16 +0,0 @@ -$Id$ - -Internals documentation for the RPKI CA tools. - -Once upon a time this included the hand-written documentation for the -CA tools, but that is now part of the overall package documentation. -What's left here is just what Doxygen generates from the source code -and a few Graphviz diagrams. - -At the moment the control for the stuff generated here is still -../Makefile, that may change at some point. - -We no longer generate the documentation here automatically, as it's -kind of large and we're not sure anybody else cares about it, so if -you want this manual you'll have to install Doxygen and build it -yourself. diff --git a/ca/doc/mainpage.py b/ca/doc/mainpage.py deleted file mode 100644 index 4570547b..00000000 --- a/ca/doc/mainpage.py +++ /dev/null @@ -1,71 +0,0 @@ -## @file -# @details -# Doxygen documentation source, expressed as Python comments to make Doxygen happy. -# -# $Id$ -# -# Copyright (C) 2009--2012 Internet Systems Consortium ("ISC") -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. 
-# -# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH -# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, -# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -# PERFORMANCE OF THIS SOFTWARE. -# -# Portions copyright (C) 2007--2008 American Registry for Internet Numbers ("ARIN") -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH -# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, -# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -# PERFORMANCE OF THIS SOFTWARE. - -## @mainpage RPKI Engine Reference Manual -# -# This collection of Python modules implements an RPKI CA engine. -# -# See http://trac.rpki.net/ for the RPKI tools package documentation. -# -# The documentation you're reading is generated automatically by -# Doxygen from comments and documentation in -# the code. -# -# At one point this manual also included documentation for the CA -# tools, but that has been integrated into the overall package -# documentation. This manual is now just the CA tools internals. 
- -## @page sql-schemas SQL database schemas -# -# @li @subpage rpkid-sql "rpkid database schema" -# @li @subpage pubd-sql "pubd database schema" - -## @page rpkid-sql rpkid SQL schema -# -# @image html rpkid.png "Diagram of rpkid.sql" -# @image latex rpkid.eps "Diagram of rpkid.sql" height=\textheight -# -# @verbinclude rpkid.sql - -## @page pubd-sql pubd SQL Schema -# -# @image html pubd.png "Diagram of pubd.sql" -# @image latex pubd.eps "Diagram of pubd.sql" width=\textwidth -# -# @verbinclude pubd.sql - -# Local Variables: -# mode:python -# compile-command: "cd ../.. && ./config.status && cd rpkid && make docs" -# End: diff --git a/ca/doc/pubd-bpki.dot b/ca/doc/pubd-bpki.dot deleted file mode 100644 index 44ad8a90..00000000 --- a/ca/doc/pubd-bpki.dot +++ /dev/null @@ -1,42 +0,0 @@ -// $Id$ - -// Color code: -// Black: Operating entity -// Red: Cross-certified client -// -// Shape code: -// Octagon: TA -// Diamond: CA -// Record: EE - -digraph bpki_pubd { - splines = true; - size = "14,14"; - node [ fontname = Times, fontsize = 9 ]; - - // Operating entity - node [ color = black, fontcolor = black, shape = record ]; - TA [ shape = octagon, label = "BPKI TA" ]; - pubd [ label = "pubd|{HTTPS server|CMS}" ]; - ctl [ label = "Control|{HTTPS client|CMS}" ]; - - // Clients - node [ color = red, fontcolor = red, shape = diamond ]; - Alice_CA; - Bob_CA; - node [ color = red, fontcolor = red, shape = record ]; - Alice_EE [ label = "Alice\nEE|{HTTPS client|CMS}" ]; - Bob_EE [ label = "Bob\nEE|{HTTPS client|CMS}" ]; - - edge [ color = black, style = dotted ]; - TA -> pubd; - TA -> ctl; - - edge [ color = black, style = solid ]; - TA -> Alice_CA; - TA -> Bob_CA; - - edge [ color = red, style = solid ]; - Alice_CA -> Alice_EE; - Bob_CA -> Bob_EE; -} diff --git a/ca/doc/rpkid-bpki.dot b/ca/doc/rpkid-bpki.dot deleted file mode 100644 index 651591cb..00000000 --- a/ca/doc/rpkid-bpki.dot +++ /dev/null @@ -1,76 +0,0 @@ -// $Id$ - -// Color code: -// Black: Hosting entity -// 
Blue: Hosted entity -// Red: Cross-certified peer -// -// Shape code: -// Octagon: TA -// Diamond: CA -// Record: EE - -digraph bpki_rpkid { - splines = true; - size = "14,14"; - node [ fontname = Times, fontsize = 9 ]; - - // Hosting entity - node [ color = black, shape = record ]; - TA [ shape = octagon, label = "BPKI TA" ]; - rpkid [ label = "rpkid|{HTTPS server|HTTPS left-right client|CMS left-right}" ]; - irdbd [ label = "irdbd|{HTTPS left-right server|CMS left-right}" ]; - irbe [ label = "IRBE|{HTTPS left-right client|CMS left-right}" ]; - - // Hosted entities - node [ color = blue, fontcolor = blue ]; - Alice_CA [ shape = diamond ]; - Alice_EE [ label = "Alice\nBSC EE|{HTTPS up-down client|CMS up-down}" ]; - Ellen_CA [ shape = diamond ]; - Ellen_EE [ label = "Ellen\nBSC EE|{HTTPS up-down client|CMS up-down}" ]; - - // Peers - node [ color = red, fontcolor = red, shape = diamond ]; - Bob_CA; - Carol_CA; - Dave_CA; - Frank_CA; - Ginny_CA; - Harry_CA; - node [ shape = record ]; - Bob_EE [ label = "Bob\nEE|{HTTPS up-down|CMS up-down}" ]; - Carol_EE [ label = "Carol\nEE|{HTTPS up-down|CMS up-down}" ]; - Dave_EE [ label = "Dave\nEE|{HTTPS up-down|CMS up-down}" ]; - Frank_EE [ label = "Frank\nEE|{HTTPS up-down|CMS up-down}" ]; - Ginny_EE [ label = "Ginny\nEE|{HTTPS up-down|CMS up-down}" ]; - Harry_EE [ label = "Bob\nEE|{HTTPS up-down|CMS up-down}" ]; - - edge [ color = black, style = solid ]; - TA -> Alice_CA; - TA -> Ellen_CA; - - edge [ color = black, style = dotted ]; - TA -> rpkid; - TA -> irdbd; - TA -> irbe; - - edge [ color = blue, style = solid ]; - Alice_CA -> Bob_CA; - Alice_CA -> Carol_CA; - Alice_CA -> Dave_CA; - Ellen_CA -> Frank_CA; - Ellen_CA -> Ginny_CA; - Ellen_CA -> Harry_CA; - - edge [ color = blue, style = dotted ]; - Alice_CA -> Alice_EE; - Ellen_CA -> Ellen_EE; - - edge [ color = red, style = solid ]; - Bob_CA -> Bob_EE; - Carol_CA -> Carol_EE; - Dave_CA -> Dave_EE; - Frank_CA -> Frank_EE; - Ginny_CA -> Ginny_EE; - Harry_CA -> Harry_EE; -} 
diff --git a/ca/tests/revoke.yaml b/ca/tests/revoke.yaml index 2edb8335..ae53f0d8 100644 --- a/ca/tests/revoke.yaml +++ b/ca/tests/revoke.yaml @@ -54,7 +54,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -67,7 +67,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 @@ -81,7 +81,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -92,7 +92,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -105,7 +105,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -118,7 +118,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -129,7 +129,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -142,7 +142,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -155,7 +155,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . 
-type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -166,7 +166,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -179,7 +179,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -192,7 +192,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -203,7 +203,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -216,7 +216,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -229,7 +229,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -240,7 +240,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -253,7 +253,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -266,7 +266,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . 
-type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -277,7 +277,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -290,7 +290,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -303,7 +303,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -314,7 +314,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -327,7 +327,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -340,7 +340,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -351,7 +351,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -364,7 +364,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -377,7 +377,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . 
-type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 @@ -388,7 +388,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -401,7 +401,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - name: R0 @@ -414,7 +414,7 @@ kids: cd rcynic-data; pax -rwl . ../$dir; find . -type f -name '*.cer' | sort | - xargs ../../../../utils/uri/uri -s + xargs ../../../../rp/utils/uri -s >../${dir%.data}.uris; sleep 1 - sleep 30 diff --git a/ca/tests/smoketest.1.yaml b/ca/tests/smoketest.1.yaml index 914aaae4..81bc887a 100644 --- a/ca/tests/smoketest.1.yaml +++ b/ca/tests/smoketest.1.yaml @@ -52,7 +52,7 @@ kids: --- - shell set -x; - rtr_origin='python ../../../rtr-origin/rtr-origin.py'; + rtr_origin='python ../../../rp/rpki-rtr/rtr-origin.py'; $rtr_origin --cronjob rcynic-data/authenticated && $rtr_origin --show --- @@ -70,7 +70,7 @@ kids: ipv4: 10.3.0.1/32, 10.0.0.44/32 --- - shell set -x; - rtr_origin='python ../../../rtr-origin/rtr-origin.py'; + rtr_origin='python ../../../rp/rpki-rtr/rtr-origin.py'; $rtr_origin --cronjob rcynic-data/authenticated && $rtr_origin --show --- diff --git a/ca/tests/smoketest.3.yaml b/ca/tests/smoketest.3.yaml index e6a10a12..41a88643 100644 --- a/ca/tests/smoketest.3.yaml +++ b/ca/tests/smoketest.3.yaml @@ -51,17 +51,17 @@ kids: ipv4: 10.3.0.0/23 --- #- shell find publication -type f -name '*.roa' -# -print -exec ../../../utils/print_roa/print_roa {} \; +# -print -exec ../../../rp/utils/print_roa {} \; #- shell find publication -type f -name '*.mft' -# -print -exec ../../../utils/print_manifest/print_manifest {} \; +# -print -exec ../../../rp/utils/print_manifest {} \; #--- #- 
shell find publication -type f -name '*.roa' -# -print -exec ../../../utils/print_roa/print_roa {} \; +# -print -exec ../../../rp/utils/print_roa {} \; #- shell find publication -type f -name '*.mft' -# -print -exec ../../../utils/print_manifest/print_manifest {} \; +# -print -exec ../../../rp/utils/print_manifest {} \; #--- - shell set -x; - rtr_origin=../../../rtr-origin/rtr-origin; + rtr_origin=../../../rp/rpki-rtr/rtr-origin; $rtr_origin --cronjob rcynic-data/authenticated && $rtr_origin --show --- @@ -76,6 +76,6 @@ kids: ipv6: 2002:0a00::/32-128 --- - shell set -x; - rtr_origin=../../../rtr-origin/rtr-origin; + rtr_origin=../../../rp/rpki-rtr/rtr-origin; $rtr_origin --cronjob rcynic-data/authenticated && $rtr_origin --show diff --git a/ca/tests/smoketest.7.yaml b/ca/tests/smoketest.7.yaml index fedd2fff..7c808552 100644 --- a/ca/tests/smoketest.7.yaml +++ b/ca/tests/smoketest.7.yaml @@ -70,8 +70,8 @@ roa_request: --- - shell set -x; find publication -type f -name '*.roa' - -print -exec ../../../utils/print_roa/print_roa {} \; + -print -exec ../../../rp/utils/print_roa {} \; ; - rtr_origin=../../../rtr-origin/rtr-origin; + rtr_origin=../../../rp/rpki-rtr/rtr-origin; $rtr_origin --cronjob rcynic-data/authenticated && $rtr_origin --show diff --git a/ca/tests/smoketest.py b/ca/tests/smoketest.py index 28bedaa4..e5f5a754 100644 --- a/ca/tests/smoketest.py +++ b/ca/tests/smoketest.py @@ -115,14 +115,14 @@ prog_poke = cfg.get("prog_poke", "../testpoke.py") prog_rootd = cfg.get("prog_rootd", "../../rootd") prog_pubd = cfg.get("prog_pubd", "../../pubd") prog_rsyncd = cfg.get("prog_rsyncd", "rsync") -prog_rcynic = cfg.get("prog_rcynic", "../../../rcynic/rcynic") +prog_rcynic = cfg.get("prog_rcynic", "../../../rp/rcynic/rcynic") prog_openssl = cfg.get("prog_openssl", "../../../openssl/openssl/apps/openssl") -rcynic_stats = cfg.get("rcynic_stats", "echo ; ../../../rcynic/rcynic-text %s.xml ; echo" % rcynic_name) +rcynic_stats = cfg.get("rcynic_stats", "echo ; 
../../../rp/rcynic/rcynic-text %s.xml ; echo" % rcynic_name) -rpki_sql_file = cfg.get("rpki_sql_file", "../rpkid.sql") +rpki_sql_file = cfg.get("rpki_sql_file", "../../schemas/sql/rpkid.sql") irdb_sql_file = cfg.get("irdb_sql_file", "old_irdbd.sql") -pub_sql_file = cfg.get("pub_sql_file", "../pubd.sql") +pub_sql_file = cfg.get("pub_sql_file", "../../schemas/sql/pubd.sql") startup_delay = int(cfg.get("startup_delay", "10")) diff --git a/ca/tests/yamltest-test-all.sh b/ca/tests/yamltest-test-all.sh index 8daea04e..4bd5c560 100644 --- a/ca/tests/yamltest-test-all.sh +++ b/ca/tests/yamltest-test-all.sh @@ -43,9 +43,9 @@ do do sleep 30 date - ../../rcynic/rcynic - ../../rcynic/rcynic-text rcynic.xml - ../../utils/scan_roas/scan_roas rcynic-data/authenticated + ../../rp/rcynic/rcynic + ../../rp/rcynic/rcynic-text rcynic.xml + ../../rp/utils/scan_roas rcynic-data/authenticated date echo "$title" done diff --git a/configure b/configure index 921affbf..f701ab4c 100755 --- a/configure +++ b/configure @@ -4576,22 +4576,22 @@ $as_echo "$enable_target_installation" >&6; } case $host_os in darwin*) RCYNIC_MAKE_RULES='rp/rcynic/rules.darwin.mk' - RTR_ORIGIN_MAKE_RULES='rp/rtr-origin/rules.darwin.mk' + RTR_ORIGIN_MAKE_RULES='rp/rpki-rtr/rules.darwin.mk' CA_MAKE_RULES='ca/rules.darwin.mk' ;; freebsd*) RCYNIC_MAKE_RULES='rp/rcynic/rules.freebsd.mk' - RTR_ORIGIN_MAKE_RULES='rp/rtr-origin/rules.freebsd.mk' + RTR_ORIGIN_MAKE_RULES='rp/rpki-rtr/rules.freebsd.mk' CA_MAKE_RULES='ca/rules.freebsd.mk' ;; linux*) RCYNIC_MAKE_RULES='rp/rcynic/rules.linux.mk' - RTR_ORIGIN_MAKE_RULES='rp/rtr-origin/rules.linux.mk' + RTR_ORIGIN_MAKE_RULES='rp/rpki-rtr/rules.linux.mk' CA_MAKE_RULES='ca/rules.linux.mk' ;; *) RCYNIC_MAKE_RULES='rp/rcynic/rules.unknown.mk' - RTR_ORIGIN_MAKE_RULES='rp/rtr-origin/rules.unknown.mk' + RTR_ORIGIN_MAKE_RULES='rp/rpki-rtr/rules.unknown.mk' CA_MAKE_RULES='ca/rules.unknown.mk' ;; esac @@ -5056,7 +5056,7 @@ fi if test $build_rp_tools = yes then - 
ac_config_files="$ac_config_files rp/Makefile rp/rcynic/Makefile rp/rcynic/static-rsync/Makefile rp/utils/Makefile rp/rtr-origin/Makefile" + ac_config_files="$ac_config_files rp/Makefile rp/rcynic/Makefile rp/rcynic/static-rsync/Makefile rp/utils/Makefile rp/rpki-rtr/Makefile" fi @@ -5810,7 +5810,7 @@ do "rp/rcynic/Makefile") CONFIG_FILES="$CONFIG_FILES rp/rcynic/Makefile" ;; "rp/rcynic/static-rsync/Makefile") CONFIG_FILES="$CONFIG_FILES rp/rcynic/static-rsync/Makefile" ;; "rp/utils/Makefile") CONFIG_FILES="$CONFIG_FILES rp/utils/Makefile" ;; - "rp/rtr-origin/Makefile") CONFIG_FILES="$CONFIG_FILES rp/rtr-origin/Makefile" ;; + "rp/rpki-rtr/Makefile") CONFIG_FILES="$CONFIG_FILES rp/rpki-rtr/Makefile" ;; "ca/Makefile") CONFIG_FILES="$CONFIG_FILES ca/Makefile" ;; "ca/tests/Makefile") CONFIG_FILES="$CONFIG_FILES ca/tests/Makefile" ;; diff --git a/configure.ac b/configure.ac index ff25b982..c8f153d7 100644 --- a/configure.ac +++ b/configure.ac @@ -388,22 +388,22 @@ AC_MSG_RESULT([$enable_target_installation]) case $host_os in darwin*) RCYNIC_MAKE_RULES='rp/rcynic/rules.darwin.mk' - RTR_ORIGIN_MAKE_RULES='rp/rtr-origin/rules.darwin.mk' + RTR_ORIGIN_MAKE_RULES='rp/rpki-rtr/rules.darwin.mk' CA_MAKE_RULES='ca/rules.darwin.mk' ;; freebsd*) RCYNIC_MAKE_RULES='rp/rcynic/rules.freebsd.mk' - RTR_ORIGIN_MAKE_RULES='rp/rtr-origin/rules.freebsd.mk' + RTR_ORIGIN_MAKE_RULES='rp/rpki-rtr/rules.freebsd.mk' CA_MAKE_RULES='ca/rules.freebsd.mk' ;; linux*) RCYNIC_MAKE_RULES='rp/rcynic/rules.linux.mk' - RTR_ORIGIN_MAKE_RULES='rp/rtr-origin/rules.linux.mk' + RTR_ORIGIN_MAKE_RULES='rp/rpki-rtr/rules.linux.mk' CA_MAKE_RULES='ca/rules.linux.mk' ;; *) RCYNIC_MAKE_RULES='rp/rcynic/rules.unknown.mk' - RTR_ORIGIN_MAKE_RULES='rp/rtr-origin/rules.unknown.mk' + RTR_ORIGIN_MAKE_RULES='rp/rpki-rtr/rules.unknown.mk' CA_MAKE_RULES='ca/rules.unknown.mk' ;; esac @@ -818,7 +818,7 @@ then rp/rcynic/Makefile rp/rcynic/static-rsync/Makefile rp/utils/Makefile - rp/rtr-origin/Makefile]) + rp/rpki-rtr/Makefile]) fi 
if test $build_ca_tools = yes diff --git a/doc/pubd-bpki.dot b/doc/pubd-bpki.dot new file mode 100644 index 00000000..44ad8a90 --- /dev/null +++ b/doc/pubd-bpki.dot @@ -0,0 +1,42 @@ +// $Id$ + +// Color code: +// Black: Operating entity +// Red: Cross-certified client +// +// Shape code: +// Octagon: TA +// Diamond: CA +// Record: EE + +digraph bpki_pubd { + splines = true; + size = "14,14"; + node [ fontname = Times, fontsize = 9 ]; + + // Operating entity + node [ color = black, fontcolor = black, shape = record ]; + TA [ shape = octagon, label = "BPKI TA" ]; + pubd [ label = "pubd|{HTTPS server|CMS}" ]; + ctl [ label = "Control|{HTTPS client|CMS}" ]; + + // Clients + node [ color = red, fontcolor = red, shape = diamond ]; + Alice_CA; + Bob_CA; + node [ color = red, fontcolor = red, shape = record ]; + Alice_EE [ label = "Alice\nEE|{HTTPS client|CMS}" ]; + Bob_EE [ label = "Bob\nEE|{HTTPS client|CMS}" ]; + + edge [ color = black, style = dotted ]; + TA -> pubd; + TA -> ctl; + + edge [ color = black, style = solid ]; + TA -> Alice_CA; + TA -> Bob_CA; + + edge [ color = red, style = solid ]; + Alice_CA -> Alice_EE; + Bob_CA -> Bob_EE; +} diff --git a/doc/rpkid-bpki.dot b/doc/rpkid-bpki.dot new file mode 100644 index 00000000..651591cb --- /dev/null +++ b/doc/rpkid-bpki.dot @@ -0,0 +1,76 @@ +// $Id$ + +// Color code: +// Black: Hosting entity +// Blue: Hosted entity +// Red: Cross-certified peer +// +// Shape code: +// Octagon: TA +// Diamond: CA +// Record: EE + +digraph bpki_rpkid { + splines = true; + size = "14,14"; + node [ fontname = Times, fontsize = 9 ]; + + // Hosting entity + node [ color = black, shape = record ]; + TA [ shape = octagon, label = "BPKI TA" ]; + rpkid [ label = "rpkid|{HTTPS server|HTTPS left-right client|CMS left-right}" ]; + irdbd [ label = "irdbd|{HTTPS left-right server|CMS left-right}" ]; + irbe [ label = "IRBE|{HTTPS left-right client|CMS left-right}" ]; + + // Hosted entities + node [ color = blue, fontcolor = blue ]; + Alice_CA [ 
shape = diamond ]; + Alice_EE [ label = "Alice\nBSC EE|{HTTPS up-down client|CMS up-down}" ]; + Ellen_CA [ shape = diamond ]; + Ellen_EE [ label = "Ellen\nBSC EE|{HTTPS up-down client|CMS up-down}" ]; + + // Peers + node [ color = red, fontcolor = red, shape = diamond ]; + Bob_CA; + Carol_CA; + Dave_CA; + Frank_CA; + Ginny_CA; + Harry_CA; + node [ shape = record ]; + Bob_EE [ label = "Bob\nEE|{HTTPS up-down|CMS up-down}" ]; + Carol_EE [ label = "Carol\nEE|{HTTPS up-down|CMS up-down}" ]; + Dave_EE [ label = "Dave\nEE|{HTTPS up-down|CMS up-down}" ]; + Frank_EE [ label = "Frank\nEE|{HTTPS up-down|CMS up-down}" ]; + Ginny_EE [ label = "Ginny\nEE|{HTTPS up-down|CMS up-down}" ]; + Harry_EE [ label = "Bob\nEE|{HTTPS up-down|CMS up-down}" ]; + + edge [ color = black, style = solid ]; + TA -> Alice_CA; + TA -> Ellen_CA; + + edge [ color = black, style = dotted ]; + TA -> rpkid; + TA -> irdbd; + TA -> irbe; + + edge [ color = blue, style = solid ]; + Alice_CA -> Bob_CA; + Alice_CA -> Carol_CA; + Alice_CA -> Dave_CA; + Ellen_CA -> Frank_CA; + Ellen_CA -> Ginny_CA; + Ellen_CA -> Harry_CA; + + edge [ color = blue, style = dotted ]; + Alice_CA -> Alice_EE; + Ellen_CA -> Ellen_EE; + + edge [ color = red, style = solid ]; + Bob_CA -> Bob_EE; + Carol_CA -> Carol_EE; + Dave_CA -> Dave_EE; + Frank_CA -> Frank_EE; + Ginny_CA -> Ginny_EE; + Harry_CA -> Harry_EE; +} diff --git a/rp/Makefile.in b/rp/Makefile.in index ceeef9f1..2c770a46 100644 --- a/rp/Makefile.in +++ b/rp/Makefile.in @@ -1,6 +1,6 @@ # $Id$ -SUBDIRS = rcynic rtr-origin utils +SUBDIRS = rcynic rpki-rtr utils all clean test distclean install deinstall uninstall:: @for i in ${SUBDIRS}; do echo "Making $@ in $$i"; (cd $$i && ${MAKE} $@); done diff --git a/rp/rpki-rtr/Makefile.in b/rp/rpki-rtr/Makefile.in new file mode 100644 index 00000000..e587305b --- /dev/null +++ b/rp/rpki-rtr/Makefile.in @@ -0,0 +1,51 @@ +# $Id$ + +BIN = rtr-origin + +INSTALL = @INSTALL@ +PYTHON = @PYTHON@ +AWK = @AWK@ + +prefix = @prefix@ +exec_prefix = 
@exec_prefix@ +datarootdir = @datarootdir@ +datadir = @datadir@ +localstatedir = @localstatedir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ +bindir = @bindir@ +sbindir = @sbindir@ +libexecdir = @libexecdir@ +libdir = @libdir@ + +abs_top_srcdir = @abs_top_srcdir@ +abs_top_builddir = @abs_top_builddir@ + +RTR_ORIGIN_INSTALL_TARGETS = @RCYNIC_INSTALL_TARGETS@ + +RPKI_RTR_PORT = 43779 + +SCAN_ROAS = ${bindir}/scan_roas + + +all clean test:: + @true + +install: all ${RTR_ORIGIN_INSTALL_TARGETS} + +install-binary: + if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -m 555 -d ${DESTDIR}${bindir}; fi + ${INSTALL} -m 555 ${BIN} ${DESTDIR}${bindir}/${BIN} + +deinstall uninstall: + rm -f ${DESTDIR}${bindir}/${BIN} + +distclean: clean + rm -rf current sockets *.ax *.ix.* + rm -f Makefile + +.FORCE: + +# Platform-specific rules below here. + +@RTR_ORIGIN_MAKE_RULES@ diff --git a/rp/rpki-rtr/README b/rp/rpki-rtr/README new file mode 100644 index 00000000..dae53010 --- /dev/null +++ b/rp/rpki-rtr/README @@ -0,0 +1,11 @@ +$Id$ + +Sample implementation of draft-ymbk-rpki-rtr-protocol. + +See: + +- The primary documentation at http://trac.rpki.net/ + +- The PDF manual in ../doc/manual.pdf, or + +- The flat text page ../doc/doc.RPKI.RP.rpki-rtr diff --git a/rp/rpki-rtr/rtr-origin b/rp/rpki-rtr/rtr-origin new file mode 100755 index 00000000..e1e82ccf --- /dev/null +++ b/rp/rpki-rtr/rtr-origin @@ -0,0 +1,2278 @@ +#!/usr/bin/env python + +# Router origin-authentication rpki-router protocol implementation. See +# draft-ietf-sidr-rpki-rtr in fine Internet-Draft repositories near you. +# +# Run the program with the --help argument for usage information, or see +# documentation for the *_main() functions. 
+# +# +# $Id$ +# +# Copyright (C) 2009-2013 Internet Systems Consortium ("ISC") +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. + +import sys +import os +import struct +import time +import glob +import socket +import fcntl +import signal +import syslog +import errno +import asyncore +import asynchat +import subprocess +import traceback +import getopt +import bisect +import random +import base64 + + +# Debugging only, should be False in production +disable_incrementals = False + +# Whether to log backtraces +backtrace_on_exceptions = False + +class IgnoreThisRecord(Exception): + pass + + +class timestamp(int): + """ + Wrapper around time module. + """ + + def __new__(cls, x): + return int.__new__(cls, x) + + @classmethod + def now(cls, delta = 0): + return cls(time.time() + delta) + + def __str__(self): + return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self)) + + +class ipaddr(object): + """ + IP addresses. 
+ """ + + def __init__(self, string = None, value = None): + assert (string is None) != (value is None) + if string is not None: + value = socket.inet_pton(self.af, string) + assert len(value) == self.size + self.value = value + + def __str__(self): + return socket.inet_ntop(self.af, self.value) + + def __cmp__(self, other): + return cmp(self.value, other.value) + +class v4addr(ipaddr): + af = socket.AF_INET + size = 4 + +class v6addr(ipaddr): + af = socket.AF_INET6 + size = 16 + +def read_current(): + """ + Read current serial number and nonce. Return None for both if + serial and nonce not recorded. For backwards compatibility, treat + file containing just a serial number as having a nonce of zero. + """ + try: + f = open("current", "r") + values = tuple(int(s) for s in f.read().split()) + f.close() + return values[0], values[1] + except IndexError: + return values[0], 0 + except IOError: + return None, None + +def write_current(serial, nonce): + """ + Write serial number and nonce. + """ + tmpfn = "current.%d.tmp" % os.getpid() + try: + f = open(tmpfn, "w") + f.write("%d %d\n" % (serial, nonce)) + f.close() + os.rename(tmpfn, "current") + finally: + try: + os.unlink(tmpfn) + except: + pass + + +def new_nonce(): + """ + Create and return a new nonce value. + """ + if force_zero_nonce: + return 0 + try: + return int(random.SystemRandom().getrandbits(16)) + except NotImplementedError: + return int(random.getrandbits(16)) + + +class read_buffer(object): + """ + Wrapper around synchronous/asynchronous read state. + """ + + def __init__(self): + self.buffer = "" + + def update(self, need, callback): + """ + Update count of needed bytes and callback, then dispatch to callback. + """ + self.need = need + self.callback = callback + return self.callback(self) + + def available(self): + """ + How much data do we have available in this buffer? + """ + return len(self.buffer) + + def needed(self): + """ + How much more data does this buffer need to become ready? 
+ """ + return self.need - self.available() + + def ready(self): + """ + Is this buffer ready to read yet? + """ + return self.available() >= self.need + + def get(self, n): + """ + Hand some data to the caller. + """ + b = self.buffer[:n] + self.buffer = self.buffer[n:] + return b + + def put(self, b): + """ + Accumulate some data. + """ + self.buffer += b + + def retry(self): + """ + Try dispatching to the callback again. + """ + return self.callback(self) + +class PDUException(Exception): + """ + Parent exception type for exceptions that signal particular protocol + errors. String value of exception instance will be the message to + put in the error_report PDU, error_report_code value of exception + will be the numeric code to use. + """ + + def __init__(self, msg = None, pdu = None): + assert msg is None or isinstance(msg, (str, unicode)) + self.error_report_msg = msg + self.error_report_pdu = pdu + + def __str__(self): + return self.error_report_msg or self.__class__.__name__ + + def make_error_report(self): + return error_report(errno = self.error_report_code, + errmsg = self.error_report_msg, + errpdu = self.error_report_pdu) + +class UnsupportedProtocolVersion(PDUException): + error_report_code = 4 + +class UnsupportedPDUType(PDUException): + error_report_code = 5 + +class CorruptData(PDUException): + error_report_code = 0 + +class pdu(object): + """ + Object representing a generic PDU in the rpki-router protocol. + Real PDUs are subclasses of this class. + """ + + version = 0 # Protocol version + + _pdu = None # Cached when first generated + + header_struct = struct.Struct("!BBHL") + + def __cmp__(self, other): + return cmp(self.to_pdu(), other.to_pdu()) + + def check(self): + """ + Check attributes to make sure they're within range. 
+ """ + pass + + @classmethod + def read_pdu(cls, reader): + return reader.update(need = cls.header_struct.size, callback = cls.got_header) + + @classmethod + def got_header(cls, reader): + if not reader.ready(): + return None + assert reader.available() >= cls.header_struct.size + version, pdu_type, whatever, length = cls.header_struct.unpack(reader.buffer[:cls.header_struct.size]) + if version != cls.version: + raise UnsupportedProtocolVersion( + "Received PDU version %d, expected %d" % (version, cls.version)) + if pdu_type not in cls.pdu_map: + raise UnsupportedPDUType( + "Received unsupported PDU type %d" % pdu_type) + if length < 8: + raise CorruptData( + "Received PDU with length %d, which is too short to be valid" % length) + self = cls.pdu_map[pdu_type]() + return reader.update(need = length, callback = self.got_pdu) + + def consume(self, client): + """ + Handle results in test client. Default behavior is just to print + out the PDU. + """ + blather(self) + + def send_file(self, server, filename): + """ + Send a content of a file as a cache response. Caller should catch IOError. + """ + f = open(filename, "rb") + server.push_pdu(cache_response(nonce = server.current_nonce)) + server.push_file(f) + server.push_pdu(end_of_data(serial = server.current_serial, nonce = server.current_nonce)) + + def send_nodata(self, server): + """ + Send a nodata error. + """ + server.push_pdu(error_report(errno = error_report.codes["No Data Available"], errpdu = self)) + +class pdu_with_serial(pdu): + """ + Base class for PDUs consisting of just a serial number and nonce. 
+ """ + + header_struct = struct.Struct("!BBHLL") + + def __init__(self, serial = None, nonce = None): + if serial is not None: + assert isinstance(serial, int) + self.serial = serial + if nonce is not None: + assert isinstance(nonce, int) + self.nonce = nonce + + def __str__(self): + return "[%s, serial #%d nonce %d]" % (self.__class__.__name__, self.serial, self.nonce) + + def to_pdu(self): + """ + Generate the wire format PDU. + """ + if self._pdu is None: + self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, + self.header_struct.size, self.serial) + return self._pdu + + def got_pdu(self, reader): + if not reader.ready(): + return None + b = reader.get(self.header_struct.size) + version, pdu_type, self.nonce, length, self.serial = self.header_struct.unpack(b) + if length != 12: + raise CorruptData("PDU length of %d can't be right" % length, pdu = self) + assert b == self.to_pdu() + return self + +class pdu_nonce(pdu): + """ + Base class for PDUs consisting of just a nonce. + """ + + header_struct = struct.Struct("!BBHL") + + def __init__(self, nonce = None): + if nonce is not None: + assert isinstance(nonce, int) + self.nonce = nonce + + def __str__(self): + return "[%s, nonce %d]" % (self.__class__.__name__, self.nonce) + + def to_pdu(self): + """ + Generate the wire format PDU. + """ + if self._pdu is None: + self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, self.header_struct.size) + return self._pdu + + def got_pdu(self, reader): + if not reader.ready(): + return None + b = reader.get(self.header_struct.size) + version, pdu_type, self.nonce, length = self.header_struct.unpack(b) + if length != 8: + raise CorruptData("PDU length of %d can't be right" % length, pdu = self) + assert b == self.to_pdu() + return self + +class pdu_empty(pdu): + """ + Base class for empty PDUs. 
+ """ + + header_struct = struct.Struct("!BBHL") + + def __str__(self): + return "[%s]" % self.__class__.__name__ + + def to_pdu(self): + """ + Generate the wire format PDU for this prefix. + """ + if self._pdu is None: + self._pdu = self.header_struct.pack(self.version, self.pdu_type, 0, self.header_struct.size) + return self._pdu + + def got_pdu(self, reader): + if not reader.ready(): + return None + b = reader.get(self.header_struct.size) + version, pdu_type, zero, length = self.header_struct.unpack(b) + if zero != 0: + raise CorruptData("Must-be-zero field isn't zero" % length, pdu = self) + if length != 8: + raise CorruptData("PDU length of %d can't be right" % length, pdu = self) + assert b == self.to_pdu() + return self + +class serial_notify(pdu_with_serial): + """ + Serial Notify PDU. + """ + + pdu_type = 0 + + def consume(self, client): + """ + Respond to a serial_notify message with either a serial_query or + reset_query, depending on what we already know. + """ + blather(self) + if client.current_serial is None or client.current_nonce != self.nonce: + client.push_pdu(reset_query()) + elif self.serial != client.current_serial: + client.push_pdu(serial_query(serial = client.current_serial, nonce = client.current_nonce)) + else: + blather("[Notify did not change serial number, ignoring]") + +class serial_query(pdu_with_serial): + """ + Serial Query PDU. + """ + + pdu_type = 1 + + def serve(self, server): + """ + Received a serial query, send incremental transfer in response. + If client is already up to date, just send an empty incremental + transfer. 
+ """ + blather(self) + if server.get_serial() is None: + self.send_nodata(server) + elif server.current_nonce != self.nonce: + log("[Client requested wrong nonce, resetting client]") + server.push_pdu(cache_reset()) + elif server.current_serial == self.serial: + blather("[Client is already current, sending empty IXFR]") + server.push_pdu(cache_response(nonce = server.current_nonce)) + server.push_pdu(end_of_data(serial = server.current_serial, nonce = server.current_nonce)) + elif disable_incrementals: + server.push_pdu(cache_reset()) + else: + try: + self.send_file(server, "%d.ix.%d" % (server.current_serial, self.serial)) + except IOError: + server.push_pdu(cache_reset()) + +class reset_query(pdu_empty): + """ + Reset Query PDU. + """ + + pdu_type = 2 + + def serve(self, server): + """ + Received a reset query, send full current state in response. + """ + blather(self) + if server.get_serial() is None: + self.send_nodata(server) + else: + try: + fn = "%d.ax" % server.current_serial + self.send_file(server, fn) + except IOError: + server.push_pdu(error_report(errno = error_report.codes["Internal Error"], + errpdu = self, errmsg = "Couldn't open %s" % fn)) + +class cache_response(pdu_nonce): + """ + Cache Response PDU. + """ + + pdu_type = 3 + + def consume(self, client): + """ + Handle cache_response. + """ + blather(self) + if self.nonce != client.current_nonce: + blather("[Nonce changed, resetting]") + client.cache_reset() + +class end_of_data(pdu_with_serial): + """ + End of Data PDU. + """ + + pdu_type = 7 + + def consume(self, client): + """ + Handle end_of_data response. + """ + blather(self) + client.end_of_data(self.serial, self.nonce) + +class cache_reset(pdu_empty): + """ + Cache reset PDU. + """ + + pdu_type = 8 + + def consume(self, client): + """ + Handle cache_reset response, by issuing a reset_query. + """ + blather(self) + client.cache_reset() + client.push_pdu(reset_query()) + +class prefix(pdu): + """ + Object representing one prefix. 
This corresponds closely to one PDU + in the rpki-router protocol, so closely that we use lexical ordering + of the wire format of the PDU as the ordering for this class. + + This is a virtual class, but the .from_text() constructor + instantiates the correct concrete subclass (ipv4_prefix or + ipv6_prefix) depending on the syntax of its input text. + """ + + header_struct = struct.Struct("!BB2xLBBBx") + asnum_struct = struct.Struct("!L") + + @staticmethod + def from_text(asnum, addr): + """ + Construct a prefix from its text form. + """ + cls = ipv6_prefix if ":" in addr else ipv4_prefix + self = cls() + self.asn = long(asnum) + p, l = addr.split("/") + self.prefix = self.addr_type(string = p) + if "-" in l: + self.prefixlen, self.max_prefixlen = tuple(int(i) for i in l.split("-")) + else: + self.prefixlen = self.max_prefixlen = int(l) + self.announce = 1 + self.check() + return self + + def __str__(self): + plm = "%s/%s-%s" % (self.prefix, self.prefixlen, self.max_prefixlen) + return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, plm, + ":".join(("%02X" % ord(b) for b in self.to_pdu()))) + + def show(self): + blather("# Class: %s" % self.__class__.__name__) + blather("# ASN: %s" % self.asn) + blather("# Prefix: %s" % self.prefix) + blather("# Prefixlen: %s" % self.prefixlen) + blather("# MaxPrefixlen: %s" % self.max_prefixlen) + blather("# Announce: %s" % self.announce) + + def consume(self, client): + """ + Handle one incoming prefix PDU + """ + blather(self) + client.consume_prefix(self) + + def check(self): + """ + Check attributes to make sure they're within range. 
+ """ + if self.announce not in (0, 1): + raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self) + if self.prefixlen < 0 or self.prefixlen > self.addr_type.size * 8: + raise CorruptData("Implausible prefix length %d" % self.prefixlen, pdu = self) + if self.max_prefixlen < self.prefixlen or self.max_prefixlen > self.addr_type.size * 8: + raise CorruptData("Implausible max prefix length %d" % self.max_prefixlen, pdu = self) + pdulen = self.header_struct.size + self.addr_type.size + self.asnum_struct.size + if len(self.to_pdu()) != pdulen: + raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self) + + def to_pdu(self, announce = None): + """ + Generate the wire format PDU for this prefix. + """ + if announce is not None: + assert announce in (0, 1) + elif self._pdu is not None: + return self._pdu + pdulen = self.header_struct.size + self.addr_type.size + self.asnum_struct.size + pdu = (self.header_struct.pack(self.version, self.pdu_type, pdulen, + announce if announce is not None else self.announce, + self.prefixlen, self.max_prefixlen) + + self.prefix.value + + self.asnum_struct.pack(self.asn)) + if announce is None: + assert self._pdu is None + self._pdu = pdu + return pdu + + def got_pdu(self, reader): + if not reader.ready(): + return None + b1 = reader.get(self.header_struct.size) + b2 = reader.get(self.addr_type.size) + b3 = reader.get(self.asnum_struct.size) + version, pdu_type, length, self.announce, self.prefixlen, self.max_prefixlen = self.header_struct.unpack(b1) + if length != len(b1) + len(b2) + len(b3): + raise CorruptData("Got PDU length %d, expected %d" % (length, len(b1) + len(b2) + len(b3)), pdu = self) + self.prefix = self.addr_type(value = b2) + self.asn = self.asnum_struct.unpack(b3)[0] + assert b1 + b2 + b3 == self.to_pdu() + return self + + @staticmethod + def from_bgpdump(line, rib_dump): + try: + assert isinstance(rib_dump, bool) + fields = line.split("|") + + # Parse 
prefix, including figuring out IP protocol version + cls = ipv6_prefix if ":" in fields[5] else ipv4_prefix + self = cls() + self.timestamp = timestamp(fields[1]) + p, l = fields[5].split("/") + self.prefix = self.addr_type(p) + self.prefixlen = self.max_prefixlen = int(l) + + # Withdrawals don't have AS paths, so be careful + assert fields[2] == "B" if rib_dump else fields[2] in ("A", "W") + if fields[2] == "W": + self.asn = 0 + self.announce = 0 + else: + self.announce = 1 + if not fields[6] or "{" in fields[6] or "(" in fields[6]: + raise IgnoreThisRecord + a = fields[6].split()[-1] + if "." in a: + a = [int(s) for s in a.split(".")] + if len(a) != 2 or a[0] < 0 or a[0] > 65535 or a[1] < 0 or a[1] > 65535: + log("Bad dotted ASNum %r, ignoring record" % fields[6]) + raise IgnoreThisRecord + a = (a[0] << 16) | a[1] + else: + a = int(a) + self.asn = a + + self.check() + return self + + except IgnoreThisRecord: + raise + + except Exception, e: + log("Ignoring line %r: %s" % (line, e)) + raise IgnoreThisRecord + +class ipv4_prefix(prefix): + """ + IPv4 flavor of a prefix. + """ + pdu_type = 4 + addr_type = v4addr + +class ipv6_prefix(prefix): + """ + IPv6 flavor of a prefix. + """ + pdu_type = 6 + addr_type = v6addr + +class router_key(pdu): + """ + Router Key PDU. + """ + + pdu_type = 9 + + header_struct = struct.Struct("!BBBxL20sL") + + @classmethod + def from_text(cls, asnum, gski, key): + """ + Construct a router key from its text form. 
+ """ + + self = cls() + self.asn = long(asnum) + self.ski = base64.urlsafe_b64decode(gski + "=") + self.key = base64.b64decode(key) + self.announce = 1 + self.check() + return self + + def __str__(self): + return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, + base64.urlsafe_b64encode(self.ski).rstrip("="), + ":".join(("%02X" % ord(b) for b in self.to_pdu()))) + + def consume(self, client): + """ + Handle one incoming Router Key PDU + """ + + blather(self) + client.consume_routerkey(self) + + def check(self): + """ + Check attributes to make sure they're within range. + """ + + if self.announce not in (0, 1): + raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self) + if len(self.ski) != 20: + raise CorruptData("Implausible SKI length %d" % len(self.ski), pdu = self) + pdulen = self.header_struct.size + len(self.key) + if len(self.to_pdu()) != pdulen: + raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self) + + def to_pdu(self, announce = None): + if announce is not None: + assert announce in (0, 1) + elif self._pdu is not None: + return self._pdu + pdulen = self.header_struct.size + len(self.key) + pdu = (self.header_struct.pack(self.version, + self.pdu_type, + announce if announce is not None else self.announce, + pdulen, + self.ski, + self.asn) + + self.key) + if announce is None: + assert self._pdu is None + self._pdu = pdu + return pdu + + def got_pdu(self, reader): + if not reader.ready(): + return None + header = reader.get(self.header_struct.size) + version, pdu_type, self.announce, length, self.ski, self.asn = self.header_struct.unpack(header) + remaining = length - self.header_struct.size + if remaining <= 0: + raise CorruptData("Got PDU length %d, minimum is %d" % (length, self.header_struct.size + 1), pdu = self) + self.key = reader.get(remaining) + assert header + self.key == self.to_pdu() + return self + + +class error_report(pdu): + """ + Error Report PDU. 
+ """ + + pdu_type = 10 + + header_struct = struct.Struct("!BBHL") + string_struct = struct.Struct("!L") + + errors = { + 2 : "No Data Available" } + + fatal = { + 0 : "Corrupt Data", + 1 : "Internal Error", + 3 : "Invalid Request", + 4 : "Unsupported Protocol Version", + 5 : "Unsupported PDU Type", + 6 : "Withdrawal of Unknown Record", + 7 : "Duplicate Announcement Received" } + + assert set(errors) & set(fatal) == set() + + errors.update(fatal) + + codes = dict((v, k) for k, v in errors.items()) + + def __init__(self, errno = None, errpdu = None, errmsg = None): + assert errno is None or errno in self.errors + self.errno = errno + self.errpdu = errpdu + self.errmsg = errmsg if errmsg is not None or errno is None else self.errors[errno] + + def __str__(self): + return "[%s, error #%s: %r]" % (self.__class__.__name__, self.errno, self.errmsg) + + def to_counted_string(self, s): + return self.string_struct.pack(len(s)) + s + + def read_counted_string(self, reader, remaining): + assert remaining >= self.string_struct.size + n = self.string_struct.unpack(reader.get(self.string_struct.size))[0] + assert remaining >= self.string_struct.size + n + return n, reader.get(n), (remaining - self.string_struct.size - n) + + def to_pdu(self): + """ + Generate the wire format PDU for this error report. 
+ """ + if self._pdu is None: + assert isinstance(self.errno, int) + assert not isinstance(self.errpdu, error_report) + p = self.errpdu + if p is None: + p = "" + elif isinstance(p, pdu): + p = p.to_pdu() + assert isinstance(p, str) + pdulen = self.header_struct.size + self.string_struct.size * 2 + len(p) + len(self.errmsg) + self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.errno, pdulen) + self._pdu += self.to_counted_string(p) + self._pdu += self.to_counted_string(self.errmsg.encode("utf8")) + return self._pdu + + def got_pdu(self, reader): + if not reader.ready(): + return None + header = reader.get(self.header_struct.size) + version, pdu_type, self.errno, length = self.header_struct.unpack(header) + remaining = length - self.header_struct.size + self.pdulen, self.errpdu, remaining = self.read_counted_string(reader, remaining) + self.errlen, self.errmsg, remaining = self.read_counted_string(reader, remaining) + if length != self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen: + raise CorruptData("Got PDU length %d, expected %d" % ( + length, self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen)) + assert (header + + self.to_counted_string(self.errpdu) + + self.to_counted_string(self.errmsg.encode("utf8")) + == self.to_pdu()) + return self + + def serve(self, server): + """ + Received an error_report from client. Not much we can do beyond + logging it, then killing the connection if error was fatal. 
+ """ + log(self) + if self.errno in self.fatal: + log("[Shutting down due to reported fatal protocol error]") + sys.exit(1) + +pdu.pdu_map = dict((p.pdu_type, p) for p in (ipv4_prefix, ipv6_prefix, serial_notify, serial_query, reset_query, + cache_response, end_of_data, cache_reset, router_key, error_report)) + +class pdu_set(list): + """ + Object representing a set of PDUs, that is, one versioned and + (theoretically) consistant set of prefixes and router keys extracted + from rcynic's output. + """ + + @classmethod + def _load_file(cls, filename): + """ + Low-level method to read pdu_set from a file. + """ + self = cls() + f = open(filename, "rb") + r = read_buffer() + while True: + p = pdu.read_pdu(r) + while p is None: + b = f.read(r.needed()) + if b == "": + assert r.available() == 0 + return self + r.put(b) + p = r.retry() + self.append(p) + + @staticmethod + def seq_ge(a, b): + return ((a - b) % (1 << 32)) < (1 << 31) + + +class axfr_set(pdu_set): + """ + Object representing a complete set of PDUs, that is, one versioned + and (theoretically) consistant set of prefixes and router + certificates extracted from rcynic's output, all with the announce + field set. + """ + + @classmethod + def parse_rcynic(cls, rcynic_dir): + """ + Parse ROAS and router certificates fetched (and validated!) by + rcynic to create a new axfr_set. We use the scan_roas and + scan_routercerts utilities to parse the ASN.1, although we may go + back to parsing the files directly using the rpki.POW library code + some day. + """ + + self = cls() + self.serial = timestamp.now() + + try: + p = subprocess.Popen((scan_roas, rcynic_dir), stdout = subprocess.PIPE) + for line in p.stdout: + line = line.split() + asn = line[1] + self.extend(prefix.from_text(asn, addr) for addr in line[2:]) + except OSError, e: + sys.exit("Could not run %s, check your $PATH variable? 
(%s)" % (scan_roas, e)) + + try: + p = subprocess.Popen((scan_routercerts, rcynic_dir), stdout = subprocess.PIPE) + for line in p.stdout: + line = line.split() + gski = line[0] + key = line[-1] + self.extend(router_key.from_text(asn, gski, key) for asn in line[1:-1]) + except OSError, e: + sys.exit("Could not run %s, check your $PATH variable? (%s)" % (scan_routercerts, e)) + + self.sort() + for i in xrange(len(self) - 2, -1, -1): + if self[i] == self[i + 1]: + del self[i + 1] + return self + + @classmethod + def load(cls, filename): + """ + Load an axfr_set from a file, parse filename to obtain serial. + """ + fn1, fn2 = os.path.basename(filename).split(".") + assert fn1.isdigit() and fn2 == "ax" + self = cls._load_file(filename) + self.serial = timestamp(fn1) + return self + + def filename(self): + """ + Generate filename for this axfr_set. + """ + return "%d.ax" % self.serial + + @classmethod + def load_current(cls): + """ + Load current axfr_set. Return None if can't. + """ + serial = read_current()[0] + if serial is None: + return None + try: + return cls.load("%d.ax" % serial) + except IOError: + return None + + def save_axfr(self): + """ + Write axfr__set to file with magic filename. + """ + f = open(self.filename(), "wb") + for p in self: + f.write(p.to_pdu()) + f.close() + + def destroy_old_data(self): + """ + Destroy old data files, presumably because our nonce changed and + the old serial numbers are no longer valid. + """ + for i in glob.iglob("*.ix.*"): + os.unlink(i) + for i in glob.iglob("*.ax"): + if i != self.filename(): + os.unlink(i) + + def mark_current(self): + """ + Save current serial number and nonce, creating new nonce if + necessary. Creating a new nonce triggers cleanup of old state, as + the new nonce invalidates all old serial numbers. 
+ """ + old_serial, nonce = read_current() + if old_serial is None or self.seq_ge(old_serial, self.serial): + blather("Creating new nonce and deleting stale data") + nonce = new_nonce() + self.destroy_old_data() + write_current(self.serial, nonce) + + def save_ixfr(self, other): + """ + Comparing this axfr_set with an older one and write the resulting + ixfr_set to file with magic filename. Since we store pdu_sets + in sorted order, computing the difference is a trivial linear + comparison. + """ + f = open("%d.ix.%d" % (self.serial, other.serial), "wb") + old = other + new = self + len_old = len(old) + len_new = len(new) + i_old = i_new = 0 + while i_old < len_old and i_new < len_new: + if old[i_old] < new[i_new]: + f.write(old[i_old].to_pdu(announce = 0)) + i_old += 1 + elif old[i_old] > new[i_new]: + f.write(new[i_new].to_pdu(announce = 1)) + i_new += 1 + else: + i_old += 1 + i_new += 1 + for i in xrange(i_old, len_old): + f.write(old[i].to_pdu(announce = 0)) + for i in xrange(i_new, len_new): + f.write(new[i].to_pdu(announce = 1)) + f.close() + + def show(self): + """ + Print this axfr_set. 
+ """ + blather("# AXFR %d (%s)" % (self.serial, self.serial)) + for p in self: + blather(p) + + @staticmethod + def read_bgpdump(filename): + assert filename.endswith(".bz2") + blather("Reading %s" % filename) + bunzip2 = subprocess.Popen(("bzip2", "-c", "-d", filename), stdout = subprocess.PIPE) + bgpdump = subprocess.Popen(("bgpdump", "-m", "-"), stdin = bunzip2.stdout, stdout = subprocess.PIPE) + return bgpdump.stdout + + @classmethod + def parse_bgpdump_rib_dump(cls, filename): + assert os.path.basename(filename).startswith("ribs.") + self = cls() + self.serial = None + for line in cls.read_bgpdump(filename): + try: + pfx = prefix.from_bgpdump(line, rib_dump = True) + except IgnoreThisRecord: + continue + self.append(pfx) + self.serial = pfx.timestamp + if self.serial is None: + sys.exit("Failed to parse anything useful from %s" % filename) + self.sort() + for i in xrange(len(self) - 2, -1, -1): + if self[i] == self[i + 1]: + del self[i + 1] + return self + + def parse_bgpdump_update(self, filename): + assert os.path.basename(filename).startswith("updates.") + for line in self.read_bgpdump(filename): + try: + pfx = prefix.from_bgpdump(line, rib_dump = False) + except IgnoreThisRecord: + continue + announce = pfx.announce + pfx.announce = 1 + i = bisect.bisect_left(self, pfx) + if announce: + if i >= len(self) or pfx != self[i]: + self.insert(i, pfx) + else: + while i < len(self) and pfx.prefix == self[i].prefix and pfx.prefixlen == self[i].prefixlen: + del self[i] + self.serial = pfx.timestamp + +class ixfr_set(pdu_set): + """ + Object representing an incremental set of PDUs, that is, the + differences between one versioned and (theoretically) consistant set + of prefixes and router certificates extracted from rcynic's output + and another, with the announce fields set or cleared as necessary to + indicate the changes. + """ + + @classmethod + def load(cls, filename): + """ + Load an ixfr_set from a file, parse filename to obtain serials. 
+ """ + fn1, fn2, fn3 = os.path.basename(filename).split(".") + assert fn1.isdigit() and fn2 == "ix" and fn3.isdigit() + self = cls._load_file(filename) + self.from_serial = timestamp(fn3) + self.to_serial = timestamp(fn1) + return self + + def filename(self): + """ + Generate filename for this ixfr_set. + """ + return "%d.ix.%d" % (self.to_serial, self.from_serial) + + def show(self): + """ + Print this ixfr_set. + """ + blather("# IXFR %d (%s) -> %d (%s)" % (self.from_serial, self.from_serial, + self.to_serial, self.to_serial)) + for p in self: + blather(p) + +class file_producer(object): + """ + File-based producer object for asynchat. + """ + + def __init__(self, handle, buffersize): + self.handle = handle + self.buffersize = buffersize + + def more(self): + return self.handle.read(self.buffersize) + +class pdu_channel(asynchat.async_chat): + """ + asynchat subclass that understands our PDUs. This just handles + network I/O. Specific engines (client, server) should be subclasses + of this with methods that do something useful with the resulting + PDUs. + """ + + def __init__(self, conn = None): + asynchat.async_chat.__init__(self, conn) + self.reader = read_buffer() + + def start_new_pdu(self): + """ + Start read of a new PDU. + """ + try: + p = pdu.read_pdu(self.reader) + while p is not None: + self.deliver_pdu(p) + p = pdu.read_pdu(self.reader) + except PDUException, e: + self.push_pdu(e.make_error_report()) + self.close_when_done() + else: + assert not self.reader.ready() + self.set_terminator(self.reader.needed()) + + def collect_incoming_data(self, data): + """ + Collect data into the read buffer. + """ + self.reader.put(data) + + def found_terminator(self): + """ + Got requested data, see if we now have a PDU. If so, pass it + along, then restart cycle for a new PDU. 
+ """ + p = self.reader.retry() + if p is None: + self.set_terminator(self.reader.needed()) + else: + self.deliver_pdu(p) + self.start_new_pdu() + + def push_pdu(self, pdu): + """ + Write PDU to stream. + """ + try: + self.push(pdu.to_pdu()) + except OSError, e: + if e.errno != errno.EAGAIN: + raise + + def push_file(self, f): + """ + Write content of a file to stream. + """ + try: + self.push_with_producer(file_producer(f, self.ac_out_buffer_size)) + except OSError, e: + if e.errno != errno.EAGAIN: + raise + + def log(self, msg): + """ + Intercept asyncore's logging. + """ + log(msg) + + def log_info(self, msg, tag = "info"): + """ + Intercept asynchat's logging. + """ + log("asynchat: %s: %s" % (tag, msg)) + + def handle_error(self): + """ + Handle errors caught by asyncore main loop. + """ + c, e = sys.exc_info()[:2] + if backtrace_on_exceptions or e == 0: + for line in traceback.format_exc().splitlines(): + log(line) + else: + log("[Exception: %s: %s]" % (c.__name__, e)) + log("[Exiting after unhandled exception]") + sys.exit(1) + + def init_file_dispatcher(self, fd): + """ + Kludge to plug asyncore.file_dispatcher into asynchat. Call from + subclass's __init__() method, after calling + pdu_channel.__init__(), and don't read this on a full stomach. + """ + self.connected = True + self._fileno = fd + self.socket = asyncore.file_wrapper(fd) + self.add_channel() + flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0) + flags = flags | os.O_NONBLOCK + fcntl.fcntl(fd, fcntl.F_SETFL, flags) + + def handle_close(self): + """ + Exit when channel closed. + """ + asynchat.async_chat.handle_close(self) + sys.exit(0) + +class server_write_channel(pdu_channel): + """ + Kludge to deal with ssh's habit of sometimes (compile time option) + invoking us with two unidirectional pipes instead of one + bidirectional socketpair. All the server logic is in the + server_channel class, this class just deals with sending the + server's output to a different file descriptor. 
+ """ + + def __init__(self): + """ + Set up stdout. + """ + pdu_channel.__init__(self) + self.init_file_dispatcher(sys.stdout.fileno()) + + def readable(self): + """ + This channel is never readable. + """ + return False + +class server_channel(pdu_channel): + """ + Server protocol engine, handles upcalls from pdu_channel to + implement protocol logic. + """ + + def __init__(self): + """ + Set up stdin and stdout as connection and start listening for + first PDU. + """ + pdu_channel.__init__(self) + self.init_file_dispatcher(sys.stdin.fileno()) + self.writer = server_write_channel() + self.get_serial() + self.start_new_pdu() + + def writable(self): + """ + This channel is never writable. + """ + return False + + def push(self, data): + """ + Redirect to writer channel. + """ + return self.writer.push(data) + + def push_with_producer(self, producer): + """ + Redirect to writer channel. + """ + return self.writer.push_with_producer(producer) + + def push_pdu(self, pdu): + """ + Redirect to writer channel. + """ + return self.writer.push_pdu(pdu) + + def push_file(self, f): + """ + Redirect to writer channel. + """ + return self.writer.push_file(f) + + def deliver_pdu(self, pdu): + """ + Handle received PDU. + """ + pdu.serve(self) + + def get_serial(self): + """ + Read, cache, and return current serial number, or None if we can't + find the serial number file. The latter condition should never + happen, but maybe we got started in server mode while the cronjob + mode instance is still building its database. + """ + self.current_serial, self.current_nonce = read_current() + return self.current_serial + + def check_serial(self): + """ + Check for a new serial number. + """ + old_serial = self.current_serial + return old_serial != self.get_serial() + + def notify(self, data = None): + """ + Cronjob instance kicked us, send a notify message. 
+ """ + if self.check_serial() is not None: + self.push_pdu(serial_notify(serial = self.current_serial, nonce = self.current_nonce)) + else: + log("Cronjob kicked me without a valid current serial number") + +class client_channel(pdu_channel): + """ + Client protocol engine, handles upcalls from pdu_channel. + """ + + current_serial = None + current_nonce = None + sql = None + host = None + port = None + cache_id = None + + def __init__(self, sock, proc, killsig, host, port): + self.killsig = killsig + self.proc = proc + self.host = host + self.port = port + pdu_channel.__init__(self, conn = sock) + self.start_new_pdu() + + @classmethod + def ssh(cls, host, port): + """ + Set up ssh connection and start listening for first PDU. + """ + args = ("ssh", "-p", port, "-s", host, "rpki-rtr") + blather("[Running ssh: %s]" % " ".join(args)) + s = socket.socketpair() + return cls(sock = s[1], + proc = subprocess.Popen(args, executable = "/usr/bin/ssh", + stdin = s[0], stdout = s[0], close_fds = True), + killsig = signal.SIGKILL, + host = host, port = port) + + @classmethod + def tcp(cls, host, port): + """ + Set up TCP connection and start listening for first PDU. + """ + blather("[Starting raw TCP connection to %s:%s]" % (host, port)) + try: + addrinfo = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM) + except socket.error, e: + blather("[socket.getaddrinfo() failed: %s]" % e) + else: + for ai in addrinfo: + af, socktype, proto, cn, sa = ai + blather("[Trying addr %s port %s]" % sa[:2]) + try: + s = socket.socket(af, socktype, proto) + except socket.error, e: + blather("[socket.socket() failed: %s]" % e) + continue + try: + s.connect(sa) + except socket.error, e: + blather("[socket.connect() failed: %s]" % e) + s.close() + continue + return cls(sock = s, proc = None, killsig = None, + host = host, port = port) + sys.exit(1) + + @classmethod + def loopback(cls, host, port): + """ + Set up loopback connection and start listening for first PDU. 
+ """ + s = socket.socketpair() + blather("[Using direct subprocess kludge for testing]") + argv = [sys.executable, sys.argv[0], "--server"] + if "--syslog" in sys.argv: + argv.extend(("--syslog", sys.argv[sys.argv.index("--syslog") + 1])) + return cls(sock = s[1], + proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True), + killsig = signal.SIGINT, + host = host, port = port) + + @classmethod + def tls(cls, host, port): + """ + Set up TLS connection and start listening for first PDU. + + NB: This uses OpenSSL's "s_client" command, which does not + check server certificates properly, so this is not suitable for + production use. Fixing this would be a trivial change, it just + requires using a client program which does check certificates + properly (eg, gnutls-cli, or stunnel's client mode if that works + for such purposes this week). + """ + args = ("openssl", "s_client", "-tls1", "-quiet", "-connect", "%s:%s" % (host, port)) + blather("[Running: %s]" % " ".join(args)) + s = socket.socketpair() + return cls(sock = s[1], + proc = subprocess.Popen(args, stdin = s[0], stdout = s[0], close_fds = True), + killsig = signal.SIGKILL, + host = host, port = port) + + def setup_sql(self, sqlname): + """ + Set up an SQLite database to contain the table we receive. If + necessary, we will create the database. 
+ """ + import sqlite3 + missing = not os.path.exists(sqlname) + self.sql = sqlite3.connect(sqlname, detect_types = sqlite3.PARSE_DECLTYPES) + self.sql.text_factory = str + cur = self.sql.cursor() + cur.execute("PRAGMA foreign_keys = on") + if missing: + cur.execute(''' + CREATE TABLE cache ( + cache_id INTEGER PRIMARY KEY NOT NULL, + host TEXT NOT NULL, + port TEXT NOT NULL, + nonce INTEGER, + serial INTEGER, + updated INTEGER, + UNIQUE (host, port))''') + cur.execute(''' + CREATE TABLE prefix ( + cache_id INTEGER NOT NULL + REFERENCES cache(cache_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + asn INTEGER NOT NULL, + prefix TEXT NOT NULL, + prefixlen INTEGER NOT NULL, + max_prefixlen INTEGER NOT NULL, + UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''') + + cur.execute(''' + CREATE TABLE routerkey ( + cache_id INTEGER NOT NULL + REFERENCES cache(cache_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + asn INTEGER NOT NULL, + ski TEXT NOT NULL, + key TEXT NOT NULL, + UNIQUE (cache_id, asn, ski), + UNIQUE (cache_id, asn, key))''') + + cur.execute("SELECT cache_id, nonce, serial FROM cache WHERE host = ? AND port = ?", + (self.host, self.port)) + try: + self.cache_id, self.current_nonce, self.current_serial = cur.fetchone() + except TypeError: + cur.execute("INSERT INTO cache (host, port) VALUES (?, ?)", (self.host, self.port)) + self.cache_id = cur.lastrowid + self.sql.commit() + + def cache_reset(self): + """ + Handle cache_reset actions. + """ + self.current_serial = None + if self.sql: + cur = self.sql.cursor() + cur.execute("DELETE FROM prefix WHERE cache_id = ?", (self.cache_id,)) + cur.execute("UPDATE cache SET serial = NULL WHERE cache_id = ?", (self.cache_id,)) + + def end_of_data(self, serial, nonce): + """ + Handle end_of_data actions. 
+ """ + self.current_serial = serial + self.current_nonce = nonce + if self.sql: + self.sql.execute("UPDATE cache SET serial = ?, nonce = ?, updated = datetime('now') WHERE cache_id = ?", + (serial, nonce, self.cache_id)) + self.sql.commit() + + def consume_prefix(self, prefix): + """ + Handle one prefix PDU. + """ + if self.sql: + values = (self.cache_id, prefix.asn, str(prefix.prefix), prefix.prefixlen, prefix.max_prefixlen) + if prefix.announce: + self.sql.execute("INSERT INTO prefix (cache_id, asn, prefix, prefixlen, max_prefixlen) " + "VALUES (?, ?, ?, ?, ?)", + values) + else: + self.sql.execute("DELETE FROM prefix " + "WHERE cache_id = ? AND asn = ? AND prefix = ? AND prefixlen = ? AND max_prefixlen = ?", + values) + + + def consume_routerkey(self, routerkey): + """ + Handle one Router Key PDU. + """ + + if self.sql: + values = (self.cache_id, routerkey.asn, + base64.urlsafe_b64encode(routerkey.ski).rstrip("="), + base64.b64encode(routerkey.key)) + if routerkey.announce: + self.sql.execute("INSERT INTO routerkey (cache_id, asn, ski, key) " + "VALUES (?, ?, ?, ?)", + values) + else: + self.sql.execute("DELETE FROM routerkey " + "WHERE cache_id = ? AND asn = ? AND (ski = ? OR key = ?)", + values) + + + def deliver_pdu(self, pdu): + """ + Handle received PDU. + """ + pdu.consume(self) + + def push_pdu(self, pdu): + """ + Log outbound PDU then write it to stream. + """ + blather(pdu) + pdu_channel.push_pdu(self, pdu) + + def cleanup(self): + """ + Force clean up this client's child process. If everything goes + well, child will have exited already before this method is called, + but we may need to whack it with a stick if something breaks. + """ + if self.proc is not None and self.proc.returncode is None: + try: + os.kill(self.proc.pid, self.killsig) + except OSError: + pass + + def handle_close(self): + """ + Intercept close event so we can log it, then shut down. 
+ """ + blather("Server closed channel") + pdu_channel.handle_close(self) + +class kickme_channel(asyncore.dispatcher): + """ + asyncore dispatcher for the PF_UNIX socket that cronjob mode uses to + kick servers when it's time to send notify PDUs to clients. + """ + + def __init__(self, server): + asyncore.dispatcher.__init__(self) + self.server = server + self.sockname = "%s.%d" % (kickme_base, os.getpid()) + self.create_socket(socket.AF_UNIX, socket.SOCK_DGRAM) + try: + self.bind(self.sockname) + os.chmod(self.sockname, 0660) + except socket.error, e: + log("Couldn't bind() kickme socket: %r" % e) + self.close() + except OSError, e: + log("Couldn't chmod() kickme socket: %r" % e) + + def writable(self): + """ + This socket is read-only, never writable. + """ + return False + + def handle_connect(self): + """ + Ignore connect events (not very useful on datagram socket). + """ + pass + + def handle_read(self): + """ + Handle receipt of a datagram. + """ + data = self.recv(512) + self.server.notify(data) + + def cleanup(self): + """ + Clean up this dispatcher's socket. + """ + self.close() + try: + os.unlink(self.sockname) + except: + pass + + def log(self, msg): + """ + Intercept asyncore's logging. + """ + log(msg) + + def log_info(self, msg, tag = "info"): + """ + Intercept asyncore's logging. + """ + log("asyncore: %s: %s" % (tag, msg)) + + def handle_error(self): + """ + Handle errors caught by asyncore main loop. + """ + c, e = sys.exc_info()[:2] + if backtrace_on_exceptions or e == 0: + for line in traceback.format_exc().splitlines(): + log(line) + else: + log("[Exception: %s: %s]" % (c.__name__, e)) + log("[Exiting after unhandled exception]") + sys.exit(1) + + +def hostport_tag(): + """ + Construct hostname/address + port when we're running under a + protocol we understand well enough to do that. This is all + kludgery. Just grit your teeth, or perhaps just close your eyes. 
+ """ + + proto = None + + if proto is None: + try: + host, port = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM).getpeername() + proto = "tcp" + except: + pass + + if proto is None: + try: + host, port = socket.fromfd(0, socket.AF_INET6, socket.SOCK_STREAM).getpeername()[0:2] + proto = "tcp" + except: + pass + + if proto is None: + try: + host, port = os.environ["SSH_CONNECTION"].split()[0:2] + proto = "ssh" + except: + pass + + if proto is None: + try: + host, port = os.environ["REMOTE_HOST"], os.getenv("REMOTE_PORT") + proto = "ssl" + except: + pass + + if proto is None: + return "" + elif not port: + return "/%s/%s" % (proto, host) + elif ":" in host: + return "/%s/%s.%s" % (proto, host, port) + else: + return "/%s/%s:%s" % (proto, host, port) + + +def kick_all(serial): + """ + Kick any existing server processes to wake them up. + """ + + try: + os.stat(kickme_dir) + except OSError: + blather('# Creating directory "%s"' % kickme_dir) + os.makedirs(kickme_dir) + + msg = "Good morning, serial %d is ready" % serial + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + for name in glob.iglob("%s.*" % kickme_base): + try: + blather("# Kicking %s" % name) + sock.sendto(msg, name) + except socket.error: + try: + blather("# Failed to kick %s, probably dead socket, attempting cleanup" % name) + os.unlink(name) + except Exception, e: + blather("# Couldn't unlink suspected dead socket %s: %s" % (name, e)) + except Exception, e: + log("# Failed to kick %s and don't understand why: %s" % (name, e)) + sock.close() + +def cronjob_main(argv): + """ + Run this mode right after rcynic to do the real work of groveling + through the ROAs that rcynic collects and translating that data into + the form used in the rpki-router protocol. This mode prepares both + full dumps (AXFR) and incremental dumps against a specific prior + version (IXFR). [Terminology here borrowed from DNS, as is much of + the protocol design.] 
Finally, this mode kicks any active servers, + so that they can notify their clients that a new version is + available. + + Run this in the directory where you want to write its output files, + which should also be the directory in which you run this program in + --server mode. + + This mode takes one argument on the command line, which specifies + the directory name of rcynic's authenticated output tree (normally + $somewhere/rcynic-data/authenticated/). + """ + + if len(argv) != 1: + sys.exit("Expected one argument, got %r" % (argv,)) + + old_ixfrs = glob.glob("*.ix.*") + + current = read_current()[0] + cutoff = timestamp.now(-(24 * 60 * 60)) + for f in glob.iglob("*.ax"): + t = timestamp(int(f.split(".")[0])) + if t < cutoff and t != current: + blather("# Deleting old file %s, timestamp %s" % (f, t)) + os.unlink(f) + + pdus = axfr_set.parse_rcynic(argv[0]) + if pdus == axfr_set.load_current(): + blather("# No change, new version not needed") + sys.exit() + pdus.save_axfr() + for axfr in glob.iglob("*.ax"): + if axfr != pdus.filename(): + pdus.save_ixfr(axfr_set.load(axfr)) + pdus.mark_current() + + blather("# New serial is %d (%s)" % (pdus.serial, pdus.serial)) + + kick_all(pdus.serial) + + old_ixfrs.sort() + for ixfr in old_ixfrs: + try: + blather("# Deleting old file %s" % ixfr) + os.unlink(ixfr) + except OSError: + pass + +def show_main(argv): + """ + Display dumps created by --cronjob mode in textual form. + Intended only for debugging. + + This mode takes no command line arguments. Run it in the directory + where you ran --cronjob mode. + """ + + if argv: + sys.exit("Unexpected arguments: %r" % (argv,)) + + g = glob.glob("*.ax") + g.sort() + for f in g: + axfr_set.load(f).show() + + g = glob.glob("*.ix.*") + g.sort() + for f in g: + ixfr_set.load(f).show() + +def server_main(argv): + """ + Implement the server side of the rpki-router protocol. 
Other than + one PF_UNIX socket inode, this doesn't write anything to disk, so it + can be run with minimal privileges. Most of the hard work has + already been done in --cronjob mode, so all that this mode has to do + is serve up the results. + + In production use this server should run under sshd. The subsystem + mechanism in sshd does not allow us to pass arguments on the command + line, so setting this up might require a wrapper script, but in + production use you will probably want to lock down the public key + used to authenticate the ssh session so that it can only run this + one command, in which case you can just specify the full command + including any arguments in the authorized_keys file. + + Unless you do something special, sshd will have this program running + in whatever it thinks is the home directory associated with the + username given in the ssh protocol setup, so it may be easiest to + set this up so that the home directory sshd puts this program into + is the one where --cronjob left its files for this mode to pick up. + + This mode must be run in the directory where you ran --cronjob mode. + + This mode takes one optional argument: if provided, the argument is + the name of a directory to which the program should chdir() on + startup; this may simplify setup when running under inetd. + + The server is event driven, so everything interesting happens in the + channel classes. + """ + + blather("[Starting]") + if len(argv) > 1: + sys.exit("Unexpected arguments: %r" % (argv,)) + if argv: + try: + os.chdir(argv[0]) + except OSError, e: + sys.exit(e) + kickme = None + try: + server = server_channel() + kickme = kickme_channel(server = server) + asyncore.loop(timeout = None) + except KeyboardInterrupt: + sys.exit(0) + finally: + if kickme is not None: + kickme.cleanup() + + +def listener_tcp_main(argv): + """ + Simple plain-TCP listener. 
Listens on a specified TCP port, upon + receiving a connection, forks the process and starts child executing + at server_main(). + + First argument (required) is numeric port number. + + Second argument (optional) is directory, like --server. + + NB: plain-TCP is completely insecure. We only implement this + because it's all that the routers currently support. In theory, we + will all be running TCP-AO in the future, at which point this will + go away. + """ + + # Perhaps we should daemonize? Deal with that later. + + if len(argv) > 2: + sys.exit("Unexpected arguments: %r" % (argv,)) + try: + port = int(argv[0]) if argv[0].isdigit() else socket.getservbyname(argv[0], "tcp") + except: + sys.exit("Couldn't parse port number on which to listen") + if len(argv) > 1: + try: + os.chdir(argv[1]) + except OSError, e: + sys.exit(e) + listener = None + try: + listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + listener.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) + except: + if listener is not None: + listener.close() + listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except AttributeError: + pass + listener.bind(("", port)) + listener.listen(5) + blather("[Listening on port %s]" % port) + while True: + s, ai = listener.accept() + blather("[Received connection from %r]" % (ai,)) + pid = os.fork() + if pid == 0: + os.dup2(s.fileno(), 0) + os.dup2(s.fileno(), 1) + s.close() + #os.closerange(3, os.sysconf("SC_OPEN_MAX")) + global log_tag + log_tag = "rtr-origin/server" + hostport_tag() + syslog.closelog() + syslog.openlog(log_tag, syslog.LOG_PID, syslog_facility) + server_main(()) + sys.exit() + else: + blather("[Spawned server %d]" % pid) + try: + while True: + pid, status = os.waitpid(0, os.WNOHANG) + if pid: + blather("[Server %s exited]" % pid) + else: + break + except: + pass + + +def client_main(argv): 
+ """ + Toy client, intended only for debugging. + + This program takes one or more arguments. The first argument + determines what kind of connection it should open to the server, the + remaining arguments are connection details specific to this + particular type of connection. + + If the first argument is "loopback", the client will run a copy of + the server directly in a subprocess, and communicate with it via a + PF_UNIX socket pair. This sub-mode takes no further arguments. + + If the first argument is "ssh", the client will attempt to run ssh + in as subprocess to connect to the server using the ssh subsystem + mechanism as specified for this protocol. The remaining arguments + should be a hostname (or IP address in a form acceptable to ssh) and + a TCP port number. + + If the first argument is "tcp", the client will attempt to open a + direct (and completely insecure!) TCP connection to the server. + The remaining arguments should be a hostname (or IP address) and + a TCP port number. + + If the first argument is "tls", the client will attempt to open a + TLS connection to the server. The remaining arguments should be a + hostname (or IP address) and a TCP port number. + + An optional final name is the name of a file containing a SQLite + database in which to store the received table. If specified, this + database will be created if missing. 
+ """ + + blather("[Startup]") + client = None + if not argv: + argv = ["loopback"] + proto = argv[0] + if proto == "loopback" and len(argv) in (1, 2): + constructor = client_channel.loopback + host, port = "", "" + sqlname = None if len(argv) == 1 else argv[1] + elif proto in ("ssh", "tcp", "tls") and len(argv) in (3, 4): + constructor = getattr(client_channel, proto) + host, port = argv[1:3] + sqlname = None if len(argv) == 3 else argv[3] + else: + sys.exit("Unexpected arguments: %s" % " ".join(argv)) + + try: + client = constructor(host, port) + if sqlname: + client.setup_sql(sqlname) + while True: + if client.current_serial is None or client.current_nonce is None: + client.push_pdu(reset_query()) + else: + client.push_pdu(serial_query(serial = client.current_serial, nonce = client.current_nonce)) + wakeup = time.time() + 600 + while True: + remaining = wakeup - time.time() + if remaining < 0: + break + asyncore.loop(timeout = remaining, count = 1) + + except KeyboardInterrupt: + sys.exit(0) + finally: + if client is not None: + client.cleanup() + +def bgpdump_convert_main(argv): + """ + Simulate route origin data from a set of BGP dump files. + + * DANGER WILL ROBINSON! * + * DEBUGGING AND TEST USE ONLY! * + + argv is an ordered list of filenames. Each file must be a BGP RIB + dumps, a BGP UPDATE dumps, or an AXFR dump in the format written by + this program's --cronjob command. The first file must be a RIB dump + or AXFR dump, it cannot be an UPDATE dump. Output will be a set of + AXFR and IXFR files with timestamps derived from the BGP dumps, + which can be used as input to this program's --server command for + test purposes. SUCH DATA PROVIDE NO SECURITY AT ALL. + + You have been warned. 
+ """ + + first = True + db = None + axfrs = [] + + for filename in argv: + + if filename.endswith(".ax"): + blather("Reading %s" % filename) + db = axfr_set.load(filename) + + elif os.path.basename(filename).startswith("ribs."): + db = axfr_set.parse_bgpdump_rib_dump(filename) + db.save_axfr() + + elif not first: + assert db is not None + db.parse_bgpdump_update(filename) + db.save_axfr() + + else: + sys.exit("First argument must be a RIB dump or .ax file, don't know what to do with %s" % filename) + + blather("DB serial now %d (%s)" % (db.serial, db.serial)) + if first and read_current() == (None, None): + db.mark_current() + first = False + + for axfr in axfrs: + blather("Loading %s" % axfr) + ax = axfr_set.load(axfr) + blather("Computing changes from %d (%s) to %d (%s)" % (ax.serial, ax.serial, db.serial, db.serial)) + db.save_ixfr(ax) + del ax + + axfrs.append(db.filename()) + + +def bgpdump_select_main(argv): + """ + Simulate route origin data from a set of BGP dump files. + + * DANGER WILL ROBINSON! * + * DEBUGGING AND TEST USE ONLY! * + + Set current serial number to correspond to an .ax file created by + converting BGP dump files. SUCH DATA PROVIDE NO SECURITY AT ALL. + + You have been warned. + """ + + serial = None + try: + head, sep, tail = os.path.basename(argv[0]).partition(".") + if len(argv) == 1 and head.isdigit() and sep == "." and tail == "ax": + serial = timestamp(head) + except: + pass + if serial is None: + sys.exit("Argument must be name of a .ax file") + + nonce = read_current()[1] + if nonce is None: + nonce = new_nonce() + + write_current(serial, nonce) + kick_all(serial) + + +class bgpsec_replay_clock(object): + """ + Internal clock for replaying BGP dump files. + + * DANGER WILL ROBINSON! * + * DEBUGGING AND TEST USE ONLY! * + + This class replaces the normal on-disk serial number mechanism with + an in-memory version based on pre-computed data. + bgpdump_server_main() uses this hack to replay historical data for + testing purposes. 
DO NOT USE THIS IN PRODUCTION. + + You have been warned. + """ + + def __init__(self): + self.timestamps = [timestamp(int(f.split(".")[0])) for f in glob.iglob("*.ax")] + self.timestamps.sort() + self.offset = self.timestamps[0] - int(time.time()) + self.nonce = new_nonce() + + def __nonzero__(self): + return len(self.timestamps) > 0 + + def now(self): + return timestamp.now(self.offset) + + def read_current(self): + now = self.now() + while len(self.timestamps) > 1 and now >= self.timestamps[1]: + del self.timestamps[0] + return self.timestamps[0], self.nonce + + def siesta(self): + now = self.now() + if len(self.timestamps) <= 1: + return None + elif now < self.timestamps[1]: + return self.timestamps[1] - now + else: + return 1 + + +def bgpdump_server_main(argv): + """ + Simulate route origin data from a set of BGP dump files. + + * DANGER WILL ROBINSON! * + * DEBUGGING AND TEST USE ONLY! * + + This is a clone of server_main() which replaces the external serial + number updates triggered via the kickme channel by cronjob_main with + an internal clocking mechanism to replay historical test data. + + DO NOT USE THIS IN PRODUCTION. + + You have been warned. + """ + + blather("[Starting]") + if len(argv) > 1: + sys.exit("Unexpected arguments: %r" % (argv,)) + if argv: + try: + os.chdir(argv[0]) + except OSError, e: + sys.exit(e) + # + # Yes, this really does replace a global function with a bound + # method to our clock object. Fun stuff, huh? 
+ # + global read_current + clock = bgpsec_replay_clock() + read_current = clock.read_current + # + try: + server = server_channel() + old_serial = server.get_serial() + blather("[Starting at serial %d (%s)]" % (old_serial, old_serial)) + while clock: + new_serial = server.get_serial() + if old_serial != new_serial: + blather("[Serial bumped from %d (%s) to %d (%s)]" % (old_serial, old_serial, new_serial, new_serial)) + server.notify() + old_serial = new_serial + asyncore.loop(timeout = clock.siesta(), count = 1) + except KeyboardInterrupt: + sys.exit(0) + +# Figure out where the scan_roas utility program is today +try: + # Set from autoconf + scan_roas = rpki.autoconf.scan_roas +except NameError: + # Source directory + scan_roas = os.path.normpath(os.path.join(sys.path[0], "..", "utils", + "scan_roas", "scan_roas")) +# If that didn't work, use $PATH and hope for the best +if not os.path.exists(scan_roas): + scan_roas = "scan_roas" + +# Same thing for scan_routercerts +try: + # Set from autoconf + scan_routercerts = rpki.autoconf.scan_routercerts +except NameError: + # Source directory + scan_routercerts = os.path.normpath(os.path.join(sys.path[0], "..", "utils", + "scan_routercerts", "scan_routercerts")) +if not os.path.exists(scan_routercerts): + scan_routercerts = "scan_routercerts" + +force_zero_nonce = False + +kickme_dir = "sockets" +kickme_base = os.path.join(kickme_dir, "kickme") + +main_dispatch = { + "cronjob" : cronjob_main, + "client" : client_main, + "server" : server_main, + "show" : show_main, + "listener_tcp" : listener_tcp_main, + "bgpdump_convert" : bgpdump_convert_main, + "bgpdump_select" : bgpdump_select_main, + "bgpdump_server" : bgpdump_server_main } + +def usage(msg = None): + f = sys.stderr if msg else sys.stdout + f.write("Usage: %s [options] --mode [arguments]\n" % sys.argv[0]) + f.write("\n") + f.write("where options are zero or more of:\n") + f.write("\n") + f.write("--syslog facility.warning_priority[.info_priority]\n") + f.write("\n") 
+ f.write("--zero-nonce\n") + f.write("\n") + f.write("and --mode is one of:\n") + f.write("\n") + for name, func in main_dispatch.iteritems(): + f.write("--%s:\n" % name) + f.write(func.__doc__) + f.write("\n") + sys.exit(msg) + +if __name__ == "__main__": + + os.environ["TZ"] = "UTC" + time.tzset() + + mode = None + + syslog_facility, syslog_warning, syslog_info = syslog.LOG_DAEMON, syslog.LOG_WARNING, syslog.LOG_INFO + + opts, argv = getopt.getopt(sys.argv[1:], "hs:z?", ["help", "syslog=", "zero-nonce"] + main_dispatch.keys()) + for o, a in opts: + if o in ("-h", "--help", "-?"): + usage() + elif o in ("-z", "--zero-nonce"): + force_zero_nonce = True + elif o in ("-s", "--syslog"): + try: + a = [getattr(syslog, "LOG_" + i.upper()) for i in a.split(".")] + if len(a) == 2: + a.append(a[1]) + syslog_facility, syslog_warning, syslog_info = a + if syslog_facility < 8 or syslog_warning >= 8 or syslog_info >= 8: + raise ValueError + except: + usage("Bad value specified for --syslog option") + elif len(o) > 2 and o[2:] in main_dispatch: + if mode is not None: + sys.exit("Conflicting modes specified") + mode = o[2:] + + if mode is None: + usage("No mode specified") + + log_tag = "rtr-origin/" + mode + + if mode in ("server", "bgpdump_server"): + log_tag += hostport_tag() + + if mode in ("cronjob", "server" , "bgpdump_server"): + syslog.openlog(log_tag, syslog.LOG_PID, syslog_facility) + def log(msg): + return syslog.syslog(syslog_warning, str(msg)) + def blather(msg): + return syslog.syslog(syslog_info, str(msg)) + + elif mode == "show": + def log(msg): + try: + os.write(sys.stdout.fileno(), "%s\n" % msg) + except OSError, e: + if e.errno != errno.EPIPE: + raise + blather = log + + else: + def log(msg): + sys.stderr.write("%s %s[%d]: %s\n" % (time.strftime("%F %T"), log_tag, os.getpid(), msg)) + blather = log + + main_dispatch[mode](argv) diff --git a/rp/rpki-rtr/rules.darwin.mk b/rp/rpki-rtr/rules.darwin.mk new file mode 100644 index 00000000..1230db92 --- /dev/null +++ 
b/rp/rpki-rtr/rules.darwin.mk @@ -0,0 +1,9 @@ +# $Id$ + +install-always: install-binary + +install-postconf: install-listener + +install-listener: + @echo "No rule for $@ on this platform (yet), you'll have to do that yourself if it matters." + diff --git a/rp/rpki-rtr/rules.freebsd.mk b/rp/rpki-rtr/rules.freebsd.mk new file mode 100644 index 00000000..df99da47 --- /dev/null +++ b/rp/rpki-rtr/rules.freebsd.mk @@ -0,0 +1,37 @@ +# $Id$ + +install-always: install-binary + +install-postconf: install-listener + +install-listener: .FORCE + @if /usr/bin/egrep -q '^rpki-rtr' /etc/services ; \ + then \ + echo "You already have a /etc/services entry for rpki-rtr, so I will use it."; \ + elif echo >>/etc/services "rpki-rtr ${RPKI_RTR_PORT}/tcp #RFC 6810" ; \ + then \ + echo "Added rpki-rtr to /etc/services."; \ + else \ + echo "Adding rpki-rtr to /etc/services failed, please fix this, then try again."; \ + exit 1; \ + fi + @if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp[ ]" /etc/inetd.conf; \ + then \ + echo "You already have an inetd.conf entry for rpki-rtr on TCPv4, so I will use it."; \ + elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpkirtr /usr/local/bin/rtr-origin rtr-origin --server /var/rcynic/rpki-rtr"; \ + then \ + echo "Added rpki-rtr for TCPv4 to /etc/inetd.conf."; \ + else \ + echo "Adding rpki-rtr for TCPv4 to /etc/inetd.conf failed, please fix this, then try again."; \ + exit 1; \ + fi + @if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp6[ ]" /etc/inetd.conf; \ + then \ + echo "You already have an inetd.conf entry for rpki-rtr on TCPv6, so I will use it."; \ + elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpkirtr /usr/local/bin/rtr-origin rtr-origin --server /var/rcynic/rpki-rtr"; \ + then \ + echo "Added rpki-rtr for TCPv6 to /etc/inetd.conf."; \ + else \ + echo "Adding rpki-rtr for TCPv6 to /etc/inetd.conf failed, please fix this, then try again."; \ + exit 1; \ + fi diff --git a/rp/rpki-rtr/rules.linux.mk b/rp/rpki-rtr/rules.linux.mk new 
file mode 100644 index 00000000..23f90f69 --- /dev/null +++ b/rp/rpki-rtr/rules.linux.mk @@ -0,0 +1,29 @@ +# $Id$ + +install-always: install-binary install-listener + +install-postconf: + @true + +# Only need to make listener if not already present + +install-listener: ${DESTDIR}/etc/xinetd.d/rpki-rtr + +${DESTDIR}/etc/xinetd.d/rpki-rtr: + @${AWK} 'BEGIN { \ + print "service rpki-rtr"; \ + print "{"; \ + print " type = UNLISTED"; \ + print " flags = IPv4"; \ + print " socket_type = stream"; \ + print " protocol = tcp"; \ + print " port = ${RPKI_RTR_PORT}"; \ + print " wait = no"; \ + print " user = rpkirtr"; \ + print " server = ${bindir}/${BIN}"; \ + print " server_args = --server /var/rcynic/rpki-rtr"; \ + print "}"; \ + }' >xinetd.rpki-rtr + ${INSTALL} -d ${DESTDIR}/etc/xinetd.d + ${INSTALL} -m 644 xinetd.rpki-rtr $@ + rm xinetd.rpki-rtr diff --git a/rp/rpki-rtr/rules.unknown.mk b/rp/rpki-rtr/rules.unknown.mk new file mode 100644 index 00000000..fb16e93a --- /dev/null +++ b/rp/rpki-rtr/rules.unknown.mk @@ -0,0 +1,8 @@ +# $Id$ + +install-always: install-binary + +install-postconf: install-listener + +install-listener: + @echo "Don't know how to make $@ on this platform"; exit 1 diff --git a/rp/rpki-rtr/server.sh b/rp/rpki-rtr/server.sh new file mode 100755 index 00000000..7ccf2f38 --- /dev/null +++ b/rp/rpki-rtr/server.sh @@ -0,0 +1,17 @@ +#!/bin/sh - +# +# Wrapper for rtr-origin.py in server mode, for testing. +# +# In production we would probably want to handle all of this either +# directly in the Python code or in the command= setting for a +# particular ssh key, but for initial testing it's simpler to run a +# shall script to change to the right directory and supply any +# necessary command line arguments. +# +# Be warned that almost any error here will cause the subsystem to +# fail mysteriously, leaving behind naught but a SIGCHILD log message +# from sshd as this script dies. 
+ +cd /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin + +exec /usr/local/bin/python rtr-origin.py --server diff --git a/rp/rpki-rtr/sshd.conf b/rp/rpki-rtr/sshd.conf new file mode 100644 index 00000000..df32ca67 --- /dev/null +++ b/rp/rpki-rtr/sshd.conf @@ -0,0 +1,23 @@ +# $Id$ +# +# sshd config file for testing. Invoke thusly: +# +# /usr/sbin/sshd -f $top/rpki-rtr/sshd.conf -d + +Port 2222 +Protocol 2 +ListenAddress 127.0.0.1 +ListenAddress ::1 +HostKey /u/sra/rpki/subvert-rpki.hactrn.net/rpki-rtr/ssh_host_rsa_key +PermitRootLogin no +PubkeyAuthentication yes +AuthorizedKeysFile /u/sra/rpki/subvert-rpki.hactrn.net/rpki-rtr/authorized_keys +PasswordAuthentication no +PermitEmptyPasswords no +ChallengeResponseAuthentication no +UsePAM no +AllowTcpForwarding no +X11Forwarding no +UseDNS no +PidFile /u/sra/rpki/subvert-rpki.hactrn.net/rpki-rtr/sshd.pid +Subsystem rpki-rtr /u/sra/rpki/subvert-rpki.hactrn.net/rpki-rtr/server.sh diff --git a/rp/rtr-origin/Makefile.in b/rp/rtr-origin/Makefile.in deleted file mode 100644 index e587305b..00000000 --- a/rp/rtr-origin/Makefile.in +++ /dev/null @@ -1,51 +0,0 @@ -# $Id$ - -BIN = rtr-origin - -INSTALL = @INSTALL@ -PYTHON = @PYTHON@ -AWK = @AWK@ - -prefix = @prefix@ -exec_prefix = @exec_prefix@ -datarootdir = @datarootdir@ -datadir = @datadir@ -localstatedir = @localstatedir@ -sharedstatedir = @sharedstatedir@ -sysconfdir = @sysconfdir@ -bindir = @bindir@ -sbindir = @sbindir@ -libexecdir = @libexecdir@ -libdir = @libdir@ - -abs_top_srcdir = @abs_top_srcdir@ -abs_top_builddir = @abs_top_builddir@ - -RTR_ORIGIN_INSTALL_TARGETS = @RCYNIC_INSTALL_TARGETS@ - -RPKI_RTR_PORT = 43779 - -SCAN_ROAS = ${bindir}/scan_roas - - -all clean test:: - @true - -install: all ${RTR_ORIGIN_INSTALL_TARGETS} - -install-binary: - if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -m 555 -d ${DESTDIR}${bindir}; fi - ${INSTALL} -m 555 ${BIN} ${DESTDIR}${bindir}/${BIN} - -deinstall uninstall: - rm -f ${DESTDIR}${bindir}/${BIN} - -distclean: clean - rm 
-rf current sockets *.ax *.ix.* - rm -f Makefile - -.FORCE: - -# Platform-specific rules below here. - -@RTR_ORIGIN_MAKE_RULES@ diff --git a/rp/rtr-origin/README b/rp/rtr-origin/README deleted file mode 100644 index dae53010..00000000 --- a/rp/rtr-origin/README +++ /dev/null @@ -1,11 +0,0 @@ -$Id$ - -Sample implementation of draft-ymbk-rpki-rtr-protocol. - -See: - -- The primary documentation at http://trac.rpki.net/ - -- The PDF manual in ../doc/manual.pdf, or - -- The flat text page ../doc/doc.RPKI.RP.rpki-rtr diff --git a/rp/rtr-origin/rtr-origin b/rp/rtr-origin/rtr-origin deleted file mode 100755 index e1e82ccf..00000000 --- a/rp/rtr-origin/rtr-origin +++ /dev/null @@ -1,2278 +0,0 @@ -#!/usr/bin/env python - -# Router origin-authentication rpki-router protocol implementation. See -# draft-ietf-sidr-rpki-rtr in fine Internet-Draft repositories near you. -# -# Run the program with the --help argument for usage information, or see -# documentation for the *_main() functions. -# -# -# $Id$ -# -# Copyright (C) 2009-2013 Internet Systems Consortium ("ISC") -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH -# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY -# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, -# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM -# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE -# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR -# PERFORMANCE OF THIS SOFTWARE. 
- -import sys -import os -import struct -import time -import glob -import socket -import fcntl -import signal -import syslog -import errno -import asyncore -import asynchat -import subprocess -import traceback -import getopt -import bisect -import random -import base64 - - -# Debugging only, should be False in production -disable_incrementals = False - -# Whether to log backtraces -backtrace_on_exceptions = False - -class IgnoreThisRecord(Exception): - pass - - -class timestamp(int): - """ - Wrapper around time module. - """ - - def __new__(cls, x): - return int.__new__(cls, x) - - @classmethod - def now(cls, delta = 0): - return cls(time.time() + delta) - - def __str__(self): - return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self)) - - -class ipaddr(object): - """ - IP addresses. - """ - - def __init__(self, string = None, value = None): - assert (string is None) != (value is None) - if string is not None: - value = socket.inet_pton(self.af, string) - assert len(value) == self.size - self.value = value - - def __str__(self): - return socket.inet_ntop(self.af, self.value) - - def __cmp__(self, other): - return cmp(self.value, other.value) - -class v4addr(ipaddr): - af = socket.AF_INET - size = 4 - -class v6addr(ipaddr): - af = socket.AF_INET6 - size = 16 - -def read_current(): - """ - Read current serial number and nonce. Return None for both if - serial and nonce not recorded. For backwards compatibility, treat - file containing just a serial number as having a nonce of zero. - """ - try: - f = open("current", "r") - values = tuple(int(s) for s in f.read().split()) - f.close() - return values[0], values[1] - except IndexError: - return values[0], 0 - except IOError: - return None, None - -def write_current(serial, nonce): - """ - Write serial number and nonce. 
- """ - tmpfn = "current.%d.tmp" % os.getpid() - try: - f = open(tmpfn, "w") - f.write("%d %d\n" % (serial, nonce)) - f.close() - os.rename(tmpfn, "current") - finally: - try: - os.unlink(tmpfn) - except: - pass - - -def new_nonce(): - """ - Create and return a new nonce value. - """ - if force_zero_nonce: - return 0 - try: - return int(random.SystemRandom().getrandbits(16)) - except NotImplementedError: - return int(random.getrandbits(16)) - - -class read_buffer(object): - """ - Wrapper around synchronous/asynchronous read state. - """ - - def __init__(self): - self.buffer = "" - - def update(self, need, callback): - """ - Update count of needed bytes and callback, then dispatch to callback. - """ - self.need = need - self.callback = callback - return self.callback(self) - - def available(self): - """ - How much data do we have available in this buffer? - """ - return len(self.buffer) - - def needed(self): - """ - How much more data does this buffer need to become ready? - """ - return self.need - self.available() - - def ready(self): - """ - Is this buffer ready to read yet? - """ - return self.available() >= self.need - - def get(self, n): - """ - Hand some data to the caller. - """ - b = self.buffer[:n] - self.buffer = self.buffer[n:] - return b - - def put(self, b): - """ - Accumulate some data. - """ - self.buffer += b - - def retry(self): - """ - Try dispatching to the callback again. - """ - return self.callback(self) - -class PDUException(Exception): - """ - Parent exception type for exceptions that signal particular protocol - errors. String value of exception instance will be the message to - put in the error_report PDU, error_report_code value of exception - will be the numeric code to use. 
- """ - - def __init__(self, msg = None, pdu = None): - assert msg is None or isinstance(msg, (str, unicode)) - self.error_report_msg = msg - self.error_report_pdu = pdu - - def __str__(self): - return self.error_report_msg or self.__class__.__name__ - - def make_error_report(self): - return error_report(errno = self.error_report_code, - errmsg = self.error_report_msg, - errpdu = self.error_report_pdu) - -class UnsupportedProtocolVersion(PDUException): - error_report_code = 4 - -class UnsupportedPDUType(PDUException): - error_report_code = 5 - -class CorruptData(PDUException): - error_report_code = 0 - -class pdu(object): - """ - Object representing a generic PDU in the rpki-router protocol. - Real PDUs are subclasses of this class. - """ - - version = 0 # Protocol version - - _pdu = None # Cached when first generated - - header_struct = struct.Struct("!BBHL") - - def __cmp__(self, other): - return cmp(self.to_pdu(), other.to_pdu()) - - def check(self): - """ - Check attributes to make sure they're within range. - """ - pass - - @classmethod - def read_pdu(cls, reader): - return reader.update(need = cls.header_struct.size, callback = cls.got_header) - - @classmethod - def got_header(cls, reader): - if not reader.ready(): - return None - assert reader.available() >= cls.header_struct.size - version, pdu_type, whatever, length = cls.header_struct.unpack(reader.buffer[:cls.header_struct.size]) - if version != cls.version: - raise UnsupportedProtocolVersion( - "Received PDU version %d, expected %d" % (version, cls.version)) - if pdu_type not in cls.pdu_map: - raise UnsupportedPDUType( - "Received unsupported PDU type %d" % pdu_type) - if length < 8: - raise CorruptData( - "Received PDU with length %d, which is too short to be valid" % length) - self = cls.pdu_map[pdu_type]() - return reader.update(need = length, callback = self.got_pdu) - - def consume(self, client): - """ - Handle results in test client. Default behavior is just to print - out the PDU. 
- """ - blather(self) - - def send_file(self, server, filename): - """ - Send a content of a file as a cache response. Caller should catch IOError. - """ - f = open(filename, "rb") - server.push_pdu(cache_response(nonce = server.current_nonce)) - server.push_file(f) - server.push_pdu(end_of_data(serial = server.current_serial, nonce = server.current_nonce)) - - def send_nodata(self, server): - """ - Send a nodata error. - """ - server.push_pdu(error_report(errno = error_report.codes["No Data Available"], errpdu = self)) - -class pdu_with_serial(pdu): - """ - Base class for PDUs consisting of just a serial number and nonce. - """ - - header_struct = struct.Struct("!BBHLL") - - def __init__(self, serial = None, nonce = None): - if serial is not None: - assert isinstance(serial, int) - self.serial = serial - if nonce is not None: - assert isinstance(nonce, int) - self.nonce = nonce - - def __str__(self): - return "[%s, serial #%d nonce %d]" % (self.__class__.__name__, self.serial, self.nonce) - - def to_pdu(self): - """ - Generate the wire format PDU. - """ - if self._pdu is None: - self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, - self.header_struct.size, self.serial) - return self._pdu - - def got_pdu(self, reader): - if not reader.ready(): - return None - b = reader.get(self.header_struct.size) - version, pdu_type, self.nonce, length, self.serial = self.header_struct.unpack(b) - if length != 12: - raise CorruptData("PDU length of %d can't be right" % length, pdu = self) - assert b == self.to_pdu() - return self - -class pdu_nonce(pdu): - """ - Base class for PDUs consisting of just a nonce. - """ - - header_struct = struct.Struct("!BBHL") - - def __init__(self, nonce = None): - if nonce is not None: - assert isinstance(nonce, int) - self.nonce = nonce - - def __str__(self): - return "[%s, nonce %d]" % (self.__class__.__name__, self.nonce) - - def to_pdu(self): - """ - Generate the wire format PDU. 
- """ - if self._pdu is None: - self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, self.header_struct.size) - return self._pdu - - def got_pdu(self, reader): - if not reader.ready(): - return None - b = reader.get(self.header_struct.size) - version, pdu_type, self.nonce, length = self.header_struct.unpack(b) - if length != 8: - raise CorruptData("PDU length of %d can't be right" % length, pdu = self) - assert b == self.to_pdu() - return self - -class pdu_empty(pdu): - """ - Base class for empty PDUs. - """ - - header_struct = struct.Struct("!BBHL") - - def __str__(self): - return "[%s]" % self.__class__.__name__ - - def to_pdu(self): - """ - Generate the wire format PDU for this prefix. - """ - if self._pdu is None: - self._pdu = self.header_struct.pack(self.version, self.pdu_type, 0, self.header_struct.size) - return self._pdu - - def got_pdu(self, reader): - if not reader.ready(): - return None - b = reader.get(self.header_struct.size) - version, pdu_type, zero, length = self.header_struct.unpack(b) - if zero != 0: - raise CorruptData("Must-be-zero field isn't zero" % length, pdu = self) - if length != 8: - raise CorruptData("PDU length of %d can't be right" % length, pdu = self) - assert b == self.to_pdu() - return self - -class serial_notify(pdu_with_serial): - """ - Serial Notify PDU. - """ - - pdu_type = 0 - - def consume(self, client): - """ - Respond to a serial_notify message with either a serial_query or - reset_query, depending on what we already know. - """ - blather(self) - if client.current_serial is None or client.current_nonce != self.nonce: - client.push_pdu(reset_query()) - elif self.serial != client.current_serial: - client.push_pdu(serial_query(serial = client.current_serial, nonce = client.current_nonce)) - else: - blather("[Notify did not change serial number, ignoring]") - -class serial_query(pdu_with_serial): - """ - Serial Query PDU. 
- """ - - pdu_type = 1 - - def serve(self, server): - """ - Received a serial query, send incremental transfer in response. - If client is already up to date, just send an empty incremental - transfer. - """ - blather(self) - if server.get_serial() is None: - self.send_nodata(server) - elif server.current_nonce != self.nonce: - log("[Client requested wrong nonce, resetting client]") - server.push_pdu(cache_reset()) - elif server.current_serial == self.serial: - blather("[Client is already current, sending empty IXFR]") - server.push_pdu(cache_response(nonce = server.current_nonce)) - server.push_pdu(end_of_data(serial = server.current_serial, nonce = server.current_nonce)) - elif disable_incrementals: - server.push_pdu(cache_reset()) - else: - try: - self.send_file(server, "%d.ix.%d" % (server.current_serial, self.serial)) - except IOError: - server.push_pdu(cache_reset()) - -class reset_query(pdu_empty): - """ - Reset Query PDU. - """ - - pdu_type = 2 - - def serve(self, server): - """ - Received a reset query, send full current state in response. - """ - blather(self) - if server.get_serial() is None: - self.send_nodata(server) - else: - try: - fn = "%d.ax" % server.current_serial - self.send_file(server, fn) - except IOError: - server.push_pdu(error_report(errno = error_report.codes["Internal Error"], - errpdu = self, errmsg = "Couldn't open %s" % fn)) - -class cache_response(pdu_nonce): - """ - Cache Response PDU. - """ - - pdu_type = 3 - - def consume(self, client): - """ - Handle cache_response. - """ - blather(self) - if self.nonce != client.current_nonce: - blather("[Nonce changed, resetting]") - client.cache_reset() - -class end_of_data(pdu_with_serial): - """ - End of Data PDU. - """ - - pdu_type = 7 - - def consume(self, client): - """ - Handle end_of_data response. - """ - blather(self) - client.end_of_data(self.serial, self.nonce) - -class cache_reset(pdu_empty): - """ - Cache reset PDU. 
- """ - - pdu_type = 8 - - def consume(self, client): - """ - Handle cache_reset response, by issuing a reset_query. - """ - blather(self) - client.cache_reset() - client.push_pdu(reset_query()) - -class prefix(pdu): - """ - Object representing one prefix. This corresponds closely to one PDU - in the rpki-router protocol, so closely that we use lexical ordering - of the wire format of the PDU as the ordering for this class. - - This is a virtual class, but the .from_text() constructor - instantiates the correct concrete subclass (ipv4_prefix or - ipv6_prefix) depending on the syntax of its input text. - """ - - header_struct = struct.Struct("!BB2xLBBBx") - asnum_struct = struct.Struct("!L") - - @staticmethod - def from_text(asnum, addr): - """ - Construct a prefix from its text form. - """ - cls = ipv6_prefix if ":" in addr else ipv4_prefix - self = cls() - self.asn = long(asnum) - p, l = addr.split("/") - self.prefix = self.addr_type(string = p) - if "-" in l: - self.prefixlen, self.max_prefixlen = tuple(int(i) for i in l.split("-")) - else: - self.prefixlen = self.max_prefixlen = int(l) - self.announce = 1 - self.check() - return self - - def __str__(self): - plm = "%s/%s-%s" % (self.prefix, self.prefixlen, self.max_prefixlen) - return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, plm, - ":".join(("%02X" % ord(b) for b in self.to_pdu()))) - - def show(self): - blather("# Class: %s" % self.__class__.__name__) - blather("# ASN: %s" % self.asn) - blather("# Prefix: %s" % self.prefix) - blather("# Prefixlen: %s" % self.prefixlen) - blather("# MaxPrefixlen: %s" % self.max_prefixlen) - blather("# Announce: %s" % self.announce) - - def consume(self, client): - """ - Handle one incoming prefix PDU - """ - blather(self) - client.consume_prefix(self) - - def check(self): - """ - Check attributes to make sure they're within range. 
- """ - if self.announce not in (0, 1): - raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self) - if self.prefixlen < 0 or self.prefixlen > self.addr_type.size * 8: - raise CorruptData("Implausible prefix length %d" % self.prefixlen, pdu = self) - if self.max_prefixlen < self.prefixlen or self.max_prefixlen > self.addr_type.size * 8: - raise CorruptData("Implausible max prefix length %d" % self.max_prefixlen, pdu = self) - pdulen = self.header_struct.size + self.addr_type.size + self.asnum_struct.size - if len(self.to_pdu()) != pdulen: - raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self) - - def to_pdu(self, announce = None): - """ - Generate the wire format PDU for this prefix. - """ - if announce is not None: - assert announce in (0, 1) - elif self._pdu is not None: - return self._pdu - pdulen = self.header_struct.size + self.addr_type.size + self.asnum_struct.size - pdu = (self.header_struct.pack(self.version, self.pdu_type, pdulen, - announce if announce is not None else self.announce, - self.prefixlen, self.max_prefixlen) + - self.prefix.value + - self.asnum_struct.pack(self.asn)) - if announce is None: - assert self._pdu is None - self._pdu = pdu - return pdu - - def got_pdu(self, reader): - if not reader.ready(): - return None - b1 = reader.get(self.header_struct.size) - b2 = reader.get(self.addr_type.size) - b3 = reader.get(self.asnum_struct.size) - version, pdu_type, length, self.announce, self.prefixlen, self.max_prefixlen = self.header_struct.unpack(b1) - if length != len(b1) + len(b2) + len(b3): - raise CorruptData("Got PDU length %d, expected %d" % (length, len(b1) + len(b2) + len(b3)), pdu = self) - self.prefix = self.addr_type(value = b2) - self.asn = self.asnum_struct.unpack(b3)[0] - assert b1 + b2 + b3 == self.to_pdu() - return self - - @staticmethod - def from_bgpdump(line, rib_dump): - try: - assert isinstance(rib_dump, bool) - fields = line.split("|") - - # Parse 
prefix, including figuring out IP protocol version - cls = ipv6_prefix if ":" in fields[5] else ipv4_prefix - self = cls() - self.timestamp = timestamp(fields[1]) - p, l = fields[5].split("/") - self.prefix = self.addr_type(p) - self.prefixlen = self.max_prefixlen = int(l) - - # Withdrawals don't have AS paths, so be careful - assert fields[2] == "B" if rib_dump else fields[2] in ("A", "W") - if fields[2] == "W": - self.asn = 0 - self.announce = 0 - else: - self.announce = 1 - if not fields[6] or "{" in fields[6] or "(" in fields[6]: - raise IgnoreThisRecord - a = fields[6].split()[-1] - if "." in a: - a = [int(s) for s in a.split(".")] - if len(a) != 2 or a[0] < 0 or a[0] > 65535 or a[1] < 0 or a[1] > 65535: - log("Bad dotted ASNum %r, ignoring record" % fields[6]) - raise IgnoreThisRecord - a = (a[0] << 16) | a[1] - else: - a = int(a) - self.asn = a - - self.check() - return self - - except IgnoreThisRecord: - raise - - except Exception, e: - log("Ignoring line %r: %s" % (line, e)) - raise IgnoreThisRecord - -class ipv4_prefix(prefix): - """ - IPv4 flavor of a prefix. - """ - pdu_type = 4 - addr_type = v4addr - -class ipv6_prefix(prefix): - """ - IPv6 flavor of a prefix. - """ - pdu_type = 6 - addr_type = v6addr - -class router_key(pdu): - """ - Router Key PDU. - """ - - pdu_type = 9 - - header_struct = struct.Struct("!BBBxL20sL") - - @classmethod - def from_text(cls, asnum, gski, key): - """ - Construct a router key from its text form. 
- """ - - self = cls() - self.asn = long(asnum) - self.ski = base64.urlsafe_b64decode(gski + "=") - self.key = base64.b64decode(key) - self.announce = 1 - self.check() - return self - - def __str__(self): - return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, - base64.urlsafe_b64encode(self.ski).rstrip("="), - ":".join(("%02X" % ord(b) for b in self.to_pdu()))) - - def consume(self, client): - """ - Handle one incoming Router Key PDU - """ - - blather(self) - client.consume_routerkey(self) - - def check(self): - """ - Check attributes to make sure they're within range. - """ - - if self.announce not in (0, 1): - raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self) - if len(self.ski) != 20: - raise CorruptData("Implausible SKI length %d" % len(self.ski), pdu = self) - pdulen = self.header_struct.size + len(self.key) - if len(self.to_pdu()) != pdulen: - raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self) - - def to_pdu(self, announce = None): - if announce is not None: - assert announce in (0, 1) - elif self._pdu is not None: - return self._pdu - pdulen = self.header_struct.size + len(self.key) - pdu = (self.header_struct.pack(self.version, - self.pdu_type, - announce if announce is not None else self.announce, - pdulen, - self.ski, - self.asn) - + self.key) - if announce is None: - assert self._pdu is None - self._pdu = pdu - return pdu - - def got_pdu(self, reader): - if not reader.ready(): - return None - header = reader.get(self.header_struct.size) - version, pdu_type, self.announce, length, self.ski, self.asn = self.header_struct.unpack(header) - remaining = length - self.header_struct.size - if remaining <= 0: - raise CorruptData("Got PDU length %d, minimum is %d" % (length, self.header_struct.size + 1), pdu = self) - self.key = reader.get(remaining) - assert header + self.key == self.to_pdu() - return self - - -class error_report(pdu): - """ - Error Report PDU. 
- """ - - pdu_type = 10 - - header_struct = struct.Struct("!BBHL") - string_struct = struct.Struct("!L") - - errors = { - 2 : "No Data Available" } - - fatal = { - 0 : "Corrupt Data", - 1 : "Internal Error", - 3 : "Invalid Request", - 4 : "Unsupported Protocol Version", - 5 : "Unsupported PDU Type", - 6 : "Withdrawal of Unknown Record", - 7 : "Duplicate Announcement Received" } - - assert set(errors) & set(fatal) == set() - - errors.update(fatal) - - codes = dict((v, k) for k, v in errors.items()) - - def __init__(self, errno = None, errpdu = None, errmsg = None): - assert errno is None or errno in self.errors - self.errno = errno - self.errpdu = errpdu - self.errmsg = errmsg if errmsg is not None or errno is None else self.errors[errno] - - def __str__(self): - return "[%s, error #%s: %r]" % (self.__class__.__name__, self.errno, self.errmsg) - - def to_counted_string(self, s): - return self.string_struct.pack(len(s)) + s - - def read_counted_string(self, reader, remaining): - assert remaining >= self.string_struct.size - n = self.string_struct.unpack(reader.get(self.string_struct.size))[0] - assert remaining >= self.string_struct.size + n - return n, reader.get(n), (remaining - self.string_struct.size - n) - - def to_pdu(self): - """ - Generate the wire format PDU for this error report. 
- """ - if self._pdu is None: - assert isinstance(self.errno, int) - assert not isinstance(self.errpdu, error_report) - p = self.errpdu - if p is None: - p = "" - elif isinstance(p, pdu): - p = p.to_pdu() - assert isinstance(p, str) - pdulen = self.header_struct.size + self.string_struct.size * 2 + len(p) + len(self.errmsg) - self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.errno, pdulen) - self._pdu += self.to_counted_string(p) - self._pdu += self.to_counted_string(self.errmsg.encode("utf8")) - return self._pdu - - def got_pdu(self, reader): - if not reader.ready(): - return None - header = reader.get(self.header_struct.size) - version, pdu_type, self.errno, length = self.header_struct.unpack(header) - remaining = length - self.header_struct.size - self.pdulen, self.errpdu, remaining = self.read_counted_string(reader, remaining) - self.errlen, self.errmsg, remaining = self.read_counted_string(reader, remaining) - if length != self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen: - raise CorruptData("Got PDU length %d, expected %d" % ( - length, self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen)) - assert (header - + self.to_counted_string(self.errpdu) - + self.to_counted_string(self.errmsg.encode("utf8")) - == self.to_pdu()) - return self - - def serve(self, server): - """ - Received an error_report from client. Not much we can do beyond - logging it, then killing the connection if error was fatal. 
- """ - log(self) - if self.errno in self.fatal: - log("[Shutting down due to reported fatal protocol error]") - sys.exit(1) - -pdu.pdu_map = dict((p.pdu_type, p) for p in (ipv4_prefix, ipv6_prefix, serial_notify, serial_query, reset_query, - cache_response, end_of_data, cache_reset, router_key, error_report)) - -class pdu_set(list): - """ - Object representing a set of PDUs, that is, one versioned and - (theoretically) consistant set of prefixes and router keys extracted - from rcynic's output. - """ - - @classmethod - def _load_file(cls, filename): - """ - Low-level method to read pdu_set from a file. - """ - self = cls() - f = open(filename, "rb") - r = read_buffer() - while True: - p = pdu.read_pdu(r) - while p is None: - b = f.read(r.needed()) - if b == "": - assert r.available() == 0 - return self - r.put(b) - p = r.retry() - self.append(p) - - @staticmethod - def seq_ge(a, b): - return ((a - b) % (1 << 32)) < (1 << 31) - - -class axfr_set(pdu_set): - """ - Object representing a complete set of PDUs, that is, one versioned - and (theoretically) consistant set of prefixes and router - certificates extracted from rcynic's output, all with the announce - field set. - """ - - @classmethod - def parse_rcynic(cls, rcynic_dir): - """ - Parse ROAS and router certificates fetched (and validated!) by - rcynic to create a new axfr_set. We use the scan_roas and - scan_routercerts utilities to parse the ASN.1, although we may go - back to parsing the files directly using the rpki.POW library code - some day. - """ - - self = cls() - self.serial = timestamp.now() - - try: - p = subprocess.Popen((scan_roas, rcynic_dir), stdout = subprocess.PIPE) - for line in p.stdout: - line = line.split() - asn = line[1] - self.extend(prefix.from_text(asn, addr) for addr in line[2:]) - except OSError, e: - sys.exit("Could not run %s, check your $PATH variable? 
(%s)" % (scan_roas, e)) - - try: - p = subprocess.Popen((scan_routercerts, rcynic_dir), stdout = subprocess.PIPE) - for line in p.stdout: - line = line.split() - gski = line[0] - key = line[-1] - self.extend(router_key.from_text(asn, gski, key) for asn in line[1:-1]) - except OSError, e: - sys.exit("Could not run %s, check your $PATH variable? (%s)" % (scan_routercerts, e)) - - self.sort() - for i in xrange(len(self) - 2, -1, -1): - if self[i] == self[i + 1]: - del self[i + 1] - return self - - @classmethod - def load(cls, filename): - """ - Load an axfr_set from a file, parse filename to obtain serial. - """ - fn1, fn2 = os.path.basename(filename).split(".") - assert fn1.isdigit() and fn2 == "ax" - self = cls._load_file(filename) - self.serial = timestamp(fn1) - return self - - def filename(self): - """ - Generate filename for this axfr_set. - """ - return "%d.ax" % self.serial - - @classmethod - def load_current(cls): - """ - Load current axfr_set. Return None if can't. - """ - serial = read_current()[0] - if serial is None: - return None - try: - return cls.load("%d.ax" % serial) - except IOError: - return None - - def save_axfr(self): - """ - Write axfr__set to file with magic filename. - """ - f = open(self.filename(), "wb") - for p in self: - f.write(p.to_pdu()) - f.close() - - def destroy_old_data(self): - """ - Destroy old data files, presumably because our nonce changed and - the old serial numbers are no longer valid. - """ - for i in glob.iglob("*.ix.*"): - os.unlink(i) - for i in glob.iglob("*.ax"): - if i != self.filename(): - os.unlink(i) - - def mark_current(self): - """ - Save current serial number and nonce, creating new nonce if - necessary. Creating a new nonce triggers cleanup of old state, as - the new nonce invalidates all old serial numbers. 
- """ - old_serial, nonce = read_current() - if old_serial is None or self.seq_ge(old_serial, self.serial): - blather("Creating new nonce and deleting stale data") - nonce = new_nonce() - self.destroy_old_data() - write_current(self.serial, nonce) - - def save_ixfr(self, other): - """ - Comparing this axfr_set with an older one and write the resulting - ixfr_set to file with magic filename. Since we store pdu_sets - in sorted order, computing the difference is a trivial linear - comparison. - """ - f = open("%d.ix.%d" % (self.serial, other.serial), "wb") - old = other - new = self - len_old = len(old) - len_new = len(new) - i_old = i_new = 0 - while i_old < len_old and i_new < len_new: - if old[i_old] < new[i_new]: - f.write(old[i_old].to_pdu(announce = 0)) - i_old += 1 - elif old[i_old] > new[i_new]: - f.write(new[i_new].to_pdu(announce = 1)) - i_new += 1 - else: - i_old += 1 - i_new += 1 - for i in xrange(i_old, len_old): - f.write(old[i].to_pdu(announce = 0)) - for i in xrange(i_new, len_new): - f.write(new[i].to_pdu(announce = 1)) - f.close() - - def show(self): - """ - Print this axfr_set. 
- """ - blather("# AXFR %d (%s)" % (self.serial, self.serial)) - for p in self: - blather(p) - - @staticmethod - def read_bgpdump(filename): - assert filename.endswith(".bz2") - blather("Reading %s" % filename) - bunzip2 = subprocess.Popen(("bzip2", "-c", "-d", filename), stdout = subprocess.PIPE) - bgpdump = subprocess.Popen(("bgpdump", "-m", "-"), stdin = bunzip2.stdout, stdout = subprocess.PIPE) - return bgpdump.stdout - - @classmethod - def parse_bgpdump_rib_dump(cls, filename): - assert os.path.basename(filename).startswith("ribs.") - self = cls() - self.serial = None - for line in cls.read_bgpdump(filename): - try: - pfx = prefix.from_bgpdump(line, rib_dump = True) - except IgnoreThisRecord: - continue - self.append(pfx) - self.serial = pfx.timestamp - if self.serial is None: - sys.exit("Failed to parse anything useful from %s" % filename) - self.sort() - for i in xrange(len(self) - 2, -1, -1): - if self[i] == self[i + 1]: - del self[i + 1] - return self - - def parse_bgpdump_update(self, filename): - assert os.path.basename(filename).startswith("updates.") - for line in self.read_bgpdump(filename): - try: - pfx = prefix.from_bgpdump(line, rib_dump = False) - except IgnoreThisRecord: - continue - announce = pfx.announce - pfx.announce = 1 - i = bisect.bisect_left(self, pfx) - if announce: - if i >= len(self) or pfx != self[i]: - self.insert(i, pfx) - else: - while i < len(self) and pfx.prefix == self[i].prefix and pfx.prefixlen == self[i].prefixlen: - del self[i] - self.serial = pfx.timestamp - -class ixfr_set(pdu_set): - """ - Object representing an incremental set of PDUs, that is, the - differences between one versioned and (theoretically) consistant set - of prefixes and router certificates extracted from rcynic's output - and another, with the announce fields set or cleared as necessary to - indicate the changes. - """ - - @classmethod - def load(cls, filename): - """ - Load an ixfr_set from a file, parse filename to obtain serials. 
- """ - fn1, fn2, fn3 = os.path.basename(filename).split(".") - assert fn1.isdigit() and fn2 == "ix" and fn3.isdigit() - self = cls._load_file(filename) - self.from_serial = timestamp(fn3) - self.to_serial = timestamp(fn1) - return self - - def filename(self): - """ - Generate filename for this ixfr_set. - """ - return "%d.ix.%d" % (self.to_serial, self.from_serial) - - def show(self): - """ - Print this ixfr_set. - """ - blather("# IXFR %d (%s) -> %d (%s)" % (self.from_serial, self.from_serial, - self.to_serial, self.to_serial)) - for p in self: - blather(p) - -class file_producer(object): - """ - File-based producer object for asynchat. - """ - - def __init__(self, handle, buffersize): - self.handle = handle - self.buffersize = buffersize - - def more(self): - return self.handle.read(self.buffersize) - -class pdu_channel(asynchat.async_chat): - """ - asynchat subclass that understands our PDUs. This just handles - network I/O. Specific engines (client, server) should be subclasses - of this with methods that do something useful with the resulting - PDUs. - """ - - def __init__(self, conn = None): - asynchat.async_chat.__init__(self, conn) - self.reader = read_buffer() - - def start_new_pdu(self): - """ - Start read of a new PDU. - """ - try: - p = pdu.read_pdu(self.reader) - while p is not None: - self.deliver_pdu(p) - p = pdu.read_pdu(self.reader) - except PDUException, e: - self.push_pdu(e.make_error_report()) - self.close_when_done() - else: - assert not self.reader.ready() - self.set_terminator(self.reader.needed()) - - def collect_incoming_data(self, data): - """ - Collect data into the read buffer. - """ - self.reader.put(data) - - def found_terminator(self): - """ - Got requested data, see if we now have a PDU. If so, pass it - along, then restart cycle for a new PDU. 
- """ - p = self.reader.retry() - if p is None: - self.set_terminator(self.reader.needed()) - else: - self.deliver_pdu(p) - self.start_new_pdu() - - def push_pdu(self, pdu): - """ - Write PDU to stream. - """ - try: - self.push(pdu.to_pdu()) - except OSError, e: - if e.errno != errno.EAGAIN: - raise - - def push_file(self, f): - """ - Write content of a file to stream. - """ - try: - self.push_with_producer(file_producer(f, self.ac_out_buffer_size)) - except OSError, e: - if e.errno != errno.EAGAIN: - raise - - def log(self, msg): - """ - Intercept asyncore's logging. - """ - log(msg) - - def log_info(self, msg, tag = "info"): - """ - Intercept asynchat's logging. - """ - log("asynchat: %s: %s" % (tag, msg)) - - def handle_error(self): - """ - Handle errors caught by asyncore main loop. - """ - c, e = sys.exc_info()[:2] - if backtrace_on_exceptions or e == 0: - for line in traceback.format_exc().splitlines(): - log(line) - else: - log("[Exception: %s: %s]" % (c.__name__, e)) - log("[Exiting after unhandled exception]") - sys.exit(1) - - def init_file_dispatcher(self, fd): - """ - Kludge to plug asyncore.file_dispatcher into asynchat. Call from - subclass's __init__() method, after calling - pdu_channel.__init__(), and don't read this on a full stomach. - """ - self.connected = True - self._fileno = fd - self.socket = asyncore.file_wrapper(fd) - self.add_channel() - flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0) - flags = flags | os.O_NONBLOCK - fcntl.fcntl(fd, fcntl.F_SETFL, flags) - - def handle_close(self): - """ - Exit when channel closed. - """ - asynchat.async_chat.handle_close(self) - sys.exit(0) - -class server_write_channel(pdu_channel): - """ - Kludge to deal with ssh's habit of sometimes (compile time option) - invoking us with two unidirectional pipes instead of one - bidirectional socketpair. All the server logic is in the - server_channel class, this class just deals with sending the - server's output to a different file descriptor. 
- """ - - def __init__(self): - """ - Set up stdout. - """ - pdu_channel.__init__(self) - self.init_file_dispatcher(sys.stdout.fileno()) - - def readable(self): - """ - This channel is never readable. - """ - return False - -class server_channel(pdu_channel): - """ - Server protocol engine, handles upcalls from pdu_channel to - implement protocol logic. - """ - - def __init__(self): - """ - Set up stdin and stdout as connection and start listening for - first PDU. - """ - pdu_channel.__init__(self) - self.init_file_dispatcher(sys.stdin.fileno()) - self.writer = server_write_channel() - self.get_serial() - self.start_new_pdu() - - def writable(self): - """ - This channel is never writable. - """ - return False - - def push(self, data): - """ - Redirect to writer channel. - """ - return self.writer.push(data) - - def push_with_producer(self, producer): - """ - Redirect to writer channel. - """ - return self.writer.push_with_producer(producer) - - def push_pdu(self, pdu): - """ - Redirect to writer channel. - """ - return self.writer.push_pdu(pdu) - - def push_file(self, f): - """ - Redirect to writer channel. - """ - return self.writer.push_file(f) - - def deliver_pdu(self, pdu): - """ - Handle received PDU. - """ - pdu.serve(self) - - def get_serial(self): - """ - Read, cache, and return current serial number, or None if we can't - find the serial number file. The latter condition should never - happen, but maybe we got started in server mode while the cronjob - mode instance is still building its database. - """ - self.current_serial, self.current_nonce = read_current() - return self.current_serial - - def check_serial(self): - """ - Check for a new serial number. - """ - old_serial = self.current_serial - return old_serial != self.get_serial() - - def notify(self, data = None): - """ - Cronjob instance kicked us, send a notify message. 
- """ - if self.check_serial() is not None: - self.push_pdu(serial_notify(serial = self.current_serial, nonce = self.current_nonce)) - else: - log("Cronjob kicked me without a valid current serial number") - -class client_channel(pdu_channel): - """ - Client protocol engine, handles upcalls from pdu_channel. - """ - - current_serial = None - current_nonce = None - sql = None - host = None - port = None - cache_id = None - - def __init__(self, sock, proc, killsig, host, port): - self.killsig = killsig - self.proc = proc - self.host = host - self.port = port - pdu_channel.__init__(self, conn = sock) - self.start_new_pdu() - - @classmethod - def ssh(cls, host, port): - """ - Set up ssh connection and start listening for first PDU. - """ - args = ("ssh", "-p", port, "-s", host, "rpki-rtr") - blather("[Running ssh: %s]" % " ".join(args)) - s = socket.socketpair() - return cls(sock = s[1], - proc = subprocess.Popen(args, executable = "/usr/bin/ssh", - stdin = s[0], stdout = s[0], close_fds = True), - killsig = signal.SIGKILL, - host = host, port = port) - - @classmethod - def tcp(cls, host, port): - """ - Set up TCP connection and start listening for first PDU. - """ - blather("[Starting raw TCP connection to %s:%s]" % (host, port)) - try: - addrinfo = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM) - except socket.error, e: - blather("[socket.getaddrinfo() failed: %s]" % e) - else: - for ai in addrinfo: - af, socktype, proto, cn, sa = ai - blather("[Trying addr %s port %s]" % sa[:2]) - try: - s = socket.socket(af, socktype, proto) - except socket.error, e: - blather("[socket.socket() failed: %s]" % e) - continue - try: - s.connect(sa) - except socket.error, e: - blather("[socket.connect() failed: %s]" % e) - s.close() - continue - return cls(sock = s, proc = None, killsig = None, - host = host, port = port) - sys.exit(1) - - @classmethod - def loopback(cls, host, port): - """ - Set up loopback connection and start listening for first PDU. 
- """ - s = socket.socketpair() - blather("[Using direct subprocess kludge for testing]") - argv = [sys.executable, sys.argv[0], "--server"] - if "--syslog" in sys.argv: - argv.extend(("--syslog", sys.argv[sys.argv.index("--syslog") + 1])) - return cls(sock = s[1], - proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True), - killsig = signal.SIGINT, - host = host, port = port) - - @classmethod - def tls(cls, host, port): - """ - Set up TLS connection and start listening for first PDU. - - NB: This uses OpenSSL's "s_client" command, which does not - check server certificates properly, so this is not suitable for - production use. Fixing this would be a trivial change, it just - requires using a client program which does check certificates - properly (eg, gnutls-cli, or stunnel's client mode if that works - for such purposes this week). - """ - args = ("openssl", "s_client", "-tls1", "-quiet", "-connect", "%s:%s" % (host, port)) - blather("[Running: %s]" % " ".join(args)) - s = socket.socketpair() - return cls(sock = s[1], - proc = subprocess.Popen(args, stdin = s[0], stdout = s[0], close_fds = True), - killsig = signal.SIGKILL, - host = host, port = port) - - def setup_sql(self, sqlname): - """ - Set up an SQLite database to contain the table we receive. If - necessary, we will create the database. 
- """ - import sqlite3 - missing = not os.path.exists(sqlname) - self.sql = sqlite3.connect(sqlname, detect_types = sqlite3.PARSE_DECLTYPES) - self.sql.text_factory = str - cur = self.sql.cursor() - cur.execute("PRAGMA foreign_keys = on") - if missing: - cur.execute(''' - CREATE TABLE cache ( - cache_id INTEGER PRIMARY KEY NOT NULL, - host TEXT NOT NULL, - port TEXT NOT NULL, - nonce INTEGER, - serial INTEGER, - updated INTEGER, - UNIQUE (host, port))''') - cur.execute(''' - CREATE TABLE prefix ( - cache_id INTEGER NOT NULL - REFERENCES cache(cache_id) - ON DELETE CASCADE - ON UPDATE CASCADE, - asn INTEGER NOT NULL, - prefix TEXT NOT NULL, - prefixlen INTEGER NOT NULL, - max_prefixlen INTEGER NOT NULL, - UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''') - - cur.execute(''' - CREATE TABLE routerkey ( - cache_id INTEGER NOT NULL - REFERENCES cache(cache_id) - ON DELETE CASCADE - ON UPDATE CASCADE, - asn INTEGER NOT NULL, - ski TEXT NOT NULL, - key TEXT NOT NULL, - UNIQUE (cache_id, asn, ski), - UNIQUE (cache_id, asn, key))''') - - cur.execute("SELECT cache_id, nonce, serial FROM cache WHERE host = ? AND port = ?", - (self.host, self.port)) - try: - self.cache_id, self.current_nonce, self.current_serial = cur.fetchone() - except TypeError: - cur.execute("INSERT INTO cache (host, port) VALUES (?, ?)", (self.host, self.port)) - self.cache_id = cur.lastrowid - self.sql.commit() - - def cache_reset(self): - """ - Handle cache_reset actions. - """ - self.current_serial = None - if self.sql: - cur = self.sql.cursor() - cur.execute("DELETE FROM prefix WHERE cache_id = ?", (self.cache_id,)) - cur.execute("UPDATE cache SET serial = NULL WHERE cache_id = ?", (self.cache_id,)) - - def end_of_data(self, serial, nonce): - """ - Handle end_of_data actions. 
- """ - self.current_serial = serial - self.current_nonce = nonce - if self.sql: - self.sql.execute("UPDATE cache SET serial = ?, nonce = ?, updated = datetime('now') WHERE cache_id = ?", - (serial, nonce, self.cache_id)) - self.sql.commit() - - def consume_prefix(self, prefix): - """ - Handle one prefix PDU. - """ - if self.sql: - values = (self.cache_id, prefix.asn, str(prefix.prefix), prefix.prefixlen, prefix.max_prefixlen) - if prefix.announce: - self.sql.execute("INSERT INTO prefix (cache_id, asn, prefix, prefixlen, max_prefixlen) " - "VALUES (?, ?, ?, ?, ?)", - values) - else: - self.sql.execute("DELETE FROM prefix " - "WHERE cache_id = ? AND asn = ? AND prefix = ? AND prefixlen = ? AND max_prefixlen = ?", - values) - - - def consume_routerkey(self, routerkey): - """ - Handle one Router Key PDU. - """ - - if self.sql: - values = (self.cache_id, routerkey.asn, - base64.urlsafe_b64encode(routerkey.ski).rstrip("="), - base64.b64encode(routerkey.key)) - if routerkey.announce: - self.sql.execute("INSERT INTO routerkey (cache_id, asn, ski, key) " - "VALUES (?, ?, ?, ?)", - values) - else: - self.sql.execute("DELETE FROM routerkey " - "WHERE cache_id = ? AND asn = ? AND (ski = ? OR key = ?)", - values) - - - def deliver_pdu(self, pdu): - """ - Handle received PDU. - """ - pdu.consume(self) - - def push_pdu(self, pdu): - """ - Log outbound PDU then write it to stream. - """ - blather(pdu) - pdu_channel.push_pdu(self, pdu) - - def cleanup(self): - """ - Force clean up this client's child process. If everything goes - well, child will have exited already before this method is called, - but we may need to whack it with a stick if something breaks. - """ - if self.proc is not None and self.proc.returncode is None: - try: - os.kill(self.proc.pid, self.killsig) - except OSError: - pass - - def handle_close(self): - """ - Intercept close event so we can log it, then shut down. 
- """ - blather("Server closed channel") - pdu_channel.handle_close(self) - -class kickme_channel(asyncore.dispatcher): - """ - asyncore dispatcher for the PF_UNIX socket that cronjob mode uses to - kick servers when it's time to send notify PDUs to clients. - """ - - def __init__(self, server): - asyncore.dispatcher.__init__(self) - self.server = server - self.sockname = "%s.%d" % (kickme_base, os.getpid()) - self.create_socket(socket.AF_UNIX, socket.SOCK_DGRAM) - try: - self.bind(self.sockname) - os.chmod(self.sockname, 0660) - except socket.error, e: - log("Couldn't bind() kickme socket: %r" % e) - self.close() - except OSError, e: - log("Couldn't chmod() kickme socket: %r" % e) - - def writable(self): - """ - This socket is read-only, never writable. - """ - return False - - def handle_connect(self): - """ - Ignore connect events (not very useful on datagram socket). - """ - pass - - def handle_read(self): - """ - Handle receipt of a datagram. - """ - data = self.recv(512) - self.server.notify(data) - - def cleanup(self): - """ - Clean up this dispatcher's socket. - """ - self.close() - try: - os.unlink(self.sockname) - except: - pass - - def log(self, msg): - """ - Intercept asyncore's logging. - """ - log(msg) - - def log_info(self, msg, tag = "info"): - """ - Intercept asyncore's logging. - """ - log("asyncore: %s: %s" % (tag, msg)) - - def handle_error(self): - """ - Handle errors caught by asyncore main loop. - """ - c, e = sys.exc_info()[:2] - if backtrace_on_exceptions or e == 0: - for line in traceback.format_exc().splitlines(): - log(line) - else: - log("[Exception: %s: %s]" % (c.__name__, e)) - log("[Exiting after unhandled exception]") - sys.exit(1) - - -def hostport_tag(): - """ - Construct hostname/address + port when we're running under a - protocol we understand well enough to do that. This is all - kludgery. Just grit your teeth, or perhaps just close your eyes. 
- """ - - proto = None - - if proto is None: - try: - host, port = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM).getpeername() - proto = "tcp" - except: - pass - - if proto is None: - try: - host, port = socket.fromfd(0, socket.AF_INET6, socket.SOCK_STREAM).getpeername()[0:2] - proto = "tcp" - except: - pass - - if proto is None: - try: - host, port = os.environ["SSH_CONNECTION"].split()[0:2] - proto = "ssh" - except: - pass - - if proto is None: - try: - host, port = os.environ["REMOTE_HOST"], os.getenv("REMOTE_PORT") - proto = "ssl" - except: - pass - - if proto is None: - return "" - elif not port: - return "/%s/%s" % (proto, host) - elif ":" in host: - return "/%s/%s.%s" % (proto, host, port) - else: - return "/%s/%s:%s" % (proto, host, port) - - -def kick_all(serial): - """ - Kick any existing server processes to wake them up. - """ - - try: - os.stat(kickme_dir) - except OSError: - blather('# Creating directory "%s"' % kickme_dir) - os.makedirs(kickme_dir) - - msg = "Good morning, serial %d is ready" % serial - sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - for name in glob.iglob("%s.*" % kickme_base): - try: - blather("# Kicking %s" % name) - sock.sendto(msg, name) - except socket.error: - try: - blather("# Failed to kick %s, probably dead socket, attempting cleanup" % name) - os.unlink(name) - except Exception, e: - blather("# Couldn't unlink suspected dead socket %s: %s" % (name, e)) - except Exception, e: - log("# Failed to kick %s and don't understand why: %s" % (name, e)) - sock.close() - -def cronjob_main(argv): - """ - Run this mode right after rcynic to do the real work of groveling - through the ROAs that rcynic collects and translating that data into - the form used in the rpki-router protocol. This mode prepares both - full dumps (AXFR) and incremental dumps against a specific prior - version (IXFR). [Terminology here borrowed from DNS, as is much of - the protocol design.] 
Finally, this mode kicks any active servers, - so that they can notify their clients that a new version is - available. - - Run this in the directory where you want to write its output files, - which should also be the directory in which you run this program in - --server mode. - - This mode takes one argument on the command line, which specifies - the directory name of rcynic's authenticated output tree (normally - $somewhere/rcynic-data/authenticated/). - """ - - if len(argv) != 1: - sys.exit("Expected one argument, got %r" % (argv,)) - - old_ixfrs = glob.glob("*.ix.*") - - current = read_current()[0] - cutoff = timestamp.now(-(24 * 60 * 60)) - for f in glob.iglob("*.ax"): - t = timestamp(int(f.split(".")[0])) - if t < cutoff and t != current: - blather("# Deleting old file %s, timestamp %s" % (f, t)) - os.unlink(f) - - pdus = axfr_set.parse_rcynic(argv[0]) - if pdus == axfr_set.load_current(): - blather("# No change, new version not needed") - sys.exit() - pdus.save_axfr() - for axfr in glob.iglob("*.ax"): - if axfr != pdus.filename(): - pdus.save_ixfr(axfr_set.load(axfr)) - pdus.mark_current() - - blather("# New serial is %d (%s)" % (pdus.serial, pdus.serial)) - - kick_all(pdus.serial) - - old_ixfrs.sort() - for ixfr in old_ixfrs: - try: - blather("# Deleting old file %s" % ixfr) - os.unlink(ixfr) - except OSError: - pass - -def show_main(argv): - """ - Display dumps created by --cronjob mode in textual form. - Intended only for debugging. - - This mode takes no command line arguments. Run it in the directory - where you ran --cronjob mode. - """ - - if argv: - sys.exit("Unexpected arguments: %r" % (argv,)) - - g = glob.glob("*.ax") - g.sort() - for f in g: - axfr_set.load(f).show() - - g = glob.glob("*.ix.*") - g.sort() - for f in g: - ixfr_set.load(f).show() - -def server_main(argv): - """ - Implement the server side of the rpkk-router protocol. 
Other than - one PF_UNIX socket inode, this doesn't write anything to disk, so it - can be run with minimal privileges. Most of the hard work has - already been done in --cronjob mode, so all that this mode has to do - is serve up the results. - - In production use this server should run under sshd. The subsystem - mechanism in sshd does not allow us to pass arguments on the command - line, so setting this up might require a wrapper script, but in - production use you will probably want to lock down the public key - used to authenticate the ssh session so that it can only run this - one command, in which case you can just specify the full command - including any arguments in the authorized_keys file. - - Unless you do something special, sshd will have this program running - in whatever it thinks is the home directory associated with the - username given in the ssh prototocol setup, so it may be easiest to - set this up so that the home directory sshd puts this program into - is the one where --cronjob left its files for this mode to pick up. - - This mode must be run in the directory where you ran --cronjob mode. - - This mode takes one optional argument: if provided, the argument is - the name of a directory to which the program should chdir() on - startup; this may simplify setup when running under inetd. - - The server is event driven, so everything interesting happens in the - channel classes. - """ - - blather("[Starting]") - if len(argv) > 1: - sys.exit("Unexpected arguments: %r" % (argv,)) - if argv: - try: - os.chdir(argv[0]) - except OSError, e: - sys.exit(e) - kickme = None - try: - server = server_channel() - kickme = kickme_channel(server = server) - asyncore.loop(timeout = None) - except KeyboardInterrupt: - sys.exit(0) - finally: - if kickme is not None: - kickme.cleanup() - - -def listener_tcp_main(argv): - """ - Simple plain-TCP listener. 
Listens on a specified TCP port, upon - receiving a connection, forks the process and starts child executing - at server_main(). - - First argument (required) is numeric port number. - - Second argument (optional) is directory, like --server. - - NB: plain-TCP is completely insecure. We only implement this - because it's all that the routers currently support. In theory, we - will all be running TCP-AO in the future, at which point this will - go away. - """ - - # Perhaps we should daemonize? Deal with that later. - - if len(argv) > 2: - sys.exit("Unexpected arguments: %r" % (argv,)) - try: - port = int(argv[0]) if argv[0].isdigit() else socket.getservbyname(argv[0], "tcp") - except: - sys.exit("Couldn't parse port number on which to listen") - if len(argv) > 1: - try: - os.chdir(argv[1]) - except OSError, e: - sys.exit(e) - listener = None - try: - listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) - listener.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) - except: - if listener is not None: - listener.close() - listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - try: - listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) - except AttributeError: - pass - listener.bind(("", port)) - listener.listen(5) - blather("[Listening on port %s]" % port) - while True: - s, ai = listener.accept() - blather("[Received connection from %r]" % (ai,)) - pid = os.fork() - if pid == 0: - os.dup2(s.fileno(), 0) - os.dup2(s.fileno(), 1) - s.close() - #os.closerange(3, os.sysconf("SC_OPEN_MAX")) - global log_tag - log_tag = "rtr-origin/server" + hostport_tag() - syslog.closelog() - syslog.openlog(log_tag, syslog.LOG_PID, syslog_facility) - server_main(()) - sys.exit() - else: - blather("[Spawned server %d]" % pid) - try: - while True: - pid, status = os.waitpid(0, os.WNOHANG) - if pid: - blather("[Server %s exited]" % pid) - else: - break - except: - pass - - -def client_main(argv): 
- """ - Toy client, intended only for debugging. - - This program takes one or more arguments. The first argument - determines what kind of connection it should open to the server, the - remaining arguments are connection details specific to this - particular type of connection. - - If the first argument is "loopback", the client will run a copy of - the server directly in a subprocess, and communicate with it via a - PF_UNIX socket pair. This sub-mode takes no further arguments. - - If the first argument is "ssh", the client will attempt to run ssh - in as subprocess to connect to the server using the ssh subsystem - mechanism as specified for this protocol. The remaining arguments - should be a hostname (or IP address in a form acceptable to ssh) and - a TCP port number. - - If the first argument is "tcp", the client will attempt to open a - direct (and completely insecure!) TCP connection to the server. - The remaining arguments should be a hostname (or IP address) and - a TCP port number. - - If the first argument is "tls", the client will attempt to open a - TLS connection to the server. The remaining arguments should be a - hostname (or IP address) and a TCP port number. - - An optional final name is the name of a file containing a SQLite - database in which to store the received table. If specified, this - database will be created if missing. 
- """ - - blather("[Startup]") - client = None - if not argv: - argv = ["loopback"] - proto = argv[0] - if proto == "loopback" and len(argv) in (1, 2): - constructor = client_channel.loopback - host, port = "", "" - sqlname = None if len(argv) == 1 else argv[1] - elif proto in ("ssh", "tcp", "tls") and len(argv) in (3, 4): - constructor = getattr(client_channel, proto) - host, port = argv[1:3] - sqlname = None if len(argv) == 3 else argv[3] - else: - sys.exit("Unexpected arguments: %s" % " ".join(argv)) - - try: - client = constructor(host, port) - if sqlname: - client.setup_sql(sqlname) - while True: - if client.current_serial is None or client.current_nonce is None: - client.push_pdu(reset_query()) - else: - client.push_pdu(serial_query(serial = client.current_serial, nonce = client.current_nonce)) - wakeup = time.time() + 600 - while True: - remaining = wakeup - time.time() - if remaining < 0: - break - asyncore.loop(timeout = remaining, count = 1) - - except KeyboardInterrupt: - sys.exit(0) - finally: - if client is not None: - client.cleanup() - -def bgpdump_convert_main(argv): - """ - Simulate route origin data from a set of BGP dump files. - - * DANGER WILL ROBINSON! * - * DEBUGGING AND TEST USE ONLY! * - - argv is an ordered list of filenames. Each file must be a BGP RIB - dumps, a BGP UPDATE dumps, or an AXFR dump in the format written by - this program's --cronjob command. The first file must be a RIB dump - or AXFR dump, it cannot be an UPDATE dump. Output will be a set of - AXFR and IXFR files with timestamps derived from the BGP dumps, - which can be used as input to this program's --server command for - test purposes. SUCH DATA PROVIDE NO SECURITY AT ALL. - - You have been warned. 
- """ - - first = True - db = None - axfrs = [] - - for filename in argv: - - if filename.endswith(".ax"): - blather("Reading %s" % filename) - db = axfr_set.load(filename) - - elif os.path.basename(filename).startswith("ribs."): - db = axfr_set.parse_bgpdump_rib_dump(filename) - db.save_axfr() - - elif not first: - assert db is not None - db.parse_bgpdump_update(filename) - db.save_axfr() - - else: - sys.exit("First argument must be a RIB dump or .ax file, don't know what to do with %s" % filename) - - blather("DB serial now %d (%s)" % (db.serial, db.serial)) - if first and read_current() == (None, None): - db.mark_current() - first = False - - for axfr in axfrs: - blather("Loading %s" % axfr) - ax = axfr_set.load(axfr) - blather("Computing changes from %d (%s) to %d (%s)" % (ax.serial, ax.serial, db.serial, db.serial)) - db.save_ixfr(ax) - del ax - - axfrs.append(db.filename()) - - -def bgpdump_select_main(argv): - """ - Simulate route origin data from a set of BGP dump files. - - * DANGER WILL ROBINSON! * - * DEBUGGING AND TEST USE ONLY! * - - Set current serial number to correspond to an .ax file created by - converting BGP dump files. SUCH DATA PROVIDE NO SECURITY AT ALL. - - You have been warned. - """ - - serial = None - try: - head, sep, tail = os.path.basename(argv[0]).partition(".") - if len(argv) == 1 and head.isdigit() and sep == "." and tail == "ax": - serial = timestamp(head) - except: - pass - if serial is None: - sys.exit("Argument must be name of a .ax file") - - nonce = read_current()[1] - if nonce is None: - nonce = new_nonce() - - write_current(serial, nonce) - kick_all(serial) - - -class bgpsec_replay_clock(object): - """ - Internal clock for replaying BGP dump files. - - * DANGER WILL ROBINSON! * - * DEBUGGING AND TEST USE ONLY! * - - This class replaces the normal on-disk serial number mechanism with - an in-memory version based on pre-computed data. - bgpdump_server_main() uses this hack to replay historical data for - testing purposes. 
DO NOT USE THIS IN PRODUCTION. - - You have been warned. - """ - - def __init__(self): - self.timestamps = [timestamp(int(f.split(".")[0])) for f in glob.iglob("*.ax")] - self.timestamps.sort() - self.offset = self.timestamps[0] - int(time.time()) - self.nonce = new_nonce() - - def __nonzero__(self): - return len(self.timestamps) > 0 - - def now(self): - return timestamp.now(self.offset) - - def read_current(self): - now = self.now() - while len(self.timestamps) > 1 and now >= self.timestamps[1]: - del self.timestamps[0] - return self.timestamps[0], self.nonce - - def siesta(self): - now = self.now() - if len(self.timestamps) <= 1: - return None - elif now < self.timestamps[1]: - return self.timestamps[1] - now - else: - return 1 - - -def bgpdump_server_main(argv): - """ - Simulate route origin data from a set of BGP dump files. - - * DANGER WILL ROBINSON! * - * DEBUGGING AND TEST USE ONLY! * - - This is a clone of server_main() which replaces the external serial - number updates triggered via the kickme channel by cronjob_main with - an internal clocking mechanism to replay historical test data. - - DO NOT USE THIS IN PRODUCTION. - - You have been warned. - """ - - blather("[Starting]") - if len(argv) > 1: - sys.exit("Unexpected arguments: %r" % (argv,)) - if argv: - try: - os.chdir(argv[0]) - except OSError, e: - sys.exit(e) - # - # Yes, this really does replace a global function with a bound - # method to our clock object. Fun stuff, huh? 
- # - global read_current - clock = bgpsec_replay_clock() - read_current = clock.read_current - # - try: - server = server_channel() - old_serial = server.get_serial() - blather("[Starting at serial %d (%s)]" % (old_serial, old_serial)) - while clock: - new_serial = server.get_serial() - if old_serial != new_serial: - blather("[Serial bumped from %d (%s) to %d (%s)]" % (old_serial, old_serial, new_serial, new_serial)) - server.notify() - old_serial = new_serial - asyncore.loop(timeout = clock.siesta(), count = 1) - except KeyboardInterrupt: - sys.exit(0) - -# Figure out where the scan_roas utility program is today -try: - # Set from autoconf - scan_roas = rpki.autoconf.scan_roas -except NameError: - # Source directory - scan_roas = os.path.normpath(os.path.join(sys.path[0], "..", "utils", - "scan_roas", "scan_roas")) -# If that didn't work, use $PATH and hope for the best -if not os.path.exists(scan_roas): - scan_roas = "scan_roas" - -# Same thing for scan_routercerts -try: - # Set from autoconf - scan_routercerts = rpki.autoconf.scan_routercerts -except NameError: - # Source directory - scan_routercerts = os.path.normpath(os.path.join(sys.path[0], "..", "utils", - "scan_routercerts", "scan_routercerts")) -if not os.path.exists(scan_routercerts): - scan_routercerts = "scan_routercerts" - -force_zero_nonce = False - -kickme_dir = "sockets" -kickme_base = os.path.join(kickme_dir, "kickme") - -main_dispatch = { - "cronjob" : cronjob_main, - "client" : client_main, - "server" : server_main, - "show" : show_main, - "listener_tcp" : listener_tcp_main, - "bgpdump_convert" : bgpdump_convert_main, - "bgpdump_select" : bgpdump_select_main, - "bgpdump_server" : bgpdump_server_main } - -def usage(msg = None): - f = sys.stderr if msg else sys.stdout - f.write("Usage: %s [options] --mode [arguments]\n" % sys.argv[0]) - f.write("\n") - f.write("where options are zero or more of:\n") - f.write("\n") - f.write("--syslog facility.warning_priority[.info_priority]\n") - f.write("\n") 
- f.write("--zero-nonce\n") - f.write("\n") - f.write("and --mode is one of:\n") - f.write("\n") - for name, func in main_dispatch.iteritems(): - f.write("--%s:\n" % name) - f.write(func.__doc__) - f.write("\n") - sys.exit(msg) - -if __name__ == "__main__": - - os.environ["TZ"] = "UTC" - time.tzset() - - mode = None - - syslog_facility, syslog_warning, syslog_info = syslog.LOG_DAEMON, syslog.LOG_WARNING, syslog.LOG_INFO - - opts, argv = getopt.getopt(sys.argv[1:], "hs:z?", ["help", "syslog=", "zero-nonce"] + main_dispatch.keys()) - for o, a in opts: - if o in ("-h", "--help", "-?"): - usage() - elif o in ("-z", "--zero-nonce"): - force_zero_nonce = True - elif o in ("-s", "--syslog"): - try: - a = [getattr(syslog, "LOG_" + i.upper()) for i in a.split(".")] - if len(a) == 2: - a.append(a[1]) - syslog_facility, syslog_warning, syslog_info = a - if syslog_facility < 8 or syslog_warning >= 8 or syslog_info >= 8: - raise ValueError - except: - usage("Bad value specified for --syslog option") - elif len(o) > 2 and o[2:] in main_dispatch: - if mode is not None: - sys.exit("Conflicting modes specified") - mode = o[2:] - - if mode is None: - usage("No mode specified") - - log_tag = "rtr-origin/" + mode - - if mode in ("server", "bgpdump_server"): - log_tag += hostport_tag() - - if mode in ("cronjob", "server" , "bgpdump_server"): - syslog.openlog(log_tag, syslog.LOG_PID, syslog_facility) - def log(msg): - return syslog.syslog(syslog_warning, str(msg)) - def blather(msg): - return syslog.syslog(syslog_info, str(msg)) - - elif mode == "show": - def log(msg): - try: - os.write(sys.stdout.fileno(), "%s\n" % msg) - except OSError, e: - if e.errno != errno.EPIPE: - raise - blather = log - - else: - def log(msg): - sys.stderr.write("%s %s[%d]: %s\n" % (time.strftime("%F %T"), log_tag, os.getpid(), msg)) - blather = log - - main_dispatch[mode](argv) diff --git a/rp/rtr-origin/rules.darwin.mk b/rp/rtr-origin/rules.darwin.mk deleted file mode 100644 index 1230db92..00000000 --- 
a/rp/rtr-origin/rules.darwin.mk +++ /dev/null @@ -1,9 +0,0 @@ -# $Id$ - -install-always: install-binary - -install-postconf: install-listener - -install-listener: - @echo "No rule for $@ on this platform (yet), you'll have to do that yourself if it matters." - diff --git a/rp/rtr-origin/rules.freebsd.mk b/rp/rtr-origin/rules.freebsd.mk deleted file mode 100644 index df99da47..00000000 --- a/rp/rtr-origin/rules.freebsd.mk +++ /dev/null @@ -1,37 +0,0 @@ -# $Id$ - -install-always: install-binary - -install-postconf: install-listener - -install-listener: .FORCE - @if /usr/bin/egrep -q '^rpki-rtr' /etc/services ; \ - then \ - echo "You already have a /etc/services entry for rpki-rtr, so I will use it."; \ - elif echo >>/etc/services "rpki-rtr ${RPKI_RTR_PORT}/tcp #RFC 6810" ; \ - then \ - echo "Added rpki-rtr to /etc/services."; \ - else \ - echo "Adding rpki-rtr to /etc/services failed, please fix this, then try again."; \ - exit 1; \ - fi - @if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp[ ]" /etc/inetd.conf; \ - then \ - echo "You already have an inetd.conf entry for rpki-rtr on TCPv4, so I will use it."; \ - elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpkirtr /usr/local/bin/rtr-origin rtr-origin --server /var/rcynic/rpki-rtr"; \ - then \ - echo "Added rpki-rtr for TCPv4 to /etc/inetd.conf."; \ - else \ - echo "Adding rpki-rtr for TCPv4 to /etc/inetd.conf failed, please fix this, then try again."; \ - exit 1; \ - fi - @if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp6[ ]" /etc/inetd.conf; \ - then \ - echo "You already have an inetd.conf entry for rpki-rtr on TCPv6, so I will use it."; \ - elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpkirtr /usr/local/bin/rtr-origin rtr-origin --server /var/rcynic/rpki-rtr"; \ - then \ - echo "Added rpki-rtr for TCPv6 to /etc/inetd.conf."; \ - else \ - echo "Adding rpki-rtr for TCPv6 to /etc/inetd.conf failed, please fix this, then try again."; \ - exit 1; \ - fi diff --git a/rp/rtr-origin/rules.linux.mk 
b/rp/rtr-origin/rules.linux.mk deleted file mode 100644 index 23f90f69..00000000 --- a/rp/rtr-origin/rules.linux.mk +++ /dev/null @@ -1,29 +0,0 @@ -# $Id$ - -install-always: install-binary install-listener - -install-postconf: - @true - -# Only need to make listener if not already present - -install-listener: ${DESTDIR}/etc/xinetd.d/rpki-rtr - -${DESTDIR}/etc/xinetd.d/rpki-rtr: - @${AWK} 'BEGIN { \ - print "service rpki-rtr"; \ - print "{"; \ - print " type = UNLISTED"; \ - print " flags = IPv4"; \ - print " socket_type = stream"; \ - print " protocol = tcp"; \ - print " port = ${RPKI_RTR_PORT}"; \ - print " wait = no"; \ - print " user = rpkirtr"; \ - print " server = ${bindir}/${BIN}"; \ - print " server_args = --server /var/rcynic/rpki-rtr"; \ - print "}"; \ - }' >xinetd.rpki-rtr - ${INSTALL} -d ${DESTDIR}/etc/xinetd.d - ${INSTALL} -m 644 xinetd.rpki-rtr $@ - rm xinetd.rpki-rtr diff --git a/rp/rtr-origin/rules.unknown.mk b/rp/rtr-origin/rules.unknown.mk deleted file mode 100644 index fb16e93a..00000000 --- a/rp/rtr-origin/rules.unknown.mk +++ /dev/null @@ -1,8 +0,0 @@ -# $Id$ - -install-always: install-binary - -install-postconf: install-listener - -install-listener: - @echo "Don't know how to make $@ on this platform"; exit 1 diff --git a/rp/rtr-origin/server.sh b/rp/rtr-origin/server.sh deleted file mode 100755 index 7ccf2f38..00000000 --- a/rp/rtr-origin/server.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh - -# -# Wrapper for rtr-origin.py in server mode, for testing. -# -# In production we would probably want to handle all of this either -# directly in the Python code or in the command= setting for a -# particular ssh key, but for initial testing it's simpler to run a -# shall script to change to the right directory and supply any -# necessary command line arguments. -# -# Be warned that almost any error here will cause the subsystem to -# fail mysteriously, leaving behind naught but a SIGCHILD log message -# from sshd as this script dies. 
- -cd /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin - -exec /usr/local/bin/python rtr-origin.py --server diff --git a/rp/rtr-origin/sshd.conf b/rp/rtr-origin/sshd.conf deleted file mode 100644 index 0124fc4c..00000000 --- a/rp/rtr-origin/sshd.conf +++ /dev/null @@ -1,23 +0,0 @@ -# $Id$ -# -# sshd config file for testing. Invoke thusly: -# -# /usr/sbin/sshd -f /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/sshd.conf -d - -Port 2222 -Protocol 2 -ListenAddress 127.0.0.1 -ListenAddress ::1 -HostKey /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/ssh_host_rsa_key -PermitRootLogin no -PubkeyAuthentication yes -AuthorizedKeysFile /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/authorized_keys -PasswordAuthentication no -PermitEmptyPasswords no -ChallengeResponseAuthentication no -UsePAM no -AllowTcpForwarding no -X11Forwarding no -UseDNS no -PidFile /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/sshd.pid -Subsystem rpki-rtr /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/server.sh diff --git a/rpki/relaxng.py b/rpki/relaxng.py index 917ed6ed..3dcf3d4f 100644 --- a/rpki/relaxng.py +++ b/rpki/relaxng.py @@ -2,9 +2,9 @@ import lxml.etree -## @var relaxng/left_right -## Parsed RelaxNG relaxng/left_right schema -relaxng/left_right = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' +## @var left_right +## Parsed RelaxNG left_right schema +left_right = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' ''')) -## @var relaxng/up_down -## Parsed RelaxNG relaxng/up_down schema -relaxng/up_down = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' +## @var myrpki +## Parsed RelaxNG myrpki schema +myrpki = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' - - - + + + 2 + + + 512000 - [\-,0-9]* - + - 512000 - [\-,/.0-9]* + 255 + [\-_A-Za-z0-9]+ - + - 512000 - [\-,/:0-9a-fA-F]* + 255 + [\-_A-Za-z0-9/]+ - - - 1 - 1024 + + + 4096 - - - 27 - 1024 - + + - - - 1 - 1024 + + + 512000 + [\-,0-9]+ - + - 10 - 4096 + 512000 + [\-,0-9/.]+ - - - 4 + + 512000 + [\-,0-9/:a-fA-F]+ - - - - - 1 - - - - - - - - - - - - - - 
list - - - - - - list_response - - - - - - issue - - - - - - issue_response - - - - - - revoke - - - - - - revoke_response - - - - - - error_response - - - - - - - - - - + + + .*Z + - - - - - - - - - - - - - - - - + + + + + - - + + - - - 1024 - rsync://.+ - + + - - - + + + + + + + + + + + + + + + + + + + - - + + - - + + - - + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - + + + - - - + + + - - - + + + + + + + + - + + + + + + + + + + + + + + + + + + + + + + + - - - - - + + + + + + + + + + - - + + + + + none + + + offer + + + + referral + + + + + - - - - + + + + + - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - 9999 - + + + + + + + + + + + + + + + + + - - - - - - - 1024 - + + + + + + + + + confirmed + + + + + + + + + + + + + + + - - + + + + + + + + + + + + + + + + + + + + + + + ''')) -## @var relaxng/publication -## Parsed RelaxNG relaxng/publication schema -relaxng/publication = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' +## @var publication +## Parsed RelaxNG publication schema +publication = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' - - - - publish - - - - - - - - - - - - publish - - - - - - - - - - - withdraw - - - - - - - - - - - withdraw - - - - - - - - - - - - publish - - - - - - - - - - - - publish + + + + + - - - - - + + + + + + + + + + + - - + + - withdraw + create - + + + - - + + - withdraw + create - + - - - + + - publish + set - - + + + - - + + - publish + set - + - - + + - withdraw + get - + - - + + - withdraw + get - + + - - - + + - publish + list - - - - + + - publish + list - + + - - + + - withdraw + destroy - + - - + + - withdraw + destroy - + - - - + + + publish @@ -1890,8 +1808,8 @@ relaxng/publication = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' - - + + publish @@ -1901,8 +1819,8 @@ relaxng/publication = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' - - + + withdraw @@ -1912,421 +1830,222 @@ relaxng/publication = 
lxml.etree.RelaxNG(lxml.etree.fromstring(r''' - - + + withdraw - - - - - - - - - 1024 - - - - - - - - - - - - - 512000 - - - - - - -''')) - -## @var relaxng/myrpki -## Parsed RelaxNG relaxng/myrpki schema -relaxng/myrpki = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' - - - - 2 - - - - 512000 - - - - - 255 - [\-_A-Za-z0-9]+ - - - - - 255 - [\-_A-Za-z0-9/]+ - - - - - 4096 - - - - - - - - 512000 - [\-,0-9]+ - - - - - 512000 - [\-,0-9/.]+ - - - - - 512000 - [\-,0-9/:a-fA-F]+ - - - - - .*Z - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + publish + - - - + + + + + + + + + publish + - - - + + + + + + + + withdraw + - - - + + + + + + + + withdraw + - - - + + - - - - - - + + + + + + publish - - + + + + + + + + + + + publish - - - + + + + - - - - - - + + + + + withdraw - + + + + - - - + + + + withdraw + - - - + - + - - - - - none + + + + + publish - - offer + + + + + + + + + + + publish - - - referral - - - - - + + + + + - - - - - + + + + withdraw + + + + + + + + + + + withdraw - - - + + + + + + + + + publish + - - - + - - - - - + + + + + + + + publish - - - - - - - - - + + - - - - - - - - - - - - + + + + + withdraw - - - - + + + + - - - - - - - - - confirmed - - - - - - - - - - - - + + + + + withdraw - - - - - - - + + + + + + + + 1024 + + + + - + - - - - - - - - - - + + - + + + 512000 + + - + ''')) -## @var relaxng/router_certificate -## Parsed RelaxNG relaxng/router_certificate schema -relaxng/router_certificate = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' +## @var router_certificate +## Parsed RelaxNG router_certificate schema +router_certificate = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' - - - 1 - - - + + + 1 + + + + 512000 + + + + + + + + 512000 + [0-9][\-,0-9]* + + + + + .*Z + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +''')) + +## @var up_down +## Parsed 
RelaxNG up_down schema +up_down = lxml.etree.RelaxNG(lxml.etree.fromstring(r''' + + + + 512000 + [\-,0-9]* - - + + + 512000 + [\-,/.0-9]* + - + 512000 - [0-9][\-,0-9]* + [\-,/:0-9a-fA-F]* - - - .*Z + + + 1 + 1024 - - - - - - - - - - - - - - + + + 27 + 1024 + - - - + + + 1 + 1024 + + + + + 10 + 4096 + + + + + 4 + 512000 + + + + - + + 1 + + + + + + + - - - - + + + list + + + + + + list_response + + + + + + issue + + + + + + issue_response + + + + + + revoke + + + + + + revoke_response + + + + + + error_response + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1024 + rsync://.+ + + + - - + + + + + + + + + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 9999 + + + + + + + + + 1024 + + + +