aboutsummaryrefslogtreecommitdiff
path: root/rp
diff options
context:
space:
mode:
Diffstat (limited to 'rp')
-rw-r--r--rp/rcynic/Doxyfile1679
-rw-r--r--rp/rcynic/Makefile.in237
-rw-r--r--rp/rcynic/README13
-rw-r--r--rp/rcynic/bio_f_linebreak.c268
-rw-r--r--rp/rcynic/bio_f_linebreak.h10
-rw-r--r--rp/rcynic/defstack.h134
-rwxr-xr-xrp/rcynic/make-tal.sh42
-rwxr-xr-xrp/rcynic/rc-scripts/darwin/RCynic42
-rw-r--r--rp/rcynic/rc-scripts/darwin/StartupParameters.plist19
-rwxr-xr-xrp/rcynic/rc-scripts/freebsd/rc.d.rcynic44
-rwxr-xr-xrp/rcynic/rcynic-cron106
-rwxr-xr-xrp/rcynic/rcynic-html658
-rwxr-xr-xrp/rcynic/rcynic-svn190
-rwxr-xr-xrp/rcynic/rcynic-text118
-rw-r--r--rp/rcynic/rcynic.c6070
-rw-r--r--rp/rcynic/rcynic.xsl312
-rw-r--r--rp/rcynic/rpki-torrent.py721
-rw-r--r--rp/rcynic/rules.darwin.mk108
-rw-r--r--rp/rcynic/rules.freebsd.mk56
-rw-r--r--rp/rcynic/rules.linux.mk92
-rw-r--r--rp/rcynic/rules.unknown.mk4
-rw-r--r--rp/rcynic/sample-rcynic.conf39
-rw-r--r--rp/rcynic/sample-trust-anchors/README26
-rw-r--r--rp/rcynic/sample-trust-anchors/afrinic.tal8
-rw-r--r--rp/rcynic/sample-trust-anchors/altca.tal9
-rw-r--r--rp/rcynic/sample-trust-anchors/apnic-rpki-root-afrinic-origin.tal8
-rw-r--r--rp/rcynic/sample-trust-anchors/apnic-rpki-root-arin-origin.tal8
-rw-r--r--rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal8
-rw-r--r--rp/rcynic/sample-trust-anchors/apnic-rpki-root-lacnic-origin.tal8
-rw-r--r--rp/rcynic/sample-trust-anchors/apnic-rpki-root-ripe-origin.tal8
-rw-r--r--rp/rcynic/sample-trust-anchors/apnic-testbed.tal9
-rw-r--r--rp/rcynic/sample-trust-anchors/bbn-testbed.tal.disabled8
-rw-r--r--rp/rcynic/sample-trust-anchors/lacnic.tal8
-rw-r--r--rp/rcynic/sample-trust-anchors/ripe-ncc-root.tal9
-rw-r--r--rp/rcynic/sample-trust-anchors/ripe-pilot.tal7
-rw-r--r--rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal9
-rw-r--r--rp/rcynic/sample-trust-anchors/testbed-apnicrpki.tal.disabled8
-rw-r--r--rp/rcynic/static-rsync/Makefile.in44
-rw-r--r--rp/rcynic/static-rsync/README15
-rw-r--r--rp/rcynic/static-rsync/patches/patch-CVE-2007-409160
-rw-r--r--rp/rcynic/static-rsync/rsync-2.6.9.tar.gzbin0 -> 811841 bytes
-rwxr-xr-xrp/rcynic/validation_status34
-rw-r--r--rp/rtr-origin/Makefile.in63
-rw-r--r--rp/rtr-origin/README11
-rwxr-xr-xrp/rtr-origin/rtr-origin2278
-rw-r--r--rp/rtr-origin/rules.darwin.mk9
-rw-r--r--rp/rtr-origin/rules.freebsd.mk37
-rw-r--r--rp/rtr-origin/rules.linux.mk29
-rw-r--r--rp/rtr-origin/rules.unknown.mk8
-rwxr-xr-xrp/rtr-origin/server.sh17
-rw-r--r--rp/rtr-origin/sshd.conf23
-rw-r--r--rp/utils/Makefile.in9
-rw-r--r--rp/utils/README12
-rw-r--r--rp/utils/dot.awk34
-rw-r--r--rp/utils/find_roa.c356
-rw-r--r--rp/utils/find_roa/Makefile.in56
-rw-r--r--rp/utils/hashdir.c217
-rw-r--r--rp/utils/hashdir/Makefile.in55
-rw-r--r--rp/utils/print_roa.c384
-rw-r--r--rp/utils/print_roa/Makefile.in52
-rw-r--r--rp/utils/print_rpki_manifest.c235
-rw-r--r--rp/utils/print_rpki_manifest/Makefile.in52
-rw-r--r--rp/utils/scan_roas.c305
-rw-r--r--rp/utils/scan_roas/Makefile.in52
-rwxr-xr-xrp/utils/scan_routercerts.py69
-rw-r--r--rp/utils/scan_routercerts/Makefile.in41
-rwxr-xr-xrp/utils/strip_roa.sh39
-rw-r--r--rp/utils/table.awk35
-rw-r--r--rp/utils/test_roa.sh35
-rw-r--r--rp/utils/uri.c248
-rw-r--r--rp/utils/uri/Makefile.in31
71 files changed, 16048 insertions, 0 deletions
diff --git a/rp/rcynic/Doxyfile b/rp/rcynic/Doxyfile
new file mode 100644
index 00000000..aa183715
--- /dev/null
+++ b/rp/rcynic/Doxyfile
@@ -0,0 +1,1679 @@
+# Doxyfile 1.7.3
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = rcynic
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 1.0
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give the viewer a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = YES
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this
+# tag. The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
+# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
+# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even if there is only one candidate or it is obvious which candidate to choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = .
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS = *.[ch]
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERNS (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's
+# filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [0,1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+# Note that a value of 0 will completely suppress the enum values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the mathjax.org site, so you can quickly see the result without installing
+# MathJax, but it is strongly recommended to install a local copy of MathJax
+# before deployment.
+
+MATHJAX_RELPATH = http://www.mathjax.org/mathjax
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to setup
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = YES
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = letter
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = YES
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = YES
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader.
+# This is useful
+# if you want to understand what is going on.
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = NO
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will write a font called Helvetica to the output
+# directory and reference it in all dot files that doxygen generates.
+# When you want a differently looking font you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = YES
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, gif or svg.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/rp/rcynic/Makefile.in b/rp/rcynic/Makefile.in
new file mode 100644
index 00000000..06f7d9cd
--- /dev/null
+++ b/rp/rcynic/Makefile.in
@@ -0,0 +1,237 @@
+# $Id$
+
+NAME = rcynic
+
+BIN = ${NAME}
+SRC = ${NAME}.c
+OBJ = ${NAME}.o
+
+GEN = defstack.h
+
+OBJS = ${OBJ} bio_f_linebreak.o
+
+CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror-implicit-function-declaration
+LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@
+LIBS = @LIBS@
+
+AWK = @AWK@
+SORT = @SORT@
+PYTHON = @PYTHON@
+RRDTOOL = @RRDTOOL@
+INSTALL = @INSTALL@
+
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir = @abs_top_builddir@
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+datarootdir = @datarootdir@
+datadir = @datadir@
+localstatedir = @localstatedir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+sysconfdir = @sysconfdir@
+
+abs_builddir = @abs_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir = @abs_top_builddir@
+srcdir = @srcdir@
+
+RCYNIC_BIN_RCYNIC = @RCYNIC_BIN_RCYNIC@
+RCYNIC_CONF_DATA = @RCYNIC_CONF_DATA@
+RCYNIC_CONF_FILE = @RCYNIC_CONF_FILE@
+RCYNIC_CONF_RSYNC = @RCYNIC_CONF_RSYNC@
+RCYNIC_CONF_TA_DIR = @RCYNIC_CONF_TA_DIR@
+RCYNIC_CRON_USER = @RCYNIC_CRON_USER@
+RCYNIC_DATA_DIR = ${RCYNIC_DIR}/data
+RCYNIC_DIR = @RCYNIC_DIR@
+RCYNIC_DIRS = ${RCYNIC_TA_DIR} ${RCYNIC_JAIL_DIRS} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
+RCYNIC_GECOS = RPKI Validation System
+RCYNIC_GROUP = rcynic
+RCYNIC_HTML_DIR = @RCYNIC_HTML_DIR@
+RCYNIC_INSTALL_TARGETS = @RCYNIC_INSTALL_TARGETS@
+RCYNIC_JAIL_DIRS = @RCYNIC_JAIL_DIRS@
+RCYNIC_STATIC_RSYNC = @RCYNIC_STATIC_RSYNC@
+RCYNIC_TA_DIR = @RCYNIC_TA_DIR@
+RCYNIC_USER = rcynic
+RPKIRTR_DIR = ${RCYNIC_DIR}/rpki-rtr
+RPKIRTR_GECOS = RPKI router server
+RPKIRTR_GROUP = rpkirtr
+RPKIRTR_MODE = 775
+RPKIRTR_USER = rpkirtr
+
+SCRIPTS = rcynic-text rcynic-html rcynic-svn validation_status rcynic-cron
+
+all: ${BIN} ${SCRIPTS} ${RCYNIC_STATIC_RSYNC}
+
+clean:
+ if test -r static-rsync/Makefile; then cd static-rsync; ${MAKE} $@; fi
+ rm -f ${BIN} ${OBJS} ${SCRIPTS}
+
+${OBJ}: ${SRC} ${GEN}
+
+${BIN}: ${OBJS}
+ ${CC} ${CFLAGS} -o $@ ${OBJS} ${LDFLAGS} ${LIBS}
+
+${GEN}: ${SRC}
+ ${PYTHON} ${abs_top_srcdir}/buildtools/defstack.py ${SRC} >$@.tmp
+ mv $@.tmp $@
+
+COMPILE_PYTHON = \
+ AC_PYTHON_INTERPRETER='${PYTHON}' \
+ AC_RRDTOOL_BINARY='${RRDTOOL}' \
+ ${PYTHON} ${abs_top_srcdir}/buildtools/make-rcynic-script.py <$? >$@; \
+ chmod 755 $@
+
+COMPILE_PYTHON_CRON = \
+ AC_PYTHON_INTERPRETER='${PYTHON}' \
+ AC_RCYNIC_USER='${RCYNIC_USER}' \
+ AC_RCYNIC_DIR='${RCYNIC_DIR}' \
+ AC_bindir='${bindir}' \
+ AC_sbindir='${sbindir}' \
+ AC_sysconfdir='${sysconfdir}' \
+ AC_libexecdir='${libexecdir}' \
+ AC_RCYNIC_HTML_DIR='${RCYNIC_HTML_DIR}' \
+ ${PYTHON} ${abs_top_srcdir}/buildtools/make-rcynic-script.py <$? >$@; \
+ chmod 755 $@
+
+rcynic-text: rcynic-text.py
+ ${COMPILE_PYTHON}
+
+rcynic-html: rcynic-html.py
+ ${COMPILE_PYTHON}
+
+rcynic-svn: rcynic-svn.py
+ ${COMPILE_PYTHON}
+
+validation_status: validation_status.py
+ ${COMPILE_PYTHON}
+
+rcynic-cron: rcynic-cron.py
+ ${COMPILE_PYTHON_CRON}
+
+tags: TAGS
+
+TAGS: ${SRC} ${GEN}
+ etags ${SRC} ${GEN}
+
+test: ${BIN}
+ if test -r rcynic.conf; \
+ then \
+ ./${BIN} -j 0 && \
+ test -r rcynic.xml && \
+ echo && \
+ ./rcynic-text rcynic.xml; \
+ else \
+ echo No rcynic.conf, skipping test; \
+ fi
+
+uninstall deinstall:
+ @echo Sorry, automated deinstallation of rcynic is not implemented yet
+
+distclean: clean
+ if test -r static-rsync/Makefile; then cd static-rsync; ${MAKE} $@; fi
+ rm -f Makefile
+
+static-rsync/rsync:
+ @echo "Building static rsync for use in chroot jail"
+ cd static-rsync; ${MAKE} all
+
+install: all ${RCYNIC_INSTALL_TARGETS}
+
+install-always: \
+ install-directories install-scripts install-rcynic install-rcynic-conf
+
+install-postconf: \
+ install-user-and-group install-directory-ownership install-crontab
+
+install-jailed: \
+ install-static-rsync install-shared-libraries install-rc-scripts
+
+install-directories: ${RCYNIC_DIRS}
+
+${RCYNIC_DIRS} ${DESTDIR}${bindir} ${DESTDIR}${sysconfdir}:
+ ${INSTALL} -v -d $@
+
+install-directory-ownership: ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets
+ chown ${RCYNIC_USER}:${RCYNIC_GROUP} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR}
+ chown ${RPKIRTR_USER}:${RCYNIC_GROUP} ${RPKIRTR_DIR}/sockets
+ chmod ${RPKIRTR_MODE} ${RPKIRTR_DIR}/sockets
+
+install-rcynic-conf: ${RCYNIC_CONF_FILE}
+
+${RCYNIC_CONF_FILE}:
+ @echo
+ @echo Found no ${RCYNIC_CONF_FILE}, creating basic config and installing default trust anchor locators.
+ @echo You might want to edit this.
+ @echo
+ ${INSTALL} -v -d ${RCYNIC_TA_DIR}
+ ${INSTALL} -v -p -m 444 sample-trust-anchors/*.tal ${RCYNIC_TA_DIR}
+ @echo > $@.tmp '# Basic rcynic configuration file with default trust anchors.'
+ @echo >>$@.tmp '# See documentation for details.'
+ @echo >>$@.tmp ''
+ @echo >>$@.tmp '[rcynic]'
+ @echo >>$@.tmp 'rsync-program = ${RCYNIC_CONF_RSYNC}'
+ @echo >>$@.tmp 'authenticated = ${RCYNIC_CONF_DATA}/authenticated'
+ @echo >>$@.tmp 'unauthenticated = ${RCYNIC_CONF_DATA}/unauthenticated'
+ @echo >>$@.tmp 'xml-summary = ${RCYNIC_CONF_DATA}/rcynic.xml'
+ @echo >>$@.tmp 'jitter = 600'
+ @echo >>$@.tmp 'max-parallel-fetches = 8'
+ @echo >>$@.tmp 'use-syslog = true'
+ @echo >>$@.tmp 'log-level = log_usage_err'
+ @echo >>$@.tmp 'trust-anchor-directory = ${RCYNIC_CONF_TA_DIR}'
+ @chmod 444 $@.tmp
+ @mv -f $@.tmp $@
+
+install-rcynic: ${RCYNIC_BIN_RCYNIC}
+
+${RCYNIC_BIN_RCYNIC}: ${BIN}
+ ${INSTALL} -p -m 555 ${BIN} $@
+
+install-static-rsync: ${RCYNIC_DIR}/bin/rsync
+
+${RCYNIC_DIR}/bin/rsync: static-rsync/rsync
+ ${INSTALL} -p -m 555 static-rsync/rsync $@
+
+install-scripts: \
+ ${DESTDIR}${bindir} \
+ ${DESTDIR}${bindir}/rcynic-text \
+ ${DESTDIR}${bindir}/rcynic-html \
+ ${DESTDIR}${bindir}/rcynic-svn \
+ ${DESTDIR}${bindir}/rcynic-cron \
+ ${DESTDIR}${bindir}/validation_status
+
+${DESTDIR}${bindir}/rcynic-text: rcynic-text
+ ${INSTALL} -p -m 555 rcynic-text $@
+
+${DESTDIR}${bindir}/rcynic-html: rcynic-html
+ ${INSTALL} -p -m 555 rcynic-html $@
+
+${DESTDIR}${bindir}/rcynic-svn: rcynic-svn
+ ${INSTALL} -p -m 555 rcynic-svn $@
+
+${DESTDIR}${bindir}/rcynic-cron: rcynic-cron
+ ${INSTALL} -p -m 555 rcynic-cron $@
+
+${DESTDIR}${bindir}/validation_status: validation_status
+ ${INSTALL} -p -m 555 validation_status $@
+
+.FORCE:
+
+install-crontab: .FORCE
+ @if test "X`/usr/bin/crontab -l -u ${RCYNIC_CRON_USER} 2>/dev/null`" != "X"; \
+ then \
+ echo "${RCYNIC_CRON_USER} already has a crontab, leaving it alone"; \
+ else \
+ echo "Setting up ${RCYNIC_CRON_USER}'s crontab to run rcynic-cron script"; \
+ ${AWK} -v t=`hexdump -n 2 -e '"%u\n"' /dev/random` '\
+ BEGIN {printf "MAILTO=root\n%u * * * *\texec ${bindir}/rcynic-cron\n", t % 60}' | \
+ /usr/bin/crontab -u ${RCYNIC_CRON_USER} -; \
+ fi
+
+# Platform-specific rules below here.
+
+@RCYNIC_MAKE_RULES@
diff --git a/rp/rcynic/README b/rp/rcynic/README
new file mode 100644
index 00000000..ecc92ac5
--- /dev/null
+++ b/rp/rcynic/README
@@ -0,0 +1,13 @@
+$Id$
+
+"Cynical rsync" -- fetch and validate RPKI certificates.
+
+This is the primary RPKI relying party validation tool.
+
+See:
+
+- The primary documentation at http://trac.rpki.net/
+
+- The PDF manual in ../doc/manual.pdf, or
+
+- The flat text page ../doc/doc.RPKI.RP.rcynic
diff --git a/rp/rcynic/bio_f_linebreak.c b/rp/rcynic/bio_f_linebreak.c
new file mode 100644
index 00000000..c2d9fb4d
--- /dev/null
+++ b/rp/rcynic/bio_f_linebreak.c
@@ -0,0 +1,268 @@
+/* $Id$ */
+
+/** @file bio_f_linebreak.c
+ *
+ * This implements a trivial filter BIO (see OpenSSL manual) which
+ * does one rather silly thing: on read, it inserts line breaks into
+ * the input stream at regular intervals.
+ *
+ * You might reasonably ask why anyone would want such a thing. The
+ * answer is that OpenSSL's Base64 filter BIO has two input modes,
+ * neither of which is really useful for reading generalized Base64
+ * input. In one mode, it requires line breaks at most every 79
+ * characters; in the other mode, it requires that there be no
+ * whitespace of any kind at all. These modes work for the things
+ * that OpenSSL itself does with Base64 decoding, but fail miserably
+ * when used to read free-form Base64 text.
+ *
+ * The real solution would be to rewrite OpenSSL's Base64 filter to
+ * support a third mode in which it accepts generalized Base64 text,
+ * but that's been suggested before and nothing has been done about
+ * it, probably because OpenSSL's Base64 implementation is completely
+ * line-oriented and rather nasty.
+ *
+ * So this filter is a stop-gap to let us get the job done. Since it
+ * uses a (relatively) well-defined OpenSSL internal API, it should be
+ * reasonably stable.
+ *
+ * 98% of the code in this module is derived from "null filter" BIO
+ * that ships with OpenSSL (BIO_TYPE_NULL_FILTER), so I consider this
+ * to be a derivative work, thus am leaving it under OpenSSL's license.
+ */
+
+/* Original crypto/bio/bf_null.c code was:
+ *
+ * Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <openssl/bio.h>
+
+#include "bio_f_linebreak.h"
+
+#ifndef BIO_TYPE_LINEBREAK_FILTER
+#define BIO_TYPE_LINEBREAK_FILTER (99 | BIO_TYPE_FILTER)
+#endif
+
+#ifndef LINEBREAK_MAX_LINE
+#define LINEBREAK_MAX_LINE 72 /* We break anything longer than this */
+#endif
+
+static int linebreak_new(BIO *b)
+{
+ b->init = 1;
+ b->ptr = NULL;
+ b->flags = 0;
+ b->num = 0;
+ return 1;
+}
+
+static int linebreak_free(BIO *b)
+{
+ return b != NULL;
+}
+
+static int linebreak_read(BIO *b, char *out, int outl)
+{
+ int ret = 0, want, n, i;
+
+ if (out == NULL || b->next_bio == NULL || outl <= 0)
+ return 0;
+
+ while (outl > 0) {
+
+ if (b->num >= LINEBREAK_MAX_LINE) {
+ b->num = 0;
+ *out++ = '\n';
+ outl--;
+ ret++;
+ continue;
+ }
+
+ want = LINEBREAK_MAX_LINE - b->num;
+ if (want > outl)
+ want = outl;
+
+ n = BIO_read(b->next_bio, out, want);
+
+ BIO_clear_retry_flags(b);
+ BIO_copy_next_retry(b);
+
+ if (n > 0) {
+ for (i = n - 1; i >= 0; i--)
+ if (out[i] == '\n')
+ break;
+ if (i >= 0)
+ b->num = n - i - 1;
+ else
+ b->num += n;
+ out += n;
+ outl -= n;
+ ret += n;
+ continue;
+ }
+
+ if (ret == 0)
+ ret = n;
+ break;
+ }
+
+ return ret;
+}
+
+static int linebreak_write(BIO *b, const char *in, int inl)
+{
+ int ret = 0;
+
+ if (in == NULL || inl <= 0 || b->next_bio == NULL)
+ return 0;
+
+ ret = BIO_write(b->next_bio, in, inl);
+
+ BIO_clear_retry_flags(b);
+ BIO_copy_next_retry(b);
+
+ return ret;
+}
+
+static long linebreak_ctrl(BIO *b, int cmd, long num, void *ptr)
+{
+ long ret;
+
+ if (b->next_bio == NULL)
+ return 0;
+
+ switch (cmd) {
+
+ case BIO_C_DO_STATE_MACHINE:
+ BIO_clear_retry_flags(b);
+ ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
+ BIO_copy_next_retry(b);
+ return ret;
+
+ case BIO_CTRL_DUP:
+ return 0;
+
+ default:
+ return BIO_ctrl(b->next_bio, cmd, num, ptr);
+
+ }
+}
+
+static long linebreak_callback_ctrl(BIO *b, int cmd, bio_info_cb *cb)
+{
+ if (b->next_bio == NULL)
+ return 0;
+ else
+ return BIO_callback_ctrl(b->next_bio, cmd, cb);
+}
+
+static int linebreak_puts(BIO *b, const char *str)
+{
+ if (b->next_bio == NULL)
+ return 0;
+ else
+ return BIO_puts(b->next_bio, str);
+}
+
+static BIO_METHOD methods_linebreak = {
+ BIO_TYPE_LINEBREAK_FILTER,
+ "Linebreak filter",
+ linebreak_write,
+ linebreak_read,
+ linebreak_puts,
+ NULL, /* No linebreak_gets() */
+ linebreak_ctrl,
+ linebreak_new,
+ linebreak_free,
+ linebreak_callback_ctrl,
+};
+
+BIO_METHOD *BIO_f_linebreak(void)
+{
+ return &methods_linebreak;
+}
+
+
+#ifdef __BIO_F_LINEBREAK_UNIT_TEST__
+
+int main (int argc, char *argv[])
+{
+ BIO *ich = BIO_new_fd(0, 1);
+ BIO *och = BIO_new_fd(1, 1);
+ BIO *fch = BIO_new(BIO_f_linebreak());
+ char buffer[4098];
+ int n;
+
+ if (ich == NULL || och == NULL || fch == NULL)
+ return 1;
+
+ BIO_push(fch, ich);
+ ich = fch;
+ fch = NULL;
+
+ while ((n = BIO_read(ich, buffer, sizeof(buffer))) > 0)
+ BIO_write(och, buffer, n);
+
+ BIO_free_all(ich);
+ BIO_free_all(och);
+ return 0;
+}
+
+#endif
diff --git a/rp/rcynic/bio_f_linebreak.h b/rp/rcynic/bio_f_linebreak.h
new file mode 100644
index 00000000..b5becfa6
--- /dev/null
+++ b/rp/rcynic/bio_f_linebreak.h
@@ -0,0 +1,10 @@
+/* $Id$ */
+
+#ifndef __BIO_F_LINEBREAK__
+#define __BIO_F_LINEBREAK__
+
+#include <openssl/bio.h>
+
+BIO_METHOD *BIO_f_linebreak(void);
+
+#endif /* __BIO_F_LINEBREAK__ */
diff --git a/rp/rcynic/defstack.h b/rp/rcynic/defstack.h
new file mode 100644
index 00000000..370c8129
--- /dev/null
+++ b/rp/rcynic/defstack.h
@@ -0,0 +1,134 @@
+/*
+ * Automatically generated, do not edit.
+ * Generator $Id: defstack.py 4878 2012-11-15 22:13:53Z sra $
+ */
+
+#ifndef __RCYNIC_C__DEFSTACK_H__
+#define __RCYNIC_C__DEFSTACK_H__
+
+/*
+ * Safestack macros for validation_status_t.
+ */
+#define sk_validation_status_t_new(st) SKM_sk_new(validation_status_t, (st))
+#define sk_validation_status_t_new_null() SKM_sk_new_null(validation_status_t)
+#define sk_validation_status_t_free(st) SKM_sk_free(validation_status_t, (st))
+#define sk_validation_status_t_num(st) SKM_sk_num(validation_status_t, (st))
+#define sk_validation_status_t_value(st, i) SKM_sk_value(validation_status_t, (st), (i))
+#define sk_validation_status_t_set(st, i, val) SKM_sk_set(validation_status_t, (st), (i), (val))
+#define sk_validation_status_t_zero(st) SKM_sk_zero(validation_status_t, (st))
+#define sk_validation_status_t_push(st, val) SKM_sk_push(validation_status_t, (st), (val))
+#define sk_validation_status_t_unshift(st, val) SKM_sk_unshift(validation_status_t, (st), (val))
+#define sk_validation_status_t_find(st, val) SKM_sk_find(validation_status_t, (st), (val))
+#define sk_validation_status_t_find_ex(st, val) SKM_sk_find_ex(validation_status_t, (st), (val))
+#define sk_validation_status_t_delete(st, i) SKM_sk_delete(validation_status_t, (st), (i))
+#define sk_validation_status_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(validation_status_t, (st), (ptr))
+#define sk_validation_status_t_insert(st, val, i) SKM_sk_insert(validation_status_t, (st), (val), (i))
+#define sk_validation_status_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(validation_status_t, (st), (cmp))
+#define sk_validation_status_t_dup(st) SKM_sk_dup(validation_status_t, st)
+#define sk_validation_status_t_pop_free(st, free_func) SKM_sk_pop_free(validation_status_t, (st), (free_func))
+#define sk_validation_status_t_shift(st) SKM_sk_shift(validation_status_t, (st))
+#define sk_validation_status_t_pop(st) SKM_sk_pop(validation_status_t, (st))
+#define sk_validation_status_t_sort(st) SKM_sk_sort(validation_status_t, (st))
+#define sk_validation_status_t_is_sorted(st) SKM_sk_is_sorted(validation_status_t, (st))
+
+/*
+ * Safestack macros for walk_ctx_t.
+ */
+#define sk_walk_ctx_t_new(st) SKM_sk_new(walk_ctx_t, (st))
+#define sk_walk_ctx_t_new_null() SKM_sk_new_null(walk_ctx_t)
+#define sk_walk_ctx_t_free(st) SKM_sk_free(walk_ctx_t, (st))
+#define sk_walk_ctx_t_num(st) SKM_sk_num(walk_ctx_t, (st))
+#define sk_walk_ctx_t_value(st, i) SKM_sk_value(walk_ctx_t, (st), (i))
+#define sk_walk_ctx_t_set(st, i, val) SKM_sk_set(walk_ctx_t, (st), (i), (val))
+#define sk_walk_ctx_t_zero(st) SKM_sk_zero(walk_ctx_t, (st))
+#define sk_walk_ctx_t_push(st, val) SKM_sk_push(walk_ctx_t, (st), (val))
+#define sk_walk_ctx_t_unshift(st, val) SKM_sk_unshift(walk_ctx_t, (st), (val))
+#define sk_walk_ctx_t_find(st, val) SKM_sk_find(walk_ctx_t, (st), (val))
+#define sk_walk_ctx_t_find_ex(st, val) SKM_sk_find_ex(walk_ctx_t, (st), (val))
+#define sk_walk_ctx_t_delete(st, i) SKM_sk_delete(walk_ctx_t, (st), (i))
+#define sk_walk_ctx_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(walk_ctx_t, (st), (ptr))
+#define sk_walk_ctx_t_insert(st, val, i) SKM_sk_insert(walk_ctx_t, (st), (val), (i))
+#define sk_walk_ctx_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(walk_ctx_t, (st), (cmp))
+#define sk_walk_ctx_t_dup(st) SKM_sk_dup(walk_ctx_t, st)
+#define sk_walk_ctx_t_pop_free(st, free_func) SKM_sk_pop_free(walk_ctx_t, (st), (free_func))
+#define sk_walk_ctx_t_shift(st) SKM_sk_shift(walk_ctx_t, (st))
+#define sk_walk_ctx_t_pop(st) SKM_sk_pop(walk_ctx_t, (st))
+#define sk_walk_ctx_t_sort(st) SKM_sk_sort(walk_ctx_t, (st))
+#define sk_walk_ctx_t_is_sorted(st) SKM_sk_is_sorted(walk_ctx_t, (st))
+
+/*
+ * Safestack macros for rsync_ctx_t.
+ */
+#define sk_rsync_ctx_t_new(st) SKM_sk_new(rsync_ctx_t, (st))
+#define sk_rsync_ctx_t_new_null() SKM_sk_new_null(rsync_ctx_t)
+#define sk_rsync_ctx_t_free(st) SKM_sk_free(rsync_ctx_t, (st))
+#define sk_rsync_ctx_t_num(st) SKM_sk_num(rsync_ctx_t, (st))
+#define sk_rsync_ctx_t_value(st, i) SKM_sk_value(rsync_ctx_t, (st), (i))
+#define sk_rsync_ctx_t_set(st, i, val) SKM_sk_set(rsync_ctx_t, (st), (i), (val))
+#define sk_rsync_ctx_t_zero(st) SKM_sk_zero(rsync_ctx_t, (st))
+#define sk_rsync_ctx_t_push(st, val) SKM_sk_push(rsync_ctx_t, (st), (val))
+#define sk_rsync_ctx_t_unshift(st, val) SKM_sk_unshift(rsync_ctx_t, (st), (val))
+#define sk_rsync_ctx_t_find(st, val) SKM_sk_find(rsync_ctx_t, (st), (val))
+#define sk_rsync_ctx_t_find_ex(st, val) SKM_sk_find_ex(rsync_ctx_t, (st), (val))
+#define sk_rsync_ctx_t_delete(st, i) SKM_sk_delete(rsync_ctx_t, (st), (i))
+#define sk_rsync_ctx_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(rsync_ctx_t, (st), (ptr))
+#define sk_rsync_ctx_t_insert(st, val, i) SKM_sk_insert(rsync_ctx_t, (st), (val), (i))
+#define sk_rsync_ctx_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(rsync_ctx_t, (st), (cmp))
+#define sk_rsync_ctx_t_dup(st) SKM_sk_dup(rsync_ctx_t, st)
+#define sk_rsync_ctx_t_pop_free(st, free_func) SKM_sk_pop_free(rsync_ctx_t, (st), (free_func))
+#define sk_rsync_ctx_t_shift(st) SKM_sk_shift(rsync_ctx_t, (st))
+#define sk_rsync_ctx_t_pop(st) SKM_sk_pop(rsync_ctx_t, (st))
+#define sk_rsync_ctx_t_sort(st) SKM_sk_sort(rsync_ctx_t, (st))
+#define sk_rsync_ctx_t_is_sorted(st) SKM_sk_is_sorted(rsync_ctx_t, (st))
+
+/*
+ * Safestack macros for rsync_history_t.
+ */
+#define sk_rsync_history_t_new(st) SKM_sk_new(rsync_history_t, (st))
+#define sk_rsync_history_t_new_null() SKM_sk_new_null(rsync_history_t)
+#define sk_rsync_history_t_free(st) SKM_sk_free(rsync_history_t, (st))
+#define sk_rsync_history_t_num(st) SKM_sk_num(rsync_history_t, (st))
+#define sk_rsync_history_t_value(st, i) SKM_sk_value(rsync_history_t, (st), (i))
+#define sk_rsync_history_t_set(st, i, val) SKM_sk_set(rsync_history_t, (st), (i), (val))
+#define sk_rsync_history_t_zero(st) SKM_sk_zero(rsync_history_t, (st))
+#define sk_rsync_history_t_push(st, val) SKM_sk_push(rsync_history_t, (st), (val))
+#define sk_rsync_history_t_unshift(st, val) SKM_sk_unshift(rsync_history_t, (st), (val))
+#define sk_rsync_history_t_find(st, val) SKM_sk_find(rsync_history_t, (st), (val))
+#define sk_rsync_history_t_find_ex(st, val) SKM_sk_find_ex(rsync_history_t, (st), (val))
+#define sk_rsync_history_t_delete(st, i) SKM_sk_delete(rsync_history_t, (st), (i))
+#define sk_rsync_history_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(rsync_history_t, (st), (ptr))
+#define sk_rsync_history_t_insert(st, val, i) SKM_sk_insert(rsync_history_t, (st), (val), (i))
+#define sk_rsync_history_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(rsync_history_t, (st), (cmp))
+#define sk_rsync_history_t_dup(st) SKM_sk_dup(rsync_history_t, st)
+#define sk_rsync_history_t_pop_free(st, free_func) SKM_sk_pop_free(rsync_history_t, (st), (free_func))
+#define sk_rsync_history_t_shift(st) SKM_sk_shift(rsync_history_t, (st))
+#define sk_rsync_history_t_pop(st) SKM_sk_pop(rsync_history_t, (st))
+#define sk_rsync_history_t_sort(st) SKM_sk_sort(rsync_history_t, (st))
+#define sk_rsync_history_t_is_sorted(st) SKM_sk_is_sorted(rsync_history_t, (st))
+
+/*
+ * Safestack macros for task_t.
+ */
+#define sk_task_t_new(st) SKM_sk_new(task_t, (st))
+#define sk_task_t_new_null() SKM_sk_new_null(task_t)
+#define sk_task_t_free(st) SKM_sk_free(task_t, (st))
+#define sk_task_t_num(st) SKM_sk_num(task_t, (st))
+#define sk_task_t_value(st, i) SKM_sk_value(task_t, (st), (i))
+#define sk_task_t_set(st, i, val) SKM_sk_set(task_t, (st), (i), (val))
+#define sk_task_t_zero(st) SKM_sk_zero(task_t, (st))
+#define sk_task_t_push(st, val) SKM_sk_push(task_t, (st), (val))
+#define sk_task_t_unshift(st, val) SKM_sk_unshift(task_t, (st), (val))
+#define sk_task_t_find(st, val) SKM_sk_find(task_t, (st), (val))
+#define sk_task_t_find_ex(st, val) SKM_sk_find_ex(task_t, (st), (val))
+#define sk_task_t_delete(st, i) SKM_sk_delete(task_t, (st), (i))
+#define sk_task_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(task_t, (st), (ptr))
+#define sk_task_t_insert(st, val, i) SKM_sk_insert(task_t, (st), (val), (i))
+#define sk_task_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(task_t, (st), (cmp))
+#define sk_task_t_dup(st) SKM_sk_dup(task_t, st)
+#define sk_task_t_pop_free(st, free_func) SKM_sk_pop_free(task_t, (st), (free_func))
+#define sk_task_t_shift(st) SKM_sk_shift(task_t, (st))
+#define sk_task_t_pop(st) SKM_sk_pop(task_t, (st))
+#define sk_task_t_sort(st) SKM_sk_sort(task_t, (st))
+#define sk_task_t_is_sorted(st) SKM_sk_is_sorted(task_t, (st))
+
+#endif /* __RCYNIC_C__DEFSTACK_H__ */
diff --git a/rp/rcynic/make-tal.sh b/rp/rcynic/make-tal.sh
new file mode 100755
index 00000000..854a76b9
--- /dev/null
+++ b/rp/rcynic/make-tal.sh
@@ -0,0 +1,42 @@
+#!/bin/sh -
+# $Id$
+#
+# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Generate a trust anchor locator (TAL) given the rsync URI for a
+# self-signed RFC 3779 certificate.
+#
+# Usage: make-tal.sh uri [local_copy_of_certificate]
+#
+# The optional second parameter is the name of a local copy of the
+# certificate to be checked against the copy retrieved from the URI;
+# if present, this should be a local X.509 file in DER format.
+
+case "$1" in rsync://*) :;; *) echo 1>&2 "\"$1\" is not a rsync URI"; exit 1;; esac
+
+tmpfile="make-tal.tmp.$$"
+trap "rm -f $tmpfile" 0 1 2 15
+
+rsync "$1" "$tmpfile" || exit
+
+if test -n "$2"
+then
+ diff -q "$tmpfile" "$2" || exit
+fi
+
+echo "$1"
+echo
+openssl x509 -inform DER -in "$tmpfile" -pubkey -noout |
+awk '!/-----(BEGIN|END)/'
diff --git a/rp/rcynic/rc-scripts/darwin/RCynic b/rp/rcynic/rc-scripts/darwin/RCynic
new file mode 100755
index 00000000..d486a3c3
--- /dev/null
+++ b/rp/rcynic/rc-scripts/darwin/RCynic
@@ -0,0 +1,42 @@
+#!/bin/sh -
+#
+# $Id$
+#
+. /etc/rc.common
+
+name="rcynic"
+start_cmd="rcynic_start"
+stop_cmd="rcynic_stop"
+
+: ${rcynic_dir="/var/rcynic"}
+
+StartService()
+{
+ /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
+
+ if ! /sbin/mount_devfs devfs "${rcynic_dir}/dev"; then
+ echo "Mounting devfs on ${rcynic_dir}/dev failed..."
+ exit 1
+ fi
+
+ for i in /etc/localtime /etc/resolv.conf; do
+ j="${rcynic_dir}${i}"
+ if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
+ /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
+ fi
+ done
+
+ /bin/ln -f /var/run/mDNSResponder "${rcynic_dir}/var/run/mDNSResponder"
+}
+
+StopService()
+{
+ /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
+}
+
+RestartService()
+{
+ StartService
+}
+
+RunService "$1"
diff --git a/rp/rcynic/rc-scripts/darwin/StartupParameters.plist b/rp/rcynic/rc-scripts/darwin/StartupParameters.plist
new file mode 100644
index 00000000..ca46b676
--- /dev/null
+++ b/rp/rcynic/rc-scripts/darwin/StartupParameters.plist
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>Description</key>
+ <string>RCynic Setup</string>
+ <key>OrderPreference</key>
+ <string>None</string>
+ <key>Provides</key>
+ <array>
+ <string>RCynic</string>
+ </array>
+ <key>Uses</key>
+ <array>
+ <string>Network</string>
+ <string>Resolver</string>
+ </array>
+ </dict>
+</plist>
diff --git a/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic b/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic
new file mode 100755
index 00000000..9b7aa545
--- /dev/null
+++ b/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic
@@ -0,0 +1,44 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# PROVIDE: rcynic
+# REQUIRE: DAEMON
+# KEYWORD: nojail
+
+. /etc/rc.subr
+
+name="rcynic"
+start_cmd="rcynic_start"
+stop_cmd="rcynic_stop"
+
+: ${rcynic_dir="/var/rcynic"}
+
+rcynic_start()
+{
+ /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
+
+ if ! /sbin/mount -t devfs dev "${rcynic_dir}/dev"; then
+ echo "Mounting devfs on ${rcynic_dir}/dev failed..."
+ exit 1
+ fi
+
+ /sbin/devfs -m "${rcynic_dir}/dev" rule apply hide
+ /sbin/devfs -m "${rcynic_dir}/dev" rule apply path null unhide
+ /sbin/devfs -m "${rcynic_dir}/dev" rule apply path random unhide
+
+ for i in /etc/localtime /etc/resolv.conf; do
+ j="${rcynic_dir}${i}"
+ if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
+ /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
+ fi
+ done
+}
+
+rcynic_stop()
+{
+ /sbin/umount "${rcynic_dir}/dev" 2>/dev/null
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/rp/rcynic/rcynic-cron b/rp/rcynic/rcynic-cron
new file mode 100755
index 00000000..fbe1ebeb
--- /dev/null
+++ b/rp/rcynic/rcynic-cron
@@ -0,0 +1,106 @@
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2013 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Cron job for rcynic and rtr-origin in stock configuration.
+
+Locking code here works like FreeBSD's lockf(1) utility given -k and
+-t 0 options, which is both the sanest and simplest combination for
+our purposes. In theory this is portable to any Unix-like system.
+"""
+
+import os
+import sys
+import pwd
+import fcntl
+import errno
+import argparse
+
+def run(*cmd, **kwargs):
+ chroot_this = kwargs.pop("chroot_this", False)
+ cwd = kwargs.pop("cwd", None)
+ pid = os.fork()
+ if pid == 0:
+ if chroot_this:
+ os.chdir(ac_rcynic_dir)
+ elif cwd is not None:
+ os.chdir(cwd)
+ if we_are_root:
+ os.initgroups(pw.pw_name, pw.pw_gid)
+ if chroot_this:
+ os.chroot(ac_rcynic_dir)
+ if we_are_root:
+ os.setgid(pw.pw_gid)
+ os.setuid(pw.pw_uid)
+ os.closerange(3, os.sysconf("SC_OPEN_MAX"))
+ os.execvp(cmd[0], cmd)
+ os._exit(1)
+ else:
+ status = os.waitpid(pid, 0)[1]
+ if status == 0:
+ return
+ elif os.WIFSIGNALED(status):
+ sys.exit("Process %s exited with signal %s" % (" ".join(cmd), os.WTERMSIG(status)))
+ elif os.WIFEXITED(status):
+ sys.exit("Program %s exited with status %s" % (" ".join(cmd), os.WEXITSTATUS(status)))
+ else:
+ sys.exit("Program %s exited for unknown reason %s" % (" ".join(cmd), status))
+
+parser = argparse.ArgumentParser(description = __doc__)
+parser.add_argument("--chroot", action = "store_true", help = "run chrooted")
+args = parser.parse_args()
+
+we_are_root = os.getuid() == 0
+
+if args.chroot and not we_are_root:
+ sys.exit("Only root can --chroot")
+
+try:
+ pw = pwd.getpwnam(ac_rcynic_user)
+except KeyError:
+ sys.exit("Could not find passwd entry for user %s" % ac_rcynic_user)
+
+try:
+ lock = os.open(os.path.join(ac_rcynic_dir, "data/lock"), os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
+ fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ if we_are_root:
+ os.fchown(lock, pw.pw_uid, pw.pw_gid)
+except (IOError, OSError), e:
+ if e.errno == errno.EAGAIN:
+ sys.exit(0) # Another instance of this script is already running, exit silently
+ else:
+ sys.exit("Error %r opening lock %r" % (e.strerror, os.path.join(ac_rcynic_dir, "data/lock")))
+
+if args.chroot:
+ run("/bin/rcynic", "-c", "/etc/rcynic.conf", chroot_this = True)
+else:
+ run(os.path.join(ac_bindir, "rcynic"), "-c", os.path.join(ac_sysconfdir, "rcynic.conf"))
+
+run(os.path.join(ac_bindir, "rtr-origin"),
+ "--cronjob",
+ os.path.join(ac_rcynic_dir, "data/authenticated"),
+ cwd = os.path.join(ac_rcynic_dir, "rpki-rtr"))
+
+prog = os.path.join(ac_libexecdir, "rpkigui-rcynic")
+if os.path.exists(prog):
+ run(prog)
+
+if ac_rcynic_html_dir and os.path.exists(os.path.dirname(ac_rcynic_html_dir)):
+ run(os.path.join(ac_bindir, "rcynic-html"),
+ os.path.join(ac_rcynic_dir, "data/rcynic.xml"),
+ ac_rcynic_html_dir)
diff --git a/rp/rcynic/rcynic-html b/rp/rcynic/rcynic-html
new file mode 100755
index 00000000..58e65dde
--- /dev/null
+++ b/rp/rcynic/rcynic-html
@@ -0,0 +1,658 @@
+# $Id$
+#
+# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Render rcynic's XML output to basic (X)HTML with some rrdtool graphics.
+"""
+
+import sys
+import urlparse
+import os
+import argparse
+import time
+import subprocess
+import copy
+
+try:
+ from lxml.etree import (ElementTree, Element, SubElement, Comment)
+except ImportError:
+ from xml.etree.ElementTree import (ElementTree, Element, SubElement, Comment)
+
+session = None
+args = None
+
+def parse_options():
+
+ global args
+
+ try:
+ default_rrdtool_binary = ac_rrdtool_binary
+ except NameError:
+ default_rrdtool_binary = "rrdtool"
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("--refresh", type = int, default = 1800,
+ help = "refresh interval for generated HTML")
+ parser.add_argument("--hide-problems", action = "store_true",
+ help = "don't generate \"problems\" page")
+ parser.add_argument("--hide-graphs", action = "store_true",
+ help = "don't generate graphs")
+ parser.add_argument("--hide-object-counts", action = "store_true",
+ help = "don't display object counts")
+ parser.add_argument("--dont-update-rrds", action = "store_true",
+ help = "don't add new data to RRD databases")
+ parser.add_argument("--png-height", type = int, default = 190,
+ help = "height of PNG images")
+ parser.add_argument("--png-width", type = int, default = 1350,
+ help = "width of PNG images")
+ parser.add_argument("--svg-height", type = int, default = 600,
+ help = "height of SVG images")
+ parser.add_argument("--svg-width", type = int, default = 1200,
+ help = "width of SVG images")
+ parser.add_argument("--eps-height", type = int, default = 0,
+ help = "height of EPS images")
+ parser.add_argument("--eps-width", type = int, default = 0,
+ help = "width of EPS images")
+ parser.add_argument("--rrdtool-binary", default = default_rrdtool_binary,
+ help = "location of rrdtool binary")
+ parser.add_argument("input_file", type = argparse.FileType("r"),
+ help = "XML input file")
+ parser.add_argument("output_directory",
+ help = "output directory")
+ args = parser.parse_args()
+
+
+def parse_utc(s):
+ return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ")))
+
+class Label(object):
+
+ moods = ["bad", "warn", "good"]
+
+ def __init__(self, elt):
+ self.code = elt.tag
+ self.mood = elt.get("kind")
+ self.text = elt.text.strip()
+ self.count = 0
+
+ def get_count(self):
+ return self.count
+
+ @property
+ def sort_key(self):
+ try:
+ return self.moods.index(self.mood)
+ except ValueError:
+ return len(self.moods)
+
+class Validation_Status(object):
+
+ def __init__(self, elt, label_map):
+ self.uri = elt.text.strip()
+ self.timestamp = elt.get("timestamp")
+ self.generation = elt.get("generation")
+ self.hostname = urlparse.urlparse(self.uri).hostname or "[None]"
+ self.fn2 = os.path.splitext(self.uri)[1] or None if self.generation else None
+ self.label = label_map[elt.get("status")]
+
+ def sort_key(self):
+ return (self.label.sort_key, self.timestamp, self.hostname, self.fn2, self.generation)
+
+ @property
+ def code(self):
+ return self.label.code
+
+ @property
+ def mood(self):
+ return self.label.mood
+
+ @property
+ def accepted(self):
+ return self.label.code == "object_accepted"
+
+ @property
+ def rejected(self):
+ return self.label.code == "object_rejected"
+
+ @property
+ def is_current(self):
+ return self.generation == "current"
+
+ @property
+ def is_backup(self):
+ return self.generation == "backup"
+
+ @property
+ def is_problem(self):
+ return self.label.mood != "good"
+
+ @property
+ def is_connection_problem(self):
+ return self.label.mood != "good" and self.label.code.startswith("rsync_transfer_")
+
+ @property
+ def is_object_problem(self):
+ return self.label.mood != "good" and not self.label.code.startswith("rsync_transfer_")
+
+ @property
+ def is_connection_detail(self):
+ return self.label.code.startswith("rsync_transfer_")
+
+ @property
+ def is_object_detail(self):
+ return not self.label.code.startswith("rsync_transfer_")
+
+class Problem_Mixin(object):
+
+ @property
+ def connection_problems(self):
+ result = [v for v in self.validation_status if v.is_connection_problem]
+ result.sort(key = Validation_Status.sort_key)
+ return result
+
+ @property
+ def object_problems(self):
+ result = [v for v in self.validation_status if v.is_object_problem]
+ result.sort(key = Validation_Status.sort_key)
+ return result
+
+class Host(Problem_Mixin):
+
+ def __init__(self, hostname, timestamp):
+ self.hostname = hostname
+ self.timestamp = timestamp
+ self.elapsed = 0
+ self.connections = 0
+ self.failures = 0
+ self.uris = set()
+ self.graph = None
+ self.counters = {}
+ self.totals = {}
+ self.validation_status = []
+
+ def add_connection(self, elt):
+ self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started"))
+ self.connections += 1
+ if elt.get("error") is not None:
+ self.failures += 1
+
+ def add_validation_status(self, v):
+ self.validation_status.append(v)
+ if v.generation == "current":
+ self.uris.add(v.uri)
+ self.counters[(v.fn2, v.generation, v.label)] = self.get_counter(v.fn2, v.generation, v.label) + 1
+ self.totals[v.label] = self.get_total(v.label) + 1
+ v.label.count += 1
+
+ def get_counter(self, fn2, generation, label):
+ return self.counters.get((fn2, generation, label), 0)
+
+ def get_total(self, label):
+ return self.totals.get(label, 0)
+
+ @property
+ def failed(self):
+ return 1 if self.failures > 0 else 0
+
+ @property
+ def objects(self):
+ return len(self.uris)
+
+ field_table = (("connections", "GAUGE"),
+ ("objects", "GAUGE"),
+ ("elapsed", "GAUGE"),
+ ("failed", "ABSOLUTE"))
+
+ rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps
+ for steps in (1, 4, 24))
+
+ @classmethod
+ def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"):
+ return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum)
+ for field in cls.field_table]
+
+ @property
+ def field_values(self):
+ return tuple(str(getattr(self, field[0])) for field in self.field_table)
+
+ @classmethod
+ def field_defs(cls, filebase):
+ return ["DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], filebase, field[0])
+ for field in cls.field_table]
+
+ graph_opts = (
+ "--vertical-label", "Sync time (seconds)",
+ "--right-axis-label", "Objects (count)",
+ "--lower-limit", "0",
+ "--right-axis", "1:0",
+ "--full-size-mode" )
+
+ graph_cmds = (
+
+ # Split elapsed into separate data sets, so we can color
+ # differently to indicate how succesful transfer was. Intent is
+ # that exactly one of these be defined for every value in elapsed.
+
+ "CDEF:success=failed,UNKN,elapsed,IF",
+ "CDEF:failure=connections,1,EQ,failed,*,elapsed,UNKN,IF",
+ "CDEF:partial=connections,1,NE,failed,*,elapsed,UNKN,IF",
+
+ # Show connection timing first, as color-coded semi-transparent
+ # areas with opaque borders. Intent is to make the colors stand
+ # out, since they're a major health indicator. Transparency is
+ # handled via an alpha channel (fourth octet of color code). We
+ # draw this stuff first so that later lines can overwrite it.
+
+ "AREA:success#00FF0080:Sync time (success)",
+ "AREA:partial#FFA50080:Sync time (partial failure)",
+ "AREA:failure#FF000080:Sync time (total failure)",
+
+ "LINE1:success#00FF00", # Green
+ "LINE1:partial#FFA500", # Orange
+ "LINE1:failure#FF0000", # Red
+
+ # Now show object counts, as a simple black line.
+
+ "LINE1:objects#000000:Objects", # Black
+
+ # Add averages over period to chart legend.
+
+ "VDEF:avg_elapsed=elapsed,AVERAGE",
+ "VDEF:avg_connections=connections,AVERAGE",
+ "VDEF:avg_objects=objects,AVERAGE",
+ "COMMENT:\j",
+ "GPRINT:avg_elapsed:Average sync time (seconds)\: %5.2lf",
+ "GPRINT:avg_connections:Average connection count\: %5.2lf",
+ "GPRINT:avg_objects:Average object count\: %5.2lf" )
+
+ graph_periods = (("week", "-1w"),
+ ("month", "-31d"),
+ ("year", "-1y"))
+
+ def rrd_run(self, cmd):
+ try:
+ cmd = [str(i) for i in cmd]
+ cmd.insert(0, args.rrdtool_binary)
+ subprocess.check_call(cmd, stdout = open("/dev/null", "w"))
+ except OSError, e:
+ sys.exit("Problem running %s, perhaps you need to set --rrdtool-binary? (%s)" % (args.rrdtool_binary, e))
+ except subprocess.CalledProcessError, e:
+ sys.exit("Failure running %s: %s" % (args.rrdtool_binary, e))
+
+ def rrd_update(self):
+ filename = os.path.join(args.output_directory, self.hostname) + ".rrd"
+ if not os.path.exists(filename):
+ cmd = ["create", filename, "--start", self.timestamp - 1, "--step", "3600"]
+ cmd.extend(self.field_ds_specifiers())
+ cmd.extend(self.rras)
+ self.rrd_run(cmd)
+ self.rrd_run(["update", filename,
+ "%s:%s" % (self.timestamp, ":".join(str(v) for v in self.field_values))])
+
+ def rrd_graph(self, html):
+ filebase = os.path.join(args.output_directory, self.hostname)
+ formats = [format for format in ("png", "svg", "eps")
+ if getattr(args, format + "_width") and getattr(args, format + "_height")]
+ for period, start in self.graph_periods:
+ for format in formats:
+ cmds = [ "graph", "%s_%s.%s" % (filebase, period, format),
+ "--title", "%s last %s" % (self.hostname, period),
+ "--start", start,
+ "--width", getattr(args, format + "_width"),
+ "--height", getattr(args, format + "_height"),
+ "--imgformat", format.upper() ]
+ cmds.extend(self.graph_opts)
+ cmds.extend(self.field_defs(filebase))
+ cmds.extend(self.graph_cmds)
+ self.rrd_run(cmds)
+ img = Element("img", src = "%s_%s.png" % (self.hostname, period),
+ width = str(args.png_width),
+ height = str(args.png_height))
+ if self.graph is None:
+ self.graph = copy.copy(img)
+ html.BodyElement("h2").text = "%s over last %s" % (self.hostname, period)
+ html.BodyElement("a", href = "%s_%s_svg.html" % (self.hostname, period)).append(img)
+ html.BodyElement("br")
+ svg_html = HTML("%s over last %s" % (self.hostname, period),
+ "%s_%s_svg" % (self.hostname, period))
+ svg_html.BodyElement("img", src = "%s_%s.svg" % (self.hostname, period))
+ svg_html.close()
+
+
+class Session(Problem_Mixin):
+
+ def __init__(self):
+ self.hosts = {}
+
+ self.root = ElementTree(file = args.input_file).getroot()
+
+ self.rcynic_version = self.root.get("rcynic-version")
+ self.rcynic_date = self.root.get("date")
+ self.timestamp = parse_utc(self.rcynic_date)
+
+ self.labels = [Label(elt) for elt in self.root.find("labels")]
+ self.load_validation_status()
+
+ for elt in self.root.findall("rsync_history"):
+ self.get_host(urlparse.urlparse(elt.text.strip()).hostname).add_connection(elt)
+
+ generations = set()
+ fn2s = set()
+
+ for v in self.validation_status:
+ self.get_host(v.hostname).add_validation_status(v)
+ generations.add(v.generation)
+ fn2s.add(v.fn2)
+
+ self.labels = [l for l in self.labels if l.count > 0]
+
+ self.hostnames = sorted(self.hosts)
+ self.generations = sorted(generations)
+ self.fn2s = sorted(fn2s)
+
+ def load_validation_status(self):
+ label_map = dict((label.code, label) for label in self.labels)
+ full_validation_status = [Validation_Status(elt, label_map)
+ for elt in self.root.findall("validation_status")]
+ accepted_current = set(v.uri for v in full_validation_status
+ if v.is_current and v.accepted)
+ self.validation_status = [v for v in full_validation_status
+ if not v.is_backup
+ or v.uri not in accepted_current]
+
+ def get_host(self, hostname):
+ if hostname not in self.hosts:
+ self.hosts[hostname] = Host(hostname, self.timestamp)
+ return self.hosts[hostname]
+
+ def get_sum(self, fn2, generation, label):
+ return sum(h.get_counter(fn2, generation, label)
+ for h in self.hosts.itervalues())
+
+ def rrd_update(self):
+ if not args.dont_update_rrds:
+ for h in self.hosts.itervalues():
+ h.rrd_update()
+
+css = '''
+ th, td {
+ text-align: center; padding: 4px;
+ }
+
+ td.uri {
+ text-align: left;
+ }
+
+ thead tr th, tfoot tr td {
+ font-weight: bold;
+ }
+
+ .good {
+ background-color: #77ff77;
+ }
+
+ .warn {
+ background-color: yellow;
+ }
+
+ .bad {
+ background-color: #ff5500;
+ }
+
+ body {
+ font-family: arial, helvetica, serif;
+ }
+
+ /* Make background-color inherit like color does. */
+ #nav {
+ background-color: inherit;
+ }
+
+ #nav, #nav ul {
+ float: left;
+ width: 100%;
+ list-style: none;
+ line-height: 1;
+ font-weight: normal;
+ padding: 0;
+ border-color: black;
+ border-style: solid;
+ border-width: 1px 0;
+ margin: 0 0 1em 0;
+ }
+
+ #nav a, #nav span {
+ display: block;
+ background-color: white;
+ color: black;
+ text-decoration: none;
+ padding: 0.25em 0.75em;
+ }
+
+ #nav li {
+ float: left;
+ padding: 0;
+ }
+
+ /* Use <ul style="width: ..."> to set submenu width. */
+ #nav li ul {
+ position: absolute;
+ display: none;
+ height: auto;
+ border-width: 1px;
+ margin: 0;
+ }
+
+ #nav li li {
+ width: 100%;
+ }
+
+ /* Display submenu when hovering. */
+ #nav li:hover ul {
+ display: block;
+ }
+
+ /* Reverse video when hovering. */
+ #nav a:hover, #nav span:hover {
+ color: white;
+ background-color: black;
+ }
+'''
+
+class HTML(object):
+
+ def __init__(self, title, filebase):
+
+ self.filename = os.path.join(args.output_directory, filebase + ".html")
+
+ self.html = Element("html")
+ self.html.append(Comment(" Generators:\n" +
+ " " + session.rcynic_version + "\n" +
+ " $Id$\n"))
+ self.head = SubElement(self.html, "head")
+ self.body = SubElement(self.html, "body")
+
+ title += " " + session.rcynic_date
+ SubElement(self.head, "title").text = title
+ SubElement(self.body, "h1").text = title
+ SubElement(self.head, "style", type = "text/css").text = css
+
+ if args.refresh:
+ SubElement(self.head, "meta", { "http-equiv" : "Refresh", "content" : str(args.refresh) })
+
+ hostwidth = max(len(hostname) for hostname in session.hostnames)
+
+ toc = SubElement(self.body, "ul", id = "nav")
+ SubElement(SubElement(toc, "li"), "a", href = "index.html").text = "Overview"
+ li = SubElement(toc, "li")
+ SubElement(li, "span").text = "Repositories"
+ ul = SubElement(li, "ul", style = "width: %sem" % hostwidth)
+ for hostname in session.hostnames:
+ SubElement(SubElement(ul, "li"), "a", href = "%s.html" % hostname).text = hostname
+ SubElement(SubElement(toc, "li"), "a", href = "problems.html").text = "Problems"
+ li = SubElement(toc, "li")
+ SubElement(li, "span").text = "All Details"
+ ul = SubElement(li, "ul", style = "width: 15em")
+ SubElement(SubElement(ul, "li"), "a", href = "connections.html").text = "All Connections"
+ SubElement(SubElement(ul, "li"), "a", href = "objects.html").text = "All Objects"
+ SubElement(self.body, "br")
+
+ def close(self):
+ ElementTree(element = self.html).write(self.filename)
+
+ def BodyElement(self, tag, **attrib):
+ return SubElement(self.body, tag, **attrib)
+
+ def counter_table(self, data_func, total_func):
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tfoot = SubElement(table, "tfoot")
+ tbody = SubElement(table, "tbody")
+ tr = SubElement(thead, "tr")
+ SubElement(tr, "th")
+ for label in session.labels:
+ SubElement(tr, "th").text = label.text
+ for fn2 in session.fn2s:
+ for generation in session.generations:
+ counters = [data_func(fn2, generation, label) for label in session.labels]
+ if sum(counters) > 0:
+ tr = SubElement(tbody, "tr")
+ SubElement(tr, "td").text = ((generation or "") + " " + (fn2 or "")).strip()
+ for label, count in zip(session.labels, counters):
+ td = SubElement(tr, "td")
+ if count > 0:
+ td.set("class", label.mood)
+ td.text = str(count)
+ tr = SubElement(tfoot, "tr")
+ SubElement(tr, "td").text = "Total"
+ counters = [total_func(label) for label in session.labels]
+ for label, count in zip(session.labels, counters):
+ td = SubElement(tr, "td")
+ if count > 0:
+ td.set("class", label.mood)
+ td.text = str(count)
+ return table
+
+ def object_count_table(self, session):
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tbody = SubElement(table, "tbody")
+ tfoot = SubElement(table, "tfoot")
+ fn2s = [fn2 for fn2 in session.fn2s if fn2 is not None]
+ total = dict((fn2, 0) for fn2 in fn2s)
+ for hostname in session.hostnames:
+ tr = SubElement(tbody, "tr")
+ SubElement(tr, "td").text = hostname
+ for fn2 in fn2s:
+ td = SubElement(tr, "td")
+ count = sum(uri.endswith(fn2) for uri in session.hosts[hostname].uris)
+ total[fn2] += count
+ if count > 0:
+ td.text = str(count)
+ trhead = SubElement(thead, "tr")
+ trfoot = SubElement(tfoot, "tr")
+ SubElement(trhead, "th").text = "Repository"
+ SubElement(trfoot, "td").text = "Total"
+ for fn2 in fn2s:
+ SubElement(trhead, "th").text = fn2
+ SubElement(trfoot, "td").text = str(total[fn2])
+ return table
+
+ def detail_table(self, records):
+ if records:
+ table = self.BodyElement("table", rules = "all", border = "1")
+ thead = SubElement(table, "thead")
+ tbody = SubElement(table, "tbody")
+ tr = SubElement(thead, "tr")
+ SubElement(tr, "th").text = "Timestamp"
+ SubElement(tr, "th").text = "Generation"
+ SubElement(tr, "th").text = "Status"
+ SubElement(tr, "th").text = "URI"
+ for v in records:
+ tr = SubElement(tbody, "tr", { "class" : v.mood })
+ SubElement(tr, "td").text = v.timestamp
+ SubElement(tr, "td").text = v.generation
+ SubElement(tr, "td").text = v.label.text
+ SubElement(tr, "td", { "class" : "uri"}).text = v.uri
+ return table
+ else:
+ self.BodyElement("p").text = "None found"
+ return None
+
+def main():
+
+ global session
+
+ os.putenv("TZ", "UTC")
+ time.tzset()
+
+ parse_options()
+
+ session = Session()
+ session.rrd_update()
+
+ for hostname in session.hostnames:
+ html = HTML("Repository details for %s" % hostname, hostname)
+ html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
+ if not args.hide_graphs:
+ session.hosts[hostname].rrd_graph(html)
+ if not args.hide_problems:
+ html.BodyElement("h2").text = "Connection Problems"
+ html.detail_table(session.hosts[hostname].connection_problems)
+ html.BodyElement("h2").text = "Object Problems"
+ html.detail_table(session.hosts[hostname].object_problems)
+ html.close()
+
+ html = HTML("rcynic summary", "index")
+ html.BodyElement("h2").text = "Grand totals for all repositories"
+ html.counter_table(session.get_sum, Label.get_count)
+ if not args.hide_object_counts:
+ html.BodyElement("br")
+ html.BodyElement("hr")
+ html.BodyElement("br")
+ html.BodyElement("h2").text = "Current total object counts (distinct URIs)"
+ html.object_count_table(session)
+ for hostname in session.hostnames:
+ html.BodyElement("br")
+ html.BodyElement("hr")
+ html.BodyElement("br")
+ html.BodyElement("h2").text = "Overview for repository %s" % hostname
+ html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total)
+ if not args.hide_graphs:
+ html.BodyElement("br")
+ html.BodyElement("a", href = "%s.html" % hostname).append(session.hosts[hostname].graph)
+ html.close()
+
+ html = HTML("Problems", "problems")
+ html.BodyElement("h2").text = "Connection Problems"
+ html.detail_table(session.connection_problems)
+ html.BodyElement("h2").text = "Object Problems"
+ html.detail_table(session.object_problems)
+ html.close()
+
+ html = HTML("All connections", "connections")
+ html.detail_table([v for v in session.validation_status if v.is_connection_detail])
+ html.close()
+
+ html = HTML("All objects", "objects")
+ html.detail_table([v for v in session.validation_status if v.is_object_detail])
+ html.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/rp/rcynic/rcynic-svn b/rp/rcynic/rcynic-svn
new file mode 100755
index 00000000..fd0df500
--- /dev/null
+++ b/rp/rcynic/rcynic-svn
@@ -0,0 +1,190 @@
+# $Id$
+#
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Archive rcynic output in a Subversion repository.
+"""
+
+import subprocess
+import argparse
+import datetime
+import fcntl
+import glob
+import os
+
+try:
+ from lxml.etree import ElementTree
+except ImportError:
+ from xml.etree.ElementTree import ElementTree
+
+
+mime_types = (
+ ("html", "application/xhtml+xml"),
+ ("cer", "application/pkix-cert"),
+ ("crl", "application/pkix-crl"),
+ ("mft", "application/rpki-manifest"),
+ ("mnf", "application/rpki-manifest"),
+ ("roa", "application/rpki-roa"),
+ ("gbr", "application/rpki-ghostbusters"))
+
+
+def run(*cmd, **kwargs):
+ """
+ Run a program, displaying timing data when appropriate.
+ """
+
+ t = datetime.datetime.utcnow()
+ subprocess.check_call(cmd, **kwargs)
+ if args.show_timing:
+ now = datetime.datetime.utcnow()
+ print now, (now - t), " ".join(cmd)
+
+
+def runxml(*cmd):
+ """
+
+ Run a program which produces XML output, displaying timing data when
+ appropriate and returning an ElementTree constructed from the
+ program's output.
+ """
+ t = datetime.datetime.utcnow()
+ p = subprocess.Popen(cmd, stdout = subprocess.PIPE)
+ x = ElementTree(file = p.stdout)
+ s = p.wait()
+ if s:
+ raise subprocess.CalledProcessError(s, cmd[0])
+ if args.show_timing:
+ now = datetime.datetime.utcnow()
+ print now, (now - t), " ".join(cmd)
+ return x
+
+
+# Main program.
+
+parser = argparse.ArgumentParser(description = __doc__)
+
+parser.add_argument("--show_timing", action = "store_true", help = \
+ """
+ Show timing data on programs we run.
+ """)
+
+parser.add_argument("--verbatim", action = "store_true", help = \
+ """
+ Whether to archive rcynic's data output exactly as
+ rcynic writes it or map it into a directory
+ structure which makes more sense when used with
+ Subversion. True means archive exactly as rcynic
+ writes it, interpreting file and directory names
+ as rsync would, transient directories and all.
+ False means map the current authenticated/ tree in
+ rcynic's output to a stable authenticated/ subtree
+ in the subversion repository, with file and
+ directory names from the command line shorted to
+ their last component.
+ """)
+
+parser.add_argument("--lockfile", default = "rcynic-svn.lock", help = \
+ """
+ Lock file to to prevent multiple copies of this
+ program (eg, running under cron) from stepping on
+ each other while modifying the working directory.
+ """)
+
+parser.add_argument("files_to_archive", nargs = "*", help = \
+ """
+ Files to archive using Subversion. If omitted, we
+ assume that some other process has already
+ modified the Subversion working directory.
+ """)
+
+parser.add_argument("working_directory", help = \
+ """
+ Subversion working directory to use (must already
+ exist).
+ """)
+
+args = parser.parse_args()
+
+if args.show_timing:
+ t0 = datetime.datetime.utcnow()
+ print t0, "Starting"
+
+# Lock out other instances of this program. We may want some more
+# sophsiticated approach when combining this with other programs, but
+# this should minimize the risk of multiple copies of this program
+# trying to modify the same subversion working directory at the same
+# time and messing each other up. We leave the lock file in place
+# because doing so removes a potential race condition.
+
+lock = os.open("cronjob.lock", os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
+fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+# Make sure working tree is up to date.
+
+run("svn", "update", "--quiet", args.working_directory)
+
+# Copy rcynic's output as appropriate.
+
+if args.files_to_archive:
+
+ if args.verbatim:
+ cmd = ["rsync", "--archive", "--quiet", "--delete"]
+ cmd.extend(args.files_to_archive)
+ cmd.append(args.working_directory)
+ run(*cmd)
+
+ else:
+ for src in args.files_to_archive:
+ cmd = ["rsync", "--archive", "--quiet", "--delete", "--copy-links"]
+ cmd.append(src.rstrip("/"))
+ cmd.append(args.working_directory.rstrip("/") + "/")
+ run(*cmd)
+
+# Ask Subversion to add any new files, trying hard to get the MIME
+# types right.
+
+cmd = ["svn", "add", "--quiet", "--force", "--auto-props"]
+
+for fn2, mime_type in mime_types:
+ cmd.append("--config-option")
+ cmd.append("config:auto-props:*.%s=svn:mime-type=%s" % (fn2, mime_type))
+
+cmd.append(".")
+
+run(*cmd, cwd = args.working_directory)
+
+# Parse XML version of Subversion's status output to figure out what
+# files have been deleted, and tell Subversion that we deleted them
+# intentionally.
+
+missing = sorted(entry.get("path")
+ for entry in runxml("svn", "status", "--xml", args.working_directory).find("target").findall("entry")
+ if entry.find("wc-status").get("item") == "missing")
+deleted = []
+
+for path in missing:
+ if not any(path.startswith(r) for r in deleted):
+ run("svn", "delete", "--quiet", path)
+ deleted.append(path + "/")
+
+# Commit our changes and update the working tree.
+
+run("svn", "commit", "--quiet", "--message", "Auto update.", args.working_directory)
+run("svn", "update", "--quiet", args.working_directory)
+
+if args.show_timing:
+ now = datetime.datetime.utcnow()
+ print now, now - t0, "total runtime"
diff --git a/rp/rcynic/rcynic-text b/rp/rcynic/rcynic-text
new file mode 100755
index 00000000..a8e56dac
--- /dev/null
+++ b/rp/rcynic/rcynic-text
@@ -0,0 +1,118 @@
+# $Id$
+#
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Flat text summary of rcynic.xml.
+"""
+
+import sys
+import urlparse
+import textwrap
+
+try:
+ from lxml.etree import ElementTree
+except ImportError:
+ from xml.etree.ElementTree import ElementTree
+
+class Label(object):
+
+  """
+  One output column: a validation label's tag, its display width,
+  the word-wrapped heading lines, and a global (all-hosts) counter
+  of matching validation_status entries.
+  """
+
+  def __init__(self, elt):
+    # elt is one child of the <labels> element; its tag names the
+    # status code and its text is the human-readable heading.
+    self.tag = elt.tag
+    # Column width is the longest single word in the heading.
+    self.width = max(len(s) for s in elt.text.split())
+    self.lines = textwrap.wrap(elt.text.strip(), width = self.width)
+    self.counter = 0
+
+  def line(self, n):
+    # n-th heading line, centered with one-space gutters; blank
+    # padding when this heading has fewer than n + 1 lines.
+    try:
+      return " " + self.lines[n].center(self.width) + " "
+    except IndexError:
+      return " " * (self.width + 2)
+
+  def add(self):
+    # Bump the global count for this label.
+    self.counter += 1
+
+  @property
+  def total(self):
+    # Formatted global count, right-justified to the column width.
+    return " " + str(self.counter).rjust(self.width) + " "
+
+  @property
+  def visible(self):
+    # Only labels seen at least once get a column in the output.
+    return self.counter > 0
+
+class Host(object):
+
+  """
+  Per-hostname counters, keyed by Label object.
+  """
+
+  def __init__(self):
+    # Maps Label -> count of validation_status entries for this host.
+    self.counters = {}
+
+  def add(self, label):
+    # Count one occurrence both for this host and globally on label.
+    self.counters[label] = self.counters.get(label, 0) + 1
+    label.add()
+
+  def total(self, label):
+    # Formatted per-host count for label, or blank padding when this
+    # host never saw that label.
+    if label in self.counters:
+      return " " + str(self.counters[label]).rjust(label.width) + " "
+    else:
+      return " " * (label.width + 2)
+
+class Session(object):
+
+  """
+  One rcynic.xml session: the label definitions plus one Host bucket
+  per rsync hostname, rendered as an ASCII summary table.
+  """
+
+  def __init__(self, labels):
+    self.hosts = {}
+    self.labels = labels
+    # Index labels by tag so status attributes can be looked up fast.
+    self.map = dict((label.tag, label) for label in labels)
+
+  def add(self, elt):
+    # elt is a <validation_status> element: its status attribute names
+    # the label, its text is the URI whose hostname we bucket by.
+    label = self.map[elt.get("status")]
+    hostname = urlparse.urlparse(elt.text.strip()).hostname
+    if hostname not in self.hosts:
+      self.hosts[hostname] = Host()
+    self.hosts[hostname].add(label)
+
+  def show(self):
+    # Render the table: one row per hostname, one column per label
+    # that occurred at least once, plus a totals row.
+    visible = [label for label in self.labels if label.visible]
+    hostnames = sorted(hostname for hostname in self.hosts if hostname is not None)
+    hostwidth = max(len(hostname) for hostname in hostnames + ["Hostname"])
+    separator = "+-%s-+-%s-+" % (
+      "-" * hostwidth,
+      "-+-".join("-" * label.width for label in visible))
+    print separator
+    # Headings may wrap onto several lines; emit one row per line.
+    for i in xrange(max(len(label.lines) for label in visible)):
+      print "| %s |%s|" % (
+        ("Hostname" if i == 0 else "").ljust(hostwidth),
+        "|".join(label.line(i) for label in visible))
+    print separator
+    for hostname in hostnames:
+      print "| %s |%s|" % (
+        hostname.ljust(hostwidth),
+        "|".join(self.hosts[hostname].total(label) for label in visible))
+    if hostnames:
+      print separator
+    print "| %s |%s|" % (
+      "Total".ljust(hostwidth),
+      "|".join(label.total for label in visible))
+    print separator
+
+
+def main():
+  """
+  Read each rcynic.xml named on the command line (stdin when no
+  arguments are given) and print one summary table per input.
+  """
+  for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
+    etree = ElementTree(file = filename)
+    session = Session([Label(elt) for elt in etree.find("labels")])
+    for elt in etree.findall("validation_status"):
+      session.add(elt)
+    session.show()
+
+if __name__ == "__main__":
+  main()
diff --git a/rp/rcynic/rcynic.c b/rp/rcynic/rcynic.c
new file mode 100644
index 00000000..dea9c48f
--- /dev/null
+++ b/rp/rcynic/rcynic.c
@@ -0,0 +1,6070 @@
+/*
+ * Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+ * Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC")
+ * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notices and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL,
+ * ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+/**
+ * @mainpage
+ *
+ * "Cynical rsync": Recursively walk RPKI tree using rsync to pull
+ * data from remote sites, validating certificates and CRLs as we go.
+ *
+ * Doxygen doesn't quite know what to make of a one-file C program,
+ * and ends up putting most of the interesting data @link rcynic.c
+ * here. @endlink
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/file.h>
+#include <errno.h>
+#include <sys/signal.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <utime.h>
+#include <glob.h>
+#include <sys/param.h>
+#include <getopt.h>
+
+#define SYSLOG_NAMES /* defines CODE prioritynames[], facilitynames[] */
+#include <syslog.h>
+
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/safestack.h>
+#include <openssl/conf.h>
+#include <openssl/rand.h>
+#include <openssl/asn1t.h>
+#include <openssl/cms.h>
+
+#include <rpki/roa.h>
+#include <rpki/manifest.h>
+
+#include "bio_f_linebreak.h"
+
+#include "defstack.h"
+
+#if !defined(FILENAME_MAX) && defined(PATH_MAX) && PATH_MAX > 1024
+#define FILENAME_MAX PATH_MAX
+#elif !defined(FILENAME_MAX)
+#define FILENAME_MAX 1024
+#endif
+
+#define SCHEME_RSYNC ("rsync://")
+#define SIZEOF_RSYNC (sizeof(SCHEME_RSYNC) - 1)
+
+/**
+ * Maximum length of a hostname.
+ */
+#ifndef HOSTNAME_MAX
+#define HOSTNAME_MAX 256
+#endif
+
+/**
+ * Maximum length of an URI.
+ */
+#define URI_MAX (SIZEOF_RSYNC + HOSTNAME_MAX + 1 + FILENAME_MAX)
+
+/**
+ * Maximum number of times we try to kill an inferior process before
+ * giving up.
+ */
+#define KILL_MAX 10
+
+/**
+ * Version number of XML summary output.
+ */
+#define XML_SUMMARY_VERSION 1
+
+/**
+ * How much buffer space do we need for a raw address?
+ */
+#define ADDR_RAW_BUF_LEN 16
+
+/**
+ * How many bytes is a SHA256 digest?
+ */
+#define HASH_SHA256_LEN 32
+
+/**
+ * Logging levels. Same general idea as syslog(), but our own
+ * catagories based on what makes sense for this program. Default
+ * mappings to syslog() priorities are here because it's the easiest
+ * way to make sure that we assign a syslog level to each of ours.
+ */
+
+#define LOG_LEVELS \
+ QQ(log_sys_err, LOG_ERR) /* Error from OS or library */ \
+ QQ(log_usage_err, LOG_ERR) /* Bad usage (local error) */ \
+ QQ(log_data_err, LOG_NOTICE) /* Bad data, no biscuit */ \
+ QQ(log_telemetry, LOG_INFO) /* Normal progress chatter */ \
+ QQ(log_verbose, LOG_INFO) /* Extra chatter */ \
+ QQ(log_debug, LOG_DEBUG) /* Only useful when debugging */
+
+#define QQ(x,y) x ,
+typedef enum log_level { LOG_LEVELS LOG_LEVEL_T_MAX } log_level_t;
+#undef QQ
+
+#define QQ(x,y) { #x , x },
+static const struct {
+ const char *name;
+ log_level_t value;
+} log_levels[] = {
+ LOG_LEVELS
+};
+#undef QQ
+
+/**
+ * MIB counters derived from OpenSSL. Long list of validation failure
+ * codes from OpenSSL (crypto/x509/x509_vfy.h).
+ */
+
+#define MIB_COUNTERS_FROM_OPENSSL \
+ QV(X509_V_ERR_UNABLE_TO_GET_CRL) \
+ QV(X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE) \
+ QV(X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE) \
+ QV(X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY) \
+ QV(X509_V_ERR_CERT_SIGNATURE_FAILURE) \
+ QV(X509_V_ERR_CRL_SIGNATURE_FAILURE) \
+ QV(X509_V_ERR_CERT_NOT_YET_VALID) \
+ QV(X509_V_ERR_CERT_HAS_EXPIRED) \
+ QV(X509_V_ERR_CRL_NOT_YET_VALID) \
+ QV(X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD) \
+ QV(X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD) \
+ QV(X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD) \
+ QV(X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD) \
+ QV(X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) \
+ QV(X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN) \
+ QV(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY) \
+ QV(X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE) \
+ QV(X509_V_ERR_CERT_CHAIN_TOO_LONG) \
+ QV(X509_V_ERR_CERT_REVOKED) \
+ QV(X509_V_ERR_INVALID_CA) \
+ QV(X509_V_ERR_PATH_LENGTH_EXCEEDED) \
+ QV(X509_V_ERR_INVALID_PURPOSE) \
+ QV(X509_V_ERR_CERT_UNTRUSTED) \
+ QV(X509_V_ERR_CERT_REJECTED) \
+ QV(X509_V_ERR_AKID_SKID_MISMATCH) \
+ QV(X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH) \
+ QV(X509_V_ERR_KEYUSAGE_NO_CERTSIGN) \
+ QV(X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER) \
+ QV(X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION) \
+ QV(X509_V_ERR_KEYUSAGE_NO_CRL_SIGN) \
+ QV(X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION) \
+ QV(X509_V_ERR_INVALID_NON_CA) \
+ QV(X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED) \
+ QV(X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE) \
+ QV(X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED) \
+ QV(X509_V_ERR_INVALID_EXTENSION) \
+ QV(X509_V_ERR_INVALID_POLICY_EXTENSION) \
+ QV(X509_V_ERR_NO_EXPLICIT_POLICY) \
+ QV(X509_V_ERR_UNNESTED_RESOURCE)
+
+/**
+ * MIB counters specific to rcynic.
+ */
+
+#define MIB_COUNTERS \
+ MIB_COUNTERS_FROM_OPENSSL \
+ QB(aia_extension_missing, "AIA extension missing") \
+ QB(aia_extension_forbidden, "AIA extension forbidden") \
+ QB(aia_uri_missing, "AIA URI missing") \
+ QB(aki_extension_issuer_mismatch, "AKI extension issuer mismatch") \
+ QB(aki_extension_missing, "AKI extension missing") \
+ QB(aki_extension_wrong_format, "AKI extension is wrong format") \
+ QB(bad_asidentifiers, "Bad ASIdentifiers extension") \
+ QB(bad_certificate_policy, "Bad certificate policy") \
+ QB(bad_cms_econtenttype, "Bad CMS eContentType") \
+ QB(bad_cms_si_contenttype, "Bad CMS SI ContentType") \
+ QB(bad_cms_signer, "Bad CMS signer") \
+ QB(bad_cms_signer_infos, "Bad CMS signerInfos") \
+ QB(bad_crl, "Bad CRL") \
+ QB(bad_ipaddrblocks, "Bad IPAddrBlocks extension") \
+ QB(bad_key_usage, "Bad keyUsage") \
+ QB(bad_manifest_digest_length, "Bad manifest digest length") \
+ QB(bad_public_key, "Bad public key") \
+ QB(bad_roa_asID, "Bad ROA asID") \
+ QB(bad_certificate_serial_number, "Bad certificate serialNumber") \
+ QB(bad_manifest_number, "Bad manifestNumber") \
+ QB(certificate_bad_signature, "Bad certificate signature") \
+ QB(certificate_failed_validation, "Certificate failed validation") \
+ QB(cms_econtent_decode_error, "CMS eContent decode error") \
+ QB(cms_includes_crls, "CMS includes CRLs") \
+ QB(cms_signer_missing, "CMS signer missing") \
+ QB(cms_ski_mismatch, "CMS SKI mismatch") \
+ QB(cms_validation_failure, "CMS validation failure") \
+ QB(crl_issuer_name_mismatch, "CRL issuer name mismatch") \
+ QB(crl_not_in_manifest, "CRL not listed in manifest") \
+ QB(crl_not_yet_valid, "CRL not yet valid") \
+ QB(crl_number_extension_missing, "CRL number extension missing") \
+ QB(crl_number_is_negative, "CRL number is negative") \
+ QB(crl_number_out_of_range, "CRL number out of range") \
+ QB(crldp_doesnt_match_issuer_sia, "CRLDP doesn't match issuer's SIA") \
+ QB(crldp_uri_missing, "CRLDP URI missing") \
+ QB(disallowed_x509v3_extension, "Disallowed X.509v3 extension") \
+ QB(duplicate_name_in_manifest, "Duplicate name in manifest") \
+ QB(inappropriate_eku_extension, "Inappropriate EKU extension") \
+ QB(malformed_aia_extension, "Malformed AIA extension") \
+ QB(malformed_sia_extension, "Malformed SIA extension") \
+ QB(malformed_basic_constraints, "Malformed basicConstraints") \
+ QB(malformed_trust_anchor, "Malformed trust anchor") \
+ QB(malformed_cadirectory_uri, "Malformed caDirectory URI") \
+ QB(malformed_crldp_extension, "Malformed CRDLP extension") \
+ QB(malformed_crldp_uri, "Malformed CRDLP URI") \
+ QB(malformed_roa_addressfamily, "Malformed ROA addressFamily") \
+ QB(malformed_tal_uri, "Malformed TAL URI") \
+ QB(manifest_carepository_mismatch, "Manifest caRepository mismatch") \
+ QB(manifest_interval_overruns_cert, "Manifest interval overruns certificate") \
+ QB(manifest_lists_missing_object, "Manifest lists missing object") \
+ QB(manifest_not_yet_valid, "Manifest not yet valid") \
+ QB(missing_resources, "Missing resources") \
+ QB(nonconformant_asn1_time_value, "Nonconformant ASN.1 time value") \
+ QB(nonconformant_public_key_algorithm,"Nonconformant public key algorithm")\
+ QB(nonconformant_signature_algorithm, "Nonconformant signature algorithm")\
+ QB(nonconformant_digest_algorithm, "Nonconformant digest algorithm") \
+ QB(nonconformant_certificate_uid, "Nonconformant certificate UID") \
+ QB(object_rejected, "Object rejected") \
+ QB(rfc3779_inheritance_required, "RFC 3779 inheritance required") \
+ QB(roa_contains_bad_afi_value, "ROA contains bad AFI value") \
+ QB(roa_max_prefixlen_too_short, "ROA maxPrefixlen too short") \
+ QB(roa_resource_not_in_ee, "ROA resource not in EE") \
+ QB(roa_resources_malformed, "ROA resources malformed") \
+ QB(rsync_transfer_failed, "rsync transfer failed") \
+ QB(rsync_transfer_timed_out, "rsync transfer timed out") \
+ QB(safi_not_allowed, "SAFI not allowed") \
+ QB(sia_cadirectory_uri_missing, "SIA caDirectory URI missing") \
+ QB(sia_extension_missing, "SIA extension missing") \
+ QB(sia_manifest_uri_missing, "SIA manifest URI missing") \
+ QB(ski_extension_missing, "SKI extension missing") \
+ QB(ski_public_key_mismatch, "SKI public key mismatch") \
+ QB(trust_anchor_key_mismatch, "Trust anchor key mismatch") \
+ QB(trust_anchor_with_crldp, "Trust anchor can't have CRLDP") \
+ QB(unknown_afi, "Unknown AFI") \
+ QB(unknown_openssl_verify_error, "Unknown OpenSSL verify error") \
+ QB(unreadable_trust_anchor, "Unreadable trust anchor") \
+ QB(unreadable_trust_anchor_locator, "Unreadable trust anchor locator") \
+ QB(wrong_object_version, "Wrong object version") \
+ QW(aia_doesnt_match_issuer, "AIA doesn't match issuer") \
+ QW(backup_thisupdate_newer_than_current, "Backup thisUpdate newer than current") \
+ QW(backup_number_higher_than_current, "Backup number higher than current") \
+ QW(bad_thisupdate, "Bad CRL thisUpdate") \
+ QW(bad_cms_si_signed_attributes, "Bad CMS SI signed attributes") \
+ QW(bad_signed_object_uri, "Bad signedObject URI") \
+ QW(crldp_names_newer_crl, "CRLDP names newer CRL") \
+ QW(digest_mismatch, "Digest mismatch") \
+ QW(ee_certificate_with_1024_bit_key, "EE certificate with 1024 bit key") \
+ QW(issuer_uses_multiple_crldp_values, "Issuer uses multiple CRLDP values")\
+ QW(multiple_rsync_uris_in_extension, "Multiple rsync URIs in extension") \
+ QW(nonconformant_issuer_name, "Nonconformant X.509 issuer name") \
+ QW(nonconformant_subject_name, "Nonconformant X.509 subject name") \
+ QW(policy_qualifier_cps, "Policy Qualifier CPS") \
+ QW(rsync_partial_transfer, "rsync partial transfer") \
+ QW(rsync_transfer_skipped, "rsync transfer skipped") \
+ QW(sia_extension_missing_from_ee, "SIA extension missing from EE") \
+ QW(skipped_because_not_in_manifest, "Skipped because not in manifest") \
+ QW(stale_crl_or_manifest, "Stale CRL or manifest") \
+ QW(tainted_by_stale_crl, "Tainted by stale CRL") \
+ QW(tainted_by_stale_manifest, "Tainted by stale manifest") \
+ QW(tainted_by_not_being_in_manifest, "Tainted by not being in manifest") \
+ QW(trust_anchor_not_self_signed, "Trust anchor not self-signed") \
+ QW(trust_anchor_skipped, "Trust anchor skipped") \
+ QW(unknown_object_type_skipped, "Unknown object type skipped") \
+ QW(uri_too_long, "URI too long") \
+ QW(wrong_cms_si_signature_algorithm, "Wrong CMS SI signature algorithm") \
+ QW(wrong_cms_si_digest_algorithm, "Wrong CMS SI digest algorithm") \
+ QG(non_rsync_uri_in_extension, "Non-rsync URI in extension") \
+ QG(object_accepted, "Object accepted") \
+ QG(rechecking_object, "Rechecking object") \
+ QG(rsync_transfer_succeeded, "rsync transfer succeeded") \
+ QG(validation_ok, "OK")
+
+#define QV(x) QB(mib_openssl_##x, 0)
+
+static const char
+ mib_counter_kind_good[] = "good",
+ mib_counter_kind_warn[] = "warn",
+ mib_counter_kind_bad[] = "bad";
+
+#define QG(x,y) mib_counter_kind_good ,
+#define QW(x,y) mib_counter_kind_warn ,
+#define QB(x,y) mib_counter_kind_bad ,
+static const char * const mib_counter_kind[] = { MIB_COUNTERS NULL };
+#undef QB
+#undef QW
+#undef QG
+
+#define QG(x,y) QQ(x,y)
+#define QW(x,y) QQ(x,y)
+#define QB(x,y) QQ(x,y)
+
+#define QQ(x,y) x ,
+typedef enum mib_counter { MIB_COUNTERS MIB_COUNTER_T_MAX } mib_counter_t;
+#undef QQ
+
+#define QQ(x,y) y ,
+static const char * const mib_counter_desc[] = { MIB_COUNTERS NULL };
+#undef QQ
+
+#define QQ(x,y) #x ,
+static const char * const mib_counter_label[] = { MIB_COUNTERS NULL };
+#undef QQ
+
+#undef QV
+
+#define QQ(x,y) 0 ,
+#define QV(x) x ,
+static const long mib_counter_openssl[] = { MIB_COUNTERS 0 };
+#undef QV
+#undef QQ
+
+/**
+ * Object sources. We always try to get fresh copies of objects using
+ * rsync, but if that fails we try using backup copies from what
+ * worked the last time we were run. This means that a URI
+ * potentially represents two different objects, so we need to
+ * distinguish them for tracking purposes in our validation log.
+ */
+
+#define OBJECT_GENERATIONS \
+ QQ(null) \
+ QQ(current) \
+ QQ(backup)
+
+#define QQ(x) object_generation_##x ,
+typedef enum object_generation { OBJECT_GENERATIONS OBJECT_GENERATION_MAX } object_generation_t;
+#undef QQ
+
+#define QQ(x) #x ,
+static const char * const object_generation_label[] = { OBJECT_GENERATIONS NULL };
+#undef QQ
+
+/**
+ * Type-safe string wrapper for URIs.
+ */
+typedef struct { char s[URI_MAX]; } uri_t;
+
+/**
+ * Type-safe string wrapper for filename paths.
+ */
+typedef struct { char s[FILENAME_MAX]; } path_t;
+
+/**
+ * Type-safe wrapper for hash buffers.
+ */
+typedef struct { unsigned char h[EVP_MAX_MD_SIZE]; } hashbuf_t;
+
+/**
+ * Type-safe wrapper for timestamp strings.
+ */
+typedef struct { char s[sizeof("2001-01-01T00:00:00Z") + 1]; } timestamp_t;
+
+/**
+ * Per-URI validation status object.
+ * uri must be first element.
+ */
+typedef struct validation_status {
+ uri_t uri;
+ object_generation_t generation;
+ time_t timestamp;
+ unsigned char events[(MIB_COUNTER_T_MAX + 7) / 8];
+ short balance;
+ struct validation_status *left_child;
+ struct validation_status *right_child;
+} validation_status_t;
+
+DECLARE_STACK_OF(validation_status_t)
+
+/**
+ * Structure to hold data parsed out of a certificate.
+ */
+typedef struct certinfo {
+ int ca, ta;
+ object_generation_t generation;
+ uri_t uri, sia, aia, crldp, manifest, signedobject;
+} certinfo_t;
+
+typedef struct rcynic_ctx rcynic_ctx_t;
+
+/**
+ * States that a walk_ctx_t can be in.
+ */
+typedef enum {
+ walk_state_initial, /**< Initial state */
+ walk_state_rsync, /**< rsyncing certinfo.sia */
+ walk_state_ready, /**< Ready to traverse outputs */
+ walk_state_current, /**< prefix = rc->unauthenticated */
+ walk_state_backup, /**< prefix = rc->old_authenticated */
+ walk_state_done /**< Done walking this cert's outputs */
+} walk_state_t;
+
+/**
+ * Context for certificate tree walks. This includes all the stuff
+ * that we would keep as automatic variables on the call stack if we
+ * didn't have to use callbacks to support multiple rsync processes.
+ */
+typedef struct walk_ctx {
+ unsigned refcount;
+ certinfo_t certinfo;
+ X509 *cert;
+ Manifest *manifest;
+ object_generation_t manifest_generation;
+ STACK_OF(OPENSSL_STRING) *filenames;
+ int manifest_iteration, filename_iteration, stale_manifest;
+ walk_state_t state;
+ uri_t crldp;
+ STACK_OF(X509) *certs;
+ STACK_OF(X509_CRL) *crls;
+} walk_ctx_t;
+
+DECLARE_STACK_OF(walk_ctx_t)
+
+/**
+ * Return codes from rsync functions.
+ */
+typedef enum {
+ rsync_status_done, /* Request completed */
+ rsync_status_failed, /* Request failed */
+ rsync_status_timed_out, /* Request timed out */
+ rsync_status_pending, /* Request in progress */
+ rsync_status_skipped /* Request not attempted */
+} rsync_status_t;
+
+/**
+ * States for asynchronous rsync.
+ * "initial" must be first.
+ */
+
+#define RSYNC_STATES \
+ QQ(initial) \
+ QQ(running) \
+ QQ(conflict_wait) \
+ QQ(retry_wait) \
+ QQ(closed) \
+ QQ(terminating)
+
+#define QQ(x) rsync_state_##x,
+typedef enum { RSYNC_STATES RSYNC_STATE_T_MAX } rsync_state_t;
+#undef QQ
+
+#define QQ(x) #x ,
+static const char * const rsync_state_label[] = { RSYNC_STATES NULL };
+#undef QQ
+
+/**
+ * Context for asyncronous rsync.
+ */
+typedef struct rsync_ctx {
+ uri_t uri;
+ void (*handler)(rcynic_ctx_t *, const struct rsync_ctx *, const rsync_status_t, const uri_t *, void *);
+ void *cookie;
+ rsync_state_t state;
+ enum {
+ rsync_problem_none, /* Must be first */
+ rsync_problem_timed_out,
+ rsync_problem_refused
+ } problem;
+ unsigned tries;
+ pid_t pid;
+ int fd;
+ time_t started, deadline;
+ char buffer[URI_MAX * 4];
+ size_t buflen;
+} rsync_ctx_t;
+
+DECLARE_STACK_OF(rsync_ctx_t)
+
+/**
+ * Record of rsync attempts.
+ */
+typedef struct rsync_history {
+ uri_t uri;
+ time_t started, finished;
+ rsync_status_t status;
+ int final_slash;
+} rsync_history_t;
+
+DECLARE_STACK_OF(rsync_history_t)
+
+/**
+ * Deferred task.
+ */
+typedef struct task {
+ void (*handler)(rcynic_ctx_t *, void *);
+ void *cookie;
+} task_t;
+
+DECLARE_STACK_OF(task_t)
+
+/**
+ * Trust anchor locator (TAL) fetch context.
+ */
+typedef struct tal_ctx {
+ uri_t uri;
+ path_t path;
+ EVP_PKEY *pkey;
+} tal_ctx_t;
+
+/**
+ * Extended context for verify callbacks. This is a wrapper around
+ * OpenSSL's X509_STORE_CTX, and the embedded X509_STORE_CTX @em must be
+ * the first element of this structure in order for the evil cast to
+ * do the right thing. This is ugly but safe, as the C language
+ * promises us that the address of the first element of a structure is
+ * the same as the address of the structure.
+ */
+typedef struct rcynic_x509_store_ctx {
+ X509_STORE_CTX ctx; /* Must be first */
+ rcynic_ctx_t *rc;
+ const certinfo_t *subject;
+} rcynic_x509_store_ctx_t;
+
+/**
+ * Program context that would otherwise be a mess of global variables.
+ */
+struct rcynic_ctx {
+ path_t authenticated, old_authenticated, new_authenticated, unauthenticated;
+ char *jane, *rsync_program;
+ STACK_OF(validation_status_t) *validation_status;
+ STACK_OF(rsync_history_t) *rsync_history;
+ STACK_OF(rsync_ctx_t) *rsync_queue;
+ STACK_OF(task_t) *task_queue;
+ int use_syslog, allow_stale_crl, allow_stale_manifest, use_links;
+ int require_crl_in_manifest, rsync_timeout, priority[LOG_LEVEL_T_MAX];
+ int allow_non_self_signed_trust_anchor, allow_object_not_in_manifest;
+ int max_parallel_fetches, max_retries, retry_wait_min, run_rsync;
+ int allow_digest_mismatch, allow_crl_digest_mismatch;
+ int allow_nonconformant_name, allow_ee_without_signedObject;
+ int allow_1024_bit_ee_key, allow_wrong_cms_si_attributes;
+ int rsync_early;
+ unsigned max_select_time;
+ validation_status_t *validation_status_in_waiting;
+ validation_status_t *validation_status_root;
+ log_level_t log_level;
+ X509_STORE *x509_store;
+};
+
+
+
+/*
+ * Handle NIDs we wish OpenSSL knew about. This is carefully (we
+ * hope) written to do nothing at all for any NID that OpenSSL knows
+ * about; the intent is just to add definitions for things OpenSSL
+ * doesn't know about yet. Of necessity, this is a bit gross, since
+ * it confounds runtime static variables with predefined macro names,
+ * but we try to put all the magic associated with this in one place.
+ *
+ * In the long run it might be cleaner to generate this with a trivial
+ * script and put the result in a shared .h file, but this will do for
+ * the moment.
+ */
+
+#ifndef NID_ad_rpkiManifest
+static int NID_ad_rpkiManifest;
+#endif
+
+#ifndef NID_ad_signedObject
+static int NID_ad_signedObject;
+#endif
+
+#ifndef NID_ct_ROA
+static int NID_ct_ROA;
+#endif
+
+#ifndef NID_ct_rpkiManifest
+static int NID_ct_rpkiManifest;
+#endif
+
+#ifndef NID_ct_rpkiGhostbusters
+static int NID_ct_rpkiGhostbusters;
+#endif
+
+#ifndef NID_cp_ipAddr_asNumber
+static int NID_cp_ipAddr_asNumber;
+#endif
+
+#ifndef NID_id_kp_bgpsec_router
+static int NID_id_kp_bgpsec_router;
+#endif
+
+/**
+ * Missing NIDs, if any.
+ */
+static const struct {
+ int *nid;
+ const char *oid;
+ const char *sn;
+ const char *ln;
+} missing_nids[] = {
+
+#ifndef NID_ad_rpkiManifest
+ {&NID_ad_rpkiManifest, "1.3.6.1.5.5.7.48.10", "id-ad-rpkiManifest", "RPKI Manifest"},
+#endif
+
+#ifndef NID_ad_signedObject
+ {&NID_ad_signedObject, "1.3.6.1.5.5.7.48.11", "id-ad-signedObject", "Signed Object"},
+#endif
+
+#ifndef NID_ct_ROA
+ {&NID_ct_ROA, "1.2.840.113549.1.9.16.1.24", "id-ct-routeOriginAttestation", "ROA eContent"},
+#endif
+
+#ifndef NID_ct_rpkiManifest
+ {&NID_ct_rpkiManifest, "1.2.840.113549.1.9.16.1.26", "id-ct-rpkiManifest", "RPKI Manifest eContent"},
+#endif
+
+#ifndef NID_ct_rpkiGhostbusters
+ {&NID_ct_rpkiGhostbusters, "1.2.840.113549.1.9.16.1.35", "id-ct-rpkiGhostbusters", "RPKI Ghostbusters eContent"},
+#endif
+
+#ifndef NID_cp_ipAddr_asNumber
+ {&NID_cp_ipAddr_asNumber, "1.3.6.1.5.5.7.14.2", "id-cp-ipAddr-asNumber", "RPKI Certificate Policy"},
+#endif
+
+#ifndef NID_id_kp_bgpsec_router
+ {&NID_id_kp_bgpsec_router, "1.3.6.1.5.5.7.3.30", "id-kp-bgpsec-router", "BGPSEC Router Certificate"},
+#endif
+
+};
+
+
+
+/**
+ * Subversion ID data.
+ */
+static const char svn_id[] = "$Id$";
+
+/**
+ * Suffix we use temporarily during the symlink shuffle. Could be
+ * almost anything, but we want to do the length check early, before
+ * we waste a lot of work we'll just have to throw away, so we just
+ * wire in something short and obvious.
+ */
+static const char authenticated_symlink_suffix[] = ".new";
+
+/**
+ * Constants for comparisions. We can't build these at compile time,
+ * so they can't be const, but treat them as if they were once
+ * allocated.
+ *
+ * We probably need both a better scheme for naming NID_ replacements
+ * and a more comprehensive rewrite of how we handle OIDs OpenSSL
+ * doesn't know about, so that we neither conflict with defined
+ * symbols nor duplicate effort nor explode if and when OpenSSL adds
+ * new OIDs (with or without the names we would have used).
+ */
+
+static const ASN1_INTEGER *asn1_zero, *asn1_four_octets, *asn1_twenty_octets;
+static int NID_binary_signing_time;
+
+
+
+/**
+ * Handle missing NIDs: for each entry in missing_nids[], look the OID
+ * up in OpenSSL's existing object table, and failing that, register
+ * it ourselves via OBJ_create().
+ *
+ * Returns 1 on success, 0 if any OID could neither be found nor created.
+ */
+static int
+create_missing_nids(void)
+{
+  int i;
+
+  for (i = 0; i < (int) (sizeof(missing_nids) / sizeof(*missing_nids)); i++)
+    if ((*missing_nids[i].nid = OBJ_txt2nid(missing_nids[i].oid)) == NID_undef &&
+        (*missing_nids[i].nid = OBJ_create(missing_nids[i].oid,
+                                           missing_nids[i].sn,
+                                           missing_nids[i].ln)) == NID_undef)
+      return 0;
+
+  return 1;
+}
+
+
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
+ *
+ * free(NULL) is a well-defined no-op, so the NULL guard the original
+ * carried was redundant and has been dropped.
+ */
+static void OPENSSL_STRING_free(OPENSSL_STRING s)
+{
+  free(s);
+}
+
+/**
+ * Wrapper around an idiom we use with OPENSSL_STRING stacks.  There's
+ * a bug in the current sk_OPENSSL_STRING_delete() macro that casts
+ * the return value to the wrong type, so we cast it to something
+ * innocuous here and avoid using that macro elsewhere.
+ *
+ * NOTE(review): when str is not on the stack, sk_OPENSSL_STRING_find()
+ * presumably returns -1 and the delete is a no-op returning NULL,
+ * which OPENSSL_STRING_free() tolerates -- confirm against the
+ * OpenSSL version in use.
+ */
+static void sk_OPENSSL_STRING_remove(STACK_OF(OPENSSL_STRING) *sk, const char *str)
+{
+  OPENSSL_STRING_free((void *) sk_OPENSSL_STRING_delete(sk, sk_OPENSSL_STRING_find(sk, str)));
+}
+
+/**
+ * Allocate a new, zero-initialized validation_status_t object.
+ *
+ * Returns the new object, or NULL on allocation failure.
+ */
+static validation_status_t *validation_status_t_new(void)
+{
+  /*
+   * calloc() zero-fills for us, replacing the original
+   * malloc()-then-memset() pair with the idiomatic single call.
+   */
+  return calloc(1, sizeof(validation_status_t));
+}
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
+ * free(NULL) is a no-op, so no NULL guard is needed.
+ */
+static void validation_status_t_free(validation_status_t *v)
+{
+  free(v);
+}
+
+
+
+/**
+ * Allocate a new, zero-initialized rsync_history_t object.
+ *
+ * Returns the new object, or NULL on allocation failure.
+ */
+static rsync_history_t *rsync_history_t_new(void)
+{
+  /*
+   * calloc() zero-fills for us, replacing the original
+   * malloc()-then-memset() pair with the idiomatic single call.
+   */
+  return calloc(1, sizeof(rsync_history_t));
+}
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
+ * free(NULL) is a no-op, so no NULL guard is needed.
+ */
+static void rsync_history_t_free(rsync_history_t *h)
+{
+  free(h);
+}
+
+/**
+ * Compare two rsync_history_t objects by URI, in strcmp() order.
+ * Comparison callback for sorted STACK_OF(rsync_history_t) operations.
+ */
+static int rsync_history_cmp(const rsync_history_t * const *a, const rsync_history_t * const *b)
+{
+  return strcmp((*a)->uri.s, (*b)->uri.s);
+}
+
+
+
+/**
+ * Convert a time_t to a printable string in UTC format
+ * ("YYYY-MM-DDTHH:MM:SSZ"), written into *ts.
+ *
+ * A NULL t means "now".  Returns ts->s for caller convenience.
+ *
+ * NOTE(review): gmtime() uses static storage, so this is not
+ * thread-safe; appears harmless here since this program forks rsync
+ * children rather than using threads -- confirm if that changes.
+ */
+static const char *time_to_string(timestamp_t *ts, const time_t *t)
+{
+  time_t now;
+  size_t n;
+
+  assert(ts != NULL);
+
+  if (t == NULL) {
+    now = time(0);
+    t = &now;
+  }
+
+  n = strftime(ts->s, sizeof(ts->s), "%Y-%m-%dT%H:%M:%SZ", gmtime(t));
+  assert(n > 0);
+
+  return ts->s;
+}
+
+/*
+ * GCC attributes to help catch format string errors.
+ */
+
+#ifdef __GNUC__
+
+static void logmsg(const rcynic_ctx_t *rc,
+ const log_level_t level,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+#endif
+
+/**
+ * Logging, va_list flavor.
+ *
+ * Discards messages above the configured log level, then writes
+ * either to syslog() (using the per-level priority mapping in
+ * rc->priority[]) or to stderr with an HH:MM:SS local-time prefix
+ * and, when set, the rc->jane identity string (presumably the
+ * program's name).
+ */
+static void vlogmsg(const rcynic_ctx_t *rc,
+                    const log_level_t level,
+                    const char *fmt,
+                    va_list ap)
+{
+  assert(rc && fmt);
+
+  if (rc->log_level < level)
+    return;
+
+  if (rc->use_syslog) {
+    vsyslog(rc->priority[level], fmt, ap);
+  } else {
+    char ts[sizeof("00:00:00")+1];
+    time_t t = time(0);
+    strftime(ts, sizeof(ts), "%H:%M:%S", localtime(&t));
+    fprintf(stderr, "%s: ", ts);
+    if (rc->jane)
+      fprintf(stderr, "%s: ", rc->jane);
+    vfprintf(stderr, fmt, ap);
+    putc('\n', stderr);
+  }
+}
+
+/**
+ * Logging, printf-style convenience wrapper around vlogmsg().
+ * Format-string checked via the GCC attribute declared above.
+ */
+static void logmsg(const rcynic_ctx_t *rc,
+                   const log_level_t level,
+                   const char *fmt, ...)
+{
+  va_list ap;
+  va_start(ap, fmt);
+  vlogmsg(rc, level, fmt, ap);
+  va_end(ap);
+}
+
+/**
+ * Print OpenSSL library errors.
+ *
+ * Drains OpenSSL's error queue, logging each entry at log_sys_err.
+ * Does nothing unless we're logging at log_verbose or above.
+ */
+static void log_openssl_errors(const rcynic_ctx_t *rc)
+{
+  const char *data, *file;
+  unsigned long code;
+  char error[256];
+  int flags, line;
+
+  /*
+   * Bug fix: this test was "!rc->log_level < log_verbose", which
+   * parses as "(!rc->log_level) < log_verbose".  Since !x is always
+   * 0 or 1 and log_verbose is larger, that condition was always
+   * true, so this function returned early every time and the
+   * OpenSSL errors were never logged at all.
+   */
+  if (rc->log_level < log_verbose)
+    return;
+
+  while ((code = ERR_get_error_line_data(&file, &line, &data, &flags))) {
+    ERR_error_string_n(code, error, sizeof(error));
+    if (data && (flags & ERR_TXT_STRING))
+      logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s: %s", file, line, error, data);
+    else
+      logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s", file, line, error);
+  }
+}
+
+/**
+ * Configure logging level by name.
+ *
+ * Looks name up in the log_levels[] table and sets rc->log_level.
+ * Returns 1 on success, 0 (with a usage error logged) otherwise.
+ */
+static int configure_logmsg(rcynic_ctx_t *rc, const char *name)
+{
+  size_t i;                     /* size_t: avoids signed/unsigned comparison with sizeof */
+
+  assert(rc && name);
+
+  for (i = 0; i < sizeof(log_levels)/sizeof(*log_levels); i++) {
+    if (!strcmp(name, log_levels[i].name)) {
+      rc->log_level = log_levels[i].value;
+      return 1;
+    }
+  }
+
+  logmsg(rc, log_usage_err, "Bad log level %s", name);
+  return 0;
+}
+
+/**
+ * Configure syslog mapping: translate a symbolic name into its
+ * numeric code using one of the <syslog.h> CODE tables
+ * (prioritynames[] or facilitynames[], exposed by the SYSLOG_NAMES
+ * define at the top of this file).
+ *
+ * Returns 1 and stores the code in *result on success, 0 (with a
+ * usage error logged) when name is not in the table.
+ */
+static int configure_syslog(const rcynic_ctx_t *rc,
+                            int *result,
+                            const CODE *table,
+                            const char *name)
+{
+  assert(result && table && name);
+
+  while (table->c_name && strcmp(table->c_name, name))
+    table++;
+
+  if (table->c_name) {
+    *result = table->c_val;
+    return 1;
+  } else {
+    logmsg(rc, log_usage_err, "Bad syslog code %s", name);
+    return 0;
+  }
+}
+
+/**
+ * Configure boolean variable.
+ *
+ * Only the first character of val is examined: [yYtT1] means true,
+ * [nNfF0] means false, anything else is an error.
+ *
+ * Returns 1 and stores the value in *result on success, 0 (with a
+ * usage error logged) on unrecognized input.
+ */
+static int configure_boolean(const rcynic_ctx_t *rc,
+                             int *result,
+                             const char *val)
+{
+  assert(rc && result && val);
+
+  switch (*val) {
+  case 'y': case 'Y': case 't': case 'T': case '1':
+    *result = 1;
+    return 1;
+  case 'n': case 'N': case 'f': case 'F': case '0':
+    *result = 0;
+    return 1;
+  default:
+    logmsg(rc, log_usage_err, "Bad boolean value %s", val);
+    return 0;
+  }
+}
+
+/**
+ * Configure integer variable.
+ *
+ * Parses val as base-10; the whole string must be consumed.  Rejects
+ * empty strings, trailing junk, and -- unlike the original, which
+ * silently truncated via the (int) cast -- values overflowing long
+ * (ERANGE from strtol()) or outside int's range.
+ *
+ * Returns 1 and stores the value in *result on success, 0 (with a
+ * usage error logged) otherwise.
+ */
+static int configure_integer(const rcynic_ctx_t *rc,
+                             int *result,
+                             const char *val)
+{
+  long res;
+  char *p;
+
+  assert(rc && result && val);
+
+  errno = 0;
+  res = strtol(val, &p, 10);
+
+  if (*val != '\0' && *p == '\0' && errno != ERANGE &&
+      res >= INT_MIN && res <= INT_MAX) {
+    *result = (int) res;
+    return 1;
+  } else {
+    logmsg(rc, log_usage_err, "Bad integer value %s", val);
+    return 0;
+  }
+}
+
+/**
+ * Configure unsigned integer variable.
+ */
+static int configure_unsigned_integer(const rcynic_ctx_t *rc,
+ unsigned *result,
+ const char *val)
+{
+ unsigned long res;
+ char *p;
+
+ assert(rc && result && val);
+
+ res = strtoul(val, &p, 10);
+
+ if (*val != '\0' && *p == '\0') {
+ *result = (unsigned) res;
+ return 1;
+ } else {
+ logmsg(rc, log_usage_err, "Bad integer value %s", val);
+ return 0;
+ }
+}
+
+
+
+/**
+ * Make a directory if it doesn't already exist.
+ */
+static int mkdir_maybe(const rcynic_ctx_t *rc, const path_t *name)
+{
+ path_t path;
+ char *s;
+
+ assert(name != NULL);
+ if (strlen(name->s) >= sizeof(path.s)) {
+ logmsg(rc, log_data_err, "Pathname %s too long", name->s);
+ return 0;
+ }
+ strcpy(path.s, name->s);
+ s = path.s[0] == '/' ? path.s + 1 : path.s;
+ if ((s = strrchr(s, '/')) == NULL)
+ return 1;
+ *s = '\0';
+ if (!mkdir_maybe(rc, &path)) {
+ logmsg(rc, log_sys_err, "Failed to make directory %s", path.s);
+ return 0;
+ }
+ if (!access(path.s, F_OK))
+ return 1;
+ logmsg(rc, log_verbose, "Creating directory %s", path.s);
+ return mkdir(path.s, 0777) == 0;
+}
+
+/**
+ * strdup() a string and push it onto a stack.
+ */
+static int sk_OPENSSL_STRING_push_strdup(STACK_OF(OPENSSL_STRING) *sk, const char *str)
+{
+ OPENSSL_STRING s = strdup(str);
+
+ if (s && sk_OPENSSL_STRING_push(sk, s))
+ return 1;
+ if (s)
+ free(s);
+ return 0;
+}
+
/**
 * Compare two URI strings, for OpenSSL STACK operations.
 * Plain lexicographic ordering via strcmp().
 */
static int uri_cmp(const char * const *a, const char * const *b)
{
  const char *left = *a;
  const char *right = *b;

  return strcmp(left, right);
}
+
+/**
+ * Is string an rsync URI?
+ */
+static int is_rsync(const char *uri)
+{
+ return uri && !strncmp(uri, SCHEME_RSYNC, SIZEOF_RSYNC);
+}
+
+/**
+ * Convert an rsync URI to a filename, checking for evil character
+ * sequences. NB: This routine can't call mib_increment(), because
+ * mib_increment() calls it, so errors detected here only go into
+ * the log, not the MIB.
+ */
+static int uri_to_filename(const rcynic_ctx_t *rc,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix)
+{
+ const char *u;
+ size_t n;
+
+ path->s[0] = '\0';
+
+ if (!is_rsync(uri->s)) {
+ logmsg(rc, log_telemetry, "%s is not an rsync URI, not converting to filename", uri->s);
+ return 0;
+ }
+
+ u = uri->s + SIZEOF_RSYNC;
+ n = strlen(u);
+
+ if (u[0] == '/' || u[0] == '.' || strstr(u, "/../") ||
+ (n >= 3 && !strcmp(u + n - 3, "/.."))) {
+ logmsg(rc, log_data_err, "Dangerous URI %s, not converting to filename", uri->s);
+ return 0;
+ }
+
+ if (prefix)
+ n += strlen(prefix->s);
+
+ if (n >= sizeof(path->s)) {
+ logmsg(rc, log_data_err, "URI %s too long, not converting to filename", uri->s);
+ return 0;
+ }
+
+ if (prefix) {
+ strcpy(path->s, prefix->s);
+ strcat(path->s, u);
+ } else {
+ strcpy(path->s, u);
+ }
+
+ return 1;
+}
+
+/**
+ * Compare filename fields of two FileAndHash structures.
+ */
+static int FileAndHash_name_cmp(const FileAndHash * const *a, const FileAndHash * const *b)
+{
+ return strcmp((char *) (*a)->file->data, (char *) (*b)->file->data);
+}
+
+/**
+ * Get value of code in a validation_status_t.
+ */
+static int validation_status_get_code(const validation_status_t *v,
+ const mib_counter_t code)
+{
+ assert(v && code < MIB_COUNTER_T_MAX);
+ return (v->events[code / 8] & (1 << (code % 8))) != 0;
+}
+
+/**
+ * Set value of code in a validation_status_t.
+ */
+static void validation_status_set_code(validation_status_t *v,
+ const mib_counter_t code,
+ int value)
+{
+ assert(v && code < MIB_COUNTER_T_MAX);
+ if (value)
+ v->events[code / 8] |= (1 << (code % 8));
+ else
+ v->events[code / 8] &= ~(1 << (code % 8));
+}
+
+/**
+ * validation_status object comparison, for AVL tree rather than
+ * OpenSSL stacks.
+ */
+static int
+validation_status_cmp(const validation_status_t *node,
+ const uri_t *uri,
+ const object_generation_t generation)
+{
+ int cmp = ((int) node->generation) - ((int) generation);
+ if (cmp)
+ return cmp;
+ else
+ return strcmp(uri->s, node->uri.s);
+}
+
+/**
+ * validation_status AVL tree insertion. Adapted from code written by
+ * Paul Vixie and explictly placed in the public domain using examples
+ * from the book: "Algorithms & Data Structures," Niklaus Wirth,
+ * Prentice-Hall, 1986, ISBN 0-13-022005-1. Thanks, Paul!
+ */
+static validation_status_t *
+validation_status_sprout(validation_status_t **node,
+ int *needs_balancing,
+ validation_status_t *new_node)
+{
+#ifdef AVL_DEBUG
+#define AVL_MSG(msg) sprintf(stderr, "AVL_DEBUG: '%s'\n", msg)
+#else
+#define AVL_MSG(msg)
+#endif
+
+ validation_status_t *p1, *p2, *result;
+ int cmp;
+
+ /*
+ * Are we grounded? If so, add the node "here" and set the
+ * rebalance flag, then exit.
+ */
+ if (*node == NULL) {
+ AVL_MSG("Grounded, adding new node");
+ new_node->left_child = NULL;
+ new_node->right_child = NULL;
+ new_node->balance = 0;
+ *node = new_node;
+ *needs_balancing = 1;
+ return *node;
+ }
+
+ /*
+ * Compare the data.
+ */
+ cmp = validation_status_cmp(*node, &new_node->uri, new_node->generation);
+
+ /*
+ * If LESS, prepare to move to the left.
+ */
+ if (cmp < 0) {
+
+ AVL_MSG("LESS. sprouting left.");
+ result = validation_status_sprout(&(*node)->left_child, needs_balancing, new_node);
+
+ if (*needs_balancing) {
+ AVL_MSG("LESS: left branch has grown longer");
+
+ switch ((*node)->balance) {
+
+ case 1:
+ /*
+ * Right branch WAS longer; balance is ok now.
+ */
+ AVL_MSG("LESS: case 1.. balance restored implicitly");
+ (*node)->balance = 0;
+ *needs_balancing = 0;
+ break;
+
+ case 0:
+ /*
+ * Balance WAS okay; now left branch longer.
+ */
+ AVL_MSG("LESS: case 0.. balnce bad but still ok");
+ (*node)->balance = -1;
+ break;
+
+ case -1:
+ /*
+ * Left branch was already too long. Rebalance.
+ */
+ AVL_MSG("LESS: case -1: rebalancing");
+ p1 = (*node)->left_child;
+
+ if (p1->balance == -1) {
+ AVL_MSG("LESS: single LL");
+ (*node)->left_child = p1->right_child;
+ p1->right_child = *node;
+ (*node)->balance = 0;
+ *node = p1;
+ }
+
+ else {
+ AVL_MSG("LESS: double LR");
+
+ p2 = p1->right_child;
+ p1->right_child = p2->left_child;
+ p2->left_child = p1;
+
+ (*node)->left_child = p2->right_child;
+ p2->right_child = *node;
+
+ if (p2->balance == -1)
+ (*node)->balance = 1;
+ else
+ (*node)->balance = 0;
+
+ if (p2->balance == 1)
+ p1->balance = -1;
+ else
+ p1->balance = 0;
+ *node = p2;
+ }
+
+ (*node)->balance = 0;
+ *needs_balancing = 0;
+ }
+ }
+ return result;
+ }
+
+ /*
+ * If MORE, prepare to move to the right.
+ */
+ if (cmp > 0) {
+
+ AVL_MSG("MORE: sprouting to the right");
+ result = validation_status_sprout(&(*node)->right_child, needs_balancing, new_node);
+
+ if (*needs_balancing) {
+ AVL_MSG("MORE: right branch has grown longer");
+
+ switch ((*node)->balance) {
+
+ case -1:AVL_MSG("MORE: balance was off, fixed implicitly");
+ (*node)->balance = 0;
+ *needs_balancing = 0;
+ break;
+
+ case 0: AVL_MSG("MORE: balance was okay, now off but ok");
+ (*node)->balance = 1;
+ break;
+
+ case 1: AVL_MSG("MORE: balance was off, need to rebalance");
+ p1 = (*node)->right_child;
+
+ if (p1->balance == 1) {
+ AVL_MSG("MORE: single RR");
+ (*node)->right_child = p1->left_child;
+ p1->left_child = *node;
+ (*node)->balance = 0;
+ *node = p1;
+ }
+
+ else {
+ AVL_MSG("MORE: double RL");
+
+ p2 = p1->left_child;
+ p1->left_child = p2->right_child;
+ p2->right_child = p1;
+
+ (*node)->right_child = p2->left_child;
+ p2->left_child = *node;
+
+ if (p2->balance == 1)
+ (*node)->balance = -1;
+ else
+ (*node)->balance = 0;
+
+ if (p2->balance == -1)
+ p1->balance = 1;
+ else
+ p1->balance = 0;
+
+ *node = p2;
+ } /*else*/
+ (*node)->balance = 0;
+ *needs_balancing = 0;
+ }
+ }
+ return result;
+ }
+
+ /*
+ * Neither more nor less, found existing node matching key, return it.
+ */
+ AVL_MSG("I found it!");
+ *needs_balancing = 0;
+ return *node;
+
+#undef AVL_MSG
+}
+
/**
 * Add a validation status entry to the internal log.
 *
 * Entries are keyed by (URI, generation) and stored both in an AVL
 * tree (for lookup) and on an OpenSSL stack (for iteration).  A
 * single spare node is kept in rc->validation_status_in_waiting so
 * allocation only happens when a genuinely new key shows up.
 */
static void log_validation_status(rcynic_ctx_t *rc,
                                  const uri_t *uri,
                                  const mib_counter_t code,
                                  const object_generation_t generation)
{
  validation_status_t *v = NULL;
  int needs_balancing = 0;

  assert(rc && uri && code < MIB_COUNTER_T_MAX && generation < OBJECT_GENERATION_MAX);

  /* Status logging disabled entirely. */
  if (!rc->validation_status)
    return;

  /* Don't record skipped rsync transfers when rsync isn't being run at all. */
  if (code == rsync_transfer_skipped && !rc->run_rsync)
    return;

  /* Make sure we have a spare node ready before touching the tree. */
  if (rc->validation_status_in_waiting == NULL &&
      (rc->validation_status_in_waiting = validation_status_t_new()) == NULL) {
    logmsg(rc, log_sys_err, "Couldn't allocate validation status entry for %s", uri->s);
    return;
  }

  v = rc->validation_status_in_waiting;
  memset(v, 0, sizeof(*v));
  v->uri = *uri;
  v->generation = generation;

  /*
   * Insert into the AVL tree.  If the key already existed, sprout
   * returns the existing node and the spare stays in waiting for the
   * next call; if our spare was consumed, it is now owned by the tree.
   */
  v = validation_status_sprout(&rc->validation_status_root, &needs_balancing, v);
  if (v == rc->validation_status_in_waiting)
    rc->validation_status_in_waiting = NULL;

  /* Newly inserted nodes also go on the stack used for final reporting. */
  if (rc->validation_status_in_waiting == NULL &&
      !sk_validation_status_t_push(rc->validation_status, v)) {
    logmsg(rc, log_sys_err, "Couldn't store validation status entry for %s", uri->s);
    return;
  }

  v->timestamp = time(0);

  /* Each (key, code) pair is only logged once. */
  if (validation_status_get_code(v, code))
    return;

  validation_status_set_code(v, code, 1);

  logmsg(rc, log_verbose, "Recording \"%s\" for %s%s%s",
	 (mib_counter_desc[code]
	  ? mib_counter_desc[code]
	  : X509_verify_cert_error_string(mib_counter_openssl[code])),
	 (generation != object_generation_null ? object_generation_label[generation] : ""),
	 (generation != object_generation_null ? " " : ""),
	 uri->s);
}
+
/**
 * Copy or link a file, as the case may be.
 *
 * When rc->use_links is set, hard-link target to source (removing any
 * existing target first); otherwise copy the file byte by byte and
 * propagate the source's timestamps.  Returns 1 on success, 0 on
 * failure (with the error logged).
 */
static int cp_ln(const rcynic_ctx_t *rc, const path_t *source, const path_t *target)
{
  struct stat statbuf;
  struct utimbuf utimebuf;
  FILE *in = NULL, *out = NULL;
  int c, ok = 0;

  if (rc->use_links) {
    (void) unlink(target->s);
    ok = link(source->s, target->s) == 0;
    if (!ok)
      logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
	     source->s, target->s, strerror(errno));
    return ok;
  }

  if ((in = fopen(source->s, "rb")) == NULL ||
      (out = fopen(target->s, "wb")) == NULL)
    goto done;

  while ((c = getc(in)) != EOF)
    if (putc(c, out) == EOF)
      goto done;

  ok = 1;

 done:
  /* fclose() can fail on flush; fold either failure into ok. */
  ok &= !(in != NULL && fclose(in) == EOF);
  ok &= !(out != NULL && fclose(out) == EOF);

  if (!ok) {
    logmsg(rc, log_sys_err, "Couldn't copy %s to %s: %s",
	   source->s, target->s, strerror(errno));
    return ok;
  }

  /*
   * Preserve the file modification time to allow for detection of
   * changed objects in the authenticated directory.  Failure to reset
   * the times is not optimal, but is also not critical, thus no
   * failure return.
   */
  if (stat(source->s, &statbuf) < 0 ||
      (utimebuf.actime = statbuf.st_atime,
       utimebuf.modtime = statbuf.st_mtime,
       utime(target->s, &utimebuf) < 0))
    logmsg(rc, log_sys_err, "Couldn't copy inode timestamp from %s to %s: %s",
	   source->s, target->s, strerror(errno));

  return ok;
}
+
+/**
+ * Install an object.
+ */
+static int install_object(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ const path_t *source,
+ const object_generation_t generation)
+{
+ path_t target;
+
+ if (!uri_to_filename(rc, uri, &target, &rc->new_authenticated)) {
+ logmsg(rc, log_data_err, "Couldn't generate installation name for %s", uri->s);
+ return 0;
+ }
+
+ if (!mkdir_maybe(rc, &target)) {
+ logmsg(rc, log_sys_err, "Couldn't create directory for %s", target.s);
+ return 0;
+ }
+
+ if (!cp_ln(rc, source, &target))
+ return 0;
+ log_validation_status(rc, uri, object_accepted, generation);
+ return 1;
+}
+
+/**
+ * AVL tree lookup for validation status objects.
+ */
+static validation_status_t *
+validation_status_find(validation_status_t *node,
+ const uri_t *uri,
+ const object_generation_t generation)
+{
+ int cmp;
+
+ while (node != NULL && (cmp = validation_status_cmp(node, uri, generation)) != 0)
+ node = cmp < 0 ? node->left_child : node->right_child;
+
+ return node;
+}
+
+/**
+ * Check whether we have a validation status entry corresponding to a
+ * given filename. This is intended for use during pruning the
+ * unauthenticated tree, so it only checks the current generation.
+ */
+static int
+validation_status_find_filename(const rcynic_ctx_t *rc,
+ const char *filename)
+{
+ uri_t uri;
+
+ if (strlen(filename) + SIZEOF_RSYNC >= sizeof(uri.s))
+ return 0;
+
+ strcpy(uri.s, SCHEME_RSYNC);
+ strcat(uri.s, filename);
+
+ return validation_status_find(rc->validation_status_root, &uri, object_generation_current) != NULL;
+}
+
+/**
+ * Figure out whether we already have a good copy of an object. This
+ * is a little more complicated than it sounds, because we might have
+ * failed the current generation and accepted the backup due to having
+ * followed the old CA certificate chain first during a key rollover.
+ * So if this check is of the current object and we have not already
+ * accepted the current object for this URI, we need to recheck.
+ *
+ * We also handle logging when we decide that we do need to check, so
+ * that the caller doesn't need to concern itself with why we thought
+ * the check was necessary.
+ */
+static int skip_checking_this_object(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ const object_generation_t generation)
+{
+ validation_status_t *v = NULL;
+ path_t path;
+
+ assert(rc && uri && rc->validation_status);
+
+ if (!uri_to_filename(rc, uri, &path, &rc->new_authenticated))
+ return 1;
+
+ if (access(path.s, R_OK)) {
+ logmsg(rc, log_telemetry, "Checking %s", uri->s);
+ return 0;
+ }
+
+ if (generation != object_generation_current)
+ return 1;
+
+ v = validation_status_find(rc->validation_status_root, uri, generation);
+
+ if (v != NULL && validation_status_get_code(v, object_accepted))
+ return 1;
+
+ log_validation_status(rc, uri, rechecking_object, generation);
+ logmsg(rc, log_telemetry, "Rechecking %s", uri->s);
+ return 0;
+}
+
+
+
/**
 * Check whether str ends with suffix.
 */
static int endswith(const char *str, const char *suffix)
{
  size_t n_str, n_suffix;

  assert(str != NULL && suffix != NULL);

  n_str = strlen(str);
  n_suffix = strlen(suffix);

  if (n_str < n_suffix)
    return 0;

  return strcmp(str + (n_str - n_suffix), suffix) == 0;
}
+
/**
 * Check whether str starts with prefix.
 */
static int startswith(const char *str, const char *prefix)
{
  size_t n_str, n_prefix;

  assert(str != NULL && prefix != NULL);

  n_str = strlen(str);
  n_prefix = strlen(prefix);

  if (n_str < n_prefix)
    return 0;

  return strncmp(str, prefix, n_prefix) == 0;
}
+
/**
 * Convert a filename to a file:// URI, for logging.
 *
 * Relative filenames are made absolute by prepending the current
 * working directory.  On any failure (getcwd error, or the result
 * would not fit in uri->s) the URI is set to the empty string rather
 * than a truncated value.
 */
static void filename_to_uri(uri_t *uri,
                            const char *fn)
{
  assert(sizeof("file://") < sizeof(uri->s));
  strcpy(uri->s, "file://");
  if (*fn != '/') {
    /*
     * getcwd() writes directly after the scheme prefix.  The second
     * clause guards the strcat("/") below: if the cwd already fills
     * the buffer and doesn't end in '/', there's no room for one.
     */
    if (getcwd(uri->s + strlen(uri->s), sizeof(uri->s) - strlen(uri->s)) == NULL ||
        (!endswith(uri->s, "/") && strlen(uri->s) >= sizeof(uri->s) - 1))
      uri->s[0] = '\0';
    else
      strcat(uri->s, "/");
  }
  /* Append the filename only if everything so far succeeded and it fits. */
  if (uri->s[0] != '\0' && strlen(uri->s) + strlen(fn) < sizeof(uri->s))
    strcat(uri->s, fn);
  else
    uri->s[0] = '\0';
}
+
+/**
+ * Set a directory name, adding or stripping trailing slash as needed.
+ */
+static int set_directory(const rcynic_ctx_t *rc, path_t *out, const char *in, const int want_slash)
+{
+ int has_slash, need_slash;
+ size_t n;
+
+ assert(rc && in && out);
+
+ n = strlen(in);
+
+ if (n == 0) {
+ logmsg(rc, log_usage_err, "Empty path");
+ return 0;
+ }
+
+ has_slash = in[n - 1] == '/';
+
+ need_slash = want_slash && !has_slash;
+
+ if (n + need_slash + 1 > sizeof(out->s)) {
+ logmsg(rc, log_usage_err, "Path \"%s\" too long", in);
+ return 0;
+ }
+
+ strcpy(out->s, in);
+ if (need_slash)
+ strcat(out->s, "/");
+ else if (has_slash && !want_slash)
+ out->s[n - 1] = '\0';
+
+ return 1;
+}
+
+/**
+ * Test whether a filesystem path points to a directory.
+ */
+static int is_directory(const path_t *name)
+{
+ struct stat st;
+
+ assert(name);
+ return lstat(name->s, &st) == 0 && S_ISDIR(st.st_mode);
+}
+
+/**
+ * Remove a directory tree, like rm -rf.
+ */
+static int rm_rf(const path_t *name)
+{
+ path_t path;
+ struct dirent *d;
+ DIR *dir;
+ int ret = 0;
+
+ assert(name);
+
+ if (!is_directory(name))
+ return unlink(name->s) == 0;
+
+ if ((dir = opendir(name->s)) == NULL)
+ return 0;
+
+ while ((d = readdir(dir)) != NULL) {
+ if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
+ continue;
+ if (snprintf(path.s, sizeof(path.s), "%s/%s", name->s, d->d_name) >= sizeof(path.s))
+ goto done;
+ if (unlink(path.s) == 0)
+ continue;
+ else if (rm_rf(&path))
+ continue;
+ else
+ goto done;
+ }
+
+ ret = rmdir(name->s) == 0;
+
+ done:
+ closedir(dir);
+ return ret;
+}
+
/**
 * Construct names for the directories not directly settable by the
 * user.
 *
 * This function also checks for an old-style rc->authenticated
 * directory, to simplify upgrade from older versions of rcynic.
 */
static int construct_directory_names(rcynic_ctx_t *rc)
{
  struct stat st;
  ssize_t n;
  path_t p;
  time_t t = time(0);

  p = rc->authenticated;

  n = strlen(p.s);

  /*
   * NOTE(review): this length check uses authenticated_symlink_suffix
   * while the appended text below is a strftime() timestamp -- the
   * two presumably have compatible lengths; confirm against the
   * suffix definition.
   */
  if (n + sizeof(authenticated_symlink_suffix) >= sizeof(p.s)) {
    logmsg(rc, log_usage_err, "Symlink name would be too long");
    return 0;
  }

  /* Append a UTC timestamp to form the per-run directory name. */
  if (strftime(p.s + n, sizeof(p.s) - n - 1, ".%Y-%m-%dT%H:%M:%SZ", gmtime(&t)) == 0) {
    logmsg(rc, log_usage_err, "Generated path with timestamp would be too long");
    return 0;
  }

  if (!set_directory(rc, &rc->new_authenticated, p.s, 1))
    return 0;

  if (!set_directory(rc, &rc->old_authenticated, rc->authenticated.s, 1))
    return 0;

  /*
   * Upgrade path: an old-style real directory (not a symlink, hence
   * lstat) at rc->authenticated gets shoved aside as ".old".
   */
  if (lstat(rc->authenticated.s, &st) == 0 && S_ISDIR((st.st_mode)) &&
      strlen(rc->authenticated.s) + sizeof(".old") < sizeof(p.s)) {
    p = rc->authenticated;
    strcat(p.s, ".old");
    rm_rf(&p);
    (void) rename(rc->authenticated.s, p.s);
  }

  /* If a directory is still in the way, the rename above failed. */
  if (lstat(rc->authenticated.s, &st) == 0 && S_ISDIR(st.st_mode)) {
    logmsg(rc, log_usage_err,
	   "Existing %s directory is in the way, please remove it",
	   rc->authenticated.s);
    return 0;
  }

  return 1;
}
+
/**
 * Do final symlink shuffle and cleanup of output directories.
 *
 * Atomically repoints the rc->authenticated symlink at the freshly
 * built tree (symlink to a temporary name, then rename over the old
 * link), points an ".old" symlink at the previous tree, and removes
 * any stale timestamped trees left over from earlier runs.
 */
static int finalize_directories(const rcynic_ctx_t *rc)
{
  path_t path, real_old, real_new;
  const char *dir;
  glob_t g;
  int i;

  if (!realpath(rc->old_authenticated.s, real_old.s))
    real_old.s[0] = '\0';

  if (!realpath(rc->new_authenticated.s, real_new.s))
    real_new.s[0] = '\0';

  assert(real_new.s[0] && real_new.s[strlen(real_new.s) - 1] != '/');

  /* Symlink target is the last path component (relative link). */
  if ((dir = strrchr(real_new.s, '/')) == NULL)
    dir = real_new.s;
  else
    dir++;

  path = rc->authenticated;

  if (strlen(path.s) + sizeof(authenticated_symlink_suffix) >= sizeof(path.s))
    return 0;
  strcat(path.s, authenticated_symlink_suffix);

  (void) unlink(path.s);

  if (symlink(dir, path.s) < 0) {
    logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
	   path.s, dir, strerror(errno));
    return 0;
  }

  /* rename() over the old symlink makes the switch atomic. */
  if (rename(path.s, rc->authenticated.s) < 0) {
    logmsg(rc, log_sys_err, "Couldn't rename %s to %s: %s",
	   path.s, rc->authenticated.s, strerror(errno));
    return 0;
  }

  /* Best-effort ".old" symlink to the previous tree, for operators. */
  if (real_old.s[0] && strlen(rc->authenticated.s) + sizeof(".old") < sizeof(path.s)) {
    assert(real_old.s[strlen(real_old.s) - 1] != '/');

    path = rc->authenticated;
    strcat(path.s, ".old");

    (void) unlink(path.s);

    if ((dir = strrchr(real_old.s, '/')) == NULL)
      dir = real_old.s;
    else
      dir++;

    (void) symlink(dir, path.s);
  }

  /* Garbage-collect all timestamped trees other than old and new. */
  path = rc->authenticated;
  assert(strlen(path.s) + sizeof(".*") < sizeof(path.s));
  strcat(path.s, ".*");

  memset(&g, 0, sizeof(g));

  if (real_new.s[0] && glob(path.s, 0, 0, &g) == 0) {
    for (i = 0; i < g.gl_pathc; i++)
      if (realpath(g.gl_pathv[i], path.s) &&
	  strcmp(path.s, real_old.s) &&
	  strcmp(path.s, real_new.s))
	rm_rf(&path);
    globfree(&g);
  }

  return 1;
}
+
+
+
+/**
+ * Test whether a pair of URIs "conflict", that is, whether attempting
+ * to rsync both of them at the same time in parallel might cause
+ * unpredictable behavior. Might need a better name for this test.
+ *
+ * Returns non-zero iff the two URIs "conflict".
+ */
+static int conflicting_uris(const uri_t *a, const uri_t *b)
+{
+ size_t len_a, len_b;
+
+ assert(a && is_rsync(a->s) && b && is_rsync(b->s));
+
+ len_a = strlen(a->s);
+ len_b = strlen(b->s);
+
+ assert(len_a < sizeof(a->s) && len_b < sizeof(b->s));
+
+ return !strncmp(a->s, b->s, len_a < len_b ? len_a : len_b);
+}
+
+
+
/**
 * Read non-directory filenames from a directory, so we can check to
 * see what's missing from a manifest.
 *
 * Which on-disk tree we read depends on the walk state: the
 * unauthenticated tree for the current pass, the old authenticated
 * tree for the backup pass.  Returns a newly allocated sorted stack
 * of strdup()ed names (caller frees with sk_OPENSSL_STRING_pop_free),
 * or NULL on any failure.
 */
static STACK_OF(OPENSSL_STRING) *directory_filenames(const rcynic_ctx_t *rc,
						     const walk_state_t state,
						     const uri_t *uri)
{
  STACK_OF(OPENSSL_STRING) *result = NULL;
  path_t dpath, fpath;
  const path_t *prefix = NULL;
  DIR *dir = NULL;
  struct dirent *d;
  int ok = 0;

  assert(rc && uri);

  switch (state) {
  case walk_state_current:
    prefix = &rc->unauthenticated;
    break;
  case walk_state_backup:
    prefix = &rc->old_authenticated;
    break;
  default:
    goto done;                  /* No directory for other walk states */
  }

  if (!uri_to_filename(rc, uri, &dpath, prefix) ||
      (dir = opendir(dpath.s)) == NULL ||
      (result = sk_OPENSSL_STRING_new(uri_cmp)) == NULL)
    goto done;

  /* Collect every plain entry; subdirectories are deliberately skipped. */
  while ((d = readdir(dir)) != NULL)
    if (snprintf(fpath.s, sizeof(fpath.s), "%s/%s", dpath.s, d->d_name) >= sizeof(fpath.s)) {
      logmsg(rc, log_data_err, "Local path name %s/%s too long", dpath.s, d->d_name);
      goto done;
    }
    else if (!is_directory(&fpath) && !sk_OPENSSL_STRING_push_strdup(result, d->d_name)) {
      logmsg(rc, log_sys_err, "sk_OPENSSL_STRING_push_strdup() failed, probably memory exhaustion");
      goto done;
    }

  ok = 1;

 done:
  if (dir != NULL)
    closedir(dir);

  if (ok)
    return result;

  sk_OPENSSL_STRING_pop_free(result, OPENSSL_STRING_free);
  return NULL;
}
+
+
+
+/**
+ * Increment walk context reference count.
+ */
+static void walk_ctx_attach(walk_ctx_t *w)
+{
+ if (w != NULL) {
+ w->refcount++;
+ assert(w->refcount != 0);
+ }
+}
+
+/**
+ * Decrement walk context reference count; freeing the context if the
+ * reference count is now zero.
+ */
+static void walk_ctx_detach(walk_ctx_t *w)
+{
+ if (w != NULL && --(w->refcount) == 0) {
+ assert(w->refcount == 0);
+ X509_free(w->cert);
+ Manifest_free(w->manifest);
+ sk_X509_free(w->certs);
+ sk_X509_CRL_pop_free(w->crls, X509_CRL_free);
+ sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free);
+ free(w);
+ }
+}
+
+/**
+ * Return top context of a walk context stack.
+ */
+static walk_ctx_t *walk_ctx_stack_head(STACK_OF(walk_ctx_t) *wsk)
+{
+ return sk_walk_ctx_t_value(wsk, sk_walk_ctx_t_num(wsk) - 1);
+}
+
+/**
+ * Whether we're done iterating over a walk context. Think of this as
+ * the thing you call (negated) in the second clause of a conceptual
+ * "for" loop.
+ */
+static int walk_ctx_loop_done(STACK_OF(walk_ctx_t) *wsk)
+{
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ return wsk == NULL || w == NULL || w->state >= walk_state_done;
+}
+
/**
 * Walk context iterator.  Think of this as the thing you call in the
 * third clause of a conceptual "for" loop: this reinitializes as
 * necessary for the next pass through the loop.
 *
 * General idea here is that we have several state variables in a walk
 * context which collectively define the current pass, product URI,
 * etc, and we want to be able to iterate through this sequence via
 * the event system.  So this function steps to the next state.
 *
 * Conceptually, w->manifest->fileList and w->filenames form a single
 * array with index w->manifest_iteration + w->filename_iteration.
 * Beware of fencepost errors, I've gotten this wrong once already.
 * Slightly odd coding here is to make it easier to check this.
 */
static void walk_ctx_loop_next(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
{
  walk_ctx_t *w = walk_ctx_stack_head(wsk);
  int n_manifest, n_filenames;

  assert(rc && wsk && w);

  assert(w->manifest_iteration >= 0 && w->filename_iteration >= 0);

  n_manifest = w->manifest ? sk_FileAndHash_num(w->manifest->fileList) : 0;
  n_filenames = w->filenames ? sk_OPENSSL_STRING_num(w->filenames) : 0;

  /* Advance the combined index: manifest entries first, then leftovers. */
  if (w->manifest_iteration + w->filename_iteration < n_manifest + n_filenames) {
    if (w->manifest_iteration < n_manifest)
      w->manifest_iteration++;
    else
      w->filename_iteration++;
  }

  assert(w->manifest_iteration <= n_manifest && w->filename_iteration <= n_filenames);

  /* Still items left in the current pass: nothing more to do. */
  if (w->manifest_iteration + w->filename_iteration < n_manifest + n_filenames)
    return;

  /*
   * Current pass exhausted: advance to the next walk state, reloading
   * the directory listing for that state, until we find a state with
   * something to iterate over or run off the end (walk_state_done).
   */
  while (!walk_ctx_loop_done(wsk)) {
    w->state++;
    w->manifest_iteration = 0;
    w->filename_iteration = 0;
    sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free);
    w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia);
    if (w->manifest != NULL || w->filenames != NULL)
      return;
  }
}
+
+static int check_manifest(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk);
+
/**
 * Loop initializer for walk context.  Think of this as the thing you
 * call in the first clause of a conceptual "for" loop.
 */
static void walk_ctx_loop_init(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
{
  walk_ctx_t *w = walk_ctx_stack_head(wsk);

  assert(rc && wsk && w && w->state == walk_state_ready);

  if (!w->manifest && !check_manifest(rc, wsk)) {
    /*
     * Simple failure to find a manifest doesn't get here.  This is
     * for manifest failures that cause us to reject all of this
     * certificate's products due to policy knob settings.
     */
    w->state = walk_state_done;
    return;
  }

  if (!w->manifest)
    logmsg(rc, log_telemetry, "Couldn't get manifest %s, blundering onward", w->certinfo.manifest.s);

  /* Start iterating with the "current" pass. */
  w->manifest_iteration = 0;
  w->filename_iteration = 0;
  w->state++;
  assert(w->state == walk_state_current);

  assert(w->filenames == NULL);
  w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia);

  /* A manifest whose nextUpdate is in the past is stale but still usable. */
  w->stale_manifest = w->manifest != NULL && X509_cmp_current_time(w->manifest->nextUpdate) < 0;

  /* Skip ahead if the current pass has nothing at all to look at. */
  while (!walk_ctx_loop_done(wsk) &&
	 (w->manifest == NULL || w->manifest_iteration >= sk_FileAndHash_num(w->manifest->fileList)) &&
	 (w->filenames == NULL || w->filename_iteration >= sk_OPENSSL_STRING_num(w->filenames)))
    walk_ctx_loop_next(rc, wsk);
}
+
+/**
+ * Extract URI and hash values from walk context.
+ */
+static int walk_ctx_loop_this(const rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ uri_t *uri,
+ const unsigned char **hash,
+ size_t *hashlen)
+{
+ const walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ const char *name = NULL;
+ FileAndHash *fah = NULL;
+
+ assert(rc && wsk && w && uri && hash && hashlen);
+
+ if (w->manifest != NULL && w->manifest_iteration < sk_FileAndHash_num(w->manifest->fileList)) {
+ fah = sk_FileAndHash_value(w->manifest->fileList, w->manifest_iteration);
+ name = (const char *) fah->file->data;
+ } else if (w->filenames != NULL && w->filename_iteration < sk_OPENSSL_STRING_num(w->filenames)) {
+ name = sk_OPENSSL_STRING_value(w->filenames, w->filename_iteration);
+ }
+
+ if (name == NULL) {
+ logmsg(rc, log_sys_err, "Can't find a URI in walk context, this shouldn't happen: state %d, manifest_iteration %d, filename_iteration %d",
+ (int) w->state, w->manifest_iteration, w->filename_iteration);
+ return 0;
+ }
+
+ if (strlen(w->certinfo.sia.s) + strlen(name) >= sizeof(uri->s)) {
+ logmsg(rc, log_data_err, "URI %s%s too long, skipping", w->certinfo.sia.s, uri->s);
+ return 0;
+ }
+
+ strcpy(uri->s, w->certinfo.sia.s);
+ strcat(uri->s, name);
+
+ if (fah != NULL) {
+ sk_OPENSSL_STRING_remove(w->filenames, name);
+ *hash = fah->hash->data;
+ *hashlen = fah->hash->length;
+ } else {
+ *hash = NULL;
+ *hashlen = 0;
+ }
+
+ return 1;
+}
+
/**
 * Create a new, empty walk context stack.  Returns NULL on allocation
 * failure.
 */
static STACK_OF(walk_ctx_t) *walk_ctx_stack_new(void)
{
  return sk_walk_ctx_t_new_null();
}
+
+/**
+ * Push a walk context onto a walk context stack, return the new context.
+ */
+static walk_ctx_t *walk_ctx_stack_push(STACK_OF(walk_ctx_t) *wsk,
+ X509 *x,
+ const certinfo_t *certinfo)
+{
+ walk_ctx_t *w;
+
+ if (x == NULL ||
+ (certinfo == NULL) != (sk_walk_ctx_t_num(wsk) == 0) ||
+ (w = malloc(sizeof(*w))) == NULL)
+ return NULL;
+
+ memset(w, 0, sizeof(*w));
+ w->cert = x;
+ if (certinfo != NULL)
+ w->certinfo = *certinfo;
+ else
+ memset(&w->certinfo, 0, sizeof(w->certinfo));
+
+ if (!sk_walk_ctx_t_push(wsk, w)) {
+ free(w);
+ return NULL;
+ }
+
+ walk_ctx_attach(w);
+ return w;
+}
+
+/**
+ * Pop and discard a walk context from a walk context stack.
+ */
+static void walk_ctx_stack_pop(STACK_OF(walk_ctx_t) *wsk)
+{
+ walk_ctx_detach(sk_walk_ctx_t_pop(wsk));
+}
+
+/**
+ * Clone a stack of walk contexts.
+ */
+static STACK_OF(walk_ctx_t) *walk_ctx_stack_clone(STACK_OF(walk_ctx_t) *old_wsk)
+{
+ STACK_OF(walk_ctx_t) *new_wsk;
+ int i;
+ if (old_wsk == NULL || (new_wsk = sk_walk_ctx_t_dup(old_wsk)) == NULL)
+ return NULL;
+ for (i = 0; i < sk_walk_ctx_t_num(new_wsk); i++)
+ walk_ctx_attach(sk_walk_ctx_t_value(new_wsk, i));
+ return new_wsk;
+}
+
+/**
+ * Extract certificate stack from walk context stack. Returns a newly
+ * created STACK_OF(X509) pointing to the existing cert objects.
+ *
+ * NB: This is a shallow copy, so use sk_X509_free() to free it, not
+ * sk_X509_pop_free().
+ */
+static STACK_OF(X509) *walk_ctx_stack_certs(const rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk)
+{
+ STACK_OF(X509) *xsk = sk_X509_new_null();
+ walk_ctx_t *w;
+ int i;
+
+ assert(rc);
+
+ for (i = 0; i < sk_walk_ctx_t_num(wsk); i++)
+ if ((w = sk_walk_ctx_t_value(wsk, i)) == NULL ||
+ (w->cert != NULL && !sk_X509_push(xsk, w->cert)))
+ goto fail;
+
+ return xsk;
+
+ fail:
+ logmsg(rc, log_sys_err, "Couldn't clone walk_ctx_stack, memory exhausted?");
+ sk_X509_free(xsk);
+ return NULL;
+}
+
/**
 * Free a walk context stack, decrementing reference counts of each
 * frame on it.  Frames whose count drops to zero are freed by
 * walk_ctx_detach().
 */
static void walk_ctx_stack_free(STACK_OF(walk_ctx_t) *wsk)
{
  sk_walk_ctx_t_pop_free(wsk, walk_ctx_detach);
}
+
+
+
+static int rsync_count_running(const rcynic_ctx_t *);
+
+/**
+ * Add a task to the task queue.
+ */
+static int task_add(const rcynic_ctx_t *rc,
+ void (*handler)(rcynic_ctx_t *, void *),
+ void *cookie)
+{
+ task_t *t = malloc(sizeof(*t));
+
+ assert(rc && rc->task_queue && handler);
+
+ assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+ if (!t)
+ return 0;
+
+ t->handler = handler;
+ t->cookie = cookie;
+
+ if (sk_task_t_push(rc->task_queue, t))
+ return 1;
+
+ free(t);
+ return 0;
+}
+
+/**
+ * Run tasks until queue is empty.
+ */
+static void task_run_q(rcynic_ctx_t *rc)
+{
+ task_t *t;
+ assert(rc && rc->task_queue);
+ while ((t = sk_task_t_shift(rc->task_queue)) != NULL) {
+ t->handler(rc, t->cookie);
+ free(t);
+ }
+}
+
+
+
/**
 * Check cache of whether we've already fetched a particular URI.
 *
 * Since an rsync fetch of a directory covers everything below it,
 * this searches for the URI itself and then for each ancestor prefix
 * in turn, stopping at the scheme.  Returns the matching history
 * entry or NULL.
 */
static rsync_history_t *rsync_history_uri(const rcynic_ctx_t *rc,
					  const uri_t *uri)
{
  rsync_history_t h;
  char *s;
  int i;

  assert(rc && uri && rc->rsync_history);

  if (!is_rsync(uri->s))
    return NULL;

  h.uri = *uri;

  /* Strip any trailing slashes so keys are in canonical form. */
  while ((s = strrchr(h.uri.s, '/')) != NULL && s[1] == '\0')
    *s = '\0';

  /*
   * Walk up the path one component at a time until sk_find() matches
   * or we would trim into the scheme prefix itself.
   */
  while ((i = sk_rsync_history_t_find(rc->rsync_history, &h)) < 0) {
    if ((s = strrchr(h.uri.s, '/')) == NULL ||
	(s - h.uri.s) < SIZEOF_RSYNC)
      return NULL;
    *s = '\0';
  }

  return sk_rsync_history_t_value(rc->rsync_history, i);
}
+
/**
 * Record that we've already attempted to synchronize a particular
 * rsync URI.
 *
 * Successful fetches are recorded under the exact (slash-trimmed)
 * URI; failures are recorded for the whole host-level prefix, so we
 * don't keep retrying other URIs on a host that's already failed.
 */
static void rsync_history_add(const rcynic_ctx_t *rc,
			      const rsync_ctx_t *ctx,
			      const rsync_status_t status)
{
  int final_slash = 0;
  rsync_history_t *h;
  uri_t uri;
  size_t n;
  char *s;

  assert(rc && ctx && rc->rsync_history && is_rsync(ctx->uri.s));

  uri = ctx->uri;

  /* Canonicalize: strip trailing slashes, remembering there was one. */
  while ((s = strrchr(uri.s, '/')) != NULL && s[1] == '\0') {
    final_slash = 1;
    *s = '\0';
  }

  if (status != rsync_status_done) {

    /* Failure: truncate to the host portion (first '/' after the scheme). */
    n = SIZEOF_RSYNC + strcspn(uri.s + SIZEOF_RSYNC, "/");
    assert(n < sizeof(uri.s));
    uri.s[n] = '\0';
    final_slash = 1;

    /* Already have a failure entry covering this prefix: nothing to add. */
    if ((h = rsync_history_uri(rc, &uri)) != NULL) {
      assert(h->status != rsync_status_done);
      return;
    }
  }

  if ((h = rsync_history_t_new()) != NULL) {
    h->uri = uri;
    h->status = status;
    h->started = ctx->started;
    h->finished = time(0);
    h->final_slash = final_slash;
  }

  /* Failure here is survivable: worst case we refetch later. */
  if (h == NULL || !sk_rsync_history_t_push(rc->rsync_history, h)) {
    rsync_history_t_free(h);
    logmsg(rc, log_sys_err,
	   "Couldn't add %s to rsync_history, blundering onwards", uri.s);
  }
}
+
+
+
+/**
+ * Return count of how many rsync contexts are in running.
+ */
+static int rsync_count_running(const rcynic_ctx_t *rc)
+{
+ const rsync_ctx_t *c;
+ int idx, count = 0;
+
+ assert(rc && rc->rsync_queue);
+
+ /*
+  * A context occupies a child-process slot while it is running, has a
+  * closed pipe but an unreaped child, or is being terminated.
+  */
+ for (idx = 0; (c = sk_rsync_ctx_t_value(rc->rsync_queue, idx)) != NULL; idx++) {
+ if (c->state == rsync_state_running ||
+ c->state == rsync_state_closed ||
+ c->state == rsync_state_terminating)
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * Test whether an rsync context conflicts with anything that's
+ * currently runable.
+ */
+static int rsync_conflicts(const rcynic_ctx_t *rc,
+ const rsync_ctx_t *ctx)
+{
+ const rsync_ctx_t *other;
+ int i;
+
+ assert(rc && ctx && rc->rsync_queue);
+
+ /*
+  * A conflict exists when some other context that is about to run or
+  * already running covers an overlapping span of URI space.
+  */
+ for (i = 0; (other = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+ if (other == ctx)
+ continue;
+ if (other->state != rsync_state_initial &&
+ other->state != rsync_state_running)
+ continue;
+ if (conflicting_uris(&other->uri, &ctx->uri))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Test whether a rsync context is runable at this time.
+ */
+static int rsync_runable(const rcynic_ctx_t *rc,
+ const rsync_ctx_t *ctx)
+{
+ assert(rc && ctx);
+
+ /* Fresh or already-running contexts are always runable. */
+ if (ctx->state == rsync_state_initial ||
+ ctx->state == rsync_state_running)
+ return 1;
+
+ /* Retry-wait contexts become runable once their backoff expires. */
+ if (ctx->state == rsync_state_retry_wait)
+ return ctx->deadline <= time(0);
+
+ /* Conflict-wait contexts become runable once nothing conflicts. */
+ if (ctx->state == rsync_state_conflict_wait)
+ return !rsync_conflicts(rc, ctx);
+
+ /* closed, terminating, or anything unrecognized: not runable. */
+ return 0;
+}
+
+/**
+ * Return count of runable rsync contexts.
+ */
+static int rsync_count_runable(const rcynic_ctx_t *rc)
+{
+ const rsync_ctx_t *c;
+ int idx, count = 0;
+
+ assert(rc && rc->rsync_queue);
+
+ /* Tally every queued context that rsync_runable() accepts right now. */
+ for (idx = 0; (c = sk_rsync_ctx_t_value(rc->rsync_queue, idx)) != NULL; idx++)
+ count += rsync_runable(rc, c) ? 1 : 0;
+
+ return count;
+}
+
+/**
+ * Call rsync context handler, if one is set.
+ */
+static void rsync_call_handler(rcynic_ctx_t *rc,
+ rsync_ctx_t *ctx,
+ const rsync_status_t status)
+{
+ if (!ctx)
+ return;
+
+ /*
+  * Record a validation-status event for terminal failure outcomes
+  * before notifying the handler; pending and done need no event here.
+  */
+ switch (status) {
+
+ case rsync_status_pending:
+ case rsync_status_done:
+ break;
+
+ case rsync_status_failed:
+ log_validation_status(rc, &ctx->uri, rsync_transfer_failed, object_generation_null);
+ break;
+
+ case rsync_status_timed_out:
+ log_validation_status(rc, &ctx->uri, rsync_transfer_timed_out, object_generation_null);
+ break;
+
+ case rsync_status_skipped:
+ log_validation_status(rc, &ctx->uri, rsync_transfer_skipped, object_generation_null);
+ break;
+ }
+
+ /* Handler is optional; cookie is opaque caller context. */
+ if (ctx->handler)
+ ctx->handler(rc, ctx, status, &ctx->uri, ctx->cookie);
+}
+
+/**
+ * Run an rsync process.
+ */
+static void rsync_run(rcynic_ctx_t *rc,
+                      rsync_ctx_t *ctx)
+{
+  static const char * const rsync_cmd[] = {
+    "rsync", "--update", "--times", "--copy-links", "--itemize-changes"
+  };
+  static const char * const rsync_tree_args[] = {
+    "--recursive", "--delete"
+  };
+
+  const char *argv[10];
+  path_t path;
+  int i, argc = 0, flags, pipe_fds[2];
+
+  pipe_fds[0] = pipe_fds[1] = -1;
+
+  assert(rc && ctx && ctx->pid == 0 && ctx->state != rsync_state_running && rsync_runable(rc, ctx));
+
+  /*
+   * Another fetch may have covered this URI while we sat in the queue;
+   * if so we're done without spawning anything.
+   */
+  if (rsync_history_uri(rc, &ctx->uri)) {
+    logmsg(rc, log_verbose, "Late rsync cache hit for %s", ctx->uri.s);
+    rsync_call_handler(rc, ctx, rsync_status_done);
+    (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
+    free(ctx);
+    return;
+  }
+
+  assert(rsync_count_running(rc) < rc->max_parallel_fetches);
+
+  logmsg(rc, log_telemetry, "Fetching %s", ctx->uri.s);
+
+  memset(argv, 0, sizeof(argv));
+
+  for (i = 0; i < sizeof(rsync_cmd)/sizeof(*rsync_cmd); i++) {
+    assert(argc < sizeof(argv)/sizeof(*argv));
+    argv[argc++] = rsync_cmd[i];
+  }
+  /* A URI ending in '/' names a subtree: fetch recursively with --delete. */
+  if (endswith(ctx->uri.s, "/")) {
+    for (i = 0; i < sizeof(rsync_tree_args)/sizeof(*rsync_tree_args); i++) {
+      assert(argc < sizeof(argv)/sizeof(*argv));
+      argv[argc++] = rsync_tree_args[i];
+    }
+  }
+
+  /* Allow configuration to override which rsync binary we exec. */
+  if (rc->rsync_program)
+    argv[0] = rc->rsync_program;
+
+  if (!uri_to_filename(rc, &ctx->uri, &path, &rc->unauthenticated)) {
+    logmsg(rc, log_data_err, "Couldn't extract filename from URI: %s", ctx->uri.s);
+    goto lose;
+  }
+
+  assert(argc < sizeof(argv)/sizeof(*argv));
+  argv[argc++] = ctx->uri.s;
+
+  assert(argc < sizeof(argv)/sizeof(*argv));
+  argv[argc++] = path.s;
+
+  if (!mkdir_maybe(rc, &path)) {
+    logmsg(rc, log_sys_err, "Couldn't make target directory: %s", path.s);
+    goto lose;
+  }
+
+  for (i = 0; i < argc; i++)
+    logmsg(rc, log_debug, "rsync argv[%d]: %s", i, argv[i]);
+
+  if (pipe(pipe_fds) < 0) {
+    logmsg(rc, log_sys_err, "pipe() failed: %s", strerror(errno));
+    goto lose;
+  }
+
+  switch ((ctx->pid = vfork())) {
+
+  case -1:
+    logmsg(rc, log_sys_err, "vfork() failed: %s", strerror(errno));
+    goto lose;
+
+  case 0:
+    /*
+     * Child: wire both stdout and stderr into the pipe, then exec.
+     * Only async-signal-safe calls here, since we're in a vfork child.
+     */
+#define whine(msg) ((void) write(2, msg, sizeof(msg) - 1))
+    if (close(pipe_fds[0]) < 0)
+      whine("close(pipe_fds[0]) failed\n");
+    else if (dup2(pipe_fds[1], 1) < 0)
+      whine("dup2(pipe_fds[1], 1) failed\n");
+    else if (dup2(pipe_fds[1], 2) < 0)
+      whine("dup2(pipe_fds[1], 2) failed\n");
+    else if (close(pipe_fds[1]) < 0)
+      whine("close(pipe_fds[1]) failed\n");
+    else if (execvp(argv[0], (char * const *) argv) < 0)
+      whine("execvp(argv[0], (char * const *) argv) failed\n");
+    whine("last system error: ");
+    write(2, strerror(errno), strlen(strerror(errno)));
+    whine("\n");
+    _exit(1);
+#undef whine
+
+  default:
+    /*
+     * Parent: keep the read side of the pipe, make it non-blocking,
+     * and mark the context running.  The event loop reaps the child.
+     */
+    ctx->fd = pipe_fds[0];
+    if ((flags = fcntl(ctx->fd, F_GETFL, 0)) == -1 ||
+        fcntl(ctx->fd, F_SETFL, flags | O_NONBLOCK) == -1) {
+      logmsg(rc, log_sys_err, "fcntl(ctx->fd, F_[GS]ETFL, O_NONBLOCK) failed: %s",
+             strerror(errno));
+      goto lose;
+    }
+    (void) close(pipe_fds[1]);
+    ctx->state = rsync_state_running;
+    ctx->problem = rsync_problem_none;
+    if (!ctx->started)
+      ctx->started = time(0);
+    if (rc->rsync_timeout)
+      ctx->deadline = time(0) + rc->rsync_timeout;
+    logmsg(rc, log_verbose, "Subprocess %u started, queued %d, runable %d, running %d, max %d, URI %s",
+           (unsigned) ctx->pid, sk_rsync_ctx_t_num(rc->rsync_queue), rsync_count_runable(rc), rsync_count_running(rc), rc->max_parallel_fetches, ctx->uri.s);
+    rsync_call_handler(rc, ctx, rsync_status_pending);
+    return;
+
+  }
+
+ lose:
+  if (pipe_fds[0] != -1)
+    (void) close(pipe_fds[0]);
+  if (pipe_fds[1] != -1)
+    (void) close(pipe_fds[1]);
+  if (rc->rsync_queue && ctx)
+    (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
+  rsync_call_handler(rc, ctx, rsync_status_failed);
+  if (ctx->pid > 0) {
+    (void) kill(ctx->pid, SIGKILL);
+    ctx->pid = 0;
+  }
+  /*
+   * Bug fix: ctx was removed from the queue above but never released,
+   * leaking it on every failure path.  All other exits free it.
+   */
+  free(ctx);
+}
+
+/**
+ * Process one line of rsync's output. This is a separate function
+ * primarily to centralize scraping for magic error strings.
+ */
+static void do_one_rsync_log_line(const rcynic_ctx_t *rc,
+                                  rsync_ctx_t *ctx)
+{
+  unsigned u;
+  char *s;
+
+  /*
+   * Send line to our log unless it's empty (only whitespace).
+   */
+  if (ctx->buffer[strspn(ctx->buffer, " \t\n\r")] != '\0')
+    logmsg(rc, log_telemetry, "rsync[%u]: %s", (unsigned) ctx->pid, ctx->buffer);
+
+  /*
+   * Check for magic error strings: rsyncd announcing its connection
+   * limit, which we treat as a retryable refusal rather than failure.
+   *
+   * Bug fix: pid_t must be cast to unsigned to match the %u format
+   * specifier (mismatched printf arguments are undefined behavior);
+   * every other logmsg() call in this file already casts.
+   */
+  if ((s = strstr(ctx->buffer, "@ERROR: max connections")) != NULL) {
+    ctx->problem = rsync_problem_refused;
+    if (sscanf(s, "@ERROR: max connections (%u) reached -- try again later", &u) == 1)
+      logmsg(rc, log_verbose, "Subprocess %u reported limit of %u for %s",
+             (unsigned) ctx->pid, u, ctx->uri.s);
+  }
+}
+
+/**
+ * Construct select() arguments.
+ */
+static int rsync_construct_select(const rcynic_ctx_t *rc,
+ const time_t now,
+ fd_set *rfds,
+ struct timeval *tv)
+{
+ rsync_ctx_t *ctx;
+ time_t when = 0;
+ int i, n = 0;
+
+ assert(rc && rc->rsync_queue && rfds && tv && rc->max_select_time >= 0);
+
+ FD_ZERO(rfds);
+
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+
+#if 0
+ logmsg(rc, log_debug, "+++ ctx[%d] pid %d fd %d state %s started %lu deadline %lu",
+ i, ctx->pid, ctx->fd, rsync_state_label[ctx->state],
+ (unsigned long) ctx->started, (unsigned long) ctx->deadline);
+#endif
+
+ switch (ctx->state) {
+
+ case rsync_state_running:
+ /* Watch the child's pipe; track highest fd for select(). */
+ assert(ctx->fd >= 0);
+ FD_SET(ctx->fd, rfds);
+ if (ctx->fd > n)
+ n = ctx->fd;
+ if (!rc->rsync_timeout)
+ continue;
+ /* Fall through: with a timeout configured, running contexts
+    also contribute their deadline to the wakeup time. */
+
+ case rsync_state_retry_wait:
+ /* Track earliest deadline so select() wakes in time. */
+ if (when == 0 || ctx->deadline < when)
+ when = ctx->deadline;
+ /* Fall through */
+
+ default:
+ continue;
+ }
+ }
+
+ /* Clamp the select() timeout to [0, max_select_time], waking at the
+    earliest pending deadline if there is one. */
+ if (!when)
+ tv->tv_sec = rc->max_select_time;
+ else if (when < now)
+ tv->tv_sec = 0;
+ else if (when < now + rc->max_select_time)
+ tv->tv_sec = when - now;
+ else
+ tv->tv_sec = rc->max_select_time;
+ tv->tv_usec = 0;
+ return n;
+}
+
+/**
+ * Convert rsync_status_t to mib_counter_t.
+ *
+ * Maybe some day this will go away and we won't be carrying
+ * essentially the same information in two different databases, but
+ * for now I'll settle for cleaning up the duplicate code logic.
+ */
+static mib_counter_t rsync_status_to_mib_counter(rsync_status_t status)
+{
+ switch (status) {
+ case rsync_status_done: return rsync_transfer_succeeded;
+ case rsync_status_timed_out: return rsync_transfer_timed_out;
+ case rsync_status_failed: return rsync_transfer_failed;
+ case rsync_status_skipped: return rsync_transfer_skipped;
+ default:
+ /*
+  * Keep GCC from whining about untested cases.  The assert fires
+  * in debug builds on any unexpected value (e.g. rsync_status_pending,
+  * which is not a terminal status); release builds fall back to
+  * reporting a failed transfer.
+  */
+ assert(status == rsync_status_done ||
+ status == rsync_status_timed_out ||
+ status == rsync_status_failed ||
+ status == rsync_status_skipped);
+ return rsync_transfer_failed;
+ }
+}
+
+/**
+ * Manager for queue of rsync tasks in progress.
+ *
+ * General plan here is to process one completed child, or output
+ * accumulated from children, or block if there is absolutely nothing
+ * to do, on the theory that caller had nothing to do either or would
+ * not have called us. Once we've done something allegedly useful, we
+ * return, because this is not the event loop; if and when the event
+ * loop has nothing more important to do, we'll be called again.
+ *
+ * So this is the only place where the program blocks waiting for
+ * children, but we only do it when we know there's nothing else
+ * useful that we could be doing while we wait.
+ */
+static void rsync_mgr(rcynic_ctx_t *rc)
+{
+ rsync_status_t rsync_status;
+ int i, n, pid_status = -1;
+ rsync_ctx_t *ctx = NULL;
+ time_t now = time(0);
+ struct timeval tv;
+ fd_set rfds;
+ pid_t pid;
+ char *s;
+
+ assert(rc && rc->rsync_queue);
+
+ /*
+  * Check for exited subprocesses.
+  */
+
+ while ((pid = waitpid(-1, &pid_status, WNOHANG)) > 0) {
+
+ /*
+  * Child exited, handle it.
+  */
+
+ logmsg(rc, log_verbose, "Subprocess %u exited with status %d",
+ (unsigned) pid, WEXITSTATUS(pid_status));
+
+ /* Find the queue entry that owns this pid. */
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
+ if (ctx->pid == pid)
+ break;
+ if (ctx == NULL) {
+ assert(i == sk_rsync_ctx_t_num(rc->rsync_queue));
+ logmsg(rc, log_sys_err, "Couldn't find rsync context for pid %d", pid);
+ continue;
+ }
+
+ close(ctx->fd);
+ ctx->fd = -1;
+
+ /* Flush any final partial line of subprocess output. */
+ if (ctx->buflen > 0) {
+ assert(ctx->buflen < sizeof(ctx->buffer));
+ ctx->buffer[ctx->buflen] = '\0';
+ do_one_rsync_log_line(rc, ctx);
+ ctx->buflen = 0;
+ }
+
+ switch (WEXITSTATUS(pid_status)) {
+
+ case 0:
+ rsync_status = rsync_status_done;
+ break;
+
+ case 5: /* "Error starting client-server protocol" */
+ /*
+  * Handle remote rsyncd refusing to talk to us because we've
+  * exceeded its connection limit.  Back off for a short
+  * interval, then retry.
+  */
+ if (ctx->problem == rsync_problem_refused && ctx->tries < rc->max_retries) {
+ unsigned char r;
+ /* Randomize the backoff a little to avoid thundering herd. */
+ if (!RAND_bytes(&r, sizeof(r)))
+ r = 60;
+ ctx->deadline = time(0) + rc->retry_wait_min + r;
+ ctx->state = rsync_state_retry_wait;
+ ctx->problem = rsync_problem_none;
+ ctx->pid = 0;
+ ctx->tries++;
+ logmsg(rc, log_telemetry, "Scheduling retry for %s", ctx->uri.s);
+ continue;
+ }
+ goto failure;
+
+ case 23: /* "Partial transfer due to error" */
+ /*
+  * This appears to be a catch-all for "something bad happened
+  * trying to do what you asked me to do".  In the cases I've
+  * seen to date, this is things like "the directory you
+  * requested isn't there" or "NFS exploded when I tried to touch
+  * the directory".  These aren't network layer failures, so we
+  * (probably) shouldn't give up on the repository host.
+  */
+ rsync_status = rsync_status_done;
+ log_validation_status(rc, &ctx->uri, rsync_partial_transfer, object_generation_null);
+ break;
+
+ default:
+ failure:
+ rsync_status = rsync_status_failed;
+ logmsg(rc, log_data_err, "rsync %u exited with status %d fetching %s",
+ (unsigned) pid, WEXITSTATUS(pid_status), ctx->uri.s);
+ break;
+ }
+
+ /* A deadline overrun trumps whatever exit status said. */
+ if (rc->rsync_timeout && now >= ctx->deadline)
+ rsync_status = rsync_status_timed_out;
+ log_validation_status(rc, &ctx->uri,
+ rsync_status_to_mib_counter(rsync_status),
+ object_generation_null);
+ rsync_history_add(rc, ctx, rsync_status);
+ rsync_call_handler(rc, ctx, rsync_status);
+ (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
+ free(ctx);
+ ctx = NULL;
+ }
+
+ if (pid == -1 && errno != EINTR && errno != ECHILD)
+ logmsg(rc, log_sys_err, "waitpid() returned error: %s", strerror(errno));
+
+ assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+ /*
+  * Look for rsync contexts that have become runable.  Odd loop
+  * structure is because rsync_run() might decide to remove the
+  * specified rsync task from the queue instead of running it.
+  */
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; i++) {
+ n = sk_rsync_ctx_t_num(rc->rsync_queue);
+ if (ctx->state != rsync_state_running &&
+ rsync_runable(rc, ctx) &&
+ rsync_count_running(rc) < rc->max_parallel_fetches)
+ rsync_run(rc, ctx);
+ /* If rsync_run() shrank the queue, re-examine the current index. */
+ if (n > sk_rsync_ctx_t_num(rc->rsync_queue))
+ i--;
+ }
+
+ assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+ /*
+  * Check for log text from subprocesses.
+  */
+
+ n = rsync_construct_select(rc, now, &rfds, &tv);
+
+ if (n > 0 && tv.tv_sec)
+ logmsg(rc, log_verbose, "Waiting up to %u seconds for rsync, queued %d, runable %d, running %d, max %d",
+ (unsigned) tv.tv_sec, sk_rsync_ctx_t_num(rc->rsync_queue), rsync_count_runable(rc),
+ rsync_count_running(rc), rc->max_parallel_fetches);
+
+ if (n > 0) {
+#if 0
+ logmsg(rc, log_debug, "++ select(%d, %u)", n, tv.tv_sec);
+#endif
+ n = select(n + 1, &rfds, NULL, NULL, &tv);
+ }
+
+ if (n > 0) {
+
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+ if (ctx->fd <= 0 || !FD_ISSET(ctx->fd, &rfds))
+ continue;
+
+ assert(ctx->buflen < sizeof(ctx->buffer) - 1);
+
+ /* Non-blocking read loop: accumulate output, split on newlines. */
+ while ((n = read(ctx->fd, ctx->buffer + ctx->buflen, sizeof(ctx->buffer) - 1 - ctx->buflen)) > 0) {
+ ctx->buflen += n;
+ assert(ctx->buflen < sizeof(ctx->buffer));
+ ctx->buffer[ctx->buflen] = '\0';
+
+ /* Log each complete line, then shift the remainder down. */
+ while ((s = strchr(ctx->buffer, '\n')) != NULL) {
+ *s++ = '\0';
+ do_one_rsync_log_line(rc, ctx);
+ assert(s > ctx->buffer && s < ctx->buffer + sizeof(ctx->buffer));
+ ctx->buflen -= s - ctx->buffer;
+ assert(ctx->buflen < sizeof(ctx->buffer));
+ if (ctx->buflen > 0)
+ memmove(ctx->buffer, s, ctx->buflen);
+ ctx->buffer[ctx->buflen] = '\0';
+ }
+
+ /* Buffer full with no newline: flush it as one line. */
+ if (ctx->buflen == sizeof(ctx->buffer) - 1) {
+ ctx->buffer[sizeof(ctx->buffer) - 1] = '\0';
+ do_one_rsync_log_line(rc, ctx);
+ ctx->buflen = 0;
+ }
+ }
+
+ /* EOF: child closed its side; the exit itself is reaped above. */
+ if (n == 0) {
+ (void) close(ctx->fd);
+ ctx->fd = -1;
+ ctx->state = rsync_state_closed;
+ }
+ }
+ }
+
+ assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+ /*
+  * Deal with children that have been running too long.
+  */
+ if (rc->rsync_timeout) {
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+ int sig;
+ if (ctx->pid <= 0 || now < ctx->deadline)
+ continue;
+ /* Escalate from SIGTERM to SIGKILL after KILL_MAX attempts. */
+ sig = ctx->tries++ < KILL_MAX ? SIGTERM : SIGKILL;
+ if (ctx->state != rsync_state_terminating) {
+ ctx->problem = rsync_problem_timed_out;
+ ctx->state = rsync_state_terminating;
+ ctx->tries = 0;
+ logmsg(rc, log_telemetry, "Subprocess %u is taking too long fetching %s, whacking it", (unsigned) ctx->pid, ctx->uri.s);
+ rsync_history_add(rc, ctx, rsync_status_timed_out);
+ } else if (sig == SIGTERM) {
+ logmsg(rc, log_verbose, "Whacking subprocess %u again", (unsigned) ctx->pid);
+ } else {
+ logmsg(rc, log_verbose, "Whacking subprocess %u with big hammer", (unsigned) ctx->pid);
+ }
+ (void) kill(ctx->pid, sig);
+ ctx->deadline = now + 1;
+ }
+ }
+}
+
+/**
+ * Set up rsync context and attempt to start it.
+ */
+static void rsync_init(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ void *cookie,
+ void (*handler)(rcynic_ctx_t *, const rsync_ctx_t *, const rsync_status_t, const uri_t *, void *))
+{
+ rsync_ctx_t *ctx = NULL;
+
+ assert(rc && uri && strlen(uri->s) > SIZEOF_RSYNC);
+
+ /* rsync disabled by configuration: report skipped, don't queue. */
+ if (!rc->run_rsync) {
+ logmsg(rc, log_verbose, "rsync disabled, skipping %s", uri->s);
+ if (handler)
+ handler(rc, NULL, rsync_status_skipped, uri, cookie);
+ return;
+ }
+
+ /* Already fetched (or failed) this URI or an ancestor: report done. */
+ if (rsync_history_uri(rc, uri)) {
+ logmsg(rc, log_verbose, "rsync cache hit for %s", uri->s);
+ if (handler)
+ handler(rc, NULL, rsync_status_done, uri, cookie);
+ return;
+ }
+
+ if ((ctx = malloc(sizeof(*ctx))) == NULL) {
+ logmsg(rc, log_sys_err, "malloc(rsync_ctxt_t) failed");
+ if (handler)
+ handler(rc, NULL, rsync_status_failed, uri, cookie);
+ return;
+ }
+
+ /* Zeroed context starts in rsync_state_initial with no child. */
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->uri = *uri;
+ ctx->handler = handler;
+ ctx->cookie = cookie;
+ ctx->fd = -1;
+
+ if (!sk_rsync_ctx_t_push(rc->rsync_queue, ctx)) {
+ logmsg(rc, log_sys_err, "Couldn't push rsync state object onto queue, punting %s", ctx->uri.s);
+ rsync_call_handler(rc, ctx, rsync_status_failed);
+ free(ctx);
+ return;
+ }
+
+ /* Overlapping URI already queued or running: wait our turn. */
+ if (rsync_conflicts(rc, ctx)) {
+ logmsg(rc, log_debug, "New rsync context %s is feeling conflicted", ctx->uri.s);
+ ctx->state = rsync_state_conflict_wait;
+ }
+}
+
+/**
+ * rsync a trust anchor.
+ */
+static void rsync_ta(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ tal_ctx_t *tctx,
+ void (*handler)(rcynic_ctx_t *, const rsync_ctx_t *,
+ const rsync_status_t, const uri_t *, void *))
+{
+ /* Trust anchors are single certificate files, never subtrees. */
+ assert(endswith(uri->s, ".cer"));
+ rsync_init(rc, uri, tctx, handler);
+}
+
+/**
+ * rsync an entire subtree, generally rooted at a SIA collection.
+ */
+static void rsync_tree(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ STACK_OF(walk_ctx_t) *wsk,
+ void (*handler)(rcynic_ctx_t *, const rsync_ctx_t *,
+ const rsync_status_t, const uri_t *, void *))
+{
+ /* Trailing '/' makes rsync_run() add --recursive --delete. */
+ assert(endswith(uri->s, "/"));
+ rsync_init(rc, uri, wsk, handler);
+}
+
+
+
+/**
+ * Clean up old stuff from previous rsync runs. --delete doesn't help
+ * if the URI changes and we never visit the old URI again.
+ */
+static int prune_unauthenticated(const rcynic_ctx_t *rc,
+ const path_t *name,
+ const size_t baselen)
+{
+ path_t path;
+ struct dirent *d;
+ DIR *dir;
+ const char *slash;
+
+ assert(rc && name && baselen > 0 && strlen(name->s) >= baselen);
+
+ if (!is_directory(name)) {
+ logmsg(rc, log_usage_err, "prune: %s is not a directory", name->s);
+ return 0;
+ }
+
+ if ((dir = opendir(name->s)) == NULL) {
+ logmsg(rc, log_sys_err, "prune: opendir() failed on %s: %s", name->s, strerror(errno));
+ return 0;
+ }
+
+ slash = endswith(name->s, "/") ? "" : "/";
+
+ while ((d = readdir(dir)) != NULL) {
+ if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
+ continue;
+
+ if (snprintf(path.s, sizeof(path.s), "%s%s%s", name->s, slash, d->d_name) >= sizeof(path.s)) {
+ logmsg(rc, log_debug, "prune: %s%s%s too long", name->s, slash, d->d_name);
+ goto done;
+ }
+
+ /* Entries recorded in this run's validation status are kept. */
+ if (validation_status_find_filename(rc, path.s + baselen)) {
+ logmsg(rc, log_debug, "prune: cache hit %s", path.s);
+ continue;
+ }
+
+ /* Try as a plain file first. */
+ if (unlink(path.s) == 0) {
+ logmsg(rc, log_debug, "prune: removed %s", path.s);
+ continue;
+ }
+
+ /* unlink() failed (probably a directory): recurse into it. */
+ if (prune_unauthenticated(rc, &path, baselen))
+ continue;
+
+ logmsg(rc, log_sys_err, "prune: removing %s failed: %s", path.s, strerror(errno));
+ goto done;
+ }
+
+ /* Try removing the (now possibly empty) directory itself;
+    ENOTEMPTY just means something below survived, which is fine. */
+ if (rmdir(name->s) == 0)
+ logmsg(rc, log_debug, "prune: removed %s", name->s);
+ else if (errno != ENOTEMPTY)
+ logmsg(rc, log_sys_err, "prune: couldn't remove %s: %s", name->s, strerror(errno));
+
+ done:
+ closedir(dir);
+ /* d is NULL only when the readdir loop ran to completion. */
+ return !d;
+}
+
+
+
+/**
+ * Read a DER object using a BIO pipeline that hashes the file content
+ * as we read it. Returns the internal form of the parsed DER object,
+ * sets the hash buffer (if specified) as a side effect. The default
+ * hash algorithm is SHA-256.
+ */
+static void *read_file_with_hash(const path_t *filename,
+ const ASN1_ITEM *it,
+ const EVP_MD *md,
+ hashbuf_t *hash)
+{
+ void *result = NULL;
+ BIO *b;
+
+ if ((b = BIO_new_file(filename->s, "rb")) == NULL)
+ goto error;
+
+ /* Push a digest filter BIO in front of the file BIO so the content
+    is hashed as a side effect of reading it. */
+ if (hash != NULL) {
+ BIO *b2 = BIO_new(BIO_f_md());
+ if (b2 == NULL)
+ goto error;
+ if (md == NULL)
+ md = EVP_sha256();
+ if (!BIO_set_md(b2, md)) {
+ BIO_free(b2);
+ goto error;
+ }
+ BIO_push(b2, b);
+ b = b2;
+ }
+
+ if ((result = ASN1_item_d2i_bio(it, b, NULL)) == NULL)
+ goto error;
+
+ /* BIO_gets() on a digest BIO retrieves the computed digest. */
+ if (hash != NULL) {
+ memset(hash, 0, sizeof(*hash));
+ BIO_gets(b, (char *) hash, sizeof(hash->h));
+ }
+
+ error:
+ /* Frees the whole chain; reached on success and failure alike. */
+ BIO_free_all(b);
+ return result;
+}
+
+/**
+ * Read and hash a certificate.
+ */
+static X509 *read_cert(const path_t *filename, hashbuf_t *hash)
+{
+ /* NULL md selects the default SHA-256 inside read_file_with_hash(). */
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(X509), NULL, hash);
+}
+
+/**
+ * Read and hash a CRL.
+ */
+static X509_CRL *read_crl(const path_t *filename, hashbuf_t *hash)
+{
+ /* NULL md selects the default SHA-256 inside read_file_with_hash(). */
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(X509_CRL), NULL, hash);
+}
+
+/**
+ * Read and hash a CMS message.
+ */
+static CMS_ContentInfo *read_cms(const path_t *filename, hashbuf_t *hash)
+{
+ /* NULL md selects the default SHA-256 inside read_file_with_hash(). */
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(CMS_ContentInfo), NULL, hash);
+}
+
+
+
+/**
+ * Extract CRLDP data from a certificate. Stops looking after finding
+ * the first rsync URI.
+ */
+static int extract_crldp_uri(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ const object_generation_t generation,
+ const STACK_OF(DIST_POINT) *crldp,
+ uri_t *result)
+{
+ DIST_POINT *d;
+ int i;
+
+ /* NOTE(review): assumes caller supplies result with result->s[0] == '\0';
+    the multiple-URI check below reads it before any write — confirm. */
+ assert(rc && uri && crldp && result);
+
+ /* Profile allows exactly one distribution point. */
+ if (sk_DIST_POINT_num(crldp) != 1)
+ goto bad;
+
+ d = sk_DIST_POINT_value(crldp, 0);
+
+ /* Must be a fullName (type 0) with no reasons or CRL issuer. */
+ if (d->reasons || d->CRLissuer || !d->distpoint || d->distpoint->type != 0)
+ goto bad;
+
+ for (i = 0; i < sk_GENERAL_NAME_num(d->distpoint->name.fullname); i++) {
+ GENERAL_NAME *n = sk_GENERAL_NAME_value(d->distpoint->name.fullname, i);
+ if (n == NULL || n->type != GEN_URI)
+ goto bad;
+ if (!is_rsync((char *) n->d.uniformResourceIdentifier->data))
+ log_validation_status(rc, uri, non_rsync_uri_in_extension, generation);
+ else if (sizeof(result->s) <= n->d.uniformResourceIdentifier->length)
+ log_validation_status(rc, uri, uri_too_long, generation);
+ else if (result->s[0])
+ log_validation_status(rc, uri, multiple_rsync_uris_in_extension, generation);
+ else
+ strcpy(result->s, (char *) n->d.uniformResourceIdentifier->data);
+ }
+
+ /* Success iff we captured exactly one usable rsync URI. */
+ return result->s[0];
+
+ bad:
+ log_validation_status(rc, uri, malformed_crldp_extension, generation);
+ return 0;
+}
+
+/**
+ * Extract SIA or AIA data from a certificate.
+ */
+static int extract_access_uri(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ const object_generation_t generation,
+ const AUTHORITY_INFO_ACCESS *xia,
+ const int nid,
+ uri_t *result,
+ int *count)
+{
+ int i;
+
+ /* NOTE(review): assumes caller zeroed *result and *count before the
+    call; both are read before first write here — confirm at call sites. */
+ assert(rc && uri && xia && result && count);
+
+ for (i = 0; i < sk_ACCESS_DESCRIPTION_num(xia); i++) {
+ ACCESS_DESCRIPTION *a = sk_ACCESS_DESCRIPTION_value(xia, i);
+ if (a == NULL || a->location->type != GEN_URI)
+ return 0;
+ /* Only count entries for the requested access method (nid). */
+ if (OBJ_obj2nid(a->method) != nid)
+ continue;
+ ++*count;
+ if (!is_rsync((char *) a->location->d.uniformResourceIdentifier->data))
+ log_validation_status(rc, uri, non_rsync_uri_in_extension, generation);
+ else if (sizeof(result->s) <= a->location->d.uniformResourceIdentifier->length)
+ log_validation_status(rc, uri, uri_too_long, generation);
+ else if (result->s[0])
+ log_validation_status(rc, uri, multiple_rsync_uris_in_extension, generation);
+ else
+ strcpy(result->s, (char *) a->location->d.uniformResourceIdentifier->data);
+ }
+ /* Returns 1 even if no URI was captured; caller checks result/count. */
+ return 1;
+}
+
+
+
+/**
+ * Check to see whether an AKI extension is present, is of the right
+ * form, and matches the issuer.
+ */
+static int check_aki(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ const X509 *issuer,
+ const AUTHORITY_KEYID *aki,
+ const object_generation_t generation)
+{
+ assert(rc && uri && issuer && issuer->skid);
+
+ if (aki == NULL) {
+ log_validation_status(rc, uri, aki_extension_missing, generation);
+ return 0;
+ }
+
+ /* Profile: keyIdentifier only; no authorityCertIssuer/SerialNumber. */
+ if (!aki->keyid || aki->serial || aki->issuer) {
+ log_validation_status(rc, uri, aki_extension_wrong_format, generation);
+ return 0;
+ }
+
+ /* AKI must match the issuer's Subject Key Identifier. */
+ if (ASN1_OCTET_STRING_cmp(aki->keyid, issuer->skid)) {
+ log_validation_status(rc, uri, aki_extension_issuer_mismatch, generation);
+ return 0;
+ }
+
+ return 1;
+}
+
+
+
+/**
+ * Check whether a Distinguished Name conforms to the rescert profile.
+ * The profile is very restrictive: it only allows one mandatory
+ * CommonName field and one optional SerialNumber field, both of which
+ * must be of type PrintableString.
+ */
+static int check_allowed_dn(X509_NAME *dn)
+{
+ X509_NAME_ENTRY *ne;
+ ASN1_STRING *s;
+ int loc;
+
+ if (dn == NULL)
+ return 0;
+
+ switch (X509_NAME_entry_count(dn)) {
+
+ case 2:
+ /* Two entries: the extra one must be a PrintableString serialNumber. */
+ if ((loc = X509_NAME_get_index_by_NID(dn, NID_serialNumber, -1)) < 0 ||
+ (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
+ (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
+ ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
+ return 0;
+
+ /* Fall through: both cases require the commonName check below. */
+
+ case 1:
+ /* Mandatory commonName, also PrintableString. */
+ if ((loc = X509_NAME_get_index_by_NID(dn, NID_commonName, -1)) < 0 ||
+ (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
+ (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
+ ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
+ return 0;
+
+ return 1;
+
+ default:
+ /* Zero or more than two entries: not allowed by the profile. */
+ return 0;
+ }
+}
+
+/**
+ * Check whether an ASN.1 TIME value conforms to RFC 5280 4.1.2.5.
+ */
+static int check_allowed_time_encoding(ASN1_TIME *t)
+{
+ switch (t->type) {
+
+ case V_ASN1_UTCTIME:
+ /* UTCTime must be exactly "yymmddHHMMSSZ". */
+ return t->length == sizeof("yymmddHHMMSSZ") - 1;
+
+ case V_ASN1_GENERALIZEDTIME:
+ /* GeneralizedTime must be "yyyymmddHHMMSSZ" and, per RFC 5280
+    4.1.2.5, is only allowed for dates in 2050 or later: comparing
+    against the prefix "205" accepts "205x"/"20.." years >= 2050. */
+ return (t->length == sizeof("yyyymmddHHMMSSZ") - 1 &&
+ strcmp("205", (char *) t->data) <= 0);
+
+ }
+ return 0;
+}
+
+/**
+ * Compare ASN1_TIME values.
+ */
+static int asn1_time_cmp(ASN1_TIME *t1, ASN1_TIME *t2)
+{
+  ASN1_GENERALIZEDTIME *g1 = ASN1_TIME_to_generalizedtime(t1, NULL);
+  ASN1_GENERALIZEDTIME *g2 = ASN1_TIME_to_generalizedtime(t2, NULL);
+  int cmp;
+
+  /*
+   * Bug fix: ASN1_TIME_to_generalizedtime() returns NULL on malformed
+   * input or allocation failure, and ASN1_STRING_cmp() does not accept
+   * NULL arguments.  Treat an unconvertible time as smaller than any
+   * convertible one, and two unconvertible times as equal.
+   */
+  if (g1 != NULL && g2 != NULL)
+    cmp = ASN1_STRING_cmp(g1, g2);
+  else
+    cmp = (g1 != NULL) - (g2 != NULL);
+
+  /* ASN1_GENERALIZEDTIME_free() is NULL-safe. */
+  ASN1_GENERALIZEDTIME_free(g1);
+  ASN1_GENERALIZEDTIME_free(g2);
+
+  return cmp;
+}
+
+
+
+/**
+ * Attempt to read and check one CRL from disk.
+ */
+
+static X509_CRL *check_crl_1(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
+ X509 *issuer,
+ const object_generation_t generation)
+{
+ STACK_OF(X509_REVOKED) *revoked;
+ X509_CRL *crl = NULL;
+ EVP_PKEY *pkey;
+ int i, ret;
+
+ assert(uri && path && issuer);
+
+ if (!uri_to_filename(rc, uri, path, prefix) ||
+ (crl = read_crl(path, NULL)) == NULL)
+ goto punt;
+
+ /* CRL must be version 2 (X509_CRL_get_version() returns version - 1). */
+ if (X509_CRL_get_version(crl) != 1) {
+ log_validation_status(rc, uri, wrong_object_version, generation);
+ goto punt;
+ }
+
+ /* Profile requires sha256WithRSAEncryption. */
+ if (!crl->crl || !crl->crl->sig_alg || !crl->crl->sig_alg->algorithm ||
+ OBJ_obj2nid(crl->crl->sig_alg->algorithm) != NID_sha256WithRSAEncryption) {
+ log_validation_status(rc, uri, nonconformant_signature_algorithm, generation);
+ goto punt;
+ }
+
+ if (!check_allowed_time_encoding(X509_CRL_get_lastUpdate(crl)) ||
+ !check_allowed_time_encoding(X509_CRL_get_nextUpdate(crl))) {
+ log_validation_status(rc, uri, nonconformant_asn1_time_value, generation);
+ goto punt;
+ }
+
+ if (X509_cmp_current_time(X509_CRL_get_lastUpdate(crl)) > 0) {
+ log_validation_status(rc, uri, crl_not_yet_valid, generation);
+ goto punt;
+ }
+
+ /* Stale CRL is tolerated only when configuration says so. */
+ if (X509_cmp_current_time(X509_CRL_get_nextUpdate(crl)) < 0) {
+ log_validation_status(rc, uri, stale_crl_or_manifest, generation);
+ if (!rc->allow_stale_crl)
+ goto punt;
+ }
+
+ if (!check_aki(rc, uri, issuer, crl->akid, generation))
+ goto punt;
+
+ /* CRL number: mandatory, non-negative, at most twenty octets. */
+ if (crl->crl_number == NULL) {
+ log_validation_status(rc, uri, crl_number_extension_missing, generation);
+ goto punt;
+ }
+
+ if (ASN1_INTEGER_cmp(crl->crl_number, asn1_zero) < 0) {
+ log_validation_status(rc, uri, crl_number_is_negative, generation);
+ goto punt;
+ }
+
+ if (ASN1_INTEGER_cmp(crl->crl_number, asn1_twenty_octets) > 0) {
+ log_validation_status(rc, uri, crl_number_out_of_range, generation);
+ goto punt;
+ }
+
+ /* Exactly two extensions allowed (AKI and CRL number, checked above). */
+ if (X509_CRL_get_ext_count(crl) != 2) {
+ log_validation_status(rc, uri, disallowed_x509v3_extension, generation);
+ goto punt;
+ }
+
+ if (X509_NAME_cmp(X509_CRL_get_issuer(crl), X509_get_subject_name(issuer))) {
+ log_validation_status(rc, uri, crl_issuer_name_mismatch, generation);
+ goto punt;
+ }
+
+ if (!check_allowed_dn(X509_CRL_get_issuer(crl))) {
+ log_validation_status(rc, uri, nonconformant_issuer_name, generation);
+ if (!rc->allow_nonconformant_name)
+ goto punt;
+ }
+
+ /* No per-entry extensions allowed on revoked entries. */
+ if ((revoked = X509_CRL_get_REVOKED(crl)) != NULL) {
+ for (i = sk_X509_REVOKED_num(revoked) - 1; i >= 0; --i) {
+ if (X509_REVOKED_get_ext_count(sk_X509_REVOKED_value(revoked, i)) > 0) {
+ log_validation_status(rc, uri, disallowed_x509v3_extension, generation);
+ goto punt;
+ }
+ }
+ }
+
+ /* Finally, verify the CRL signature against the issuer's key. */
+ if ((pkey = X509_get_pubkey(issuer)) == NULL)
+ goto punt;
+ ret = X509_CRL_verify(crl, pkey);
+ EVP_PKEY_free(pkey);
+
+ if (ret > 0)
+ return crl;
+
+ punt:
+ /* X509_CRL_free() is NULL-safe. */
+ X509_CRL_free(crl);
+ return NULL;
+}
+
+/**
+ * Check whether we already have a particular CRL, attempt to fetch it
+ * and check issuer's signature if we don't.
+ *
+ * General plan here is to do basic checks on both current and backup
+ * generation CRLs, then, if both generations pass all of our other
+ * tests, pick the generation with the highest CRL number, to protect
+ * against replay attacks.
+ */
+static X509_CRL *check_crl(rcynic_ctx_t *rc,
+ const uri_t *uri,
+ X509 *issuer)
+{
+ X509_CRL *old_crl, *new_crl, *result = NULL;
+ path_t old_path, new_path;
+
+ /* Already accepted into this run's authenticated tree: reuse it. */
+ if (uri_to_filename(rc, uri, &new_path, &rc->new_authenticated) &&
+ (new_crl = read_crl(&new_path, NULL)) != NULL)
+ return new_crl;
+
+ logmsg(rc, log_telemetry, "Checking CRL %s", uri->s);
+
+ /* Candidate from the freshly fetched (unauthenticated) tree... */
+ new_crl = check_crl_1(rc, uri, &new_path, &rc->unauthenticated,
+ issuer, object_generation_current);
+
+ /* ...and from the previous run's authenticated tree. */
+ old_crl = check_crl_1(rc, uri, &old_path, &rc->old_authenticated,
+ issuer, object_generation_backup);
+
+ if (!new_crl)
+ result = old_crl;
+
+ else if (!old_crl)
+ result = new_crl;
+
+ else {
+ /* Both passed: compare CRL numbers and thisUpdate to defeat replay. */
+ ASN1_GENERALIZEDTIME *g_old = ASN1_TIME_to_generalizedtime(X509_CRL_get_lastUpdate(old_crl), NULL);
+ ASN1_GENERALIZEDTIME *g_new = ASN1_TIME_to_generalizedtime(X509_CRL_get_lastUpdate(new_crl), NULL);
+ int num_cmp = ASN1_INTEGER_cmp(old_crl->crl_number, new_crl->crl_number);
+ int date_cmp = (!g_old || !g_new) ? 0 : ASN1_STRING_cmp(g_old, g_new);
+
+ if (!g_old)
+ log_validation_status(rc, uri, bad_thisupdate, object_generation_backup);
+ if (!g_new)
+ log_validation_status(rc, uri, bad_thisupdate, object_generation_current);
+ if (num_cmp > 0)
+ log_validation_status(rc, uri, backup_number_higher_than_current, object_generation_current);
+ if (g_old && g_new && date_cmp > 0)
+ log_validation_status(rc, uri, backup_thisupdate_newer_than_current, object_generation_current);
+
+ /* Prefer the backup only when its number is higher and its date
+    does not contradict that (or dates are unusable). */
+ if (num_cmp > 0 && (!g_old || !g_new || date_cmp > 0))
+ result = old_crl;
+ else
+ result = new_crl;
+
+ /* ASN1_GENERALIZEDTIME_free() is NULL-safe. */
+ ASN1_GENERALIZEDTIME_free(g_old);
+ ASN1_GENERALIZEDTIME_free(g_new);
+ }
+
+ /* Install the winner; log rejection for the loser if it exists on disk. */
+ if (result && result == new_crl)
+ install_object(rc, uri, &new_path, object_generation_current);
+ else if (!access(new_path.s, F_OK))
+ log_validation_status(rc, uri, object_rejected, object_generation_current);
+
+ if (result && result == old_crl)
+ install_object(rc, uri, &old_path, object_generation_backup);
+ else if (!result && !access(old_path.s, F_OK))
+ log_validation_status(rc, uri, object_rejected, object_generation_backup);
+
+ /* Free whichever candidate(s) we are not returning. */
+ if (result != new_crl)
+ X509_CRL_free(new_crl);
+
+ if (result != old_crl)
+ X509_CRL_free(old_crl);
+
+ return result;
+}
+
+
+/**
+ * Check digest of a CRL we've already accepted.
+ */
+static int check_crl_digest(const rcynic_ctx_t *rc,
+ const uri_t *uri,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ X509_CRL *crl = NULL;
+ hashbuf_t hashbuf;
+ path_t path;
+ int result;
+
+ assert(rc && uri && hash);
+
+ /* Re-read the already-accepted CRL, hashing it as a side effect. */
+ if (!uri_to_filename(rc, uri, &path, &rc->new_authenticated) ||
+ (crl = read_crl(&path, &hashbuf)) == NULL)
+ return 0;
+
+ /* Compare the expected digest prefix against the computed one. */
+ result = hashlen <= sizeof(hashbuf.h) && !memcmp(hashbuf.h, hash, hashlen);
+
+ X509_CRL_free(crl);
+
+ return result;
+}
+
+
+
+/**
+ * Validation callback function for use with x509_verify_cert().
+ *
+ * Sorts OpenSSL verification errors into our MIB counters.  A few
+ * "errors" get special handling because they are informational or
+ * policy-controlled rather than fatal.  Returns the (possibly
+ * overridden) ok flag back to OpenSSL.
+ */
+static int check_x509_cb(int ok, X509_STORE_CTX *ctx)
+{
+ /*
+ * This cast works because the X509_STORE_CTX is embedded at the
+ * start of rcynic_x509_store_ctx_t (container-of idiom); the ctx
+ * we were handed is really our wrapper structure.
+ */
+ rcynic_x509_store_ctx_t *rctx = (rcynic_x509_store_ctx_t *) ctx;
+ mib_counter_t code;
+
+ assert(rctx != NULL);
+
+ switch (ctx->error) {
+ case X509_V_OK:
+ return ok;
+
+ case X509_V_ERR_SUBJECT_ISSUER_MISMATCH:
+ /*
+ * Informational events, not really errors. ctx->check_issued()
+ * is called in many places where failure to find an issuer is not
+ * a failure for the calling function. Just leave these alone.
+ */
+ return ok;
+
+ case X509_V_ERR_CRL_HAS_EXPIRED:
+ /*
+ * This isn't really an error, exactly. CRLs don't really
+ * "expire". What OpenSSL really means by this error is just
+ * "it's now later than the issuer said it intended to publish a
+ * new CRL". Whether we treat this as an error or not is
+ * configurable, see the allow_stale_crl parameter.
+ *
+ * Deciding whether to allow stale CRLs is check_crl_1()'s job,
+ * not ours. By the time this callback occurs, we've already
+ * accepted the CRL; this callback is just notifying us that the
+ * object being checked is tainted by a stale CRL. So we mark the
+ * object as tainted and carry on.
+ */
+ log_validation_status(rctx->rc, &rctx->subject->uri, tainted_by_stale_crl, rctx->subject->generation);
+ ok = 1;
+ return ok;
+
+ case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
+ /*
+ * This is another error that's only an error in the strange world
+ * of OpenSSL, but a more serious one. By default, OpenSSL
+ * expects all trust anchors to be self-signed. This is not a
+ * PKIX requirement, it's just an OpenSSL thing, but one violates
+ * it at one's peril, because the only way to convince OpenSSL to
+ * allow a non-self-signed trust anchor is to intercept this
+ * "error" in the verify callback handler.
+ *
+ * So this program supports non-self-signed trust anchors, but be
+ * warned that enabling this feature may cause this program's
+ * output not to work with other OpenSSL-based applications.
+ */
+ if (rctx->rc->allow_non_self_signed_trust_anchor)
+ ok = 1;
+ log_validation_status(rctx->rc, &rctx->subject->uri, trust_anchor_not_self_signed, rctx->subject->generation);
+ return ok;
+
+ /*
+ * Select correct MIB counter for every known OpenSSL verify errors
+ * except the ones we handle explicitly above, then fall through to
+ * common handling for all of these.
+ */
+#define QV(x) \
+ case x: \
+ code = mib_openssl_##x; \
+ break;
+ MIB_COUNTERS_FROM_OPENSSL;
+#undef QV
+
+ default:
+ code = unknown_openssl_verify_error;
+ break;
+ }
+
+ /* Common exit for all counted errors: log the selected MIB code. */
+ log_validation_status(rctx->rc, &rctx->subject->uri, code, rctx->subject->generation);
+ return ok;
+}
+
+/**
+ * Check crypto aspects of a certificate, policy OID, RFC 3779 path
+ * validation, and conformance to the RPKI certificate profile.
+ *
+ * Returns 1 if the certificate passes all checks, 0 otherwise.
+ * certinfo == NULL means x is being checked as a trust anchor, in
+ * which case the walk context's own certinfo slot is used.  On the
+ * way through, *certinfo is populated with data (URIs, CA/TA flags)
+ * extracted from the certificate's extensions.
+ */
+static int check_x509(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ const uri_t *uri,
+ X509 *x,
+ certinfo_t *certinfo,
+ const object_generation_t generation)
+{
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ rcynic_x509_store_ctx_t rctx;
+ EVP_PKEY *issuer_pkey = NULL, *subject_pkey = NULL;
+ unsigned long flags = (X509_V_FLAG_POLICY_CHECK | X509_V_FLAG_EXPLICIT_POLICY | X509_V_FLAG_X509_STRICT);
+ AUTHORITY_INFO_ACCESS *sia = NULL, *aia = NULL;
+ STACK_OF(POLICYINFO) *policies = NULL;
+ ASN1_BIT_STRING *ski_pubkey = NULL;
+ STACK_OF(DIST_POINT) *crldp = NULL;
+ EXTENDED_KEY_USAGE *eku = NULL;
+ BASIC_CONSTRAINTS *bc = NULL;
+ hashbuf_t ski_hashbuf;
+ unsigned ski_hashlen, afi;
+ int i, ok, crit, loc, ex_count, routercert = 0, ret = 0;
+
+ assert(rc && wsk && w && uri && x && w->cert);
+
+ /*
+ * Cleanup logic will explode if rctx.ctx hasn't been initialized,
+ * so we need to do this before running any test that can fail.
+ */
+ if (!X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, x, NULL))
+ return 0;
+
+ /*
+ * certinfo == NULL means x is a self-signed trust anchor.
+ */
+ if (certinfo == NULL)
+ certinfo = &w->certinfo;
+
+ /* Start from a clean slate for this certificate's extracted data. */
+ memset(certinfo, 0, sizeof(*certinfo));
+
+ certinfo->uri = *uri;
+ certinfo->generation = generation;
+
+ /*
+ * Serial number must be positive and no larger than the
+ * twenty-octet sentinel value (asn1_twenty_octets).
+ */
+ if (ASN1_INTEGER_cmp(X509_get_serialNumber(x), asn1_zero) <= 0 ||
+ ASN1_INTEGER_cmp(X509_get_serialNumber(x), asn1_twenty_octets) > 0) {
+ log_validation_status(rc, uri, bad_certificate_serial_number, generation);
+ goto done;
+ }
+
+ if (!check_allowed_time_encoding(X509_get_notBefore(x)) ||
+ !check_allowed_time_encoding(X509_get_notAfter(x))) {
+ log_validation_status(rc, uri, nonconformant_asn1_time_value, generation);
+ goto done;
+ }
+
+ /*
+ * Apparently nothing ever looks at these fields, so there are no
+ * API functions for them. We wouldn't bother either if they
+ * weren't forbidden by the RPKI certificate profile.
+ */
+ if (!x->cert_info || x->cert_info->issuerUID || x->cert_info->subjectUID) {
+ log_validation_status(rc, uri, nonconformant_certificate_uid, generation);
+ goto done;
+ }
+
+ /*
+ * Keep track of allowed extensions we've seen. Once we've
+ * processed all the ones we expect, anything left is an error.
+ */
+ ex_count = X509_get_ext_count(x);
+
+ /*
+ * We don't use X509_check_ca() to set certinfo->ca anymore, because
+ * it's not paranoid enough to enforce the RPKI certificate profile,
+ * but we still call it because we need it (or something) to invoke
+ * x509v3_cache_extensions() for us.
+ */
+ (void) X509_check_ca(x);
+
+ if ((bc = X509_get_ext_d2i(x, NID_basic_constraints, &crit, NULL)) != NULL) {
+ ex_count--;
+ if (!crit || bc->ca <= 0 || bc->pathlen != NULL) {
+ log_validation_status(rc, uri, malformed_basic_constraints, generation);
+ goto done;
+ }
+ }
+
+ /* Presence of basicConstraints is what marks a CA certificate here. */
+ certinfo->ca = bc != NULL;
+
+ /*
+ * certinfo pointing at the walk context's slot means we're checking
+ * a trust anchor (see the certinfo == NULL case above).
+ */
+ if (certinfo == &w->certinfo) {
+ certinfo->ta = 1;
+ if (!certinfo->ca) {
+ log_validation_status(rc, uri, malformed_trust_anchor, generation);
+ goto done;
+ }
+ }
+
+ if ((aia = X509_get_ext_d2i(x, NID_info_access, NULL, NULL)) != NULL) {
+ int n_caIssuers = 0;
+ ex_count--;
+ if (!extract_access_uri(rc, uri, generation, aia, NID_ad_ca_issuers,
+ &certinfo->aia, &n_caIssuers) ||
+ !certinfo->aia.s[0] ||
+ sk_ACCESS_DESCRIPTION_num(aia) != n_caIssuers) {
+ log_validation_status(rc, uri, malformed_aia_extension, generation);
+ goto done;
+ }
+ }
+
+ /* Trust anchors must not have AIA; everything else must. */
+ if (certinfo->ta && aia) {
+ log_validation_status(rc, uri, aia_extension_forbidden, generation);
+ goto done;
+ }
+
+ if (!certinfo->ta && !aia) {
+ log_validation_status(rc, uri, aia_extension_missing, generation);
+ goto done;
+ }
+
+ /*
+ * EKU is forbidden for CA certs; for EE certs it is used here to
+ * detect BGPsec router certificates (NID_id_kp_bgpsec_router).
+ */
+ if ((eku = X509_get_ext_d2i(x, NID_ext_key_usage, &crit, NULL)) != NULL) {
+ ex_count--;
+ if (crit || certinfo->ca || !endswith(uri->s, ".cer") || sk_ASN1_OBJECT_num(eku) == 0) {
+ log_validation_status(rc, uri, inappropriate_eku_extension, generation);
+ goto done;
+ }
+ for (i = 0; i < sk_ASN1_OBJECT_num(eku); i++)
+ routercert |= OBJ_obj2nid(sk_ASN1_OBJECT_value(eku, i)) == NID_id_kp_bgpsec_router;
+ }
+
+ /*
+ * SIA: CA certs need caRepository + rpkiManifest and no
+ * signedObject; EE certs are the reverse (modulo the
+ * allow_ee_without_signedObject escape hatch).
+ */
+ if ((sia = X509_get_ext_d2i(x, NID_sinfo_access, NULL, NULL)) != NULL) {
+ int got_caDirectory, got_rpkiManifest, got_signedObject;
+ int n_caDirectory = 0, n_rpkiManifest = 0, n_signedObject = 0;
+ ex_count--;
+ ok = (extract_access_uri(rc, uri, generation, sia, NID_caRepository,
+ &certinfo->sia, &n_caDirectory) &&
+ extract_access_uri(rc, uri, generation, sia, NID_ad_rpkiManifest,
+ &certinfo->manifest, &n_rpkiManifest) &&
+ extract_access_uri(rc, uri, generation, sia, NID_ad_signedObject,
+ &certinfo->signedobject, &n_signedObject));
+ got_caDirectory = certinfo->sia.s[0] != '\0';
+ got_rpkiManifest = certinfo->manifest.s[0] != '\0';
+ got_signedObject = certinfo->signedobject.s[0] != '\0';
+ ok &= sk_ACCESS_DESCRIPTION_num(sia) == n_caDirectory + n_rpkiManifest + n_signedObject;
+ if (certinfo->ca)
+ ok &= got_caDirectory && got_rpkiManifest && !got_signedObject;
+ else if (rc->allow_ee_without_signedObject)
+ ok &= !got_caDirectory && !got_rpkiManifest;
+ else
+ ok &= !got_caDirectory && !got_rpkiManifest && got_signedObject;
+ if (!ok) {
+ log_validation_status(rc, uri, malformed_sia_extension, generation);
+ goto done;
+ }
+ } else if (certinfo->ca || !rc->allow_ee_without_signedObject) {
+ log_validation_status(rc, uri, sia_extension_missing, generation);
+ goto done;
+ } else if (!routercert) {
+ log_validation_status(rc, uri, sia_extension_missing_from_ee, generation);
+ }
+
+ if (certinfo->signedobject.s[0] && strcmp(uri->s, certinfo->signedobject.s))
+ log_validation_status(rc, uri, bad_signed_object_uri, generation);
+
+ if ((crldp = X509_get_ext_d2i(x, NID_crl_distribution_points, NULL, NULL)) != NULL) {
+ ex_count--;
+ if (!extract_crldp_uri(rc, uri, generation, crldp, &certinfo->crldp))
+ goto done;
+ }
+
+ /* Hook up our wrapper so check_x509_cb() can find rc and certinfo. */
+ rctx.rc = rc;
+ rctx.subject = certinfo;
+
+ if (w->certs == NULL && (w->certs = walk_ctx_stack_certs(rc, wsk)) == NULL)
+ goto done;
+
+ /* X509_get_version() == 2 means an X.509v3 certificate. */
+ if (X509_get_version(x) != 2) {
+ log_validation_status(rc, uri, wrong_object_version, generation);
+ goto done;
+ }
+
+ if (!x->cert_info || !x->cert_info->signature || !x->cert_info->signature->algorithm ||
+ OBJ_obj2nid(x->cert_info->signature->algorithm) != NID_sha256WithRSAEncryption) {
+ log_validation_status(rc, uri, nonconformant_signature_algorithm, generation);
+ goto done;
+ }
+
+ /* caRepository URI must name a directory, i.e. end with "/". */
+ if (certinfo->sia.s[0] && certinfo->sia.s[strlen(certinfo->sia.s) - 1] != '/') {
+ log_validation_status(rc, uri, malformed_cadirectory_uri, generation);
+ goto done;
+ }
+
+ if (!w->certinfo.ta && strcmp(w->certinfo.uri.s, certinfo->aia.s))
+ log_validation_status(rc, uri, aia_doesnt_match_issuer, generation);
+
+ if (certinfo->ca && !certinfo->sia.s[0]) {
+ log_validation_status(rc, uri, sia_cadirectory_uri_missing, generation);
+ goto done;
+ }
+
+ if (certinfo->ca && !certinfo->manifest.s[0]) {
+ log_validation_status(rc, uri, sia_manifest_uri_missing, generation);
+ goto done;
+ }
+
+ /* Manifest must live inside the certificate's own repository. */
+ if (certinfo->ca && !startswith(certinfo->manifest.s, certinfo->sia.s)) {
+ log_validation_status(rc, uri, manifest_carepository_mismatch, generation);
+ goto done;
+ }
+
+ if (x->skid) {
+ ex_count--;
+ } else {
+ log_validation_status(rc, uri, ski_extension_missing, generation);
+ goto done;
+ }
+
+ if (!check_allowed_dn(X509_get_subject_name(x))) {
+ log_validation_status(rc, uri, nonconformant_subject_name, generation);
+ if (!rc->allow_nonconformant_name)
+ goto done;
+ }
+
+ if (!check_allowed_dn(X509_get_issuer_name(x))) {
+ log_validation_status(rc, uri, nonconformant_issuer_name, generation);
+ if (!rc->allow_nonconformant_name)
+ goto done;
+ }
+
+ /*
+ * Exactly one policy (id-cp-ipAddr-asNumber), critical, with at
+ * most one qualifier which, if present, must be a CPS pointer.
+ */
+ if ((policies = X509_get_ext_d2i(x, NID_certificate_policies, &crit, NULL)) != NULL) {
+ POLICYQUALINFO *qualifier = NULL;
+ POLICYINFO *policy = NULL;
+ ex_count--;
+ if (!crit || sk_POLICYINFO_num(policies) != 1 ||
+ (policy = sk_POLICYINFO_value(policies, 0)) == NULL ||
+ OBJ_obj2nid(policy->policyid) != NID_cp_ipAddr_asNumber ||
+ sk_POLICYQUALINFO_num(policy->qualifiers) > 1 ||
+ (sk_POLICYQUALINFO_num(policy->qualifiers) == 1 &&
+ ((qualifier = sk_POLICYQUALINFO_value(policy->qualifiers, 0)) == NULL ||
+ OBJ_obj2nid(qualifier->pqualid) != NID_id_qt_cps))) {
+ log_validation_status(rc, uri, bad_certificate_policy, generation);
+ goto done;
+ }
+ if (qualifier)
+ log_validation_status(rc, uri, policy_qualifier_cps, generation);
+ }
+
+ /*
+ * keyUsage must be critical: keyCertSign+cRLSign for CA certs,
+ * digitalSignature (only) for EE certs.
+ */
+ if (!X509_EXTENSION_get_critical(X509_get_ext(x, X509_get_ext_by_NID(x, NID_key_usage, -1))) ||
+ (x->ex_flags & EXFLAG_KUSAGE) == 0 ||
+ x->ex_kusage != (certinfo->ca ? KU_KEY_CERT_SIGN | KU_CRL_SIGN : KU_DIGITAL_SIGNATURE)) {
+ log_validation_status(rc, uri, bad_key_usage, generation);
+ goto done;
+ }
+ ex_count--;
+
+ if (x->rfc3779_addr) {
+ ex_count--;
+ if (routercert ||
+ (loc = X509_get_ext_by_NID(x, NID_sbgp_ipAddrBlock, -1)) < 0 ||
+ !X509_EXTENSION_get_critical(X509_get_ext(x, loc)) ||
+ !v3_addr_is_canonical(x->rfc3779_addr) ||
+ sk_IPAddressFamily_num(x->rfc3779_addr) == 0) {
+ log_validation_status(rc, uri, bad_ipaddrblocks, generation);
+ goto done;
+ }
+ for (i = 0; i < sk_IPAddressFamily_num(x->rfc3779_addr); i++) {
+ IPAddressFamily *f = sk_IPAddressFamily_value(x->rfc3779_addr, i);
+ afi = v3_addr_get_afi(f);
+ if (afi != IANA_AFI_IPV4 && afi != IANA_AFI_IPV6) {
+ log_validation_status(rc, uri, unknown_afi, generation);
+ goto done;
+ }
+ /* length != 2 means a SAFI octet is present, which is not allowed. */
+ if (f->addressFamily->length != 2) {
+ log_validation_status(rc, uri, safi_not_allowed, generation);
+ goto done;
+ }
+ }
+ }
+
+ if (x->rfc3779_asid) {
+ ex_count--;
+ if ((loc = X509_get_ext_by_NID(x, NID_sbgp_autonomousSysNum, -1)) < 0 ||
+ !X509_EXTENSION_get_critical(X509_get_ext(x, loc)) ||
+ !v3_asid_is_canonical(x->rfc3779_asid) ||
+ x->rfc3779_asid->asnum == NULL ||
+ x->rfc3779_asid->rdi != NULL ||
+ (routercert && x->rfc3779_asid->asnum->type == ASIdentifierChoice_inherit)) {
+ log_validation_status(rc, uri, bad_asidentifiers, generation);
+ goto done;
+ }
+ }
+
+ /* An RPKI certificate must carry at least one RFC 3779 extension. */
+ if (!x->rfc3779_addr && !x->rfc3779_asid) {
+ log_validation_status(rc, uri, missing_resources, generation);
+ goto done;
+ }
+
+ /*
+ * Public key: RSA with e == 65537 and a 2048-bit modulus (1024-bit
+ * EE keys optionally tolerated); ECDSA only for router EE certs.
+ */
+ subject_pkey = X509_get_pubkey(x);
+ ok = subject_pkey != NULL;
+ if (ok) {
+ ASN1_OBJECT *algorithm;
+
+ (void) X509_PUBKEY_get0_param(&algorithm, NULL, NULL, NULL, X509_get_X509_PUBKEY(x));
+
+ switch (OBJ_obj2nid(algorithm)) {
+
+ case NID_rsaEncryption:
+ ok = (EVP_PKEY_type(subject_pkey->type) == EVP_PKEY_RSA &&
+ BN_get_word(subject_pkey->pkey.rsa->e) == 65537);
+ if (!ok)
+ break;
+ if (!certinfo->ca && rc->allow_1024_bit_ee_key &&
+ BN_num_bits(subject_pkey->pkey.rsa->n) == 1024)
+ log_validation_status(rc, uri, ee_certificate_with_1024_bit_key, generation);
+ else
+ ok = BN_num_bits(subject_pkey->pkey.rsa->n) == 2048;
+ break;
+
+ case NID_X9_62_id_ecPublicKey:
+ ok = !certinfo->ca && routercert;
+ break;
+
+ default:
+ ok = 0;
+ }
+ }
+ if (!ok) {
+ log_validation_status(rc, uri, bad_public_key, generation);
+ goto done;
+ }
+
+ /* SKI must equal the SHA-1 hash of the subject public key. */
+ if (x->skid == NULL ||
+ (ski_pubkey = X509_get0_pubkey_bitstr(x)) == NULL ||
+ !EVP_Digest(ski_pubkey->data, ski_pubkey->length,
+ ski_hashbuf.h, &ski_hashlen, EVP_sha1(), NULL) ||
+ ski_hashlen != 20 ||
+ ski_hashlen != x->skid->length ||
+ memcmp(ski_hashbuf.h, x->skid->data, ski_hashlen)) {
+ log_validation_status(rc, uri, ski_public_key_mismatch, generation);
+ goto done;
+ }
+
+ if (x->akid) {
+ ex_count--;
+ if (!check_aki(rc, uri, w->cert, x->akid, generation))
+ goto done;
+ }
+
+ if (!x->akid && !certinfo->ta) {
+ log_validation_status(rc, uri, aki_extension_missing, generation);
+ goto done;
+ }
+
+ if ((issuer_pkey = X509_get_pubkey(w->cert)) == NULL || X509_verify(x, issuer_pkey) <= 0) {
+ log_validation_status(rc, uri, certificate_bad_signature, generation);
+ goto done;
+ }
+
+ /* Trust anchors must not have a CRLDP; everything else must. */
+ if (certinfo->ta) {
+
+ if (certinfo->crldp.s[0]) {
+ log_validation_status(rc, uri, trust_anchor_with_crldp, generation);
+ goto done;
+ }
+
+ } else {
+
+ if (!certinfo->crldp.s[0]) {
+ log_validation_status(rc, uri, crldp_uri_missing, generation);
+ goto done;
+ }
+
+ if (!certinfo->ca && !startswith(certinfo->crldp.s, w->certinfo.sia.s)) {
+ log_validation_status(rc, uri, crldp_doesnt_match_issuer_sia, generation);
+ goto done;
+ }
+
+ /*
+ * Lazily allocate the one-slot CRL stack the walk context uses to
+ * hand a CRL to X509_verify_cert().
+ */
+ if (w->crls == NULL && ((w->crls = sk_X509_CRL_new_null()) == NULL ||
+ !sk_X509_CRL_push(w->crls, NULL))) {
+ logmsg(rc, log_sys_err, "Internal allocation error setting up CRL for validation");
+ goto done;
+ }
+
+ assert(sk_X509_CRL_num(w->crls) == 1);
+ assert((w->crldp.s[0] == '\0') == (sk_X509_CRL_value(w->crls, 0) == NULL));
+
+ /*
+ * If this certificate names a different CRLDP than the one cached
+ * in the walk context, fetch and check that CRL, keeping whichever
+ * of the two CRLs has the higher CRL number.
+ */
+ if (strcmp(w->crldp.s, certinfo->crldp.s)) {
+ X509_CRL *old_crl = sk_X509_CRL_value(w->crls, 0);
+ X509_CRL *new_crl = check_crl(rc, &certinfo->crldp, w->cert);
+
+ if (w->crldp.s[0])
+ log_validation_status(rc, uri, issuer_uses_multiple_crldp_values, generation);
+
+ if (new_crl == NULL) {
+ log_validation_status(rc, uri, bad_crl, generation);
+ goto done;
+ }
+
+ if (old_crl && new_crl && ASN1_INTEGER_cmp(old_crl->crl_number, new_crl->crl_number) < 0) {
+ log_validation_status(rc, uri, crldp_names_newer_crl, generation);
+ X509_CRL_free(old_crl);
+ old_crl = NULL;
+ }
+
+ if (old_crl == NULL) {
+ sk_X509_CRL_set(w->crls, 0, new_crl);
+ w->crldp = certinfo->crldp;
+ } else {
+ X509_CRL_free(new_crl);
+ }
+ }
+
+ assert(sk_X509_CRL_value(w->crls, 0));
+ flags |= X509_V_FLAG_CRL_CHECK;
+ X509_STORE_CTX_set0_crls(&rctx.ctx, w->crls);
+ }
+
+ /* Anything left unaccounted for is a disallowed extension. */
+ if (ex_count > 0) {
+ log_validation_status(rc, uri, disallowed_x509v3_extension, generation);
+ goto done;
+ }
+
+ assert(w->certs != NULL);
+ X509_STORE_CTX_trusted_stack(&rctx.ctx, w->certs);
+ X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
+
+ X509_VERIFY_PARAM_set_flags(rctx.ctx.param, flags);
+
+ X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_nid2obj(NID_cp_ipAddr_asNumber));
+
+ if (X509_verify_cert(&rctx.ctx) <= 0) {
+ log_validation_status(rc, uri, certificate_failed_validation, generation);
+ goto done;
+ }
+
+ ret = 1;
+
+ done:
+ X509_STORE_CTX_cleanup(&rctx.ctx);
+ EVP_PKEY_free(issuer_pkey);
+ EVP_PKEY_free(subject_pkey);
+ BASIC_CONSTRAINTS_free(bc);
+ sk_ACCESS_DESCRIPTION_pop_free(sia, ACCESS_DESCRIPTION_free);
+ sk_ACCESS_DESCRIPTION_pop_free(aia, ACCESS_DESCRIPTION_free);
+ sk_DIST_POINT_pop_free(crldp, DIST_POINT_free);
+ sk_POLICYINFO_pop_free(policies, POLICYINFO_free);
+ sk_ASN1_OBJECT_pop_free(eku, ASN1_OBJECT_free);
+
+ return ret;
+}
+
+/**
+ * Extract one datum from a CMS_SignerInfo.
+ *
+ * *n is the count of signed attributes not yet accounted for.  It is
+ * decremented when exactly one attribute with the given NID holds
+ * exactly one value of the expected ASN.1 type; any violation forces
+ * *n to -1 so the caller's final (*n == 0) check fails.  An absent
+ * attribute marked optional leaves *n untouched and returns NULL.
+ */
+static void *extract_si_datum(CMS_SignerInfo *si,
+ int *n,
+ const int optional,
+ const int nid,
+ const int asn1_type)
+{
+ int i = CMS_signed_get_attr_by_NID(si, nid, -1);
+ void *result = NULL;
+ X509_ATTRIBUTE *a;
+
+ assert(si && n);
+
+ if (i < 0 && optional)
+ return NULL;
+
+ /*
+ * The second lookup (starting after index i) must fail: that is
+ * what guarantees the attribute occurs exactly once.
+ */
+ if (i >= 0 &&
+ CMS_signed_get_attr_by_NID(si, nid, i) < 0 &&
+ (a = CMS_signed_get_attr(si, i)) != NULL &&
+ X509_ATTRIBUTE_count(a) == 1 &&
+ (result = X509_ATTRIBUTE_get0_data(a, 0, asn1_type, NULL)) != NULL)
+ --*n;
+ else
+ *n = -1;
+
+ return result;
+}
+
+/**
+ * Check a signed CMS object.
+ *
+ * Verifies the CMS wrapper, its single SignerInfo, algorithms,
+ * signed attributes, and (via check_x509()) the embedded EE
+ * certificate.  The optional hash/hashlen pair, when supplied, must
+ * match the object's digest (manifest-driven check).  On success the
+ * eContent has been written to bio, *pcms (if requested) takes
+ * ownership of the CMS structure, and *px (if requested) is set.
+ *
+ * NOTE(review): *px points at the signer certificate obtained from
+ * the CMS structure; presumably it is only valid as long as *pcms
+ * lives -- confirm against callers before relying on it.
+ */
+static int check_cms(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
+ CMS_ContentInfo **pcms,
+ X509 **px,
+ certinfo_t *certinfo,
+ BIO *bio,
+ const unsigned char *hash,
+ const size_t hashlen,
+ const int expected_eContentType_nid,
+ const int require_inheritance,
+ const object_generation_t generation)
+{
+ STACK_OF(CMS_SignerInfo) *signer_infos = NULL;
+ CMS_ContentInfo *cms = NULL;
+ CMS_SignerInfo *si = NULL;
+ ASN1_OCTET_STRING *sid = NULL;
+ X509_NAME *si_issuer = NULL;
+ ASN1_INTEGER *si_serial = NULL;
+ STACK_OF(X509_CRL) *crls = NULL;
+ STACK_OF(X509) *certs = NULL;
+ X509_ALGOR *signature_alg = NULL, *digest_alg = NULL;
+ ASN1_OBJECT *oid = NULL;
+ hashbuf_t hashbuf;
+ X509 *x = NULL;
+ certinfo_t certinfo_;
+ int i, result = 0;
+
+ assert(rc && wsk && uri && path && prefix);
+
+ /* Caller may not care about certinfo; use a local scratch copy. */
+ if (!certinfo)
+ certinfo = &certinfo_;
+
+ if (!uri_to_filename(rc, uri, path, prefix))
+ goto error;
+
+ /* Only compute the digest when the caller supplied one to check. */
+ if (hash)
+ cms = read_cms(path, &hashbuf);
+ else
+ cms = read_cms(path, NULL);
+
+ if (!cms)
+ goto error;
+
+ if (hash && (hashlen > sizeof(hashbuf.h) ||
+ memcmp(hashbuf.h, hash, hashlen))) {
+ log_validation_status(rc, uri, digest_mismatch, generation);
+ if (!rc->allow_digest_mismatch)
+ goto error;
+ }
+
+ if (OBJ_obj2nid(CMS_get0_eContentType(cms)) != expected_eContentType_nid) {
+ log_validation_status(rc, uri, bad_cms_econtenttype, generation);
+ goto error;
+ }
+
+ /*
+ * CMS_NO_SIGNER_CERT_VERIFY: we do certificate path validation
+ * ourselves via check_x509(), so only the CMS signature is checked
+ * here.
+ */
+ if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
+ log_validation_status(rc, uri, cms_validation_failure, generation);
+ goto error;
+ }
+
+ if ((crls = CMS_get1_crls(cms)) != NULL) {
+ log_validation_status(rc, uri, cms_includes_crls, generation);
+ goto error;
+ }
+
+ /*
+ * Exactly one SignerInfo, identified by SKI (sid) rather than
+ * issuer/serial, and with no unsigned attributes at all.
+ */
+ if ((signer_infos = CMS_get0_SignerInfos(cms)) == NULL ||
+ sk_CMS_SignerInfo_num(signer_infos) != 1 ||
+ (si = sk_CMS_SignerInfo_value(signer_infos, 0)) == NULL ||
+ !CMS_SignerInfo_get0_signer_id(si, &sid, &si_issuer, &si_serial) ||
+ sid == NULL || si_issuer != NULL || si_serial != NULL ||
+ CMS_unsigned_get_attr_count(si) != -1) {
+ log_validation_status(rc, uri, bad_cms_signer_infos, generation);
+ goto error;
+ }
+
+ CMS_SignerInfo_get0_algs(si, NULL, &x, &digest_alg, &signature_alg);
+
+ if (x == NULL) {
+ log_validation_status(rc, uri, cms_signer_missing, generation);
+ goto error;
+ }
+
+ /* The one certificate carried in the CMS must be the signer. */
+ if ((certs = CMS_get1_certs(cms)) == NULL ||
+ sk_X509_num(certs) != 1 ||
+ X509_cmp(x, sk_X509_value(certs, 0))) {
+ log_validation_status(rc, uri, bad_cms_signer, generation);
+ goto error;
+ }
+
+ X509_ALGOR_get0(&oid, NULL, NULL, signature_alg);
+ i = OBJ_obj2nid(oid);
+ if (i != NID_sha256WithRSAEncryption && i != NID_rsaEncryption) {
+ log_validation_status(rc, uri, wrong_cms_si_signature_algorithm, generation);
+ goto error;
+ }
+
+ X509_ALGOR_get0(&oid, NULL, NULL, digest_alg);
+ if (OBJ_obj2nid(oid) != NID_sha256) {
+ log_validation_status(rc, uri, wrong_cms_si_digest_algorithm, generation);
+ goto error;
+ }
+
+ /*
+ * Count signed attributes, then check off the ones we allow via
+ * extract_si_datum() (which decrements i); anything left over, or
+ * any malformed attribute (i forced to -1), is an error.
+ */
+ i = CMS_signed_get_attr_count(si);
+
+ (void) extract_si_datum(si, &i, 1, NID_pkcs9_signingTime, V_ASN1_UTCTIME);
+ (void) extract_si_datum(si, &i, 1, NID_binary_signing_time, V_ASN1_INTEGER);
+ oid = extract_si_datum(si, &i, 0, NID_pkcs9_contentType, V_ASN1_OBJECT);
+ (void) extract_si_datum(si, &i, 0, NID_pkcs9_messageDigest, V_ASN1_OCTET_STRING);
+
+ if (i != 0) {
+ log_validation_status(rc, uri, bad_cms_si_signed_attributes, generation);
+ if (!rc->allow_wrong_cms_si_attributes)
+ goto error;
+ }
+
+ if (OBJ_obj2nid(oid) != expected_eContentType_nid) {
+ log_validation_status(rc, uri, bad_cms_si_contenttype, generation);
+ goto error;
+ }
+
+ if (CMS_SignerInfo_cert_cmp(si, x)) {
+ log_validation_status(rc, uri, cms_ski_mismatch, generation);
+ goto error;
+ }
+
+ if (!check_x509(rc, wsk, uri, x, certinfo, generation))
+ goto error;
+
+ /*
+ * When required (e.g. for manifests), the EE certificate's RFC
+ * 3779 resources must all be of the "inherit" form.
+ */
+ if (require_inheritance && x->rfc3779_addr) {
+ for (i = 0; i < sk_IPAddressFamily_num(x->rfc3779_addr); i++) {
+ IPAddressFamily *f = sk_IPAddressFamily_value(x->rfc3779_addr, i);
+ if (f->ipAddressChoice->type != IPAddressChoice_inherit) {
+ log_validation_status(rc, uri, rfc3779_inheritance_required, generation);
+ goto error;
+ }
+ }
+ }
+
+ if (require_inheritance && x->rfc3779_asid && x->rfc3779_asid->asnum &&
+ x->rfc3779_asid->asnum->type != ASIdentifierChoice_inherit) {
+ log_validation_status(rc, uri, rfc3779_inheritance_required, generation);
+ goto error;
+ }
+
+ /* Transfer CMS ownership to the caller if requested. */
+ if (pcms) {
+ *pcms = cms;
+ cms = NULL;
+ }
+
+ if (px)
+ *px = x;
+
+ result = 1;
+
+ error:
+ CMS_ContentInfo_free(cms);
+ sk_X509_CRL_pop_free(crls, X509_CRL_free);
+ sk_X509_pop_free(certs, X509_free);
+
+ return result;
+}
+
+
+
+/**
+ * Load certificate, check against manifest, then run it through all
+ * the check_x509() tests.
+ *
+ * Returns the certificate (caller frees) on success, NULL otherwise.
+ * *path is filled in from uri/prefix as a side effect.
+ */
+static X509 *check_cert_1(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
+ certinfo_t *certinfo,
+ const unsigned char *hash,
+ const size_t hashlen,
+ object_generation_t generation)
+{
+ X509 *x = NULL;
+ hashbuf_t hashbuf;
+
+ assert(uri && path && wsk && certinfo);
+
+ if (!uri_to_filename(rc, uri, path, prefix))
+ return NULL;
+
+ if (access(path->s, R_OK))
+ return NULL;
+
+ /* Only compute the digest when the caller supplied one to check. */
+ x = hash ? read_cert(path, &hashbuf) : read_cert(path, NULL);
+
+ if (x == NULL) {
+ logmsg(rc, log_sys_err, "Can't read certificate %s", path->s);
+ goto fail;
+ }
+
+ if (hash && (hashlen > sizeof(hashbuf.h) ||
+ memcmp(hashbuf.h, hash, hashlen) != 0)) {
+ log_validation_status(rc, uri, digest_mismatch, generation);
+ if (!rc->allow_digest_mismatch)
+ goto fail;
+ }
+
+ if (!check_x509(rc, wsk, uri, x, certinfo, generation))
+ goto fail;
+
+ return x;
+
+ fail:
+ X509_free(x);
+ return NULL;
+}
+
+/**
+ * Try to find a good copy of a certificate either in fresh data or in
+ * backup data from a previous run of this program.
+ *
+ * Which tree we look in (and which generation we report) is driven
+ * by the walk state: current -> unauthenticated tree, backup -> old
+ * authenticated tree.  Returns the accepted certificate (installed
+ * into the new authenticated tree) or NULL.
+ */
+static X509 *check_cert(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ uri_t *uri,
+ certinfo_t *certinfo,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ object_generation_t generation;
+ const path_t *prefix = NULL;
+ path_t path;
+ X509 *x;
+
+ assert(rc && uri && wsk && w && certinfo);
+
+ switch (w->state) {
+ case walk_state_current:
+ prefix = &rc->unauthenticated;
+ generation = object_generation_current;
+ break;
+ case walk_state_backup:
+ prefix = &rc->old_authenticated;
+ generation = object_generation_backup;
+ break;
+ default:
+ return NULL;
+ }
+
+ if (skip_checking_this_object(rc, uri, generation))
+ return NULL;
+
+ /*
+ * NOTE(review): if check_cert_1() fails before uri_to_filename()
+ * has written *path, the access(path.s, ...) below reads an
+ * uninitialized buffer -- confirm that uri_to_filename() always
+ * fills in path even on failure.
+ */
+ if ((x = check_cert_1(rc, wsk, uri, &path, prefix, certinfo,
+ hash, hashlen, generation)) != NULL)
+ install_object(rc, uri, &path, generation);
+ else if (!access(path.s, F_OK))
+ log_validation_status(rc, uri, object_rejected, generation);
+ else if (hash && generation == w->manifest_generation)
+ log_validation_status(rc, uri, manifest_lists_missing_object, generation);
+
+ return x;
+}
+
+
+
+/**
+ * Read and check one manifest from disk.
+ *
+ * Returns the parsed Manifest (caller owns, freed via
+ * Manifest_free()) on success, NULL on any failure.  *certinfo is
+ * filled in from the manifest's EE certificate via check_cms().
+ */
+static Manifest *check_manifest_1(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
+ certinfo_t *certinfo,
+ const object_generation_t generation)
+{
+ STACK_OF(FileAndHash) *sorted_fileList = NULL;
+ Manifest *manifest = NULL, *result = NULL;
+ CMS_ContentInfo *cms = NULL;
+ FileAndHash *fah = NULL, *fah2 = NULL;
+ BIO *bio = NULL;
+ X509 *x;
+ int i;
+
+ assert(rc && wsk && uri && path && prefix);
+
+ if ((bio = BIO_new(BIO_s_mem())) == NULL) {
+ logmsg(rc, log_sys_err, "Couldn't allocate BIO for manifest %s", uri->s);
+ goto done;
+ }
+
+ /* require_inheritance = 1: manifest EE certs must inherit resources. */
+ if (!check_cms(rc, wsk, uri, path, prefix, &cms, &x, certinfo, bio, NULL, 0,
+ NID_ct_rpkiManifest, 1, generation))
+ goto done;
+
+ if ((manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL)) == NULL) {
+ log_validation_status(rc, uri, cms_econtent_decode_error, generation);
+ goto done;
+ }
+
+ /* A present (non-NULL) version field is itself a violation. */
+ if (manifest->version) {
+ log_validation_status(rc, uri, wrong_object_version, generation);
+ goto done;
+ }
+
+ if (X509_cmp_current_time(manifest->thisUpdate) > 0) {
+ log_validation_status(rc, uri, manifest_not_yet_valid, generation);
+ goto done;
+ }
+
+ if (X509_cmp_current_time(manifest->nextUpdate) < 0) {
+ log_validation_status(rc, uri, stale_crl_or_manifest, generation);
+ if (!rc->allow_stale_manifest)
+ goto done;
+ }
+
+ /* Manifest interval must fit inside the EE cert's validity window. */
+ if (asn1_time_cmp(manifest->thisUpdate, X509_get_notBefore(x)) < 0 ||
+ asn1_time_cmp(manifest->nextUpdate, X509_get_notAfter(x)) > 0) {
+ log_validation_status(rc, uri, manifest_interval_overruns_cert, generation);
+ goto done;
+ }
+
+ if (ASN1_INTEGER_cmp(manifest->manifestNumber, asn1_zero) < 0 ||
+ ASN1_INTEGER_cmp(manifest->manifestNumber, asn1_twenty_octets) > 0) {
+ log_validation_status(rc, uri, bad_manifest_number, generation);
+ goto done;
+ }
+
+ if (OBJ_obj2nid(manifest->fileHashAlg) != NID_sha256) {
+ log_validation_status(rc, uri, nonconformant_digest_algorithm, generation);
+ goto done;
+ }
+
+ /*
+ * Sort a shallow copy of the fileList by name so duplicate names
+ * become adjacent, then scan neighboring pairs.
+ */
+ if ((sorted_fileList = sk_FileAndHash_dup(manifest->fileList)) == NULL) {
+ logmsg(rc, log_sys_err, "Couldn't allocate shallow copy of fileList for manifest %s", uri->s);
+ goto done;
+ }
+
+ (void) sk_FileAndHash_set_cmp_func(sorted_fileList, FileAndHash_name_cmp);
+ sk_FileAndHash_sort(sorted_fileList);
+
+ for (i = 0; (fah = sk_FileAndHash_value(sorted_fileList, i)) != NULL && (fah2 = sk_FileAndHash_value(sorted_fileList, i + 1)) != NULL; i++) {
+ if (!strcmp((char *) fah->file->data, (char *) fah2->file->data)) {
+ log_validation_status(rc, uri, duplicate_name_in_manifest, generation);
+ goto done;
+ }
+ }
+
+ /* Each hash must be a full SHA-256 digest with no unused bits. */
+ for (i = 0; (fah = sk_FileAndHash_value(manifest->fileList, i)) != NULL; i++) {
+ if (fah->hash->length != HASH_SHA256_LEN ||
+ (fah->hash->flags & (ASN1_STRING_FLAG_BITS_LEFT | 7)) > ASN1_STRING_FLAG_BITS_LEFT) {
+ log_validation_status(rc, uri, bad_manifest_digest_length, generation);
+ goto done;
+ }
+ }
+
+ result = manifest;
+ manifest = NULL;
+
+ done:
+ BIO_free(bio);
+ Manifest_free(manifest);
+ CMS_ContentInfo_free(cms);
+ sk_FileAndHash_free(sorted_fileList);
+ return result;
+}
+
+/**
+ * Check whether we already have a particular manifest, attempt to fetch it
+ * and check issuer's signature if we don't.
+ *
+ * General plan here is to do basic checks on both current and backup
+ * generation manifests, then, if both generations pass all of our
+ * other tests, pick the generation with the highest manifest number,
+ * to protect against replay attacks.
+ *
+ * Once we've picked the manifest we're going to use, we need to check
+ * it against the CRL we've chosen. Not much we can do if they don't
+ * match besides whine about it, but we do need to whine in this case.
+ *
+ * Returns 1 when processing may continue (possibly with no manifest),
+ * 0 when the CRL/manifest cross-check failed under a strict policy.
+ * Side effects: sets w->manifest, w->crldp and w->manifest_generation.
+ */
+static int check_manifest(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk)
+{
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ Manifest *old_manifest, *new_manifest, *result = NULL;
+ certinfo_t old_certinfo, new_certinfo;
+ const uri_t *uri, *crldp = NULL;
+ object_generation_t generation = object_generation_null;
+ path_t old_path, new_path;
+ FileAndHash *fah = NULL;
+ const char *crl_tail;
+ int i, ok = 1;
+
+ assert(rc && wsk && w && !w->manifest);
+
+ uri = &w->certinfo.manifest;
+
+ logmsg(rc, log_telemetry, "Checking manifest %s", uri->s);
+
+ new_manifest = check_manifest_1(rc, wsk, uri, &new_path,
+ &rc->unauthenticated, &new_certinfo,
+ object_generation_current);
+
+ old_manifest = check_manifest_1(rc, wsk, uri, &old_path,
+ &rc->old_authenticated, &old_certinfo,
+ object_generation_backup);
+
+ if (!new_manifest)
+ result = old_manifest;
+
+ else if (!old_manifest)
+ result = new_manifest;
+
+ else {
+ /*
+ * Both generations passed the basic checks.  Only prefer the
+ * backup when it beats the current one on both manifest number
+ * and thisUpdate; otherwise take the current generation.
+ */
+ int num_cmp = ASN1_INTEGER_cmp(old_manifest->manifestNumber, new_manifest->manifestNumber);
+ int date_cmp = ASN1_STRING_cmp(old_manifest->thisUpdate, new_manifest->thisUpdate);
+
+ if (num_cmp > 0)
+ log_validation_status(rc, uri, backup_number_higher_than_current, object_generation_current);
+ if (date_cmp > 0)
+ log_validation_status(rc, uri, backup_thisupdate_newer_than_current, object_generation_current);
+
+ if (num_cmp > 0 && date_cmp > 0)
+ result = old_manifest;
+ else
+ result = new_manifest;
+ }
+
+ if (result && result == new_manifest) {
+ generation = object_generation_current;
+ install_object(rc, uri, &new_path, generation);
+ crldp = &new_certinfo.crldp;
+ }
+
+ if (result && result == old_manifest) {
+ generation = object_generation_backup;
+ install_object(rc, uri, &old_path, generation);
+ crldp = &old_certinfo.crldp;
+ }
+
+ if (result) {
+ /* The CRL named by the EE cert's CRLDP must appear in the manifest. */
+ crl_tail = strrchr(crldp->s, '/');
+ assert(crl_tail != NULL);
+ crl_tail++;
+
+ for (i = 0; (fah = sk_FileAndHash_value(result->fileList, i)) != NULL; i++)
+ if (!strcmp((char *) fah->file->data, crl_tail))
+ break;
+
+ if (!fah) {
+ log_validation_status(rc, uri, crl_not_in_manifest, generation);
+ if (rc->require_crl_in_manifest)
+ ok = 0;
+ }
+
+ else if (!check_crl_digest(rc, crldp, fah->hash->data, fah->hash->length)) {
+ log_validation_status(rc, uri, digest_mismatch, generation);
+ if (!rc->allow_crl_digest_mismatch)
+ ok = 0;
+ }
+ }
+
+ if ((!result || result != new_manifest) && !access(new_path.s, F_OK))
+ log_validation_status(rc, uri, object_rejected, object_generation_current);
+
+ if (!result && !access(old_path.s, F_OK))
+ log_validation_status(rc, uri, object_rejected, object_generation_backup);
+
+ if (result != new_manifest)
+ Manifest_free(new_manifest);
+
+ if (result != old_manifest)
+ Manifest_free(old_manifest);
+
+ /* Hand the winning manifest (and its CRLDP) to the walk context. */
+ w->manifest = result;
+ if (crldp)
+ w->crldp = *crldp;
+ w->manifest_generation = generation;
+
+ return ok;
+}
+
+
+
+/**
+ * Mark CRL or manifest that we're rechecking so XML report makes more sense.
+ */
+static void rsync_needed_mark_recheck(rcynic_ctx_t *rc,
+ const uri_t *uri)
+{
+ validation_status_t *v;
+
+ /* Nothing to mark if we have no URI. */
+ if (uri->s[0] == '\0')
+ return;
+
+ v = validation_status_find(rc->validation_status_root,
+ uri, object_generation_current);
+ if (v == NULL)
+ return;
+
+ validation_status_set_code(v, stale_crl_or_manifest, 0);
+ log_validation_status(rc, uri, rechecking_object,
+ object_generation_current);
+}
+
+/**
+ * Check whether we need to rsync a particular tree. This depends on
+ * the setting of rc->rsync_early, whether we have a valid manifest on
+ * file, and whether that manifest is stale yet.
+ */
+static int rsync_needed(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk)
+{
+  walk_ctx_t *w = walk_ctx_stack_head(wsk);
+  int needed;
+
+  assert(rc && wsk && w);
+
+  /*
+   * Fetch when in rsync-early mode, when the manifest fails its checks
+   * or is absent, or when its nextUpdate time has passed
+   * (X509_cmp_current_time() < 0 means the time is in the past).
+   * Short-circuit order matters here: check_manifest() runs before the
+   * w->manifest tests and presumably populates w->manifest as a side
+   * effect -- TODO confirm against check_manifest()'s definition.
+   */
+  needed = (rc->rsync_early ||
+ !check_manifest(rc, wsk) ||
+ w->manifest == NULL ||
+ X509_cmp_current_time(w->manifest->nextUpdate) < 0);
+
+  /*
+   * If we're going to refetch and still hold a manifest, flag the
+   * manifest and CRL for recheck in the report, then drop the cached
+   * manifest so it gets re-evaluated after the fetch.
+   */
+  if (needed && w->manifest != NULL) {
+    rsync_needed_mark_recheck(rc, &w->certinfo.manifest);
+    rsync_needed_mark_recheck(rc, &w->certinfo.crldp);
+    Manifest_free(w->manifest);
+    w->manifest = NULL;
+  }
+
+  return needed;
+}
+
+
+
+/**
+ * Extract a ROA prefix from the ASN.1 bitstring encoding.
+ *
+ * On success, writes the zero-padded address bytes to "addr", the
+ * prefix length in bits to "prefixlen", and the effective maxLength
+ * to "max_prefixlen" (which defaults to the prefix length when the
+ * optional maxLength field is absent). Returns non-zero on success,
+ * zero when the AFI is unsupported or the encoding is out of range.
+ */
+static int extract_roa_prefix(const ROAIPAddress *ra,
+ const unsigned afi,
+ unsigned char *addr,
+ unsigned *prefixlen,
+ unsigned *max_prefixlen)
+{
+  unsigned length;
+  long maxlen;
+
+  assert(ra && addr && prefixlen && max_prefixlen);
+
+  maxlen = ASN1_INTEGER_get(ra->maxLength);
+
+  /* Address size in bytes for each supported AFI. */
+  switch (afi) {
+  case IANA_AFI_IPV4: length = 4; break;
+  case IANA_AFI_IPV6: length = 16; break;
+  default: return 0;
+  }
+
+  /* Reject negative or oversized encodings and out-of-range maxLength. */
+  if (ra->IPAddress->length < 0 || ra->IPAddress->length > length ||
+      maxlen < 0 || maxlen > (long) length * 8)
+    return 0;
+
+  if (ra->IPAddress->length > 0) {
+    memcpy(addr, ra->IPAddress->data, ra->IPAddress->length);
+    /*
+     * The low three bits of an ASN.1 BIT STRING's flags hold the count
+     * of unused bits in the final octet; force those bits to zero.
+     */
+    if ((ra->IPAddress->flags & 7) != 0) {
+      unsigned char mask = 0xFF >> (8 - (ra->IPAddress->flags & 7));
+      addr[ra->IPAddress->length - 1] &= ~mask;
+    }
+  }
+
+  /* Zero-fill the remainder of the fixed-size address buffer. */
+  memset(addr + ra->IPAddress->length, 0, length - ra->IPAddress->length);
+  *prefixlen = (ra->IPAddress->length * 8) - (ra->IPAddress->flags & 7);
+  *max_prefixlen = ra->maxLength ? (unsigned) maxlen : *prefixlen;
+
+  return 1;
+}
+
+/**
+ * Read and check one ROA from disk.
+ *
+ * Returns non-zero if the ROA at "uri" (relative to "prefix") parses
+ * and validates: the CMS wrapper, EE certificate, payload version,
+ * asID range and RFC 3779 resource containment are all checked.
+ *
+ * NB: "hash" and "hashlen" are accepted but never referenced in this
+ * function's body; check_cms() below is called with NULL/0 in the
+ * hash argument positions.
+ */
+static int check_roa_1(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
+ const unsigned char *hash,
+ const size_t hashlen,
+ const object_generation_t generation)
+{
+  STACK_OF(IPAddressFamily) *roa_resources = NULL, *ee_resources = NULL;
+  unsigned char addrbuf[ADDR_RAW_BUF_LEN];
+  CMS_ContentInfo *cms = NULL;
+  BIO *bio = NULL;
+  ROA *roa = NULL;
+  X509 *x = NULL;
+  int i, j, result = 0;
+  unsigned afi, *safi = NULL, safi_, prefixlen, max_prefixlen;
+  ROAIPAddressFamily *rf;
+  ROAIPAddress *ra;
+
+  assert(rc && wsk && uri && path && prefix);
+
+  if ((bio = BIO_new(BIO_s_mem())) == NULL) {
+    logmsg(rc, log_sys_err, "Couldn't allocate BIO for ROA %s", uri->s);
+    goto error;
+  }
+
+  /* Verify the CMS wrapper and EE certificate; eContent lands in bio. */
+  if (!check_cms(rc, wsk, uri, path, prefix, &cms, &x, NULL, bio, NULL, 0,
+ NID_ct_ROA, 0, generation))
+    goto error;
+
+  if (!(roa = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), bio, NULL))) {
+    log_validation_status(rc, uri, cms_econtent_decode_error, generation);
+    goto error;
+  }
+
+  /* Only version 0 is legal; a non-NULL/non-zero version field is an error. */
+  if (roa->version) {
+    log_validation_status(rc, uri, wrong_object_version, generation);
+    goto error;
+  }
+
+  /* asID must fit in four octets (0 .. 0xFFFFFFFF). */
+  if (ASN1_INTEGER_cmp(roa->asID, asn1_zero) < 0 ||
+      ASN1_INTEGER_cmp(roa->asID, asn1_four_octets) > 0) {
+    log_validation_status(rc, uri, bad_roa_asID, generation);
+    goto error;
+  }
+
+  /* RFC 3779 IP resources of the EE certificate, for the subset check below. */
+  ee_resources = X509_get_ext_d2i(x, NID_sbgp_ipAddrBlock, NULL, NULL);
+
+  /*
+   * Extract prefixes from ROA and convert them into a resource set.
+   */
+
+  if (!(roa_resources = sk_IPAddressFamily_new_null()))
+    goto error;
+
+  for (i = 0; i < sk_ROAIPAddressFamily_num(roa->ipAddrBlocks); i++) {
+    rf = sk_ROAIPAddressFamily_value(roa->ipAddrBlocks, i);
+    /* addressFamily is 2 octets of AFI plus an optional 1-octet SAFI. */
+    if (!rf || !rf->addressFamily || rf->addressFamily->length < 2 || rf->addressFamily->length > 3) {
+      log_validation_status(rc, uri, malformed_roa_addressfamily, generation);
+      goto error;
+    }
+    afi = (rf->addressFamily->data[0] << 8) | (rf->addressFamily->data[1]);
+    if (rf->addressFamily->length == 3)
+      *(safi = &safi_) = rf->addressFamily->data[2];
+    for (j = 0; j < sk_ROAIPAddress_num(rf->addresses); j++) {
+      ra = sk_ROAIPAddress_value(rf->addresses, j);
+      if (!ra ||
+ !extract_roa_prefix(ra, afi, addrbuf, &prefixlen, &max_prefixlen) ||
+ !v3_addr_add_prefix(roa_resources, afi, safi, addrbuf, prefixlen)) {
+ log_validation_status(rc, uri, roa_resources_malformed, generation);
+ goto error;
+      }
+      if (max_prefixlen < prefixlen) {
+ log_validation_status(rc, uri, roa_max_prefixlen_too_short, generation);
+ goto error;
+      }
+    }
+  }
+
+  /*
+   * ROAs can include nested prefixes, so direct translation to
+   * resource sets could include overlapping ranges, which is illegal.
+   * So we have to remove nested stuff before whacking into canonical
+   * form. Fortunately, this is relatively easy, since we know these
+   * are just prefixes, not ranges: in a list of prefixes sorted by
+   * the RFC 3779 rules, the first element of a set of nested prefixes
+   * will always be the least specific.
+   */
+
+  for (i = 0; i < sk_IPAddressFamily_num(roa_resources); i++) {
+    IPAddressFamily *f = sk_IPAddressFamily_value(roa_resources, i);
+
+    if ((afi = v3_addr_get_afi(f)) == 0) {
+      log_validation_status(rc, uri, roa_contains_bad_afi_value, generation);
+      goto error;
+    }
+
+    if (f->ipAddressChoice->type == IPAddressChoice_addressesOrRanges) {
+      IPAddressOrRanges *aors = f->ipAddressChoice->u.addressesOrRanges;
+
+      sk_IPAddressOrRange_sort(aors);
+
+      for (j = 0; j < sk_IPAddressOrRange_num(aors) - 1; j++) {
+ IPAddressOrRange *a = sk_IPAddressOrRange_value(aors, j);
+ IPAddressOrRange *b = sk_IPAddressOrRange_value(aors, j + 1);
+ unsigned char a_min[ADDR_RAW_BUF_LEN], a_max[ADDR_RAW_BUF_LEN];
+ unsigned char b_min[ADDR_RAW_BUF_LEN], b_max[ADDR_RAW_BUF_LEN];
+ int length;
+
+ if ((length = v3_addr_get_range(a, afi, a_min, a_max, ADDR_RAW_BUF_LEN)) == 0 ||
+     (length = v3_addr_get_range(b, afi, b_min, b_max, ADDR_RAW_BUF_LEN)) == 0) {
+   log_validation_status(rc, uri, roa_resources_malformed, generation);
+   goto error;
+ }
+
+ /* b nested inside a: drop b, then recompare a against the next survivor. */
+ if (memcmp(a_max, b_max, length) >= 0) {
+   (void) sk_IPAddressOrRange_delete(aors, j + 1);
+   IPAddressOrRange_free(b);
+   --j;
+ }
+      }
+    }
+  }
+
+  if (!v3_addr_canonize(roa_resources)) {
+    log_validation_status(rc, uri, roa_resources_malformed, generation);
+    goto error;
+  }
+
+  /* ROA prefixes must all be covered by the EE certificate's resources. */
+  if (!v3_addr_subset(roa_resources, ee_resources)) {
+    log_validation_status(rc, uri, roa_resource_not_in_ee, generation);
+    goto error;
+  }
+
+  result = 1;
+
+ error:
+  BIO_free(bio);
+  ROA_free(roa);
+  CMS_ContentInfo_free(cms);
+  sk_IPAddressFamily_pop_free(roa_resources, IPAddressFamily_free);
+  sk_IPAddressFamily_pop_free(ee_resources, IPAddressFamily_free);
+
+  return result;
+}
+
+/**
+ * Check whether we already have a particular ROA, attempt to fetch it
+ * and check issuer's signature if we don't.
+ */
+static void check_roa(rcynic_ctx_t *rc,
+                      STACK_OF(walk_ctx_t) *wsk,
+                      const uri_t *uri,
+                      const unsigned char *hash,
+                      const size_t hashlen)
+{
+  walk_ctx_t *w = walk_ctx_stack_head(wsk);
+  path_t scratch;
+
+  assert(rc && wsk && w && uri);
+
+  /* If an authenticated copy already exists, there's nothing to do. */
+  if (uri_to_filename(rc, uri, &scratch, &rc->new_authenticated) &&
+      access(scratch.s, F_OK) == 0)
+    return;
+
+  logmsg(rc, log_telemetry, "Checking ROA %s", uri->s);
+
+  /* Try the freshly-fetched (current) copy first... */
+  if (check_roa_1(rc, wsk, uri, &scratch, &rc->unauthenticated,
+                  hash, hashlen, object_generation_current)) {
+    install_object(rc, uri, &scratch, object_generation_current);
+    return;
+  }
+
+  if (access(scratch.s, F_OK) == 0)
+    log_validation_status(rc, uri, object_rejected, object_generation_current);
+  else if (hash != NULL)
+    log_validation_status(rc, uri, manifest_lists_missing_object, object_generation_current);
+
+  /* ...then fall back to the backup copy from the previous run. */
+  if (check_roa_1(rc, wsk, uri, &scratch, &rc->old_authenticated,
+                  hash, hashlen, object_generation_backup)) {
+    install_object(rc, uri, &scratch, object_generation_backup);
+    return;
+  }
+
+  if (access(scratch.s, F_OK) == 0)
+    log_validation_status(rc, uri, object_rejected, object_generation_backup);
+  else if (hash != NULL && w->manifest_generation == object_generation_backup)
+    log_validation_status(rc, uri, manifest_lists_missing_object, object_generation_backup);
+}
+
+
+
+/**
+ * Read and check one Ghostbuster record from disk.
+ *
+ * Only the CMS wrapper and EE certificate are checked; the VCard
+ * payload itself is not inspected (see the #if 0 blocks below).
+ * Returns non-zero on success, zero on failure.
+ *
+ * NB: "hash" and "hashlen" are accepted but never referenced in this
+ * function's body; check_cms() is called with NULL/0 hash arguments.
+ */
+static int check_ghostbuster_1(rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
+ const unsigned char *hash,
+ const size_t hashlen,
+ const object_generation_t generation)
+{
+  CMS_ContentInfo *cms = NULL;
+  BIO *bio = NULL;
+  X509 *x;
+  int result = 0;
+
+  assert(rc && wsk && uri && path && prefix);
+
+#if 0
+  /*
+   * May want this later if we're going to inspect the VCard. For now,
+   * just leave this NULL and the right thing should happen.
+   */
+  if ((bio = BIO_new(BIO_s_mem())) == NULL) {
+    logmsg(rc, log_sys_err, "Couldn't allocate BIO for Ghostbuster record %s", uri->s);
+    goto error;
+  }
+#endif
+
+  /* bio is NULL here, so the eContent is verified but not captured. */
+  if (!check_cms(rc, wsk, uri, path, prefix, &cms, &x, NULL, bio, NULL, 0,
+ NID_ct_rpkiGhostbusters, 1, generation))
+    goto error;
+
+#if 0
+  /*
+   * Here is where we would read the VCard from the bio returned by
+   * CMS_verify() so that we could check the VCard.
+   */
+#endif
+
+  result = 1;
+
+ error:
+  BIO_free(bio);
+  CMS_ContentInfo_free(cms);
+
+  return result;
+}
+
+/**
+ * Check whether we already have a particular Ghostbuster record,
+ * attempt to fetch it and check issuer's signature if we don't.
+ */
+static void check_ghostbuster(rcynic_ctx_t *rc,
+                              STACK_OF(walk_ctx_t) *wsk,
+                              const uri_t *uri,
+                              const unsigned char *hash,
+                              const size_t hashlen)
+{
+  walk_ctx_t *w = walk_ctx_stack_head(wsk);
+  path_t scratch;
+
+  assert(rc && wsk && w && uri);
+
+  /* If an authenticated copy already exists, there's nothing to do. */
+  if (uri_to_filename(rc, uri, &scratch, &rc->new_authenticated) &&
+      access(scratch.s, F_OK) == 0)
+    return;
+
+  logmsg(rc, log_telemetry, "Checking Ghostbuster record %s", uri->s);
+
+  /* Try the freshly-fetched (current) copy first... */
+  if (check_ghostbuster_1(rc, wsk, uri, &scratch, &rc->unauthenticated,
+                          hash, hashlen, object_generation_current)) {
+    install_object(rc, uri, &scratch, object_generation_current);
+    return;
+  }
+
+  if (access(scratch.s, F_OK) == 0)
+    log_validation_status(rc, uri, object_rejected, object_generation_current);
+  else if (hash != NULL)
+    log_validation_status(rc, uri, manifest_lists_missing_object, object_generation_current);
+
+  /* ...then fall back to the backup copy from the previous run. */
+  if (check_ghostbuster_1(rc, wsk, uri, &scratch, &rc->old_authenticated,
+                          hash, hashlen, object_generation_backup)) {
+    install_object(rc, uri, &scratch, object_generation_backup);
+    return;
+  }
+
+  if (access(scratch.s, F_OK) == 0)
+    log_validation_status(rc, uri, object_rejected, object_generation_backup);
+  else if (hash != NULL && w->manifest_generation == object_generation_backup)
+    log_validation_status(rc, uri, manifest_lists_missing_object, object_generation_backup);
+}
+
+
+
+static void walk_cert(rcynic_ctx_t *, void *);
+
+/**
+ * rsync callback for fetching SIA tree.
+ *
+ * Invoked while the fetch is still pending and again on completion.
+ * On completion we advance the walk state machine and requeue the
+ * walk. While still pending, if there is spare fetch capacity, we
+ * clone the walk stack, pop the clone, and queue it -- NOTE(review):
+ * this appears to resume the issuer's walk in parallel with the
+ * in-flight fetch; confirm against walk_ctx_stack_pop() semantics.
+ */
+static void rsync_sia_callback(rcynic_ctx_t *rc,
+ const rsync_ctx_t *ctx,
+ const rsync_status_t status,
+ const uri_t *uri,
+ void *cookie)
+{
+  STACK_OF(walk_ctx_t) *wsk = cookie;
+  walk_ctx_t *w = walk_ctx_stack_head(wsk);
+
+  assert(rc && wsk);
+
+  /* Fetch finished (success or failure): advance state and requeue. */
+  if (status != rsync_status_pending) {
+    w->state++;
+    task_add(rc, walk_cert, wsk);
+    return;
+  }
+
+  /* Still pending: only fork more work if we have capacity. */
+  if (rsync_count_runable(rc) >= rc->max_parallel_fetches)
+    return;
+
+  if ((wsk = walk_ctx_stack_clone(wsk)) == NULL) {
+    logmsg(rc, log_sys_err,
+ "walk_ctx_stack_clone() failed, probably memory exhaustion, blundering onwards without forking stack");
+    return;
+  }
+
+  walk_ctx_stack_pop(wsk);
+  task_add(rc, walk_cert, wsk);
+}
+
+/**
+ * Recursive walk of certificate hierarchy (core of the program).
+ *
+ * Walk all products of the current certificate, starting with the
+ * ones named in the manifest and continuing with any that we find in
+ * the publication directory but which are not named in the manifest.
+ *
+ * Dispatch to correct checking code for the object named by URI,
+ * based on the filename extension in the uri. CRLs are a special
+ * case because we've already checked them by the time we get here, so
+ * we just ignore them. Other objects are either certificates or
+ * CMS-signed objects of one kind or another.
+ */
+static void walk_cert(rcynic_ctx_t *rc, void *cookie)
+{
+  STACK_OF(walk_ctx_t) *wsk = cookie;
+  const unsigned char *hash = NULL;
+  object_generation_t generation;
+  size_t hashlen;
+  walk_ctx_t *w;
+  uri_t uri;
+
+  assert(rc && wsk);
+
+  /* Loop until the walk stack is exhausted; pushes/pops change the head. */
+  while ((w = walk_ctx_stack_head(wsk)) != NULL) {
+
+    /* Map walk state to the object generation used when logging. */
+    switch (w->state) {
+    case walk_state_current:
+      generation = object_generation_current;
+      break;
+    case walk_state_backup:
+      generation = object_generation_backup;
+      break;
+    default:
+      generation = object_generation_null;
+      break;
+    }
+
+    switch (w->state) {
+
+    case walk_state_initial:
+
+      /* Nothing to walk unless this is a CA with an SIA collection. */
+      if (!w->certinfo.sia.s[0] || !w->certinfo.ca) {
+        w->state = walk_state_done;
+        continue;
+      }
+
+      if (!w->certinfo.manifest.s[0]) {
+        log_validation_status(rc, &w->certinfo.uri, sia_manifest_uri_missing, w->certinfo.generation);
+        w->state = walk_state_done;
+        continue;
+      }
+
+      w->state++;
+      continue;
+
+    case walk_state_rsync:
+
+      /*
+       * If a fetch is needed, rsync_tree() takes over; the callback
+       * will advance w->state and requeue us, so return here.
+       */
+      if (rsync_needed(rc, wsk)) {
+        rsync_tree(rc, &w->certinfo.sia, wsk, rsync_sia_callback);
+        return;
+      }
+      log_validation_status(rc, &w->certinfo.sia, rsync_transfer_skipped, object_generation_null);
+      w->state++;
+      continue;
+
+    case walk_state_ready:
+
+      walk_ctx_loop_init(rc, wsk);      /* sets w->state */
+      continue;
+
+    case walk_state_current:
+    case walk_state_backup:
+
+      /* Iterate over objects; hash is NULL for unmanifested objects. */
+      if (!walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen)) {
+        walk_ctx_loop_next(rc, wsk);
+        continue;
+      }
+
+      if (endswith(uri.s, ".crl") || endswith(uri.s, ".mft") || endswith(uri.s, ".mnf")) {
+        walk_ctx_loop_next(rc, wsk);
+        continue;                       /* CRLs and manifests checked elsewhere */
+      }
+
+      if (hash == NULL && !rc->allow_object_not_in_manifest) {
+        log_validation_status(rc, &uri, skipped_because_not_in_manifest, generation);
+        walk_ctx_loop_next(rc, wsk);
+        continue;
+      }
+
+      if (hash == NULL)
+        log_validation_status(rc, &uri, tainted_by_not_being_in_manifest, generation);
+      else if (w->stale_manifest)
+        log_validation_status(rc, &uri, tainted_by_stale_manifest, generation);
+
+      if (endswith(uri.s, ".roa")) {
+        check_roa(rc, wsk, &uri, hash, hashlen);
+        walk_ctx_loop_next(rc, wsk);
+        continue;
+      }
+
+      if (endswith(uri.s, ".gbr")) {
+        check_ghostbuster(rc, wsk, &uri, hash, hashlen);
+        walk_ctx_loop_next(rc, wsk);
+        continue;
+      }
+
+      if (endswith(uri.s, ".cer")) {
+        certinfo_t certinfo;
+        X509 *x = check_cert(rc, wsk, &uri, &certinfo, hash, hashlen);
+        /* On successful push we descend into the child; on failure move on. */
+        if (!walk_ctx_stack_push(wsk, x, &certinfo))
+          walk_ctx_loop_next(rc, wsk);
+        continue;
+      }
+
+      log_validation_status(rc, &uri, unknown_object_type_skipped, object_generation_null);
+      walk_ctx_loop_next(rc, wsk);
+      continue;
+
+    case walk_state_done:
+
+      walk_ctx_stack_pop(wsk);          /* Resume our issuer's state */
+      continue;
+
+    }
+  }
+
+  assert(walk_ctx_stack_head(wsk) == NULL);
+  walk_ctx_stack_free(wsk);
+}
+
+/**
+ * Check a trust anchor. Yes, we trust it, by definition, but it
+ * still needs to conform to the certificate profile, the
+ * self-signature must be correct, etcetera.
+ *
+ * Ownership of the TA certificate object passes to this function when
+ * called (ie, freeing "x" is our responsibility). A NULL "x" is
+ * treated as a no-op success.
+ */
+static int check_ta(rcynic_ctx_t *rc, X509 *x, const uri_t *uri,
+                    const path_t *path1, const path_t *path2,
+                    const object_generation_t generation)
+{
+  STACK_OF(walk_ctx_t) *wsk = NULL;
+  walk_ctx_t *w = NULL;
+
+  /*
+   * Deliberately not asserting x != NULL: the explicit check below
+   * tolerates a missing certificate, and asserting on it would make
+   * debug builds abort where NDEBUG builds return success.
+   */
+  assert(rc && uri && path1 && path2);
+
+  if (x == NULL)
+    return 1;
+
+  if ((wsk = walk_ctx_stack_new()) == NULL) {
+    logmsg(rc, log_sys_err, "Couldn't allocate walk context stack");
+    X509_free(x);
+    return 0;
+  }
+
+  if ((w = walk_ctx_stack_push(wsk, x, NULL)) == NULL) {
+    logmsg(rc, log_sys_err, "Couldn't push walk context stack");
+    walk_ctx_stack_free(wsk);
+    X509_free(x);
+    return 0;
+  }
+
+  /* Profile/self-signature check; rejection is logged but not fatal. */
+  if (!check_x509(rc, wsk, uri, x, NULL, generation)) {
+    log_validation_status(rc, uri, object_rejected, generation);
+    walk_ctx_stack_free(wsk);
+    return 1;
+  }
+
+  logmsg(rc, log_telemetry, "Copying trust anchor %s to %s", path1->s, path2->s);
+
+  if (!mkdir_maybe(rc, path2) || !cp_ln(rc, path1, path2)) {
+    walk_ctx_stack_free(wsk);
+    return 0;
+  }
+
+  log_validation_status(rc, uri, object_accepted, generation);
+  /* Queue the tree walk rooted at this trust anchor. */
+  task_add(rc, walk_cert, wsk);
+  return 1;
+}
+
+
+
+/**
+ * Check a trust anchor read from a local file.
+ *
+ * The candidate certificate is installed into the new authenticated
+ * tree under a name derived from its subject name hash, using the
+ * first free "<hash>.<i>.cer" slot.
+ */
+static int check_ta_cer(rcynic_ctx_t *rc,
+ const char *fn)
+
+{
+  path_t path1, path2;
+  unsigned long hash;
+  X509 *x = NULL;
+  uri_t uri;
+  int i;
+
+  assert(rc && fn);
+
+  logmsg(rc, log_telemetry, "Processing trust anchor from file %s", fn);
+
+  if (strlen(fn) >= sizeof(path1.s)) {
+    logmsg(rc, log_usage_err, "Trust anchor path name too long %s", fn);
+    return 0;
+  }
+  strcpy(path1.s, fn);
+  filename_to_uri(&uri, path1.s);
+
+  if ((x = read_cert(&path1, NULL)) == NULL) {
+    logmsg(rc, log_usage_err, "Couldn't read trust anchor from file %s", fn);
+    log_validation_status(rc, &uri, unreadable_trust_anchor, object_generation_null);
+    goto lose;
+  }
+
+  hash = X509_subject_name_hash(x);
+
+  /* Find a free <hash>.<i>.cer name in the new authenticated tree. */
+  for (i = 0; i < INT_MAX; i++) {
+    if (snprintf(path2.s, sizeof(path2.s), "%s%lx.%d.cer",
+ rc->new_authenticated.s, hash, i) >= sizeof(path2.s)) {
+      logmsg(rc, log_sys_err,
+ "Couldn't construct path name for trust anchor %s", path1.s);
+      goto lose;
+    }
+    /* access() failing here means the name is not yet taken. */
+    if (access(path2.s, F_OK))
+      break;
+  }
+  if (i == INT_MAX) {
+    logmsg(rc, log_sys_err, "Couldn't find a free name for trust anchor %s", path1.s);
+    goto lose;
+  }
+
+  /* check_ta() takes ownership of x from here on. */
+  return check_ta(rc, x, &uri, &path1, &path2, object_generation_null);
+
+ lose:
+  log_validation_status(rc, &uri, trust_anchor_skipped, object_generation_null);
+  X509_free(x);
+  return 0;
+}
+
+
+
+/**
+ * Allocate a new, zero-filled tal_ctx_t.
+ */
+static tal_ctx_t *tal_ctx_t_new(void)
+{
+  /* calloc() gives us the zeroed allocation in one step. */
+  return calloc(1, sizeof(tal_ctx_t));
+}
+
+/**
+ * Free a tal_ctx_t and the public key it holds.
+ */
+static void tal_ctx_t_free(tal_ctx_t *tctx)
+{
+  if (tctx == NULL)
+    return;
+  EVP_PKEY_free(tctx->pkey);
+  free(tctx);
+}
+
+/**
+ * Read a trust anchor from disk and compare with known public key.
+ *
+ * NB: EVP_PKEY_cmp() returns 1 for match, not 0 like every other
+ * xyz_cmp() function in the entire OpenSSL library. Go figure.
+ *
+ * Returns check_ta()'s result on success, zero on any failure.
+ * NOTE(review): the done path logs object_rejected for every zero
+ * return, including the default (bad generation) case -- confirm
+ * that's intended.
+ */
+static int check_ta_tal_callback_1(rcynic_ctx_t *rc,
+ const tal_ctx_t *tctx,
+ object_generation_t generation)
+
+{
+  const path_t *prefix = NULL;
+  EVP_PKEY *pkey = NULL;
+  X509 *x = NULL;
+  path_t path;
+  int ret = 0;
+
+  /* Pick the tree matching the requested generation. */
+  switch (generation) {
+  case object_generation_current:
+    prefix = &rc->unauthenticated;
+    break;
+  case object_generation_backup:
+    prefix = &rc->old_authenticated;
+    break;
+  default:
+    goto done;
+  }
+
+  if (!uri_to_filename(rc, &tctx->uri, &path, prefix)) {
+    log_validation_status(rc, &tctx->uri, unreadable_trust_anchor_locator, generation);
+    goto done;
+  }
+
+  if ((x = read_cert(&path, NULL)) == NULL || (pkey = X509_get_pubkey(x)) == NULL) {
+    log_validation_status(rc, &tctx->uri, unreadable_trust_anchor, generation);
+    goto done;
+  }
+
+  /* The certificate's key must match the key published in the TAL. */
+  if (EVP_PKEY_cmp(tctx->pkey, pkey) != 1) {
+    log_validation_status(rc, &tctx->uri, trust_anchor_key_mismatch, generation);
+    goto done;
+  }
+
+  ret = check_ta(rc, x, &tctx->uri, &path, &tctx->path, generation);
+  x = NULL;                     /* Ownership of x passed to check_ta() */
+
+ done:
+  if (!ret)
+    log_validation_status(rc, &tctx->uri, object_rejected, generation);
+  EVP_PKEY_free(pkey);
+  X509_free(x);
+  return ret;
+}
+
+/**
+ * rsync callback for fetching a TAL.
+ */
+static void rsync_tal_callback(rcynic_ctx_t *rc,
+                               const rsync_ctx_t *ctx,
+                               const rsync_status_t status,
+                               const uri_t *uri,
+                               void *cookie)
+{
+  tal_ctx_t *tctx = cookie;
+
+  assert(rc && tctx);
+
+  /* Ignore progress notifications; act only once the fetch has settled. */
+  if (status != rsync_status_pending) {
+    /* Try the current copy first, then the backup; complain if both fail. */
+    if (!check_ta_tal_callback_1(rc, tctx, object_generation_current) &&
+        !check_ta_tal_callback_1(rc, tctx, object_generation_backup))
+      log_validation_status(rc, &tctx->uri, trust_anchor_skipped,
+                            object_generation_null);
+
+    tal_ctx_t_free(tctx);
+  }
+}
+
+/**
+ * Check a trust anchor read from a trust anchor locator (TAL).
+ *
+ * A TAL's first line is the rsync URI of the TA certificate; the
+ * remaining lines are the base64-encoded SubjectPublicKeyInfo. The
+ * actual certificate check happens asynchronously in
+ * rsync_tal_callback() after the fetch completes.
+ *
+ * NOTE(review): ret is initialized to 1 and never cleared, so this
+ * always reports success; failures are logged via validation status
+ * instead -- confirm that's the intended contract for the caller.
+ */
+static int check_ta_tal(rcynic_ctx_t *rc,
+ const char *fn)
+
+{
+  tal_ctx_t *tctx = NULL;
+  BIO *bio = NULL;
+  int ret = 1;
+
+  assert(rc && fn);
+
+  logmsg(rc, log_telemetry, "Processing trust anchor locator from file %s", fn);
+
+  if ((tctx = tal_ctx_t_new()) == NULL) {
+    logmsg(rc, log_sys_err, "malloc(tal_ctxt_t) failed");
+    goto done;
+  }
+
+  bio = BIO_new_file(fn, "r");
+
+  if (!bio)
+    logmsg(rc, log_usage_err, "Couldn't open trust anchor locator file %s", fn);
+
+  /* First line of the TAL is the certificate's URI. */
+  if (!bio || BIO_gets(bio, tctx->uri.s, sizeof(tctx->uri.s)) <= 0) {
+    uri_t furi;
+    filename_to_uri(&furi, fn);
+    log_validation_status(rc, &furi, unreadable_trust_anchor_locator, object_generation_null);
+    goto done;
+  }
+
+  /* Strip trailing whitespace/newline from the URI. */
+  tctx->uri.s[strcspn(tctx->uri.s, " \t\r\n")] = '\0';
+
+  if (!uri_to_filename(rc, &tctx->uri, &tctx->path, &rc->new_authenticated)) {
+    log_validation_status(rc, &tctx->uri, unreadable_trust_anchor_locator, object_generation_null);
+    goto done;
+  }
+
+  if (!endswith(tctx->uri.s, ".cer")) {
+    log_validation_status(rc, &tctx->uri, malformed_tal_uri, object_generation_null);
+    goto done;
+  }
+
+  /*
+   * Stack linebreak and base64 filters to decode the rest of the TAL
+   * as the DER public key.
+   * NOTE(review): if either BIO_new() fails, BIO_push() gets a NULL
+   * first argument -- confirm OpenSSL tolerates that here.
+   */
+  bio = BIO_push(BIO_new(BIO_f_linebreak()), bio);
+  bio = BIO_push(BIO_new(BIO_f_base64()), bio);
+  if (bio)
+    tctx->pkey = d2i_PUBKEY_bio(bio, NULL);
+  if (!tctx->pkey) {
+    log_validation_status(rc, &tctx->uri, unreadable_trust_anchor_locator, object_generation_null);
+    goto done;
+  }
+
+  logmsg(rc, log_telemetry, "Processing trust anchor from URI %s", tctx->uri.s);
+
+  rsync_ta(rc, &tctx->uri, tctx, rsync_tal_callback);
+  tctx = NULL;                  /* Control has passed */
+
+ done:
+  tal_ctx_t_free(tctx);
+  BIO_free_all(bio);
+  return ret;
+}
+
+/**
+ * Check a directory of trust anchors and trust anchor locators.
+ *
+ * Returns non-zero if the whole directory was scanned, zero if we
+ * bailed out early on an error (over-long name or a failed check).
+ */
+static int check_ta_dir(rcynic_ctx_t *rc,
+                        const char *dn)
+{
+  DIR *dir = NULL;
+  struct dirent *d;
+  path_t path;
+  int is_cer, is_tal;
+
+  assert(rc && dn);
+
+  if ((dir = opendir(dn)) == NULL) {
+    logmsg(rc, log_sys_err, "Couldn't open trust anchor directory %s: %s",
+           dn, strerror(errno));
+    return 0;
+  }
+
+  while ((d = readdir(dir)) != NULL) {
+    if (snprintf(path.s, sizeof(path.s), "%s/%s", dn, d->d_name) >= sizeof(path.s)) {
+      logmsg(rc, log_data_err, "Pathname %s/%s too long", dn, d->d_name);
+      break;
+    }
+    is_cer = endswith(path.s, ".cer");
+    is_tal = endswith(path.s, ".tal");
+    if (is_cer && !check_ta_cer(rc, path.s))
+      break;
+    if (is_tal && !check_ta_tal(rc, path.s))
+      break;
+    if (!is_cer && !is_tal)
+      logmsg(rc, log_verbose, "Skipping non-trust-anchor %s", path.s);
+  }
+
+  /* dir is known non-NULL here, so close it unconditionally. */
+  closedir(dir);
+
+  /* d is NULL only if readdir() ran off the end without any break above. */
+  return !d;
+}
+
+
+
+/**
+ * Write detailed log of what we've done as an XML file.
+ *
+ * Writes to "xmlfile", or to stdout if xmlfile is "-"; returns
+ * non-zero on success. Output goes to a temporary file first and is
+ * renamed into place only if everything succeeded. The "ok &= ..."
+ * chains accumulate failure: once ok goes to 0, later writes are
+ * skipped and the error path cleans up the temporary file.
+ */
+static int write_xml_file(const rcynic_ctx_t *rc,
+ const char *xmlfile)
+{
+  int i, j, use_stdout, ok;
+  char hostname[HOSTNAME_MAX];
+  mib_counter_t code;
+  timestamp_t ts;
+  FILE *f = NULL;
+  path_t xmltemp;
+
+  if (xmlfile == NULL)
+    return 1;
+
+  use_stdout = !strcmp(xmlfile, "-");
+
+  logmsg(rc, log_telemetry, "Writing XML summary to %s",
+ (use_stdout ? "standard output" : xmlfile));
+
+  if (use_stdout) {
+    f = stdout;
+    ok = 1;
+  } else if (snprintf(xmltemp.s, sizeof(xmltemp.s), "%s.%u.tmp", xmlfile, (unsigned) getpid()) >= sizeof(xmltemp.s)) {
+    logmsg(rc, log_usage_err, "Filename \"%s\" is too long, not writing XML", xmlfile);
+    return 0;
+  } else {
+    ok = (f = fopen(xmltemp.s, "w")) != NULL;
+  }
+
+  ok &= gethostname(hostname, sizeof(hostname)) == 0;
+
+  /* Document header and the table of counter labels. */
+  if (ok)
+    ok &= fprintf(f, "<?xml version=\"1.0\" ?>\n"
+ "<rcynic-summary date=\"%s\" rcynic-version=\"%s\""
+ " summary-version=\"%d\" reporting-hostname=\"%s\">\n"
+ " <labels>\n",
+ time_to_string(&ts, NULL),
+ svn_id, XML_SUMMARY_VERSION, hostname) != EOF;
+
+  for (j = 0; ok && j < MIB_COUNTER_T_MAX; ++j)
+    ok &= fprintf(f, " <%s kind=\"%s\">%s</%s>\n",
+ mib_counter_label[j], mib_counter_kind[j],
+ (mib_counter_desc[j]
+ ? mib_counter_desc[j]
+ : X509_verify_cert_error_string(mib_counter_openssl[j])),
+ mib_counter_label[j]) != EOF;
+
+  if (ok)
+    ok &= fprintf(f, " </labels>\n") != EOF;
+
+  /* One <validation_status> element per (object, status code) pair. */
+  for (i = 0; ok && i < sk_validation_status_t_num(rc->validation_status); i++) {
+    validation_status_t *v = sk_validation_status_t_value(rc->validation_status, i);
+    assert(v);
+
+    (void) time_to_string(&ts, &v->timestamp);
+
+    for (code = (mib_counter_t) 0; ok && code < MIB_COUNTER_T_MAX; code++) {
+      if (validation_status_get_code(v, code)) {
+ if (ok)
+   ok &= fprintf(f, " <validation_status timestamp=\"%s\" status=\"%s\"",
+ ts.s, mib_counter_label[code]) != EOF;
+ if (ok && (v->generation == object_generation_current ||
+    v->generation == object_generation_backup))
+   ok &= fprintf(f, " generation=\"%s\"",
+ object_generation_label[v->generation]) != EOF;
+ if (ok)
+   ok &= fprintf(f, ">%s</validation_status>\n", v->uri.s) != EOF;
+      }
+    }
+  }
+
+  /* One <rsync_history> element per fetch attempt. */
+  for (i = 0; ok && i < sk_rsync_history_t_num(rc->rsync_history); i++) {
+    rsync_history_t *h = sk_rsync_history_t_value(rc->rsync_history, i);
+    assert(h);
+
+    if (ok)
+      ok &= fprintf(f, " <rsync_history") != EOF;
+    if (ok && h->started)
+      ok &= fprintf(f, " started=\"%s\"",
+ time_to_string(&ts, &h->started)) != EOF;
+    if (ok && h->finished)
+      ok &= fprintf(f, " finished=\"%s\"",
+ time_to_string(&ts, &h->finished)) != EOF;
+    if (ok && h->status != rsync_status_done)
+      ok &= fprintf(f, " error=\"%u\"", (unsigned) h->status) != EOF;
+    if (ok)
+      ok &= fprintf(f, ">%s%s</rsync_history>\n",
+ h->uri.s, (h->final_slash ? "/" : "")) != EOF;
+  }
+
+  if (ok)
+    ok &= fprintf(f, "</rcynic-summary>\n") != EOF;
+
+  /* fclose() flushes, so its result matters for write streams. */
+  if (f && !use_stdout)
+    ok &= fclose(f) != EOF;
+
+  /* Atomically move the finished file into place. */
+  if (ok && !use_stdout)
+    ok &= rename(xmltemp.s, xmlfile) == 0;
+
+  if (!ok)
+    logmsg(rc, log_sys_err, "Couldn't write XML summary to %s: %s",
+ (use_stdout ? "standard output" : xmlfile), strerror(errno));
+
+  if (!ok && !use_stdout)
+    (void) unlink(xmltemp.s);
+
+  return ok;
+}
+
+
+
+/**
+ * Long options.
+ *
+ * QA() entries take an argument, QF() entries are flags.
+ */
+#define OPTIONS \
+  QA('a', "authenticated", "root of authenticated data tree") \
+  QA('c', "config", "override default name of config file") \
+  QF('h', "help", "print this help message") \
+  QA('j', "jitter", "set jitter value") \
+  QA('l', "log-level", "set log level") \
+  QA('u', "unauthenticated", "root of unauthenticated data tree") \
+  QF('e', "use-stderr", "log to stderr") \
+  QF('s', "use-syslog", "log to syslog") \
+  QF('V', "version", "print program version") \
+  QA('x', "xml-file", "set XML output file location")
+
+/*
+ * getopt_long() option table.
+ * NOTE(review): main() builds its own long_opts[] table from the
+ * OPTIONS macro; this table looks redundant with that one -- confirm
+ * whether anything still references it before removing.
+ */
+const static struct option longopts[] = {
+  { "authenticated", required_argument, NULL, 'a' },
+  { "config", required_argument, NULL, 'c' },
+  { "help", no_argument, NULL, 'h' },
+  { "jitter", required_argument, NULL, 'j' },
+  { "log-level", required_argument, NULL, 'l' },
+  { "unauthenticated", required_argument, NULL, 'u' },
+  { "use-stderr", no_argument, NULL, 'e' },
+  { "use-syslog", no_argument, NULL, 's' },
+  { "version", no_argument, NULL, 'V' },
+  { "xml-file", required_argument, NULL, 'x' },
+  { NULL }                      /* terminator */
+};
+
+/**
+ * Wrapper around printf() to take arguments like logmsg().
+ * If C had closures, usage() would use them instead of this silliness.
+ */
+static void logmsg_printf(const rcynic_ctx_t *rc,
+                          const log_level_t level,
+                          const char *fmt, ...)
+{
+  /*
+   * rc and level are deliberately unused; they exist only so this
+   * function's signature matches logmsg()'s for use via a pointer.
+   */
+  va_list ap;
+
+  va_start(ap, fmt);
+  vfprintf(stdout, fmt, ap);
+  va_end(ap);
+  fputc('\n', stdout);
+}
+
+/**
+ * Log usage message, either to stdout (for --help) or via logmsg().
+ *
+ * When rc is NULL we're answering --help, so the plain printf wrapper
+ * is used; otherwise messages go through the normal logging machinery.
+ */
+static void usage (const rcynic_ctx_t *rc, const char *jane)
+{
+  void (*log)(const rcynic_ctx_t *, const log_level_t, const char *, ...) = rc ? logmsg : logmsg_printf;
+  char left[80];
+
+  if (rc && !jane)
+    jane = rc->jane;
+
+  log(rc, log_usage_err, "usage: %s [options]", jane);
+  log(rc, log_usage_err, "options:");
+
+/*
+ * Expand OPTIONS into one formatted line per option: QF() for flags,
+ * QA() for options taking an argument.
+ */
+#define QF(_s_, _l_, _d_) \
+  (void) snprintf(left, sizeof(left), "-%c --%-32s", _s_, _l_); \
+  log(rc, log_usage_err, " %s%s", left, _d_);
+
+#define QA(_s_, _l_, _d_) \
+  (void) snprintf(left, sizeof(left), "-%c ARG --%-32s", _s_, _l_ " ARG"); \
+  log(rc, log_usage_err, " %s%s", left, _d_);
+
+  OPTIONS;
+
+#undef QA
+#undef QF
+}
+
+/**
+ * Main program. Parse command line, read config file, iterate over
+ * trust anchors found via config file and do a tree walk for each
+ * trust anchor.
+ */
+int main(int argc, char *argv[])
+{
+ int opt_jitter = 0, use_syslog = 0, use_stderr = 0, syslog_facility = 0;
+ int opt_syslog = 0, opt_stderr = 0, opt_level = 0, prune = 1;
+ int opt_auth = 0, opt_unauth = 0, keep_lockfile = 0;
+ char *lockfile = NULL, *xmlfile = NULL;
+ char *cfg_file = "rcynic.conf";
+ int c, i, ret = 1, jitter = 600, lockfd = -1;
+ STACK_OF(CONF_VALUE) *cfg_section = NULL;
+ CONF *cfg_handle = NULL;
+ time_t start = 0, finish;
+ rcynic_ctx_t rc;
+ unsigned delay;
+ long eline = 0;
+ path_t ta_dir;
+
+#define QF(_s_, _l_, _d_) _s_,
+#define QA(_s_, _l_, _d_) _s_, ':',
+
+ const static char short_opts[] = { OPTIONS '\0' };
+
+#undef QA
+#undef QF
+
+#define QF(_s_, _l_, _d_) { _l_, no_argument, NULL, _s_ },
+#define QA(_s_, _l_, _d_) { _l_, required_argument, NULL, _s_ },
+
+ static struct option long_opts[] = { OPTIONS { NULL } };
+
+#undef QA
+#undef QF
+
+ memset(&rc, 0, sizeof(rc));
+
+ if ((rc.jane = strrchr(argv[0], '/')) == NULL)
+ rc.jane = argv[0];
+ else
+ rc.jane++;
+
+ rc.log_level = log_data_err;
+ rc.allow_stale_crl = 1;
+ rc.allow_stale_manifest = 1;
+ rc.allow_digest_mismatch = 1;
+ rc.allow_crl_digest_mismatch = 1;
+ rc.allow_nonconformant_name = 1;
+ rc.allow_ee_without_signedObject = 1;
+ rc.allow_1024_bit_ee_key = 1;
+ rc.allow_wrong_cms_si_attributes = 1;
+ rc.max_parallel_fetches = 1;
+ rc.max_retries = 3;
+ rc.retry_wait_min = 30;
+ rc.run_rsync = 1;
+ rc.rsync_timeout = 300;
+ rc.max_select_time = 30;
+ rc.rsync_early = 1;
+
+#define QQ(x,y) rc.priority[x] = y;
+ LOG_LEVELS;
+#undef QQ
+
+ if (!set_directory(&rc, &rc.authenticated, "rcynic-data/authenticated", 0) ||
+ !set_directory(&rc, &rc.unauthenticated, "rcynic-data/unauthenticated/", 1))
+ goto done;
+
+ OpenSSL_add_all_algorithms();
+ ERR_load_crypto_strings();
+
+ if (!create_missing_nids()) {
+ logmsg(&rc, log_sys_err, "Couldn't initialize missing OIDs!");
+ goto done;
+ }
+
+ memset(&ta_dir, 0, sizeof(ta_dir));
+
+ opterr = 0;
+
+ while ((c = getopt_long(argc, argv, short_opts, long_opts, NULL)) > 0) {
+ switch (c) {
+ case 'a':
+ opt_auth = 1;
+ if (!set_directory(&rc, &rc.authenticated, optarg, 0))
+ goto done;
+ break;
+ case 'c':
+ cfg_file = optarg;
+ break;
+ case 'l':
+ opt_level = 1;
+ if (!configure_logmsg(&rc, optarg))
+ goto done;
+ break;
+ case 's':
+ use_syslog = opt_syslog = 1;
+ break;
+ case 'e':
+ use_stderr = opt_stderr = 1;
+ break;
+ case 'h':
+ usage(NULL, rc.jane);
+ ret = 0;
+ goto done;
+ case 'j':
+ if (!configure_integer(&rc, &jitter, optarg))
+ goto done;
+ opt_jitter = 1;
+ break;
+ case 'u':
+ opt_unauth = 1;
+ if (!set_directory(&rc, &rc.unauthenticated, optarg, 1))
+ goto done;
+ break;
+ case 'V':
+ puts(svn_id);
+ ret = 0;
+ goto done;
+ case 'x':
+ xmlfile = strdup(optarg);
+ break;
+ default:
+ usage(&rc, NULL);
+ goto done;
+ }
+ }
+
+ if (!(asn1_zero = s2i_ASN1_INTEGER(NULL, "0x0")) ||
+ !(asn1_four_octets = s2i_ASN1_INTEGER(NULL, "0xFFFFFFFF")) ||
+ !(asn1_twenty_octets = s2i_ASN1_INTEGER(NULL, "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")) ||
+ !(NID_binary_signing_time = OBJ_create("1.2.840.113549.1.9.16.2.46",
+ "id-aa-binarySigningTime",
+ "id-aa-binarySigningTime"))) {
+ logmsg(&rc, log_sys_err, "Couldn't initialize ASN.1 constants!");
+ goto done;
+ }
+
+ if ((cfg_handle = NCONF_new(NULL)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't create CONF opbject");
+ goto done;
+ }
+
+ if (NCONF_load(cfg_handle, cfg_file, &eline) <= 0) {
+ if (eline <= 0)
+ logmsg(&rc, log_usage_err, "Couldn't load config file %s", cfg_file);
+ else
+ logmsg(&rc, log_usage_err, "Error on line %ld of config file %s", eline, cfg_file);
+ goto done;
+ }
+
+ if (CONF_modules_load(cfg_handle, NULL, 0) <= 0) {
+ logmsg(&rc, log_sys_err, "Couldn't configure OpenSSL");
+ goto done;
+ }
+
+ if ((cfg_section = NCONF_get_section(cfg_handle, "rcynic")) == NULL) {
+ logmsg(&rc, log_usage_err, "Couldn't load rcynic section from config file");
+ goto done;
+ }
+
+ for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
+ CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
+
+ assert(val && val->name && val->value);
+
+ if (!opt_auth &&
+ !name_cmp(val->name, "authenticated") &&
+ !set_directory(&rc, &rc.authenticated, val->value, 0))
+ goto done;
+
+ else if (!opt_unauth &&
+ !name_cmp(val->name, "unauthenticated") &&
+ !set_directory(&rc, &rc.unauthenticated, val->value, 1))
+ goto done;
+
+ else if (!name_cmp(val->name, "trust-anchor-directory") &&
+ !set_directory(&rc, &ta_dir, val->value, 0))
+ goto done;
+
+ else if (!name_cmp(val->name, "rsync-timeout") &&
+ !configure_integer(&rc, &rc.rsync_timeout, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "max-parallel-fetches") &&
+ !configure_integer(&rc, &rc.max_parallel_fetches, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "max-select-time") &&
+ !configure_unsigned_integer(&rc, &rc.max_select_time, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "rsync-program"))
+ rc.rsync_program = strdup(val->value);
+
+ else if (!name_cmp(val->name, "lockfile"))
+ lockfile = strdup(val->value);
+
+ else if (!name_cmp(val->name, "keep-lockfile") &&
+ !configure_boolean(&rc, &keep_lockfile, val->value))
+ goto done;
+
+ else if (!opt_jitter &&
+ !name_cmp(val->name, "jitter") &&
+ !configure_integer(&rc, &jitter, val->value))
+ goto done;
+
+ else if (!opt_level &&
+ !name_cmp(val->name, "log-level") &&
+ !configure_logmsg(&rc, val->value))
+ goto done;
+
+ else if (!opt_syslog &&
+ !name_cmp(val->name, "use-syslog") &&
+ !configure_boolean(&rc, &use_syslog, val->value))
+ goto done;
+
+ else if (!opt_stderr &&
+ !name_cmp(val->name, "use-stderr") &&
+ !configure_boolean(&rc, &use_stderr, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "syslog-facility") &&
+ !configure_syslog(&rc, &syslog_facility,
+ facilitynames, val->value))
+ goto done;
+
+ else if (!xmlfile &&
+ (!name_cmp(val->name, "xml-file") ||
+ !name_cmp(val->name, "xml-summary")))
+ xmlfile = strdup(val->value);
+
+ else if (!name_cmp(val->name, "allow-stale-crl") &&
+ !configure_boolean(&rc, &rc.allow_stale_crl, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-stale-manifest") &&
+ !configure_boolean(&rc, &rc.allow_stale_manifest, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-non-self-signed-trust-anchor") &&
+ !configure_boolean(&rc, &rc.allow_non_self_signed_trust_anchor, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "require-crl-in-manifest") &&
+ !configure_boolean(&rc, &rc.require_crl_in_manifest, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-object-not-in-manifest") &&
+ !configure_boolean(&rc, &rc.allow_object_not_in_manifest, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-digest-mismatch") &&
+ !configure_boolean(&rc, &rc.allow_digest_mismatch, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-crl-digest-mismatch") &&
+ !configure_boolean(&rc, &rc.allow_crl_digest_mismatch, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "use-links") &&
+ !configure_boolean(&rc, &rc.use_links, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "prune") &&
+ !configure_boolean(&rc, &prune, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "run-rsync") &&
+ !configure_boolean(&rc, &rc.run_rsync, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-nonconformant-name") &&
+ !configure_boolean(&rc, &rc.allow_nonconformant_name, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-ee-without-signedObject") &&
+ !configure_boolean(&rc, &rc.allow_ee_without_signedObject, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-1024-bit-ee-key") &&
+ !configure_boolean(&rc, &rc.allow_1024_bit_ee_key, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-wrong-cms-si-attributes") &&
+ !configure_boolean(&rc, &rc.allow_wrong_cms_si_attributes, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "rsync-early") &&
+ !configure_boolean(&rc, &rc.rsync_early, val->value))
+ goto done;
+
+ /*
+ * Ugly, but the easiest way to handle all these strings.
+ */
+
+#define QQ(x,y) \
+ else if (!name_cmp(val->name, "syslog-priority-" #x) && \
+ !configure_syslog(&rc, &rc.priority[x], \
+ prioritynames, val->value)) \
+ goto done;
+
+ LOG_LEVELS; /* the semicolon is for emacs */
+
+#undef QQ
+
+ }
+
+ if ((rc.rsync_history = sk_rsync_history_t_new(rsync_history_cmp)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate rsync_history stack");
+ goto done;
+ }
+
+ if ((rc.validation_status = sk_validation_status_t_new_null()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate validation_status stack");
+ goto done;
+ }
+
+ if ((rc.x509_store = X509_STORE_new()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate X509_STORE");
+ goto done;
+ }
+
+ if ((rc.rsync_queue = sk_rsync_ctx_t_new_null()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate rsync_queue");
+ goto done;
+ }
+
+ if ((rc.task_queue = sk_task_t_new_null()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate task_queue");
+ goto done;
+ }
+
+ rc.use_syslog = use_syslog;
+
+ if (use_syslog)
+ openlog(rc.jane,
+ LOG_PID | (use_stderr ? LOG_PERROR : 0),
+ (syslog_facility ? syslog_facility : LOG_LOCAL0));
+
+ if (jitter > 0) {
+ if (RAND_bytes((unsigned char *) &delay, sizeof(delay)) <= 0) {
+ logmsg(&rc, log_sys_err, "Couldn't read random bytes");
+ goto done;
+ }
+ delay %= jitter;
+ logmsg(&rc, log_telemetry, "Delaying %u seconds before startup", delay);
+ while (delay > 0)
+ delay = sleep(delay);
+ }
+
+ if (lockfile &&
+ ((lockfd = open(lockfile, O_RDWR|O_CREAT|O_NONBLOCK, 0666)) < 0 ||
+ lockf(lockfd, F_TLOCK, 0) < 0)) {
+ if (lockfd >= 0 && errno == EAGAIN)
+ logmsg(&rc, log_telemetry, "Lock %s held by another process", lockfile);
+ else
+ logmsg(&rc, log_sys_err, "Problem locking %s: %s", lockfile, strerror(errno));
+ lockfd = -1;
+ goto done;
+ }
+
+ start = time(0);
+ logmsg(&rc, log_telemetry, "Starting");
+
+ if (!construct_directory_names(&rc))
+ goto done;
+
+ if (!access(rc.new_authenticated.s, F_OK)) {
+ logmsg(&rc, log_sys_err,
+ "Timestamped output directory %s already exists! Clock went backwards?",
+ rc.new_authenticated.s);
+ goto done;
+ }
+
+ if (!mkdir_maybe(&rc, &rc.new_authenticated)) {
+ logmsg(&rc, log_sys_err, "Couldn't prepare directory %s: %s",
+ rc.new_authenticated.s, strerror(errno));
+ goto done;
+ }
+
+ for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
+ CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
+
+ assert(val && val->name && val->value);
+
+ if (!name_cmp(val->name, "trust-anchor-uri-with-key") ||
+ !name_cmp(val->name, "indirect-trust-anchor")) {
+ logmsg(&rc, log_usage_err,
+ "Directive \"%s\" is obsolete -- please use \"trust-anchor-locator\" instead",
+ val->name);
+ goto done;
+ }
+
+ if ((!name_cmp(val->name, "trust-anchor") && !check_ta_cer(&rc, val->value)) ||
+ (!name_cmp(val->name, "trust-anchor-locator") && !check_ta_tal(&rc, val->value)))
+ goto done;
+ }
+
+ if (*ta_dir.s != '\0' && !check_ta_dir(&rc, ta_dir.s))
+ goto done;
+
+ while (sk_task_t_num(rc.task_queue) > 0 || sk_rsync_ctx_t_num(rc.rsync_queue) > 0) {
+ task_run_q(&rc);
+ rsync_mgr(&rc);
+ }
+
+ logmsg(&rc, log_telemetry, "Event loop done, beginning final output and cleanup");
+
+ if (!finalize_directories(&rc))
+ goto done;
+
+ if (prune && rc.run_rsync &&
+ !prune_unauthenticated(&rc, &rc.unauthenticated,
+ strlen(rc.unauthenticated.s))) {
+ logmsg(&rc, log_sys_err, "Trouble pruning old unauthenticated data");
+ goto done;
+ }
+
+ if (!write_xml_file(&rc, xmlfile))
+ goto done;
+
+ ret = 0;
+
+ done:
+ log_openssl_errors(&rc);
+
+ /*
+ * Do NOT free cfg_section, NCONF_free() takes care of that
+ */
+ sk_validation_status_t_pop_free(rc.validation_status, validation_status_t_free);
+ sk_rsync_history_t_pop_free(rc.rsync_history, rsync_history_t_free);
+ validation_status_t_free(rc.validation_status_in_waiting);
+ X509_STORE_free(rc.x509_store);
+ NCONF_free(cfg_handle);
+ CONF_modules_free();
+ EVP_cleanup();
+ ERR_free_strings();
+ if (rc.rsync_program)
+ free(rc.rsync_program);
+ if (lockfile && lockfd >= 0 && !keep_lockfile)
+ unlink(lockfile);
+ if (lockfile)
+ free(lockfile);
+ if (xmlfile)
+ free(xmlfile);
+
+ if (start) {
+ finish = time(0);
+ logmsg(&rc, log_telemetry,
+ "Finished, elapsed time %u:%02u:%02u",
+ (unsigned) ((finish - start) / 3600),
+ (unsigned) ((finish - start) / 60 % 60),
+ (unsigned) ((finish - start) % 60));
+ }
+
+ return ret;
+}
diff --git a/rp/rcynic/rcynic.xsl b/rp/rcynic/rcynic.xsl
new file mode 100644
index 00000000..487cd2f6
--- /dev/null
+++ b/rp/rcynic/rcynic.xsl
@@ -0,0 +1,312 @@
<?xml version="1.0"?>
<!--
  - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
  -
  - Permission to use, copy, modify, and/or distribute this software for any
  - purpose with or without fee is hereby granted, provided that the above
  - copyright notice and this permission notice appear in all copies.
  -
  - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
  - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
  - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
  - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
  - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
  - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  - PERFORMANCE OF THIS SOFTWARE.
  -
  - Portions copyright (C) 2006 American Registry for Internet Numbers ("ARIN")
  -
  - Permission to use, copy, modify, and distribute this software for any
  - purpose with or without fee is hereby granted, provided that the above
  - copyright notice and this permission notice appear in all copies.
  -
  - THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
  - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
  - AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
  - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
  - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
  - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  - PERFORMANCE OF THIS SOFTWARE.
  -->

<!-- $Id$ -->

<!--
  - XSL stylesheet to render rcynic's xml-summary output as basic (X)HTML.
  -
  - This is a bit more complicated than strictly necessary, because I wanted
  - the ability to drop out columns that are nothing but zeros.
  - There's probably some clever way of using XPath to simplify this,
  - but I don't expect the data sets to be large enough for performance
  - to be an issue here.  Feel free to show me how to do better.
  -
  - NOTE(review): this stylesheet depends on the EXSLT extensions
  - common:node-set() and str:tokenize(), so it requires an EXSLT-capable
  - processor such as xsltproc (libxslt).
  -->

<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
                version="1.0"
                xmlns:com="http://exslt.org/common"
                xmlns:str="http://exslt.org/strings"
                exclude-result-prefixes="com str">

  <xsl:output omit-xml-declaration="yes" indent="yes" method="xml" encoding="US-ASCII"
              doctype-public="-//W3C//DTD XHTML 1.0 Strict//EN"
              doctype-system="http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"/>

  <!-- Tunable rendering knobs; override with the processor's param
       mechanism (e.g. xsltproc \-\-param).  Booleans are 0/1. -->
  <xsl:param name="refresh" select="1800"/>
  <xsl:param name="suppress-zero-columns" select="1"/>
  <xsl:param name="show-total" select="1"/>
  <xsl:param name="use-colors" select="1"/>
  <xsl:param name="show-detailed-status" select="1"/>
  <xsl:param name="show-problems" select="0"/>
  <xsl:param name="show-summary" select="1"/>

  <xsl:template match="/">
    <xsl:comment>Generators</xsl:comment>
    <xsl:comment><xsl:value-of select="rcynic-summary/@rcynic-version"/></xsl:comment>
    <xsl:comment>$Id$</xsl:comment>
    <html>
      <xsl:variable name="title">
        <xsl:text>rcynic summary </xsl:text>
        <xsl:value-of select="rcynic-summary/@date"/>
      </xsl:variable>
      <head>
        <title>
          <xsl:value-of select="$title"/>
        </title>
        <!-- Auto-refresh the page unless disabled (refresh = 0). -->
        <xsl:if test="$refresh != 0">
          <meta http-equiv="Refresh" content="{$refresh}"/>
        </xsl:if>
        <style type="text/css">
          td	{ text-align: center; padding: 4px }
          td.uri	{ text-align: left }
          td.host	{ text-align: left }
          <xsl:if test="$use-colors != 0">
            tr.good,td.good	{ background-color: #77ff77 }
            tr.warn,td.warn	{ background-color: yellow }
            tr.bad,td.bad	{ background-color: #ff5500 }
          </xsl:if>
        </style>
      </head>
      <body>
        <h1><xsl:value-of select="$title"/></h1>

        <!-- Summary output, old host-oriented format -->
        <xsl:if test="$show-summary != 0">

          <!-- Collect data we need to display: one <x/> element per
               rsync:// validation_status entry, annotated with hostname,
               mood (label "kind"), filename extension and generation. -->
          <xsl:variable name="host-data">
            <xsl:for-each select="rcynic-summary/validation_status">
              <xsl:sort order="ascending" data-type="text" select="."/>
              <xsl:variable name="uri" select="string(.)"/>
              <xsl:if test="starts-with($uri, 'rsync://')">
                <xsl:variable name="hostname" select="str:tokenize($uri, ':/')[2]"/>
                <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = current()/@status]/@kind"/>
                <!-- fn2 is the URI's three-letter extension (".cer" etc.),
                     only when a generation attribute is present. -->
                <xsl:variable name="fn2">
                  <xsl:if test="substring($uri, string-length($uri) - 3, 1) = '.' and @generation != ''">
                    <xsl:value-of select="substring($uri, string-length($uri) - 3)"/>
                  </xsl:if>
                </xsl:variable>
                <x hostname="{$hostname}" timestamp="{@timestamp}" uri="{$uri}" status="{@status}" mood="{$mood}" fn2="{$fn2}" generation="{@generation}"/>
              </xsl:if>
            </xsl:for-each>
          </xsl:variable>

          <!-- Calculate set of unique hostnames.  com:node-set() is needed
               because XSLT 1.0 variables are result tree fragments. -->
          <xsl:variable name="unique-hostnames">
            <xsl:for-each select="com:node-set($host-data)/x[not(@hostname = following::x/@hostname)]">
              <x hostname="{@hostname}"/>
            </xsl:for-each>
          </xsl:variable>

          <!-- Calculate set of unique filename types -->
          <xsl:variable name="unique-fn2s">
            <xsl:for-each select="com:node-set($host-data)/x[not(@fn2 = following::x/@fn2)]">
              <x fn2="{@fn2}"/>
            </xsl:for-each>
          </xsl:variable>

          <!-- Generation names -->
          <xsl:variable name="unique-generations">
            <xsl:for-each select="com:node-set($host-data)/x[not(@generation = following::x/@generation)]">
              <x generation="{@generation}"/>
            </xsl:for-each>
          </xsl:variable>

          <!-- Calculate grand totals, figure out which columns to display
               (a column is hidden when suppress-zero-columns is set and
               its total is zero). -->
          <xsl:variable name="totals">
            <xsl:for-each select="rcynic-summary/labels/*">
              <xsl:variable name="sum" select="count(com:node-set($host-data)/x[@status = name(current())])"/>
              <xsl:variable name="show">
                <xsl:choose>
                  <xsl:when test="$suppress-zero-columns = 0 or $sum &gt; 0">
                    <xsl:text>1</xsl:text>
                  </xsl:when>
                  <xsl:otherwise>
                    <xsl:text>0</xsl:text>
                  </xsl:otherwise>
                </xsl:choose>
              </xsl:variable>
              <x name="{name(current())}" sum="{$sum}" text="{.}" show="{$show}" mood="{@kind}"/>
            </xsl:for-each>
          </xsl:variable>

          <!-- Calculate how many columns we'll be displaying -->
          <xsl:variable name="columns" select="count(com:node-set($totals)/x[@show = 1])"/>

          <!-- Show the total -->
          <xsl:if test="$show-total != 0">
            <br/>
            <h2>Grand Totals</h2>
            <table class="summary" rules="all" border="1">
              <thead>
                <tr>
                  <td/> <!-- was hostname -->
                  <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
                    <td><b><xsl:value-of select="@text"/></b></td>
                  </xsl:for-each>
                </tr>
              </thead>
              <tbody>
                <tr>
                  <td><b>Total</b></td>
                  <xsl:for-each select="com:node-set($totals)/x">
                    <xsl:if test="$suppress-zero-columns = 0 or @sum &gt; 0">
                      <td class="{@mood}"><xsl:value-of select="@sum"/></td>
                    </xsl:if>
                  </xsl:for-each>
                </tr>
              </tbody>
            </table>
          </xsl:if>

          <!-- Generate the HTML: one table per repository host, one row
               per (generation, extension) pair plus a per-host total. -->
          <br/>
          <h2>Summaries by Repository Host</h2>
          <xsl:for-each select="com:node-set($unique-hostnames)/x">
            <xsl:sort order="ascending" data-type="text" select="@hostname"/>
            <xsl:variable name="hostname" select="@hostname"/>
            <br/>
            <h3><xsl:value-of select="$hostname"/></h3>
            <table class="summary" rules="all" border="1">
              <thead>
                <tr>
                  <td/> <!-- was hostname -->
                  <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
                    <td><b><xsl:value-of select="@text"/></b></td>
                  </xsl:for-each>
                </tr>
              </thead>
              <tbody>
                <xsl:for-each select="com:node-set($unique-fn2s)/x">
                  <xsl:sort order="ascending" data-type="text" select="@fn2"/>
                  <xsl:variable name="fn2" select="@fn2"/>
                  <xsl:for-each select="com:node-set($unique-generations)/x">
                    <xsl:sort order="ascending" data-type="text" select="@generation"/>
                    <xsl:variable name="generation" select="@generation"/>
                    <!-- Skip (generation, fn2) rows with no data at all. -->
                    <xsl:if test="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation])">
                      <tr>
                        <td><xsl:value-of select="concat($generation, ' ', $fn2)"/></td>
                        <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
                          <xsl:variable name="label" select="@name"/>
                          <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation and @status = $label])"/>
                          <xsl:choose>
                            <xsl:when test="$value != 0">
                              <td class="{@mood}">
                                <xsl:value-of select="$value"/>
                              </td>
                            </xsl:when>
                            <xsl:otherwise>
                              <td/>
                            </xsl:otherwise>
                          </xsl:choose>
                        </xsl:for-each>
                      </tr>
                    </xsl:if>
                  </xsl:for-each>
                </xsl:for-each>
                <tr>
                  <td>Total</td>
                  <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
                    <xsl:variable name="label" select="@name"/>
                    <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @status = $label])"/>
                    <xsl:choose>
                      <xsl:when test="$value != 0">
                        <td class="{@mood}">
                          <xsl:value-of select="$value"/>
                        </td>
                      </xsl:when>
                      <xsl:otherwise>
                        <td/>
                      </xsl:otherwise>
                    </xsl:choose>
                  </xsl:for-each>
                </tr>
              </tbody>
            </table>
          </xsl:for-each>

          <!-- "Problems" display: every entry whose label kind is not
               "good", colored by mood. -->
          <xsl:if test="$show-problems != 0">
            <br/>
            <h2>Problems</h2>
            <table class="problems" rules="all" border="1" >
              <thead>
                <tr>
                  <td class="status"><b>Status</b></td>
                  <td class="uri"><b>URI</b></td>
                </tr>
              </thead>
              <tbody>
                <xsl:for-each select="rcynic-summary/validation_status">
                  <xsl:variable name="status" select="@status"/>
                  <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
                  <xsl:if test="$mood != 'good'">
                    <tr class="{$mood}">
                      <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
                      <td class="uri"><xsl:value-of select="."/></td>
                    </tr>
                  </xsl:if>
                </xsl:for-each>
              </tbody>
            </table>
          </xsl:if>
        </xsl:if>

        <!-- Detailed status display: raw dump of every
             validation_status entry. -->
        <xsl:if test="$show-detailed-status != 0">
          <br/>
          <h2>Validation Status</h2>
          <table class="details" rules="all" border="1" >
            <thead>
              <tr>
                <td class="timestamp"><b>Timestamp</b></td>
                <td class="generation"><b>Generation</b></td>
                <td class="status"><b>Status</b></td>
                <td class="uri"><b>URI</b></td>
              </tr>
            </thead>
            <tbody>
              <xsl:for-each select="rcynic-summary/validation_status">
                <xsl:variable name="status" select="@status"/>
                <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
                <tr class="{$mood}">
                  <td class="timestamp"><xsl:value-of select="@timestamp"/></td>
                  <td class="generation"><xsl:value-of select="@generation"/></td>
                  <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
                  <td class="uri"><xsl:value-of select="."/></td>
                </tr>
              </xsl:for-each>
            </tbody>
          </table>
        </xsl:if>

      </body>
    </html>
  </xsl:template>

</xsl:stylesheet>

<!--
  - Local variables:
  - mode: sgml
  - End:
  -->
diff --git a/rp/rcynic/rpki-torrent.py b/rp/rcynic/rpki-torrent.py
new file mode 100644
index 00000000..9b97f298
--- /dev/null
+++ b/rp/rcynic/rpki-torrent.py
@@ -0,0 +1,721 @@
+#!/usr/local/bin/python
+
+# $Id$
+#
+# Copyright (C) 2013--2014 Dragon Research Labs ("DRL")
+# Portions copyright (C) 2012 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notices and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+import urllib2
+import httplib
+import socket
+import ssl
+import urlparse
+import zipfile
+import sys
+import os
+import email.utils
+import base64
+import hashlib
+import subprocess
+import syslog
+import traceback
+import ConfigParser
+import stat
+import time
+import errno
+import fcntl
+import argparse
+import smtplib
+import email.mime.text
+
+import transmissionrpc
+
# Environment variables Transmission sets when it invokes a
# script on torrent completion; their presence (with no positional
# arguments) tells main() we are running as a completion hook.
tr_env_vars = ("TR_TORRENT_DIR", "TR_TORRENT_ID", "TR_TORRENT_NAME")

# Exception hierarchy: each class's docstring doubles as the
# human-readable description logged when it is raised.

class WrongServer(Exception):
    "Hostname not in X.509v3 subjectAltName extension."

class UnexpectedRedirect(Exception):
    "Unexpected HTTP redirect."

class WrongMode(Exception):
    "Wrong operation for mode."

class BadFormat(Exception):
    "Zip file does not match our expectations."

class InconsistentEnvironment(Exception):
    "Environment variables received from Transmission aren't consistent."

class TorrentNotReady(Exception):
    "Torrent is not ready for checking."

class TorrentDoesNotMatchManifest(Exception):
    "Retrieved torrent does not match manifest."

class TorrentNameDoesNotMatchURL(Exception):
    "Torrent name doesn't uniquely match a URL."

class CouldNotFindTorrents(Exception):
    "Could not find torrent(s) with given name(s)."

class UseTheSourceLuke(Exception):
    "Use The Source, Luke."
+
def main():
    """
    Command dispatch.  Configure syslog, load the config file, then
    pick an operating mode based on configuration and positional
    arguments: "generate"/"mirror" when acting as generator,
    "poll" or Transmission completion-hook mode otherwise.

    Any exception is logged (traceback to syslog) and converted to
    exit status 1.
    """
    try:
        syslog_flags = syslog.LOG_PID
        if os.isatty(sys.stderr.fileno()):
            # Interactive run: mirror log messages to stderr too.
            syslog_flags |= syslog.LOG_PERROR
        syslog.openlog("rpki-torrent", syslog_flags)

        parser = argparse.ArgumentParser(description = __doc__)
        parser.add_argument("-c", "--config",
                            help = "configuration file")
        # BUG FIX: the dispatch code below referenced "argv", which was
        # never defined anywhere, so every invocation died with a
        # NameError.  Capture the positional arguments explicitly.
        parser.add_argument("argv", nargs = "*",
                            help = "action (generate, mirror, poll)")
        args = parser.parse_args()
        argv = args.argv

        global cfg
        cfg = MyConfigParser()
        # Fall back to well-known config locations when -c not given.
        cfg.read(args.config or
                 [os.path.join(dn, fn)
                  for fn in ("rcynic.conf", "rpki.conf")
                  for dn in ("/var/rcynic/etc", "/usr/local/etc", "/etc")])

        if cfg.act_as_generator:
            if len(argv) == 1 and argv[0] == "generate":
                generator_main()
            elif len(argv) == 1 and argv[0] == "mirror":
                mirror_main()
            else:
                raise UseTheSourceLuke

        else:
            # No arguments + Transmission environment variables present
            # means we were invoked as a torrent-completion hook.
            if len(argv) == 0 and all(v in os.environ for v in tr_env_vars):
                torrent_completion_main()
            elif len(argv) == 1 and argv[0] == "poll":
                poll_main()
            else:
                raise UseTheSourceLuke

    except Exception:
        # Log full traceback via syslog, one line at a time, then fail.
        for line in traceback.format_exc().splitlines():
            syslog.syslog(line)
        sys.exit(1)
+
+
def generator_main():
    """
    "generate" mode: rsync our local unauthenticated RPKI tree into the
    Transmission download area, build a fresh torrent from it, seed the
    torrent with unlimited ratio, then upload a zip containing the
    .torrent file and a manifest to the distribution server over SFTP.

    NOTE(review): the steps below are order-dependent (rsync before
    mktorrent, manifest before the zip is written, atomic rename last);
    do not reorder casually.
    """
    import paramiko

    class SFTPClient(paramiko.SFTPClient):
        # Atomic rename via the OpenSSH "posix-rename@openssh.com" SFTP
        # extension, so the published zip never appears half-written.
        def atomic_rename(self, oldpath, newpath):
            oldpath = self._adjust_cwd(oldpath)
            newpath = self._adjust_cwd(newpath)
            self._log(paramiko.common.DEBUG, 'atomic_rename(%r, %r)' % (oldpath, newpath))
            self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath)

    z = ZipFile(url = cfg.generate_url, dir = cfg.zip_dir)
    client = TransmissionClient()

    # Stop seeding any previous generation of this torrent first.
    client.remove_torrents(z.torrent_name)

    download_dir = client.get_session().download_dir
    torrent_dir = os.path.join(download_dir, z.torrent_name)
    torrent_file = os.path.join(cfg.zip_dir, z.torrent_name + ".torrent")

    syslog.syslog("Synchronizing local data from %s to %s" % (cfg.unauthenticated, torrent_dir))
    subprocess.check_call((cfg.rsync_prog, "--archive", "--delete",
                           os.path.normpath(cfg.unauthenticated) + "/",
                           os.path.normpath(torrent_dir) + "/"))

    syslog.syslog("Creating %s" % torrent_file)
    # Remove any stale torrent file; ENOENT just means there wasn't one.
    try:
        os.unlink(torrent_file)
    except OSError, e:
        if e.errno != errno.ENOENT:
            raise
    # check_output() also verifies mktorrent's exit status for us;
    # the captured output itself is not (yet) interesting.
    ignore_output_for_now = subprocess.check_output(
        (cfg.mktorrent_prog,
         "-a", cfg.tracker_url,
         "-c", "RPKI unauthenticated data snapshot generated by rpki-torrent",
         "-o", torrent_file,
         torrent_dir))

    syslog.syslog("Generating manifest")
    manifest = create_manifest(download_dir, z.torrent_name)

    syslog.syslog("Loading %s with unlimited seeding" % torrent_file)
    f = open(torrent_file, "rb")
    client.add(base64.b64encode(f.read()))   # transmissionrpc wants Base64
    f.close()
    client.unlimited_seeding(z.torrent_name)

    syslog.syslog("Creating upload connection")
    ssh = paramiko.Transport((cfg.sftp_host, cfg.sftp_port))
    try:
        hostkeys = paramiko.util.load_host_keys(cfg.sftp_hostkey_file)[cfg.sftp_host]["ssh-rsa"]
    except ConfigParser.Error:
        # Presumably raised when sftp_hostkey_file is not configured --
        # falls back to connecting without host key verification.
        # TODO(review): confirm against MyConfigParser's behavior.
        hostkeys = None
    ssh.connect(
        username = cfg.sftp_user,
        hostkey = hostkeys,
        pkey = paramiko.RSAKey.from_private_key_file(cfg.sftp_private_key_file))
    sftp = SFTPClient.from_transport(ssh)

    zip_filename = os.path.join("data", os.path.basename(z.filename))
    zip_tempname = zip_filename + ".new"

    syslog.syslog("Creating %s" % zip_tempname)
    f = sftp.open(zip_tempname, "wb")
    z.set_output_stream(f)

    syslog.syslog("Writing %s to zip" % torrent_file)
    z.write(
        torrent_file,
        arcname = os.path.basename(torrent_file),
        compress_type = zipfile.ZIP_DEFLATED)

    manifest_name = z.torrent_name + ".manifest"

    syslog.syslog("Writing %s to zip" % manifest_name)
    zi = zipfile.ZipInfo(manifest_name, time.gmtime()[:6])
    zi.external_attr = (stat.S_IFREG | 0644) << 16   # regular file, mode 0644
    zi.internal_attr = 1                             # Text, not binary
    z.writestr(zi,
               "".join("%s %s\n" % (v, k) for k, v in manifest.iteritems()),
               zipfile.ZIP_DEFLATED)

    syslog.syslog("Closing %s and renaming to %s" % (zip_tempname, zip_filename))
    z.close()
    f.close()
    # Atomic publish: readers only ever see the old or the new zip.
    sftp.atomic_rename(zip_tempname, zip_filename)

    syslog.syslog("Closing upload connection")
    ssh.close()
+
def mirror_main():
    """
    "mirror" mode: fetch every configured zip except the one we
    generate ourselves, reload each changed torrent into Transmission,
    and mark everything we mirrored for unlimited seeding.
    """
    client = TransmissionClient()
    mirrored = []

    for zip_url in cfg.zip_urls:
        if zip_url == cfg.generate_url:
            continue                    # we generate this one, don't mirror it
        zf = ZipFile(url = zip_url, dir = cfg.zip_dir, ta = cfg.zip_ta)
        if not zf.fetch():
            continue                    # unchanged since last fetch
        client.remove_torrents(zf.torrent_name)
        syslog.syslog("Mirroring torrent %s" % zf.torrent_name)
        client.add(zf.get_torrent())
        mirrored.append(zf.torrent_name)

    if mirrored:
        client.unlimited_seeding(*mirrored)
+
+
def poll_main():
    """
    "poll" mode: check each configured zip URL; load any torrent whose
    zip has changed, otherwise optionally run rcynic anyway when the
    configuration asks for that.
    """
    for zip_url in cfg.zip_urls:

        zf = ZipFile(url = zip_url, dir = cfg.zip_dir, ta = cfg.zip_ta)
        client = TransmissionClient()

        changed = zf.fetch()
        if changed:
            client.remove_torrents(zf.torrent_name)
            syslog.syslog("Adding torrent %s" % zf.torrent_name)
            client.add(zf.get_torrent())
        elif cfg.run_rcynic_anyway:
            run_rcynic(client, zf)
+
+
def torrent_completion_main():
    """
    Entry point when Transmission invokes us as a torrent-completion
    hook: cross-check the environment variables against Transmission's
    own view of the torrent, then run rcynic over the downloaded data.
    """
    torrent_name = os.getenv("TR_TORRENT_NAME")
    torrent_id = int(os.getenv("TR_TORRENT_ID"))

    z = ZipFile(url = cfg.find_url(torrent_name), dir = cfg.zip_dir, ta = cfg.zip_ta)
    client = TransmissionClient()
    torrent = client.info([torrent_id]).popitem()[1]

    # BUG FIX: check readiness BEFORE dereferencing torrent attributes.
    # The original tested "torrent is None" only after torrent.name had
    # already been accessed, so a None torrent would have raised
    # AttributeError instead of the intended TorrentNotReady.
    if torrent is None or torrent.progress != 100:
        raise TorrentNotReady("Torrent %s not ready for checking, how did I get here?" % torrent_name)

    if torrent.name != torrent_name:
        raise InconsistentEnvironment("Torrent name %s does not match ID %d" % (torrent_name, torrent_id))

    if z.torrent_name != torrent_name:
        raise InconsistentEnvironment("Torrent name %s does not match torrent name in zip file %s" % (torrent_name, z.torrent_name))

    log_email("Download complete %s" % z.url)

    run_rcynic(client, z)
+
+
+def run_rcynic(client, z):
+ """
+ Run rcynic and any post-processing we might want.
+ """
+
+ if cfg.lockfile is not None:
+ syslog.syslog("Acquiring lock %s" % cfg.lockfile)
+ lock = os.open(cfg.lockfile, os.O_WRONLY | os.O_CREAT, 0600)
+ fcntl.flock(lock, fcntl.LOCK_EX)
+ else:
+ lock = None
+
+ syslog.syslog("Checking manifest against disk")
+
+ download_dir = client.get_session().download_dir
+
+ manifest_from_disk = create_manifest(download_dir, z.torrent_name)
+ manifest_from_zip = z.get_manifest()
+
+ excess_files = set(manifest_from_disk) - set(manifest_from_zip)
+ for fn in excess_files:
+ del manifest_from_disk[fn]
+
+ if manifest_from_disk != manifest_from_zip:
+ raise TorrentDoesNotMatchManifest("Manifest for torrent %s does not match what we got" %
+ z.torrent_name)
+
+ if excess_files:
+ syslog.syslog("Cleaning up excess files")
+ for fn in excess_files:
+ os.unlink(os.path.join(download_dir, fn))
+
+ syslog.syslog("Running rcynic")
+ log_email("Starting rcynic %s" % z.url)
+ subprocess.check_call((cfg.rcynic_prog,
+ "-c", cfg.rcynic_conf,
+ "-u", os.path.join(client.get_session().download_dir, z.torrent_name)))
+ log_email("Completed rcynic %s" % z.url)
+
+ for cmd in cfg.post_rcynic_commands:
+ syslog.syslog("Running post-rcynic command: %s" % cmd)
+ subprocess.check_call(cmd, shell = True)
+
+ if lock is not None:
+ syslog.syslog("Releasing lock %s" % cfg.lockfile)
+ os.close(lock)
+
+# See http://www.minstrel.org.uk/papers/sftp/ for details on how to
+# set up safe upload-only SFTP directories on the server. In
+# particular http://www.minstrel.org.uk/papers/sftp/builtin/ is likely
+# to be the right path.
+
+
+class ZipFile(object):
+ """
+ Augmented version of standard python zipfile.ZipFile class, with
+ some extra methods and specialized capabilities.
+
+ All methods of the standard zipfile.ZipFile class are supported, but
+ the constructor arguments are different, and opening the zip file
+ itself is deferred until a call which requires this, since the file
+ may first need to be fetched via HTTPS.
+ """
+
    def __init__(self, url, dir, ta = None, verbose = True):
        # url:  HTTPS URL of the zip file; its basename must end in ".zip".
        # dir:  local directory in which the zip file is cached.
        # ta:   trust anchor (CA bundle path) for checking the HTTPS
        #       server; None when this object is only used for writing.
        self.url = url
        self.dir = dir
        self.ta = ta
        self.verbose = verbose
        self.filename = os.path.join(dir, os.path.basename(url))
        self.changed = False
        self.zf = None          # underlying zipfile.ZipFile, opened lazily
        self.peercert = None    # peer certificate from the last HTTPS fetch
        # Torrent name is the zip basename minus its ".zip" extension.
        self.torrent_name, zip_ext = os.path.splitext(os.path.basename(url))
        if zip_ext != ".zip":
            raise BadFormat
+
+
    def __getattr__(self, name):
        # Delegate unknown attributes to the real zipfile.ZipFile,
        # opening the (already fetched) file on first use.  Python only
        # calls __getattr__ for names NOT found on the instance, so the
        # attributes set in __init__ (including self.zf) never recurse
        # through here.
        if self.zf is None:
            self.zf = zipfile.ZipFile(self.filename)
        return getattr(self.zf, name)
+
+
    def build_opener(self):
        """
        Voodoo to create a urllib2.OpenerDirector object with TLS
        certificate checking enabled and a hook to set self.peercert so
        our caller can check the subjectAltName field.

        You probably don't want to look at this if you can avoid it.
        """

        assert self.ta is not None

        # Yes, we're constructing one-off classes.  Look away, look away.

        class HTTPSConnection(httplib.HTTPSConnection):
            zip = self          # capture the enclosing ZipFile instance
            def connect(self):
                sock = socket.create_connection((self.host, self.port), self.timeout)
                # Tunnel first when going through an HTTP proxy.
                if getattr(self, "_tunnel_host", None):
                    self.sock = sock
                    self._tunnel()
                # CERT_REQUIRED plus ca_certs forces verification against
                # our trust anchor; stock Python 2 urllib2 does not verify
                # server certificates at all.
                self.sock = ssl.wrap_socket(sock,
                                            keyfile = self.key_file,
                                            certfile = self.cert_file,
                                            cert_reqs = ssl.CERT_REQUIRED,
                                            ssl_version = ssl.PROTOCOL_TLSv1,
                                            ca_certs = self.zip.ta)
                # Stash the peer certificate so the caller can run
                # check_subjectAltNames() after the request completes.
                self.zip.peercert = self.sock.getpeercert()

        class HTTPSHandler(urllib2.HTTPSHandler):
            def https_open(self, req):
                return self.do_open(HTTPSConnection, req)

        return urllib2.build_opener(HTTPSHandler)
+
+
+ def check_subjectAltNames(self):
+ """
+ Check self.peercert against URL to make sure we were talking to
+ the right HTTPS server.
+ """
+
+ hostname = urlparse.urlparse(self.url).hostname
+ subjectAltNames = set(i[1]
+ for i in self.peercert.get("subjectAltName", ())
+ if i[0] == "DNS")
+ if hostname not in subjectAltNames:
+ raise WrongServer
+
+
+ def download_file(self, r, bufsize = 4096):
+ """
+ Downloaded file to disk.
+ """
+
+ tempname = self.filename + ".new"
+ f = open(tempname, "wb")
+ n = int(r.info()["Content-Length"])
+ for i in xrange(0, n - bufsize, bufsize):
+ f.write(r.read(bufsize))
+ f.write(r.read())
+ f.close()
+ mtime = email.utils.mktime_tz(email.utils.parsedate_tz(r.info()["Last-Modified"]))
+ os.utime(tempname, (mtime, mtime))
+ os.rename(tempname, self.filename)
+
+
    def set_output_stream(self, stream):
        """
        Set up this zip file for writing to a network stream.

        Must be called before any zipfile operation has opened the local
        file (the assert guards against that), after which all delegated
        zipfile methods write to the given stream instead.
        """

        assert self.zf is None
        self.zf = zipfile.ZipFile(stream, "w")
+
+
+ def fetch(self):
+ """
+ Fetch zip file from URL given to constructor.
+ """
+
+ headers = { "User-Agent" : "rpki-torrent" }
+ try:
+ headers["If-Modified-Since"] = email.utils.formatdate(
+ os.path.getmtime(self.filename), False, True)
+ except OSError:
+ pass
+
+ syslog.syslog("Checking %s..." % self.url)
+ try:
+ r = self.build_opener().open(urllib2.Request(self.url, None, headers))
+ syslog.syslog("%s has changed, starting download" % self.url)
+ self.changed = True
+ log_email("Downloading %s" % self.url)
+ except urllib2.HTTPError, e:
+ if e.code == 304:
+ syslog.syslog("%s has not changed" % self.url)
+ elif e.code == 404:
+ syslog.syslog("%s does not exist" % self.url)
+ else:
+ raise
+ r = None
+
+ self.check_subjectAltNames()
+
+ if r is not None and r.geturl() != self.url:
+ raise UnexpectedRedirect
+
+ if r is not None:
+ self.download_file(r)
+ r.close()
+
+ return self.changed
+
+
+ def check_format(self):
+ """
+ Make sure that format of zip file matches our preconceptions: it
+ should contain two files, one of which is the .torrent file, the
+ other is the manifest, with names derived from the torrent name
+ inferred from the URL.
+ """
+
+ if set(self.namelist()) != set((self.torrent_name + ".torrent", self.torrent_name + ".manifest")):
+ raise BadFormat
+
+
+ def get_torrent(self):
+ """
+ Extract torrent file from zip file, encoded in Base64 because
+ that's what the transmissionrpc library says it wants.
+ """
+
+ self.check_format()
+ return base64.b64encode(self.read(self.torrent_name + ".torrent"))
+
+
+ def get_manifest(self):
+ """
+ Extract manifest from zip file, as a dictionary.
+
+ For the moment we're fixing up the internal file names from the
+ format that the existing shell-script prototype uses, but this
+ should go away once this program both generates and checks the
+ manifests.
+ """
+
+ self.check_format()
+ result = {}
+ for line in self.open(self.torrent_name + ".manifest"):
+ h, fn = line.split()
+ #
+ # Fixup for earlier manifest format, this should go away
+ if not fn.startswith(self.torrent_name):
+ fn = os.path.normpath(os.path.join(self.torrent_name, fn))
+ #
+ result[fn] = h
+ return result
+
+
+def create_manifest(topdir, torrent_name):
+ """
+ Generate a manifest, expressed as a dictionary.
+ """
+
+ result = {}
+ topdir = os.path.abspath(topdir)
+ for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, torrent_name)):
+ for filename in filenames:
+ filename = os.path.join(dirpath, filename)
+ f = open(filename, "rb")
+ result[os.path.relpath(filename, topdir)] = hashlib.sha256(f.read()).hexdigest()
+ f.close()
+ return result
+
+
+def log_email(msg, subj = None):
+ try:
+ if not msg.endswith("\n"):
+ msg += "\n"
+ if subj is None:
+ subj = msg.partition("\n")[0]
+ m = email.mime.text.MIMEText(msg)
+ m["Date"] = time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime())
+ m["From"] = cfg.log_email
+ m["To"] = cfg.log_email
+ m["Subject"] = subj
+ s = smtplib.SMTP("localhost")
+ s.sendmail(cfg.log_email, [cfg.log_email], m.as_string())
+ s.quit()
+ except ConfigParser.Error:
+ pass
+
+
+class TransmissionClient(transmissionrpc.client.Client):
+ """
+ Extension of transmissionrpc.client.Client.
+ """
+
+ def __init__(self, **kwargs):
+ kwargs.setdefault("address", "127.0.0.1")
+ kwargs.setdefault("user", cfg.transmission_username)
+ kwargs.setdefault("password", cfg.transmission_password)
+ transmissionrpc.client.Client.__init__(self, **kwargs)
+
+
+ def find_torrents(self, *names):
+ """
+ Find torrents with given name(s), return id(s).
+ """
+
+ result = [i for i, t in self.list().iteritems() if t.name in names]
+ if not result:
+ raise CouldNotFindTorrents
+ return result
+
+
+ def remove_torrents(self, *names):
+ """
+ Remove any torrents with the given name(s).
+ """
+
+ try:
+ ids = self.find_torrents(*names)
+ except CouldNotFindTorrents:
+ pass
+ else:
+ syslog.syslog("Removing torrent%s %s (%s)" % (
+ "" if len(ids) == 1 else "s",
+ ", ".join(names),
+ ", ".join("#%s" % i for i in ids)))
+ self.remove(ids)
+
+ def unlimited_seeding(self, *names):
+ """
+ Set unlimited seeding for specified torrents.
+ """
+
+ # Apparently seedRatioMode = 2 means "no limit"
+ try:
+ self.change(self.find_torrents(*names), seedRatioMode = 2)
+ except CouldNotFindTorrents:
+ syslog.syslog("Couldn't tweak seedRatioMode, blundering onwards")
+
+
+class MyConfigParser(ConfigParser.RawConfigParser):
+
+ rpki_torrent_section = "rpki-torrent"
+
+ @property
+ def zip_dir(self):
+ return self.get(self.rpki_torrent_section, "zip_dir")
+
+ @property
+ def zip_ta(self):
+ return self.get(self.rpki_torrent_section, "zip_ta")
+
+ @property
+ def rcynic_prog(self):
+ return self.get(self.rpki_torrent_section, "rcynic_prog")
+
+ @property
+ def rcynic_conf(self):
+ return self.get(self.rpki_torrent_section, "rcynic_conf")
+
+ @property
+ def run_rcynic_anyway(self):
+ return self.getboolean(self.rpki_torrent_section, "run_rcynic_anyway")
+
+ @property
+ def generate_url(self):
+ return self.get(self.rpki_torrent_section, "generate_url")
+
+ @property
+ def act_as_generator(self):
+ try:
+ return self.get(self.rpki_torrent_section, "generate_url") != ""
+ except ConfigParser.Error:
+ return False
+
+ @property
+ def rsync_prog(self):
+ return self.get(self.rpki_torrent_section, "rsync_prog")
+
+ @property
+ def mktorrent_prog(self):
+ return self.get(self.rpki_torrent_section, "mktorrent_prog")
+
+ @property
+ def tracker_url(self):
+ return self.get(self.rpki_torrent_section, "tracker_url")
+
+ @property
+ def sftp_host(self):
+ return self.get(self.rpki_torrent_section, "sftp_host")
+
+ @property
+ def sftp_port(self):
+ try:
+ return self.getint(self.rpki_torrent_section, "sftp_port")
+ except ConfigParser.Error:
+ return 22
+
+ @property
+ def sftp_user(self):
+ return self.get(self.rpki_torrent_section, "sftp_user")
+
+ @property
+ def sftp_hostkey_file(self):
+ return self.get(self.rpki_torrent_section, "sftp_hostkey_file")
+
+ @property
+ def sftp_private_key_file(self):
+ return self.get(self.rpki_torrent_section, "sftp_private_key_file")
+
+ @property
+ def lockfile(self):
+ try:
+ return self.get(self.rpki_torrent_section, "lockfile")
+ except ConfigParser.Error:
+ return None
+
+ @property
+ def unauthenticated(self):
+ try:
+ return self.get(self.rpki_torrent_section, "unauthenticated")
+ except ConfigParser.Error:
+ return self.get("rcynic", "unauthenticated")
+
+ @property
+ def log_email(self):
+ return self.get(self.rpki_torrent_section, "log_email")
+
+ @property
+ def transmission_username(self):
+ try:
+ return self.get(self.rpki_torrent_section, "transmission_username")
+ except ConfigParser.Error:
+ return None
+
+ @property
+ def transmission_password(self):
+ try:
+ return self.get(self.rpki_torrent_section, "transmission_password")
+ except ConfigParser.Error:
+ return None
+
+ def multioption_iter(self, name, getter = None):
+ if getter is None:
+ getter = self.get
+ if self.has_option(self.rpki_torrent_section, name):
+ yield getter(self.rpki_torrent_section, name)
+ name += "."
+ names = [i for i in self.options(self.rpki_torrent_section) if i.startswith(name) and i[len(name):].isdigit()]
+ names.sort(key = lambda s: int(s[len(name):]))
+ for name in names:
+ yield getter(self.rpki_torrent_section, name)
+
+ @property
+ def zip_urls(self):
+ return self.multioption_iter("zip_url")
+
+ @property
+ def post_rcynic_commands(self):
+ return self.multioption_iter("post_rcynic_command")
+
+ def find_url(self, torrent_name):
+ urls = [u for u in self.zip_urls
+ if os.path.splitext(os.path.basename(u))[0] == torrent_name]
+ if len(urls) != 1:
+ raise TorrentNameDoesNotMatchURL("Can't find URL matching torrent name %s" % torrent_name)
+ return urls[0]
+
+
+if __name__ == "__main__":
+ main()
diff --git a/rp/rcynic/rules.darwin.mk b/rp/rcynic/rules.darwin.mk
new file mode 100644
index 00000000..d37b0e75
--- /dev/null
+++ b/rp/rcynic/rules.darwin.mk
@@ -0,0 +1,108 @@
+# $Id$
+
+install-user-and-group: .FORCE
+ @if /usr/bin/dscl . -read "/Groups/${RCYNIC_GROUP}" >/dev/null 2>&1; \
+ then \
+ echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
+ elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \
+ /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" && \
+ /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" RealName "${RCYNIC_GECOS}" && \
+ /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" PrimaryGroupID "$$gid" && \
+ /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
+ /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" Password "*"; \
+ then \
+ echo "Added group \"${RCYNIC_GROUP}\"."; \
+ else \
+ echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi; \
+ if /usr/bin/dscl . -read "/Users/${RCYNIC_USER}" >/dev/null 2>&1; \
+ then \
+ echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
+ elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \
+ /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" && \
+ /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UserShell "/usr/bin/false" && \
+ /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" RealName "${RCYNIC_GECOS}" && \
+ /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UniqueID "$$uid" && \
+ /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" PrimaryGroupID "$$gid" && \
+ /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" NFSHomeDirectory "/var/empty" && \
+ /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
+ /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" Password "*"; \
+ then \
+ echo "Added user \"${RCYNIC_USER}\"."; \
+ else \
+ echo "Adding user \"${RCYNIC_USER}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+ @if /usr/bin/dscl . -read "/Groups/${RPKIRTR_GROUP}" >/dev/null 2>&1; \
+ then \
+ echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
+ elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \
+ /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" && \
+ /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" RealName "${RPKIRTR_GECOS}" && \
+ /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" PrimaryGroupID "$$gid" && \
+ /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
+ /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" Password "*"; \
+ then \
+ echo "Added group \"${RPKIRTR_GROUP}\"."; \
+ else \
+ echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi; \
+ if /usr/bin/dscl . -read "/Users/${RPKIRTR_USER}" >/dev/null 2>&1; \
+ then \
+ echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
+ elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \
+ /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" && \
+ /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UserShell "/usr/bin/false" && \
+ /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" RealName "${RPKIRTR_GECOS}" && \
+ /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UniqueID "$$uid" && \
+ /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" PrimaryGroupID "$$gid" && \
+ /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" NFSHomeDirectory "/var/empty" && \
+ /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \
+ /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" Password "*"; \
+ then \
+ echo "Added user \"${RPKIRTR_USER}\"."; \
+ else \
+ echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+
+
+install-shared-libraries: .FORCE
+ @echo "Copying required shared libraries"
+ @shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync"; \
+ while true; \
+ do \
+ closure="$$(/usr/bin/otool -L $${shared_libraries} | /usr/bin/awk '/:$$/ {next} {print $$1}' | /usr/bin/sort -u)"; \
+ if test "x$$shared_libraries" = "x$$closure";
+ then \
+ break; \
+ else \
+ shared_libraries="$$closure"; \
+ fi; \
+ done; \
+ for shared in /usr/lib/dyld $$shared_libraries; \
+ do \
+ if /bin/test -r "${RCYNIC_DIR}/$${shared}"; \
+ then \
+ echo "You already have a \"${RCYNIC_DIR}/$${shared}\", so I will use it"; \
+ elif /usr/bin/install -m 555 -o root -g wheel -p "$${shared}" "${RCYNIC_DIR}/$${shared}"; \
+ then \
+ echo "Copied $${shared} into ${RCYNIC_DIR}"; \
+ else \
+ echo "Unable to copy $${shared} into ${RCYNIC_DIR}"; \
+ exit 1; \
+ fi; \
+ done
+
+install-rc-scripts:
+ ${INSTALL} -o root -g wheel -d ${DESTDIR}/Library/StartupItems/RCynic
+ ${INSTALL} -o root -g wheel -m 555 \
+ rc-scripts/darwin/RCynic \
+ rc-scripts/darwin/StartupParameters.plist \
+ ${DESTDIR}/Library/Startup/RCynic
diff --git a/rp/rcynic/rules.freebsd.mk b/rp/rcynic/rules.freebsd.mk
new file mode 100644
index 00000000..5233386e
--- /dev/null
+++ b/rp/rcynic/rules.freebsd.mk
@@ -0,0 +1,56 @@
+# $Id$
+
+install-user-and-group: .FORCE
+ @if /usr/sbin/pw groupshow "${RCYNIC_GROUP}" 2>/dev/null; \
+ then \
+ echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
+ elif /usr/sbin/pw groupadd ${RCYNIC_GROUP}; \
+ then \
+ echo "Added group \"${RCYNIC_GROUP}\"."; \
+ else \
+ echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+ @if /usr/sbin/pw groupshow "${RPKIRTR_GROUP}" 2>/dev/null; \
+ then \
+ echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
+ elif /usr/sbin/pw groupadd ${RPKIRTR_GROUP}; \
+ then \
+ echo "Added group \"${RPKIRTR_GROUP}\"."; \
+ else \
+ echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+ @if /usr/sbin/pw usershow "${RCYNIC_USER}" 2>/dev/null; \
+ then \
+ echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
+ elif /usr/sbin/pw useradd ${RCYNIC_USER} -g ${RCYNIC_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RCYNIC_GECOS}" -G "${RPKIRTR_GROUP}"; \
+ then \
+ echo "Added user \"${RCYNIC_USER}\"."; \
+ else \
+ echo "Adding user \"${RCYNIC_USER}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+ @if /usr/sbin/pw usershow "${RPKIRTR_USER}" 2>/dev/null; \
+ then \
+ echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
+ elif /usr/sbin/pw useradd ${RPKIRTR_USER} -g ${RPKIRTR_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RPKIRTR_GECOS}"; \
+ then \
+ echo "Added user \"${RPKIRTR_USER}\"."; \
+ else \
+ echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+
+
+# We use static compilation on FreeBSD, so no need for shared libraries
+
+install-shared-libraries:
+ @true
+
+install-rc-scripts:
+ ${INSTALL} -m 555 -o root -g wheel -p rc-scripts/freebsd/rc.d.rcynic ${DESTDIR}/usr/local/etc/rc.d/rcynic
diff --git a/rp/rcynic/rules.linux.mk b/rp/rcynic/rules.linux.mk
new file mode 100644
index 00000000..6a962cef
--- /dev/null
+++ b/rp/rcynic/rules.linux.mk
@@ -0,0 +1,92 @@
+# $Id$
+
+install-user-and-group: .FORCE
+ @if getent group ${RCYNIC_GROUP} >/dev/null; \
+ then \
+ echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \
+ elif /usr/sbin/groupadd ${RCYNIC_GROUP}; \
+ then \
+ echo "Added group \"${RCYNIC_GROUP}\"."; \
+ else \
+ echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+ @nogroup='-N'; \
+ if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \
+ if getent passwd ${RCYNIC_USER} >/dev/null; \
+ then \
+ echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \
+ elif /usr/sbin/useradd -g ${RCYNIC_GROUP} -M $$nogroup -d "${RCYNIC_DIR}" -s /sbin/nologin -c "${RCYNIC_GECOS}" ${RCYNIC_USER}; \
+ then \
+ echo "Added user \"${RCYNIC_USER}\"."; \
+ else \
+ echo "Adding user \"${RCYNIC_USER}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+ @if getent group ${RPKIRTR_GROUP} >/dev/null; \
+ then \
+ echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \
+ elif /usr/sbin/groupadd ${RPKIRTR_GROUP}; \
+ then \
+ echo "Added group \"${RPKIRTR_GROUP}\"."; \
+ else \
+ echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+ @nogroup='-N'; \
+ if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \
+ if getent passwd ${RPKIRTR_USER} >/dev/null; \
+ then \
+ echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \
+ elif /usr/sbin/useradd -g ${RPKIRTR_GROUP} -M $$nogroup -d "${RPKIRTR_DIR}" -s /sbin/nologin -c "${RPKIRTR_GECOS}" ${RPKIRTR_USER}; \
+ then \
+ echo "Added user \"${RPKIRTR_USER}\"."; \
+ else \
+ echo "Adding user \"${RPKIRTR_USER}\" failed..."; \
+ echo "Please create it, then try again."; \
+ exit 1; \
+ fi
+ usermod -a -G ${RPKIRTR_GROUP} ${RCYNIC_USER}
+
+install-shared-libraries: .FORCE
+ @echo "Copying required shared libraries"
+ @if test -d /lib64; then libdir=/lib64; else libdir=/lib; fi; \
+ shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync $$(/usr/bin/find $${libdir} -name 'libnss*.so*' -print)"; \
+ while true; \
+ do \
+ closure="$$(/usr/bin/ldd $${shared_libraries} | \
+ ${AWK} ' \
+ { sub(/:$/, "") } \
+ $$0 == "${RCYNIC_DIR}/bin/rcynic" { next } \
+ $$0 == "${RCYNIC_DIR}/bin/rsync" { next } \
+ $$1 ~ /\/ld-linux\.so/ { next } \
+ { for (i = 1; i <= NF; i++) if ($$i ~ /^\//) print $$i } \
+ ' | \
+ ${SORT} -u)"; \
+ if test "X$$shared_libraries" = "X$$closure"; \
+ then \
+ break; \
+ else \
+ shared_libraries="$$closure"; \
+ fi; \
+ done; \
+ if test -f $${libdir}/libresolv.so.2; \
+ then \
+ shared_libraries="$${shared_libraries} $${libdir}/libresolv.so.2";
+ fi; \
+ for shared in $${libdir}/*ld*.so* $$shared_libraries; \
+ do \
+ if test ! -r "${RCYNIC_DIR}/$${shared}"; \
+ then \
+ ${INSTALL} -m 555 -d `dirname "${RCYNIC_DIR}$${shared}"` && \
+ ${INSTALL} -m 555 -p "$${shared}" "${RCYNIC_DIR}$${shared}"; \
+ fi; \
+ done
+
+# No devfs, so no rc script
+
+install-rc-scripts:
+ @true
diff --git a/rp/rcynic/rules.unknown.mk b/rp/rcynic/rules.unknown.mk
new file mode 100644
index 00000000..6ce3ea18
--- /dev/null
+++ b/rp/rcynic/rules.unknown.mk
@@ -0,0 +1,4 @@
+# $Id$
+
+install-user-and-group install-shared-libraries install-rc-scripts: .FORCE
+ @echo "Don't know how to make $@ on this platform"; exit 1
diff --git a/rp/rcynic/sample-rcynic.conf b/rp/rcynic/sample-rcynic.conf
new file mode 100644
index 00000000..6fa49b9c
--- /dev/null
+++ b/rp/rcynic/sample-rcynic.conf
@@ -0,0 +1,39 @@
+# $Id$
+#
+# Sample rcynic configuration file for jailed environment.
+
+[rcynic]
+rsync-program = /bin/rsync
+authenticated = /data/authenticated
+unauthenticated = /data/unauthenticated
+lockfile = /data/lock
+xml-summary = /data/rcynic.xml
+jitter = 600
+use-syslog = true
+log-level = log_usage_err
+
+# You need to specify some trust anchors here, eg:
+
+#trust-anchor.1 = /etc/trust-anchors/ta-1.cer
+#trust-anchor.2 = /etc/trust-anchors/ta-2.cer
+
+# or, using the "Trust Anchor Locator" form:
+
+#trust-anchor-locator.1 = /etc/trust-anchors/ta-1.tal
+#trust-anchor-locator.2 = /etc/trust-anchors/ta-2.tal
+
+# The choice between these two formats depends largely on the policies
+# of the entity generating the corresponding trust anchor, ie, will
+# probably be made for you by the generating entity.
+
+# If you already keep all your TAs and TALs in a single directory
+# anyway, you can also use the trust-anchor-directory directive to
+# name that directory and let rcynic track whatever TAs and TALs you
+# have there at the moment:
+
+#trust-anchor-directory = /etc/trust-anchors
+
+# Note that the order in which rcynic will read TAs and TALs from such
+# a directory is undefined. In general this is not a problem, but if
+# you really care about this for some reason, stick to the other
+# directives.
diff --git a/rp/rcynic/sample-trust-anchors/README b/rp/rcynic/sample-trust-anchors/README
new file mode 100644
index 00000000..485d0485
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/README
@@ -0,0 +1,26 @@
+$Id$
+
+These are SAMPLE trust anchors for testing rcynic. Any resemblance
+between these files and real trust anchors living, dead, or wandering
+the night in ghostly torment is purely coincidental.
+
+Remember: it's only a trust anchor if a relying party says it is.
+You are the relying party, so it's your call.
+
+The .cer files (if any) are self-signed X.509 trust anchors.
+
+The .tal files are "trust anchor locators", see RFC 6490.
+
+See the rcynic documentation for further details.
+
+==
+
+afrinic.tal AfriNIC
+apnic.tal APNIC
+arin.tal ARIN testbed
+bbn-testbed.tal.disabled Only useful for certain tests
+lacnic.tal LACNIC
+ripe-ncc-root.tal RIPE NCC
+ripe-pilot.tal A RIPE testbed
+rpki.net-testbed.tal rpki.net testbed
+testbed-apnicrpki.tal An APNIC testbed
diff --git a/rp/rcynic/sample-trust-anchors/afrinic.tal b/rp/rcynic/sample-trust-anchors/afrinic.tal
new file mode 100644
index 00000000..0f202c73
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/afrinic.tal
@@ -0,0 +1,8 @@
+rsync://rpki.afrinic.net/repository/AfriNIC.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxsAqAhWIO+ON2Ef9oRDM
+pKxv+AfmSLIdLWJtjrvUyDxJPBjgR+kVrOHUeTaujygFUp49tuN5H2C1rUuQavTH
+vve6xNF5fU3OkTcqEzMOZy+ctkbde2SRMVdvbO22+TH9gNhKDc9l7Vu01qU4LeJH
+k3X0f5uu5346YrGAOSv6AaYBXVgXxa0s9ZvgqFpim50pReQe/WI3QwFKNgpPzfQL
+6Y7fDPYdYaVOXPXSKtx7P4s4KLA/ZWmRL/bobw/i2fFviAGhDrjqqqum+/9w1hEl
+L/vqihVnV18saKTnLvkItA/Bf5i11Yhw2K7qv573YWxyuqCknO/iYLTR1DToBZcZ
+UQIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/altca.tal b/rp/rcynic/sample-trust-anchors/altca.tal
new file mode 100644
index 00000000..2dbc7413
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/altca.tal
@@ -0,0 +1,9 @@
+rsync://ca0.rpki.net/tal/root.cer
+
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzPSPpQxa0rxz9sbbvYGr
+UlpgyBVxSIt+k/WDKbr+VW7CjUoz6cc5KMFANkQWw3D6ER4kCwX4WJkD58AGGbw/
+WeAe6m3aHcORUVRkr45a4qSrYiG7Wq9RAXtwbhlXofB3zo+O90IlXDaVP2U9bw+Q
+yoJBJuAmZONt0bRgrktv8QhVtKvuYkH5ZIe7DkXJcJzBn6gv09dZsdwZm3xV3soX
+HEKrz5pY6Sb2xoL1CyPqzGOfVFxl0G5+dmcD/degPKxrEycAzjnHUzN1gus2jg26
+dtkix7KG/Mn1h/k53j0FdQD+zqPwakgwqjvCOdSdHMRmsikj0EF9WrZIOjZUXV6q
+6wIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-afrinic-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-afrinic-origin.tal
new file mode 100644
index 00000000..d254ec44
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-afrinic-origin.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/apnic-rpki-root-afrinic-origin.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuMLL96YV9pf0rZ4Ow/bk
+cgpoPfsRzkcgmisyCuMUdotHwrp8pepujhohatScRK09ILRrZYCdpX4121MJhqXC
+P3u3hy9fF0CeARKX/Q82nJccD4dtUp23UcFys8hwJgNYZI910ajkAxwNT//H/TFw
+oUYbzZGBR7o2awMc7GdQl/j6dgOkV6AfYy5DyDEgOUNHnUxED2rreefL/E2Fr2ST
+Esar6bTR4Tg4+nVF1PjAkgN0tKZYe4wZ6VmtqV/VTngSLysim6av7ki+JR3cVgVU
+OqXeh1vPjH2tNu6u9bX37ZrdVb6NBRer9I99IDbKvyhELb6nzo8+Q74zga9HI+Pf
+QwIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-arin-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-arin-origin.tal
new file mode 100644
index 00000000..b82a136f
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-arin-origin.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/apnic-rpki-root-arin-origin.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp6vscYtzhe0CfFk5Ro44
+llPhsInXtfAxqfYmK7m9V3khkqK3d3/ZAW6pcJm7qW8XhEGl+F5mUeeLIm5JoIhr
+kT5B5M6uL0VlCCkZJH4h76ybOa83vWITNZEDy9L3c3nK4S+Basu3vYoE4ICXGG+J
+7zg5Iw9saV+p03E2w1g16pt1QI3Cnggp6edkeWClEz3aPw/ULOIHb7YmatWwdERl
+tL9LsuMSKszQLUY7F4XVpxey/rJYAZgzDUh+b6813WAClCkkydNjsbviuekAWJbx
+sW7Mcw53u30K4g8MP03CjkDOubyoR4Qo99R1UQJCdrRsFKbSSfN/fOA4y7ikc3xs
+jQIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal
new file mode 100644
index 00000000..a4e714c0
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx9RWSL61YAAYumEiU8z8
+qH2ETVIL01ilxZlzIL9JYSORMN5Cmtf8V2JblIealSqgOTGjvSjEsiV73s67zYQI
+7C/iSOb96uf3/s86NqbxDiFQGN8qG7RNcdgVuUlAidl8WxvLNI8VhqbAB5uSg/Mr
+LeSOvXRja041VptAxIhcGzDMvlAJRwkrYK/Mo8P4E2rSQgwqCgae0ebY1CsJ3Cjf
+i67C1nw7oXqJJovvXJ4apGmEv8az23OLC6Ki54Ul/E6xk227BFttqFV3YMtKx42H
+cCcDVZZy01n7JjzvO8ccaXmHIgR7utnqhBRNNq5Xc5ZhbkrUsNtiJmrZzVlgU6Ou
+0wIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-lacnic-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-lacnic-origin.tal
new file mode 100644
index 00000000..a4d56802
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-lacnic-origin.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/apnic-rpki-root-lacnic-origin.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyoYPp3l3DWyPtLWrmRn4
+Oux9hQ5bxd0SX/f6ygHxik+I3eMJP5J0Pr2e500tyXb2uKsX9kDqu/kckr+TUMhV
+BHd5yAv8OAE3YYEvpz/7uTX7cYy2yUeA76OEP75Y88OIQEzGpPLNpIzDxMggxuDh
+IhkA5xMiUJgVoEgmWSzR+MuRBjv2422wAGB5GpLgYsOjpwvG0VPmhnE+39+10ucQ
+CLt0Ny5kOR4an2tkvHjm7rzKDnFm8MWxPzAWESdf+8g7ITzSglqxDNiK5E5rdzNt
+h1Kvp+9RwaFArw6Ky1A4HhnoplN4EfKwxq0YamuKV0ZTTpWyT2+qDuE6sOfHRbJ0
+5QIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-ripe-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-ripe-origin.tal
new file mode 100644
index 00000000..d64df3ae
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-ripe-origin.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/apnic-rpki-root-ripe-origin.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwsQlXmEklLYApoDo7GEa
+NNTEGFPU5wJpi04iXuga2xn+g/TMLOlyJbjuPYRtRm/7VbRnN3m9Ta+WETy03+Fm
+EbXzB4xxhJKVik/ARHBnrBWhLyURy8Q5/XplE9cJein37IE1mIsbKM7o/90S225w
+7GuvW7T4kjPWYmBFOywHWsfQO1EdsgiJrkz+Ab67ZkdSIiKHkf2UE6/MrbDEj+QK
+9+s/vKH8BtDhaLmTWY+bVvfJ3+AWDH6roo1ozbl5yamQFbLOl3ns30f3yOJcNSNu
+/qgMQRRyp2sXXQovhTy8yqm3LFspaCWnTmQtBieWZwibuOa4Z27A1FzTMst2T4wY
+/wIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-testbed.tal b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal
new file mode 100644
index 00000000..f87a3bf3
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal
@@ -0,0 +1,9 @@
+rsync://rpki-testbed.apnic.net/repository/rpki-testbed.apnic.net.cer
+
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyiVmZgr+aN0xZfh6yrJS
+qmsX2Q58UzjPaTnhsP88zdSqmRCVAegpRMjBjwF6Ga8tR9RP89W+tRzLf2x883Et
+vmNAax939gilN4VxGRgKL2YZO27w8Vk6aVpSJ0+CCQI497eataRFpG9eLiPfsRfo
+Fuha75MGa5m1cmJFVk0Jj0sRa9RmB/ZVxZDdwHgxAB7L875JSUnVCOLCP4FejoYt
+71wJfzFw3fZmXSPeXQ+wtxVPfh8VIOyLTyYbq3AA1Gn+herzvCsiLXZ41lcCCRZR
+ObwIGptj7KnqLoJ/MOGL6eoSY+v1B85z2Il3Nl8px3ohsApDM/MANDacxl0EgLwX
+2QIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/bbn-testbed.tal.disabled b/rp/rcynic/sample-trust-anchors/bbn-testbed.tal.disabled
new file mode 100644
index 00000000..21302ea5
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/bbn-testbed.tal.disabled
@@ -0,0 +1,8 @@
+rsync://rpki.bbn.com/rpki/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvEjkk0oC2N8Ij+ruAkrX
+cI7lSOIG1sQLC8xJJqklMlOBLao+3If0AVKSEP5+pb/hJwpXPomt6Lm4+fuZ6vX/
+q15llQL3YWkHZ49t/X47yCqxvM0txRaZafffbk6lJrofKhKKrYEcrr/rHiG+IQXn
+U1MGaQ/a8DfRDWiCQxonh7CthrLi7tr+QI9s9fDvvHPCK9OmmhZvCUDOydf+/vMn
+VShQ57KsUFcuZ0EX9UwsaIGCYGyvsYwk54UtoIGovg2IavfZK8ai0/5XEVpvKQiR
+8AixZpg5bSe7555+LhzpL5+LdqX6/hVRah0JrdR8KiXvr16Kmcitj+JLqVc0Wkd8
+zwIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/lacnic.tal b/rp/rcynic/sample-trust-anchors/lacnic.tal
new file mode 100644
index 00000000..29220d37
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/lacnic.tal
@@ -0,0 +1,8 @@
+rsync://repository.lacnic.net/rpki/lacnic/rta-lacnic-rpki.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqZEzhYK0+PtDOPfub/KR
+c3MeWx3neXx4/wbnJWGbNAtbYqXg3uU5J4HFzPgk/VIppgSKAhlO0H60DRP48by9
+gr5/yDHu2KXhOmnMg46sYsUIpfgtBS9+VtrqWziJfb+pkGtuOWeTnj6zBmBNZKK+
+5AlMCW1WPhrylIcB+XSZx8tk9GS/3SMQ+YfMVwwAyYjsex14Uzto4GjONALE5oh1
+M3+glRQduD6vzSwOD+WahMbc9vCOTED+2McLHRKgNaQf0YJ9a1jG9oJIvDkKXEqd
+fqDRktwyoD74cV57bW3tBAexB7GglITbInyQAsmdngtfg2LUMrcROHHP86QPZINj
+DQIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/ripe-ncc-root.tal b/rp/rcynic/sample-trust-anchors/ripe-ncc-root.tal
new file mode 100644
index 00000000..ebdb9f45
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/ripe-ncc-root.tal
@@ -0,0 +1,9 @@
+rsync://rpki.ripe.net/ta/ripe-ncc-ta.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0URYSGqUz2m
+yBsOzeW1jQ6NsxNvlLMyhWknvnl8NiBCs/T/S2XuNKQNZ+wBZxIgPPV
+2pFBFeQAvoH/WK83HwA26V2siwm/MY2nKZ+Olw+wlpzlZ1p3Ipj2eNc
+Krmit8BwBC8xImzuCGaV0jkRB0GZ0hoH6Ml03umLprRsn6v0xOP0+l6
+Qc1ZHMFVFb385IQ7FQQTcVIxrdeMsoyJq9eMkE6DoclHhF/NlSllXub
+ASQ9KUWqJ0+Ot3QCXr4LXECMfkpkVR2TZT+v5v658bHVs6ZxRD1b6Uk
+1uQKAyHUbn/tXvP8lrjAibGzVsXDT2L0x4Edx+QdixPgOji3gBMyL2V
+wIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/ripe-pilot.tal b/rp/rcynic/sample-trust-anchors/ripe-pilot.tal
new file mode 100644
index 00000000..971128e0
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/ripe-pilot.tal
@@ -0,0 +1,7 @@
+rsync://localcert.ripe.net/ta/ripe-ncc-pilot.cer
+
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApIXenLOBfyo7cOnm4mGKmYxsoWCp28dw3XJAoZNW
+PDK8i9MxYACpwfz7bjyGma1BWPBJuievNd6nriFI+3WG+wt2bnO2ZmiLenCwMtm8bu7BeldpWRwlAnRp4t4I
+L6sZ7T9bF+4sTrv1qiEANqam0mhtLtUfbWXV5Z4mjgnNur7fJH2lIOm7Oc2/tok1rid8WsPe18zuvgwA3M0f
+KQ/Oa4SMXKnHr3fg2cHAm1cfEEvhMKa3rUAvsKGVEYeTJNg6rh3IRnjWhZ8GmE1ywl/9qMa2z4YsUi9Bx9U+
+/zMS8qpJn/q6XBbZ8XYTTFvSWfXd6b82jSfABa4ukIDCUF/QFwIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal
new file mode 100644
index 00000000..1e466300
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal
@@ -0,0 +1,9 @@
+rsync://repo0.rpki.net/rpki/root.cer
+
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDxGUG5hbt
+CXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6Kfa5ygmqQ+xOZO
+wTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9nbtxmLRW7B67SJCBSzfa5
+XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIaBfPcs80ICMgHQX+fphvute9XLxjf
+JKJWkhZqZ0v7pZm2uhkcPx1PMGcrGee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8v
+qz45H+DKmYKvPSstZjCCq9aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGK
+twIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/testbed-apnicrpki.tal.disabled b/rp/rcynic/sample-trust-anchors/testbed-apnicrpki.tal.disabled
new file mode 100644
index 00000000..ae16a302
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/testbed-apnicrpki.tal.disabled
@@ -0,0 +1,8 @@
+rsync://apnicrpki.rand.apnic.net/repository/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApQofhU9VeRGZxlNgkrFR
+ShSIkGtRUwg1mkISvieM2oxA6LFhHoqFV25BdK/3IhOh/0I/E9WX7uWPD72MFGBB
+LsFc6JpZFkvg8+9KUIHquk46wn4bEvA8xMsPbGo+mK0CAkqOdHQSemC7nqUlR9VH
+5zH1t8aYS4mrVN59LfClmiEE7QElgmVyvccfaEd+gMGaxsDvSylWIdvVxYfFG2rB
+LiEsmfXwdn2a8b1Zx5eaFD80XV9Z7h15ESP1epSScGzMp2XB0FYMC3f9k7l5sydX
+Rj5BYo1rWaM+Y6AoHe/d8G0xm6iX6b/JqT4Ywdt8/7DbsweaIk73VnpnXrYATCt9
+VwIDAQAB
diff --git a/rp/rcynic/static-rsync/Makefile.in b/rp/rcynic/static-rsync/Makefile.in
new file mode 100644
index 00000000..8a433c7b
--- /dev/null
+++ b/rp/rcynic/static-rsync/Makefile.in
@@ -0,0 +1,44 @@
+# $Id$
+
+VERSION = 2.6.9
+
+CFG_ENV = CFLAGS='@CFLAGS@' LDFLAGS='@LDFLAGS@ @LD_STATIC_FLAG@'
+CFG_ARG =
+
+TARBALL = rsync-${VERSION}.tar.gz
+DIRNAME = rsync-${VERSION}
+
+CFG_LOG = > ../config.log 2>&1
+BIN_LOG = > ../build.log 2>&1
+
+BIN = rsync
+
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir = @abs_top_builddir@
+
+all: ${BIN}
+
+${BIN}: ${DIRNAME}/${BIN}
+ ln ${DIRNAME}/${BIN} $@
+ file $@
+
+${DIRNAME}/${BIN}: configured.stamp
+ cd ${DIRNAME} && ${MAKE} ${BIN_LOG}
+
+extracted.stamp: ${TARBALL}
+ gzip -c -d ${TARBALL} | tar -xf -
+ touch $@
+
+patched.stamp: extracted.stamp
+ for i in patches/patch-*; do if test -f "$$i"; then patch -d ${DIRNAME} <"$$i"; else :; fi; done
+ touch $@
+
+configured.stamp: patched.stamp
+ cd ${DIRNAME} && ${CFG_ENV} ./configure ${CFG_ARG} ${CFG_LOG}
+ touch $@
+
+clean:
+ rm -rf ${BIN} ${DIRNAME} *.stamp *.log
+
+distclean: clean
+ rm -f Makefile
diff --git a/rp/rcynic/static-rsync/README b/rp/rcynic/static-rsync/README
new file mode 100644
index 00000000..9ff5afa8
--- /dev/null
+++ b/rp/rcynic/static-rsync/README
@@ -0,0 +1,15 @@
+$Id$
+
+Hack to build a static rsync binary suitable for use in a chroot jail.
+
+The default configuration is for gcc, since that's the most widely
+used compiler on the platforms we use. I've provided hooks intended
+to make it simple to support other compilers just by overriding make
+variables on the command line: if you need to do something more
+drastic than this to get your compiler working, please tell me.
+
+If your platform doesn't support static binaries at all, you're on
+your own (and should whine at your OS vendor, as this is nuts).
+
+We try to stick with rsync release code, but apply security patches
+when necessary.
diff --git a/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091 b/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091
new file mode 100644
index 00000000..201af96a
--- /dev/null
+++ b/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091
@@ -0,0 +1,60 @@
+--- sender.c 2006-09-20 03:53:32.000000000 +0200
++++ sender.c 2007-07-25 15:33:05.000000000 +0200
+@@ -123,6 +123,7 @@
+ char fname[MAXPATHLEN];
+ struct file_struct *file;
+ unsigned int offset;
++ size_t l = 0;
+
+ if (ndx < 0 || ndx >= the_file_list->count)
+ return;
+@@ -133,6 +134,20 @@
+ file->dir.root, "/", NULL);
+ } else
+ offset = 0;
++
++ l = offset + 1;
++ if (file) {
++ if (file->dirname)
++ l += strlen(file->dirname);
++ if (file->basename)
++ l += strlen(file->basename);
++ }
++
++ if (l >= sizeof(fname)) {
++ rprintf(FERROR, "Overlong pathname\n");
++ exit_cleanup(RERR_FILESELECT);
++ }
++
+ f_name(file, fname + offset);
+ if (remove_source_files) {
+ if (do_unlink(fname) == 0) {
+@@ -224,6 +239,7 @@
+ enum logcode log_code = log_before_transfer ? FLOG : FINFO;
+ int f_xfer = write_batch < 0 ? batch_fd : f_out;
+ int i, j;
++ size_t l = 0;
+
+ if (verbose > 2)
+ rprintf(FINFO, "send_files starting\n");
+@@ -259,6 +275,20 @@
+ fname[offset++] = '/';
+ } else
+ offset = 0;
++
++ l = offset + 1;
++ if (file) {
++ if (file->dirname)
++ l += strlen(file->dirname);
++ if (file->basename)
++ l += strlen(file->basename);
++ }
++
++ if (l >= sizeof(fname)) {
++ rprintf(FERROR, "Overlong pathname\n");
++ exit_cleanup(RERR_FILESELECT);
++ }
++
+ fname2 = f_name(file, fname + offset);
+
+ if (verbose > 2)
diff --git a/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz b/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz
new file mode 100644
index 00000000..6377f639
--- /dev/null
+++ b/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz
Binary files differ
diff --git a/rp/rcynic/validation_status b/rp/rcynic/validation_status
new file mode 100755
index 00000000..1f7a704d
--- /dev/null
+++ b/rp/rcynic/validation_status
@@ -0,0 +1,34 @@
+# $Id$
+#
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Flat text listing of <validation_status/> elements from rcynic.xml.
+"""
+
+import sys
+
+try:
+ from lxml.etree import ElementTree
+except ImportError:
+ from xml.etree.ElementTree import ElementTree
+
+for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
+ for elt in ElementTree(file = filename).findall("validation_status"):
+ print "%s %8s %-40s %s" % (
+ elt.get("timestamp"),
+ elt.get("generation"),
+ elt.get("status"),
+ elt.text.strip())
diff --git a/rp/rtr-origin/Makefile.in b/rp/rtr-origin/Makefile.in
new file mode 100644
index 00000000..daa18009
--- /dev/null
+++ b/rp/rtr-origin/Makefile.in
@@ -0,0 +1,63 @@
+# $Id$
+
+BASE = rtr-origin
+SRC = ${BASE}.py
+BIN = ${BASE}
+
+INSTALL = @INSTALL@
+PYTHON = @PYTHON@
+AWK = @AWK@
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+datarootdir = @datarootdir@
+datadir = @datadir@
+localstatedir = @localstatedir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+libdir = @libdir@
+
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir = @abs_top_builddir@
+
+RTR_ORIGIN_INSTALL_TARGETS = @RCYNIC_INSTALL_TARGETS@
+
+RPKI_RTR_PORT = 43779
+
+SCAN_ROAS = ${bindir}/scan_roas
+
+
+all: ${BIN}
+
+clean:
+ rm -f ${BIN}
+
+install: all ${RTR_ORIGIN_INSTALL_TARGETS}
+
+install-binary:
+ if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -m 555 -d ${DESTDIR}${bindir}; fi
+ ${INSTALL} -m 555 ${BIN} ${DESTDIR}${bindir}/${BIN}
+
+deinstall uninstall:
+ rm -f ${DESTDIR}${bindir}/${BIN}
+
+distclean: clean
+ rm -rf current sockets *.ax *.ix.*
+ rm -f Makefile
+
+${BIN} : ${SRC}
+ AC_PYTHON_INTERPRETER='${PYTHON}' AC_SCAN_ROAS='${SCAN_ROAS}' \
+ ${PYTHON} ${abs_top_srcdir}/buildtools/make-rcynic-script.py <${SRC} >$@
+ chmod a+x $@
+
+test:
+ @true
+
+.FORCE:
+
+# Platform-specific rules below here.
+
+@RTR_ORIGIN_MAKE_RULES@
diff --git a/rp/rtr-origin/README b/rp/rtr-origin/README
new file mode 100644
index 00000000..dae53010
--- /dev/null
+++ b/rp/rtr-origin/README
@@ -0,0 +1,11 @@
+$Id$
+
+Sample implementation of draft-ymbk-rpki-rtr-protocol.
+
+See:
+
+- The primary documentation at http://trac.rpki.net/
+
+- The PDF manual in ../doc/manual.pdf, or
+
+- The flat text page ../doc/doc.RPKI.RP.rpki-rtr
diff --git a/rp/rtr-origin/rtr-origin b/rp/rtr-origin/rtr-origin
new file mode 100755
index 00000000..f37d2ce0
--- /dev/null
+++ b/rp/rtr-origin/rtr-origin
@@ -0,0 +1,2278 @@
+#!/usr/bin/env python
+
+# Router origin-authentication rpki-router protocol implementation. See
+# draft-ietf-sidr-rpki-rtr in fine Internet-Draft repositories near you.
+#
+# Run the program with the --help argument for usage information, or see
+# documentation for the *_main() functions.
+#
+#
+# $Id$
+#
+# Copyright (C) 2009-2013 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+import sys
+import os
+import struct
+import time
+import glob
+import socket
+import fcntl
+import signal
+import syslog
+import errno
+import asyncore
+import asynchat
+import subprocess
+import traceback
+import getopt
+import bisect
+import random
+import base64
+
+
+# Debugging only, should be False in production
+disable_incrementals = False
+
+# Whether to log backtraces
+backtrace_on_exceptions = False
+
+class IgnoreThisRecord(Exception):
+ pass
+
+
+class timestamp(int):
+ """
+ Wrapper around time module.
+ """
+
+ def __new__(cls, x):
+ return int.__new__(cls, x)
+
+ @classmethod
+ def now(cls, delta = 0):
+ return cls(time.time() + delta)
+
+ def __str__(self):
+ return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(self))
+
+
+class ipaddr(object):
+ """
+ IP addresses.
+ """
+
+ def __init__(self, string = None, value = None):
+ assert (string is None) != (value is None)
+ if string is not None:
+ value = socket.inet_pton(self.af, string)
+ assert len(value) == self.size
+ self.value = value
+
+ def __str__(self):
+ return socket.inet_ntop(self.af, self.value)
+
+ def __cmp__(self, other):
+ return cmp(self.value, other.value)
+
+class v4addr(ipaddr):
+ af = socket.AF_INET
+ size = 4
+
+class v6addr(ipaddr):
+ af = socket.AF_INET6
+ size = 16
+
+def read_current():
+ """
+ Read current serial number and nonce. Return None for both if
+ serial and nonce not recorded. For backwards compatibility, treat
+ file containing just a serial number as having a nonce of zero.
+ """
+ try:
+ f = open("current", "r")
+ values = tuple(int(s) for s in f.read().split())
+ f.close()
+ return values[0], values[1]
+ except IndexError:
+ return values[0], 0
+ except IOError:
+ return None, None
+
+def write_current(serial, nonce):
+ """
+ Write serial number and nonce.
+ """
+ tmpfn = "current.%d.tmp" % os.getpid()
+ try:
+ f = open(tmpfn, "w")
+ f.write("%d %d\n" % (serial, nonce))
+ f.close()
+ os.rename(tmpfn, "current")
+ finally:
+ try:
+ os.unlink(tmpfn)
+ except:
+ pass
+
+
+def new_nonce():
+ """
+ Create and return a new nonce value.
+ """
+ if force_zero_nonce:
+ return 0
+ try:
+ return int(random.SystemRandom().getrandbits(16))
+ except NotImplementedError:
+ return int(random.getrandbits(16))
+
+
+class read_buffer(object):
+ """
+ Wrapper around synchronous/asynchronous read state.
+ """
+
+ def __init__(self):
+ self.buffer = ""
+
+ def update(self, need, callback):
+ """
+ Update count of needed bytes and callback, then dispatch to callback.
+ """
+ self.need = need
+ self.callback = callback
+ return self.callback(self)
+
+ def available(self):
+ """
+ How much data do we have available in this buffer?
+ """
+ return len(self.buffer)
+
+ def needed(self):
+ """
+ How much more data does this buffer need to become ready?
+ """
+ return self.need - self.available()
+
+ def ready(self):
+ """
+ Is this buffer ready to read yet?
+ """
+ return self.available() >= self.need
+
+ def get(self, n):
+ """
+ Hand some data to the caller.
+ """
+ b = self.buffer[:n]
+ self.buffer = self.buffer[n:]
+ return b
+
+ def put(self, b):
+ """
+ Accumulate some data.
+ """
+ self.buffer += b
+
+ def retry(self):
+ """
+ Try dispatching to the callback again.
+ """
+ return self.callback(self)
+
+class PDUException(Exception):
+ """
+ Parent exception type for exceptions that signal particular protocol
+ errors. String value of exception instance will be the message to
+ put in the error_report PDU, error_report_code value of exception
+ will be the numeric code to use.
+ """
+
+ def __init__(self, msg = None, pdu = None):
+ assert msg is None or isinstance(msg, (str, unicode))
+ self.error_report_msg = msg
+ self.error_report_pdu = pdu
+
+ def __str__(self):
+ return self.error_report_msg or self.__class__.__name__
+
+ def make_error_report(self):
+ return error_report(errno = self.error_report_code,
+ errmsg = self.error_report_msg,
+ errpdu = self.error_report_pdu)
+
+class UnsupportedProtocolVersion(PDUException):
+ error_report_code = 4
+
+class UnsupportedPDUType(PDUException):
+ error_report_code = 5
+
+class CorruptData(PDUException):
+ error_report_code = 0
+
+class pdu(object):
+ """
+ Object representing a generic PDU in the rpki-router protocol.
+ Real PDUs are subclasses of this class.
+ """
+
+ version = 0 # Protocol version
+
+ _pdu = None # Cached when first generated
+
+ header_struct = struct.Struct("!BBHL")
+
+ def __cmp__(self, other):
+ return cmp(self.to_pdu(), other.to_pdu())
+
+ def check(self):
+ """
+ Check attributes to make sure they're within range.
+ """
+ pass
+
+ @classmethod
+ def read_pdu(cls, reader):
+ return reader.update(need = cls.header_struct.size, callback = cls.got_header)
+
+ @classmethod
+ def got_header(cls, reader):
+ if not reader.ready():
+ return None
+ assert reader.available() >= cls.header_struct.size
+ version, pdu_type, whatever, length = cls.header_struct.unpack(reader.buffer[:cls.header_struct.size])
+ if version != cls.version:
+ raise UnsupportedProtocolVersion(
+ "Received PDU version %d, expected %d" % (version, cls.version))
+ if pdu_type not in cls.pdu_map:
+ raise UnsupportedPDUType(
+ "Received unsupported PDU type %d" % pdu_type)
+ if length < 8:
+ raise CorruptData(
+ "Received PDU with length %d, which is too short to be valid" % length)
+ self = cls.pdu_map[pdu_type]()
+ return reader.update(need = length, callback = self.got_pdu)
+
+ def consume(self, client):
+ """
+ Handle results in test client. Default behavior is just to print
+ out the PDU.
+ """
+ blather(self)
+
+ def send_file(self, server, filename):
+ """
+ Send a content of a file as a cache response. Caller should catch IOError.
+ """
+ f = open(filename, "rb")
+ server.push_pdu(cache_response(nonce = server.current_nonce))
+ server.push_file(f)
+ server.push_pdu(end_of_data(serial = server.current_serial, nonce = server.current_nonce))
+
+ def send_nodata(self, server):
+ """
+ Send a nodata error.
+ """
+ server.push_pdu(error_report(errno = error_report.codes["No Data Available"], errpdu = self))
+
+class pdu_with_serial(pdu):
+ """
+ Base class for PDUs consisting of just a serial number and nonce.
+ """
+
+ header_struct = struct.Struct("!BBHLL")
+
+ def __init__(self, serial = None, nonce = None):
+ if serial is not None:
+ assert isinstance(serial, int)
+ self.serial = serial
+ if nonce is not None:
+ assert isinstance(nonce, int)
+ self.nonce = nonce
+
+ def __str__(self):
+ return "[%s, serial #%d nonce %d]" % (self.__class__.__name__, self.serial, self.nonce)
+
+ def to_pdu(self):
+ """
+ Generate the wire format PDU.
+ """
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce,
+ self.header_struct.size, self.serial)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, self.nonce, length, self.serial = self.header_struct.unpack(b)
+ if length != 12:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
+
+class pdu_nonce(pdu):
+ """
+ Base class for PDUs consisting of just a nonce.
+ """
+
+ header_struct = struct.Struct("!BBHL")
+
+ def __init__(self, nonce = None):
+ if nonce is not None:
+ assert isinstance(nonce, int)
+ self.nonce = nonce
+
+ def __str__(self):
+ return "[%s, nonce %d]" % (self.__class__.__name__, self.nonce)
+
+ def to_pdu(self):
+ """
+ Generate the wire format PDU.
+ """
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.nonce, self.header_struct.size)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, self.nonce, length = self.header_struct.unpack(b)
+ if length != 8:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
+
+class pdu_empty(pdu):
+ """
+ Base class for empty PDUs.
+ """
+
+ header_struct = struct.Struct("!BBHL")
+
+ def __str__(self):
+ return "[%s]" % self.__class__.__name__
+
+ def to_pdu(self):
+ """
+ Generate the wire format PDU for this prefix.
+ """
+ if self._pdu is None:
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, 0, self.header_struct.size)
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b = reader.get(self.header_struct.size)
+ version, pdu_type, zero, length = self.header_struct.unpack(b)
+ if zero != 0:
+ raise CorruptData("Must-be-zero field isn't zero" % length, pdu = self)
+ if length != 8:
+ raise CorruptData("PDU length of %d can't be right" % length, pdu = self)
+ assert b == self.to_pdu()
+ return self
+
+class serial_notify(pdu_with_serial):
+ """
+ Serial Notify PDU.
+ """
+
+ pdu_type = 0
+
+ def consume(self, client):
+ """
+ Respond to a serial_notify message with either a serial_query or
+ reset_query, depending on what we already know.
+ """
+ blather(self)
+ if client.current_serial is None or client.current_nonce != self.nonce:
+ client.push_pdu(reset_query())
+ elif self.serial != client.current_serial:
+ client.push_pdu(serial_query(serial = client.current_serial, nonce = client.current_nonce))
+ else:
+ blather("[Notify did not change serial number, ignoring]")
+
+class serial_query(pdu_with_serial):
+ """
+ Serial Query PDU.
+ """
+
+ pdu_type = 1
+
+ def serve(self, server):
+ """
+ Received a serial query, send incremental transfer in response.
+ If client is already up to date, just send an empty incremental
+ transfer.
+ """
+ blather(self)
+ if server.get_serial() is None:
+ self.send_nodata(server)
+ elif server.current_nonce != self.nonce:
+ log("[Client requested wrong nonce, resetting client]")
+ server.push_pdu(cache_reset())
+ elif server.current_serial == self.serial:
+ blather("[Client is already current, sending empty IXFR]")
+ server.push_pdu(cache_response(nonce = server.current_nonce))
+ server.push_pdu(end_of_data(serial = server.current_serial, nonce = server.current_nonce))
+ elif disable_incrementals:
+ server.push_pdu(cache_reset())
+ else:
+ try:
+ self.send_file(server, "%d.ix.%d" % (server.current_serial, self.serial))
+ except IOError:
+ server.push_pdu(cache_reset())
+
+class reset_query(pdu_empty):
+ """
+ Reset Query PDU.
+ """
+
+ pdu_type = 2
+
+ def serve(self, server):
+ """
+ Received a reset query, send full current state in response.
+ """
+ blather(self)
+ if server.get_serial() is None:
+ self.send_nodata(server)
+ else:
+ try:
+ fn = "%d.ax" % server.current_serial
+ self.send_file(server, fn)
+ except IOError:
+ server.push_pdu(error_report(errno = error_report.codes["Internal Error"],
+ errpdu = self, errmsg = "Couldn't open %s" % fn))
+
+class cache_response(pdu_nonce):
+ """
+ Cache Response PDU.
+ """
+
+ pdu_type = 3
+
+ def consume(self, client):
+ """
+ Handle cache_response.
+ """
+ blather(self)
+ if self.nonce != client.current_nonce:
+ blather("[Nonce changed, resetting]")
+ client.cache_reset()
+
+class end_of_data(pdu_with_serial):
+ """
+ End of Data PDU.
+ """
+
+ pdu_type = 7
+
+ def consume(self, client):
+ """
+ Handle end_of_data response.
+ """
+ blather(self)
+ client.end_of_data(self.serial, self.nonce)
+
+class cache_reset(pdu_empty):
+ """
+ Cache reset PDU.
+ """
+
+ pdu_type = 8
+
+ def consume(self, client):
+ """
+ Handle cache_reset response, by issuing a reset_query.
+ """
+ blather(self)
+ client.cache_reset()
+ client.push_pdu(reset_query())
+
+class prefix(pdu):
+ """
+ Object representing one prefix. This corresponds closely to one PDU
+ in the rpki-router protocol, so closely that we use lexical ordering
+ of the wire format of the PDU as the ordering for this class.
+
+ This is a virtual class, but the .from_text() constructor
+ instantiates the correct concrete subclass (ipv4_prefix or
+ ipv6_prefix) depending on the syntax of its input text.
+ """
+
+ header_struct = struct.Struct("!BB2xLBBBx")
+ asnum_struct = struct.Struct("!L")
+
+ @staticmethod
+ def from_text(asnum, addr):
+ """
+ Construct a prefix from its text form.
+ """
+ cls = ipv6_prefix if ":" in addr else ipv4_prefix
+ self = cls()
+ self.asn = long(asnum)
+ p, l = addr.split("/")
+ self.prefix = self.addr_type(string = p)
+ if "-" in l:
+ self.prefixlen, self.max_prefixlen = tuple(int(i) for i in l.split("-"))
+ else:
+ self.prefixlen = self.max_prefixlen = int(l)
+ self.announce = 1
+ self.check()
+ return self
+
+ def __str__(self):
+ plm = "%s/%s-%s" % (self.prefix, self.prefixlen, self.max_prefixlen)
+ return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn, plm,
+ ":".join(("%02X" % ord(b) for b in self.to_pdu())))
+
+ def show(self):
+ blather("# Class: %s" % self.__class__.__name__)
+ blather("# ASN: %s" % self.asn)
+ blather("# Prefix: %s" % self.prefix)
+ blather("# Prefixlen: %s" % self.prefixlen)
+ blather("# MaxPrefixlen: %s" % self.max_prefixlen)
+ blather("# Announce: %s" % self.announce)
+
+ def consume(self, client):
+ """
+ Handle one incoming prefix PDU
+ """
+ blather(self)
+ client.consume_prefix(self)
+
+ def check(self):
+ """
+ Check attributes to make sure they're within range.
+ """
+ if self.announce not in (0, 1):
+ raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
+ if self.prefixlen < 0 or self.prefixlen > self.addr_type.size * 8:
+ raise CorruptData("Implausible prefix length %d" % self.prefixlen, pdu = self)
+ if self.max_prefixlen < self.prefixlen or self.max_prefixlen > self.addr_type.size * 8:
+ raise CorruptData("Implausible max prefix length %d" % self.max_prefixlen, pdu = self)
+ pdulen = self.header_struct.size + self.addr_type.size + self.asnum_struct.size
+ if len(self.to_pdu()) != pdulen:
+ raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
+
+ def to_pdu(self, announce = None):
+ """
+ Generate the wire format PDU for this prefix.
+ """
+ if announce is not None:
+ assert announce in (0, 1)
+ elif self._pdu is not None:
+ return self._pdu
+ pdulen = self.header_struct.size + self.addr_type.size + self.asnum_struct.size
+ pdu = (self.header_struct.pack(self.version, self.pdu_type, pdulen,
+ announce if announce is not None else self.announce,
+ self.prefixlen, self.max_prefixlen) +
+ self.prefix.value +
+ self.asnum_struct.pack(self.asn))
+ if announce is None:
+ assert self._pdu is None
+ self._pdu = pdu
+ return pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ b1 = reader.get(self.header_struct.size)
+ b2 = reader.get(self.addr_type.size)
+ b3 = reader.get(self.asnum_struct.size)
+ version, pdu_type, length, self.announce, self.prefixlen, self.max_prefixlen = self.header_struct.unpack(b1)
+ if length != len(b1) + len(b2) + len(b3):
+ raise CorruptData("Got PDU length %d, expected %d" % (length, len(b1) + len(b2) + len(b3)), pdu = self)
+ self.prefix = self.addr_type(value = b2)
+ self.asn = self.asnum_struct.unpack(b3)[0]
+ assert b1 + b2 + b3 == self.to_pdu()
+ return self
+
+ @staticmethod
+ def from_bgpdump(line, rib_dump):
+ try:
+ assert isinstance(rib_dump, bool)
+ fields = line.split("|")
+
+ # Parse prefix, including figuring out IP protocol version
+ cls = ipv6_prefix if ":" in fields[5] else ipv4_prefix
+ self = cls()
+ self.timestamp = timestamp(fields[1])
+ p, l = fields[5].split("/")
+ self.prefix = self.addr_type(p)
+ self.prefixlen = self.max_prefixlen = int(l)
+
+ # Withdrawals don't have AS paths, so be careful
+ assert fields[2] == "B" if rib_dump else fields[2] in ("A", "W")
+ if fields[2] == "W":
+ self.asn = 0
+ self.announce = 0
+ else:
+ self.announce = 1
+ if not fields[6] or "{" in fields[6] or "(" in fields[6]:
+ raise IgnoreThisRecord
+ a = fields[6].split()[-1]
+ if "." in a:
+ a = [int(s) for s in a.split(".")]
+ if len(a) != 2 or a[0] < 0 or a[0] > 65535 or a[1] < 0 or a[1] > 65535:
+ log("Bad dotted ASNum %r, ignoring record" % fields[6])
+ raise IgnoreThisRecord
+ a = (a[0] << 16) | a[1]
+ else:
+ a = int(a)
+ self.asn = a
+
+ self.check()
+ return self
+
+ except IgnoreThisRecord:
+ raise
+
+ except Exception, e:
+ log("Ignoring line %r: %s" % (line, e))
+ raise IgnoreThisRecord
+
+class ipv4_prefix(prefix):
+ """
+ IPv4 flavor of a prefix.
+ """
+ pdu_type = 4
+ addr_type = v4addr
+
+class ipv6_prefix(prefix):
+ """
+ IPv6 flavor of a prefix.
+ """
+ pdu_type = 6
+ addr_type = v6addr
+
+class router_key(pdu):
+ """
+ Router Key PDU.
+ """
+
+ pdu_type = 9
+
+ header_struct = struct.Struct("!BBBxL20sL")
+
+ @classmethod
+ def from_text(cls, asnum, gski, key):
+ """
+ Construct a router key from its text form.
+ """
+
+ self = cls()
+ self.asn = long(asnum)
+ self.ski = base64.urlsafe_b64decode(gski + "=")
+ self.key = base64.b64decode(key)
+ self.announce = 1
+ self.check()
+ return self
+
+ def __str__(self):
+ return "%s %8s %-32s %s" % ("+" if self.announce else "-", self.asn,
+ base64.urlsafe_b64encode(self.ski).rstrip("="),
+ ":".join(("%02X" % ord(b) for b in self.to_pdu())))
+
+ def consume(self, client):
+ """
+ Handle one incoming Router Key PDU
+ """
+
+ blather(self)
+ client.consume_routerkey(self)
+
+ def check(self):
+ """
+ Check attributes to make sure they're within range.
+ """
+
+ if self.announce not in (0, 1):
+ raise CorruptData("Announce value %d is neither zero nor one" % self.announce, pdu = self)
+ if len(self.ski) != 20:
+ raise CorruptData("Implausible SKI length %d" % len(self.ski), pdu = self)
+ pdulen = self.header_struct.size + len(self.key)
+ if len(self.to_pdu()) != pdulen:
+ raise CorruptData("Expected %d byte PDU, got %d" % (pdulen, len(self.to_pdu())), pdu = self)
+
+ def to_pdu(self, announce = None):
+ if announce is not None:
+ assert announce in (0, 1)
+ elif self._pdu is not None:
+ return self._pdu
+ pdulen = self.header_struct.size + len(self.key)
+ pdu = (self.header_struct.pack(self.version,
+ self.pdu_type,
+ announce if announce is not None else self.announce,
+ pdulen,
+ self.ski,
+ self.asn)
+ + self.key)
+ if announce is None:
+ assert self._pdu is None
+ self._pdu = pdu
+ return pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ header = reader.get(self.header_struct.size)
+ version, pdu_type, self.announce, length, self.ski, self.asn = self.header_struct.unpack(header)
+ remaining = length - self.header_struct.size
+ if remaining <= 0:
+ raise CorruptData("Got PDU length %d, minimum is %d" % (length, self.header_struct.size + 1), pdu = self)
+ self.key = reader.get(remaining)
+ assert header + self.key == self.to_pdu()
+ return self
+
+
+class error_report(pdu):
+ """
+ Error Report PDU.
+ """
+
+ pdu_type = 10
+
+ header_struct = struct.Struct("!BBHL")
+ string_struct = struct.Struct("!L")
+
+ errors = {
+ 2 : "No Data Available" }
+
+ fatal = {
+ 0 : "Corrupt Data",
+ 1 : "Internal Error",
+ 3 : "Invalid Request",
+ 4 : "Unsupported Protocol Version",
+ 5 : "Unsupported PDU Type",
+ 6 : "Withdrawal of Unknown Record",
+ 7 : "Duplicate Announcement Received" }
+
+ assert set(errors) & set(fatal) == set()
+
+ errors.update(fatal)
+
+ codes = dict((v, k) for k, v in errors.items())
+
+ def __init__(self, errno = None, errpdu = None, errmsg = None):
+ assert errno is None or errno in self.errors
+ self.errno = errno
+ self.errpdu = errpdu
+ self.errmsg = errmsg if errmsg is not None or errno is None else self.errors[errno]
+
+ def __str__(self):
+ return "[%s, error #%s: %r]" % (self.__class__.__name__, self.errno, self.errmsg)
+
+ def to_counted_string(self, s):
+ return self.string_struct.pack(len(s)) + s
+
+ def read_counted_string(self, reader, remaining):
+ assert remaining >= self.string_struct.size
+ n = self.string_struct.unpack(reader.get(self.string_struct.size))[0]
+ assert remaining >= self.string_struct.size + n
+ return n, reader.get(n), (remaining - self.string_struct.size - n)
+
+ def to_pdu(self):
+ """
+ Generate the wire format PDU for this error report.
+ """
+ if self._pdu is None:
+ assert isinstance(self.errno, int)
+ assert not isinstance(self.errpdu, error_report)
+ p = self.errpdu
+ if p is None:
+ p = ""
+ elif isinstance(p, pdu):
+ p = p.to_pdu()
+ assert isinstance(p, str)
+ pdulen = self.header_struct.size + self.string_struct.size * 2 + len(p) + len(self.errmsg)
+ self._pdu = self.header_struct.pack(self.version, self.pdu_type, self.errno, pdulen)
+ self._pdu += self.to_counted_string(p)
+ self._pdu += self.to_counted_string(self.errmsg.encode("utf8"))
+ return self._pdu
+
+ def got_pdu(self, reader):
+ if not reader.ready():
+ return None
+ header = reader.get(self.header_struct.size)
+ version, pdu_type, self.errno, length = self.header_struct.unpack(header)
+ remaining = length - self.header_struct.size
+ self.pdulen, self.errpdu, remaining = self.read_counted_string(reader, remaining)
+ self.errlen, self.errmsg, remaining = self.read_counted_string(reader, remaining)
+ if length != self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen:
+ raise CorruptData("Got PDU length %d, expected %d" % (
+ length, self.header_struct.size + self.string_struct.size * 2 + self.pdulen + self.errlen))
+ assert (header
+ + self.to_counted_string(self.errpdu)
+ + self.to_counted_string(self.errmsg.encode("utf8"))
+ == self.to_pdu())
+ return self
+
+ def serve(self, server):
+ """
+ Received an error_report from client. Not much we can do beyond
+ logging it, then killing the connection if error was fatal.
+ """
+ log(self)
+ if self.errno in self.fatal:
+ log("[Shutting down due to reported fatal protocol error]")
+ sys.exit(1)
+
+pdu.pdu_map = dict((p.pdu_type, p) for p in (ipv4_prefix, ipv6_prefix, serial_notify, serial_query, reset_query,
+ cache_response, end_of_data, cache_reset, router_key, error_report))
+
+class pdu_set(list):
+ """
+ Object representing a set of PDUs, that is, one versioned and
+ (theoretically) consistant set of prefixes and router keys extracted
+ from rcynic's output.
+ """
+
+ @classmethod
+ def _load_file(cls, filename):
+ """
+ Low-level method to read pdu_set from a file.
+ """
+ self = cls()
+ f = open(filename, "rb")
+ r = read_buffer()
+ while True:
+ p = pdu.read_pdu(r)
+ while p is None:
+ b = f.read(r.needed())
+ if b == "":
+ assert r.available() == 0
+ return self
+ r.put(b)
+ p = r.retry()
+ self.append(p)
+
+ @staticmethod
+ def seq_ge(a, b):
+ return ((a - b) % (1 << 32)) < (1 << 31)
+
+
class axfr_set(pdu_set):
  """
  Object representing a complete set of PDUs, that is, one versioned
  and (theoretically) consistent set of prefixes and router
  certificates extracted from rcynic's output, all with the announce
  field set.
  """

  @classmethod
  def parse_rcynic(cls, rcynic_dir):
    """
    Parse ROAS and router certificates fetched (and validated!) by
    rcynic to create a new axfr_set.  We use the scan_roas and
    scan_routercerts utilities to parse the ASN.1, although we may go
    back to parsing the files directly using the rpki.POW library code
    some day.
    """

    self = cls()
    self.serial = timestamp.now()

    try:
      # scan_roas output: one ROA per line, fields are URI (unused
      # here), ASN, then one or more prefixes.
      p = subprocess.Popen((scan_roas, rcynic_dir), stdout = subprocess.PIPE)
      for line in p.stdout:
        line = line.split()
        asn = line[1]
        self.extend(prefix.from_text(asn, addr) for addr in line[2:])
    except OSError, e:
      sys.exit("Could not run %s, check your $PATH variable? (%s)" % (scan_roas, e))

    try:
      # scan_routercerts output: gSKI, one or more ASNs, then the key.
      p = subprocess.Popen((scan_routercerts, rcynic_dir), stdout = subprocess.PIPE)
      for line in p.stdout:
        line = line.split()
        gski = line[0]
        key = line[-1]
        self.extend(router_key.from_text(asn, gski, key) for asn in line[1:-1])
    except OSError, e:
      sys.exit("Could not run %s, check your $PATH variable? (%s)" % (scan_routercerts, e))

    # Sort, then strip duplicates by comparing adjacent entries,
    # walking backwards so deletions don't disturb the indices.
    self.sort()
    for i in xrange(len(self) - 2, -1, -1):
      if self[i] == self[i + 1]:
        del self[i + 1]
    return self

  @classmethod
  def load(cls, filename):
    """
    Load an axfr_set from a file, parse filename to obtain serial.
    """
    fn1, fn2 = os.path.basename(filename).split(".")
    assert fn1.isdigit() and fn2 == "ax"
    self = cls._load_file(filename)
    self.serial = timestamp(fn1)
    return self

  def filename(self):
    """
    Generate filename for this axfr_set.
    """
    return "%d.ax" % self.serial

  @classmethod
  def load_current(cls):
    """
    Load current axfr_set.  Return None if can't.
    """
    serial = read_current()[0]
    if serial is None:
      return None
    try:
      return cls.load("%d.ax" % serial)
    except IOError:
      return None

  def save_axfr(self):
    """
    Write axfr_set to file with magic filename.
    """
    f = open(self.filename(), "wb")
    for p in self:
      f.write(p.to_pdu())
    f.close()

  def destroy_old_data(self):
    """
    Destroy old data files, presumably because our nonce changed and
    the old serial numbers are no longer valid.
    """
    for i in glob.iglob("*.ix.*"):
      os.unlink(i)
    for i in glob.iglob("*.ax"):
      if i != self.filename():
        os.unlink(i)

  def mark_current(self):
    """
    Save current serial number and nonce, creating new nonce if
    necessary.  Creating a new nonce triggers cleanup of old state, as
    the new nonce invalidates all old serial numbers.
    """
    old_serial, nonce = read_current()
    # Serial moving backwards (or no previous serial at all) means our
    # history is useless, so start over with a fresh nonce.
    if old_serial is None or self.seq_ge(old_serial, self.serial):
      blather("Creating new nonce and deleting stale data")
      nonce = new_nonce()
      self.destroy_old_data()
    write_current(self.serial, nonce)

  def save_ixfr(self, other):
    """
    Compare this axfr_set with an older one and write the resulting
    ixfr_set to file with magic filename.  Since we store pdu_sets
    in sorted order, computing the difference is a trivial linear
    comparison.
    """
    f = open("%d.ix.%d" % (self.serial, other.serial), "wb")
    old = other
    new = self
    len_old = len(old)
    len_new = len(new)
    i_old = i_new = 0
    # Classic sorted-merge diff: entries only in old become
    # withdrawals (announce = 0), entries only in new become
    # announcements (announce = 1), entries in both are skipped.
    while i_old < len_old and i_new < len_new:
      if old[i_old] < new[i_new]:
        f.write(old[i_old].to_pdu(announce = 0))
        i_old += 1
      elif old[i_old] > new[i_new]:
        f.write(new[i_new].to_pdu(announce = 1))
        i_new += 1
      else:
        i_old += 1
        i_new += 1
    for i in xrange(i_old, len_old):
      f.write(old[i].to_pdu(announce = 0))
    for i in xrange(i_new, len_new):
      f.write(new[i].to_pdu(announce = 1))
    f.close()

  def show(self):
    """
    Print this axfr_set.
    """
    blather("# AXFR %d (%s)" % (self.serial, self.serial))
    for p in self:
      blather(p)

  @staticmethod
  def read_bgpdump(filename):
    """
    Pipe a .bz2 BGP dump through bzip2 then bgpdump, returning the
    resulting text stream.  Debug/test helper only.
    """
    assert filename.endswith(".bz2")
    blather("Reading %s" % filename)
    bunzip2 = subprocess.Popen(("bzip2", "-c", "-d", filename), stdout = subprocess.PIPE)
    bgpdump = subprocess.Popen(("bgpdump", "-m", "-"), stdin = bunzip2.stdout, stdout = subprocess.PIPE)
    return bgpdump.stdout

  @classmethod
  def parse_bgpdump_rib_dump(cls, filename):
    """
    Build a new axfr_set from a BGP RIB dump, using the timestamp of
    the last record parsed as the serial.  Debug/test helper only.
    """
    assert os.path.basename(filename).startswith("ribs.")
    self = cls()
    self.serial = None
    for line in cls.read_bgpdump(filename):
      try:
        pfx = prefix.from_bgpdump(line, rib_dump = True)
      except IgnoreThisRecord:
        continue
      self.append(pfx)
      self.serial = pfx.timestamp
    if self.serial is None:
      sys.exit("Failed to parse anything useful from %s" % filename)
    # Sort, then strip duplicates, walking backwards as in
    # parse_rcynic().
    self.sort()
    for i in xrange(len(self) - 2, -1, -1):
      if self[i] == self[i + 1]:
        del self[i + 1]
    return self

  def parse_bgpdump_update(self, filename):
    """
    Apply a BGP UPDATE dump to this axfr_set in place, inserting
    announced prefixes and removing withdrawn ones.  Debug/test
    helper only.
    """
    assert os.path.basename(filename).startswith("updates.")
    for line in self.read_bgpdump(filename):
      try:
        pfx = prefix.from_bgpdump(line, rib_dump = False)
      except IgnoreThisRecord:
        continue
      announce = pfx.announce
      # Stored PDUs all have announce set, so force the search key to
      # match before bisecting.
      pfx.announce = 1
      i = bisect.bisect_left(self, pfx)
      if announce:
        if i >= len(self) or pfx != self[i]:
          self.insert(i, pfx)
      else:
        # Withdrawal: remove every entry for this prefix/prefixlen.
        while i < len(self) and pfx.prefix == self[i].prefix and pfx.prefixlen == self[i].prefixlen:
          del self[i]
      self.serial = pfx.timestamp
+
class ixfr_set(pdu_set):
  """
  Object representing an incremental set of PDUs: the differences
  between two versioned and (theoretically) consistent sets of
  prefixes and router certificates extracted from rcynic's output,
  with the announce fields set or cleared as needed to express the
  changes.
  """

  @classmethod
  def load(cls, filename):
    """
    Load an ixfr_set from a file, parsing the filename to recover the
    pair of serial numbers it connects.
    """
    to_part, tag, from_part = os.path.basename(filename).split(".")
    assert to_part.isdigit() and tag == "ix" and from_part.isdigit()
    result = cls._load_file(filename)
    result.from_serial = timestamp(from_part)
    result.to_serial = timestamp(to_part)
    return result

  def filename(self):
    """
    Generate the magic filename for this ixfr_set.
    """
    return "%d.ix.%d" % (self.to_serial, self.from_serial)

  def show(self):
    """
    Print this ixfr_set via blather().
    """
    blather("# IXFR %d (%s) -> %d (%s)" % (self.from_serial, self.from_serial,
                                           self.to_serial, self.to_serial))
    for entry in self:
      blather(entry)
+
class file_producer(object):
  """
  File-based producer object for asynchat: wraps an open file handle
  so push_with_producer() can stream it out in fixed-size chunks.
  """

  def __init__(self, handle, buffersize):
    self.handle = handle
    self.buffersize = buffersize

  def more(self):
    """
    Return the next chunk of data; empty string signals EOF.
    """
    chunk = self.handle.read(self.buffersize)
    return chunk
+
class pdu_channel(asynchat.async_chat):
  """
  asynchat subclass that understands our PDUs.  This just handles
  network I/O.  Specific engines (client, server) should be subclasses
  of this with methods that do something useful with the resulting
  PDUs.
  """

  def __init__(self, conn = None):
    # self.reader accumulates raw bytes until a whole PDU can be parsed.
    asynchat.async_chat.__init__(self, conn)
    self.reader = read_buffer()

  def start_new_pdu(self):
    """
    Start read of a new PDU.

    Drain any PDUs already complete in the read buffer, then tell
    asynchat how many more bytes the parser needs.  On a protocol
    error, send an error report PDU and shut the channel down.
    """
    try:
      p = pdu.read_pdu(self.reader)
      while p is not None:
        self.deliver_pdu(p)
        p = pdu.read_pdu(self.reader)
    except PDUException, e:
      self.push_pdu(e.make_error_report())
      self.close_when_done()
    else:
      assert not self.reader.ready()
      self.set_terminator(self.reader.needed())

  def collect_incoming_data(self, data):
    """
    Collect data into the read buffer.
    """
    self.reader.put(data)

  def found_terminator(self):
    """
    Got requested data, see if we now have a PDU.  If so, pass it
    along, then restart cycle for a new PDU.
    """
    p = self.reader.retry()
    if p is None:
      # Still not enough bytes; ask asynchat for the rest.
      self.set_terminator(self.reader.needed())
    else:
      self.deliver_pdu(p)
      self.start_new_pdu()

  def push_pdu(self, pdu):
    """
    Write PDU to stream.  EAGAIN just means the output side is
    momentarily full, so swallow it and let asyncore retry.
    """
    try:
      self.push(pdu.to_pdu())
    except OSError, e:
      if e.errno != errno.EAGAIN:
        raise

  def push_file(self, f):
    """
    Write content of a file to stream, using a producer so the whole
    file need not be held in memory.
    """
    try:
      self.push_with_producer(file_producer(f, self.ac_out_buffer_size))
    except OSError, e:
      if e.errno != errno.EAGAIN:
        raise

  def log(self, msg):
    """
    Intercept asyncore's logging.
    """
    log(msg)

  def log_info(self, msg, tag = "info"):
    """
    Intercept asynchat's logging.
    """
    log("asynchat: %s: %s" % (tag, msg))

  def handle_error(self):
    """
    Handle errors caught by asyncore main loop: log (with backtrace if
    enabled), then exit.

    NOTE(review): "e == 0" compares an exception *instance* to zero,
    which is false for ordinary exceptions -- presumably intended to
    detect SystemExit(0); confirm before relying on it.
    """
    c, e = sys.exc_info()[:2]
    if backtrace_on_exceptions or e == 0:
      for line in traceback.format_exc().splitlines():
        log(line)
    else:
      log("[Exception: %s: %s]" % (c.__name__, e))
    log("[Exiting after unhandled exception]")
    sys.exit(1)

  def init_file_dispatcher(self, fd):
    """
    Kludge to plug asyncore.file_dispatcher into asynchat.  Call from
    subclass's __init__() method, after calling
    pdu_channel.__init__(), and don't read this on a full stomach.
    """
    self.connected = True
    self._fileno = fd
    self.socket = asyncore.file_wrapper(fd)
    self.add_channel()
    # asyncore needs the descriptor in non-blocking mode.
    flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
    flags = flags | os.O_NONBLOCK
    fcntl.fcntl(fd, fcntl.F_SETFL, flags)

  def handle_close(self):
    """
    Exit when channel closed.
    """
    asynchat.async_chat.handle_close(self)
    sys.exit(0)
+
class server_write_channel(pdu_channel):
  """
  Kludge to deal with ssh's habit of sometimes (compile time option)
  invoking us with two unidirectional pipes instead of one
  bidirectional socketpair.  All the server logic is in the
  server_channel class, this class just deals with sending the
  server's output to a different file descriptor.
  """

  def __init__(self):
    """
    Set up stdout.
    """
    # Write-only half: wrap stdout as this channel's descriptor.
    pdu_channel.__init__(self)
    self.init_file_dispatcher(sys.stdout.fileno())

  def readable(self):
    """
    This channel is never readable.
    """
    return False
+
class server_channel(pdu_channel):
  """
  Server protocol engine, handles upcalls from pdu_channel to
  implement protocol logic.
  """

  def __init__(self):
    """
    Set up stdin and stdout as connection and start listening for
    first PDU.
    """
    pdu_channel.__init__(self)
    self.init_file_dispatcher(sys.stdin.fileno())
    # Output goes out via a separate write channel (see
    # server_write_channel for why).
    self.writer = server_write_channel()
    self.get_serial()
    self.start_new_pdu()

  def writable(self):
    """
    This channel is never writable.
    """
    return False

  def push(self, data):
    """
    Redirect to writer channel.
    """
    return self.writer.push(data)

  def push_with_producer(self, producer):
    """
    Redirect to writer channel.
    """
    return self.writer.push_with_producer(producer)

  def push_pdu(self, pdu):
    """
    Redirect to writer channel.
    """
    return self.writer.push_pdu(pdu)

  def push_file(self, f):
    """
    Redirect to writer channel.
    """
    return self.writer.push_file(f)

  def deliver_pdu(self, pdu):
    """
    Handle received PDU.
    """
    pdu.serve(self)

  def get_serial(self):
    """
    Read, cache, and return current serial number, or None if we can't
    find the serial number file.  The latter condition should never
    happen, but maybe we got started in server mode while the cronjob
    mode instance is still building its database.
    """
    self.current_serial, self.current_nonce = read_current()
    return self.current_serial

  def check_serial(self):
    """
    Refresh the cached serial number; return whether it changed.
    """
    old_serial = self.current_serial
    return old_serial != self.get_serial()

  def notify(self, data = None):
    """
    Cronjob instance kicked us, send a notify message.

    Bug fix: the old test "self.check_serial() is not None" was always
    true, because check_serial() returns a bool, so the error branch
    was unreachable and we could push a serial_notify PDU with a None
    serial.  Refresh the serial first, then only notify when we
    actually have a valid one.
    """
    self.check_serial()
    if self.current_serial is not None:
      self.push_pdu(serial_notify(serial = self.current_serial, nonce = self.current_nonce))
    else:
      log("Cronjob kicked me without a valid current serial number")
+
+class client_channel(pdu_channel):
+ """
+ Client protocol engine, handles upcalls from pdu_channel.
+ """
+
+ current_serial = None
+ current_nonce = None
+ sql = None
+ host = None
+ port = None
+ cache_id = None
+
+ def __init__(self, sock, proc, killsig, host, port):
+ self.killsig = killsig
+ self.proc = proc
+ self.host = host
+ self.port = port
+ pdu_channel.__init__(self, conn = sock)
+ self.start_new_pdu()
+
+ @classmethod
+ def ssh(cls, host, port):
+ """
+ Set up ssh connection and start listening for first PDU.
+ """
+ args = ("ssh", "-p", port, "-s", host, "rpki-rtr")
+ blather("[Running ssh: %s]" % " ".join(args))
+ s = socket.socketpair()
+ return cls(sock = s[1],
+ proc = subprocess.Popen(args, executable = "/usr/bin/ssh",
+ stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGKILL,
+ host = host, port = port)
+
+ @classmethod
+ def tcp(cls, host, port):
+ """
+ Set up TCP connection and start listening for first PDU.
+ """
+ blather("[Starting raw TCP connection to %s:%s]" % (host, port))
+ try:
+ addrinfo = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)
+ except socket.error, e:
+ blather("[socket.getaddrinfo() failed: %s]" % e)
+ else:
+ for ai in addrinfo:
+ af, socktype, proto, cn, sa = ai
+ blather("[Trying addr %s port %s]" % sa[:2])
+ try:
+ s = socket.socket(af, socktype, proto)
+ except socket.error, e:
+ blather("[socket.socket() failed: %s]" % e)
+ continue
+ try:
+ s.connect(sa)
+ except socket.error, e:
+ blather("[socket.connect() failed: %s]" % e)
+ s.close()
+ continue
+ return cls(sock = s, proc = None, killsig = None,
+ host = host, port = port)
+ sys.exit(1)
+
+ @classmethod
+ def loopback(cls, host, port):
+ """
+ Set up loopback connection and start listening for first PDU.
+ """
+ s = socket.socketpair()
+ blather("[Using direct subprocess kludge for testing]")
+ argv = [sys.executable, sys.argv[0], "--server"]
+ if "--syslog" in sys.argv:
+ argv.extend(("--syslog", sys.argv[sys.argv.index("--syslog") + 1]))
+ return cls(sock = s[1],
+ proc = subprocess.Popen(argv, stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGINT,
+ host = host, port = port)
+
+ @classmethod
+ def tls(cls, host, port):
+ """
+ Set up TLS connection and start listening for first PDU.
+
+ NB: This uses OpenSSL's "s_client" command, which does not
+ check server certificates properly, so this is not suitable for
+ production use. Fixing this would be a trivial change, it just
+ requires using a client program which does check certificates
+ properly (eg, gnutls-cli, or stunnel's client mode if that works
+ for such purposes this week).
+ """
+ args = ("openssl", "s_client", "-tls1", "-quiet", "-connect", "%s:%s" % (host, port))
+ blather("[Running: %s]" % " ".join(args))
+ s = socket.socketpair()
+ return cls(sock = s[1],
+ proc = subprocess.Popen(args, stdin = s[0], stdout = s[0], close_fds = True),
+ killsig = signal.SIGKILL,
+ host = host, port = port)
+
+ def setup_sql(self, sqlname):
+ """
+ Set up an SQLite database to contain the table we receive. If
+ necessary, we will create the database.
+ """
+ import sqlite3
+ missing = not os.path.exists(sqlname)
+ self.sql = sqlite3.connect(sqlname, detect_types = sqlite3.PARSE_DECLTYPES)
+ self.sql.text_factory = str
+ cur = self.sql.cursor()
+ cur.execute("PRAGMA foreign_keys = on")
+ if missing:
+ cur.execute('''
+ CREATE TABLE cache (
+ cache_id INTEGER PRIMARY KEY NOT NULL,
+ host TEXT NOT NULL,
+ port TEXT NOT NULL,
+ nonce INTEGER,
+ serial INTEGER,
+ updated INTEGER,
+ UNIQUE (host, port))''')
+ cur.execute('''
+ CREATE TABLE prefix (
+ cache_id INTEGER NOT NULL
+ REFERENCES cache(cache_id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ asn INTEGER NOT NULL,
+ prefix TEXT NOT NULL,
+ prefixlen INTEGER NOT NULL,
+ max_prefixlen INTEGER NOT NULL,
+ UNIQUE (cache_id, asn, prefix, prefixlen, max_prefixlen))''')
+
+ cur.execute('''
+ CREATE TABLE routerkey (
+ cache_id INTEGER NOT NULL
+ REFERENCES cache(cache_id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ asn INTEGER NOT NULL,
+ ski TEXT NOT NULL,
+ key TEXT NOT NULL,
+ UNIQUE (cache_id, asn, ski),
+ UNIQUE (cache_id, asn, key))''')
+
+ cur.execute("SELECT cache_id, nonce, serial FROM cache WHERE host = ? AND port = ?",
+ (self.host, self.port))
+ try:
+ self.cache_id, self.current_nonce, self.current_serial = cur.fetchone()
+ except TypeError:
+ cur.execute("INSERT INTO cache (host, port) VALUES (?, ?)", (self.host, self.port))
+ self.cache_id = cur.lastrowid
+ self.sql.commit()
+
+ def cache_reset(self):
+ """
+ Handle cache_reset actions.
+ """
+ self.current_serial = None
+ if self.sql:
+ cur = self.sql.cursor()
+ cur.execute("DELETE FROM prefix WHERE cache_id = ?", (self.cache_id,))
+ cur.execute("UPDATE cache SET serial = NULL WHERE cache_id = ?", (self.cache_id,))
+
+ def end_of_data(self, serial, nonce):
+ """
+ Handle end_of_data actions.
+ """
+ self.current_serial = serial
+ self.current_nonce = nonce
+ if self.sql:
+ self.sql.execute("UPDATE cache SET serial = ?, nonce = ?, updated = datetime('now') WHERE cache_id = ?",
+ (serial, nonce, self.cache_id))
+ self.sql.commit()
+
+ def consume_prefix(self, prefix):
+ """
+ Handle one prefix PDU.
+ """
+ if self.sql:
+ values = (self.cache_id, prefix.asn, str(prefix.prefix), prefix.prefixlen, prefix.max_prefixlen)
+ if prefix.announce:
+ self.sql.execute("INSERT INTO prefix (cache_id, asn, prefix, prefixlen, max_prefixlen) "
+ "VALUES (?, ?, ?, ?, ?)",
+ values)
+ else:
+ self.sql.execute("DELETE FROM prefix "
+ "WHERE cache_id = ? AND asn = ? AND prefix = ? AND prefixlen = ? AND max_prefixlen = ?",
+ values)
+
+
+ def consume_routerkey(self, routerkey):
+ """
+ Handle one Router Key PDU.
+ """
+
+ if self.sql:
+ values = (self.cache_id, routerkey.asn,
+ base64.urlsafe_b64encode(routerkey.ski).rstrip("="),
+ base64.b64encode(routerkey.key))
+ if routerkey.announce:
+ self.sql.execute("INSERT INTO routerkey (cache_id, asn, ski, key) "
+ "VALUES (?, ?, ?, ?)",
+ values)
+ else:
+ self.sql.execute("DELETE FROM routerkey "
+ "WHERE cache_id = ? AND asn = ? AND (ski = ? OR key = ?)",
+ values)
+
+
+ def deliver_pdu(self, pdu):
+ """
+ Handle received PDU.
+ """
+ pdu.consume(self)
+
+ def push_pdu(self, pdu):
+ """
+ Log outbound PDU then write it to stream.
+ """
+ blather(pdu)
+ pdu_channel.push_pdu(self, pdu)
+
+ def cleanup(self):
+ """
+ Force clean up this client's child process. If everything goes
+ well, child will have exited already before this method is called,
+ but we may need to whack it with a stick if something breaks.
+ """
+ if self.proc is not None and self.proc.returncode is None:
+ try:
+ os.kill(self.proc.pid, self.killsig)
+ except OSError:
+ pass
+
+ def handle_close(self):
+ """
+ Intercept close event so we can log it, then shut down.
+ """
+ blather("Server closed channel")
+ pdu_channel.handle_close(self)
+
class kickme_channel(asyncore.dispatcher):
  """
  asyncore dispatcher for the PF_UNIX socket that cronjob mode uses to
  kick servers when it's time to send notify PDUs to clients.
  """

  def __init__(self, server):
    asyncore.dispatcher.__init__(self)
    self.server = server
    # One kickme socket per server process, distinguished by PID.
    self.sockname = "%s.%d" % (kickme_base, os.getpid())
    self.create_socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
      self.bind(self.sockname)
      os.chmod(self.sockname, 0660)
    except socket.error, e:
      log("Couldn't bind() kickme socket: %r" % e)
      self.close()
    except OSError, e:
      # chmod failure is non-fatal: socket still works, just with
      # default permissions.
      log("Couldn't chmod() kickme socket: %r" % e)

  def writable(self):
    """
    This socket is read-only, never writable.
    """
    return False

  def handle_connect(self):
    """
    Ignore connect events (not very useful on datagram socket).
    """
    pass

  def handle_read(self):
    """
    Handle receipt of a datagram.
    """
    data = self.recv(512)
    self.server.notify(data)

  def cleanup(self):
    """
    Clean up this dispatcher's socket.
    """
    self.close()
    try:
      # Best effort: socket file may already be gone.
      os.unlink(self.sockname)
    except:
      pass

  def log(self, msg):
    """
    Intercept asyncore's logging.
    """
    log(msg)

  def log_info(self, msg, tag = "info"):
    """
    Intercept asyncore's logging.
    """
    log("asyncore: %s: %s" % (tag, msg))

  def handle_error(self):
    """
    Handle errors caught by asyncore main loop.

    NOTE(review): duplicates pdu_channel.handle_error(), including the
    odd "e == 0" test (exception instance compared to zero) -- confirm
    intent before relying on that branch.
    """
    c, e = sys.exc_info()[:2]
    if backtrace_on_exceptions or e == 0:
      for line in traceback.format_exc().splitlines():
        log(line)
    else:
      log("[Exception: %s: %s]" % (c.__name__, e))
    log("[Exiting after unhandled exception]")
    sys.exit(1)
+
+
def hostport_tag():
  """
  Construct hostname/address + port when we're running under a
  protocol we understand well enough to do that.  This is all
  kludgery.  Just grit your teeth, or perhaps just close your eyes.

  Probes, in order: IPv4 peer of stdin, IPv6 peer of stdin, ssh
  environment variables, stunnel-ish environment variables.  Returns
  "" if nothing works.
  """

  host = port = proto = None

  try:
    host, port = socket.fromfd(0, socket.AF_INET, socket.SOCK_STREAM).getpeername()
    proto = "tcp"
  except:
    pass

  if proto is None:
    try:
      host, port = socket.fromfd(0, socket.AF_INET6, socket.SOCK_STREAM).getpeername()[0:2]
      proto = "tcp"
    except:
      pass

  if proto is None:
    try:
      host, port = os.environ["SSH_CONNECTION"].split()[0:2]
      proto = "ssh"
    except:
      pass

  if proto is None:
    try:
      host, port = os.environ["REMOTE_HOST"], os.getenv("REMOTE_PORT")
      proto = "ssl"
    except:
      pass

  if proto is None:
    return ""
  if not port:
    return "/%s/%s" % (proto, host)
  if ":" in host:
    # IPv6 literal: use "." so the port isn't confused with the address.
    return "/%s/%s.%s" % (proto, host, port)
  return "/%s/%s:%s" % (proto, host, port)
+
+
+def kick_all(serial):
+ """
+ Kick any existing server processes to wake them up.
+ """
+
+ try:
+ os.stat(kickme_dir)
+ except OSError:
+ blather('# Creating directory "%s"' % kickme_dir)
+ os.makedirs(kickme_dir)
+
+ msg = "Good morning, serial %d is ready" % serial
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ for name in glob.iglob("%s.*" % kickme_base):
+ try:
+ blather("# Kicking %s" % name)
+ sock.sendto(msg, name)
+ except socket.error:
+ try:
+ blather("# Failed to kick %s, probably dead socket, attempting cleanup" % name)
+ os.unlink(name)
+ except Exception, e:
+ blather("# Couldn't unlink suspected dead socket %s: %s" % (name, e))
+ except Exception, e:
+ log("# Failed to kick %s and don't understand why: %s" % (name, e))
+ sock.close()
+
def cronjob_main(argv):
  """
  Run this mode right after rcynic to do the real work of groveling
  through the ROAs that rcynic collects and translating that data into
  the form used in the rpki-router protocol.  This mode prepares both
  full dumps (AXFR) and incremental dumps against a specific prior
  version (IXFR).  [Terminology here borrowed from DNS, as is much of
  the protocol design.]  Finally, this mode kicks any active servers,
  so that they can notify their clients that a new version is
  available.

  Run this in the directory where you want to write its output files,
  which should also be the directory in which you run this program in
  --server mode.

  This mode takes one argument on the command line, which specifies
  the directory name of rcynic's authenticated output tree (normally
  $somewhere/rcynic-data/authenticated/).
  """

  if len(argv) != 1:
    sys.exit("Expected one argument, got %r" % (argv,))

  # Snapshot the existing incremental files now; they are deleted at
  # the end, after fresh ones have been generated against the new AXFR.
  old_ixfrs = glob.glob("*.ix.*")

  # Expire full dumps more than 24 hours old, except the current one.
  current = read_current()[0]
  cutoff = timestamp.now(-(24 * 60 * 60))
  for f in glob.iglob("*.ax"):
    t = timestamp(int(f.split(".")[0]))
    if t < cutoff and t != current:
      blather("# Deleting old file %s, timestamp %s" % (f, t))
      os.unlink(f)

  pdus = axfr_set.parse_rcynic(argv[0])
  if pdus == axfr_set.load_current():
    blather("# No change, new version not needed")
    sys.exit()
  pdus.save_axfr()
  # Generate an incremental diff from each remaining older AXFR.
  for axfr in glob.iglob("*.ax"):
    if axfr != pdus.filename():
      pdus.save_ixfr(axfr_set.load(axfr))
  pdus.mark_current()

  blather("# New serial is %d (%s)" % (pdus.serial, pdus.serial))

  kick_all(pdus.serial)

  old_ixfrs.sort()
  for ixfr in old_ixfrs:
    try:
      blather("# Deleting old file %s" % ixfr)
      os.unlink(ixfr)
    except OSError:
      pass
+
def show_main(argv):
  """
  Display dumps created by --cronjob mode in textual form.
  Intended only for debugging.

  This mode takes no command line arguments.  Run it in the directory
  where you ran --cronjob mode.
  """

  if argv:
    sys.exit("Unexpected arguments: %r" % (argv,))

  for name in sorted(glob.glob("*.ax")):
    axfr_set.load(name).show()

  for name in sorted(glob.glob("*.ix.*")):
    ixfr_set.load(name).show()
+
def server_main(argv):
  """
  Implement the server side of the rpkk-router protocol.  Other than
  one PF_UNIX socket inode, this doesn't write anything to disk, so it
  can be run with minimal privileges.  Most of the hard work has
  already been done in --cronjob mode, so all that this mode has to do
  is serve up the results.

  In production use this server should run under sshd.  The subsystem
  mechanism in sshd does not allow us to pass arguments on the command
  line, so setting this up might require a wrapper script, but in
  production use you will probably want to lock down the public key
  used to authenticate the ssh session so that it can only run this
  one command, in which case you can just specify the full command
  including any arguments in the authorized_keys file.

  Unless you do something special, sshd will have this program running
  in whatever it thinks is the home directory associated with the
  username given in the ssh prototocol setup, so it may be easiest to
  set this up so that the home directory sshd puts this program into
  is the one where --cronjob left its files for this mode to pick up.

  This mode must be run in the directory where you ran --cronjob mode.

  This mode takes one optional argument: if provided, the argument is
  the name of a directory to which the program should chdir() on
  startup; this may simplify setup when running under inetd.

  The server is event driven, so everything interesting happens in the
  channel classes.
  """

  blather("[Starting]")
  if len(argv) > 1:
    sys.exit("Unexpected arguments: %r" % (argv,))
  if argv:
    try:
      os.chdir(argv[0])
    except OSError, e:
      sys.exit(e)
  # kickme must be defined before the try so the finally can see it
  # even if kickme_channel() itself blows up.
  kickme = None
  try:
    server = server_channel()
    kickme = kickme_channel(server = server)
    asyncore.loop(timeout = None)
  except KeyboardInterrupt:
    sys.exit(0)
  finally:
    # Always remove our kickme socket inode on the way out.
    if kickme is not None:
      kickme.cleanup()
+
+
def listener_tcp_main(argv):
  """
  Simple plain-TCP listener.  Listens on a specified TCP port, upon
  receiving a connection, forks the process and starts child executing
  at server_main().

  First argument (required) is numeric port number.

  Second argument (optional) is directory, like --server.

  NB: plain-TCP is completely insecure.  We only implement this
  because it's all that the routers currently support.  In theory, we
  will all be running TCP-AO in the future, at which point this will
  go away.
  """

  # Perhaps we should daemonize?  Deal with that later.

  if len(argv) > 2:
    sys.exit("Unexpected arguments: %r" % (argv,))
  try:
    # Accept either a numeric port or a service name.
    port = int(argv[0]) if argv[0].isdigit() else socket.getservbyname(argv[0], "tcp")
  except:
    sys.exit("Couldn't parse port number on which to listen")
  if len(argv) > 1:
    try:
      os.chdir(argv[1])
    except OSError, e:
      sys.exit(e)
  # Prefer a dual-stack IPv6 socket; fall back to IPv4 if that fails.
  listener = None
  try:
    listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    listener.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
  except:
    if listener is not None:
      listener.close()
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
  try:
    # SO_REUSEPORT doesn't exist on all platforms.
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
  except AttributeError:
    pass
  listener.bind(("", port))
  listener.listen(5)
  blather("[Listening on port %s]" % port)
  while True:
    s, ai = listener.accept()
    blather("[Received connection from %r]" % (ai,))
    pid = os.fork()
    if pid == 0:
      # Child: wire the accepted socket to stdin/stdout, re-tag our
      # syslog identity, and become a --server instance.
      os.dup2(s.fileno(), 0)
      os.dup2(s.fileno(), 1)
      s.close()
      #os.closerange(3, os.sysconf("SC_OPEN_MAX"))
      global log_tag
      log_tag = "rtr-origin/server" + hostport_tag()
      syslog.closelog()
      syslog.openlog(log_tag, syslog.LOG_PID, syslog_facility)
      server_main(())
      sys.exit()
    else:
      blather("[Spawned server %d]" % pid)
      # Parent: reap any children that have finished; bare except
      # covers ECHILD from waitpid() when there are none.
      try:
        while True:
          pid, status = os.waitpid(0, os.WNOHANG)
          if pid:
            blather("[Server %s exited]" % pid)
          else:
            break
      except:
        pass
+
+
def client_main(argv):
  """
  Toy client, intended only for debugging.

  This program takes one or more arguments.  The first argument
  determines what kind of connection it should open to the server, the
  remaining arguments are connection details specific to this
  particular type of connection.

  If the first argument is "loopback", the client will run a copy of
  the server directly in a subprocess, and communicate with it via a
  PF_UNIX socket pair.  This sub-mode takes no further arguments.

  If the first argument is "ssh", the client will attempt to run ssh
  in as subprocess to connect to the server using the ssh subsystem
  mechanism as specified for this protocol.  The remaining arguments
  should be a hostname (or IP address in a form acceptable to ssh) and
  a TCP port number.

  If the first argument is "tcp", the client will attempt to open a
  direct (and completely insecure!) TCP connection to the server.
  The remaining arguments should be a hostname (or IP address) and
  a TCP port number.

  If the first argument is "tls", the client will attempt to open a
  TLS connection to the server.  The remaining arguments should be a
  hostname (or IP address) and a TCP port number.

  An optional final name is the name of a file containing a SQLite
  database in which to store the received table.  If specified, this
  database will be created if missing.
  """

  blather("[Startup]")
  client = None
  if not argv:
    argv = ["loopback"]
  proto = argv[0]
  if proto == "loopback" and len(argv) in (1, 2):
    constructor = client_channel.loopback
    host, port = "", ""
    sqlname = None if len(argv) == 1 else argv[1]
  elif proto in ("ssh", "tcp", "tls") and len(argv) in (3, 4):
    # Dispatch to the matching classmethod constructor on client_channel.
    constructor = getattr(client_channel, proto)
    host, port = argv[1:3]
    sqlname = None if len(argv) == 3 else argv[3]
  else:
    sys.exit("Unexpected arguments: %s" % " ".join(argv))

  try:
    client = constructor(host, port)
    if sqlname:
      client.setup_sql(sqlname)
    # Poll loop: send a reset_query until we have serial and nonce,
    # then serial_query; wake up every 600 seconds to re-poll.
    while True:
      if client.current_serial is None or client.current_nonce is None:
        client.push_pdu(reset_query())
      else:
        client.push_pdu(serial_query(serial = client.current_serial, nonce = client.current_nonce))
      wakeup = time.time() + 600
      while True:
        remaining = wakeup - time.time()
        if remaining < 0:
          break
        asyncore.loop(timeout = remaining, count = 1)

  except KeyboardInterrupt:
    sys.exit(0)
  finally:
    # Make sure any transport subprocess is killed on the way out.
    if client is not None:
      client.cleanup()
+
def bgpdump_convert_main(argv):
  """
  Simulate route origin data from a set of BGP dump files.

  * DANGER WILL ROBINSON! *
  * DEBUGGING AND TEST USE ONLY! *

  argv is an ordered list of filenames.  Each file must be a BGP RIB
  dumps, a BGP UPDATE dumps, or an AXFR dump in the format written by
  this program's --cronjob command.  The first file must be a RIB dump
  or AXFR dump, it cannot be an UPDATE dump.  Output will be a set of
  AXFR and IXFR files with timestamps derived from the BGP dumps,
  which can be used as input to this program's --server command for
  test purposes.  SUCH DATA PROVIDE NO SECURITY AT ALL.

  You have been warned.
  """

  first = True
  db = None
  axfrs = []

  for filename in argv:

    if filename.endswith(".ax"):
      # Pre-converted AXFR dump: just load it as the new baseline.
      blather("Reading %s" % filename)
      db = axfr_set.load(filename)

    elif os.path.basename(filename).startswith("ribs."):
      # Full RIB dump: becomes a complete new data set.
      db = axfr_set.parse_bgpdump_rib_dump(filename)
      db.save_axfr()

    elif not first:
      # UPDATE dump: applied incrementally to the current data set.
      assert db is not None
      db.parse_bgpdump_update(filename)
      db.save_axfr()

    else:
      sys.exit("First argument must be a RIB dump or .ax file, don't know what to do with %s" % filename)

    blather("DB serial now %d (%s)" % (db.serial, db.serial))
    # Only claim currency on the first file, and only if nothing else
    # has claimed it already.
    if first and read_current() == (None, None):
      db.mark_current()
    first = False

    # Generate IXFRs from every earlier data set to this one.
    for axfr in axfrs:
      blather("Loading %s" % axfr)
      ax = axfr_set.load(axfr)
      blather("Computing changes from %d (%s) to %d (%s)" % (ax.serial, ax.serial, db.serial, db.serial))
      db.save_ixfr(ax)
      del ax

    axfrs.append(db.filename())
+
+
def bgpdump_select_main(argv):
  """
  Simulate route origin data from a set of BGP dump files.

  * DANGER WILL ROBINSON! *
  * DEBUGGING AND TEST USE ONLY! *

  Set current serial number to correspond to an .ax file created by
  converting BGP dump files.  SUCH DATA PROVIDE NO SECURITY AT ALL.

  You have been warned.
  """

  # Accept exactly one argument of the form "<digits>.ax".
  serial = None
  try:
    stem, dot, suffix = os.path.basename(argv[0]).partition(".")
    if len(argv) == 1 and stem.isdigit() and dot == "." and suffix == "ax":
      serial = timestamp(stem)
  except:
    pass

  if serial is None:
    sys.exit("Argument must be name of a .ax file")

  # Reuse the existing nonce if there is one, otherwise mint a new one.
  nonce = read_current()[1]
  if nonce is None:
    nonce = new_nonce()

  write_current(serial, nonce)
  kick_all(serial)
+
+
class bgpsec_replay_clock(object):
  """
  Internal clock for replaying BGP dump files.

  * DANGER WILL ROBINSON! *
  * DEBUGGING AND TEST USE ONLY! *

  This class replaces the normal on-disk serial number mechanism with
  an in-memory version based on pre-computed data.
  bgpdump_server_main() uses this hack to replay historical data for
  testing purposes.  DO NOT USE THIS IN PRODUCTION.

  You have been warned.
  """

  def __init__(self):
    # Collect the serials of all .ax files in ascending order, then
    # shift our notion of "now" so replay starts at the oldest one.
    self.timestamps = sorted(timestamp(int(name.split(".")[0]))
                             for name in glob.iglob("*.ax"))
    self.offset = self.timestamps[0] - int(time.time())
    self.nonce = new_nonce()

  def __nonzero__(self):
    # True while we still have replay data left.
    return bool(self.timestamps)

  def now(self):
    return timestamp.now(self.offset)

  def read_current(self):
    # Drop serials whose replay time has passed, keeping at least one.
    current = self.now()
    while len(self.timestamps) > 1 and current >= self.timestamps[1]:
      self.timestamps.pop(0)
    return self.timestamps[0], self.nonce

  def siesta(self):
    # How long until the next serial becomes current (None if there
    # is no next serial).
    current = self.now()
    if len(self.timestamps) <= 1:
      return None
    if current < self.timestamps[1]:
      return self.timestamps[1] - current
    return 1
+
+
+def bgpdump_server_main(argv):
+  """
+  Simulate route origin data from a set of BGP dump files.
+
+  * DANGER WILL ROBINSON! *
+  * DEBUGGING AND TEST USE ONLY! *
+
+  This is a clone of server_main() which replaces the external serial
+  number updates triggered via the kickme channel by cronjob_main with
+  an internal clocking mechanism to replay historical test data.
+
+  DO NOT USE THIS IN PRODUCTION.
+
+  You have been warned.
+  """
+
+  blather("[Starting]")
+  # At most one optional argument: a directory to chdir() into.
+  if len(argv) > 1:
+    sys.exit("Unexpected arguments: %r" % (argv,))
+  if argv:
+    try:
+      os.chdir(argv[0])
+    except OSError, e:
+      sys.exit(e)
+  #
+  # Yes, this really does replace a global function with a bound
+  # method to our clock object. Fun stuff, huh?
+  #
+  global read_current
+  clock = bgpsec_replay_clock()
+  read_current = clock.read_current
+  #
+  try:
+    server = server_channel()
+    old_serial = server.get_serial()
+    blather("[Starting at serial %d (%s)]" % (old_serial, old_serial))
+    # Loop until the replay clock runs out of data (its truth value
+    # goes False once the timestamp list is exhausted).
+    while clock:
+      new_serial = server.get_serial()
+      if old_serial != new_serial:
+        blather("[Serial bumped from %d (%s) to %d (%s)]" % (old_serial, old_serial, new_serial, new_serial))
+        server.notify()
+        old_serial = new_serial
+      # Run one round of asyncore I/O, waking up in time for the next
+      # simulated serial bump.
+      asyncore.loop(timeout = clock.siesta(), count = 1)
+  except KeyboardInterrupt:
+    sys.exit(0)
+
+# Figure out where the scan_roas utility program is today
+try:
+  # Set from autoconf
+  scan_roas = ac_scan_roas
+except NameError:
+  # Source directory
+  scan_roas = os.path.normpath(os.path.join(sys.path[0], "..", "utils",
+                                            "scan_roas", "scan_roas"))
+# If that didn't work, use $PATH and hope for the best
+if not os.path.exists(scan_roas):
+  scan_roas = "scan_roas"
+
+# Same thing for scan_routercerts
+try:
+  # Set from autoconf
+  scan_routercerts = ac_scan_routercerts
+except NameError:
+  # Source directory
+  scan_routercerts = os.path.normpath(os.path.join(sys.path[0], "..", "utils",
+                                                   "scan_routercerts", "scan_routercerts"))
+# If autoconf/source lookup failed, fall back to $PATH here too.
+if not os.path.exists(scan_routercerts):
+  scan_routercerts = "scan_routercerts"
+
+# When True (--zero-nonce), always use a zero nonce; test hook.
+force_zero_nonce = False
+
+# Directory and name prefix for the UNIX-domain "kickme" sockets used
+# to poke running servers when the serial number changes.
+kickme_dir = "sockets"
+kickme_base = os.path.join(kickme_dir, "kickme")
+
+# Map of --mode option names to handler functions; each handler's
+# docstring doubles as its help text (see usage()).
+main_dispatch = {
+  "cronjob" : cronjob_main,
+  "client" : client_main,
+  "server" : server_main,
+  "show" : show_main,
+  "listener_tcp" : listener_tcp_main,
+  "bgpdump_convert" : bgpdump_convert_main,
+  "bgpdump_select" : bgpdump_select_main,
+  "bgpdump_server" : bgpdump_server_main }
+
+def usage(msg = None):
+  # Print usage text and exit.  With a message, output goes to stderr
+  # and sys.exit(msg) yields a nonzero status; without one (explicit
+  # --help), output goes to stdout and the exit status is zero.
+  f = sys.stderr if msg else sys.stdout
+  f.write("Usage: %s [options] --mode [arguments]\n" % sys.argv[0])
+  f.write("\n")
+  f.write("where options are zero or more of:\n")
+  f.write("\n")
+  f.write("--syslog facility.warning_priority[.info_priority]\n")
+  f.write("\n")
+  f.write("--zero-nonce\n")
+  f.write("\n")
+  f.write("and --mode is one of:\n")
+  f.write("\n")
+  # Each mode's docstring serves as its per-mode help text.
+  for name, func in main_dispatch.iteritems():
+    f.write("--%s:\n" % name)
+    f.write(func.__doc__)
+    f.write("\n")
+  sys.exit(msg)
+
+if __name__ == "__main__":
+
+  # All timestamps in this program are UTC.
+  os.environ["TZ"] = "UTC"
+  time.tzset()
+
+  mode = None
+
+  # Syslog defaults; overridable via --syslog facility.warn[.info].
+  syslog_facility, syslog_warning, syslog_info = syslog.LOG_DAEMON, syslog.LOG_WARNING, syslog.LOG_INFO
+
+  # Every mode name in main_dispatch is also accepted as a long option.
+  opts, argv = getopt.getopt(sys.argv[1:], "hs:z?", ["help", "syslog=", "zero-nonce"] + main_dispatch.keys())
+  for o, a in opts:
+    if o in ("-h", "--help", "-?"):
+      usage()
+    elif o in ("-z", "--zero-nonce"):
+      force_zero_nonce = True
+    elif o in ("-s", "--syslog"):
+      try:
+        # Translate "facility.warning[.info]" names into syslog constants;
+        # a missing info priority defaults to the warning priority.
+        a = [getattr(syslog, "LOG_" + i.upper()) for i in a.split(".")]
+        if len(a) == 2:
+          a.append(a[1])
+        syslog_facility, syslog_warning, syslog_info = a
+        # Sanity check: facility codes are >= 8, priority codes < 8.
+        if syslog_facility < 8 or syslog_warning >= 8 or syslog_info >= 8:
+          raise ValueError
+      except:
+        usage("Bad value specified for --syslog option")
+    elif len(o) > 2 and o[2:] in main_dispatch:
+      if mode is not None:
+        sys.exit("Conflicting modes specified")
+      mode = o[2:]
+
+  if mode is None:
+    usage("No mode specified")
+
+  log_tag = "rtr-origin/" + mode
+
+  if mode in ("server", "bgpdump_server"):
+    log_tag += hostport_tag()
+
+  # Pick log()/blather() implementations for the mode: daemons use
+  # syslog, "show" writes to stdout (ignoring EPIPE so piping into a
+  # pager works), everything else writes to stderr.
+  if mode in ("cronjob", "server" , "bgpdump_server"):
+    syslog.openlog(log_tag, syslog.LOG_PID, syslog_facility)
+    def log(msg):
+      return syslog.syslog(syslog_warning, str(msg))
+    def blather(msg):
+      return syslog.syslog(syslog_info, str(msg))
+
+  elif mode == "show":
+    def log(msg):
+      try:
+        os.write(sys.stdout.fileno(), "%s\n" % msg)
+      except OSError, e:
+        if e.errno != errno.EPIPE:
+          raise
+    blather = log
+
+  else:
+    def log(msg):
+      sys.stderr.write("%s %s[%d]: %s\n" % (time.strftime("%F %T"), log_tag, os.getpid(), msg))
+    blather = log
+
+  main_dispatch[mode](argv)
diff --git a/rp/rtr-origin/rules.darwin.mk b/rp/rtr-origin/rules.darwin.mk
new file mode 100644
index 00000000..1230db92
--- /dev/null
+++ b/rp/rtr-origin/rules.darwin.mk
@@ -0,0 +1,9 @@
+# $Id$
+
+install-always: install-binary
+
+install-postconf: install-listener
+
+install-listener:
+ @echo "No rule for $@ on this platform (yet), you'll have to do that yourself if it matters."
+
diff --git a/rp/rtr-origin/rules.freebsd.mk b/rp/rtr-origin/rules.freebsd.mk
new file mode 100644
index 00000000..df99da47
--- /dev/null
+++ b/rp/rtr-origin/rules.freebsd.mk
@@ -0,0 +1,37 @@
+# $Id$
+
+install-always: install-binary
+
+install-postconf: install-listener
+
+install-listener: .FORCE
+ @if /usr/bin/egrep -q '^rpki-rtr' /etc/services ; \
+ then \
+ echo "You already have a /etc/services entry for rpki-rtr, so I will use it."; \
+ elif echo >>/etc/services "rpki-rtr ${RPKI_RTR_PORT}/tcp #RFC 6810" ; \
+ then \
+ echo "Added rpki-rtr to /etc/services."; \
+ else \
+ echo "Adding rpki-rtr to /etc/services failed, please fix this, then try again."; \
+ exit 1; \
+ fi
+ @if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp[ ]" /etc/inetd.conf; \
+ then \
+ echo "You already have an inetd.conf entry for rpki-rtr on TCPv4, so I will use it."; \
+ elif echo >>/etc/inetd.conf "rpki-rtr stream tcp nowait rpkirtr /usr/local/bin/rtr-origin rtr-origin --server /var/rcynic/rpki-rtr"; \
+ then \
+ echo "Added rpki-rtr for TCPv4 to /etc/inetd.conf."; \
+ else \
+ echo "Adding rpki-rtr for TCPv4 to /etc/inetd.conf failed, please fix this, then try again."; \
+ exit 1; \
+ fi
+ @if /usr/bin/egrep -q "rpki-rtr[ ]+stream[ ]+tcp6[ ]" /etc/inetd.conf; \
+ then \
+ echo "You already have an inetd.conf entry for rpki-rtr on TCPv6, so I will use it."; \
+ elif echo >>/etc/inetd.conf "rpki-rtr stream tcp6 nowait rpkirtr /usr/local/bin/rtr-origin rtr-origin --server /var/rcynic/rpki-rtr"; \
+ then \
+ echo "Added rpki-rtr for TCPv6 to /etc/inetd.conf."; \
+ else \
+ echo "Adding rpki-rtr for TCPv6 to /etc/inetd.conf failed, please fix this, then try again."; \
+ exit 1; \
+ fi
diff --git a/rp/rtr-origin/rules.linux.mk b/rp/rtr-origin/rules.linux.mk
new file mode 100644
index 00000000..23f90f69
--- /dev/null
+++ b/rp/rtr-origin/rules.linux.mk
@@ -0,0 +1,29 @@
+# $Id$
+
+install-always: install-binary install-listener
+
+install-postconf:
+ @true
+
+# Only need to make listener if not already present
+
+install-listener: ${DESTDIR}/etc/xinetd.d/rpki-rtr
+
+${DESTDIR}/etc/xinetd.d/rpki-rtr:
+ @${AWK} 'BEGIN { \
+ print "service rpki-rtr"; \
+ print "{"; \
+ print " type = UNLISTED"; \
+ print " flags = IPv4"; \
+ print " socket_type = stream"; \
+ print " protocol = tcp"; \
+ print " port = ${RPKI_RTR_PORT}"; \
+ print " wait = no"; \
+ print " user = rpkirtr"; \
+ print " server = ${bindir}/${BIN}"; \
+ print " server_args = --server /var/rcynic/rpki-rtr"; \
+ print "}"; \
+ }' >xinetd.rpki-rtr
+ ${INSTALL} -d ${DESTDIR}/etc/xinetd.d
+ ${INSTALL} -m 644 xinetd.rpki-rtr $@
+ rm xinetd.rpki-rtr
diff --git a/rp/rtr-origin/rules.unknown.mk b/rp/rtr-origin/rules.unknown.mk
new file mode 100644
index 00000000..fb16e93a
--- /dev/null
+++ b/rp/rtr-origin/rules.unknown.mk
@@ -0,0 +1,8 @@
+# $Id$
+
+install-always: install-binary
+
+install-postconf: install-listener
+
+install-listener:
+ @echo "Don't know how to make $@ on this platform"; exit 1
diff --git a/rp/rtr-origin/server.sh b/rp/rtr-origin/server.sh
new file mode 100755
index 00000000..7ccf2f38
--- /dev/null
+++ b/rp/rtr-origin/server.sh
@@ -0,0 +1,17 @@
+#!/bin/sh -
+#
+# Wrapper for rtr-origin.py in server mode, for testing.
+#
+# In production we would probably want to handle all of this either
+# directly in the Python code or in the command= setting for a
+# particular ssh key, but for initial testing it's simpler to run a
+# shell script to change to the right directory and supply any
+# necessary command line arguments.
+#
+# Be warned that almost any error here will cause the subsystem to
+# fail mysteriously, leaving behind naught but a SIGCHLD log message
+# from sshd as this script dies.
+
+cd /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin
+
+exec /usr/local/bin/python rtr-origin.py --server
diff --git a/rp/rtr-origin/sshd.conf b/rp/rtr-origin/sshd.conf
new file mode 100644
index 00000000..0124fc4c
--- /dev/null
+++ b/rp/rtr-origin/sshd.conf
@@ -0,0 +1,23 @@
+# $Id$
+#
+# sshd config file for testing. Invoke thusly:
+#
+# /usr/sbin/sshd -f /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/sshd.conf -d
+
+Port 2222
+Protocol 2
+ListenAddress 127.0.0.1
+ListenAddress ::1
+HostKey /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/ssh_host_rsa_key
+PermitRootLogin no
+PubkeyAuthentication yes
+AuthorizedKeysFile /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/authorized_keys
+PasswordAuthentication no
+PermitEmptyPasswords no
+ChallengeResponseAuthentication no
+UsePAM no
+AllowTcpForwarding no
+X11Forwarding no
+UseDNS no
+PidFile /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/sshd.pid
+Subsystem rpki-rtr /u/sra/rpki/subvert-rpki.hactrn.net/rtr-origin/server.sh
diff --git a/rp/utils/Makefile.in b/rp/utils/Makefile.in
new file mode 100644
index 00000000..c89fdff5
--- /dev/null
+++ b/rp/utils/Makefile.in
@@ -0,0 +1,9 @@
+# $Id$
+
+SUBDIRS = uri print_rpki_manifest print_roa hashdir find_roa scan_roas scan_routercerts
+
+all clean test distclean install deinstall uninstall::
+ @for i in ${SUBDIRS}; do echo "Making $@ in $$i"; (cd $$i && ${MAKE} $@); done
+
+distclean::
+ rm -f Makefile
diff --git a/rp/utils/README b/rp/utils/README
new file mode 100644
index 00000000..edbd793b
--- /dev/null
+++ b/rp/utils/README
@@ -0,0 +1,12 @@
+$Id$
+
+A collection of small RPKI utility programs which can be combined in
+various useful ways by relying parties or by rpkid test scripts.
+
+See:
+
+- The primary documentation at http://trac.rpki.net/
+
+- The PDF manual in ../doc/manual.pdf, or
+
+- The flat text page ../doc/doc.RPKI.Utils
diff --git a/rp/utils/dot.awk b/rp/utils/dot.awk
new file mode 100644
index 00000000..ca1b490b
--- /dev/null
+++ b/rp/utils/dot.awk
@@ -0,0 +1,34 @@
+#!/usr/bin/awk -f
+# $Id$
+#
+# This doesn't really work right yet, and even if it did, the graph
+# it would generate would be hopelessly large.
+
+BEGIN {
+  # Feed every certificate in the unauthenticated tree through
+  # "./uri -d" and parse its "File"/"SIA:"/"AIA:" output lines.
+  cmd = "find /var/rcynic/data/unauthenticated -type f -name '*.cer' -print0 | xargs -0 ./uri -d";
+  while ((cmd | getline) == 1) {
+    if ($1 == "File") {
+      # Map the on-disk path back to its rsync URI and number it.
+      sub("/var/rcynic/data/unauthenticated/", "rsync://");
+      u = $2;
+      uri[u] = ++n;
+      continue;
+    }
+    if ($1 == "SIA:") {
+      sia[u] = $2;
+      continue;
+    }
+    if ($1 == "AIA:") {
+      aia[u] = $2;
+      continue;
+    }
+  }
+  # Emit a Graphviz digraph: blue edges for SIA links, green for AIA.
+  print "digraph rpki {";
+  for (u in uri) {
+    printf "n%06d\t[ label=\"%s\" ];\n", uri[u], u;
+    if (sia[u])
+      printf "n%06d -> n%06d\t [ color=blue ];\n", uri[u], uri[sia[u]];
+    if (aia[u])
+      printf "n%06d -> n%06d\t [ color=green ];\n", uri[u], uri[aia[u]];
+  }
+  print "}";
+}
diff --git a/rp/utils/find_roa.c b/rp/utils/find_roa.c
new file mode 100644
index 00000000..a14242c8
--- /dev/null
+++ b/rp/utils/find_roa.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2014 Dragon Research Labs ("DRL")
+ * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notices and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ARIN DISCLAIM ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+ * ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+ * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+#include <dirent.h>
+#include <limits.h>
+
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/safestack.h>
+#include <openssl/conf.h>
+#include <openssl/rand.h>
+#include <openssl/asn1.h>
+#include <openssl/asn1t.h>
+#include <openssl/cms.h>
+
+#include <rpki/roa.h>
+
+#ifndef FILENAME_MAX
+#define FILENAME_MAX 1024
+#endif
+
+#ifndef ADDR_RAW_BUF_LEN
+#define ADDR_RAW_BUF_LEN 16
+#endif
+
+
+
+/*
+ * Error handling.
+ *
+ * _lose() prints a source-location-tagged error message; the lose(),
+ * lose_errno() and lose_openssl() wrappers add errno / OpenSSL error
+ * detail respectively, then jump to the caller's "done" cleanup label.
+ *
+ * The former trailing unconditional fprintf() here both duplicated
+ * the message and passed a possibly-NULL _file_ to "%s" (undefined
+ * behavior); removed to match hashdir.c's version of these macros.
+ */
+
+#define _lose(_msg_, _file_)                                            \
+  do {                                                                  \
+    if (_file_)                                                         \
+      fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, _msg_, _file_); \
+    else                                                                \
+      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, _msg_);        \
+  } while (0)
+
+#define lose(_msg_, _file_)                     \
+  do {                                          \
+    _lose(_msg_, _file_);                       \
+    goto done;                                  \
+  } while (0)
+
+#define lose_errno(_msg_, _file_)               \
+  do {                                          \
+    _lose(_msg_, _file_);                       \
+    perror(NULL);                               \
+    goto done;                                  \
+  } while (0)
+
+#define lose_openssl(_msg_, _file_)             \
+  do {                                          \
+    _lose(_msg_, _file_);                       \
+    ERR_print_errors_fp(stderr);                \
+    goto done;                                  \
+  } while (0)
+
+
+/*
+ * Extract a ROA prefix from the ASN.1 bitstring encoding.
+ *
+ * addr receives the address, zero-padded out to the full address
+ * length for the given AFI (4 or 16 bytes); *prefixlen receives the
+ * prefix length in bits.  Returns 1 on success, 0 on unknown AFI or
+ * oversized bitstring.
+ */
+static int extract_roa_prefix(unsigned char *addr,
+			      unsigned *prefixlen,
+			      const ASN1_BIT_STRING *bs,
+			      const unsigned afi)
+{
+  unsigned length;
+
+  switch (afi) {
+  case IANA_AFI_IPV4: length = 4; break;
+  case IANA_AFI_IPV6: length = 16; break;
+  default: return 0;
+  }
+
+  if (bs->length < 0 || bs->length > length)
+    return 0;
+
+  if (bs->length > 0) {
+    memcpy(addr, bs->data, bs->length);
+    /* (bs->flags & 7) is the ASN.1 unused-bits count: clear the
+     * trailing bits of the last byte that are not part of the prefix. */
+    if ((bs->flags & 7) != 0) {
+      unsigned char mask = 0xFF >> (8 - (bs->flags & 7));
+      addr[bs->length - 1] &= ~mask;
+    }
+  }
+
+  /* Zero-pad the remainder so callers can compare full addresses. */
+  memset(addr + bs->length, 0, length - bs->length);
+
+  *prefixlen = (bs->length * 8) - (bs->flags & 7);
+
+  return 1;
+}
+
+/*
+ * Check str for a trailing suffix.
+ *
+ * Returns nonzero iff str ends with suffix; both must be non-NULL.
+ */
+static int has_suffix(const char *str, const char *suffix)
+{
+  size_t len_str, len_suffix;
+  assert(str != NULL && suffix != NULL);
+  len_str = strlen(str);
+  len_suffix = strlen(suffix);
+  return len_str >= len_suffix && !strcmp(str + len_str - len_suffix, suffix);
+}
+
+/*
+ * Handle one object.
+ *
+ * Parse one .roa file and print "ASN <asid> prefix <p>/<len> ROA <file>"
+ * if the ROA covers the prefix we were asked about.  NB: CMS_verify()
+ * is called with flags that disable all signature and certificate
+ * checking; this tool only extracts and inspects content, it does not
+ * validate.
+ */
+static void file_handler(const char *filename, const unsigned prefix_afi, const unsigned char *prefix, const unsigned long prefixlen)
+{
+  unsigned char roa_prefix[ADDR_RAW_BUF_LEN];
+  unsigned roa_prefixlen, roa_maxprefixlen, plen;
+  CMS_ContentInfo *cms = NULL;
+  BIO *b = NULL;
+  ROA *r = NULL;
+  int i, j, k, n;
+  unsigned long asid;
+
+  if (!(b = BIO_new_file(filename, "rb")))
+    lose_openssl("Couldn't open CMS file", filename);
+
+  if ((cms = d2i_CMS_bio(b, NULL)) == NULL)
+    lose_openssl("Couldn't read CMS file", filename);
+
+  BIO_free(b);
+
+  /* Extract the CMS payload (the DER-encoded ROA) into a memory BIO. */
+  if ((b = BIO_new(BIO_s_mem())) == NULL)
+    lose_openssl("Couldn't open ROA", filename);
+
+  if (CMS_verify(cms, NULL, NULL, NULL, b, CMS_NOCRL | CMS_NO_SIGNER_CERT_VERIFY | CMS_NO_ATTR_VERIFY | CMS_NO_CONTENT_VERIFY) <= 0)
+    lose_openssl("Couldn't parse ROA CMS", filename);
+
+  if ((r = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), b, NULL)) == NULL)
+    lose_openssl("Couldn't parse ROA", filename);
+
+  asid = (unsigned long) ASN1_INTEGER_get(r->asID);
+
+  for (i = 0; i < sk_ROAIPAddressFamily_num(r->ipAddrBlocks); i++) {
+    ROAIPAddressFamily *f = sk_ROAIPAddressFamily_value(r->ipAddrBlocks, i);
+
+    /*
+     * AFI must match, SAFI must be null
+     */
+    if (f->addressFamily->length != 2 ||
+	prefix_afi != ((f->addressFamily->data[0] << 8) | (f->addressFamily->data[1])))
+      continue;
+
+    for (j = 0; j < sk_ROAIPAddress_num(f->addresses); j++) {
+      ROAIPAddress *a = sk_ROAIPAddress_value(f->addresses, j);
+
+      if (!extract_roa_prefix(roa_prefix, &roa_prefixlen, a->IPAddress, prefix_afi))
+	lose("Malformed ROA", filename);
+
+      /*
+       * If the prefix we're looking for is bigger than the ROA
+       * prefix, the ROA can't possibly cover.
+       */
+      if (prefixlen < roa_prefixlen)
+	continue;
+
+      /* Absent maxLength means the prefix may not be subdivided. */
+      if (a->maxLength)
+	roa_maxprefixlen = ASN1_INTEGER_get(a->maxLength);
+      else
+	roa_maxprefixlen = roa_prefixlen;
+
+      /*
+       * If the prefix we're looking for is smaller than the smallest
+       * allowed slice of the ROA prefix, the ROA can't possibly
+       * cover.
+       */
+      if (prefixlen > roa_maxprefixlen)
+	continue;
+
+      /*
+       * If we get this far, we have to compare prefixes.
+       */
+      assert(roa_prefixlen <= ADDR_RAW_BUF_LEN * 8);
+      plen = prefixlen < roa_prefixlen ? prefixlen : roa_prefixlen;
+      k = 0;
+      /* Compare whole bytes first, then mask-compare the final partial byte. */
+      while (plen >= 8 && prefix[k] == roa_prefix[k]) {
+	plen -= 8;
+	k++;
+      }
+      if (plen > 8 || ((prefix[k] ^ roa_prefix[k]) & (0xFF << (8 - plen))) != 0)
+	continue;
+
+      /*
+       * If we get here, we have a match.
+       */
+      printf("ASN %lu prefix ", asid);
+      switch (prefix_afi) {
+      case IANA_AFI_IPV4:
+	printf("%u.%u.%u.%u", prefix[0], prefix[1], prefix[2], prefix[3]);
+	break;
+      case IANA_AFI_IPV6:
+	/* Trim trailing all-zero 16-bit groups, print the rest in hex. */
+	for (n = 16; n > 1 && prefix[n-1] == 0x00 && prefix[n-2] == 0x00; n -= 2)
+	  ;
+	for (k = 0; k < n; k += 2)
+	  printf("%x%s", (prefix[k] << 8) | prefix[k+1], (k < 14 ? ":" : ""));
+	if (k < 16)
+	  printf(":");
+	break;
+      }
+      printf("/%lu ROA %s\n", prefixlen, filename);
+      goto done;
+    }
+  }
+
+ done:
+  BIO_free(b);
+  CMS_ContentInfo_free(cms);
+  ROA_free(r);
+}
+
+/*
+ * Walk a directory tree
+ *
+ * Recurse into subdirectories, calling file_handler() on every file
+ * named *.roa.  Returns 1 on success, 0 on failure.
+ *
+ * NOTE(review): relies on dirent d_type, which some filesystems
+ * report only as DT_UNKNOWN -- confirm target platforms.
+ */
+static int handle_directory(const char *name, const unsigned prefix_afi, const unsigned char *prefix, const unsigned long prefixlen)
+{
+  char path[FILENAME_MAX];
+  struct dirent *d;
+  size_t len;
+  DIR *dir;
+  int ret = 0, need_slash;
+
+  assert(name);
+  len = strlen(name);
+  assert(len > 0 && len < sizeof(path));
+  need_slash = name[len - 1] != '/';
+
+  if ((dir = opendir(name)) == NULL)
+    lose_errno("Couldn't open directory", name);
+
+  while ((d = readdir(dir)) != NULL) {
+    if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
+      continue;
+    /* Bounds-check before building "name[/]d_name" in path. */
+    if (len + strlen(d->d_name) + need_slash >= sizeof(path))
+      lose("Constructed path name too long", d->d_name);
+    strcpy(path, name);
+    if (need_slash)
+      strcat(path, "/");
+    strcat(path, d->d_name);
+    switch (d->d_type) {
+    case DT_DIR:
+      if (!handle_directory(path, prefix_afi, prefix, prefixlen))
+	lose("Directory walk failed", path);
+      continue;
+    default:
+      if (has_suffix(path, ".roa"))
+	file_handler(path, prefix_afi, prefix, prefixlen);
+      continue;
+    }
+  }
+
+  ret = 1;
+
+ done:
+  if (dir)
+    closedir(dir);
+  return ret;
+}
+
+/*
+ * Print usage; "jane" is argv[0].  code 0 = explicit help request
+ * (stdout), nonzero = usage error (stderr); exits with code.
+ */
+static void usage (const char *jane, const int code)
+{
+  fprintf(code ? stderr : stdout, "usage: %s authtree prefix [prefix...]\n", jane);
+  exit(code);
+}
+
+/*
+ * Entry point: argv[1] is the root of an authenticated object tree,
+ * remaining arguments are prefixes (with optional /length) to search
+ * for covering ROAs.
+ */
+int main (int argc, char *argv[])
+{
+  unsigned char prefix[ADDR_RAW_BUF_LEN];
+  unsigned long prefixlen;
+  unsigned afi;
+  char *s = NULL, *p = NULL;
+  int i, len, ret = 1;
+
+  if (argc == 2 && (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")))
+    usage(argv[0], 0);
+
+  if (argc < 3)
+    usage(argv[0], 1);
+
+  OpenSSL_add_all_algorithms();
+  ERR_load_crypto_strings();
+
+  for (i = 2; i < argc; i++) {
+
+    if ((s = strdup(argv[i])) == NULL)
+      lose("Couldn't strdup()", argv[i]);
+
+    /* Split optional "/prefixlen" off the address part. */
+    if ((p = strchr(s, '/')) != NULL)
+      *p++ = '\0';
+
+    /* a2i_ipadd() returns the bytes written: 4 (IPv4) or 16 (IPv6). */
+    len = a2i_ipadd(prefix, s);
+
+    switch (len) {
+    case 4: afi = IANA_AFI_IPV4; break;
+    case 16: afi = IANA_AFI_IPV6; break;
+    default: lose("Unknown AFI", argv[i]);
+    }
+
+    if (p) {
+      if (*p == '\0' ||
+	  (prefixlen = strtoul(p, &p, 10)) == ULONG_MAX ||
+	  *p != '\0' ||
+	  prefixlen > ADDR_RAW_BUF_LEN * 8)
+	lose("Bad prefix length", argv[i]);
+    } else {
+      /* No explicit length: treat the address as a full-length prefix. */
+      prefixlen = len * 8;
+    }
+
+    assert(prefixlen <= ADDR_RAW_BUF_LEN * 8);
+
+    free(s);
+    p = s = NULL;
+
+    if (!handle_directory(argv[1], afi, prefix, prefixlen))
+      goto done;
+
+  }
+
+  ret = 0;
+
+ done:
+  if (s)
+    free(s);
+  return ret;
+}
diff --git a/rp/utils/find_roa/Makefile.in b/rp/utils/find_roa/Makefile.in
new file mode 100644
index 00000000..36c68e01
--- /dev/null
+++ b/rp/utils/find_roa/Makefile.in
@@ -0,0 +1,56 @@
+# $Id$
+
+NAME = find_roa
+
+BIN = ${NAME}
+SRC = ${NAME}.c
+OBJ = ${NAME}.o
+
+CFLAGS = @CFLAGS@
+LDFLAGS = @LDFLAGS@
+LIBS = @LIBS@
+
+INSTALL = @INSTALL@ -m 555
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+datarootdir = @datarootdir@
+datadir = @datadir@
+localstatedir = @localstatedir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+libdir = @libdir@
+
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir = @abs_top_builddir@
+
+all: ${BIN}
+
+clean:
+ rm -rf ${BIN} ${OBJ} ${BIN}.dSYM
+
+${BIN}: ${SRC}
+ ${CC} ${CFLAGS} -o $@ ${SRC} ${LDFLAGS} ${LIBS}
+
+
+ROA_DIR = ${abs_top_builddir}/rcynic/rcynic-data/authenticated
+
+TEST_ARGS = ${ROA_DIR} 10.3.0.44 10.2.0.6 10.0.0.0/24
+
+test: ${BIN}
+# if test -d ${ROA_DIR}; then ./${BIN} ${TEST_ARGS} ; else :; fi
+ if test -d ${ROA_DIR}; then sh ./test_roa.sh ${TEST_ARGS} ; else :; fi
+
+install: all
+ if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -d ${DESTDIR}${bindir}; fi
+ ${INSTALL} ${BIN} ${DESTDIR}${bindir}
+
+deinstall uninstall:
+ rm -f ${DESTDIR}${bindir}/${BIN}
+
+distclean: clean
+ rm -rf hashed-pem-dir
+ rm -f Makefile
diff --git a/rp/utils/hashdir.c b/rp/utils/hashdir.c
new file mode 100644
index 00000000..1b5b0f46
--- /dev/null
+++ b/rp/utils/hashdir.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2014 Dragon Research Labs ("DRL")
+ * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notices and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ARIN DISCLAIM ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+ * ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+ * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+/*
+ * Read a directory tree of DER certificates and CRLs and copy
+ * them into a PEM format directory with names in the hash format
+ * that OpenSSL's lookup routines expect.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+#include <dirent.h>
+#include <limits.h>
+
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <openssl/x509.h>
+#include <openssl/err.h>
+
+#ifndef FILENAME_MAX
+#define FILENAME_MAX 1024
+#endif
+
+static int verbose = 1;
+
+/*
+ * Error handling.
+ */
+
+#define _lose(_msg_, _file_) \
+ do { \
+ fprintf(stderr, "%s: %s\n", _msg_, _file_); \
+ } while (0)
+
+#define lose(_msg_, _file_) \
+ do { \
+ _lose(_msg_, _file_); \
+ goto done; \
+ } while (0)
+
+#define lose_errno(_msg_, _file_) \
+ do { \
+ _lose(_msg_, _file_); \
+ perror(NULL); \
+ goto done; \
+ } while (0)
+
+#define lose_openssl(_msg_, _file_) \
+ do { \
+ _lose(_msg_, _file_); \
+ ERR_print_errors_fp(stderr); \
+ goto done; \
+ } while (0)
+
+/*
+ * Check str for a trailing suffix.
+ *
+ * Returns nonzero iff str ends with suffix; both must be non-NULL.
+ */
+static int has_suffix(const char *str, const char *suffix)
+{
+  size_t len_str, len_suffix;
+  assert(str != NULL && suffix != NULL);
+  len_str = strlen(str);
+  len_suffix = strlen(suffix);
+  return len_str >= len_suffix && !strcmp(str + len_str - len_suffix, suffix);
+}
+
+/*
+ * Handle one object.
+ *
+ * Read one DER certificate (.cer) or CRL (.crl), then write it in PEM
+ * form into targetdir under an OpenSSL hash-directory name
+ * (<hash>.<n> for certificates, <hash>.r<n> for CRLs), picking the
+ * first suffix <n> not already in use.  Errors are reported and the
+ * object skipped; this function does not abort the directory walk.
+ */
+static void file_handler(const char *filename, const char *targetdir)
+{
+  char path[FILENAME_MAX];
+  unsigned long hash;
+  const char *fmt;
+  X509_CRL *crl = NULL;
+  X509 *cer = NULL;
+  BIO *b = NULL;
+  int i, n, is_crl;
+
+  if (has_suffix(filename, ".cer"))
+    is_crl = 0;
+  else if (has_suffix(filename, ".crl"))
+    is_crl = 1;
+  else
+    return;			/* Ignore if neither certificate nor CRL */
+
+  if (verbose)
+    printf("Reading %s\n", filename);
+
+  if (!(b = BIO_new_file(filename, "rb")))
+    lose_openssl("Couldn't open input file", filename);
+
+  if (is_crl
+      ? !(crl = d2i_X509_CRL_bio(b, NULL))
+      : !(cer = d2i_X509_bio(b, NULL)))
+    lose_openssl("Couldn't read DER object", filename);
+
+  BIO_free(b);
+  b = NULL;
+
+  /*
+   * CRLs hash by issuer name, certificates by subject name, matching
+   * what OpenSSL's hash-directory lookup expects.
+   */
+  if (is_crl) {
+    hash = X509_NAME_hash(X509_CRL_get_issuer(crl));
+    fmt = "%s/%08lx.r%d";
+  } else {
+    hash = X509_subject_name_hash(cer);
+    fmt = "%s/%08lx.%d";
+  }
+
+  /*
+   * Find the first unused suffix.  snprintf() returns the length the
+   * full string would have required, which can exceed sizeof(path);
+   * the old "== sizeof(path)" test missed those longer results (and
+   * negative error returns), silently using a truncated path.
+   */
+  for (i = 0; i < INT_MAX; i++) {
+    n = snprintf(path, sizeof(path), fmt, targetdir, hash, i);
+    if (n < 0 || (size_t) n >= sizeof(path))
+      lose("Path too long", filename);
+    if (access(path, F_OK))
+      break;
+  }
+  if (i == INT_MAX)
+    lose("No pathname available", filename);
+
+  if (verbose)
+    printf("Writing %s\n", path);
+
+  if (!(b = BIO_new_file(path, "w")))
+    lose_openssl("Couldn't open output file", path);
+
+  if (is_crl
+      ? !PEM_write_bio_X509_CRL(b, crl)
+      : !PEM_write_bio_X509(b, cer))
+    lose_openssl("Couldn't write PEM object", path);
+
+ done:
+  X509_free(cer);
+  X509_CRL_free(crl);
+  BIO_free(b);
+}
+
+/*
+ * Walk a directory tree
+ *
+ * Recurse into subdirectories, calling file_handler() on every
+ * regular entry; file_handler() itself ignores anything that is not
+ * a .cer or .crl.  Returns 1 on success, 0 on failure.
+ *
+ * NOTE(review): relies on dirent d_type, which some filesystems
+ * report only as DT_UNKNOWN -- confirm target platforms.
+ */
+static int handle_directory(const char *name, const char *targetdir)
+{
+  char path[FILENAME_MAX];
+  struct dirent *d;
+  size_t len;
+  DIR *dir;
+  int ret = 0, need_slash;
+
+  assert(name);
+  len = strlen(name);
+  assert(len > 0 && len < sizeof(path));
+  need_slash = name[len - 1] != '/';
+
+  if ((dir = opendir(name)) == NULL)
+    lose_errno("Couldn't open directory", name);
+
+  while ((d = readdir(dir)) != NULL) {
+    if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
+      continue;
+    /* Bounds-check before building "name[/]d_name" in path. */
+    if (len + strlen(d->d_name) + need_slash >= sizeof(path))
+      lose("Constructed path name too long", d->d_name);
+    strcpy(path, name);
+    if (need_slash)
+      strcat(path, "/");
+    strcat(path, d->d_name);
+    switch (d->d_type) {
+    case DT_DIR:
+      if (!handle_directory(path, targetdir))
+	lose("Directory walk failed", path);
+      continue;
+    default:
+      file_handler(path, targetdir);
+      continue;
+    }
+  }
+
+  ret = 1;
+
+ done:
+  if (dir)
+    closedir(dir);
+  return ret;
+}
+
+/*
+ * Print usage; "jane" is argv[0].  code 0 = explicit help request
+ * (stdout), nonzero = usage error (stderr); exits with code.
+ */
+static void usage (const char *jane, const int code)
+{
+  fprintf(code ? stderr : stdout, "usage: %s input-directory output-directory\n", jane);
+  exit(code);
+}
+
+/*
+ * Entry point: argv[1] is the DER input tree, argv[2] the PEM hash
+ * directory to populate.  Exit status 0 iff the walk succeeded.
+ */
+int main(int argc, char *argv[])
+{
+  if (argc == 2 && (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")))
+    usage(argv[0], 0);
+
+  if (argc != 3)
+    usage(argv[0], 1);
+
+  /* handle_directory() returns 1 on success, so invert for exit status. */
+  return !handle_directory(argv[1], argv[2]);
+}
diff --git a/rp/utils/hashdir/Makefile.in b/rp/utils/hashdir/Makefile.in
new file mode 100644
index 00000000..c0cf448a
--- /dev/null
+++ b/rp/utils/hashdir/Makefile.in
@@ -0,0 +1,55 @@
+# $Id$
+
+NAME = hashdir
+
+BIN = ${NAME}
+SRC = ${NAME}.c
+OBJ = ${NAME}.o
+
+CFLAGS = @CFLAGS@
+LDFLAGS = @LDFLAGS@
+LIBS = @LIBS@
+
+INSTALL = @INSTALL@ -m 555
+
+prefix = @prefix@
+exec_prefix = @exec_prefix@
+datarootdir = @datarootdir@
+datadir = @datadir@
+localstatedir = @localstatedir@
+sharedstatedir = @sharedstatedir@
+sysconfdir = @sysconfdir@
+bindir = @bindir@
+sbindir = @sbindir@
+libexecdir = @libexecdir@
+libdir = @libdir@
+
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir = @abs_top_builddir@
+
+all: ${BIN}
+
+clean::
+ rm -rf ${BIN} ${OBJ} ${BIN}.dSYM
+
+${BIN}: ${SRC}
+ ${CC} ${CFLAGS} -o $@ ${SRC} ${LDFLAGS} ${LIBS}
+
+INPUT = ${abs_top_builddir}/rcynic/rcynic-data/authenticated
+OUTPUT = hashed-pem-dir
+
+test: ${BIN}
+ if test -d ${INPUT}; then rm -rf ${OUTPUT} && mkdir ${OUTPUT} && ./hashdir ${INPUT} ${OUTPUT}; else :; fi
+
+clean::
+ rm -rf ${OUTPUT}
+
+install: all
+ if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -d ${DESTDIR}${bindir}; fi
+ ${INSTALL} ${BIN} ${DESTDIR}${bindir}
+
+deinstall uninstall:
+ rm -f ${DESTDIR}${bindir}/${BIN}
+
+distclean: clean
+ rm -f Makefile
diff --git a/rp/utils/print_roa.c b/rp/utils/print_roa.c
new file mode 100644
index 00000000..c88fc092
--- /dev/null
+++ b/rp/utils/print_roa.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2014 Dragon Research Labs ("DRL")
+ * Portions copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notices and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ARIN DISCLAIM ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+ * ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+ * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+/*
+ * Decoder test for ROAs.
+ *
+ * NB: This does -not- check the CMS signatures, just the encoding.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+#include <getopt.h>
+
+#include <openssl/bio.h>
+#include <openssl/bn.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/safestack.h>
+#include <openssl/conf.h>
+#include <openssl/rand.h>
+#include <openssl/asn1.h>
+#include <openssl/asn1t.h>
+#include <openssl/cms.h>
+
+#include <rpki/roa.h>
+
+/*
+ * How much buffer space do we need for a raw address?
+ */
+#define ADDR_RAW_BUF_LEN 16
+
+
+
+/*
+ * Extract signing time from CMS message.
+ */
+
+static char *
+extract_signingTime(CMS_ContentInfo *cms, char *buffer, size_t buflen)
+{
+ STACK_OF(CMS_SignerInfo) *sis = NULL;
+ CMS_SignerInfo *si = NULL;
+ X509_ATTRIBUTE *xa = NULL;
+ ASN1_TYPE *so = NULL;
+ int i = -1;
+
+ if (cms == NULL ||
+ buffer == NULL ||
+ buflen < sizeof("20010401123456Z") ||
+ (sis = CMS_get0_SignerInfos(cms)) == NULL ||
+ sk_CMS_SignerInfo_num(sis) != 1 ||
+ (si = sk_CMS_SignerInfo_value(sis, 0)) < 0 ||
+ (i = CMS_signed_get_attr_by_NID(si, NID_pkcs9_signingTime, -1)) < 0 ||
+ (xa = CMS_signed_get_attr(si, i)) == NULL ||
+ xa->single ||
+ sk_ASN1_TYPE_num(xa->value.set) != 1 ||
+ (so = sk_ASN1_TYPE_value(xa->value.set, 0)) == NULL)
+ return NULL;
+
+ assert(buflen > 2);
+ buffer[buflen - 1] = '\0';
+
+ switch (so->type) {
+ case V_ASN1_UTCTIME:
+ strcpy(buffer, (so->value.utctime->data[0] >= '5') ? "19" : "20");
+ return strncpy(buffer + 2, (const char *) so->value.utctime->data, buflen - 3);
+ case V_ASN1_GENERALIZEDTIME:
+ return strncpy(buffer, (const char *) so->value.generalizedtime->data, buflen - 1);
+ default:
+ return NULL;
+ }
+}
+
+
+
+/*
+ * Expand the bitstring form of an address into a raw byte array.
+ * At the moment this is coded for simplicity, not speed.
+ */
+static void addr_expand(unsigned char *addr,
+ const ASN1_BIT_STRING *bs,
+ const int length)
+{
+ assert(bs->length >= 0 && bs->length <= length);
+ if (bs->length > 0) {
+ memcpy(addr, bs->data, bs->length);
+ if ((bs->flags & 7) != 0) {
+ unsigned char mask = 0xFF >> (8 - (bs->flags & 7));
+ addr[bs->length - 1] &= ~mask;
+ }
+ }
+ memset(addr + bs->length, 0, length - bs->length);
+}
+
+/*
+ * Extract the prefix length from a bitstring.
+ */
+#define addr_prefixlen(bs) ((int) ((bs)->length * 8 - ((bs)->flags & 7)))
+
+/*
+ * Read ROA (CMS object) in DER format.
+ *
+ * NB: When invoked this way, CMS_verify() does -not- verify, it just decodes the ASN.1.
+ *
+ * Well, OK, this function has evolved to doing a lot more than just
+ * reading the object. Refactor or at least rename, someday.
+ */
+static ROA *read_roa(const char *filename,
+ const int print_cms,
+ const int print_roa,
+ const int print_signerinfo,
+ const int print_brief,
+ const int print_signingtime)
+{
+ unsigned char addr[ADDR_RAW_BUF_LEN];
+ CMS_ContentInfo *cms = NULL;
+ const ASN1_OBJECT *oid = NULL;
+ char *asID = NULL;
+ BIGNUM *bn = NULL;
+ ROA *r = NULL;
+ char buf[512];
+ BIO *b = NULL;
+ int i, j, k, n;
+
+ if ((b = BIO_new_file(filename, "r")) == NULL ||
+ (cms = d2i_CMS_bio(b, NULL)) == NULL)
+ goto done;
+ BIO_free(b);
+ b = NULL;
+
+ if (print_signerinfo) {
+ STACK_OF(CMS_SignerInfo) *signerInfos = CMS_get0_SignerInfos(cms);
+ STACK_OF(X509) *certs = CMS_get1_certs(cms);
+ STACK_OF(X509_CRL) *crls = CMS_get1_crls(cms);
+ printf("Certificates: %d\n", certs ? sk_X509_num(certs) : 0);
+ printf("CRLs: %d\n", crls ? sk_X509_CRL_num(crls) : 0);
+ for (i = 0; i < sk_CMS_SignerInfo_num(signerInfos); i++) {
+ CMS_SignerInfo *si = sk_CMS_SignerInfo_value(signerInfos, i);
+ ASN1_OCTET_STRING *hash = NULL;
+ printf("SignerId[%d]: ", i);
+ if (CMS_SignerInfo_get0_signer_id(si, &hash, NULL, NULL) && hash != NULL)
+ for (j = 0; j < hash->length; j++)
+ printf("%02x%s", hash->data[j], j == hash->length - 1 ? "" : ":");
+ else
+ printf("[Could not read SID]");
+ if (certs)
+ for (j = 0; j < sk_X509_num(certs); j++)
+ if (!CMS_SignerInfo_cert_cmp(si, sk_X509_value(certs, j)))
+ printf(" [Matches certificate %d]", j);
+ if ((j = CMS_signed_get_attr_by_NID(si, NID_pkcs9_signingTime, -1)) >= 0) {
+ X509_ATTRIBUTE *xa = CMS_signed_get_attr(si, j);
+ if (xa && !xa->single && sk_ASN1_TYPE_num(xa->value.set) == 1) {
+ ASN1_TYPE *so = sk_ASN1_TYPE_value(xa->value.set, 0);
+ switch (so->type) {
+ case V_ASN1_UTCTIME:
+ printf(" [signingTime(U) %s%s]",
+ so->value.utctime->data[0] < '5' ? "20" : "19",
+ so->value.utctime->data);
+ break;
+ case V_ASN1_GENERALIZEDTIME:
+ printf(" [signingTime(G) %s]",
+ so->value.generalizedtime->data);
+ break;
+ }
+ }
+ }
+ printf("\n");
+ }
+ sk_X509_pop_free(certs, X509_free);
+ sk_X509_CRL_pop_free(crls, X509_CRL_free);
+ }
+
+ if ((b = BIO_new(BIO_s_mem())) == NULL ||
+ CMS_verify(cms, NULL, NULL, NULL, b, CMS_NOCRL | CMS_NO_SIGNER_CERT_VERIFY | CMS_NO_ATTR_VERIFY | CMS_NO_CONTENT_VERIFY) <= 0 ||
+ (r = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), b, NULL)) == NULL)
+ goto done;
+ BIO_free(b);
+ b = NULL;
+
+ if (print_roa) {
+
+ bn = ASN1_INTEGER_to_BN(r->asID, NULL);
+ asID = BN_bn2dec(bn);
+
+ if (print_brief) {
+
+ if (print_signingtime) {
+ char buffer[sizeof("20010401123456Z")], *b;
+ if (!extract_signingTime(cms, buffer, sizeof(buffer)))
+ goto done;
+ printf("%s ", buffer);
+ }
+
+ fputs(asID, stdout);
+
+ } else {
+
+ if ((oid = CMS_get0_eContentType(cms)) == NULL)
+ goto done;
+ OBJ_obj2txt(buf, sizeof(buf), oid, 0);
+ printf("eContentType: %s\n", buf);
+
+ if (r->version)
+ printf("version: %ld\n", ASN1_INTEGER_get(r->version));
+ else
+ printf("version: 0 [Defaulted]\n");
+ printf("asID: %s\n", asID);
+ }
+
+ for (i = 0; i < sk_ROAIPAddressFamily_num(r->ipAddrBlocks); i++) {
+
+ ROAIPAddressFamily *f = sk_ROAIPAddressFamily_value(r->ipAddrBlocks, i);
+
+ unsigned afi = (f->addressFamily->data[0] << 8) | (f->addressFamily->data[1]);
+
+ if (!print_brief) {
+ printf(" addressFamily: %x", afi);
+ if (f->addressFamily->length == 3)
+ printf("[%x]", f->addressFamily->data[2]);
+ printf("\n");
+ }
+
+ for (j = 0; j < sk_ROAIPAddress_num(f->addresses); j++) {
+ ROAIPAddress *a = sk_ROAIPAddress_value(f->addresses, j);
+
+ if (print_brief)
+ printf(" ");
+ else
+ printf(" IPaddress: ");
+
+ switch (afi) {
+
+ case IANA_AFI_IPV4:
+ addr_expand(addr, a->IPAddress, 4);
+ printf("%d.%d.%d.%d", addr[0], addr[1], addr[2], addr[3]);
+ break;
+
+ case IANA_AFI_IPV6:
+ addr_expand(addr, a->IPAddress, 16);
+ for (n = 16; n > 1 && addr[n-1] == 0x00 && addr[n-2] == 0x00; n -= 2)
+ ;
+ for (k = 0; k < n; k += 2)
+ printf("%x%s", (addr[k] << 8) | addr[k+1], (k < 14 ? ":" : ""));
+ if (k < 16)
+ printf(":");
+ if (k == 0)
+ printf(":");
+ break;
+
+ default:
+ if (!print_brief) {
+ for (k = 0; k < a->IPAddress->length; k++)
+ printf("%s%02x", (k > 0 ? ":" : ""), a->IPAddress->data[k]);
+ printf("[%d]", (int) (a->IPAddress->flags & 7));
+ }
+ break;
+
+ }
+
+ printf("/%u", addr_prefixlen(a->IPAddress));
+
+ if (a->maxLength)
+ printf("-%ld", ASN1_INTEGER_get(a->maxLength));
+
+ if (!print_brief)
+ printf("\n");
+ }
+ }
+ if (print_brief)
+ printf("\n");
+ }
+
+ if (print_cms) {
+ if (print_roa)
+ printf("\n");
+ fflush(stdout);
+ if ((b = BIO_new(BIO_s_fd())) == NULL)
+ goto done;
+ BIO_set_fd(b, 1, BIO_NOCLOSE);
+ CMS_ContentInfo_print_ctx(b, cms, 0, NULL);
+ BIO_free(b);
+ b = NULL;
+ }
+
+ done:
+ if (ERR_peek_error())
+ ERR_print_errors_fp(stderr);
+ BIO_free(b);
+ BN_free(bn);
+ if (asID)
+ OPENSSL_free(asID);
+ CMS_ContentInfo_free(cms);
+ return r;
+}
+
+
+
/*
 * Command-line options, shared by getopt_long() and usage().
 * Storage-class specifier first ("static const", not "const static")
 * per C standard recommendation.
 */
static const struct option longopts[] = {
  { "brief",        no_argument, NULL, 'b' },
  { "print-cms",    no_argument, NULL, 'c' },
  { "help",         no_argument, NULL, 'h' },
  { "signingtime",  no_argument, NULL, 's' },
  { NULL }
};
+
+static int usage (const char *jane, const int code)
+{
+ FILE *out = code ? stderr : stdout;
+ int i;
+
+ fprintf(out, "usage: %s [options] ROA [ROA...]\n", jane);
+ fprintf(out, "options:\n");
+ for (i = 0; longopts[i].name != NULL; i++)
+ fprintf(out, " -%c --%s\n", longopts[i].val, longopts[i].name);
+
+ return code;
+}
+
+/*
+ * Main program.
+ */
+int main (int argc, char *argv[])
+{
+ int result = 0, print_brief = 0, print_signingtime = 0, print_cms = 0, c;
+ const char *jane = argv[0];
+ ROA *r;
+
+ OpenSSL_add_all_algorithms();
+ ERR_load_crypto_strings();
+
+ while ((c = getopt_long(argc, argv, "bchs", longopts, NULL)) != -1) {
+ switch (c) {
+ case 'b':
+ print_brief = 1;
+ break;
+ case 'c':
+ print_cms = 1;
+ break;
+ case 's':
+ print_signingtime = 1;
+ break;
+ case 'h':
+ return usage(jane, 0);
+ default:
+ return usage(jane, 1);
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ if (argc == 0)
+ return usage(jane, 1);
+
+ while (argc-- > 0) {
+ r = read_roa(*argv++, print_cms, 1, !print_brief, print_brief, print_signingtime);
+ result |= r == NULL;
+ ROA_free(r);
+ }
+ return result;
+}
diff --git a/rp/utils/print_roa/Makefile.in b/rp/utils/print_roa/Makefile.in
new file mode 100644
index 00000000..5999b351
--- /dev/null
+++ b/rp/utils/print_roa/Makefile.in
@@ -0,0 +1,52 @@
# $Id$

# Build glue for the print_roa utility: a single-file C binary that
# links against OpenSSL (via @LIBS@).  @...@ tokens come from autoconf.

NAME = print_roa

BIN = ${NAME}
SRC = ${NAME}.c
OBJ = ${NAME}.o

# Compiler and linker settings from configure.
CFLAGS = @CFLAGS@
LDFLAGS = @LDFLAGS@
LIBS = @LIBS@

INSTALL = @INSTALL@ -m 555

# Standard GNU installation directory layout.
prefix = @prefix@
exec_prefix = @exec_prefix@
datarootdir = @datarootdir@
datadir = @datadir@
localstatedir = @localstatedir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
bindir = @bindir@
sbindir = @sbindir@
libexecdir = @libexecdir@
libdir = @libdir@

abs_top_srcdir = @abs_top_srcdir@
abs_top_builddir = @abs_top_builddir@

all: ${BIN}

clean:
	rm -rf ${BIN} ${OBJ} ${BIN}.dSYM

${BIN}: ${SRC}
	${CC} ${CFLAGS} -o $@ ${SRC} ${LDFLAGS} ${LIBS}

# Smoke test: decode every .roa in the smoketest publication tree,
# if one exists in this build.
ROA_DIR = ${abs_top_builddir}/rpkid/tests/smoketest.dir/publication

test: all
	-date -u +'now: %Y%m%d%H%M%SZ'
	if test -d ${ROA_DIR}; then find ${ROA_DIR} -type f -name '*.roa' -print -exec ./${BIN} {} \; ; else :; fi

install: all
	if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -d ${DESTDIR}${bindir}; fi
	${INSTALL} ${BIN} ${DESTDIR}${bindir}

deinstall uninstall:
	rm -f ${DESTDIR}${bindir}/${BIN}

distclean: clean
	rm -f Makefile
diff --git a/rp/utils/print_rpki_manifest.c b/rp/utils/print_rpki_manifest.c
new file mode 100644
index 00000000..f55f9916
--- /dev/null
+++ b/rp/utils/print_rpki_manifest.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2014 Dragon Research Labs ("DRL")
+ * Portions copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notices and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ARIN DISCLAIM ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+ * ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+ * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+/*
+ * Decoder test for RPKI manifests.
+ *
+ * NB: This does -not- check the CMS signatures, just the encoding.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <getopt.h>
+
+#include <openssl/bn.h>
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/safestack.h>
+#include <openssl/conf.h>
+#include <openssl/rand.h>
+#include <openssl/asn1.h>
+#include <openssl/asn1t.h>
+#include <openssl/cms.h>
+
+#include <rpki/manifest.h>
+
/*
 * Read manifest (CMS object) in DER format, optionally printing the
 * SignerInfo data, the decoded manifest payload, and the raw CMS
 * structure.
 *
 * NB: When invoked this way, CMS_verify() does -not- verify, it just
 * decodes the ASN.1.
 *
 * OK, this does more than just reading the CMS.  Refactor or rename,
 * someday.
 *
 * Returns the decoded Manifest, or NULL if decoding failed; OpenSSL
 * errors are dumped to stderr.
 *
 * NOTE(review): the returned Manifest is never freed by the caller
 * (main() only tests it for NULL) — harmless for a one-shot CLI, but
 * worth knowing.  Also, print failures after a successful decode
 * still return non-NULL, matching the print_roa.c convention.
 */
static const Manifest *read_manifest(const char *filename,
				     const int print_cms,
				     const int print_manifest,
				     const int print_signerinfo)
{
  CMS_ContentInfo *cms = NULL;
  const ASN1_OBJECT *oid = NULL;
  const Manifest *m = NULL;
  char *mftnum = NULL;		/* decimal manifestNumber from BN_bn2dec() */
  BIGNUM *bn = NULL;
  char buf[512];
  BIO *b = NULL;
  int i, j;

  /* Slurp and DER-decode the CMS wrapper. */
  if ((b = BIO_new_file(filename, "r")) == NULL ||
      (cms = d2i_CMS_bio(b, NULL)) == NULL)
    goto done;
  BIO_free(b);
  b = NULL;

  if (print_signerinfo) {
    STACK_OF(CMS_SignerInfo) *signerInfos = CMS_get0_SignerInfos(cms);
    STACK_OF(X509) *certs = CMS_get1_certs(cms);
    STACK_OF(X509_CRL) *crls = CMS_get1_crls(cms);
    printf("Certificates:   %d\n", certs ? sk_X509_num(certs) : 0);
    printf("CRLs:           %d\n", crls ? sk_X509_CRL_num(crls) : 0);
    for (i = 0; i < sk_CMS_SignerInfo_num(signerInfos); i++) {
      CMS_SignerInfo *si = sk_CMS_SignerInfo_value(signerInfos, i);
      ASN1_OCTET_STRING *hash = NULL;
      /* SignerIdentifier, printed as colon-separated hex octets. */
      printf("SignerId[%d]:    ", i);
      if (CMS_SignerInfo_get0_signer_id(si, &hash, NULL, NULL) && hash != NULL)
	for (j = 0; j < hash->length; j++)
	  printf("%02x%s", hash->data[j], j == hash->length - 1 ? "" : ":");
      else
	printf("[Could not read SID]");
      if (certs)
	for (j = 0; j < sk_X509_num(certs); j++)
	  if (!CMS_SignerInfo_cert_cmp(si, sk_X509_value(certs, j)))
	    printf(" [Matches certificate %d]", j);
      /* Show the signingTime signed attribute, if present. */
      if ((j = CMS_signed_get_attr_by_NID(si, NID_pkcs9_signingTime, -1)) >= 0) {
	X509_ATTRIBUTE *xa = CMS_signed_get_attr(si, j);
	if (xa && !xa->single && sk_ASN1_TYPE_num(xa->value.set) == 1) {
	  ASN1_TYPE *so = sk_ASN1_TYPE_value(xa->value.set, 0);
	  switch (so->type) {
	  case V_ASN1_UTCTIME:
	    /* Two-digit year: RFC 5280 century pivot at 50. */
	    printf(" [signingTime(U) %s%s]",
		   so->value.utctime->data[0] < '5' ? "20" : "19",
		   so->value.utctime->data);
	    break;
	  case V_ASN1_GENERALIZEDTIME:
	    printf(" [signingTime(G) %s]",
		   so->value.generalizedtime->data);
	    break;
	  }
	}
      }
      printf("\n");
    }
    sk_X509_pop_free(certs, X509_free);
    sk_X509_CRL_pop_free(crls, X509_CRL_free);
  }

  /*
   * "Verify" with all checks disabled just unpacks the eContent into
   * the memory BIO, from which we decode the Manifest payload.
   */
  if ((b = BIO_new(BIO_s_mem())) == NULL ||
      CMS_verify(cms, NULL, NULL, NULL, b, CMS_NOCRL | CMS_NO_SIGNER_CERT_VERIFY | CMS_NO_ATTR_VERIFY | CMS_NO_CONTENT_VERIFY) <= 0 ||
      (m = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), b, NULL)) == NULL)
    goto done;
  BIO_free(b);
  b = NULL;

  if (print_manifest) {

    if ((oid = CMS_get0_eContentType(cms)) == NULL)
      goto done;
    OBJ_obj2txt(buf, sizeof(buf), oid, 0);
    printf("eContentType:   %s\n", buf);

    if (m->version)
      printf("version:        %ld\n", ASN1_INTEGER_get(m->version));
    else
      printf("version:        0 [Defaulted]\n");

    /* Via BIGNUM: manifestNumber may exceed the range of long. */
    bn = ASN1_INTEGER_to_BN(m->manifestNumber, NULL);
    mftnum = BN_bn2dec(bn);
    printf("manifestNumber: %s\n", mftnum);

    printf("thisUpdate:     %s\n", m->thisUpdate->data);
    printf("nextUpdate:     %s\n", m->nextUpdate->data);
    OBJ_obj2txt(buf, sizeof(buf), m->fileHashAlg, 0);
    printf("fileHashAlg:    %s\n", buf);

    /* One line per manifest entry: hash octets, then the file name. */
    for (i = 0; i < sk_FileAndHash_num(m->fileList); i++) {
      FileAndHash *fah = sk_FileAndHash_value(m->fileList, i);
      printf("fileList[%3d]:  ", i);
      for (j = 0; j < fah->hash->length; j++)
	printf("%02x%s", fah->hash->data[j], j == fah->hash->length - 1 ? " " : ":");
      printf(" %s\n", fah->file->data);
    }

    /* Negative comparison means nextUpdate is in the past. */
    if (X509_cmp_current_time(m->nextUpdate) < 0)
      printf("MANIFEST IS STALE\n");
  }

  if (print_cms) {
    if (print_manifest)
      printf("\n");
    fflush(stdout);
    /* Dump the raw CMS structure to stdout (fd 1, left open). */
    if ((b = BIO_new(BIO_s_fd())) == NULL)
      goto done;
    BIO_set_fd(b, 1, BIO_NOCLOSE);
    CMS_ContentInfo_print_ctx(b, cms, 0, NULL);
    BIO_free(b);
    b = NULL;
  }

 done:
  if (ERR_peek_error())
    ERR_print_errors_fp(stderr);
  BIO_free(b);
  BN_free(bn);
  if (mftnum)
    OPENSSL_free(mftnum);
  CMS_ContentInfo_free(cms);
  return m;
}
+
+
+
/*
 * Command-line options, shared by getopt_long() and usage().
 * Storage-class specifier first ("static const", not "const static")
 * per C standard recommendation.
 */
static const struct option longopts[] = {
  { "print-cms",  no_argument, NULL, 'c' },
  { "help",       no_argument, NULL, 'h' },
  { NULL }
};
+
+static int usage (const char *jane, const int code)
+{
+ FILE *out = code ? stderr : stdout;
+ int i;
+
+ fprintf(out, "usage: %s [options] manifest [manifest...]\n", jane);
+ fprintf(out, "options:\n");
+ for (i = 0; longopts[i].name != NULL; i++)
+ fprintf(out, " -%c --%s\n", longopts[i].val, longopts[i].name);
+
+ return code;
+}
+
+/*
+ * Main program.
+ */
+int main (int argc, char *argv[])
+{
+ int result = 0, print_cms = 0, c;
+ const char *jane = argv[0];
+
+ OpenSSL_add_all_algorithms();
+ ERR_load_crypto_strings();
+
+ while ((c = getopt_long(argc, argv, "ch", longopts, NULL)) != -1) {
+ switch (c) {
+ case 'c':
+ print_cms = 1;
+ break;
+ case 'h':
+ return usage(jane, 0);
+ default:
+ return usage(jane, 1);
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ if (argc == 0)
+ return usage(jane, 1);
+
+ while (argc-- > 0)
+ result |= read_manifest(*argv++, print_cms, 1, 1) == NULL;
+ return result;
+}
diff --git a/rp/utils/print_rpki_manifest/Makefile.in b/rp/utils/print_rpki_manifest/Makefile.in
new file mode 100644
index 00000000..22f1b16b
--- /dev/null
+++ b/rp/utils/print_rpki_manifest/Makefile.in
@@ -0,0 +1,52 @@
# $Id$

# Build glue for the print_rpki_manifest utility: a single-file C
# binary linking against OpenSSL (via @LIBS@).  @...@ tokens come from
# autoconf.

NAME = print_rpki_manifest

BIN = ${NAME}
SRC = ${NAME}.c
OBJ = ${NAME}.o

# Compiler and linker settings from configure.
CFLAGS = @CFLAGS@
LDFLAGS = @LDFLAGS@
LIBS = @LIBS@

INSTALL = @INSTALL@ -m 555

# Standard GNU installation directory layout.
prefix = @prefix@
exec_prefix = @exec_prefix@
datarootdir = @datarootdir@
datadir = @datadir@
localstatedir = @localstatedir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
bindir = @bindir@
sbindir = @sbindir@
libexecdir = @libexecdir@
libdir = @libdir@

abs_top_srcdir = @abs_top_srcdir@
abs_top_builddir = @abs_top_builddir@

all: ${BIN}

clean:
	rm -rf ${BIN} ${OBJ} ${BIN}.dSYM

${BIN}: ${SRC}
	${CC} ${CFLAGS} -o $@ ${SRC} ${LDFLAGS} ${LIBS}

# Smoke test: decode every .mnf in the smoketest publication tree,
# if one exists in this build.
MANIFEST_DIR = ${abs_top_builddir}/rpkid/tests/smoketest.dir/publication

test: all
	-date -u +'now: %Y%m%d%H%M%SZ'
	if test -d ${MANIFEST_DIR}; then find ${MANIFEST_DIR} -type f -name '*.mnf' -print -exec ./${BIN} {} \; ; else :; fi

install: all
	if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -d ${DESTDIR}${bindir}; fi
	${INSTALL} ${BIN} ${DESTDIR}${bindir}

deinstall uninstall:
	rm -f ${DESTDIR}${bindir}/${BIN}

distclean: clean
	rm -f Makefile
diff --git a/rp/utils/scan_roas.c b/rp/utils/scan_roas.c
new file mode 100644
index 00000000..f32e3827
--- /dev/null
+++ b/rp/utils/scan_roas.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2014 Dragon Research Labs ("DRL")
+ * Portions copyright (C) 2011 Internet Systems Consortium ("ISC")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notices and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+ * ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+ * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+/*
+ * Decoder test for ROAs.
+ *
+ * NB: This does -not- check the CMS signatures, just the encoding.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+#include <dirent.h>
+
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/safestack.h>
+#include <openssl/conf.h>
+#include <openssl/rand.h>
+#include <openssl/asn1.h>
+#include <openssl/asn1t.h>
+#include <openssl/cms.h>
+
+#include <rpki/roa.h>
+
+/*
+ * How much buffer space do we need for a raw address?
+ */
+#define ADDR_RAW_BUF_LEN 16
+
+/*
+ * How long can a filesystem path be?
+ */
+#define PATH_MAX 2048
+
+
+
+/*
+ * Extract signing time from CMS message.
+ */
+
+static char *
+extract_signingTime(CMS_ContentInfo *cms, char *buffer, size_t buflen)
+{
+ STACK_OF(CMS_SignerInfo) *sis = NULL;
+ CMS_SignerInfo *si = NULL;
+ X509_ATTRIBUTE *xa = NULL;
+ ASN1_TYPE *so = NULL;
+ int i = -1;
+
+ if (cms == NULL ||
+ buffer == NULL ||
+ buflen < sizeof("20010401123456Z") ||
+ (sis = CMS_get0_SignerInfos(cms)) == NULL ||
+ sk_CMS_SignerInfo_num(sis) != 1 ||
+ (si = sk_CMS_SignerInfo_value(sis, 0)) < 0 ||
+ (i = CMS_signed_get_attr_by_NID(si, NID_pkcs9_signingTime, -1)) < 0 ||
+ (xa = CMS_signed_get_attr(si, i)) == NULL ||
+ xa->single ||
+ sk_ASN1_TYPE_num(xa->value.set) != 1 ||
+ (so = sk_ASN1_TYPE_value(xa->value.set, 0)) == NULL)
+ return NULL;
+
+ assert(buflen > 2);
+ buffer[buflen - 1] = '\0';
+
+ switch (so->type) {
+ case V_ASN1_UTCTIME:
+ strcpy(buffer, (so->value.utctime->data[0] >= '5') ? "19" : "20");
+ return strncpy(buffer + 2, (const char *) so->value.utctime->data, buflen - 3);
+ case V_ASN1_GENERALIZEDTIME:
+ return strncpy(buffer, (const char *) so->value.generalizedtime->data, buflen - 1);
+ default:
+ return NULL;
+ }
+}
+
+
+
+/*
+ * Expand the bitstring form of an address into a raw byte array.
+ * At the moment this is coded for simplicity, not speed.
+ */
+static void addr_expand(unsigned char *addr,
+ const ASN1_BIT_STRING *bs,
+ const int length)
+{
+ assert(bs->length >= 0 && bs->length <= length);
+ if (bs->length > 0) {
+ memcpy(addr, bs->data, bs->length);
+ if ((bs->flags & 7) != 0) {
+ unsigned char mask = 0xFF >> (8 - (bs->flags & 7));
+ addr[bs->length - 1] &= ~mask;
+ }
+ }
+ memset(addr + bs->length, 0, length - bs->length);
+}
+
+/*
+ * Extract the prefix length from a bitstring.
+ */
+#define addr_prefixlen(bs) ((int) ((bs)->length * 8 - ((bs)->flags & 7)))
+
+/*
+ * Read ROA (CMS object) in DER format.
+ *
+ * NB: When invoked this way, CMS_verify() does -not- verify, it just decodes the ASN.1.
+ */
+static int read_roa(const char *filename)
+{
+ char buffer[sizeof("20010401123456Z")], *b;
+ unsigned char addr[ADDR_RAW_BUF_LEN];
+ CMS_ContentInfo *cms = NULL;
+ const ASN1_OBJECT *oid = NULL;
+ ROA *r = NULL;
+ char buf[512];
+ BIO *bio;
+ int i, j, k, n, ok;
+
+ if ((bio = BIO_new_file(filename, "r")) == NULL ||
+ (cms = d2i_CMS_bio(bio, NULL)) == NULL)
+ goto done;
+ BIO_free(bio);
+
+ if ((bio = BIO_new(BIO_s_mem())) == NULL ||
+ CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NOCRL | CMS_NO_SIGNER_CERT_VERIFY | CMS_NO_ATTR_VERIFY | CMS_NO_CONTENT_VERIFY) <= 0 ||
+ (r = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), bio, NULL)) == NULL)
+ goto done;
+
+ if (!extract_signingTime(cms, buffer, sizeof(buffer)))
+ goto done;
+ printf("%s ", buffer);
+
+ printf("%ld", ASN1_INTEGER_get(r->asID));
+
+ for (i = 0; i < sk_ROAIPAddressFamily_num(r->ipAddrBlocks); i++) {
+
+ ROAIPAddressFamily *f = sk_ROAIPAddressFamily_value(r->ipAddrBlocks, i);
+
+ unsigned afi = (f->addressFamily->data[0] << 8) | (f->addressFamily->data[1]);
+
+ for (j = 0; j < sk_ROAIPAddress_num(f->addresses); j++) {
+ ROAIPAddress *a = sk_ROAIPAddress_value(f->addresses, j);
+
+ printf(" ");
+
+ switch (afi) {
+
+ case IANA_AFI_IPV4:
+ addr_expand(addr, a->IPAddress, 4);
+ printf("%d.%d.%d.%d", addr[0], addr[1], addr[2], addr[3]);
+ break;
+
+ case IANA_AFI_IPV6:
+ addr_expand(addr, a->IPAddress, 16);
+ for (n = 16; n > 1 && addr[n-1] == 0x00 && addr[n-2] == 0x00; n -= 2)
+ ;
+ for (k = 0; k < n; k += 2)
+ printf("%x%s", (addr[k] << 8) | addr[k+1], (k < 14 ? ":" : ""));
+ if (k < 16)
+ printf(":");
+ if (k == 0)
+ printf(":");
+ break;
+
+ default:
+ break;
+ }
+
+ printf("/%u", addr_prefixlen(a->IPAddress));
+
+ if (a->maxLength)
+ printf("-%ld", ASN1_INTEGER_get(a->maxLength));
+ }
+ }
+ printf("\n");
+
+ done:
+ ok = r != NULL;
+
+ if (ERR_peek_error())
+ ERR_print_errors_fp(stderr);
+ BIO_free(bio);
+ CMS_ContentInfo_free(cms);
+ ROA_free(r);
+
+ return ok;
+}
+
+
+
/**
 * Check whether str ends with the given suffix.  Both arguments must
 * be non-NULL; an empty suffix matches any string.
 */
static int endswith(const char *str, const char *suffix)
{
  size_t n, m;

  assert(str != NULL && suffix != NULL);
  n = strlen(str);
  m = strlen(suffix);

  if (m > n)
    return 0;
  return strcmp(str + (n - m), suffix) == 0;
}
+
+
+
/**
 * Walk directory tree, looking for ROAs.
 *
 * Recurses into subdirectories and feeds every "*.roa" file to
 * read_roa().  Returns 1 if everything under name was processed
 * successfully, 0 on any error (unopenable directory, over-long path,
 * or a ROA that failed to decode).
 *
 * NOTE(review): classification relies on d_type, which some
 * filesystems report as DT_UNKNOWN; such entries fall into the
 * default branch and directories there would be skipped — confirm
 * this is acceptable for the trees rcynic produces.
 */
static int walk(const char *name)
{
  int need_slash, ok = 1;
  char path[PATH_MAX];
  struct dirent *d;
  size_t len;
  DIR *dir;

  assert(name);
  len = strlen(name);

  assert(len > 0 && len < sizeof(path));
  /* Only append a separator if the caller didn't end with one. */
  need_slash = name[len - 1] != '/';

  if ((dir = opendir(name)) == NULL)
    return 0;

  while ((d = readdir(dir)) != NULL) {
    if (!strcmp(d->d_name, ".") ||
	!strcmp(d->d_name, ".."))
      continue;
    /* Bail out rather than truncate an over-long path. */
    if (len + strlen(d->d_name) + need_slash >= sizeof(path)) {
      ok = 0;
      goto done;
    }
    strcpy(path, name);
    if (need_slash)
      strcat(path, "/");
    strcat(path, d->d_name);
    switch (d->d_type) {
    case DT_DIR:
      /* Recurse; a failure below poisons the whole result. */
      ok &= walk(path);
      continue;
    default:
      if (endswith(path, ".roa"))
	ok &= read_roa(path);
      continue;
    }
  }

 done:
  closedir(dir);
  return ok;
}
+
+
+
/*
 * Print a usage summary and terminate.  Help (exit code 0) goes to
 * stdout, errors (nonzero code) to stderr.
 */
static void usage (const char *jane, const int code)
{
  FILE *out = code ? stderr : stdout;

  fprintf(out, "usage: %s authtree [authtree...]\n", jane);
  exit(code);
}
+
/*
 * Main program: walk each authenticated tree named on the command
 * line, printing a summary line for every ROA found.  Exit status is
 * zero only if every tree was walked and every ROA decoded cleanly.
 */
int main (int argc, char *argv[])
{
  int ok = 1, i;

  if (argc == 2 && (strcmp(argv[1], "-h") == 0 || strcmp(argv[1], "--help") == 0))
    usage(argv[0], 0);
  if (argc < 2)
    usage(argv[0], 1);

  OpenSSL_add_all_algorithms();
  ERR_load_crypto_strings();

  for (i = 1; i < argc; i++)
    ok &= walk(argv[i]);

  return ok ? 0 : 1;
}
diff --git a/rp/utils/scan_roas/Makefile.in b/rp/utils/scan_roas/Makefile.in
new file mode 100644
index 00000000..7707969c
--- /dev/null
+++ b/rp/utils/scan_roas/Makefile.in
@@ -0,0 +1,52 @@
# $Id$

# Build glue for the scan_roas utility: a single-file C binary that
# links against OpenSSL (via @LIBS@).  @...@ tokens come from autoconf.

NAME = scan_roas

BIN = ${NAME}
SRC = ${NAME}.c
OBJ = ${NAME}.o

# Compiler and linker settings from configure.
CFLAGS = @CFLAGS@
LDFLAGS = @LDFLAGS@
LIBS = @LIBS@

INSTALL = @INSTALL@ -m 555

# Standard GNU installation directory layout.
prefix = @prefix@
exec_prefix = @exec_prefix@
datarootdir = @datarootdir@
datadir = @datadir@
localstatedir = @localstatedir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
bindir = @bindir@
sbindir = @sbindir@
libexecdir = @libexecdir@
libdir = @libdir@

abs_top_srcdir = @abs_top_srcdir@
abs_top_builddir = @abs_top_builddir@

all: ${BIN}

clean:
	rm -rf ${BIN} ${OBJ} ${BIN}.dSYM

${BIN}: ${SRC}
	${CC} ${CFLAGS} -o $@ ${SRC} ${LDFLAGS} ${LIBS}

# Smoke test: scan the smoketest publication tree, if this build has one.
ROA_DIR = ${abs_top_builddir}/rpkid/tests/smoketest.dir/publication

test: all
	-date -u +'now: %Y%m%d%H%M%SZ'
	if test -d ${ROA_DIR}; then ./${BIN} ${ROA_DIR} ; else :; fi

install: all
	if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -d ${DESTDIR}${bindir}; fi
	${INSTALL} ${BIN} ${DESTDIR}${bindir}

deinstall uninstall:
	rm -f ${DESTDIR}${bindir}/${BIN}

distclean: clean
	rm -f Makefile
diff --git a/rp/utils/scan_routercerts.py b/rp/utils/scan_routercerts.py
new file mode 100755
index 00000000..342fa272
--- /dev/null
+++ b/rp/utils/scan_routercerts.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# $Id$
+#
+# Copyright (C) 2014 Dragon Research Labs ("DRL")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND DRL DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL DRL BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Scan rcynic validated output looking for router certificates, print
+out stuff that the rpki-rtr code cares about.
+"""
+
+# This program represents a weird temporary state, mostly to avoid
+# diving into a recursive yak shaving exercise.
+#
+# Under the old scheme, anything used by the RP code should be either
+# C code or pure Python code using just the standard libraries. This
+# has gotten silly, but we haven't yet refactored the current packaged
+# builds from two packages into three (adding a -libs package).
+#
+# So, by rights, this program should be a C monstrosity written using
+# the OpenSSL C API. I started coding it that way, but it was just
+# too painful for something we're probably going to rewrite as a few
+# lines of Python once we refactor, but by the same token I didn't
+# want to delay router certificate support until the refactoring.
+#
+# So this program anticipates the new scheme of things, but makes one
+# concession to current reality: if it has a problem importing the
+# RPKI-specific libraries, it just quietly exits as if everything were
+# fine and there simply are no router certificates to report. This
+# isn't the right answer in the long run, but will suffice to avoid
+# further bald yaks.
+
+import os
+import sys
+import base64
+
+try:
+ import rpki.POW
+ import rpki.oids
+except ImportError:
+ sys.exit(0)
+
# Root of the rcynic authenticated output tree to scan (sole CLI argument).
rcynic_dir = sys.argv[1]

# NOTE(review): xrange and str-typed base64 results make this Python 2
# code; it would need minor changes (range, bytes decoding) for Python 3.
for root, dirs, files in os.walk(rcynic_dir):
    for fn in files:
        if not fn.endswith(".cer"):
            continue
        x = rpki.POW.X509.derReadFile(os.path.join(root, fn))

        # Router certificates are identified by the BGPsec router EKU.
        if rpki.oids.id_kp_bgpsec_router not in (x.getEKU() or ()):
            continue

        # One line per router cert:
        #   <url-safe base64 SKI, padding stripped> <asn...> <base64 public key>
        sys.stdout.write(base64.urlsafe_b64encode(x.getSKI()).rstrip("="))
        for min_asn, max_asn in x.getRFC3779()[0]:
            for asn in xrange(min_asn, max_asn + 1):
                sys.stdout.write(" %s" % asn)
        sys.stdout.write(" %s\n" % base64.b64encode(x.getPublicKey().derWritePublic()))
diff --git a/rp/utils/scan_routercerts/Makefile.in b/rp/utils/scan_routercerts/Makefile.in
new file mode 100644
index 00000000..715d1325
--- /dev/null
+++ b/rp/utils/scan_routercerts/Makefile.in
@@ -0,0 +1,41 @@
# $Id$

# Build glue for scan_routercerts: a Python script, so there is
# nothing to compile -- "all" and "clean" are no-ops and install just
# copies the script into ${bindir}.

NAME = scan_routercerts

BIN = ${NAME}

INSTALL = @INSTALL@ -m 555

# Standard GNU installation directory layout.
prefix = @prefix@
exec_prefix = @exec_prefix@
datarootdir = @datarootdir@
datadir = @datadir@
localstatedir = @localstatedir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
bindir = @bindir@
sbindir = @sbindir@
libexecdir = @libexecdir@
libdir = @libdir@

abs_top_srcdir = @abs_top_srcdir@
abs_top_builddir = @abs_top_builddir@

all clean:
	@true

# Smoke test: run against the smoketest publication tree, if present.
ROUTERCERT_DIR = ${abs_top_builddir}/rpkid/tests/smoketest.dir/publication

test: all
	-date -u +'now: %Y%m%d%H%M%SZ'
	if test -d ${ROUTERCERT_DIR}; then ./${BIN} ; else :; fi

install: all
	if test -d ${DESTDIR}${bindir} ; then :; else ${INSTALL} -d ${DESTDIR}${bindir}; fi
	${INSTALL} ${BIN} ${DESTDIR}${bindir}

deinstall uninstall:
	rm -f ${DESTDIR}${bindir}/${BIN}

distclean: clean
	rm -f Makefile
diff --git a/rp/utils/strip_roa.sh b/rp/utils/strip_roa.sh
new file mode 100755
index 00000000..e2dacf86
--- /dev/null
+++ b/rp/utils/strip_roa.sh
@@ -0,0 +1,39 @@
#!/bin/sh -
# $Id$
#
# Copyright (C) 2010 Internet Systems Consortium ("ISC")
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
# Postprocess print_roa's output into a terser form: one line per ROA,
# keeping only the interesting fields, so that sets of ROAs can be
# compared with text tools like "diff" or "comm".  One could do
# something prettier, but this suffices for basic tests.
#
# Use in a shell pipeline after print_roa.

awk '
    # Each "Certificate" line marks the start of the next ROA.
    /Certificate/ {
        summary[++count] = "";
    }
    # Append the fields worth keeping to the current ROA summary.
    /asID|addressFamily|IPaddress/ {
        summary[count] = summary[count] " " $0;
    }
    END {
        for (k in summary)
            print summary[k];
    }
' |
tr -s \\011 \\040 |
sort -u
diff --git a/rp/utils/table.awk b/rp/utils/table.awk
new file mode 100644
index 00000000..d8627f67
--- /dev/null
+++ b/rp/utils/table.awk
@@ -0,0 +1,35 @@
+#!/usr/bin/awk -f
+# $Id$
+#
+# Reformat uri.c's output in a way that's more useful
+# for some kinds of scripting. Perhaps this functionality should be
+# part of uri.c itself, but for now this script will do.
+
+BEGIN {
+ cmd = "find /var/rcynic/data/unauthenticated -type f -name '*.cer' -print0 | xargs -0 ./uri -d";
+ while ((cmd | getline) == 1) {
+ if ($1 == "File") {
+ if (f)
+ print f, u, a, s, c;
+ a = s = c = "-";
+ f = $2;
+ sub("/var/rcynic/data/unauthenticated/","rsync://");
+ u = $2;
+ continue;
+ }
+ if ($1 == "SIA:") {
+ s = $2;
+ continue;
+ }
+ if ($1 == "AIA:") {
+ a = $2;
+ continue;
+ }
+ if ($1 == "CRL:") {
+ c = $2;
+ continue;
+ }
+ }
+ if (f != "-")
+ print f, u, a, s, c;
+}
diff --git a/rp/utils/test_roa.sh b/rp/utils/test_roa.sh
new file mode 100644
index 00000000..43d20898
--- /dev/null
+++ b/rp/utils/test_roa.sh
@@ -0,0 +1,35 @@
+#!/bin/sh -
+#
+# Copyright (C) 2008 American Registry for Internet Numbers ("ARIN")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+# $Id$
+
+auth_dir="${1?"usage: $0 authenticated_certificate_tree prefix [prefix...]"}"
+
+rm -rf hashed-pem-dir
+mkdir hashed-pem-dir
+
+../hashdir/hashdir "$auth_dir" hashed-pem-dir >/dev/null
+
+./find_roa "$@" | awk '
+ $1 == "ASN" && $3 == "prefix" && $5 == "ROA" {
+ print "";
+ print "Found match:"
+ print;
+ print "Verifying certificate chain and signatures:"
+ roa = $6;
+ if (!system("../../openssl/openssl/apps/openssl cms -verify -inform DER -out /dev/null -CApath hashed-pem-dir -in " roa))
+ system("../print_roa/print_roa " roa);
+ }'
diff --git a/rp/utils/uri.c b/rp/utils/uri.c
new file mode 100644
index 00000000..6353e8e5
--- /dev/null
+++ b/rp/utils/uri.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2014 Dragon Research Labs ("DRL")
+ * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notices and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ARIN DISCLAIM ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR
+ * ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
+ * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+/*
+ * Extract and test URIs from certificates. This is a unit test of
+ * rcynic code, a utility, or both, depending on how it turns out.
+ *
+ * NB: OpenSSL insures that IA5 strings are null-terminated, so it's safe
+ * for us to ignore the length count.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <getopt.h>
+
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/cms.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/safestack.h>
+
/*
 * RPKI-relevant id-ad (accessMethod) OIDs in DER content-octet form,
 * so they can be compared byte-for-byte against ASN1_OBJECT data.
 */
static const unsigned char id_ad_caIssuers[] = {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x2}; /* 1.3.6.1.5.5.7.48.2 */
static const unsigned char id_ad_caRepository[] = {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x5}; /* 1.3.6.1.5.5.7.48.5 */
static const unsigned char id_ad_signedObjectRepository[] = {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x9}; /* 1.3.6.1.5.5.7.48.9 */
static const unsigned char id_ad_rpkiManifest[] = {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0xa}; /* 1.3.6.1.5.5.7.48.10 */
static const unsigned char id_ad_signedObject[] = {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0xb}; /* 1.3.6.1.5.5.7.48.11 */
+
+static X509 *read_cert(const char *filename, int format, int verbose)
+{
+ BIO *b = BIO_new_file(filename, "r");
+ STACK_OF(X509) *certs = NULL;
+ CMS_ContentInfo *cms = NULL;
+ X509 *x = NULL;
+
+ if (b == NULL)
+ return NULL;
+
+ switch (format) {
+ case 'p':
+ x = PEM_read_bio_X509(b, NULL, NULL, NULL);
+ break;
+ case 'd':
+ x = d2i_X509_bio(b, NULL);
+ break;
+ }
+
+ if (x == NULL) {
+ BIO_reset(b);
+ switch (format) {
+ case 'p':
+ cms = PEM_read_bio_CMS(b, NULL, NULL, NULL);
+ break;
+ case 'd':
+ cms = d2i_CMS_bio(b, NULL);
+ break;
+ }
+ if (cms != NULL && (certs = CMS_get1_certs(cms)) != NULL)
+ x = sk_X509_shift(certs);
+ }
+
+ if (x != NULL && verbose) {
+ X509_print_fp(stdout, x);
+ printf("\n");
+ }
+
+ sk_X509_pop_free(certs, X509_free);
+ CMS_ContentInfo_free(cms);
+ BIO_free(b);
+ return x;
+}
+
/*
 * Result codes for the extension-decoding functions below: decode_ok
 * means the extension was present and acceptable, the other values
 * identify the first problem encountered while decoding it.
 */
enum decode_errors {
  decode_ok,
  decode_no_extension,
  decode_not_exactly_one_DistributionPointName,
  decode_has_reasons,
  decode_has_CRLissuer,
  decode_no_distributionPoint,
  decode_not_GeneralName,
  decode_not_URI,
};
+
+static enum decode_errors decode_crldp(X509 *x, int verbose, int spaces)
+{
+ enum decode_errors err = decode_ok;
+ STACK_OF(DIST_POINT) *ds = X509_get_ext_d2i(x, NID_crl_distribution_points, NULL, NULL);
+ DIST_POINT *d;
+ GENERAL_NAME *n;
+ int i;
+
+ if (!ds) {
+ err = decode_no_extension;
+ } else if (sk_DIST_POINT_num(ds) != 1) {
+ err = decode_not_exactly_one_DistributionPointName;
+ } else if ((d = sk_DIST_POINT_value(ds, 0))->reasons) {
+ err = decode_has_reasons;
+ } else if (d->CRLissuer) {
+ err = decode_has_CRLissuer;
+ } else if (!d->distpoint) {
+ err = decode_no_distributionPoint;
+ } else if (d->distpoint->type != 0) {
+ err = decode_not_GeneralName;
+ } else {
+ for (i = 0; i < sk_GENERAL_NAME_num(d->distpoint->name.fullname); i++) {
+ n = sk_GENERAL_NAME_value(d->distpoint->name.fullname, i);
+ if (n->type != GEN_URI) {
+ err = decode_not_GeneralName;
+ break;
+ }
+ printf(" CRLDP: %s%s", n->d.uniformResourceIdentifier->data, spaces ? "" : "\n");
+ }
+ }
+
+ sk_DIST_POINT_pop_free(ds, DIST_POINT_free);
+ return err;
+}
+
+#define decode_xia(_x_, _v_, _s_, _tag_, _nid_, _oid_) \
+ _decode_xia(_x_, _v_, _s_, _tag_, _nid_, _oid_, sizeof(_oid_))
+
+static enum decode_errors _decode_xia(X509 *x,
+ int verbose,
+ int spaces,
+ char *tag,
+ int nid,
+ const unsigned char *oid,
+ int oidlen)
+{
+ enum decode_errors err = decode_ok;
+ AUTHORITY_INFO_ACCESS *as = X509_get_ext_d2i(x, nid, NULL, NULL);
+ ACCESS_DESCRIPTION *a;
+ int i;
+
+ if (!as) {
+ err = decode_no_extension;
+ } else {
+ for (i = 0; i < sk_ACCESS_DESCRIPTION_num(as); i++) {
+ a = sk_ACCESS_DESCRIPTION_value(as, i);
+ if (a->location->type != GEN_URI) {
+ err = decode_not_URI;
+ break;
+ }
+ if (a->method->length == oidlen && !memcmp(a->method->data, oid, oidlen))
+ printf(" %s: %s%s", tag, a->location->d.uniformResourceIdentifier->data, spaces ? "" : "\n");
+ }
+ }
+
+ sk_ACCESS_DESCRIPTION_pop_free(as, ACCESS_DESCRIPTION_free);
+ return err;
+}
+
+
+
/*
 * Long option table: consumed by getopt_long() in main() and by
 * usage() to generate the help text, so keep it in sync with the
 * optstring.  Note "static const" rather than the original
 * "const static": storage-class specifiers conventionally come first
 * (compilers warn about the old-style ordering).
 */
static const struct option longopts[] = {
  { "der",     no_argument, NULL, 'd' },
  { "help",    no_argument, NULL, 'h' },
  { "pem",     no_argument, NULL, 'p' },
  { "spaces",  no_argument, NULL, 's' },
  { "verbose", no_argument, NULL, 'v' },
  { NULL,      0,           NULL,  0  }	/* fully-initialized sentinel */
};
+
+static int usage (const char *jane, const int code)
+{
+ FILE *out = code ? stderr : stdout;
+ int i;
+
+ fprintf(out, "usage: %s [-p | -d] cert [cert...]\n", jane);
+ fprintf(out, "options:\n");
+ for (i = 0; longopts[i].name != NULL; i++)
+ fprintf(out, " -%c --%s\n", longopts[i].val, longopts[i].name);
+
+ return code;
+}
+
+int main(int argc, char *argv[])
+{
+ int c, format = 'd', spaces = 0, verbose = 0;
+ const char *jane = argv[0];
+ X509 *x;
+
+ OpenSSL_add_all_algorithms();
+ ERR_load_crypto_strings();
+
+ while ((c = getopt(argc, argv, "pdsv")) > 0) {
+ switch (c) {
+ case 'v':
+ verbose = 1;
+ break;
+ case 'p':
+ case 'd':
+ format = c;
+ break;
+ case 's':
+ spaces = 1;
+ break;
+ case 'h':
+ return usage(jane, 0);
+ default:
+ return usage(jane, 1);
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ if (argc == 0)
+ return usage(jane, 1);
+
+ while (argc-- > 0) {
+ printf(spaces ? "%s" : "File: %s\n", *argv);
+ if ((x = read_cert(*argv++, format, verbose)) == NULL) {
+ printf("Couldn't read certificate, skipping\n");
+ continue;
+ }
+ decode_xia(x, verbose, spaces, "AIA:caIssuers", NID_info_access, id_ad_caIssuers);
+ decode_xia(x, verbose, spaces, "SIA:caRepository", NID_sinfo_access, id_ad_caRepository);
+ decode_xia(x, verbose, spaces, "SIA:signedObjectRepository", NID_sinfo_access, id_ad_signedObjectRepository);
+ decode_xia(x, verbose, spaces, "SIA:rpkiManifest", NID_sinfo_access, id_ad_rpkiManifest);
+ decode_xia(x, verbose, spaces, "SIA:signedObject", NID_sinfo_access, id_ad_signedObject);
+ decode_crldp(x, verbose, spaces);
+ if (spaces)
+ putchar('\n');
+ X509_free(x);
+ }
+
+ return 0;
+}
diff --git a/rp/utils/uri/Makefile.in b/rp/utils/uri/Makefile.in
new file mode 100644
index 00000000..fc545060
--- /dev/null
+++ b/rp/utils/uri/Makefile.in
@@ -0,0 +1,31 @@
# $Id$
#
# Build glue for the "uri" utility.  The @...@ tokens are substituted
# by configure; CFLAGS/LDFLAGS/LIBS are expected to carry the OpenSSL
# include and library settings.

NAME = uri

BIN = ${NAME}
SRC = ${NAME}.c
# NOTE(review): OBJ is declared but the link rule below compiles
# straight from SRC, so no intermediate object file is produced;
# clean still removes it just in case.
OBJ = ${NAME}.o

CFLAGS = @CFLAGS@
LDFLAGS = @LDFLAGS@
LIBS = @LIBS@

abs_top_srcdir = @abs_top_srcdir@
abs_top_builddir = @abs_top_builddir@

all: ${BIN}

clean:
	rm -rf ${BIN} ${OBJ} ${BIN}.dSYM

# Compile and link in one step.
${BIN}: ${SRC}
	${CC} ${CFLAGS} -o $@ ${SRC} ${LDFLAGS} ${LIBS}

# No automated test; uri is exercised by hand / by table.awk.
test:
	@true

# Not installed: this is a developer/debugging tool.
install deinstall uninstall:
	@true

distclean: clean
	rm -f Makefile