author     Rob Austein <sra@hactrn.net>  2011-09-15 00:04:23 +0000
committer  Rob Austein <sra@hactrn.net>  2011-09-15 00:04:23 +0000
commit     0c4a98c56785c2f4a6e4fba3941083fc466da893 (patch)
tree       4a188735b9d63422cf8573f1efbc5ec5b8e15162
parent     bb6769c8315a193d7940837606951f1c2942e095 (diff)
Merge rcynic-ng/ back into rcynic/.  Used svn merge --ignore-ancestry
to minimize problems for people mirroring the repository.

svn path=/configure; revision=3985
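For readers unfamiliar with that style of merge, a rough sketch of the command
sequence follows; the repository-relative paths and the working-copy location
are illustrative assumptions, not values recorded in this commit:

    # In a working copy of rcynic/, apply the difference between rcynic/ and
    # rcynic-ng/ directly.  --ignore-ancestry tells Subversion to compare the
    # two trees as plain file sets instead of reconciling their unrelated
    # histories (paths below are assumptions for illustration).
    svn merge --ignore-ancestry ^/trunk/rcynic ^/trunk/rcynic-ng .
    svn commit -m "Merge rcynic-ng/ back into rcynic/."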
-rwxr-xr-x  configure  4
-rw-r--r--  configure.ac  2
-rw-r--r--  rcynic-ng/Doxyfile  1679
-rw-r--r--  rcynic-ng/Makefile.in  77
-rw-r--r--  rcynic-ng/README  582
-rwxr-xr-x  rcynic-ng/installation-scripts/darwin/RCynic/RCynic  75
-rw-r--r--  rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist  19
-rw-r--r--  rcynic-ng/installation-scripts/darwin/install.sh  193
-rw-r--r--  rcynic-ng/installation-scripts/deinstall.sh  5
-rw-r--r--  rcynic-ng/installation-scripts/freebsd/install.sh  151
-rwxr-xr-x  rcynic-ng/installation-scripts/freebsd/rc.d.rcynic  71
-rw-r--r--  rcynic-ng/installation-scripts/install.sh  14
-rw-r--r--  rcynic-ng/installation-scripts/linux/install.sh.in  236
-rw-r--r--  rcynic-ng/installation-scripts/sample-rcynic.conf  27
-rwxr-xr-x  rcynic-ng/make-tal.sh  42
-rw-r--r--  rcynic-ng/rcynic.c  4644
-rw-r--r--  rcynic-ng/rcynic.xsl  312
-rw-r--r--  rcynic-ng/sample-trust-anchors/README  13
-rw-r--r--  rcynic-ng/sample-trust-anchors/afrinic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/apnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/bbn-testbed.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/lacnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal  9
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-apnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-arin.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-ripe.tal  8
-rw-r--r--  rcynic-ng/show.awk  93
-rw-r--r--  rcynic-ng/show.sh.in  20
-rw-r--r--  rcynic-ng/show.xsl  59
-rw-r--r--  rcynic-ng/static-rsync/Makefile.in  41
-rw-r--r--  rcynic-ng/static-rsync/README  15
-rw-r--r--  rcynic-ng/static-rsync/patches/patch-CVE-2007-4091  60
-rw-r--r--  rcynic-ng/static-rsync/rsync-2.6.9.tar.gz  bin  811841 -> 0 bytes
-rw-r--r--  rcynic/Doxyfile  2
-rw-r--r--  rcynic/Makefile.in  28
-rw-r--r--  rcynic/README  350
-rw-r--r--  rcynic/bio_f_linebreak.c (renamed from rcynic-ng/bio_f_linebreak.c)  0
-rw-r--r--  rcynic/bio_f_linebreak.h (renamed from rcynic-ng/bio_f_linebreak.h)  0
-rw-r--r--  rcynic/defasn1.h (renamed from rcynic-ng/defasn1.h)  0
-rw-r--r--  rcynic/defstack.awk (renamed from rcynic-ng/defstack.awk)  0
-rw-r--r--  rcynic/rcynic.c  3852
-rw-r--r--  rcynic/rcynic.xsl  279
-rw-r--r--  rcynic/show.awk  84
-rw-r--r--  rcynic/show.xsl  40
-rw-r--r--  rcynic/validation_status.xsl (renamed from rcynic-ng/validation_status.xsl)  0
46 files changed, 2904 insertions, 10238 deletions
diff --git a/configure b/configure
index 038f9ce8..fc45bd9b 100755
--- a/configure
+++ b/configure
@@ -4266,7 +4266,7 @@ test $build_django = yes && RPKID_SUBDIRS="$RPKID_SUBDIRS portal-gui"
-ac_config_files="$ac_config_files Makefile rcynic/Makefile rcynic/static-rsync/Makefile rcynic-ng/Makefile rcynic-ng/static-rsync/Makefile utils/Makefile utils/find_roa/Makefile utils/hashdir/Makefile utils/print_rpki_manifest/Makefile utils/print_roa/Makefile utils/scan_roas/Makefile utils/uri/Makefile"
+ac_config_files="$ac_config_files Makefile rcynic/Makefile rcynic/static-rsync/Makefile utils/Makefile utils/find_roa/Makefile utils/hashdir/Makefile utils/print_rpki_manifest/Makefile utils/print_roa/Makefile utils/scan_roas/Makefile utils/uri/Makefile"
case $host_os in
@@ -5135,8 +5135,6 @@ do
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"rcynic/Makefile") CONFIG_FILES="$CONFIG_FILES rcynic/Makefile" ;;
"rcynic/static-rsync/Makefile") CONFIG_FILES="$CONFIG_FILES rcynic/static-rsync/Makefile" ;;
- "rcynic-ng/Makefile") CONFIG_FILES="$CONFIG_FILES rcynic-ng/Makefile" ;;
- "rcynic-ng/static-rsync/Makefile") CONFIG_FILES="$CONFIG_FILES rcynic-ng/static-rsync/Makefile" ;;
"utils/Makefile") CONFIG_FILES="$CONFIG_FILES utils/Makefile" ;;
"utils/find_roa/Makefile") CONFIG_FILES="$CONFIG_FILES utils/find_roa/Makefile" ;;
"utils/hashdir/Makefile") CONFIG_FILES="$CONFIG_FILES utils/hashdir/Makefile" ;;
diff --git a/configure.ac b/configure.ac
index 24a3fb80..74047972 100644
--- a/configure.ac
+++ b/configure.ac
@@ -313,8 +313,6 @@ AC_SUBST(RPKID_SUBDIRS)
AC_CONFIG_FILES([Makefile
rcynic/Makefile
rcynic/static-rsync/Makefile
- rcynic-ng/Makefile
- rcynic-ng/static-rsync/Makefile
utils/Makefile
utils/find_roa/Makefile
utils/hashdir/Makefile
diff --git a/rcynic-ng/Doxyfile b/rcynic-ng/Doxyfile
deleted file mode 100644
index bac51c4f..00000000
--- a/rcynic-ng/Doxyfile
+++ /dev/null
@@ -1,1679 +0,0 @@
-# Doxyfile 1.7.3
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
-# by quotes) that should identify the project.
-
-PROJECT_NAME = rcynic
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER = 1.0
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give viewer a quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY = doc
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES = YES
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES = YES
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING =
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING = YES
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols
-
-SYMBOL_CACHE_SIZE = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE = YES
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES = YES
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES = NO
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function even if there is only one candidate or it is obvious which candidate to choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
-# will still accept a match between prototype and implementation in such cases.
-
-STRICT_PROTO_MATCHING = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES = YES
-
-# If the sources in your project are distributed over multiple directories
-# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
-# in the documentation. The default is NO.
-
-SHOW_DIRECTORIES = NO
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. The create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET = YES
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT = rcynic.c
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS =
-
-# The RECURSIVE tag can be used to turn specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-
-EXCLUDE =
-
-# The EXCLUDE_SYMLINKS tag can be used select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain image that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output.
-# If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis.
-# Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match.
-# The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# non of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER = YES
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C and C++ comments will always remain visible.
-
-STRIP_CODE_COMMENTS = NO
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = YES
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION = YES
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX = YES
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header.
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# stylesheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the stylesheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP = YES
-
-# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
-# files or namespaces will be aligned in HTML using tables. If set to
-# NO a bullet list will be used.
-
-HTML_ALIGN_MEMBERS = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded. For this to work a browser that supports
-# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
-# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID = org.doxygen.Project
-
-# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID = org.doxygen.Publisher
-
-# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's
-# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-# will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
-# top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it.
-
-DISABLE_INDEX = NO
-
-# This tag can be used to set the number of enum values (range [0,1..20])
-# that doxygen will group on one line in the generated HTML documentation.
-# Note that a value of 0 will completely suppress the enum values from appearing in the overview section.
-
-ENUM_VALUES_PER_LINE = 4
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-
-GENERATE_TREEVIEW = NO
-
-# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
-# and Class Hierarchy pages using a tree view instead of an ordered list.
-
-USE_INLINE_TREES = NO
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE = 10
-
-# Use the FORMULA_TRANPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want to formulas look prettier in the HTML
-# output. When enabled you also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX = NO
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the mathjax.org site, so you can quickly see the result without installing
-# MathJax, but it is strongly recommended to install a local copy of MathJax
-# before deployment.
-
-MATHJAX_RELPATH = http://www.mathjax.org/mathjax
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
-
-SERVER_BASED_SEARCH = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX = YES
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX = YES
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE = letter
-
-# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE = YES
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES = YES
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader.
-# This is useful
-# if you want to understand what is going on.
-# On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# in the INCLUDE_PATH (see below) will be searched if a #include is found.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED = DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition that overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles.
-# Optionally an initial location of the external documentation
-# can be added for each tagfile. The format of a tag file without
-# this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths or
-# URLs. If a location is present for each tag, the installdox tool
-# does not have to be run to correct the links.
-# Note that each tag file must have a unique name
-# (where the name does NOT include the path)
-# If a tag file is not located in the directory in which doxygen
-# is run, you must also specify the path to the tagfile here.
-
-TAGFILES =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS = NO
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT = YES
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS = 0
-
-# By default doxygen will write a font called Helvetica to the output
-# directory and reference it in all dot files that doxygen generates.
-# When you want a differently looking font you can specify the font name
-# using DOT_FONTNAME. You need to make sure dot is able to find the font,
-# which can be done by putting it in a standard location or by setting the
-# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
-# containing the font.
-
-DOT_FONTNAME = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE = 10
-
-# By default doxygen will tell dot to use the output directory to look for the
-# FreeSans.ttf font (which doxygen will put there itself). If you specify a
-# different font using DOT_FONTNAME you can set the path where dot
-# can find it using this tag.
-
-DOT_FONTPATH =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK = YES
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH = YES
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH = YES
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY = YES
-
-# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are png, svg or gif.
-# If left blank png will be used.
-
-DOT_IMAGE_FORMAT = png
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES, the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP = YES
diff --git a/rcynic-ng/Makefile.in b/rcynic-ng/Makefile.in
deleted file mode 100644
index 3b86a510..00000000
--- a/rcynic-ng/Makefile.in
+++ /dev/null
@@ -1,77 +0,0 @@
-# $Id$
-
-NAME = rcynic
-
-BIN = ${NAME}
-SRC = ${NAME}.c
-OBJ = ${NAME}.o
-
-HDR = defasn1.h
-GEN = defstack.h
-
-OBJS = ${OBJ} bio_f_linebreak.o
-
-CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror-implicit-function-declaration
-LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@
-LIBS = @LIBS@
-
-AWK = @AWK@
-XSLTPROC = @XSLTPROC@
-
-abs_top_srcdir = @abs_top_srcdir@
-abs_top_builddir = @abs_top_builddir@
-
-host_os = @host_os@
-
-all: ${BIN}
-
-clean:
- cd static-rsync; ${MAKE} $@
- rm -f ${BIN} ${OBJS} ${GEN}
-
-${OBJ}: ${SRC} ${HDR} ${GEN}
-
-${BIN}: ${OBJS}
- ${CC} ${CFLAGS} -o $@ ${OBJS} ${LDFLAGS} ${LIBS}
-
-defstack.h: defstack.awk ${SRC} ${HDR}
- ${AWK} -f >$@ defstack.awk ${SRC} ${HDR}
-
-test: ${BIN}
- if test -r rcynic.conf; then ./${BIN} -j 0 && echo && ./show.sh; else echo No rcynic.conf, skipping test; fi
-
-install: ${BIN} installation-scripts/install.sh
- cd installation-scripts; host_os="${host_os}"; DESTDIR="${DESTDIR}"; . ./install.sh
-
-uninstall deinstall:
- cd installation-scripts; host_os="${host_os}"; DESTDIR="${DESTDIR}"; . ./deinstall.sh
-
-distclean: clean docclean
- cd static-rsync; ${MAKE} $@
- rm -f show.sh installation-scripts/linux/install.sh Makefile
-
-tags: TAGS
-
-TAGS: ${SRC} ${HDR} ${GEN}
- etags ${SRC} ${HDR} ${GEN}
-
-rcynic.html: rcynic.xml rcynic.xsl
- ${XSLTPROC} -o $@ rcynic.xsl rcynic.xml
-
-# Doc stuff right now is just internals doc, of interest only to
-# programmers. Real doc for rcynic is still the README. This may
-# change some day.
-
-html:
- TZ='' doxygen
-
-pdf: html
- cd doc/latex && TZ='' ${MAKE} pdf && ln -f refman.pdf ../manual.pdf
-
-tgz: html
- cd doc && tar -cf - html | gzip -9 >manual.tar.gz
-
-docs: html pdf tgz
-
-docclean:
- rm -rf doc
diff --git a/rcynic-ng/README b/rcynic-ng/README
deleted file mode 100644
index fb810e6e..00000000
--- a/rcynic-ng/README
+++ /dev/null
@@ -1,582 +0,0 @@
--*- Text -*- $Id$
-
-"Cynical rsync" -- fetch and validate RPKI certificates.
-
-To build this you will need to link it against an OpenSSL libcrypto
-that has support for the RFC 3779 extensions. See ../openssl/README.
-
-I developed this code on FreeBSD 6-STABLE.  It is also known to
-work on Ubuntu (8.10) and Mac OS X (Snow Leopard). In theory it
-should run on any reasonably POSIX-like system. As far as I know I
-have not used any seriously non-portable features, but neither have I
-done a POSIX reference manual lookup for every function call. Please
-report any portability problems.
-
-All certificates and CRLs are in DER format, with filenames derived
-from the RPKI rsync URIs at which the data are published. See
-../utils/ and ../rtr-origin/ for tools that use rcynic's output.
-
-All configuration is via an OpenSSL-style configuration file, except
-for selection of the name of the configuration file itself. A few of
-the parameters can also be set from the command line, to simplify
-testing.  The default name for the configuration file is rcynic.conf; you
-can override this with the -c option on the command line. The config
-file uses OpenSSL's config file syntax, and you can set OpenSSL
-library configuration parameters (eg, "engine" settings) in the config
-file as well. rcynic's own configuration parameters are in a section
-called "[rcynic]".
-
-Most configuration parameters are optional and have defaults that
-should do something reasonable if you are running rcynic in a test
-directory.  If you're running it as a system program, perhaps under
-cron, you'll want to set additional parameters to tell rcynic where to
-find its data and where to write its output.
-
-The one thing you MUST specify in the config file in order for the
-program to do anything useful is the file name of one or more trust
-anchors. Trust anchors for this program are represented as
-DER-formatted X.509 objects that look just like certificates, except
-that they're trust anchors.
-
-Strictly speaking, trust anchors do not need to be self-signed, but
-many programs (including OpenSSL) assume that trust anchors will be
-self-signed. See the allow-non-self-signed-trust-anchor configuration
-option if you need to use a non-self-signed trust anchor, but be
-warned that the results, while technically correct, may not be useful.
-
-There are two ways of specifying trust anchors:
-
-- Via the "trust-anchor" directive, to name a local file containing
- the DER-encoded trust anchor.
-
-- Via the "trust-anchor-locator" directive, to name a local file
- containing a "trust anchor locator" (TAL). See draft-ietf-sidr-ta
- for details [update this once RFC has been issued].
-
-In most cases, except perhaps for testing, you will want to use trust
-anchor locators, since they allow the trust anchor itself to be
-updated without requiring reconfiguration of rcynic.
-
-See the make-tal.sh script in this directory if you need to generate
-your own TAL file for a trust anchor.
-
-As of this writing, there is still no global trust
-anchor for the RPKI system, so you have to specify separate trust
-anchors for each RIR that's publishing data:
-
-Example of a minimal config file:
-
- [rcynic]
-
- trust-anchor-locator.0 = trust-anchors/apnic.tal
- trust-anchor-locator.1 = trust-anchors/ripe.tal
- trust-anchor-locator.2 = trust-anchors/afrinic.tal
- trust-anchor-locator.3 = trust-anchors/lacnic.tal
-
-Eventually, this should all be collapsed into a single trust anchor,
-so that relying parties (people running tools like rcynic) don't need
-to sort out this sort of issue, at which point the above
-configuration can become something like:
-
- [rcynic]
-
- trust-anchor-locator = trust-anchors/iana.tal
-
-
-By default, rcynic uses two writable directory trees:
-
-- unauthenticated Raw data fetched via rsync. In order to take
- full advantage of rsync's optimized transfers,
- you should preserve and reuse this directory
- across rcynic runs, so that rcynic need not
- re-fetch data that have not changed.
-
-- authenticated Data that rcynic has checked. This is the
- real output of the process.
-
-authenticated is really a symbolic link to a directory with a name of
-the form authenticated.<timestamp>, where <timestamp> is an ISO 8601
-timestamp like 2001-04-01T01:23:45Z. rcynic creates a new timestamped
-directory every time it runs, and moves the symbolic link as an atomic
-operation when the validation process completes. The intent is that
-authenticated always points to the most recent usable validation
-results, so that programs which use rcynic's output don't need to
-worry about whether an rcynic run is in progress.
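-
-For example, with the default output directory, a directory listing
-might look something like this (reusing the example timestamp above,
-with unrelated columns elided):
-
-    $ ls -l rcynic-data/authenticated
-    lrwxr-xr-x ... authenticated -> authenticated.2001-04-01T01:23:45Z
-
-Consumers of rcynic's output should simply follow the symbolic link
-rather than trying to guess timestamped directory names.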
-
-rcynic stores trust anchors specified via the trust-anchor-locator
-directive in the unauthenticated tree just like any other fetched
-object, and copies them into the authenticated tree just like any
-other object once they pass rcynic's checks.
-
-rcynic copies trust anchors specified via the "trust-anchor" directive
-into the top level directory of the authenticated tree as xxxxxxxx.n.cer,
-where xxxxxxxx and n are the OpenSSL object name hash and index within
-the resulting virtual hash bucket (the same as the c_hash Perl script
-that comes with OpenSSL would produce), and ".cer" is the literal
-string ".cer". The reason for this is that these trust anchors, by
-definition, are not fetched automatically, and thus do not really have
-publication URIs in the sense that every other object in these trees
-does.  So rcynic uses a naming scheme which ensures (a) that each trust
-anchor has a unique name within the output tree and (b) that trust
-anchors cannot be confused with certificates: trust anchors always go
-in the top level of the tree, data fetched via rsync always go in
-subdirectories.
-
-As currently implemented, rcynic does not attempt to maintain an
-in-memory cache of objects it might need again later. It does keep an
-internal cache of the URIs from which it has already fetched data in
-this pass, and it keeps a stack containing the current certificate
-chain as it does its validation walk. All other data (eg, CRLs) are
-freed immediately after use and read from disk again as needed. From
-a database design standpoint, this is not very efficient, but as
-rcynic's main bottlenecks are expected to be crypto and network
-operations, it seemed best to keep the design as simple as possible,
-at least until execution profiling demonstrates a real issue here.
-
-Usage and configuration:
-
-Logging levels:
-
-rcynic has its own system of logging levels, similar to what syslog()
-uses but customized to the specific task rcynic performs. Levels:
-
- log_sys_err Error from operating system or library
- log_usage_err Bad usage (local configuration error)
- log_data_err Bad data (broken certificates or CRLs)
- log_telemetry Normal chatter about rcynic's progress
- log_verbose Extra verbose chatter
- log_debug Only useful when debugging
-
-Command line options:
-
- -c configfile Path to configuration file (default: rcynic.conf)
- -l loglevel Logging level (default: log_data_err)
- -s Log via syslog
- -e Log via stderr when also using syslog
- -j Start-up jitter interval (see below; default: 600)
- -V Print rcynic's version to standard output and exit
-
-Configuration file:
-
-rcynic uses the OpenSSL libcrypto configuration file mechanism. All
-libcrypto configuration options (eg, for engine support) are
-available. All rcynic-specific options are in the "[rcynic]"
-section. You -must- have a configuration file in order for rcynic to
-do anything useful, as the configuration file is the only way to list
-your trust anchors.
-
-Configuration variables:
-
-authenticated Path to output directory (where rcynic should
- place objects it has been able to validate).
-
- Default: rcynic-data/authenticated
-
-
-unauthenticated Path to directory where rcynic should store
-                        unauthenticated data retrieved via rsync.
- Unless something goes horribly wrong, you want
- rcynic to preserve and reuse this directory
- across runs to minimize the network traffic
- necessary to bring your repository mirror up
- to date.
-
- Default: rcynic-data/unauthenticated
-
-
-rsync-timeout How long (in seconds) to let rsync run before
- terminating the rsync process, or zero for no
- timeout. You want this timeout to be fairly
- long, to avoid terminating rsync connections
- prematurely. It's present to let you defend
- against evil rsync server operators who try to
- tarpit your connection as a form of denial of
- service attack on rcynic.
-
- Default: 300 seconds.
-
-
-max-parallel-fetches Upper limit on the number of copies of rsync
- that rcynic is allowed to run at once. Used
- properly, this can speed up synchronization
- considerably when fetching from repositories
- built with sub-optimal tree layouts or when
- dealing with unreachable repositories. Used
- improperly, this option can generate excessive
- load on repositories, cause synchronization to
- be interrupted by firewalls, and generally
-                        create a public nuisance.  Use with
- caution.
-
- As of this writing, values in the range 2-4
- are reasonably safe. At least one RIR
- currently refuses service at settings above 4,
- and another RIR appears to be running some
- kind of firewall that silently blocks
-                        connections when it decides that the
- connection rate is excessive.
-
- rcynic can't really detect all of the possible
- problems created by excessive values of this
-                        parameter, but if rcynic's report shows
-                        both successful and skipped retrievals
-                        from the same repository host,
- that's a pretty good hint that something is
- wrong, and an excessive value here is a good
- first guess as to the cause.
-
- Default: 1
-
-
-rsync-program Path to the rsync program.
-
- Default: rsync, but you should probably set
- this variable rather than just trusting the
- PATH environment variable to be set correctly.
-
-
-log-level Same as -l option on command line. Command
- line setting overrides config file setting.
-
-                        Default: log_data_err
-
-
-use-syslog Same as -s option on command line. Command
- line setting overrides config file setting.
-
- Values: true or false.
-
- Default: false
-
-
-use-stderr Same as -e option on command line. Command
- line setting overrides config file setting.
-
- Values: true or false.
-
- Default: false, but if neither use-syslog nor
- use-stderr is set, log output goes to stderr.
-
-
-syslog-facility Syslog facility to use.
-
- Default: local0
-
-
-
-syslog-priority-xyz (where xyz is an rcynic logging level, above)
- Override the syslog priority value to use when
- logging messages at this rcynic level.
-
- Defaults:
-
- syslog-priority-log_sys_err: err
- syslog-priority-log_usage_err: err
- syslog-priority-log_data_err: notice
- syslog-priority-log_telemetry: info
- syslog-priority-log_verbose: info
- syslog-priority-log_debug: debug
-
-
-jitter Startup jitter interval, same as -j option on
- command line. Jitter interval, specified in
- number of seconds. rcynic will pick a random
- number within the interval from zero to this
- value, and will delay for that many seconds on
- startup. The purpose of this is to spread the
- load from large numbers of rcynic clients all
- running under cron with synchronized clocks,
- in particular to avoid hammering the RPKI
- rsync servers into the ground at midnight UTC.
-
- Default: 600
-
-
-lockfile Name of lockfile, or empty for no lock. If
- you run rcynic under cron, you should use this
- parameter to set a lockfile so that successive
- instances of rcynic don't stomp on each other.
-
- Default: no lock
-
-xml-summary Enable output of a per-host summary at the
- end of an rcynic run in XML format. Some
- users prefer this to the log_telemetry style
- of logging, or just want it in addition to
- logging. Value: filename to which XML summary
- should be written; "-" will send XML summary
- to stdout.
-
- Default: no XML summary
-
-
-allow-stale-crl Allow use of CRLs which are past their
- nextUpdate timestamp. This is probably
- harmless, but since it may be an early warning
- of problems, it's configurable.
-
- Values: true or false.
-
- Default: true
-
-
-prune Clean up old files corresponding to URIs that
- rcynic did not see at all during this run.
- rcynic invokes rsync with the --delete option
- to clean up old objects from collections that
- rcynic revisits, but if a URI changes so that
- rcynic never visits the old collection again,
- old files will remain in the local mirror
- indefinitely unless you enable this option.
-
- Values: true or false.
-
- Default: true
-
-
-allow-stale-manifest Allow use of manifests which are past their
- nextUpdate timestamp. This is probably
- harmless, but since it may be an early warning
- of problems, it's configurable.
-
- Values: true or false.
-
- Default: true
-
-
-require-crl-in-manifest Reject manifests which don't list the CRL
- covering the manifest EE certificate.
-
- Values: true or false.
-
- Default: false
-
-
-allow-non-self-signed-trust-anchor
- Experimental. Attempts to work around OpenSSL's
- strong preference for self-signed trust
- anchors. Do not use this unless you really know
- what you are doing.
-
- Values: true or false.
-
- Default: false
-
-
-run-rsync
-
- Whether to run rsync to fetch data. You don't
- want to change this except when building
- complex topologies where rcynic running on one
-                        set of machines acts as an aggregator for
- another set of validators. A large ISP might
- want to build such a topology so that they
- could have a local validation cache in each
- POP while minimizing load on the global
- repository system and maintaining some degree
-                        of internal consistency between POPs.  In such
- cases, one might want the rcynic instances in
- the POPs to validate data fetched from the
- aggregators via an external process, without
- the POP rcynic instances attempting to fetch
- anything themselves.
-
- Don't touch this unless you really know what
- you're doing.
-
- Values: true or false.
-
- Default: true
-
-
-use-links Whether to use hard links rather than copying
- valid objects from the unauthenticated to
- authenticated tree. Using links is slightly
- more fragile (anything that stomps on the
- unauthenticated file also stomps on the
- authenticated file) but is a bit faster and
- reduces the number of inodes consumed by a
- large data collection. At the moment, copying
- is the default behavior, but this may change
- in the future.
-
- Values: true or false.
-
- Default: false
-
-
-trust-anchor Specify one RPKI trust anchor, represented as
- a local file containing an X.509 certificate
- in DER format. Value of this option is the
- pathname of the file.
-
- No default.
-
-
-trust-anchor-locator
- Specify one RPKI trust anchor, represented as
- a local file containing an rsync URI and the
- RSA public key of the X.509 object specified
- by the URI. First line of the file is the
- URI, remainder is the public key in Base64
- encoded DER format. Value of this option
- is the pathname of the file.
-
- No default.
-
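-As an illustration, a TAL file looks something like this (the URI and
-key shown here are fabricated, not a real trust anchor; the Base64 key
-may be split across several lines):
-
-    rsync://rpki.example.net/repository/root.cer
-    MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A...(rest of the Base64-encoded
-    DER public key)...IDAQAB
-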
-
-There's a companion XSLT template in rcynic.xsl, which translates what
-the xml-summary option writes into HTML.
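-
-For example, if you set "xml-summary = rcynic.xml" in rcynic.conf, you
-can turn the resulting summary into HTML with something like:
-
-    $ xsltproc -o rcynic.html rcynic.xsl rcynic.xml
-
-which is essentially what the Makefile's rcynic.html target does.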
-
-
-
-Running rcynic chrooted
-
-This is an attempt to describe the process of setting up rcynic in a
-chrooted environment. The installation scripts that ship with rcynic
-attempt to do this automatically for the platforms we support, but the
-process is somewhat finicky, so some explanation seems in order. If
-you're running on one of the supported platforms, the following steps
-may be handled for you by the Makefiles, but you may still want to
-understand what all this is trying to do.
-
-rcynic itself does not include any direct support for running
-chrooted, but is designed to be (relatively) easy to run in a chroot
-jail. Here's how.
-
-You'll either need statically linked copies of rcynic and rsync, or
-you'll need to figure out which shared libraries these programs need
-(try using the "ldd" command).  Here we assume statically linked
-binaries, because that's simpler.
-
-You'll need a chroot wrapper program. Your platform may already have
-one (FreeBSD does -- /usr/sbin/chroot), but if you don't, you can
-download Wietse Venema's "chrootuid" program from:
-
- ftp://ftp.porcupine.org/pub/security/chrootuid1.3.tar.gz
-
-Warning: The chroot program included in at least some Linux
-distributions is not adequate to this task; you need a wrapper that
-knows how to drop privileges after performing the chroot() operation
-itself. If in doubt, use chrootuid.
-
-Unfortunately, the precise details of setting up a proper chroot jail
-vary wildly from one system to another, so the following instructions
-will likely not be a precise match for the preferred way of doing this
-on any particular platform. We have sample scripts that do the right
-thing for FreeBSD, feel free to contribute such scripts for other
-platforms.
-
-Step 1: Build the static binaries. You might want to test them at
-this stage too, although you can defer that until after you've got the
-jail built.
-
-Step 2: Create a userid under which to run rcynic. Here we'll assume
-that you've created a user "rcynic", whose default group is also named
-"rcynic". Do not add any other userids to the rcynic group unless you
-really know what you are doing.
-
-Step 3: Build the jail. You'll need, at minimum, a directory in which
-to put the binaries, a subdirectory tree that's writable by the userid
-which will be running rcynic and rsync, your trust anchors, and
-whatever device inodes the various libraries need on your system.
-Most likely the devices that matter will be /dev/null, /dev/random,
-and /dev/urandom.  If you're running a FreeBSD system with devfs, you
-do this by mounting and configuring a devfs instance in the jail; on
-other platforms you probably use the mknod program or something similar.
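-
-On a typical Linux system, for example, creating those device nodes by
-hand might look something like this (the major/minor numbers shown are
-the conventional Linux values; check your own /dev and copy its
-permissions if in doubt):
-
-    # mknod -m 666 /var/rcynic/dev/null    c 1 3
-    # mknod -m 666 /var/rcynic/dev/random  c 1 8
-    # mknod -m 666 /var/rcynic/dev/urandom c 1 9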
-
-Important: other than the directories that you want rcynic and rsync
-to be able to modify, -nothing- in the initial jail setup should be
-writable by the rcynic userid. In particular, rcynic and rsync should
--not- be allowed to modify: their own binary images, any of the
-configuration files, or your trust anchors. It's simplest just to
-have root own all the files and directories that rcynic and rsync are
-not allowed to modify, and make sure that the permissions for all of
-those directories and files make them writable only by root.
-
-Sample jail tree, assuming that we're putting all of this under
-/var/rcynic:
-
- # mkdir /var/rcynic
- # mkdir /var/rcynic/bin
- # mkdir /var/rcynic/data
- # mkdir /var/rcynic/dev
- # mkdir /var/rcynic/etc
- # mkdir /var/rcynic/etc/trust-anchors
-
-Copy your trust anchors into /var/rcynic/etc/trust-anchors.
-
-Copy the statically linked rcynic and rsync into /var/rcynic/bin.
-
-Copy /etc/resolv.conf and /etc/localtime (if it exists) into
-/var/rcynic/etc.
-
-Write an rcynic configuration file as /var/rcynic/etc/rcynic.conf
-(path names in this file must match the jail setup, more below).
-
- # chmod -R go-w /var/rcynic
- # chown -R root:wheel /var/rcynic
- # chown -R rcynic:rcynic /var/rcynic/data
-
-If you're using devfs, arrange for it to be mounted at
-/var/rcynic/dev; otherwise, create whatever device inodes you need in
-/var/rcynic/dev and make sure that they have sane permissions (copying
-whatever permissions are used in your system /dev directory should
-suffice).
-
-rcynic.conf to match this configuration:
-
- [rcynic]
-
- trust-anchor-locator.1 = /etc/trust-anchors/ta-1.tal
- trust-anchor-locator.2 = /etc/trust-anchors/ta-2.tal
- trust-anchor-locator.3 = /etc/trust-anchors/ta-3.tal
-
- rsync-program = /bin/rsync
- authenticated = /data/authenticated
- unauthenticated = /data/unauthenticated
-
-Once you've got all this set up, you're ready to try running rcynic in
-the jail. Try it from the command line first, then if that works, you
-should be able to run it under cron.
-
-Note: chroot, chrootuid, and other programs of this type are usually
-intended to be run by root, and should -not- be setuid programs unless
-you -really- know what you are doing.
-
-Sample command line:
-
- # /usr/local/bin/chrootuid /var/rcynic rcynic /bin/rcynic -s -c /etc/rcynic.conf
-
-Note that we use absolute pathnames everywhere. This is not an
-accident. Programs running in jails under cron should not make
-assumptions about the current working directory or environment
-variable settings, and programs running in chroot jails would need
-different PATH settings anyway. Best just to specify everything.
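-
-Putting it all together, a root crontab entry might look something
-like this (the minute shown is arbitrary; pick your own random value
-to help spread load on the rsync servers, and avoid midnight UTC):
-
-    17 * * * * /usr/local/bin/chrootuid /var/rcynic rcynic /bin/rcynic -s -c /etc/rcynic.conf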
-
-Building static binaries:
-
-On FreeBSD, building a statically linked rsync is easy: just set the
-environment variable LDFLAGS='-static' before building the rsync port
-and the right thing will happen. Since this is really just GNU
-configure picking up the environment variable, the same trick should
-work on other platforms...except that some compilers don't support
--static, and some platforms are missing some or all of the non-shared
-libraries you'd need to link the resulting binary.
-
-For simplicity, I've taken the same approach with rcynic, so
-
- $ make LDFLAGS='-static'
-
-should work. Except that you don't even have to do that: static
-linking is the default where supported, because I run it jailed.
-
-syslog:
-
-Depending on your syslogd configuration, syslog may not work properly
-with rcynic in a chroot jail. On FreeBSD, the easiest way to fix this
-is to add the following lines to /etc/rc.conf:
-
- altlog_proglist="named rcynic"
- rcynic_chrootdir="/var/rcynic"
- rcynic_enable="YES"
diff --git a/rcynic-ng/installation-scripts/darwin/RCynic/RCynic b/rcynic-ng/installation-scripts/darwin/RCynic/RCynic
deleted file mode 100755
index bf36d6e5..00000000
--- a/rcynic-ng/installation-scripts/darwin/RCynic/RCynic
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/sh -
-#
-# $Id$
-#
-. /etc/rc.common
-
-name="rcynic"
-start_cmd="rcynic_start"
-stop_cmd="rcynic_stop"
-
-: ${rcynic_jaildir="/var/rcynic"}
-: ${rcynic_user="rcynic"}
-: ${rcynic_group="rcynic"}
-
-StartService()
-{
- /bin/test -d "${rcynic_jaildir}" || /bin/mkdir "${rcynic_jaildir}"
- /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
-
- /usr/sbin/mtree -deU -p "${rcynic_jaildir}" <<EOF
-
- /set type=dir uname=root gname=wheel mode=0555
- .
- bin
- ..
- dev
- ..
- etc
- trust-anchors
- ..
- ..
- usr
- lib
- system
- ..
- ..
- ..
- var
- run
- ..
- ..
- data uname=${rcynic_user} gname=${rcynic_group} mode=0755
- ..
- ..
-EOF
-
- /bin/chmod -R a-w "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
- /usr/sbin/chown -R root:wheel "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
-
- if ! /sbin/mount_devfs devfs "${rcynic_jaildir}/dev"; then
- echo "Mounting devfs on ${rcynic_jaildir}/dev failed..."
- exit 1
- fi
-
- for i in /etc/localtime /etc/resolv.conf; do
- j="${rcynic_jaildir}${i}"
- if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- fi
- done
-
- /bin/ln -f /var/run/mDNSResponder "${rcynic_jaildir}/var/run/mDNSResponder"
-}
-
-StopService()
-{
- /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
-}
-
-RestartService()
-{
- StartService
-}
-
-RunService "$1"
diff --git a/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist b/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist
deleted file mode 100644
index ca46b676..00000000
--- a/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
- <key>Description</key>
- <string>RCynic Setup</string>
- <key>OrderPreference</key>
- <string>None</string>
- <key>Provides</key>
- <array>
- <string>RCynic</string>
- </array>
- <key>Uses</key>
- <array>
- <string>Network</string>
- <string>Resolver</string>
- </array>
- </dict>
-</plist>
diff --git a/rcynic-ng/installation-scripts/darwin/install.sh b/rcynic-ng/installation-scripts/darwin/install.sh
deleted file mode 100644
index 1f0dfe7d..00000000
--- a/rcynic-ng/installation-scripts/darwin/install.sh
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Create a chroot jail for rcynic.
-#
-# This is approximately what a pkg-install script might do if this were
-# a FreeBSD port. Perhaps some day it will be.
-
-: ${jaildir="${DESTDIR}/var/rcynic"}
-: ${jailuser="rcynic"}
-: ${jailgroup="rcynic"}
-: ${jailname="RPKI Validation System"}
-: ${setupcron="YES"}
-
-echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
-
-if /usr/bin/dscl . -read "/Groups/${jailgroup}" >/dev/null 2>&1
-then
- echo "You already have a group \"${jailgroup}\", so I will use it."
-elif gid="$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $2 >= gid {gid = 1 + $2} END {print gid}')" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" RealName "${jailname}" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" PrimaryGroupID "$gid" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" GeneratedUID "$(/usr/bin/uuidgen)" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" Password "*"
-then
- echo "Added group \"${jailgroup}\"."
-else
- echo "Adding group \"${jailgroup}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-if /usr/bin/dscl . -read "/Users/${jailuser}" >/dev/null 2>&1
-then
- echo "You already have a user \"${jailuser}\", so I will use it."
-elif uid="$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $2 >= uid {uid = 1 + $2} END {print uid}')" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" UserShell "/usr/bin/false" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" RealName "${jailname}" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" UniqueID "$uid" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" PrimaryGroupID "$gid" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" NFSHomeDirectory "/var/empty" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" GeneratedUID "$(/usr/bin/uuidgen)" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" Password "*"
-then
- echo "Added user \"${jailuser}\"."
-else
- echo "Adding user \"${jailuser}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-if ! /bin/test -d "${jaildir}"; then
- /bin/mkdir "${jaildir}"
-fi
-
-if /usr/bin/install -o root -g wheel -d ${DESTDIR}/Library/StartupItems/RCynic &&
- /usr/bin/install -o root -g wheel -m 555 RCynic/RCynic RCynic/StartupParameters.plist ${DESTDIR}/Library/StartupItems/RCynic; then
- echo "Installed ${DESTDIR}/Library/StartupItems/RCynic"
-else
- echo "Installing ${DESTDIR}/Library/StartupItems/RCynic failed"
- exit 1
-fi
-
-echo "Running ${DESTDIR}/Library/StartupItems/RCynic/RCynic to set up directories"
-
-if ! rcynic_jaildir="$jaildir" rcynic_user="$jailuser" rcynic_group="$jailgroup" /Library/StartupItems/RCynic/RCynic start; then
- echo "Directory setup failed"
- exit 1
-fi
-
-if /bin/test -r "$jaildir/etc/rcynic.conf"; then
- echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
-elif /usr/bin/install -m 444 -o root -g wheel -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
- echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
- for i in ../../sample-trust-anchors/*.tal; do
- j="$jaildir/etc/trust-anchors/${i##*/}"
- /bin/test -r "$i" || continue
- /bin/test -r "$j" && continue
- echo "Installing $i as $j"
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- done
- j=1
- for i in $jaildir/etc/trust-anchors/*.tal; do
- echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
- j=$((j+1))
- done
-else
- echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
- exit 1
-fi
-
-echo "Installing rcynic as ${jaildir}/bin/rcynic"
-
-/usr/bin/install -m 555 -o root -g wheel -p ../../rcynic "${jaildir}/bin/rcynic"
-
-if /bin/test -x "$jaildir/bin/rsync"; then
- echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
-elif /usr/bin/install -m 555 -o root -g wheel -p /usr/bin/rsync "${jaildir}/bin/rsync"; then
- echo "Installed ${jaildir}/bin/rsync"
-else
- echo "Installing ${jaildir}/bin/rsync failed"
- exit 1
-fi
-
-echo "Copying required shared libraries"
-
-shared_libraries="${jaildir}/bin/rcynic ${jaildir}/bin/rsync"
-while true
-do
- closure="$(/usr/bin/otool -L ${shared_libraries} | /usr/bin/awk '/:$/ {next} {print $1}' | /usr/bin/sort -u)"
- if test "x$shared_libraries" = "x$closure"
- then
- break
- else
- shared_libraries="$closure"
- fi
-done
-
-for shared in /usr/lib/dyld $shared_libraries
-do
- if /bin/test -r "${jaildir}/${shared}"
- then
- echo "You already have a \"${jaildir}/${shared}\", so I will use it"
- elif /usr/bin/install -m 555 -o root -g wheel -p "${shared}" "${jaildir}/${shared}"
- then
- echo "Copied ${shared} into ${jaildir}"
- else
- echo "Unable to copy ${shared} into ${jaildir}"
- exit 1
- fi
-done
-
-if /usr/bin/install -m 444 -o root -g wheel -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
- echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
-else
- echo "Installing rcynic.xsl failed"
- exit 1
-fi
-
-echo "Setting up root's crontab to run jailed rcynic"
-
-case "$setupcron" in
-YES|yes)
- /usr/bin/crontab -l -u root 2>/dev/null |
- /usr/bin/awk -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
- BEGIN {
- cmd = "exec /usr/sbin/chroot -u " jailuser " -g " jailgroup " " jaildir;
- cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
- }
- $0 !~ cmd {
- print;
- }
- END {
- "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
- printf "%u * * * *\t%s\n", $1 % 60, cmd;
- }' |
- /usr/bin/crontab -u root -
- /bin/cat <<EOF
-
- crontab is set up to run rcynic hourly, at a randomly selected
- minute (to spread load on the rsync servers). Please do NOT
- adjust this to run on the hour. In particular please do NOT
- adjust this to run at midnight UTC.
-EOF
- ;;
-
-*)
- /bin/cat <<EOF
-
- You'll need to add a crontab entry running the following command as root:
-
- /usr/sbin/chroot -u $jailuser -g $jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
-
- Please try to pick a random time for this, don't just run it on the hour,
- or at local midnight, or, worst of all, at midnight UTC.
-
-EOF
- ;;
-
-esac
-
-/bin/cat <<EOF
-
- Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
- If you did not install your own trust anchors, a default set
- of SAMPLE trust anchors may have been installed for you, but
- you, the relying party, are the only one who can decide
- whether you trust those anchors. rcynic will not do anything
- useful without good trust anchors.
-
-EOF
diff --git a/rcynic-ng/installation-scripts/deinstall.sh b/rcynic-ng/installation-scripts/deinstall.sh
deleted file mode 100644
index 113794a4..00000000
--- a/rcynic-ng/installation-scripts/deinstall.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -
-# $Id$
-
-echo Sorry, automated deinstallation of rcynic is not implemented yet
-exit 1
diff --git a/rcynic-ng/installation-scripts/freebsd/install.sh b/rcynic-ng/installation-scripts/freebsd/install.sh
deleted file mode 100644
index 6c3453cb..00000000
--- a/rcynic-ng/installation-scripts/freebsd/install.sh
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Create a chroot jail for rcynic.
-#
-# This is approximately what a pkg-install script might do if this were
-# a FreeBSD port. Perhaps some day it will be.
-
-: ${jaildir="${DESTDIR}/var/rcynic"}
-: ${jailuser="rcynic"}
-: ${jailgroup="rcynic"}
-: ${setupcron="NO"}
-
-echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
-
-if /usr/sbin/pw groupshow "${jailgroup}" 2>/dev/null; then
- echo "You already have a group \"${jailgroup}\", so I will use it."
-elif /usr/sbin/pw groupadd ${jailgroup}; then
- echo "Added group \"${jailgroup}\"."
-else
- echo "Adding group \"${jailgroup}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-if /usr/sbin/pw usershow "${jailuser}" 2>/dev/null; then
- echo "You already have a user \"${jailuser}\", so I will use it."
-elif /usr/sbin/pw useradd ${jailuser} -g ${jailgroup} -h - -d /nonexistant -s /usr/sbin/nologin -c "RPKI validation system"; then
- echo "Added user \"${jailuser}\"."
-else
- echo "Adding user \"${jailuser}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-if ! /bin/test -d "${jaildir}"; then
- /bin/mkdir "${jaildir}"
-fi
-
-if /usr/bin/install -m 555 -o root -g wheel -p rc.d.rcynic ${DESTDIR}/usr/local/etc/rc.d/rcynic; then
- echo "Installed rc.d.rcynic as ${DESTDIR}/usr/local/etc/rc.d/rcynic"
-else
- echo "Installing ${DESTDIR}/usr/local/etc/rc.d/rcynic failed"
- exit 1
-fi
-
-echo "Running /usr/local/etc/rc.d/rcynic to set up directories"
-
-if ! rcynic_jaildir="$jaildir" rcynic_user="$jailuser" rcynic_group="$jailgroup" /bin/sh /usr/local/etc/rc.d/rcynic start; then
- echo "Directory setup failed"
- exit 1
-fi
-
-if /bin/test -r "$jaildir/etc/rcynic.conf"; then
- echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
-elif /usr/bin/install -m 444 -o root -g wheel -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
- echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
- for i in ../../sample-trust-anchors/*.tal; do
- j="$jaildir/etc/trust-anchors/${i##*/}"
- /bin/test -r "$i" || continue
- /bin/test -r "$j" && continue
- echo "Installing $i as $j"
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- done
- j=1
- for i in $jaildir/etc/trust-anchors/*.tal; do
- echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
- j=$((j+1))
- done
-else
- echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
- exit 1
-fi
-
-echo "Installing rcynic as ${jaildir}/bin/rcynic"
-
-/usr/bin/install -m 555 -o root -g wheel -p ../../rcynic "${jaildir}/bin/rcynic"
-
-if /bin/test ! -x "$jaildir/bin/rsync" -a ! -x ../../static-rsync/rsync; then
- echo "Building static rsync for jail, this may take a little while"
- (cd ../../static-rsync && exec make)
-fi
-
-if /bin/test -x "$jaildir/bin/rsync"; then
- echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
-elif /usr/bin/install -m 555 -o root -g wheel -p ../../static-rsync/rsync "${jaildir}/bin/rsync"; then
- echo "Installed static rsync as \"${jaildir}/bin/rsync\""
-else
- echo "Installing static rsync failed"
- exit 1
-fi
-
-if /usr/bin/install -m 444 -o root -g wheel -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
- echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
-else
- echo "Installing rcynic.xsl failed"
- exit 1
-fi
-
-echo "Setting up root's crontab to run jailed rcynic"
-
-case "$setupcron" in
-YES|yes)
- /usr/bin/crontab -l -u root 2>/dev/null |
- /usr/bin/awk -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
- BEGIN {
- cmd = "exec /usr/sbin/chroot -u " jailuser " -g " jailgroup " " jaildir;
- cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
- }
- $0 !~ cmd {
- print;
- }
- END {
- "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
- printf "%u * * * *\t%s\n", $1 % 60, cmd;
- }' |
- /usr/bin/crontab -u root -
- /bin/cat <<EOF
-
- crontab is set up to run rcynic hourly, at a randomly selected
- minute (to spread load on the rsync servers). Please do NOT
- adjust this to run on the hour. In particular please do NOT
- adjust this to run at midnight UTC.
-EOF
- ;;
-
-*)
- /bin/cat <<EOF
-
- You'll need to add a crontab entry running the following command as root:
-
- /usr/sbin/chroot -u $jailuser -g $jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
-
- Please try to pick a random time for this, don't just run it on the hour,
- or at local midnight, or, worst of all, at midnight UTC.
-
-EOF
- ;;
-
-esac
-
-/bin/cat <<EOF
-
- Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
- If you did not install your own trust anchors, a default set
- of SAMPLE trust anchors may have been installed for you, but
- you, the relying party, are the only one who can decide
- whether you trust those anchors. rcynic will not do anything
- useful without good trust anchors.
-
-EOF
diff --git a/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic b/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic
deleted file mode 100755
index b86c9b81..00000000
--- a/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh -
-#
-# $Id$
-#
-# PROVIDE: rcynic
-# REQUIRE: DAEMON
-# KEYWORD: nojail
-
-. /etc/rc.subr
-
-name="rcynic"
-start_cmd="rcynic_start"
-stop_cmd="rcynic_stop"
-
-: ${rcynic_jaildir="/var/rcynic"}
-: ${rcynic_user="rcynic"}
-: ${rcynic_group="rcynic"}
-
-rcynic_start()
-{
- /bin/test -d "${rcynic_jaildir}" || /bin/mkdir "${rcynic_jaildir}"
- /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
-
- /usr/sbin/mtree -deU -p "${rcynic_jaildir}" <<EOF
-
- /set type=dir uname=root gname=wheel mode=0555
- .
- bin
- ..
- dev
- ..
- etc
- trust-anchors
- ..
- ..
- var
- run
- ..
- ..
- data uname=${rcynic_user} gname=${rcynic_group} mode=0755
- ..
- ..
-EOF
-
- /bin/chmod -R a-w "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
- /usr/sbin/chown -R root:wheel "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
-
- if ! /sbin/mount -t devfs dev "${rcynic_jaildir}/dev"; then
- echo "Mounting devfs on ${rcynic_jaildir}/dev failed..."
- exit 1
- fi
-
- /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply hide
- /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply path null unhide
- /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply path random unhide
-
- for i in /etc/localtime /etc/resolv.conf; do
- j="${rcynic_jaildir}${i}"
- if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- fi
- done
-}
-
-rcynic_stop()
-{
- /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/rcynic-ng/installation-scripts/install.sh b/rcynic-ng/installation-scripts/install.sh
deleted file mode 100644
index 8c708a28..00000000
--- a/rcynic-ng/installation-scripts/install.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh -
-# $Id$
-
-set -e
-
-case "${host_os}" in
-
-freebsd*) cd freebsd; . ./install.sh;;
-darwin*) cd darwin; . ./install.sh;;
-linux*) cd linux; . ./install.sh;;
-
-*) echo 1>&2 "Don't know how to install rcynic jail on platform ${host_os}"
- exit 1;;
-esac
diff --git a/rcynic-ng/installation-scripts/linux/install.sh.in b/rcynic-ng/installation-scripts/linux/install.sh.in
deleted file mode 100644
index f0bdc505..00000000
--- a/rcynic-ng/installation-scripts/linux/install.sh.in
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Create a chroot jail for rcynic.
-#
-# This is approximately what a package installation script might do.
-
-: ${jaildir="${DESTDIR}/var/rcynic"}
-: ${jailuser="rcynic"}
-: ${jailgroup="rcynic"}
-: ${setupcron="YES"}
-
-AWK='@AWK@'
-SORT='@SORT@'
-
-echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
-
-if ${AWK} -F: -v jailgroup="${jailgroup}" 'BEGIN {status = 1} $1 == jailgroup {status = 0} END {exit status}' /etc/group
-then
- echo "You already have a group \"${jailgroup}\", so I will use it."
-elif /usr/sbin/groupadd ${jailgroup}
-then
- echo "Added group \"${jailgroup}\"."
-else
- echo "Adding group \"${jailgroup}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-# The adduser program on CentOS 5.x uses the -n flag instead of -N to
-# avoid creating the associated group.
-nogroup='-N'
-if test -f /etc/redhat-release; then
- read vendor release version < /etc/redhat-release
- if test $vendor = CentOS; then
- nogroup='-n'
- fi
-fi
-
-if ${AWK} -F: -v jailuser="${jailuser}" 'BEGIN {status = 1} $1 == jailuser {status = 0} END {exit status}' /etc/passwd
-then
- echo "You already have a user \"${jailuser}\", so I will use it."
-elif /usr/sbin/useradd -g ${jailgroup} -M $nogroup -d "${jaildir}" -s /sbin/nologin -c "RPKI validation system" ${jailuser}
-then
- echo "Added user \"${jailuser}\"."
-else
- echo "Adding user \"${jailuser}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-# test for x86_64 target
-if test -d /lib64; then
- libdir=/lib64
-else
- libdir=/lib
-fi
-
-echo "Building directories"
-
-if ! /bin/mkdir -p -v -m 555 \
- "${jaildir}/bin" \
- "${jaildir}/dev" \
- "${jaildir}/etc/trust-anchors" \
- "${jaildir}/${libdir}" \
- "${jaildir}/usr/lib" \
- "${jaildir}/data"
-then
- echo "Unable to build directories under \"${jaildir}\", please fix this then try again."
- exit 1
-fi
-
-echo "Installing device inodes"
-
-if ! (cd /dev; /bin/ls null zero random urandom | /bin/cpio -puv "${jaildir}/dev")
-then
- echo "Unable to install device inodes in ${jaildir}/dev/, please fix this then try again"
- exit 1
-fi
-
-echo "Copying files from /etc"
-
-for i in /etc/localtime /etc/resolv.conf /etc/passwd /etc/group
-do
- j="${jaildir}${i}"
- if test -r "$i" &&
- ! /usr/bin/cmp -s "$i" "$j" &&
- ! /bin/cp -p "$i" "$j"
- then
- echo "Unable to copy $i to ${jaildir}, please fix this then try again"
- exit 1
- fi
-done
-
-echo "Whacking file permissions"
-
-if ! /bin/chmod -R a-w "${jaildir}/bin" "${jaildir}/etc" ||
- ! /bin/chmod -R 755 "${jaildir}/data" ||
- ! /bin/chown -R root:root "${jaildir}/bin" "${jaildir}/etc" ||
- ! /bin/chown -R "${jailuser}:${jailgroup}" "${jaildir}/data"
-then
- echo "Unable to set file permissions and ownerships correctly, please fix this and try again"
- exit 1
-fi
-
-if test -r "$jaildir/etc/rcynic.conf"; then
- echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
-elif /usr/bin/install -m 444 -o root -g root -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
- echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
- for i in ../../sample-trust-anchors/*.tal; do
- j="$jaildir/etc/trust-anchors/${i##*/}"
- test -r "$i" || continue
- test -r "$j" && continue
- echo "Installing $i as $j"
- /usr/bin/install -m 444 -o root -g root -p "$i" "$j"
- done
- j=1
- for i in $jaildir/etc/trust-anchors/*.tal; do
- echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
- j=$((j+1))
- done
-else
- echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
- exit 1
-fi
-
-echo "Installing rcynic as ${jaildir}/bin/rcynic"
-
-/usr/bin/install -m 555 -o root -g root -p ../../rcynic "${jaildir}/bin/rcynic"
-
-if test -x "$jaildir/bin/rsync"; then
- echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
-elif /usr/bin/install -m 555 -o root -g root -p /usr/bin/rsync "${jaildir}/bin/rsync"; then
- echo "Installed ${jaildir}/bin/rsync"
-else
- echo "Installing ${jaildir}/bin/rsync failed"
- exit 1
-fi
-
-echo "Copying required shared libraries"
-
-shared_libraries="${jaildir}/bin/rcynic ${jaildir}/bin/rsync"
-while true
-do
- closure="$(/usr/bin/ldd ${shared_libraries} |
- ${AWK} -v "rcynic=${jaildir}/bin/rcynic" -v "rsync=${jaildir}/bin/rsync" \
- '{sub(/:$/, "")} $0 == rcynic || $0 == rsync {next} {for (i = 1; i <= NF; i++) if ($i ~ /^\//) print $i}' |
- ${SORT} -u)"
- if test "x$shared_libraries" = "x$closure"
- then
- break
- else
- shared_libraries="$closure"
- fi
-done
-
-# Under CentOS 5.5, rsync requires libresolv, but ldd doesn't show
-# it.
-if test -f ${libdir}/libresolv.so.2; then
- shared_libraries="${shared_libraries} ${libdir}/libresolv.so.2"
-fi
-
-for shared in ${libdir}/ld*.so $shared_libraries ${libdir}/libnss*.so.*
-do
- if test -r "${jaildir}/${shared}"
- then
- echo "You already have a \"${jaildir}${shared}\", so I will use it"
- elif /usr/bin/install -m 555 -o root -g root -d "${jaildir}${shared%/*}" &&
- /usr/bin/install -m 555 -o root -g root -p "${shared}" "${jaildir}${shared}"
- then
- echo "Copied ${shared} into ${jaildir}"
- else
- echo "Unable to copy ${shared} into ${jaildir}"
- exit 1
- fi
-done
-
-if /usr/bin/install -m 444 -o root -g root -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
- echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
-else
- echo "Installing rcynic.xsl failed"
- exit 1
-fi
-
-echo "Setting up root's crontab to run jailed rcynic"
-
-case "$setupcron" in
-YES|yes)
- /usr/bin/crontab -l -u root 2>/dev/null |
- ${AWK} -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
- BEGIN {
- cmd = "exec /usr/sbin/chroot --userspec=" jailuser ":" jailgroup " " jaildir;
- cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
- }
- $0 !~ cmd {
- print;
- }
- END {
- "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
- printf "%u * * * *\t%s\n", $1 % 60, cmd;
- }' |
- /usr/bin/crontab -u root -
- /bin/cat <<EOF
-
- crontab is set up to run rcynic hourly, at a randomly selected
- minute (to spread load on the rsync servers). Please do NOT
- adjust this to run on the hour. In particular please do NOT
- adjust this to run at midnight UTC.
-EOF
- ;;
-
-*)
- /bin/cat <<EOF
-
- You'll need to add a crontab entry running the following command as root:
-
-    /usr/sbin/chroot --userspec=$jailuser:$jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
-
- Please try to pick a random time for this, don't just run it on the hour,
- or at local midnight, or, worst of all, at midnight UTC.
-
-EOF
- ;;
-
-esac
-
-/bin/cat <<EOF
-
- Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
- If you did not install your own trust anchors, a default set
- of SAMPLE trust anchors may have been installed for you, but
- you, the relying party, are the only one who can decide
- whether you trust those anchors. rcynic will not do anything
- useful without good trust anchors.
-
-EOF
diff --git a/rcynic-ng/installation-scripts/sample-rcynic.conf b/rcynic-ng/installation-scripts/sample-rcynic.conf
deleted file mode 100644
index 6e464e7e..00000000
--- a/rcynic-ng/installation-scripts/sample-rcynic.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-# $Id$
-#
-# Sample rcynic configuration file for FreeBSD jailed environment
-
-[rcynic]
-rsync-program = /bin/rsync
-authenticated = /data/authenticated
-old-authenticated = /data/authenticated.old
-unauthenticated = /data/unauthenticated
-lockfile = /data/lock
-jitter = 600
-use-syslog = true
-log-level = log_usage_err
-
-# You need to specify some trust anchors here, eg:
-
-#trust-anchor.1 = /etc/trust-anchors/ta-1.cer
-#trust-anchor.2 = /etc/trust-anchors/ta-2.cer
-
-# or, using the "Trust Anchor Locator" form:
-
-#trust-anchor-locator.1 = /etc/trust-anchors/ta-1.tal
-#trust-anchor-locator.2 = /etc/trust-anchors/ta-2.tal
-
-# The choice between these two formats depends largely on the policies
-# of the entity generating the corresponding trust anchor, ie, will
-# probably be made for you by the generating entity.
diff --git a/rcynic-ng/make-tal.sh b/rcynic-ng/make-tal.sh
deleted file mode 100755
index 854a76b9..00000000
--- a/rcynic-ng/make-tal.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# Generate a trust anchor locator (TAL) given the rsync URI for a
-# self-signed RFC 3779 certificate.
-#
-# Usage: make-tal.sh uri [local_copy_of_certificate]
-#
-# The optional second parameter is the name of a local copy of the
-# certificate to be checked against the copy retrieved from the URI;
-# if present, this should be a local X.509 file in DER format.
-
-case "$1" in rsync://*) :;; *) echo 1>&2 "\"$1\" is not a rsync URI"; exit 1;; esac
-
-tmpfile="make-tal.tmp.$$"
-trap "rm -f $tmpfile" 0 1 2 15
-
-rsync "$1" "$tmpfile" || exit
-
-if test -n "$2"
-then
- diff -q "$tmpfile" "$2" || exit
-fi
-
-echo "$1"
-echo
-openssl x509 -inform DER -in "$tmpfile" -pubkey -noout |
-awk '!/-----(BEGIN|END)/'
diff --git a/rcynic-ng/rcynic.c b/rcynic-ng/rcynic.c
deleted file mode 100644
index 09fe1360..00000000
--- a/rcynic-ng/rcynic.c
+++ /dev/null
@@ -1,4644 +0,0 @@
-/*
- * Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- *
- * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN")
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/* $Id$ */
-
-/**
- * @mainpage
- *
- * "Cynical rsync": Recursively walk RPKI tree using rsync to pull
- * data from remote sites, validating certificates and CRLs as we go.
- *
- * Doxygen doesn't quite know what to make of a one-file C program,
- * and ends up putting most of the interesting data @link rcynic.c
- * here. @endlink
- */
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/file.h>
-#include <errno.h>
-#include <sys/signal.h>
-#include <sys/wait.h>
-#include <time.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <dirent.h>
-#include <limits.h>
-#include <fcntl.h>
-#include <signal.h>
-#include <utime.h>
-#include <glob.h>
-#include <sys/param.h>
-
-#define SYSLOG_NAMES /* defines CODE prioritynames[], facilitynames[] */
-#include <syslog.h>
-
-#include <openssl/bio.h>
-#include <openssl/pem.h>
-#include <openssl/err.h>
-#include <openssl/x509.h>
-#include <openssl/x509v3.h>
-#include <openssl/safestack.h>
-#include <openssl/conf.h>
-#include <openssl/rand.h>
-#include <openssl/asn1t.h>
-#include <openssl/cms.h>
-
-#include "bio_f_linebreak.h"
-
-#include "defstack.h"
-#include "defasn1.h"
-
-#if !defined(FILENAME_MAX) && defined(PATH_MAX) && PATH_MAX > 1024
-#define FILENAME_MAX PATH_MAX
-#elif !defined(FILENAME_MAX)
-#define FILENAME_MAX 1024
-#endif
-
-#define SCHEME_RSYNC ("rsync://")
-#define SIZEOF_RSYNC (sizeof(SCHEME_RSYNC) - 1)
-
-/**
- * Maximum length of a URI.
- */
-#define URI_MAX (FILENAME_MAX + SIZEOF_RSYNC)
-
-/**
- * Maximum number of times we try to kill an inferior process before
- * giving up.
- */
-#define KILL_MAX 10
-
-#ifndef HOSTNAME_MAX
-#define HOSTNAME_MAX 256
-#endif
-
-/**
- * Version number of XML summary output.
- */
-#define XML_SUMMARY_VERSION 1
-
-/**
- * How much buffer space do we need for a raw address?
- */
-#define ADDR_RAW_BUF_LEN 16
-
-/**
- * Logging levels. Same general idea as syslog(), but our own
- * categories based on what makes sense for this program.  Default
- * mappings to syslog() priorities are here because it's the easiest
- * way to make sure that we assign a syslog level to each of ours.
- */
-
-#define LOG_LEVELS \
- QQ(log_sys_err, LOG_ERR) /* Error from OS or library */ \
- QQ(log_usage_err, LOG_ERR) /* Bad usage (local error) */ \
- QQ(log_data_err, LOG_NOTICE) /* Bad data, no biscuit */ \
- QQ(log_telemetry, LOG_INFO) /* Normal progress chatter */ \
- QQ(log_verbose, LOG_INFO) /* Extra chatter */ \
- QQ(log_debug, LOG_DEBUG) /* Only useful when debugging */
-
-#define QQ(x,y) x ,
-typedef enum log_level { LOG_LEVELS LOG_LEVEL_T_MAX } log_level_t;
-#undef QQ
-
-#define QQ(x,y) { #x , x },
-static const struct {
- const char *name;
- log_level_t value;
-} log_levels[] = {
- LOG_LEVELS
-};
-#undef QQ
-
-/**
- * MIB counters derived from OpenSSL. Long list of validation failure
- * codes from OpenSSL (crypto/x509/x509_vfy.h).
- */
-
-#define MIB_COUNTERS_FROM_OPENSSL \
- QV(X509_V_ERR_UNABLE_TO_GET_CRL) \
- QV(X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE) \
- QV(X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE) \
- QV(X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY) \
- QV(X509_V_ERR_CERT_SIGNATURE_FAILURE) \
- QV(X509_V_ERR_CRL_SIGNATURE_FAILURE) \
- QV(X509_V_ERR_CERT_NOT_YET_VALID) \
- QV(X509_V_ERR_CERT_HAS_EXPIRED) \
- QV(X509_V_ERR_CRL_NOT_YET_VALID) \
- QV(X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD) \
- QV(X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD) \
- QV(X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD) \
- QV(X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD) \
- QV(X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) \
- QV(X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN) \
- QV(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY) \
- QV(X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE) \
- QV(X509_V_ERR_CERT_CHAIN_TOO_LONG) \
- QV(X509_V_ERR_CERT_REVOKED) \
- QV(X509_V_ERR_INVALID_CA) \
- QV(X509_V_ERR_PATH_LENGTH_EXCEEDED) \
- QV(X509_V_ERR_INVALID_PURPOSE) \
- QV(X509_V_ERR_CERT_UNTRUSTED) \
- QV(X509_V_ERR_CERT_REJECTED) \
- QV(X509_V_ERR_AKID_SKID_MISMATCH) \
- QV(X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH) \
- QV(X509_V_ERR_KEYUSAGE_NO_CERTSIGN) \
- QV(X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER) \
- QV(X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION) \
- QV(X509_V_ERR_KEYUSAGE_NO_CRL_SIGN) \
- QV(X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION) \
- QV(X509_V_ERR_INVALID_NON_CA) \
- QV(X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED) \
- QV(X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE) \
- QV(X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED) \
- QV(X509_V_ERR_INVALID_EXTENSION) \
- QV(X509_V_ERR_INVALID_POLICY_EXTENSION) \
- QV(X509_V_ERR_NO_EXPLICIT_POLICY) \
- QV(X509_V_ERR_UNNESTED_RESOURCE)
-
-/**
- * MIB counters specific to rcynic.
- */
-
-#define MIB_COUNTERS \
- MIB_COUNTERS_FROM_OPENSSL \
- QB(aia_mismatch, "Mismatched AIA extension") \
- QB(aia_missing, "AIA extension missing") \
- QB(certificate_bad_crl, "Bad certificate CRL") \
- QB(certificate_bad_signature, "Bad certificate signature") \
- QB(certificate_digest_mismatch, "Certificate digest mismatch") \
- QB(certificate_failed_validation, "Certificate failed validation") \
- QB(crl_digest_mismatch, "CRL digest mismatch") \
- QB(crl_not_in_manifest, "CRL not listed in manifest") \
- QB(crl_not_yet_valid, "CRL not yet valid") \
- QB(crldp_mismatch, "CRLDP doesn't match issuer's SIA") \
- QB(crldp_missing, "CRLDP extension missing") \
- QB(disallowed_extension, "Disallowed X.509v3 extension") \
- QB(ghostbuster_bad_crl, "Ghostbuster EE has bad CRL") \
- QB(ghostbuster_bad_econtenttype, "Bad Ghostbuster eContentType") \
- QB(ghostbuster_digest_mismatch, "Ghostbuster digest mismatch") \
- QB(ghostbuster_invalid_cms, "Ghostbuster validation failure") \
- QB(ghostbuster_invalid_ee, "Invalid Ghostbuster certificate") \
- QB(ghostbuster_missing_signer, "Missing Ghostbuster signer") \
- QB(hash_too_long, "Hash value is too long") \
-  QB(malformed_crldp,                 "Malformed CRLDP extension")            \
- QB(malformed_roa_addressfamily, "Malformed ROA addressFamily") \
- QB(malformed_sia, "Malformed SIA extension") \
- QB(manifest_bad_econtenttype, "Bad manifest eContentType") \
- QB(manifest_decode_error, "Manifest decode error") \
- QB(manifest_invalid_cms, "Manifest validation failure") \
- QB(manifest_invalid_ee, "Invalid manifest certificate") \
- QB(manifest_malformed_crldp, "Malformed manifest CRLDP") \
- QB(manifest_mismatch, "Manifest doesn't match SIA") \
- QB(manifest_missing, "Manifest pointer missing") \
- QB(manifest_missing_crldp, "Missing manifest CRLDP") \
- QB(manifest_missing_signer, "Missing manifest signer") \
- QB(manifest_not_yet_valid, "Manifest not yet valid") \
- QB(manifest_wrong_version, "Wrong manifest version") \
- QB(object_rejected, "Object rejected") \
- QB(roa_bad_afi, "ROA contains bad AFI value") \
- QB(roa_bad_crl, "ROA EE has bad CRL") \
- QB(roa_bad_econtenttype, "Bad ROA eContentType") \
- QB(roa_decode_error, "ROA decode error") \
- QB(roa_digest_mismatch, "ROA digest mismatch") \
- QB(roa_invalid_cms, "ROA validation failure") \
- QB(roa_invalid_ee, "Invalid ROA certificate") \
- QB(roa_missing_signer, "Missing ROA signer") \
- QB(roa_not_nested, "ROA resource not in EE") \
- QB(roa_resources_malformed, "ROA resources malformed") \
- QB(roa_wrong_version, "Wrong ROA version") \
- QB(rsync_failed, "rsync transfer failed") \
- QB(rsync_timed_out, "rsync transfer timed out") \
- QB(sia_missing, "SIA extension missing") \
- QB(trust_anchor_key_mismatch, "Trust anchor key mismatch") \
- QB(trust_anchor_with_crldp, "Trust anchor can't have CRLDP") \
- QB(unknown_verify_error, "Unknown OpenSSL verify error") \
- QB(unreadable_trust_anchor, "Unreadable trust anchor") \
- QB(unreadable_trust_anchor_locator, "Unreadable trust anchor locator") \
- QB(uri_too_long, "URI too long") \
- QW(nonconformant_issuer_name, "Nonconformant X.509 issuer name") \
- QW(nonconformant_subject_name, "Nonconformant X.509 subject name") \
- QW(rsync_skipped, "rsync transfer skipped") \
- QW(stale_crl, "Stale CRL") \
- QW(stale_manifest, "Stale manifest") \
- QW(tainted_by_stale_crl, "Tainted by stale CRL") \
- QW(tainted_by_stale_manifest, "Tainted by stale manifest") \
- QW(tainted_by_not_being_in_manifest, "Tainted by not being in manifest") \
- QW(trust_anchor_not_self_signed, "Trust anchor not self-signed") \
- QW(unknown_object_type_skipped, "Unknown object type skipped") \
- QG(current_cert_recheck, "Certificate rechecked") \
- QG(object_accepted, "Object accepted") \
- QG(rsync_succeeded, "rsync transfer succeeded") \
- QG(validation_ok, "OK")
-
-#define QV(x) QB(mib_openssl_##x, 0)
-
-static const char
- mib_counter_kind_good[] = "good",
- mib_counter_kind_warn[] = "warn",
- mib_counter_kind_bad[] = "bad";
-
-#define QG(x,y) mib_counter_kind_good ,
-#define QW(x,y) mib_counter_kind_warn ,
-#define QB(x,y) mib_counter_kind_bad ,
-static const char * const mib_counter_kind[] = { MIB_COUNTERS NULL };
-#undef QB
-#undef QW
-#undef QG
-
-#define QG(x,y) QQ(x,y)
-#define QW(x,y) QQ(x,y)
-#define QB(x,y) QQ(x,y)
-
-#define QQ(x,y) x ,
-typedef enum mib_counter { MIB_COUNTERS MIB_COUNTER_T_MAX } mib_counter_t;
-#undef QQ
-
-#define QQ(x,y) y ,
-static const char * const mib_counter_desc[] = { MIB_COUNTERS NULL };
-#undef QQ
-
-#define QQ(x,y) #x ,
-static const char * const mib_counter_label[] = { MIB_COUNTERS NULL };
-#undef QQ
-
-#undef QV
-
-#define QQ(x,y) 0 ,
-#define QV(x) x ,
-static const long mib_counter_openssl[] = { MIB_COUNTERS 0 };
-#undef QV
-#undef QQ
-
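
The LOG_LEVELS and MIB_COUNTERS machinery above is the classic "X-macro" pattern: one master list is expanded several times with different definitions of QQ (and QB/QW/QG/QV) so that the enum and its parallel string tables can never drift out of sync. A minimal self-contained sketch of the same idea, using hypothetical color names rather than anything from rcynic:

    #include <stdio.h>

    #define COLOR_LIST \
      QQ(red,   "stop") \
      QQ(green, "go")   \
      QQ(blue,  "calm")

    #define QQ(x,y) color_##x ,
    typedef enum { COLOR_LIST COLOR_MAX } color_t;                   /* enum values */
    #undef QQ

    #define QQ(x,y) #x ,
    static const char * const color_label[] = { COLOR_LIST NULL };   /* names */
    #undef QQ

    #define QQ(x,y) y ,
    static const char * const color_desc[] = { COLOR_LIST NULL };    /* descriptions */
    #undef QQ

    int main(void)
    {
      int i;
      for (i = 0; i < COLOR_MAX; i++)
        printf("%s: %s\n", color_label[i], color_desc[i]);
      return 0;
    }

rcynic uses the same trick so that adding one line to MIB_COUNTERS automatically updates mib_counter_t, mib_counter_label[], mib_counter_desc[], mib_counter_kind[] and mib_counter_openssl[] in a single place.
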
-/**
- * Object sources. We always try to get fresh copies of objects using
- * rsync, but if that fails we try using backup copies from what
- * worked the last time we were run. This means that a URI
- * potentially represents two different objects, so we need to
- * distinguish them for tracking purposes in our validation log.
- */
-
-#define OBJECT_GENERATIONS \
- QQ(null) \
- QQ(current) \
- QQ(backup)
-
-#define QQ(x) object_generation_##x ,
-typedef enum object_generation { OBJECT_GENERATIONS OBJECT_GENERATION_MAX } object_generation_t;
-#undef QQ
-
-#define QQ(x) #x ,
-static const char * const object_generation_label[] = { OBJECT_GENERATIONS NULL };
-#undef QQ
-
-/**
- * Type-safe string wrapper for URIs.
- */
-typedef struct { char s[URI_MAX]; } uri_t;
-
-/**
- * Type-safe string wrapper for filename paths.
- */
-typedef struct { char s[FILENAME_MAX]; } path_t;
-
-/**
- * Type-safe string wrapper for hostnames.
- */
-typedef struct { char s[HOSTNAME_MAX]; } hostname_t;
-
-/**
- * Type-safe wrapper for hash buffers.
- */
-typedef struct { unsigned char h[EVP_MAX_MD_SIZE]; } hashbuf_t;
-
-/**
- * Per-URI validation status object.
- * uri must be first element.
- */
-typedef struct validation_status {
- uri_t uri;
- object_generation_t generation;
- time_t timestamp;
- unsigned char events[(MIB_COUNTER_T_MAX + 7) / 8];
-} validation_status_t;
-
-DECLARE_STACK_OF(validation_status_t)
-
-/**
- * Structure to hold data parsed out of a certificate.
- */
-typedef struct certinfo {
- int ca, ta;
- object_generation_t generation;
- uri_t uri, sia, aia, crldp, manifest;
-} certinfo_t;
-
-typedef struct rcynic_ctx rcynic_ctx_t;
-
-/**
- * States that a walk_ctx_t can be in.
- */
-typedef enum {
- walk_state_initial, /**< Initial state */
- walk_state_rsync, /**< rsyncing certinfo.sia */
- walk_state_ready, /**< Ready to traverse outputs */
- walk_state_current, /**< prefix = rc->unauthenticated */
- walk_state_backup, /**< prefix = rc->old_authenticated */
- walk_state_done /**< Done walking this cert's outputs */
-} walk_state_t;
-
-/**
- * Context for certificate tree walks. This includes all the stuff
- * that we would keep as automatic variables on the call stack if we
- * didn't have to use callbacks to support multiple rsync processes.
- */
-typedef struct walk_ctx {
- unsigned refcount;
- certinfo_t certinfo;
- X509 *cert;
- Manifest *manifest;
- STACK_OF(OPENSSL_STRING) *filenames;
- int manifest_iteration, filename_iteration, stale_manifest;
- walk_state_t state;
-} walk_ctx_t;
-
-DECLARE_STACK_OF(walk_ctx_t)
-
-/**
- * Return codes from rsync functions.
- */
-typedef enum {
- rsync_status_done, /* Request completed */
- rsync_status_failed, /* Request failed */
- rsync_status_timed_out, /* Request timed out */
- rsync_status_pending, /* Request in progress */
- rsync_status_skipped /* Request not attempted */
-} rsync_status_t;
-
-/**
- * Context for asynchronous rsync.
- */
-typedef struct rsync_ctx {
- uri_t uri;
- void (*handler)(const rcynic_ctx_t *, const struct rsync_ctx *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *);
- STACK_OF(walk_ctx_t) *wsk;
- enum {
- rsync_state_initial, /* Must be first */
- rsync_state_running,
- rsync_state_conflict_wait,
- rsync_state_retry_wait,
- rsync_state_terminating
- } state;
- enum {
- rsync_problem_none, /* Must be first */
- rsync_problem_timed_out,
- rsync_problem_refused
- } problem;
- unsigned tries;
- pid_t pid;
- int fd;
- time_t started, deadline;
- char buffer[URI_MAX * 4];
- size_t buflen;
-} rsync_ctx_t;
-
-DECLARE_STACK_OF(rsync_ctx_t)
-
-/**
- * Deferred task.
- */
-typedef struct task {
- void (*handler)(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *);
- STACK_OF(walk_ctx_t) *wsk;
-} task_t;
-
-DECLARE_STACK_OF(task_t)
-
-/**
- * Extended context for verify callbacks. This is a wrapper around
- * OpenSSL's X509_STORE_CTX, and the embedded X509_STORE_CTX @em must be
- * the first element of this structure in order for the evil cast to
- * do the right thing. This is ugly but safe, as the C language
- * promises us that the address of the first element of a structure is
- * the same as the address of the structure.
- */
-typedef struct rcynic_x509_store_ctx {
- X509_STORE_CTX ctx; /* Must be first */
- const rcynic_ctx_t *rc;
- const certinfo_t *subject;
-} rcynic_x509_store_ctx_t;
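
To make the comment above concrete: because the embedded X509_STORE_CTX is the first member, a pointer to it and a pointer to the enclosing rcynic_x509_store_ctx_t share the same address, so a verify callback handed the plain OpenSSL context can recover the extra fields with a cast. A hypothetical sketch (example_verify_cb is illustrative only, not rcynic code):

    /* Illustrative only, not rcynic code. */
    static int example_verify_cb(int ok, X509_STORE_CTX *store_ctx)
    {
      /* Safe because store_ctx is the address of the wrapper's first member. */
      rcynic_x509_store_ctx_t *rctx = (rcynic_x509_store_ctx_t *) store_ctx;
      const rcynic_ctx_t *rc = rctx->rc;
      const certinfo_t *subject = rctx->subject;

      (void) rc;
      (void) subject;     /* a real callback would use these for logging and status recording */
      return ok;
    }
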
-
-/**
- * Program context that would otherwise be a mess of global variables.
- */
-struct rcynic_ctx {
- path_t authenticated, old_authenticated, new_authenticated, unauthenticated;
- char *jane, *rsync_program;
- STACK_OF(OPENSSL_STRING) *rsync_cache, *backup_cache, *dead_host_cache;
- STACK_OF(validation_status_t) *validation_status;
- STACK_OF(rsync_ctx_t) *rsync_queue;
- STACK_OF(task_t) *task_queue;
- int use_syslog, allow_stale_crl, allow_stale_manifest, use_links;
- int require_crl_in_manifest, rsync_timeout, priority[LOG_LEVEL_T_MAX];
- int allow_non_self_signed_trust_anchor, allow_object_not_in_manifest;
- int max_parallel_fetches, max_retries, retry_wait_min, run_rsync;
- log_level_t log_level;
- X509_STORE *x509_store;
-};
-
-
-/**
- * Subversion ID data.
- */
-static const char svn_id[] = "$Id$";
-
-/*
- * ASN.1 Object identifiers in form suitable for use with oid_cmp()
- */
-
-/** 1.3.6.1.5.5.7.48.2 */
-static const unsigned char id_ad_caIssuers[] =
- {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x2};
-
-/** 1.3.6.1.5.5.7.48.5 */
-static const unsigned char id_ad_caRepository[] =
- {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x5};
-
-/** 1.3.6.1.5.5.7.48.10 */
-static const unsigned char id_ad_rpkiManifest[] =
- {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0xa};
-
-/** 1.2.840.113549.1.9.16.1.24 */
-static const unsigned char id_ct_routeOriginAttestation[] =
- {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x18};
-
-/** 1.2.840.113549.1.9.16.1.26 */
-static const unsigned char id_ct_rpkiManifest[] =
- {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x1a};
-
-/** 1.2.840.113549.1.9.16.1.35 */
-static const unsigned char id_ct_rpkiGhostbusters[] =
- {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x23};
-
-/** 2.16.840.1.101.3.4.2.1 */
-static const unsigned char id_sha256[] =
- {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01};
-
-/**
- * RPKI certificate policy OID in form suitable for use with
- * X509_VERIFY_PARAM_add0_policy().
- */
-static const char rpki_policy_oid[] = "1.3.6.1.5.5.7.14.2";
-
-/**
- * Suffix we use temporarily during the symlink shuffle. Could be
- * almost anything, but we want to do the length check early, before
- * we waste a lot of work we'll just have to throw away, so we just
- * wire in something short and obvious.
- */
-static const char authenticated_symlink_suffix[] = ".new";
-
-
-
-/**
- * Type-safe wrapper around free() to keep safestack macros happy.
- */
-static void OPENSSL_STRING_free(OPENSSL_STRING s)
-{
- if (s)
- free(s);
-}
-
-/**
- * Wrapper around an idiom we use with OPENSSL_STRING stacks. There's
- * a bug in the current sk_OPENSSL_STRING_delete() macro that casts
- * the return value to the wrong type, so we cast it to something
- * innocuous here and avoid using that macro elsewhere.
- */
-static void sk_OPENSSL_STRING_remove(STACK_OF(OPENSSL_STRING) *sk, const char *str)
-{
- OPENSSL_STRING_free((void *) sk_OPENSSL_STRING_delete(sk, sk_OPENSSL_STRING_find(sk, str)));
-}
-
-/**
- * Allocate a new validation_status_t object.
- */
-static validation_status_t *validation_status_t_new(void)
-{
- validation_status_t *v = malloc(sizeof(*v));
- if (v)
- memset(v, 0, sizeof(*v));
- return v;
-}
-
-/**
- * Type-safe wrapper around free() to keep safestack macros happy.
- */
-static void validation_status_t_free(validation_status_t *v)
-{
- if (v)
- free(v);
-}
-
-
-
-/*
- * GCC attributes to help catch format string errors.
- */
-
-#ifdef __GNUC__
-
-static void logmsg(const rcynic_ctx_t *rc,
- const log_level_t level,
- const char *fmt, ...)
- __attribute__ ((format (printf, 3, 4)));
-#endif
-
-/**
- * Logging.
- */
-static void vlogmsg(const rcynic_ctx_t *rc,
- const log_level_t level,
- const char *fmt,
- va_list ap)
-{
- char tad[sizeof("00:00:00")+1];
- time_t tad_time;
-
- assert(rc && fmt);
-
- if (rc->log_level < level)
- return;
-
- if (rc->use_syslog) {
- vsyslog(rc->priority[level], fmt, ap);
- } else {
- time(&tad_time);
- strftime(tad, sizeof(tad), "%H:%M:%S", localtime(&tad_time));
- fprintf(stderr, "%s: ", tad);
- if (rc->jane)
- fprintf(stderr, "%s: ", rc->jane);
- vfprintf(stderr, fmt, ap);
- putc('\n', stderr);
- }
-}
-
-/**
- * Logging.
- */
-static void logmsg(const rcynic_ctx_t *rc,
- const log_level_t level,
- const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- vlogmsg(rc, level, fmt, ap);
- va_end(ap);
-}
-
-/**
- * Print OpenSSL library errors.
- */
-static void log_openssl_errors(const rcynic_ctx_t *rc)
-{
- const char *data, *file;
- unsigned long code;
- char error[256];
- int flags, line;
-
-  if (rc->log_level < log_verbose)
- return;
-
- while ((code = ERR_get_error_line_data(&file, &line, &data, &flags))) {
- ERR_error_string_n(code, error, sizeof(error));
- if (data && (flags & ERR_TXT_STRING))
- logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s: %s", file, line, error, data);
- else
- logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s", file, line, error);
- }
-}
-
-/**
- * Configure logging.
- */
-static int configure_logmsg(rcynic_ctx_t *rc, const char *name)
-{
- int i;
-
- assert(rc && name);
-
- for (i = 0; i < sizeof(log_levels)/sizeof(*log_levels); i++) {
- if (!strcmp(name, log_levels[i].name)) {
- rc->log_level = log_levels[i].value;
- return 1;
- }
- }
-
- logmsg(rc, log_usage_err, "Bad log level %s", name);
- return 0;
-}
-
-/**
- * Configure syslog.
- */
-static int configure_syslog(const rcynic_ctx_t *rc,
- int *result,
- const CODE *table,
- const char *name)
-{
- assert(result && table && name);
-
- while (table->c_name && strcmp(table->c_name, name))
- table++;
-
- if (table->c_name) {
- *result = table->c_val;
- return 1;
- } else {
- logmsg(rc, log_usage_err, "Bad syslog code %s", name);
- return 0;
- }
-}
-
-/**
- * Configure boolean variable.
- */
-static int configure_boolean(const rcynic_ctx_t *rc,
- int *result,
- const char *val)
-{
- assert(rc && result && val);
-
- switch (*val) {
- case 'y': case 'Y': case 't': case 'T': case '1':
- *result = 1;
- return 1;
- case 'n': case 'N': case 'f': case 'F': case '0':
- *result = 0;
- return 1;
- default:
- logmsg(rc, log_usage_err, "Bad boolean value %s", val);
- return 0;
- }
-}
-
-/**
- * Configure integer variable.
- */
-static int configure_integer(const rcynic_ctx_t *rc,
- int *result,
- const char *val)
-{
- long res;
- char *p;
-
- assert(rc && result && val);
-
- res = strtol(val, &p, 10);
-
- if (*val != '\0' && *p == '\0') {
- *result = (int) res;
- return 1;
- } else {
- logmsg(rc, log_usage_err, "Bad integer value %s", val);
- return 0;
- }
-}
-
-
-
-/**
- * Make a directory if it doesn't already exist.
- */
-static int mkdir_maybe(const rcynic_ctx_t *rc, const path_t *name)
-{
- path_t path;
- char *s;
-
- assert(name != NULL);
- if (strlen(name->s) >= sizeof(path.s)) {
- logmsg(rc, log_data_err, "Pathname %s too long", name->s);
- return 0;
- }
- strcpy(path.s, name->s);
- s = path.s[0] == '/' ? path.s + 1 : path.s;
- if ((s = strrchr(s, '/')) == NULL)
- return 1;
- *s = '\0';
- if (!mkdir_maybe(rc, &path)) {
- logmsg(rc, log_sys_err, "Failed to make directory %s", path.s);
- return 0;
- }
- if (!access(path.s, F_OK))
- return 1;
- logmsg(rc, log_verbose, "Creating directory %s", path.s);
- return mkdir(path.s, 0777) == 0;
-}
-
-/**
- * strdup() a string and push it onto a stack.
- */
-static int sk_OPENSSL_STRING_push_strdup(STACK_OF(OPENSSL_STRING) *sk, const char *str)
-{
- OPENSSL_STRING s = strdup(str);
-
- if (s && sk_OPENSSL_STRING_push(sk, s))
- return 1;
- if (s)
- free(s);
- return 0;
-}
-
-/**
- * Compare two URI strings, for OpenSSL STACK operations.
- */
-
-static int uri_cmp(const char * const *a, const char * const *b)
-{
- return strcmp(*a, *b);
-}
-
-/**
- * Is string an rsync URI?
- */
-static int is_rsync(const char *uri)
-{
- return uri && !strncmp(uri, SCHEME_RSYNC, SIZEOF_RSYNC);
-}
-
-/**
- * Convert an rsync URI to a filename, checking for evil character
- * sequences. NB: This routine can't call mib_increment(), because
- * mib_increment() calls it, so errors detected here only go into
- * the log, not the MIB.
- */
-static int uri_to_filename(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix)
-{
- const char *u;
- size_t n;
-
- path->s[0] = '\0';
-
- if (!is_rsync(uri->s)) {
- logmsg(rc, log_telemetry, "%s is not an rsync URI, not converting to filename", uri->s);
- return 0;
- }
-
- u = uri->s + SIZEOF_RSYNC;
- n = strlen(u);
-
- if (u[0] == '/' || u[0] == '.' || strstr(u, "/../") ||
- (n >= 3 && !strcmp(u + n - 3, "/.."))) {
- logmsg(rc, log_data_err, "Dangerous URI %s, not converting to filename", uri->s);
- return 0;
- }
-
- if (prefix)
- n += strlen(prefix->s);
-
- if (n >= sizeof(path->s)) {
- logmsg(rc, log_data_err, "URI %s too long, not converting to filename", uri->s);
- return 0;
- }
-
- if (prefix) {
- strcpy(path->s, prefix->s);
- strcat(path->s, u);
- } else {
- strcpy(path->s, u);
- }
-
- return 1;
-}
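
A hypothetical illustration of the mapping (values invented for the example, not rcynic configuration):

    /*
     *   prefix = "unauthenticated/"
     *   uri    = "rsync://example.net/repo/foo.cer"
     *   path   = "unauthenticated/example.net/repo/foo.cer"
     *
     *   "rsync://example.net/repo/../../etc/passwd"  -> rejected as dangerous
     */
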
-
-/**
- * Extract a hostname from a URI.
- */
-static int uri_to_hostname(const uri_t *uri,
- hostname_t *hostname)
-{
- size_t n;
-
- if (!uri || !hostname || !is_rsync(uri->s) ||
- (n = strcspn(uri->s + SIZEOF_RSYNC, "/")) >= sizeof(hostname->s))
- return 0;
-
- strncpy(hostname->s, uri->s + SIZEOF_RSYNC, n);
- hostname->s[n] = '\0';
- return 1;
-}
-
-/**
- * OID comparison.
- */
-static int oid_cmp(const ASN1_OBJECT *obj, const unsigned char *oid, const size_t oidlen)
-{
- assert(obj != NULL && oid != NULL);
- if (obj->length != oidlen)
- return obj->length - oidlen;
- else
- return memcmp(obj->data, oid, oidlen);
-}
-
-/**
- * Get value of code in a validation_status_t.
- */
-static int validation_status_get_code(const validation_status_t *v,
- const mib_counter_t code)
-{
- assert(v && code < MIB_COUNTER_T_MAX);
- return (v->events[code / 8] & (1 << (code % 8))) != 0;
-}
-
-/**
- * Set value of code in a validation_status_t.
- */
-static void validation_status_set_code(validation_status_t *v,
- const mib_counter_t code,
- int value)
-{
- assert(v && code < MIB_COUNTER_T_MAX);
- if (value)
- v->events[code / 8] |= (1 << (code % 8));
- else
- v->events[code / 8] &= ~(1 << (code % 8));
-}
-
-/**
- * Add a validation status entry to internal log.
- */
-static void log_validation_status(const rcynic_ctx_t *rc,
- const uri_t *uri,
- const mib_counter_t code,
- const object_generation_t generation)
-{
- validation_status_t v_, *v = NULL;
- int was_set;
-
- assert(rc && uri && code < MIB_COUNTER_T_MAX && generation < OBJECT_GENERATION_MAX);
-
- if (!rc->validation_status)
- return;
-
- memset(&v_, 0, sizeof(v_));
- v_.uri = *uri;
- v_.generation = generation;
-
- v = sk_validation_status_t_value(rc->validation_status, sk_validation_status_t_find(rc->validation_status, &v_));
- if (v == NULL) {
- if ((v = validation_status_t_new()) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate validation status entry for %s", uri->s);
- return;
- }
- *v = v_;
- if (!sk_validation_status_t_push(rc->validation_status, v)) {
- logmsg(rc, log_sys_err, "Couldn't store validation status entry for %s", uri->s);
- free(v);
- return;
- }
- }
-
- was_set = validation_status_get_code(v, code);
-
- v->timestamp = time(0);
- validation_status_set_code(v, code, 1);
-
- if (!was_set)
- logmsg(rc, log_verbose, "Recording \"%s\" for %s%s%s",
- (mib_counter_desc[code]
- ? mib_counter_desc[code]
- : X509_verify_cert_error_string(mib_counter_openssl[code])),
- (generation != object_generation_null ? object_generation_label[generation] : ""),
- (generation != object_generation_null ? " " : ""),
- uri->s);
-}
-
-/**
- * Validation status object comparison.
- */
-static int validation_status_cmp(const validation_status_t * const *a, const validation_status_t * const *b)
-{
- int cmp = strcmp((*a)->uri.s, (*b)->uri.s);
- if (cmp)
- return cmp;
- cmp = (int) ((*a)->generation) - (int) ((*b)->generation);
- if (cmp)
- return cmp;
- return 0;
-}
-
-/**
- * Copy or link a file, as the case may be.
- */
-static int cp_ln(const rcynic_ctx_t *rc, const path_t *source, const path_t *target)
-{
- struct stat statbuf;
- struct utimbuf utimebuf;
- FILE *in = NULL, *out = NULL;
- int c, ok = 0;
-
- if (rc->use_links) {
- (void) unlink(target->s);
- ok = link(source->s, target->s) == 0;
- if (!ok)
- logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
- source->s, target->s, strerror(errno));
- return ok;
- }
-
- if ((in = fopen(source->s, "rb")) == NULL ||
- (out = fopen(target->s, "wb")) == NULL)
- goto done;
-
- while ((c = getc(in)) != EOF)
- if (putc(c, out) == EOF)
- goto done;
-
- ok = 1;
-
- done:
- ok &= !(in != NULL && fclose(in) == EOF);
- ok &= !(out != NULL && fclose(out) == EOF);
-
- if (!ok) {
- logmsg(rc, log_sys_err, "Couldn't copy %s to %s: %s",
- source->s, target->s, strerror(errno));
- return ok;
- }
-
- /*
-   * Preserve the file modification time to allow for detection of
- * changed objects in the authenticated directory. Failure to reset
- * the times is not optimal, but is also not critical, thus no
- * failure return.
- */
- if (stat(source->s, &statbuf) < 0 ||
- (utimebuf.actime = statbuf.st_atime,
- utimebuf.modtime = statbuf.st_mtime,
- utime(target->s, &utimebuf) < 0))
- logmsg(rc, log_sys_err, "Couldn't copy inode timestamp from %s to %s: %s",
- source->s, target->s, strerror(errno));
-
- return ok;
-}
-
-/**
- * Install an object.
- */
-static int install_object(const rcynic_ctx_t *rc,
- const uri_t *uri,
- const path_t *source,
- const mib_counter_t code,
- const object_generation_t generation)
-{
- path_t target;
-
- if (!uri_to_filename(rc, uri, &target, &rc->new_authenticated)) {
- logmsg(rc, log_data_err, "Couldn't generate installation name for %s", uri->s);
- return 0;
- }
-
- if (!mkdir_maybe(rc, &target)) {
- logmsg(rc, log_sys_err, "Couldn't create directory for %s", target.s);
- return 0;
- }
-
- if (!cp_ln(rc, source, &target))
- return 0;
- log_validation_status(rc, uri, code, generation);
- return 1;
-}
-
-/**
- * Check str for a suffix.
- */
-static int endswith(const char *str, const char *suffix)
-{
- size_t len_str, len_suffix;
- assert(str != NULL && suffix != NULL);
- len_str = strlen(str);
- len_suffix = strlen(suffix);
- return len_str >= len_suffix && !strcmp(str + len_str - len_suffix, suffix);
-}
-
-/**
- * Check str for a prefix.
- */
-static int startswith(const char *str, const char *prefix)
-{
- size_t len_str, len_prefix;
- assert(str != NULL && prefix != NULL);
- len_str = strlen(str);
- len_prefix = strlen(prefix);
- return len_str >= len_prefix && !strncmp(str, prefix, len_prefix);
-}
-
-
-/**
- * Set a directory name, adding or stripping trailing slash as needed.
- */
-static int set_directory(const rcynic_ctx_t *rc, path_t *out, const char *in, const int want_slash)
-{
- int has_slash, need_slash;
- size_t n;
-
- assert(rc && in && out);
-
- n = strlen(in);
-
- if (n == 0) {
- logmsg(rc, log_usage_err, "Empty path");
- return 0;
- }
-
- has_slash = in[n - 1] == '/';
-
- need_slash = want_slash && !has_slash;
-
- if (n + need_slash + 1 > sizeof(out->s)) {
- logmsg(rc, log_usage_err, "Path \"%s\" too long", in);
- return 0;
- }
-
- strcpy(out->s, in);
- if (need_slash)
- strcat(out->s, "/");
- else if (has_slash && !want_slash)
- out->s[n - 1] = '\0';
-
- return 1;
-}
-
-/**
- * Remove a directory tree, like rm -rf.
- */
-static int rm_rf(const path_t *name)
-{
- path_t path;
- struct dirent *d;
- size_t len;
- DIR *dir;
- int ret = 0, need_slash;
-
- assert(name);
- len = strlen(name->s);
- assert(len > 0 && len < sizeof(path.s));
- need_slash = name->s[len - 1] != '/';
-
- if (rmdir(name->s) == 0)
- return 1;
-
- switch (errno) {
- case ENOENT:
- return 1;
- case ENOTEMPTY:
- break;
- default:
- return 0;
- }
-
- if ((dir = opendir(name->s)) == NULL)
- return 0;
-
- while ((d = readdir(dir)) != NULL) {
- if (d->d_name[0] == '.' && (d->d_name[1] == '\0' || (d->d_name[1] == '.' && d->d_name[2] == '\0')))
- continue;
- if (len + strlen(d->d_name) + need_slash >= sizeof(path.s))
- goto done;
- strcpy(path.s, name->s);
- if (need_slash)
- strcat(path.s, "/");
- strcat(path.s, d->d_name);
- switch (d->d_type) {
- case DT_DIR:
- if (!rm_rf(&path))
- goto done;
- continue;
- default:
- if (unlink(path.s) < 0)
- goto done;
- continue;
- }
- }
-
- ret = rmdir(name->s) == 0;
-
- done:
- closedir(dir);
- return ret;
-}
-
-/**
- * Construct names for the directories not directly settable by the
- * user.
- *
- * This function also checks for an old-style rc->authenticated
- * directory, to simplify upgrade from older versions of rcynic.
- */
-static int construct_directory_names(rcynic_ctx_t *rc)
-{
- struct stat st;
- ssize_t n;
- path_t p;
- time_t t = time(0);
-
- p = rc->authenticated;
-
- n = strlen(p.s);
-
- if (n + sizeof(authenticated_symlink_suffix) >= sizeof(p.s)) {
- logmsg(rc, log_usage_err, "Symlink name would be too long");
- return 0;
- }
-
- if (strftime(p.s + n, sizeof(p.s) - n - 1, ".%Y-%m-%dT%H:%M:%SZ", gmtime(&t)) == 0) {
- logmsg(rc, log_usage_err, "Generated path with timestamp would be too long");
- return 0;
- }
-
- if (!set_directory(rc, &rc->new_authenticated, p.s, 1))
- return 0;
-
- if (!set_directory(rc, &rc->old_authenticated, rc->authenticated.s, 1))
- return 0;
-
- if (lstat(rc->authenticated.s, &st) == 0 && (st.st_mode & S_IFDIR) != 0 &&
- strlen(rc->authenticated.s) + sizeof(".old") < sizeof(p.s)) {
- p = rc->authenticated;
- strcat(p.s, ".old");
- rm_rf(&p);
- (void) rename(rc->authenticated.s, p.s);
- }
-
- if (lstat(rc->authenticated.s, &st) == 0 && (st.st_mode & S_IFDIR) != 0) {
- logmsg(rc, log_usage_err,
- "Existing %s directory is in the way, please remove it",
- rc->authenticated.s);
- return 0;
- }
-
- return 1;
-}
-
-/**
- * Do final symlink shuffle and cleanup of output directories.
- */
-static int finalize_directories(const rcynic_ctx_t *rc)
-{
- path_t path, sym, real_old, real_new;
- const char *dir;
- size_t n;
- glob_t g;
- int i;
-
- if (!realpath(rc->old_authenticated.s, real_old.s))
- real_old.s[0] = '\0';
-
- if (!realpath(rc->new_authenticated.s, real_new.s))
-    real_new.s[0] = '\0';
-
- path = rc->new_authenticated;
-
- n = strlen(path.s);
- assert(n > 1 && path.s[n - 1] == '/');
- path.s[n - 1] = '\0';
-
- if ((dir = strrchr(path.s, '/')) == NULL)
- dir = path.s;
- else
- dir++;
-
- sym = rc->authenticated;
-
- assert(strlen(sym.s) + sizeof(authenticated_symlink_suffix) < sizeof(sym.s));
- strcat(sym.s, authenticated_symlink_suffix);
-
- (void) unlink(sym.s);
-
- if (symlink(dir, sym.s) < 0) {
- logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
- sym.s, dir, strerror(errno));
- return 0;
- }
-
- if (rename(sym.s, rc->authenticated.s) < 0) {
- logmsg(rc, log_sys_err, "Couldn't rename %s to %s: %s",
- sym.s, rc->authenticated.s, strerror(errno));
- return 0;
- }
-
- path = rc->authenticated;
- assert(strlen(path.s) + sizeof(".*") < sizeof(path.s));
- strcat(path.s, ".*");
-
- memset(&g, 0, sizeof(g));
-
- if (real_new.s[0] && glob(path.s, 0, 0, &g) == 0)
- for (i = 0; i < g.gl_pathc; i++)
- if (realpath(g.gl_pathv[i], path.s) &&
- strcmp(path.s, real_old.s) &&
- strcmp(path.s, real_new.s))
- rm_rf(&path);
-
- return 1;
-}
-
-
-
-/**
- * Check to see whether a hostname is in the dead host cache.
- */
-static int dead_host_check(const rcynic_ctx_t *rc, const uri_t *uri)
-{
- hostname_t hostname;
-
- assert(rc && uri && rc->dead_host_cache);
-
- return (uri_to_hostname(uri, &hostname) &&
- sk_OPENSSL_STRING_find(rc->dead_host_cache, hostname.s) >= 0);
-}
-
-
-/**
- * Add an entry to the dead host cache.
- */
-static void dead_host_add(const rcynic_ctx_t *rc, const uri_t *uri)
-{
- hostname_t hostname;
-
- assert(rc && uri && rc->dead_host_cache);
-
- if (dead_host_check(rc, uri))
- return;
-
- if (!uri_to_hostname(uri, &hostname))
- return;
-
- (void) sk_OPENSSL_STRING_push_strdup(rc->dead_host_cache, hostname.s);
-}
-
-
-
-/**
- * Test whether a pair of URIs "conflict", that is, whether attempting
- * to rsync both of them at the same time in parallel might cause
- * unpredictable behavior. Might need a better name for this test.
- *
- * Returns non-zero iff the two URIs "conflict".
- */
-static int conflicting_uris(const uri_t *a, const uri_t *b)
-{
- size_t len_a, len_b;
-
- assert(a && is_rsync(a->s) && b && is_rsync(b->s));
-
- len_a = strlen(a->s);
- len_b = strlen(b->s);
-
- assert(len_a < sizeof(a->s) && len_b < sizeof(b->s));
-
- return !strncmp(a->s, b->s, len_a < len_b ? len_a : len_b);
-}
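
Put differently, two rsync URIs "conflict" exactly when one is a prefix of the other, since fetching the shorter one already covers, and could race with, the longer one. Hypothetical examples (not from rcynic):

    /*
     *   "rsync://example.net/repo/"     vs  "rsync://example.net/repo/foo/"  -> conflict
     *   "rsync://example.net/repo/foo/" vs  "rsync://example.net/repo/bar/"  -> no conflict
     */
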
-
-
-
-/**
- * Read non-directory filenames from a directory, so we can check to
- * see what's missing from a manifest.
- */
-static STACK_OF(OPENSSL_STRING) *directory_filenames(const rcynic_ctx_t *rc,
- const walk_state_t state,
- const uri_t *uri)
-{
- STACK_OF(OPENSSL_STRING) *result = NULL;
- path_t path;
- const path_t *prefix = NULL;
- DIR *dir = NULL;
- struct dirent *d;
- int ok = 0;
-
- assert(rc && uri);
-
- switch (state) {
- case walk_state_current:
- prefix = &rc->unauthenticated;
- break;
- case walk_state_backup:
- prefix = &rc->old_authenticated;
- break;
- default:
- goto done;
- }
-
- if (!uri_to_filename(rc, uri, &path, prefix) ||
- (dir = opendir(path.s)) == NULL ||
- (result = sk_OPENSSL_STRING_new(uri_cmp)) == NULL)
- goto done;
-
- while ((d = readdir(dir)) != NULL)
- if (d->d_type != DT_DIR && !sk_OPENSSL_STRING_push_strdup(result, d->d_name))
- goto done;
-
- ok = 1;
-
- done:
- if (dir != NULL)
- closedir(dir);
-
- if (ok)
- return result;
-
- sk_OPENSSL_STRING_pop_free(result, OPENSSL_STRING_free);
- return NULL;
-}
-
-
-
-/**
- * Increment walk context reference count.
- */
-static void walk_ctx_attach(walk_ctx_t *w)
-{
- if (w != NULL) {
- w->refcount++;
- assert(w->refcount != 0);
- }
-}
-
-/**
- * Decrement walk context reference count; freeing the context if the
- * reference count is now zero.
- */
-static void walk_ctx_detach(walk_ctx_t *w)
-{
- if (w != NULL && --(w->refcount) == 0) {
- assert(w->refcount == 0);
- X509_free(w->cert);
- Manifest_free(w->manifest);
- sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free);
- free(w);
- }
-}
-
-/**
- * Return top context of a walk context stack.
- */
-static walk_ctx_t *walk_ctx_stack_head(STACK_OF(walk_ctx_t) *wsk)
-{
- return sk_walk_ctx_t_value(wsk, sk_walk_ctx_t_num(wsk) - 1);
-}
-
-/**
- * Walk context iterator. Think of this as the thing you call in the
- * third clause of a conceptual "for" loop: this reinitializes as
- * necessary for the next pass through the loop.
- *
- * This is still under construction, but the general idea is that we have
- * several state variables in a walk context which collectively define
- * the current pass, product URI, etc, and we want to be able to
- * iterate through this sequence via the event system. So we need a
- * function which steps to the next state.
- */
-static void walk_ctx_loop_next(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
-
- assert(rc && wsk && w);
-
- if (w->manifest && w->manifest_iteration + 1 < sk_FileAndHash_num(w->manifest->fileList)) {
- w->manifest_iteration++;
- return;
- }
-
- if (w->filenames && w->filename_iteration + 1 < sk_OPENSSL_STRING_num(w->filenames)) {
- w->filename_iteration++;
- return;
- }
-
- if (w->state < walk_state_done) {
- w->state++;
- w->manifest_iteration = 0;
- w->filename_iteration = 0;
- sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free);
- w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia);
- }
-}
-
-/**
- * Whether we're done iterating over a walk context. Think of this as
- * the thing you call (negated) in the second clause of a conceptual
- * "for" loop.
- */
-static int walk_ctx_loop_done(STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
- return wsk == NULL || w == NULL || w->state >= walk_state_done;
-}
-
-static Manifest *check_manifest(const rcynic_ctx_t *rc,
- STACK_OF(walk_ctx_t) *wsk);
-
-/**
- * Loop initializer for walk context. Think of this as the thing you
- * call in the first clause of a conceptual "for" loop.
- */
-static void walk_ctx_loop_init(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
-
- assert(rc && wsk && w && w->state == walk_state_ready);
-
- assert(w->manifest == NULL);
- if ((w->manifest = check_manifest(rc, wsk)) == NULL)
- logmsg(rc, log_telemetry, "Couldn't get manifest %s, blundering onward", w->certinfo.manifest.s);
-
- assert(w->filenames == NULL);
- w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia);
-
- w->stale_manifest = w->manifest != NULL && X509_cmp_current_time(w->manifest->nextUpdate) < 0;
-
- w->manifest_iteration = 0;
- w->filename_iteration = 0;
- w->state++;
-
- assert(w->state == walk_state_current);
-
- while (!walk_ctx_loop_done(wsk) &&
- (w->manifest == NULL || w->manifest_iteration >= sk_FileAndHash_num(w->manifest->fileList)) &&
- (w->filenames == NULL || w->filename_iteration >= sk_OPENSSL_STRING_num(w->filenames)))
- walk_ctx_loop_next(rc, wsk);
-}
-
-/**
- * Extract URI and hash values from walk context.
- */
-static int walk_ctx_loop_this(const rcynic_ctx_t *rc,
- STACK_OF(walk_ctx_t) *wsk,
- uri_t *uri,
- const unsigned char **hash,
- size_t *hashlen)
-{
- const walk_ctx_t *w = walk_ctx_stack_head(wsk);
- const char *name = NULL;
- FileAndHash *fah = NULL;
-
- assert(rc && wsk && w && uri && hash && hashlen);
-
- if (w->manifest != NULL && w->manifest_iteration < sk_FileAndHash_num(w->manifest->fileList)) {
- fah = sk_FileAndHash_value(w->manifest->fileList, w->manifest_iteration);
- name = (const char *) fah->file->data;
- } else if (w->filenames != NULL && w->filename_iteration < sk_OPENSSL_STRING_num(w->filenames)) {
- name = sk_OPENSSL_STRING_value(w->filenames, w->filename_iteration);
- }
-
- if (name == NULL) {
- logmsg(rc, log_sys_err, "Can't find a URI in walk context, this shouldn't happen: state %d, manifest_iteration %d, filename_iteration %d",
- (int) w->state, w->manifest_iteration, w->filename_iteration);
- return 0;
- }
-
- if (strlen(w->certinfo.sia.s) + strlen(name) >= sizeof(uri->s)) {
-    logmsg(rc, log_data_err, "URI %s%s too long, skipping", w->certinfo.sia.s, name);
- return 0;
- }
-
- strcpy(uri->s, w->certinfo.sia.s);
- strcat(uri->s, name);
-
- if (fah != NULL) {
- sk_OPENSSL_STRING_remove(w->filenames, name);
- *hash = fah->hash->data;
- *hashlen = fah->hash->length;
- } else {
- *hash = NULL;
- *hashlen = 0;
- }
-
- return 1;
-}
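
Taken together, walk_ctx_loop_init(), walk_ctx_loop_done(), walk_ctx_loop_next() and walk_ctx_loop_this() behave like the clauses and body of a for loop over every object named by the manifest or present in the directory. The sketch below shows how a caller could drive them; it is illustrative only (it assumes the context on top of the stack is in walk_state_ready, and rcynic's real callers are callback/event driven rather than a single literal loop):

    /* Illustrative driver, not rcynic code. */
    static void example_walk(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
    {
      uri_t uri;
      const unsigned char *hash;
      size_t hashlen;

      for (walk_ctx_loop_init(rc, wsk);
           !walk_ctx_loop_done(wsk);
           walk_ctx_loop_next(rc, wsk)) {
        if (walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen))
          logmsg(rc, log_debug, "Visiting %s (%s manifest hash)",
                 uri.s, hash ? "with" : "without");
      }
    }
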
-
-/**
- * Create a new walk context stack.
- */
-static STACK_OF(walk_ctx_t) *walk_ctx_stack_new(void)
-{
- return sk_walk_ctx_t_new_null();
-}
-
-/**
- * Push a walk context onto a walk context stack, return the new context.
- */
-static walk_ctx_t *walk_ctx_stack_push(STACK_OF(walk_ctx_t) *wsk,
- X509 *x,
- const certinfo_t *certinfo)
-{
- walk_ctx_t *w;
-
- if (x == NULL || certinfo == NULL)
- return NULL;
-
- if ((w = malloc(sizeof(*w))) == NULL)
- return NULL;
-
- memset(w, 0, sizeof(*w));
- w->cert = x;
- w->certinfo = *certinfo;
-
- if (!sk_walk_ctx_t_push(wsk, w)) {
- free(w);
- return NULL;
- }
-
- walk_ctx_attach(w);
- return w;
-}
-
-/**
- * Pop and discard a walk context from a walk context stack.
- */
-static void walk_ctx_stack_pop(STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_detach(sk_walk_ctx_t_pop(wsk));
-}
-
-/**
- * Clone a stack of walk contexts.
- */
-static STACK_OF(walk_ctx_t) *walk_ctx_stack_clone(STACK_OF(walk_ctx_t) *old_wsk)
-{
- STACK_OF(walk_ctx_t) *new_wsk;
- int i;
- if (old_wsk == NULL || (new_wsk = sk_walk_ctx_t_dup(old_wsk)) == NULL)
- return NULL;
- for (i = 0; i < sk_walk_ctx_t_num(new_wsk); i++)
- walk_ctx_attach(sk_walk_ctx_t_value(new_wsk, i));
- return new_wsk;
-}
-
-/**
- * Extract certificate stack from walk context stack. Returns a newly
- * created STACK_OF(X509) pointing to the existing cert objects (ie,
- * this is a shallow copy, so only free the STACK_OF(X509), not the
- * certificates themselves).
- */
-static STACK_OF(X509) *walk_ctx_stack_certs(STACK_OF(walk_ctx_t) *wsk)
-{
- STACK_OF(X509) *xsk = sk_X509_new_null();
- walk_ctx_t *w;
- int i;
-
- for (i = 0; i < sk_walk_ctx_t_num(wsk); i++)
- if ((w = sk_walk_ctx_t_value(wsk, i)) == NULL ||
- (w->cert != NULL && !sk_X509_push(xsk, w->cert)))
- goto fail;
-
- return xsk;
-
- fail:
- sk_X509_free(xsk);
- return NULL;
-}
-
-/**
- * Free a walk context stack, decrementing reference counts of each
- * frame on it.
- */
-static void walk_ctx_stack_free(STACK_OF(walk_ctx_t) *wsk)
-{
- sk_walk_ctx_t_pop_free(wsk, walk_ctx_detach);
-}
-
-
-
-static int rsync_count_running(const rcynic_ctx_t *);
-
-/**
- * Add a task to the task queue.
- */
-static int task_add(const rcynic_ctx_t *rc,
- void (*handler)(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *),
- STACK_OF(walk_ctx_t) *wsk)
-{
- task_t *t = malloc(sizeof(*t));
-
- assert(rc && rc->task_queue && handler);
-
- assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
-
- if (!t)
- return 0;
-
- t->handler = handler;
- t->wsk = wsk;
-
- if (sk_task_t_push(rc->task_queue, t))
- return 1;
-
- free(t);
- return 0;
-}
-
-/**
- * Run tasks until queue is empty.
- */
-static void task_run_q(rcynic_ctx_t *rc)
-{
- task_t *t;
- assert(rc && rc->task_queue);
- while ((t = sk_task_t_shift(rc->task_queue)) != NULL) {
- t->handler(rc, t->wsk);
- free(t);
- }
-}
-
-
-
-/**
- * Record that we've already synced a particular rsync URI.
- */
-
-static void rsync_cache_add(const rcynic_ctx_t *rc, const uri_t *uri)
-{
- uri_t uribuf;
- char *s;
-
- assert(rc && uri && rc->rsync_cache);
- uribuf = *uri;
- while ((s = strrchr(uribuf.s, '/')) != NULL && s[1] == '\0')
- *s = '\0';
- assert(strlen(uribuf.s) > SIZEOF_RSYNC);
- if (!sk_OPENSSL_STRING_push_strdup(rc->rsync_cache, uribuf.s + SIZEOF_RSYNC))
- logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri->s);
-}
-
-/**
- * Check whether a string is covered by the cache of URIs we've already
- * fetched: the lookup succeeds if the string itself, or any ancestor
- * directory of it, has been recorded by rsync_cache_add().
- */
-static int rsync_cached_string(const rcynic_ctx_t *rc,
- const char *string)
-{
- char *s, buffer[URI_MAX];
-
- assert(rc && rc->rsync_cache && strlen(string) < sizeof(buffer));
- strcpy(buffer, string);
- if ((s = strrchr(buffer, '/')) != NULL && s[1] == '\0')
- *s = '\0';
- while (sk_OPENSSL_STRING_find(rc->rsync_cache, buffer) < 0) {
- if ((s = strrchr(buffer, '/')) == NULL)
- return 0;
- *s = '\0';
- }
- return 1;
-}
-
-/**
- * Check whether a particular URI has been cached.
- */
-static int rsync_cached_uri(const rcynic_ctx_t *rc,
- const uri_t *uri)
-{
- return is_rsync(uri->s) && rsync_cached_string(rc, uri->s + SIZEOF_RSYNC);
-}
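-
-/*
- * Illustrative sketch (not from the original source): how the
- * ancestor-matching lookup above behaves.  The host and path names are
- * made up.  Once rsync_cache_add() has recorded a directory fetch,
- * every object underneath that directory counts as already fetched,
- * while other hosts and unrelated directories do not.
- */
-#if 0
-static void rsync_cache_example(const rcynic_ctx_t *rc)
-{
- uri_t repo, object, other;
-
- strcpy(repo.s, "rsync://repository.example.org/repo/");
- strcpy(object.s, "rsync://repository.example.org/repo/ca/router.cer");
- strcpy(other.s, "rsync://elsewhere.example.net/repo/ca/router.cer");
-
- rsync_cache_add(rc, &repo);
-
- assert(rsync_cached_uri(rc, &object)); /* hit: an ancestor directory is cached */
- assert(!rsync_cached_uri(rc, &other)); /* miss: different host */
-}
-#endif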
-
-/**
- * Return a count of how many rsync contexts are currently running.
- */
-static int rsync_count_running(const rcynic_ctx_t *rc)
-{
- const rsync_ctx_t *ctx;
- int i, n = 0;
-
- assert(rc && rc->rsync_queue);
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
- switch (ctx->state) {
- case rsync_state_running:
- case rsync_state_terminating:
- n++;
- default:
- continue;
- }
- }
-
- return n;
-}
-
-/**
- * Test whether an rsync context conflicts with anything that's
- * currently runable.
- */
-static int rsync_conflicts(const rcynic_ctx_t *rc,
- const rsync_ctx_t *ctx)
-{
- const rsync_ctx_t *c;
- int i;
-
- assert(rc && ctx && rc->rsync_queue);
-
- for (i = 0; (c = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
- if (c != ctx &&
- (c->state == rsync_state_initial ||
- c->state == rsync_state_running) &&
- conflicting_uris(&c->uri, &ctx->uri))
- return 1;
-
- return 0;
-}
-
-/**
- * Test whether an rsync context is runable at this time.
- */
-static int rsync_runable(const rcynic_ctx_t *rc,
- const rsync_ctx_t *ctx)
-{
- assert(rc && ctx);
-
- switch (ctx->state) {
-
- case rsync_state_initial:
- case rsync_state_running:
- return 1;
-
- case rsync_state_retry_wait:
- return ctx->deadline <= time(0);
-
- case rsync_state_terminating:
- return 0;
-
- case rsync_state_conflict_wait:
- return !rsync_conflicts(rc, ctx);
- }
-
- return 0;
-}
-
-/**
- * Return count of runable rsync contexts.
- */
-static int rsync_count_runable(const rcynic_ctx_t *rc)
-{
- const rsync_ctx_t *ctx;
- int i, n = 0;
-
- assert(rc && rc->rsync_queue);
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
- if (rsync_runable(rc, ctx))
- n++;
-
- return n;
-}
-
-/**
- * Run an rsync process.
- */
-static void rsync_run(const rcynic_ctx_t *rc,
- rsync_ctx_t *ctx)
-{
- static const char * const rsync_cmd[] = {
- "rsync", "--update", "--times", "--copy-links", "--itemize-changes"
- };
- static const char * const rsync_tree_args[] = {
- "--recursive", "--delete"
- };
-
- const char *argv[10];
- path_t path;
- int i, argc = 0, flags, pipe_fds[2];
-
- pipe_fds[0] = pipe_fds[1] = -1;
-
- assert(rc && ctx && ctx->pid == 0 && ctx->state != rsync_state_running && rsync_runable(rc, ctx));
-
- assert(rsync_count_running(rc) < rc->max_parallel_fetches);
-
- logmsg(rc, log_telemetry, "Fetching %s", ctx->uri.s);
-
- memset(argv, 0, sizeof(argv));
-
- for (i = 0; i < sizeof(rsync_cmd)/sizeof(*rsync_cmd); i++) {
- assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = rsync_cmd[i];
- }
- if (endswith(ctx->uri.s, "/")) {
- for (i = 0; i < sizeof(rsync_tree_args)/sizeof(*rsync_tree_args); i++) {
- assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = rsync_tree_args[i];
- }
- }
-
- if (rc->rsync_program)
- argv[0] = rc->rsync_program;
-
- if (!uri_to_filename(rc, &ctx->uri, &path, &rc->unauthenticated)) {
- logmsg(rc, log_data_err, "Couldn't extract filename from URI: %s", ctx->uri.s);
- goto lose;
- }
-
- assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = ctx->uri.s;
-
- assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = path.s;
-
- if (!mkdir_maybe(rc, &path)) {
- logmsg(rc, log_sys_err, "Couldn't make target directory: %s", path.s);
- goto lose;
- }
-
- for (i = 0; i < argc; i++)
- logmsg(rc, log_verbose, "rsync argv[%d]: %s", i, argv[i]);
-
- if (pipe(pipe_fds) < 0) {
- logmsg(rc, log_sys_err, "pipe() failed: %s", strerror(errno));
- goto lose;
- }
- ctx->fd = pipe_fds[0];
-
- if ((flags = fcntl(ctx->fd, F_GETFL, 0)) == -1) {
- logmsg(rc, log_sys_err, "fcntl(F_GETFL) failed: %s",
- strerror(errno));
- goto lose;
- }
- flags |= O_NONBLOCK;
- if (fcntl(ctx->fd, F_SETFL, flags) == -1) {
- logmsg(rc, log_sys_err, "fcntl(F_SETFL) failed: %s",
- strerror(errno));
- goto lose;
- }
-
- switch ((ctx->pid = vfork())) {
-
- case -1:
- logmsg(rc, log_sys_err, "vfork() failed: %s", strerror(errno));
- goto lose;
-
- case 0:
- /*
- * Child
- */
-#define whine(msg) ((void) write(2, msg, sizeof(msg) - 1))
- if (close(pipe_fds[0]) < 0)
- whine("close(pipe_fds[0]) failed\n");
- else if (dup2(pipe_fds[1], 1) < 0)
- whine("dup2(pipe_fds[1], 1) failed\n");
- else if (dup2(pipe_fds[1], 2) < 0)
- whine("dup2(pipe_fds[1], 2) failed\n");
- else if (close(pipe_fds[1]) < 0)
- whine("close(pipe_fds[1]) failed\n");
- else if (execvp(argv[0], (char * const *) argv) < 0)
- whine("execvp(argv[0], (char * const *) argv) failed\n");
- whine("last system error: ");
- write(2, strerror(errno), strlen(strerror(errno)));
- whine("\n");
- _exit(1);
-#undef whine
-
- default:
- /*
- * Parent
- */
- (void) close(pipe_fds[1]);
- pipe_fds[1] = -1;
- ctx->state = rsync_state_running;
- ctx->problem = rsync_problem_none;
- if (rc->rsync_timeout)
- ctx->deadline = time(0) + rc->rsync_timeout;
- logmsg(rc, log_debug, "Subprocess %u started, queued %d, runable %d, running %d, max %d, URI %s",
- (unsigned) ctx->pid, sk_rsync_ctx_t_num(rc->rsync_queue), rsync_count_runable(rc), rsync_count_running(rc), rc->max_parallel_fetches, ctx->uri.s);
- if (ctx->handler)
- ctx->handler(rc, ctx, rsync_status_pending, &ctx->uri, ctx->wsk);
- return;
-
- }
-
- lose:
- if (pipe_fds[0] != -1)
- (void) close(pipe_fds[0]);
- if (pipe_fds[1] != -1)
- (void) close(pipe_fds[1]);
- if (rc->rsync_queue && ctx)
- (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
- if (ctx && ctx->handler)
- ctx->handler(rc, ctx, rsync_status_failed, &ctx->uri, ctx->wsk);
- if (ctx)
- free(ctx);
-}
-
-/**
- * Process one line of rsync's output. This is a separate function
- * primarily to centralize scraping for magic error strings.
- */
-static void do_one_rsync_log_line(const rcynic_ctx_t *rc,
- rsync_ctx_t *ctx)
-{
- unsigned u;
- char *s;
-
- /*
- * Send line to our log unless it's empty.
- */
- if (ctx->buffer[strspn(ctx->buffer, " \t\n\r")] != '\0')
- logmsg(rc, log_telemetry, "rsync[%u]: %s", ctx->pid, ctx->buffer);
-
- /*
- * Check for magic error strings
- */
- if ((s = strstr(ctx->buffer, "@ERROR: max connections")) != NULL) {
- ctx->problem = rsync_problem_refused;
- if (sscanf(s, "@ERROR: max connections (%u) reached -- try again later", &u) == 1)
- logmsg(rc, log_debug, "Subprocess %u reported limit of %u for %s", ctx->pid, u, ctx->uri.s);
- }
-}
-
-/**
- * Construct select() arguments.
- */
-static int rsync_construct_select(const rcynic_ctx_t *rc,
- const time_t now,
- fd_set *rfds,
- struct timeval *tv)
-{
- rsync_ctx_t *ctx;
- time_t when = 0;
- int i, n = 0;
-
- assert(rc && rc->rsync_queue && rfds && tv);
-
- FD_ZERO(rfds);
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
- switch (ctx->state) {
-
- case rsync_state_running:
- if (ctx->fd >= 0) {
- FD_SET(ctx->fd, rfds);
- if (ctx->fd > n)
- n = ctx->fd;
- }
- if (!rc->rsync_timeout)
- continue;
- /* Fall through */
-
- case rsync_state_retry_wait:
- if (when == 0 || ctx->deadline < when)
- when = ctx->deadline;
- /* Fall through */
-
- default:
- continue;
- }
- }
-
- tv->tv_sec = when ? when - now : 0;
- tv->tv_usec = 0;
- return n;
-}
-
-/**
- * Manager for queue of rsync tasks in progress.
- *
- * General plan here is to process one completed child, or output
- * accumulated from children, or block if there is absolutely nothing
- * to do, on the theory that the caller had nothing to do either or would
- * not have called us. Once we've done something allegedly useful, we
- * return, because this is not the event loop; if and when the event
- * loop has nothing more important to do, we'll be called again.
- *
- * So this is the only place where the program blocks waiting for
- * children, but we only do it when we know there's nothing else
- * useful that we could be doing while we wait.
- */
-static void rsync_mgr(const rcynic_ctx_t *rc)
-{
- time_t now = time(0);
- int i, n, pid_status = -1;
- rsync_ctx_t *ctx = NULL;
- struct timeval tv;
- fd_set rfds;
- pid_t pid;
- char *s;
-
- assert(rc && rc->rsync_queue);
-
- /*
- * Check for exited subprocesses.
- */
-
- while ((pid = waitpid(-1, &pid_status, WNOHANG)) > 0) {
-
- /*
- * Child exited, handle it.
- */
-
- logmsg(rc, log_debug, "Subprocess %d exited with status %d", pid, WEXITSTATUS(pid_status));
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
- if (ctx->pid == pid)
- break;
- if (ctx == NULL) {
- assert(i == sk_rsync_ctx_t_num(rc->rsync_queue));
- logmsg(rc, log_sys_err, "Couldn't find rsync context for pid %d", pid);
- continue;
- }
-
- close(ctx->fd);
- ctx->fd = -1;
-
- if (ctx->buflen > 0) {
- assert(ctx->buflen < sizeof(ctx->buffer));
- ctx->buffer[ctx->buflen] = '\0';
- do_one_rsync_log_line(rc, ctx);
- ctx->buflen = 0;
- }
-
- switch (WEXITSTATUS(pid_status)) {
-
- case 0:
- log_validation_status(rc, &ctx->uri,
- (ctx->problem == rsync_problem_timed_out
- ? rsync_timed_out
- : rsync_succeeded),
- object_generation_null);
- break;
-
- case 5: /* "Error starting client-server protocol" */
- /*
- * Handle remote rsyncd refusing to talk to us because we've
- * exceeded its connection limit. Back off for a short
- * interval, then retry.
- */
- if (ctx->problem == rsync_problem_refused && ctx->tries < rc->max_retries) {
- unsigned char r;
- if (!RAND_bytes(&r, sizeof(r)))
- r = 60;
- ctx->deadline = time(0) + rc->retry_wait_min + r;
- ctx->state = rsync_state_retry_wait;
- ctx->problem = rsync_problem_none;
- ctx->pid = 0;
- ctx->tries++;
- logmsg(rc, log_telemetry, "Scheduling retry for %s", ctx->uri.s);
- continue;
- }
-
- /* Otherwise, fall through */
-
- case 2: /* "Protocol incompatibility" */
- case 4: /* "Requested action not supported" */
- case 10: /* "Error in socket I/O" */
- case 11: /* "Error in file I/O" */
- case 12: /* "Error in rsync protocol data stream" */
- case 21: /* "Some error returned by waitpid()" */
- case 30: /* "Timeout in data send/receive" */
- case 35: /* "Timeout waiting for daemon connection" */
- logmsg(rc, log_telemetry, "Adding %s to dead host cache", ctx->uri.s);
- dead_host_add(rc, &ctx->uri);
-
- /* Fall through */
-
- default:
- logmsg(rc, log_data_err, "rsync %u exited with status %d fetching %s",
- (unsigned) pid, WEXITSTATUS(pid_status), ctx->uri.s);
- log_validation_status(rc, &ctx->uri,
- (rc->rsync_timeout && now >= ctx->deadline
- ? rsync_timed_out
- : rsync_failed),
- object_generation_null);
- break;
- }
-
- rsync_cache_add(rc, &ctx->uri);
- if (ctx->handler)
- ctx->handler(rc, ctx, (ctx->problem == rsync_problem_timed_out
- ? rsync_status_timed_out
- : WEXITSTATUS(pid_status) != 0
- ? rsync_status_failed
- : rsync_status_done),
- &ctx->uri, ctx->wsk);
- (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
- free(ctx);
- ctx = NULL;
- }
-
- if (pid == -1 && errno != EINTR && errno != ECHILD)
- logmsg(rc, log_sys_err, "waitpid() returned error: %s", strerror(errno));
-
- assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
-
- /*
- * Look for rsync contexts that have become runable.
- */
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
- if (ctx->state != rsync_state_running &&
- rsync_runable(rc, ctx) &&
- rsync_count_running(rc) < rc->max_parallel_fetches)
- rsync_run(rc, ctx);
-
- assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
-
- /*
- * Check for log text from subprocesses.
- */
-
- n = rsync_construct_select(rc, now, &rfds, &tv);
-
- if (n > 0 || tv.tv_sec)
- n = select(n + 1, &rfds, NULL, NULL, tv.tv_sec ? &tv : NULL);
-
- if (n > 0) {
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
- if (ctx->fd <= 0 || !FD_ISSET(ctx->fd, &rfds))
- continue;
-
- assert(ctx->buflen < sizeof(ctx->buffer) - 1);
-
- while ((n = read(ctx->fd, ctx->buffer + ctx->buflen, sizeof(ctx->buffer) - 1 - ctx->buflen)) > 0) {
- ctx->buflen += n;
- assert(ctx->buflen < sizeof(ctx->buffer));
- ctx->buffer[ctx->buflen] = '\0';
-
- while ((s = strchr(ctx->buffer, '\n')) != NULL) {
- *s++ = '\0';
- do_one_rsync_log_line(rc, ctx);
- assert(s > ctx->buffer && s < ctx->buffer + sizeof(ctx->buffer));
- ctx->buflen -= s - ctx->buffer;
- assert(ctx->buflen < sizeof(ctx->buffer));
- if (ctx->buflen > 0)
- memmove(ctx->buffer, s, ctx->buflen);
- ctx->buffer[ctx->buflen] = '\0';
- }
-
- if (ctx->buflen == sizeof(ctx->buffer) - 1) {
- ctx->buffer[sizeof(ctx->buffer) - 1] = '\0';
- do_one_rsync_log_line(rc, ctx);
- ctx->buflen = 0;
- }
- }
-
- if (n == 0) {
- (void) close(ctx->fd);
- ctx->fd = -1;
- }
- }
- }
-
- assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
-
- /*
- * Deal with children that have been running too long.
- */
- if (rc->rsync_timeout) {
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
- int sig;
- if (ctx->pid <= 0 || now < ctx->deadline)
- continue;
- sig = ctx->tries++ < KILL_MAX ? SIGTERM : SIGKILL;
- if (ctx->state != rsync_state_terminating) {
- ctx->problem = rsync_problem_timed_out;
- ctx->state = rsync_state_terminating;
- ctx->tries = 0;
- logmsg(rc, log_telemetry, "Subprocess %u is taking too long fetching %s, whacking it", (unsigned) ctx->pid, ctx->uri.s);
- dead_host_add(rc, &ctx->uri);
- } else if (sig == SIGTERM) {
- logmsg(rc, log_telemetry, "Whacking subprocess %u again", (unsigned) ctx->pid);
- } else {
- logmsg(rc, log_telemetry, "Whacking subprocess %u with big hammer", (unsigned) ctx->pid);
- }
- (void) kill(ctx->pid, sig);
- ctx->deadline = now + 1;
- }
- }
-}
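-
-/*
- * Illustrative sketch (not from the original source): one way a
- * caller's event loop might pump the two queues that task_run_q() and
- * rsync_mgr() service, per the cooperative scheme described above.
- * The real main loop lives elsewhere in this file; this only shows the
- * shape of the interaction: run any queued walk tasks, give the rsync
- * manager one chance to reap children or start new fetches, and repeat
- * until both queues drain.
- */
-#if 0
-static void event_pump_sketch(rcynic_ctx_t *rc)
-{
- while (sk_task_t_num(rc->task_queue) > 0 ||
- sk_rsync_ctx_t_num(rc->rsync_queue) > 0) {
- task_run_q(rc);
- rsync_mgr(rc);
- }
-}
-#endif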
-
-/**
- * Set up rsync context and attempt to start it.
- */
-static void rsync_init(const rcynic_ctx_t *rc,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- void (*handler)(const rcynic_ctx_t *, const rsync_ctx_t *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *))
-{
- rsync_ctx_t *ctx = NULL;
-
- assert(rc && uri && strlen(uri->s) > SIZEOF_RSYNC);
-
- if (!rc->run_rsync) {
- logmsg(rc, log_verbose, "rsync disabled, skipping %s", uri->s);
- rsync_cache_add(rc, uri);
- if (handler)
- handler(rc, NULL, rsync_status_skipped, uri, wsk);
- return;
- }
-
- if (rsync_cached_uri(rc, uri)) {
- logmsg(rc, log_verbose, "rsync cache hit for %s", uri->s);
- if (handler)
- handler(rc, NULL, rsync_status_done, uri, wsk);
- return;
- }
-
- if (dead_host_check(rc, uri)) {
- logmsg(rc, log_verbose, "Dead host cache hit for %s", uri->s);
- rsync_cache_add(rc, uri);
- if (handler)
- handler(rc, NULL, rsync_status_skipped, uri, wsk);
- return;
- }
-
- if ((ctx = malloc(sizeof(*ctx))) == NULL) {
- logmsg(rc, log_sys_err, "malloc(rsync_ctxt_t) failed");
- if (handler)
- handler(rc, NULL, rsync_status_failed, uri, wsk);
- return;
- }
-
- memset(ctx, 0, sizeof(*ctx));
- ctx->uri = *uri;
- ctx->handler = handler;
- ctx->wsk = wsk;
- ctx->fd = -1;
-
- if (!sk_rsync_ctx_t_push(rc->rsync_queue, ctx)) {
- logmsg(rc, log_sys_err, "Couldn't push rsync state object onto queue, punting %s", ctx->uri.s);
- if (handler)
- handler(rc, ctx, rsync_status_failed, uri, wsk);
- free(ctx);
- return;
- }
-
- if (rsync_conflicts(rc, ctx)) {
- logmsg(rc, log_debug, "New rsync context %s is feeling conflicted", ctx->uri.s);
- ctx->state = rsync_state_conflict_wait;
- }
-
-
-#if 0
- if (rsync_runable(rc, ctx) && rsync_count_running(rc) < rc->max_parallel_fetches)
- rsync_run(rc, ctx);
-#endif
-}
-
-/**
- * rsync a single file (trust anchor, CRL, manifest, ROA, whatever).
- */
-static void rsync_file(const rcynic_ctx_t *rc,
- const uri_t *uri)
-{
- assert(!endswith(uri->s, "/"));
- rsync_init(rc, uri, NULL, NULL);
-}
-
-/**
- * rsync an entire subtree, generally rooted at a SIA collection.
- */
-static void rsync_tree(const rcynic_ctx_t *rc,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- void (*handler)(const rcynic_ctx_t *, const rsync_ctx_t *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *))
-{
- assert(endswith(uri->s, "/"));
- rsync_init(rc, uri, wsk, handler);
-}
-
-
-
-/**
- * Clean up old stuff from previous rsync runs. --delete doesn't help
- * if the URI changes and we never visit the old URI again.
- */
-static int prune_unauthenticated(const rcynic_ctx_t *rc,
- const path_t *name,
- const size_t baselen)
-{
- path_t path;
- struct dirent *d;
- size_t len;
- DIR *dir;
- int need_slash;
-
- assert(rc && name && baselen > 0);
- len = strlen(name->s);
- assert(len >= baselen && len < sizeof(path.s));
- need_slash = name->s[len - 1] != '/';
-
- if (rsync_cached_string(rc, name->s + baselen)) {
- logmsg(rc, log_debug, "prune: cache hit for %s, not cleaning", name->s);
- return 1;
- }
-
- if (rmdir(name->s) == 0) {
- logmsg(rc, log_debug, "prune: removed %s", name->s);
- return 1;
- }
-
- switch (errno) {
- case ENOENT:
- logmsg(rc, log_debug, "prune: nonexistant %s", name->s);
- return 1;
- case ENOTEMPTY:
- break;
- default:
- logmsg(rc, log_debug, "prune: other error %s: %s", name->s, strerror(errno));
- return 0;
- }
-
- if ((dir = opendir(name->s)) == NULL)
- return 0;
-
- while ((d = readdir(dir)) != NULL) {
- if (d->d_name[0] == '.' && (d->d_name[1] == '\0' || (d->d_name[1] == '.' && d->d_name[2] == '\0')))
- continue;
- if (len + strlen(d->d_name) + need_slash >= sizeof(path)) {
- logmsg(rc, log_debug, "prune: %s%s%s too long", name->s, (need_slash ? "/" : ""), d->d_name);
- goto done;
- }
- strcpy(path.s, name->s);
- if (need_slash)
- strcat(path.s, "/");
- strcat(path.s, d->d_name);
- switch (d->d_type) {
- case DT_DIR:
- if (!prune_unauthenticated(rc, &path, baselen))
- goto done;
- continue;
- default:
- if (rsync_cached_string(rc, path.s + baselen)) {
- logmsg(rc, log_debug, "prune: cache hit %s", path.s);
- continue;
- }
- if (unlink(path.s) < 0) {
- logmsg(rc, log_debug, "prune: removing %s failed: %s", path.s, strerror(errno));
- goto done;
- }
- logmsg(rc, log_debug, "prune: removed %s", path.s);
- continue;
- }
- }
-
- if (rmdir(name->s) < 0 && errno != ENOTEMPTY)
- logmsg(rc, log_debug, "prune: couldn't remove %s: %s", name->s, strerror(errno));
-
- done:
- closedir(dir);
- return !d;
-}
-
-
-
-/**
- * Read a DER object using a BIO pipeline that hashes the file content
- * as we read it. Returns the internal form of the parsed DER object,
- * sets the hash buffer (if specified) as a side effect. The default
- * hash algorithm is SHA-256.
- */
-static void *read_file_with_hash(const path_t *filename,
- const ASN1_ITEM *it,
- const EVP_MD *md,
- hashbuf_t *hash)
-{
- void *result = NULL;
- BIO *b;
-
- if ((b = BIO_new_file(filename->s, "rb")) == NULL)
- goto error;
-
- if (hash != NULL) {
- BIO *b2 = BIO_new(BIO_f_md());
- if (b2 == NULL)
- goto error;
- if (md == NULL)
- md = EVP_sha256();
- if (!BIO_set_md(b2, md)) {
- BIO_free(b2);
- goto error;
- }
- BIO_push(b2, b);
- b = b2;
- }
-
- if ((result = ASN1_item_d2i_bio(it, b, NULL)) == NULL)
- goto error;
-
- if (hash != NULL) {
- memset(hash, 0, sizeof(*hash));
- BIO_gets(b, (char *) hash, sizeof(hash->h));
- }
-
- error:
- BIO_free_all(b);
- return result;
-}
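-
-/*
- * Illustrative sketch (not from the original source): the BIO_f_md()
- * technique used above, reduced to its essentials.  A digest filter
- * BIO sits in front of a file BIO, every byte read through the chain
- * updates the digest, and BIO_gets() on the filter returns the
- * finished hash.  The filename is hypothetical.
- */
-#if 0
-static void bio_md_sketch(void)
-{
- unsigned char digest[EVP_MAX_MD_SIZE];
- char buffer[4096];
- BIO *md = NULL, *in = NULL;
- int n;
-
- if ((md = BIO_new(BIO_f_md())) == NULL ||
- (in = BIO_new_file("example.cer", "rb")) == NULL) {
- BIO_free(md);
- BIO_free(in);
- return;
- }
-
- BIO_push(md, in); /* chain: md -> in */
-
- if (BIO_set_md(md, EVP_sha256())) {
- while ((n = BIO_read(md, buffer, sizeof(buffer))) > 0)
- ; /* digest updates as bytes flow through the filter */
- n = BIO_gets(md, (char *) digest, sizeof(digest)); /* n is the digest length */
- }
-
- BIO_free_all(md); /* frees both BIOs in the chain */
-}
-#endif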
-
-/**
- * Read and hash a certificate.
- */
-static X509 *read_cert(const path_t *filename, hashbuf_t *hash)
-{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(X509), NULL, hash);
-}
-
-/**
- * Read and hash a CRL.
- */
-static X509_CRL *read_crl(const path_t *filename, hashbuf_t *hash)
-{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(X509_CRL), NULL, hash);
-}
-
-/**
- * Read and hash a CMS message.
- */
-static CMS_ContentInfo *read_cms(const path_t *filename, hashbuf_t *hash)
-{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(CMS_ContentInfo), NULL, hash);
-}
-
-
-
-/**
- * Extract CRLDP data from a certificate.
- */
-static void extract_crldp_uri(const rcynic_ctx_t *rc,
- const uri_t *uri,
- const object_generation_t generation,
- const STACK_OF(DIST_POINT) *crldp,
- uri_t *result)
-{
- DIST_POINT *d;
- int i;
-
- assert(crldp);
-
- if (sk_DIST_POINT_num(crldp) != 1) {
- log_validation_status(rc, uri, malformed_crldp, generation);
- return;
- }
-
- d = sk_DIST_POINT_value(crldp, 0);
-
- if (d->reasons || d->CRLissuer || !d->distpoint || d->distpoint->type != 0) {
- log_validation_status(rc, uri, malformed_crldp, generation);
- return;
- }
-
- for (i = 0; i < sk_GENERAL_NAME_num(d->distpoint->name.fullname); i++) {
- GENERAL_NAME *n = sk_GENERAL_NAME_value(d->distpoint->name.fullname, i);
- assert(n != NULL);
- if (n->type != GEN_URI) {
- log_validation_status(rc, uri, malformed_crldp, generation);
- return;
- }
- if (!is_rsync((char *) n->d.uniformResourceIdentifier->data)) {
- logmsg(rc, log_verbose, "Skipping non-rsync URI %s for %s",
- (char *) n->d.uniformResourceIdentifier->data, uri->s);
- continue;
- }
- if (sizeof(result->s) <= n->d.uniformResourceIdentifier->length) {
- log_validation_status(rc, uri, uri_too_long, generation);
- continue;
- }
- strcpy(result->s, (char *) n->d.uniformResourceIdentifier->data);
- return;
- }
-}
-
-/**
- * Extract SIA or AIA data from a certificate.
- */
-static void extract_access_uri(const rcynic_ctx_t *rc,
- const uri_t *uri,
- const object_generation_t generation,
- const AUTHORITY_INFO_ACCESS *xia,
- const unsigned char *oid,
- const int oidlen,
- uri_t *result)
-{
- int i;
-
- if (!xia)
- return;
-
- for (i = 0; i < sk_ACCESS_DESCRIPTION_num(xia); i++) {
- ACCESS_DESCRIPTION *a = sk_ACCESS_DESCRIPTION_value(xia, i);
- assert(a != NULL);
- if (a->location->type != GEN_URI)
- return;
- if (oid_cmp(a->method, oid, oidlen))
- continue;
- if (!is_rsync((char *) a->location->d.uniformResourceIdentifier->data)) {
- logmsg(rc, log_verbose, "Skipping non-rsync URI %s for %s",
- a->location->d.uniformResourceIdentifier->data, uri->s);
- continue;
- }
- if (sizeof(result->s) <= a->location->d.uniformResourceIdentifier->length) {
- log_validation_status(rc, uri, uri_too_long, generation);
- continue;
- }
- strcpy(result->s, (char *) a->location->d.uniformResourceIdentifier->data);
- return;
- }
-}
-
-/**
- * Parse interesting stuff from a certificate.
- */
-static void parse_cert(const rcynic_ctx_t *rc, X509 *x, certinfo_t *c, const uri_t *uri, const object_generation_t generation)
-{
- STACK_OF(DIST_POINT) *crldp;
- AUTHORITY_INFO_ACCESS *xia;
-
- assert(x != NULL && c != NULL && uri != NULL);
- memset(c, 0, sizeof(*c));
-
- c->ca = X509_check_ca(x) == 1;
- c->uri = *uri;
- c->generation = generation;
-
- if ((xia = X509_get_ext_d2i(x, NID_info_access, NULL, NULL)) != NULL) {
- extract_access_uri(rc, uri, generation, xia, id_ad_caIssuers, sizeof(id_ad_caIssuers), &c->aia);
- sk_ACCESS_DESCRIPTION_pop_free(xia, ACCESS_DESCRIPTION_free);
- }
-
- if ((xia = X509_get_ext_d2i(x, NID_sinfo_access, NULL, NULL)) != NULL) {
- extract_access_uri(rc, uri, generation, xia, id_ad_caRepository, sizeof(id_ad_caRepository), &c->sia);
- extract_access_uri(rc, uri, generation, xia, id_ad_rpkiManifest, sizeof(id_ad_rpkiManifest), &c->manifest);
- sk_ACCESS_DESCRIPTION_pop_free(xia, ACCESS_DESCRIPTION_free);
- }
-
- if ((crldp = X509_get_ext_d2i(x, NID_crl_distribution_points, NULL, NULL)) != NULL) {
- extract_crldp_uri(rc, uri, generation, crldp, &c->crldp);
- sk_DIST_POINT_pop_free(crldp, DIST_POINT_free);
- }
-}
-
-
-
-/**
- * Attempt to read and check one CRL from disk.
- */
-
-static X509_CRL *check_crl_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- X509 *issuer,
- const unsigned char *hash,
- const size_t hashlen,
- const object_generation_t generation)
-{
- hashbuf_t hashbuf;
- X509_CRL *crl = NULL;
- EVP_PKEY *pkey;
- int ret;
-
- assert(uri && path && issuer);
-
- if (!uri_to_filename(rc, uri, path, prefix))
- goto punt;
-
- if (hashlen > sizeof(hashbuf.h)) {
- log_validation_status(rc, uri, hash_too_long, generation);
- goto punt;
- }
-
- if (hash)
- crl = read_crl(path, &hashbuf);
- else
- crl = read_crl(path, NULL);
-
- if (!crl)
- goto punt;
-
- if (hash && memcmp(hashbuf.h, hash, hashlen)) {
- log_validation_status(rc, uri, crl_digest_mismatch, generation);
- goto punt;
- }
-
- if (X509_cmp_current_time(X509_CRL_get_lastUpdate(crl)) > 0) {
- log_validation_status(rc, uri, crl_not_yet_valid, generation);
- goto punt;
- }
-
- if (X509_cmp_current_time(X509_CRL_get_nextUpdate(crl)) < 0) {
- log_validation_status(rc, uri, stale_crl, generation);
- if (!rc->allow_stale_crl)
- goto punt;
- }
-
- if ((pkey = X509_get_pubkey(issuer)) == NULL)
- goto punt;
- ret = X509_CRL_verify(crl, pkey);
- EVP_PKEY_free(pkey);
-
- if (ret > 0)
- return crl;
-
- punt:
- X509_CRL_free(crl);
- return NULL;
-}
-
-/**
- * Check whether we already have a particular CRL, attempt to fetch it
- * and check issuer's signature if we don't.
- */
-static X509_CRL *check_crl(const rcynic_ctx_t *rc,
- const uri_t *uri,
- X509 *issuer,
- const unsigned char *hash,
- const size_t hashlen)
-{
- path_t path;
- X509_CRL *crl;
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- (crl = read_crl(&path, NULL)) != NULL)
- return crl;
-
- logmsg(rc, log_telemetry, "Checking CRL %s", uri->s);
-
- if ((crl = check_crl_1(rc, uri, &path, &rc->unauthenticated,
- issuer, hash, hashlen, object_generation_current))) {
- install_object(rc, uri, &path, object_accepted, object_generation_current);
- return crl;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_current);
- }
-
- if ((crl = check_crl_1(rc, uri, &path, &rc->old_authenticated,
- issuer, hash, hashlen, object_generation_backup))) {
- install_object(rc, uri, &path, object_accepted, object_generation_backup);
- return crl;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_backup);
- }
-
- return NULL;
-}
-
-
-
-/**
- * Check whether extensions in a certificate are allowed by profile.
- * Also returns failure in a few null-pointer cases that can't
- * possibly conform to profile.
- */
-static int check_allowed_extensions(const X509 *x, const int allow_eku)
-{
- int i;
-
- if (x == NULL || x->cert_info == NULL || x->cert_info->extensions == NULL)
- return 0;
-
- for (i = 0; i < sk_X509_EXTENSION_num(x->cert_info->extensions); i++) {
- switch (OBJ_obj2nid(sk_X509_EXTENSION_value(x->cert_info->extensions,
- i)->object)) {
- case NID_basic_constraints:
- case NID_subject_key_identifier:
- case NID_authority_key_identifier:
- case NID_key_usage:
- case NID_crl_distribution_points:
- case NID_info_access:
- case NID_sinfo_access:
- case NID_certificate_policies:
- case NID_sbgp_ipAddrBlock:
- case NID_sbgp_autonomousSysNum:
- continue;
- case NID_ext_key_usage:
- if (allow_eku)
- continue;
- else
- return 0;
- default:
- return 0;
- }
- }
-
- return 1;
-}
-
-/**
- * Check whether a Distinguished Name conforms to the rescert profile.
- * The profile is very restrictive: it only allows one mandatory
- * CommonName field and one optional SerialNumber field, both of which
- * must be of type PrintableString.
- */
-static int check_allowed_dn(X509_NAME *dn)
-{
- X509_NAME_ENTRY *ne;
- ASN1_STRING *s;
- int loc;
-
- if (dn == NULL)
- return 0;
-
- switch (X509_NAME_entry_count(dn)) {
-
- case 2:
- if ((loc = X509_NAME_get_index_by_NID(dn, NID_serialNumber, -1)) < 0 ||
- (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
- (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
- ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
- return 0;
-
- /* Fall through */
-
- case 1:
- if ((loc = X509_NAME_get_index_by_NID(dn, NID_commonName, -1)) < 0 ||
- (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
- (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
- ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
- return 0;
-
- return 1;
-
- default:
- return 0;
- }
-}
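-
-/*
- * Illustrative sketch (not from the original source): building a
- * subject name that satisfies check_allowed_dn() above -- one
- * PrintableString commonName plus an optional PrintableString
- * serialNumber and nothing else.  The field values are made up.
- */
-#if 0
-static X509_NAME *allowed_dn_sketch(void)
-{
- X509_NAME *dn = X509_NAME_new();
-
- if (dn == NULL)
- return NULL;
-
- if (!X509_NAME_add_entry_by_NID(dn, NID_commonName, V_ASN1_PRINTABLESTRING,
- (unsigned char *) "example-rpki-ca", -1, -1, 0) ||
- !X509_NAME_add_entry_by_NID(dn, NID_serialNumber, V_ASN1_PRINTABLESTRING,
- (unsigned char *) "0123456789", -1, -1, 0)) {
- X509_NAME_free(dn);
- return NULL;
- }
-
- assert(check_allowed_dn(dn)); /* adding, say, an organizationName would make this fail */
- return dn;
-}
-#endif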
-
-
-
-/**
- * Validation callback function for use with X509_verify_cert().
- */
-static int check_x509_cb(int ok, X509_STORE_CTX *ctx)
-{
- rcynic_x509_store_ctx_t *rctx = (rcynic_x509_store_ctx_t *) ctx;
- mib_counter_t code;
-
- assert(rctx != NULL);
-
- switch (ctx->error) {
- case X509_V_OK:
- return ok;
-
- case X509_V_ERR_SUBJECT_ISSUER_MISMATCH:
- /*
- * Informational events, not really errors. ctx->check_issued()
- * is called in many places where failure to find an issuer is not
- * a failure for the calling function. Just leave these alone.
- */
- return ok;
-
- case X509_V_ERR_CRL_HAS_EXPIRED:
- /*
- * This isn't really an error, exactly. CRLs don't really
- * "expire". What OpenSSL really means by this error is just
- * "it's now later than the issuer said it intended to publish a
- * new CRL". Whether we treat this as an error or not is
- * configurable, see the allow_stale_crl parameter.
- *
- * Deciding whether to allow stale CRLs is check_crl_1()'s job,
- * not ours. By the time this callback occurs, we've already
- * accepted the CRL; this callback is just notifying us that the
- * object being checked is tainted by a stale CRL. So we mark the
- * object as tainted and carry on.
- */
- log_validation_status(rctx->rc, &rctx->subject->uri, tainted_by_stale_crl, rctx->subject->generation);
- ok = 1;
- return ok;
-
- case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
- /*
- * This is another error that's only an error in the strange world
- * of OpenSSL, but a more serious one. By default, OpenSSL
- * expects all trust anchors to be self-signed. This is not a
- * PKIX requirement, it's just an OpenSSL thing, but one violates
- * it at one's peril, because the only way to convince OpenSSL to
- * allow a non-self-signed trust anchor is to intercept this
- * "error" in the verify callback handler.
- *
- * So this program supports non-self-signed trust anchors, but be
- * warned that enabling this feature may cause this program's
- * output not to work with other OpenSSL-based applications.
- */
- if (rctx->rc->allow_non_self_signed_trust_anchor)
- ok = 1;
- log_validation_status(rctx->rc, &rctx->subject->uri, trust_anchor_not_self_signed, rctx->subject->generation);
- return ok;
-
- /*
- * Select the correct MIB counter for every known OpenSSL verify error
- * except the ones we handle explicitly above, then fall through to
- * common handling for all of these.
- */
-#define QV(x) \
- case x: \
- code = mib_openssl_##x; \
- break;
- MIB_COUNTERS_FROM_OPENSSL;
-#undef QV
-
- default:
- code = unknown_verify_error;
- break;
- }
-
- log_validation_status(rctx->rc, &rctx->subject->uri, code, rctx->subject->generation);
- return ok;
-}
-
-/**
- * Check crypto aspects of a certificate, policy OID, RFC 3779 path
- * validation, and conformance to the RPKI certificate profile.
- */
-static int check_x509(const rcynic_ctx_t *rc,
- STACK_OF(X509) *certs,
- X509 *x,
- const certinfo_t *subject,
- const certinfo_t *issuer_certinfo)
-{
- rcynic_x509_store_ctx_t rctx;
- STACK_OF(X509_CRL) *crls = NULL;
- EVP_PKEY *pkey = NULL;
- X509_CRL *crl = NULL;
- unsigned long flags = (X509_V_FLAG_POLICY_CHECK | X509_V_FLAG_EXPLICIT_POLICY | X509_V_FLAG_X509_STRICT);
- X509 *issuer;
- int ret = 0;
-
- assert(rc && certs && x && subject);
-
- if (!X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, x, NULL))
- return 0;
- rctx.rc = rc;
- rctx.subject = subject;
-
- issuer = sk_X509_value(certs, sk_X509_num(certs) - 1);
- assert(issuer != NULL);
-
- if (subject->sia.s[0] && subject->sia.s[strlen(subject->sia.s) - 1] != '/') {
- log_validation_status(rc, &subject->uri, malformed_sia, subject->generation);
- goto done;
- }
-
- if (!subject->ta && !subject->aia.s[0]) {
- log_validation_status(rc, &subject->uri, aia_missing, subject->generation);
- goto done;
- }
-
- if (!issuer_certinfo->ta && strcmp(issuer_certinfo->uri.s, subject->aia.s)) {
- log_validation_status(rc, &subject->uri, aia_mismatch, subject->generation);
- goto done;
- }
-
- if (subject->ca && !subject->sia.s[0]) {
- log_validation_status(rc, &subject->uri, sia_missing, subject->generation);
- goto done;
- }
-
- if (subject->ca && !subject->manifest.s[0]) {
- log_validation_status(rc, &subject->uri, manifest_missing, subject->generation);
- goto done;
- }
-
- if (subject->ca && !startswith(subject->manifest.s, subject->sia.s)) {
- log_validation_status(rc, &subject->uri, manifest_mismatch, subject->generation);
- goto done;
- }
-
- if (!check_allowed_extensions(x, !subject->ca)) {
- log_validation_status(rc, &subject->uri, disallowed_extension, subject->generation);
- goto done;
- }
-
- if (!check_allowed_dn(X509_get_subject_name(x)))
- log_validation_status(rc, &subject->uri, nonconformant_subject_name, subject->generation);
-
- if (!check_allowed_dn(X509_get_issuer_name(x)))
- log_validation_status(rc, &subject->uri, nonconformant_issuer_name, subject->generation);
-
- if (subject->ta) {
-
- if (subject->crldp.s[0]) {
- log_validation_status(rc, &subject->uri, trust_anchor_with_crldp, subject->generation);
- goto done;
- }
-
- } else {
-
- if (!subject->crldp.s[0]) {
- log_validation_status(rc, &subject->uri, crldp_missing, subject->generation);
- goto done;
- }
-
- if (!subject->ca && !startswith(subject->crldp.s, issuer_certinfo->sia.s)) {
- log_validation_status(rc, &subject->uri, crldp_mismatch, subject->generation);
- goto done;
- }
-
- flags |= X509_V_FLAG_CRL_CHECK;
-
- if ((pkey = X509_get_pubkey(issuer)) == NULL || X509_verify(x, pkey) <= 0) {
- log_validation_status(rc, &subject->uri, certificate_bad_signature, subject->generation);
- goto done;
- }
-
- if ((crl = check_crl(rc, &subject->crldp, issuer, NULL, 0)) == NULL) {
- log_validation_status(rc, &subject->uri, certificate_bad_crl, subject->generation);
- goto done;
- }
-
- if ((crls = sk_X509_CRL_new_null()) == NULL || !sk_X509_CRL_push(crls, crl)) {
- logmsg(rc, log_sys_err,
- "Internal allocation error setting up CRL for validation");
- goto done;
- }
- crl = NULL;
-
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
-
- }
-
- X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
-
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param, flags);
-
- X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
-
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- log_validation_status(rc, &subject->uri, certificate_failed_validation, subject->generation);
- goto done;
- }
-
- ret = 1;
-
- done:
- sk_X509_CRL_pop_free(crls, X509_CRL_free);
- X509_STORE_CTX_cleanup(&rctx.ctx);
- EVP_PKEY_free(pkey);
- X509_CRL_free(crl);
-
- return ret;
-}
-
-/**
- * Load certificate, check against manifest, then run it through all
- * the check_x509() tests.
- */
-static X509 *check_cert_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- STACK_OF(X509) *certs,
- const certinfo_t *issuer,
- certinfo_t *subject,
- const unsigned char *hash,
- const size_t hashlen,
- object_generation_t generation)
-{
- hashbuf_t hashbuf;
- X509 *x = NULL;
-
- assert(uri && path && certs && issuer && subject);
-
- if (!uri_to_filename(rc, uri, path, prefix))
- return NULL;
-
- if (access(path->s, R_OK))
- return NULL;
-
- if (hashlen > sizeof(hashbuf.h)) {
- log_validation_status(rc, uri, hash_too_long, generation);
- goto punt;
- }
-
- if (hash)
- x = read_cert(path, &hashbuf);
- else
- x = read_cert(path, NULL);
-
- if (!x) {
- logmsg(rc, log_sys_err, "Can't read certificate %s", path->s);
- goto punt;
- }
-
- if (hash && memcmp(hashbuf.h, hash, hashlen)) {
- log_validation_status(rc, uri, certificate_digest_mismatch, generation);
- goto punt;
- }
-
- parse_cert(rc, x, subject, uri, generation);
-
- if (check_x509(rc, certs, x, subject, issuer))
- return x;
-
- punt:
- X509_free(x);
- return NULL;
-}
-
-/**
- * Try to find a good copy of a certificate either in fresh data or in
- * backup data from a previous run of this program.
- */
-static X509 *check_cert(rcynic_ctx_t *rc,
- uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- certinfo_t *subject,
- const unsigned char *hash,
- const size_t hashlen)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
- object_generation_t generation;
- const certinfo_t *issuer = NULL;
- STACK_OF(X509) *certs = NULL;
- const path_t *prefix = NULL;
- path_t path;
- X509 *x;
-
- assert(rc && uri && wsk && w && subject);
-
- issuer = &w->certinfo;
-
- switch (w->state) {
- case walk_state_current:
- prefix = &rc->unauthenticated;
- generation = object_generation_current;
- break;
- case walk_state_backup:
- prefix = &rc->old_authenticated;
- generation = object_generation_backup;
- break;
- default:
- return NULL;
- }
-
- /*
- * If target file already exists and we're not here to recheck with
- * better data, just get out now.
- */
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- !access(path.s, R_OK)) {
- if (w->state == walk_state_backup || sk_OPENSSL_STRING_find(rc->backup_cache, uri->s) < 0)
- return NULL;
- assert(generation == object_generation_current);
- log_validation_status(rc, uri, current_cert_recheck, generation);
- logmsg(rc, log_telemetry, "Rechecking %s", uri->s);
- } else {
- logmsg(rc, log_telemetry, "Checking %s", uri->s);
- }
-
- if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
- return NULL;
-
- if ((x = check_cert_1(rc, uri, &path, prefix, certs, issuer, subject, hash, hashlen, generation)) != NULL) {
- install_object(rc, uri, &path, object_accepted, generation);
- if (w->state == walk_state_current)
- sk_OPENSSL_STRING_remove(rc->backup_cache, uri->s);
- else if (!sk_OPENSSL_STRING_push_strdup(rc->backup_cache, uri->s))
- logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri->s);
-
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, generation);
- }
-
- sk_X509_free(certs);
- certs = NULL;
-
- return x;
-}
-
-
-
-/**
- * Read and check one manifest from disk.
- */
-static Manifest *check_manifest_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- STACK_OF(X509) *certs,
- const object_generation_t generation)
-{
- CMS_ContentInfo *cms = NULL;
- const ASN1_OBJECT *eContentType = NULL;
- STACK_OF(X509) *signers = NULL;
- STACK_OF(X509_CRL) *crls = NULL;
- X509_CRL *crl = NULL;
- Manifest *manifest = NULL, *result = NULL;
- BIO *bio = NULL;
- rcynic_x509_store_ctx_t rctx;
- certinfo_t certinfo;
- int i, initialized_store_ctx = 0;
- FileAndHash *fah = NULL;
- char *crl_tail;
-
- assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
-
- if (!uri_to_filename(rc, uri, path, prefix) ||
- (cms = read_cms(path, NULL)) == NULL)
- goto done;
-
- if ((eContentType = CMS_get0_eContentType(cms)) == NULL ||
- oid_cmp(eContentType, id_ct_rpkiManifest, sizeof(id_ct_rpkiManifest))) {
- log_validation_status(rc, uri, manifest_bad_econtenttype, generation);
- goto done;
- }
-
- if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for manifest %s", uri->s);
- goto done;
- }
-
- if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- log_validation_status(rc, uri, manifest_invalid_cms, generation);
- goto done;
- }
-
- if ((signers = CMS_get0_signers(cms)) == NULL || sk_X509_num(signers) != 1) {
- log_validation_status(rc, uri, manifest_missing_signer, generation);
- goto done;
- }
-
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
-
- if (!certinfo.crldp.s[0]) {
- log_validation_status(rc, uri, manifest_missing_crldp, generation);
- goto done;
- }
-
- if ((crl_tail = strrchr(certinfo.crldp.s, '/')) == NULL) {
- log_validation_status(rc, uri, manifest_malformed_crldp, generation);
- goto done;
- }
- crl_tail++;
-
- if ((manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL)) == NULL) {
- log_validation_status(rc, uri, manifest_decode_error, generation);
- goto done;
- }
-
- if (manifest->version) {
- log_validation_status(rc, uri, manifest_wrong_version, generation);
- goto done;
- }
-
- if (X509_cmp_current_time(manifest->thisUpdate) > 0) {
- log_validation_status(rc, uri, manifest_not_yet_valid, generation);
- goto done;
- }
-
- if (X509_cmp_current_time(manifest->nextUpdate) < 0) {
- log_validation_status(rc, uri, stale_manifest, generation);
- if (!rc->allow_stale_manifest)
- goto done;
- }
-
- if (manifest->fileHashAlg == NULL ||
- oid_cmp(manifest->fileHashAlg, id_sha256, sizeof(id_sha256)))
- goto done;
-
- for (i = 0; (fah = sk_FileAndHash_value(manifest->fileList, i)) != NULL; i++)
- if (!strcmp((char *) fah->file->data, crl_tail))
- break;
-
- if (fah) {
- crl = check_crl(rc, &certinfo.crldp,
- sk_X509_value(certs, sk_X509_num(certs) - 1),
- fah->hash->data, fah->hash->length);
- } else {
- log_validation_status(rc, uri, crl_not_in_manifest, generation);
- if (rc->require_crl_in_manifest)
- goto done;
- crl = check_crl(rc, &certinfo.crldp,
- sk_X509_value(certs, sk_X509_num(certs) - 1),
- NULL, 0);
- }
-
- if (!crl)
- goto done;
-
- if ((crls = sk_X509_CRL_new_null()) == NULL || !sk_X509_CRL_push(crls, crl))
- goto done;
- crl = NULL;
-
- if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
- goto done;
-
- rctx.rc = rc;
- rctx.subject = &certinfo;
-
- X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
- X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
-
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
- X509_V_FLAG_CRL_CHECK |
- X509_V_FLAG_POLICY_CHECK |
- X509_V_FLAG_EXPLICIT_POLICY |
- X509_V_FLAG_X509_STRICT);
-
- X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
-
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- /*
- * Redundant error message?
- */
- log_validation_status(rc, uri, manifest_invalid_ee, generation);
- goto done;
- }
-
- result = manifest;
- manifest = NULL;
-
- done:
- if (initialized_store_ctx)
- X509_STORE_CTX_cleanup(&rctx.ctx);
- BIO_free(bio);
- Manifest_free(manifest);
- CMS_ContentInfo_free(cms);
- sk_X509_free(signers);
- sk_X509_CRL_pop_free(crls, X509_CRL_free);
-
- return result;
-}
-
-/**
- * Check whether we already have a particular manifest, attempt to fetch it
- * and check issuer's signature if we don't.
- */
-static Manifest *check_manifest(const rcynic_ctx_t *rc,
- STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
- CMS_ContentInfo *cms = NULL;
- Manifest *manifest = NULL;
- STACK_OF(X509) *certs = NULL;
- BIO *bio = NULL;
- path_t path;
- uri_t *uri;
-
- assert(rc && wsk && w);
-
- uri = &w->certinfo.manifest;
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- (cms = read_cms(&path, NULL)) != NULL &&
- (bio = BIO_new(BIO_s_mem()))!= NULL &&
- CMS_verify(cms, NULL, NULL, NULL, bio,
- CMS_NO_SIGNER_CERT_VERIFY |
- CMS_NO_ATTR_VERIFY |
- CMS_NO_CONTENT_VERIFY) > 0)
- manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL);
-
- CMS_ContentInfo_free(cms);
- BIO_free(bio);
-
- if (manifest != NULL)
- return manifest;
-
- logmsg(rc, log_telemetry, "Checking manifest %s", uri->s);
-
- if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
- return NULL;
-
- if (manifest == NULL) {
- if ((manifest = check_manifest_1(rc, uri, &path,
- &rc->unauthenticated, certs, object_generation_current)) != NULL)
- install_object(rc, uri, &path, object_accepted, object_generation_current);
- else if (!access(path.s, F_OK))
- log_validation_status(rc, uri, object_rejected, object_generation_current);
- }
-
- if (manifest == NULL) {
- if ((manifest = check_manifest_1(rc, uri, &path,
- &rc->old_authenticated, certs, object_generation_backup)) != NULL)
- install_object(rc, uri, &path, object_accepted, object_generation_backup);
- else if (!access(path.s, F_OK))
- log_validation_status(rc, uri, object_rejected, object_generation_backup);
- }
-
- sk_X509_free(certs);
- certs = NULL;
-
- return manifest;
-}
-
-
-
-/**
- * Extract a ROA prefix from the ASN.1 bitstring encoding.
- */
-static int extract_roa_prefix(unsigned char *addr,
- unsigned *prefixlen,
- const ASN1_BIT_STRING *bs,
- const unsigned afi)
-{
- unsigned length;
-
- switch (afi) {
- case IANA_AFI_IPV4: length = 4; break;
- case IANA_AFI_IPV6: length = 16; break;
- default: return 0;
- }
-
- if (bs->length < 0 || bs->length > length)
- return 0;
-
- if (bs->length > 0) {
- memcpy(addr, bs->data, bs->length);
- if ((bs->flags & 7) != 0) {
- unsigned char mask = 0xFF >> (8 - (bs->flags & 7));
- addr[bs->length - 1] &= ~mask;
- }
- }
-
- memset(addr + bs->length, 0, length - bs->length);
-
- *prefixlen = (bs->length * 8) - (bs->flags & 7);
-
- return 1;
-}
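-
-/*
- * Worked example for extract_roa_prefix() (the prefix is made up): an
- * IPv4 ROA prefix of 10.64.0.0/10 arrives as a BIT STRING holding the
- * two octets 0x0a 0x40 with 6 unused bits, so bs->length == 2 and
- * (bs->flags & 7) == 6.  The code above copies the two octets, masks
- * the 6 unused low-order bits of the last octet (already zero here),
- * zero-fills the remaining two octets of the 4-octet IPv4 buffer, and
- * computes prefixlen = 2 * 8 - 6 = 10.
- */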
-
-/**
- * Read and check one ROA from disk.
- */
-static int check_roa_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- STACK_OF(X509) *certs,
- const unsigned char *hash,
- const size_t hashlen,
- const object_generation_t generation)
-{
- unsigned char addrbuf[ADDR_RAW_BUF_LEN];
- const ASN1_OBJECT *eContentType = NULL;
- STACK_OF(IPAddressFamily) *roa_resources = NULL, *ee_resources = NULL;
- STACK_OF(X509_CRL) *crls = NULL;
- STACK_OF(X509) *signers = NULL;
- CMS_ContentInfo *cms = NULL;
- X509_CRL *crl = NULL;
- hashbuf_t hashbuf;
- ROA *roa = NULL;
- BIO *bio = NULL;
- rcynic_x509_store_ctx_t rctx;
- certinfo_t certinfo;
- int i, j, initialized_store_ctx = 0, result = 0;
- unsigned afi, *safi = NULL, safi_, prefixlen;
- ROAIPAddressFamily *rf;
- ROAIPAddress *ra;
-
- assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
-
- if (!uri_to_filename(rc, uri, path, prefix))
- goto error;
-
- if (hashlen > sizeof(hashbuf.h)) {
- log_validation_status(rc, uri, hash_too_long, generation);
- goto error;
- }
-
- if (hash)
- cms = read_cms(path, &hashbuf);
- else
- cms = read_cms(path, NULL);
-
- if (!cms)
- goto error;
-
- if (hash && memcmp(hashbuf.h, hash, hashlen)) {
- log_validation_status(rc, uri, roa_digest_mismatch, generation);
- goto error;
- }
-
- if (!(eContentType = CMS_get0_eContentType(cms)) ||
- oid_cmp(eContentType, id_ct_routeOriginAttestation,
- sizeof(id_ct_routeOriginAttestation))) {
- log_validation_status(rc, uri, roa_bad_econtenttype, generation);
- goto error;
- }
-
- if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for ROA %s", uri->s);
- goto error;
- }
-
- if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- log_validation_status(rc, uri, roa_invalid_cms, generation);
- goto error;
- }
-
- if (!(signers = CMS_get0_signers(cms)) || sk_X509_num(signers) != 1) {
- log_validation_status(rc, uri, roa_missing_signer, generation);
- goto error;
- }
-
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
-
- if (!(roa = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), bio, NULL))) {
- log_validation_status(rc, uri, roa_decode_error, generation);
- goto error;
- }
-
- if (roa->version) {
- log_validation_status(rc, uri, roa_wrong_version, generation);
- goto error;
- }
-
- /*
- * ROA issuer doesn't need rights to the ASN, so we don't need to
- * check the asID field.
- */
-
- ee_resources = X509_get_ext_d2i(sk_X509_value(signers, 0), NID_sbgp_ipAddrBlock, NULL, NULL);
-
- /*
- * Extract prefixes from ROA and convert them into a resource set.
- */
-
- if (!(roa_resources = sk_IPAddressFamily_new_null()))
- goto error;
-
- for (i = 0; i < sk_ROAIPAddressFamily_num(roa->ipAddrBlocks); i++) {
- rf = sk_ROAIPAddressFamily_value(roa->ipAddrBlocks, i);
- if (!rf || !rf->addressFamily || rf->addressFamily->length < 2 || rf->addressFamily->length > 3) {
- log_validation_status(rc, uri, malformed_roa_addressfamily, generation);
- goto error;
- }
- afi = (rf->addressFamily->data[0] << 8) | (rf->addressFamily->data[1]);
- if (rf->addressFamily->length == 3)
- *(safi = &safi_) = rf->addressFamily->data[2];
- for (j = 0; j < sk_ROAIPAddress_num(rf->addresses); j++) {
- ra = sk_ROAIPAddress_value(rf->addresses, j);
- if (!ra ||
- !extract_roa_prefix(addrbuf, &prefixlen, ra->IPAddress, afi) ||
- !v3_addr_add_prefix(roa_resources, afi, safi, addrbuf, prefixlen)) {
- log_validation_status(rc, uri, roa_resources_malformed, generation);
- goto error;
- }
- }
- }
-
- /*
- * ROAs can include nested prefixes, so direct translation to
- * resource sets could include overlapping ranges, which is illegal.
- * So we have to remove nested stuff before whacking into canonical
- * form. Fortunately, this is relatively easy, since we know these
- * are just prefixes, not ranges: in a list of prefixes sorted by
- * the RFC 3779 rules, the first element of a set of nested prefixes
- * will always be the least specific.
- */
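-
- /*
- * Concrete example (prefixes made up): after the RFC 3779 sort, a ROA
- * listing 10.0.0.0/8 and 10.0.64.0/18 puts the /8 first; the /18's
- * maximum address does not exceed the /8's, so the loop below deletes
- * the nested /18 and only the covering /8 survives canonicalization.
- */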
-
- for (i = 0; i < sk_IPAddressFamily_num(roa_resources); i++) {
- IPAddressFamily *f = sk_IPAddressFamily_value(roa_resources, i);
-
- if ((afi = v3_addr_get_afi(f)) == 0) {
- log_validation_status(rc, uri, roa_bad_afi, generation);
- goto error;
- }
-
- if (f->ipAddressChoice->type == IPAddressChoice_addressesOrRanges) {
- IPAddressOrRanges *aors = f->ipAddressChoice->u.addressesOrRanges;
-
- sk_IPAddressOrRange_sort(aors);
-
- for (j = 0; j < sk_IPAddressOrRange_num(aors) - 1; j++) {
- IPAddressOrRange *a = sk_IPAddressOrRange_value(aors, j);
- IPAddressOrRange *b = sk_IPAddressOrRange_value(aors, j + 1);
- unsigned char a_min[ADDR_RAW_BUF_LEN], a_max[ADDR_RAW_BUF_LEN];
- unsigned char b_min[ADDR_RAW_BUF_LEN], b_max[ADDR_RAW_BUF_LEN];
- int length;
-
- if ((length = v3_addr_get_range(a, afi, a_min, a_max, ADDR_RAW_BUF_LEN)) == 0 ||
- (length = v3_addr_get_range(b, afi, b_min, b_max, ADDR_RAW_BUF_LEN)) == 0) {
- log_validation_status(rc, uri, roa_resources_malformed, generation);
- goto error;
- }
-
- if (memcmp(a_max, b_max, length) >= 0) {
- (void) sk_IPAddressOrRange_delete(aors, j + 1);
- IPAddressOrRange_free(b);
- --j;
- }
- }
- }
- }
-
- if (!v3_addr_canonize(roa_resources)) {
- log_validation_status(rc, uri, roa_resources_malformed, generation);
- goto error;
- }
-
- if (!v3_addr_subset(roa_resources, ee_resources)) {
- log_validation_status(rc, uri, roa_not_nested, generation);
- goto error;
- }
-
- if (!(crl = check_crl(rc, &certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
- log_validation_status(rc, uri, roa_bad_crl, generation);
- goto error;
- }
-
- if (!(crls = sk_X509_CRL_new_null()) || !sk_X509_CRL_push(crls, crl))
- goto error;
- crl = NULL;
-
- if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
- goto error;
-
- rctx.rc = rc;
- rctx.subject = &certinfo;
-
- X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
- X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
-
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
- X509_V_FLAG_CRL_CHECK |
- X509_V_FLAG_POLICY_CHECK |
- X509_V_FLAG_EXPLICIT_POLICY |
- X509_V_FLAG_X509_STRICT);
-
- X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
-
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- /*
- * Redundant error message?
- */
- log_validation_status(rc, uri, roa_invalid_ee, generation);
- goto error;
- }
-
- result = 1;
-
- error:
- if (initialized_store_ctx)
- X509_STORE_CTX_cleanup(&rctx.ctx);
- BIO_free(bio);
- ROA_free(roa);
- CMS_ContentInfo_free(cms);
- sk_X509_free(signers);
- sk_X509_CRL_pop_free(crls, X509_CRL_free);
- sk_IPAddressFamily_pop_free(roa_resources, IPAddressFamily_free);
- sk_IPAddressFamily_pop_free(ee_resources, IPAddressFamily_free);
-
- return result;
-}
-
-/**
- * Check whether we already have a particular ROA, attempt to fetch it
- * and check issuer's signature if we don't.
- */
-static void check_roa(const rcynic_ctx_t *rc,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- const unsigned char *hash,
- const size_t hashlen)
-{
- STACK_OF(X509) *certs = NULL;
- path_t path;
-
- assert(rc && uri && wsk);
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- !access(path.s, F_OK))
- return;
-
- logmsg(rc, log_telemetry, "Checking ROA %s", uri->s);
-
- if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
- return;
-
- if (check_roa_1(rc, uri, &path, &rc->unauthenticated,
- certs, hash, hashlen, object_generation_current)) {
- install_object(rc, uri, &path, object_accepted, object_generation_current);
- goto done;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_current);
- }
-
- if (check_roa_1(rc, uri, &path, &rc->old_authenticated,
- certs, hash, hashlen, object_generation_backup)) {
- install_object(rc, uri, &path, object_accepted, object_generation_backup);
- goto done;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_backup);
- }
-
- done:
- sk_X509_free(certs);
-}
-
-
-
-/**
- * Read and check one Ghostbuster record from disk.
- */
-static int check_ghostbuster_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- STACK_OF(X509) *certs,
- const unsigned char *hash,
- const size_t hashlen,
- const object_generation_t generation)
-{
- const ASN1_OBJECT *eContentType = NULL;
- STACK_OF(X509_CRL) *crls = NULL;
- STACK_OF(X509) *signers = NULL;
- CMS_ContentInfo *cms = NULL;
- X509_CRL *crl = NULL;
- hashbuf_t hashbuf;
- BIO *bio = NULL;
- rcynic_x509_store_ctx_t rctx;
- certinfo_t certinfo;
- int initialized_store_ctx = 0, result = 0;
-
- assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
-
- if (!uri_to_filename(rc, uri, path, prefix))
- goto error;
-
- if (hashlen > sizeof(hashbuf.h)) {
- log_validation_status(rc, uri, hash_too_long, generation);
- goto error;
- }
-
- if (hash)
- cms = read_cms(path, &hashbuf);
- else
- cms = read_cms(path, NULL);
-
- if (!cms)
- goto error;
-
- if (hash && memcmp(hashbuf.h, hash, hashlen)) {
- log_validation_status(rc, uri, ghostbuster_digest_mismatch, generation);
- goto error;
- }
-
- if (!(eContentType = CMS_get0_eContentType(cms)) ||
- oid_cmp(eContentType, id_ct_rpkiGhostbusters,
- sizeof(id_ct_rpkiGhostbusters))) {
- log_validation_status(rc, uri, ghostbuster_bad_econtenttype, generation);
- goto error;
- }
-
-#if 0
- /*
- * May want this later if we're going to inspect the VCard. For now,
- * just leave this NULL and the right thing should happen.
- */
- if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for Ghostbuster record %s", uri->s);
- goto error;
- }
-#endif
-
- if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- log_validation_status(rc, uri, ghostbuster_invalid_cms, generation);
- goto error;
- }
-
- if (!(signers = CMS_get0_signers(cms)) || sk_X509_num(signers) != 1) {
- log_validation_status(rc, uri, ghostbuster_missing_signer, generation);
- goto error;
- }
-
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
-
-#if 0
- /*
- * Here is where we would read the VCard from the bio returned by
- * CMS_verify() so that we could check the VCard.
- */
-#endif
-
- if (!(crl = check_crl(rc, &certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
- log_validation_status(rc, uri, ghostbuster_bad_crl, generation);
- goto error;
- }
-
- if (!(crls = sk_X509_CRL_new_null()) || !sk_X509_CRL_push(crls, crl))
- goto error;
- crl = NULL;
-
- if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
- goto error;
-
- rctx.rc = rc;
- rctx.subject = &certinfo;
-
- X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
- X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
-
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
- X509_V_FLAG_CRL_CHECK |
- X509_V_FLAG_POLICY_CHECK |
- X509_V_FLAG_EXPLICIT_POLICY |
- X509_V_FLAG_X509_STRICT);
-
- X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
-
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- log_validation_status(rc, uri, ghostbuster_invalid_ee, generation);
- goto error;
- }
-
- result = 1;
-
- error:
- if (initialized_store_ctx)
- X509_STORE_CTX_cleanup(&rctx.ctx);
- BIO_free(bio);
- CMS_ContentInfo_free(cms);
- sk_X509_free(signers);
- sk_X509_CRL_pop_free(crls, X509_CRL_free);
-
- return result;
-}
-
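The two #if 0 blocks in check_ghostbuster_1() mark where vCard inspection would hook in: hand CMS_verify() a memory BIO instead of NULL and the eContent becomes available for checking. A minimal stand-alone sketch of that idea (illustrative only, not rcynic code; a real check would also validate the vCard's contents):

#include <stdio.h>
#include <openssl/bio.h>
#include <openssl/cms.h>
#include <openssl/err.h>

/*
 * Read a DER-encoded CMS object (e.g. a .gbr Ghostbusters record)
 * named on the command line and print its eContent.  As in
 * check_ghostbuster_1() above, certificate chain checking is skipped
 * via CMS_NO_SIGNER_CERT_VERIFY; only the CMS structure and the
 * signature against the embedded signer certificate are checked.
 */
int main(int argc, char *argv[])
{
  BIO *in, *mem;
  CMS_ContentInfo *cms;
  char *data = NULL;
  long n;

  if (argc != 2 || (in = BIO_new_file(argv[1], "rb")) == NULL)
    return 1;

  cms = d2i_CMS_bio(in, NULL);
  BIO_free(in);
  if (cms == NULL)
    return 1;

  if ((mem = BIO_new(BIO_s_mem())) == NULL ||
      CMS_verify(cms, NULL, NULL, NULL, mem, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
    ERR_print_errors_fp(stderr);
    return 1;
  }

  n = BIO_get_mem_data(mem, &data);
  fwrite(data, 1, (size_t) n, stdout);  /* the vCard, for a Ghostbuster record */

  BIO_free(mem);
  CMS_ContentInfo_free(cms);
  return 0;
}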
-/**
- * Check whether we already have a particular Ghostbuster record,
- * attempt to fetch it and check issuer's signature if we don't.
- */
-static void check_ghostbuster(const rcynic_ctx_t *rc,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- const unsigned char *hash,
- const size_t hashlen)
-{
- STACK_OF(X509) *certs = NULL;
- path_t path;
-
- assert(rc && uri && wsk);
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- !access(path.s, F_OK))
- return;
-
- logmsg(rc, log_telemetry, "Checking Ghostbuster record %s", uri->s);
-
- if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
- return;
-
- if (check_ghostbuster_1(rc, uri, &path, &rc->unauthenticated,
- certs, hash, hashlen, object_generation_current)) {
- install_object(rc, uri, &path, object_accepted, object_generation_current);
- goto done;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_current);
- }
-
- if (check_ghostbuster_1(rc, uri, &path, &rc->old_authenticated,
- certs, hash, hashlen, object_generation_backup)) {
- install_object(rc, uri, &path, object_accepted, object_generation_backup);
- goto done;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_backup);
- }
-
- done:
- sk_X509_free(certs);
-}
-
-
-
-static void walk_cert(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *);
-
-/**
- * rsync callback for fetching SIA tree.
- */
-static void rsync_sia_callback(const rcynic_ctx_t *rc,
- const rsync_ctx_t *ctx,
- const rsync_status_t status,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
-
- assert(rc && wsk);
-
- switch (status) {
-
- case rsync_status_pending:
- if (rsync_count_runable(rc) >= rc->max_parallel_fetches)
- return;
-
- assert(rsync_count_running(rc) < rc->max_parallel_fetches);
-
- if ((wsk = walk_ctx_stack_clone(wsk)) == NULL) {
- logmsg(rc, log_sys_err, "walk_ctx_stack_clone() failed, probably memory exhaustion, blundering onwards without forking stack");
- return;
- }
-
- walk_ctx_stack_pop(wsk);
- task_add(rc, walk_cert, wsk);
- return;
-
- case rsync_status_failed:
- log_validation_status(rc, uri, rsync_failed, object_generation_null);
- break;
-
- case rsync_status_timed_out:
- log_validation_status(rc, uri, rsync_timed_out, object_generation_null);
- break;
-
- case rsync_status_skipped:
- log_validation_status(rc, uri, rsync_skipped, object_generation_null);
- break;
-
- case rsync_status_done:
- break;
- }
-
- w->state++;
- task_add(rc, walk_cert, wsk);
-}
-
-/**
- * Recursive walk of certificate hierarchy (core of the program).
- *
- * Walk all products of the current certificate, starting with the
- * ones named in the manifest and continuing with any that we find in
- * the publication directory but which are not named in the manifest.
- *
- * Dispatch to correct checking code for the object named by URI,
- * based on the filename extension in the uri. CRLs are a special
- * case because we've already checked them by the time we get here, so
- * we just ignore them. Other objects are either certificates or
- * CMS-signed objects of one kind or another.
- */
-static void walk_cert(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
- const unsigned char *hash = NULL;
- object_generation_t generation;
- size_t hashlen;
- walk_ctx_t *w;
- uri_t uri;
-
- assert(rc && wsk);
-
- while ((w = walk_ctx_stack_head(wsk)) != NULL) {
-
- switch (w->state) {
- case walk_state_current:
- generation = object_generation_current;
- break;
- case walk_state_backup:
- generation = object_generation_backup;
- break;
- default:
- generation = object_generation_null;
- break;
- }
-
- switch (w->state) {
-
- case walk_state_initial:
-
- if (!w->certinfo.sia.s[0] || !w->certinfo.ca) {
- w->state = walk_state_done;
- continue;
- }
-
- if (!w->certinfo.manifest.s[0]) {
- log_validation_status(rc, &w->certinfo.uri, manifest_missing, w->certinfo.generation);
- w->state = walk_state_done;
- continue;
- }
-
- w->state++;
- continue;
-
- case walk_state_rsync:
-
- rsync_tree(rc, &w->certinfo.sia, wsk, rsync_sia_callback);
- return;
-
- case walk_state_ready:
-
- walk_ctx_loop_init(rc, wsk); /* sets w->state */
- continue;
-
- case walk_state_current:
- case walk_state_backup:
-
- if (!walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen)) {
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- if (endswith(uri.s, ".crl") || endswith(uri.s, ".mft") || endswith(uri.s, ".mnf")) {
- walk_ctx_loop_next(rc, wsk);
- continue; /* CRLs and manifests checked elsewhere */
- }
-
- if (hash == NULL)
- log_validation_status(rc, &uri, tainted_by_not_being_in_manifest, generation);
- else if (w->stale_manifest)
- log_validation_status(rc, &uri, tainted_by_stale_manifest, generation);
-
- if (hash == NULL && !rc->allow_object_not_in_manifest) {
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- if (endswith(uri.s, ".roa")) {
- check_roa(rc, &uri, wsk, hash, hashlen);
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- if (endswith(uri.s, ".gbr")) {
- check_ghostbuster(rc, &uri, wsk, hash, hashlen);
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- if (endswith(uri.s, ".cer")) {
- certinfo_t subject;
- X509 *x = check_cert(rc, &uri, wsk, &subject, hash, hashlen);
- if (!walk_ctx_stack_push(wsk, x, &subject))
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- log_validation_status(rc, &uri, unknown_object_type_skipped, object_generation_null);
- walk_ctx_loop_next(rc, wsk);
- continue;
-
- case walk_state_done:
-
- walk_ctx_stack_pop(wsk); /* Resume our issuer's state */
- continue;
-
- }
- }
-
- assert(walk_ctx_stack_head(wsk) == NULL);
- walk_ctx_stack_free(wsk);
-}
-
-/**
- * Check a trust anchor. Yes, we trust it, by definition, but it
- * still needs to conform to the certificate profile, the
- * self-signature must be correct, etcetera.
- */
-static void check_ta(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
- STACK_OF(X509) *certs = walk_ctx_stack_certs(wsk);
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
- int ok = 0;
-
- if (certs != NULL && w != NULL)
- ok = check_x509(rc, certs, w->cert, &w->certinfo, &w->certinfo);
-
- sk_X509_free(certs);
-
- if (!ok)
- return;
-
- task_add(rc, walk_cert, wsk);
-
- while (sk_task_t_num(rc->task_queue) > 0 || sk_rsync_ctx_t_num(rc->rsync_queue) > 0) {
- task_run_q(rc);
- rsync_mgr(rc);
- }
-}
-
-
-
-/**
- * Read a trust anchor from disk and compare with known public key.
- * NB: EVP_PKEY_cmp() returns 1 for match, not 0 like every other
- * xyz_cmp() function in the entire OpenSSL library. Go figure.
- */
-static X509 *read_ta(const rcynic_ctx_t *rc, const uri_t *uri, const path_t *path, const EVP_PKEY *pkey, object_generation_t generation)
-
-{
- EVP_PKEY *xpkey = NULL;
- X509 *x = NULL;
- int match = 0;
-
- if ((x = read_cert(path, NULL)) == NULL || (xpkey = X509_get_pubkey(x)) == NULL) {
- log_validation_status(rc, uri, unreadable_trust_anchor, generation);
- } else {
- match = EVP_PKEY_cmp(pkey, xpkey) == 1;
- if (!match)
- log_validation_status(rc, uri, trust_anchor_key_mismatch, generation);
- }
-
- EVP_PKEY_free(xpkey);
- if (match)
- return x;
- X509_free(x);
- return NULL;
-}
-
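As the comment above read_ta() notes, EVP_PKEY_cmp() is the odd one out among OpenSSL's comparison functions: it returns 1 on a match rather than 0. A tiny self-contained illustration of the idiom (throwaway RSA keys generated purely for the example; not rcynic code):

#include <stdio.h>
#include <openssl/evp.h>
#include <openssl/rsa.h>

int main(void)
{
  /* Two freshly generated keys, just so there is something to compare. */
  EVP_PKEY *a = EVP_PKEY_new();
  EVP_PKEY *b = EVP_PKEY_new();

  EVP_PKEY_assign_RSA(a, RSA_generate_key(2048, RSA_F4, NULL, NULL));
  EVP_PKEY_assign_RSA(b, RSA_generate_key(2048, RSA_F4, NULL, NULL));

  /*
   * EVP_PKEY_cmp() returns 1 on match, 0 on mismatch, and negative
   * values on error, so the test must be "== 1", never "!= 0".
   */
  printf("a vs a: %s\n", EVP_PKEY_cmp(a, a) == 1 ? "match" : "no match");
  printf("a vs b: %s\n", EVP_PKEY_cmp(a, b) == 1 ? "match" : "no match");

  EVP_PKEY_free(a);
  EVP_PKEY_free(b);
  return 0;
}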
-
-
-/**
- * Main program. Parse command line, read config file, iterate over
- * trust anchors found via config file and do a tree walk for each
- * trust anchor.
- */
-int main(int argc, char *argv[])
-{
- int opt_jitter = 0, use_syslog = 0, use_stderr = 0, syslog_facility = 0;
- int opt_syslog = 0, opt_stderr = 0, opt_level = 0, prune = 1;
- char *cfg_file = "rcynic.conf";
- char *lockfile = NULL, *xmlfile = NULL;
- int c, i, j, ret = 1, jitter = 600, lockfd = -1;
- STACK_OF(CONF_VALUE) *cfg_section = NULL;
- STACK_OF(walk_ctx_t) *wsk = NULL;
- CONF *cfg_handle = NULL;
- walk_ctx_t *w = NULL;
- time_t start = 0, finish;
- unsigned long hash;
- rcynic_ctx_t rc;
- unsigned delay;
- long eline = 0;
- BIO *bio = NULL;
-
- memset(&rc, 0, sizeof(rc));
-
- if ((rc.jane = strrchr(argv[0], '/')) == NULL)
- rc.jane = argv[0];
- else
- rc.jane++;
-
- rc.log_level = log_data_err;
- rc.allow_stale_crl = 1;
- rc.allow_stale_manifest = 1;
- rc.max_parallel_fetches = 1;
- rc.max_retries = 3;
- rc.retry_wait_min = 30;
- rc.run_rsync = 1;
- rc.rsync_timeout = 300;
-
-#define QQ(x,y) rc.priority[x] = y;
- LOG_LEVELS;
-#undef QQ
-
- if (!set_directory(&rc, &rc.authenticated, "rcynic-data/authenticated", 0) ||
- !set_directory(&rc, &rc.unauthenticated, "rcynic-data/unauthenticated/", 1))
- goto done;
-
- OpenSSL_add_all_algorithms();
- ERR_load_crypto_strings();
-
- while ((c = getopt(argc, argv, "c:l:sej:V")) > 0) {
- switch (c) {
- case 'c':
- cfg_file = optarg;
- break;
- case 'l':
- opt_level = 1;
- if (!configure_logmsg(&rc, optarg))
- goto done;
- break;
- case 's':
- use_syslog = opt_syslog = 1;
- break;
- case 'e':
- use_stderr = opt_stderr = 1;
- break;
- case 'j':
- if (!configure_integer(&rc, &jitter, optarg))
- goto done;
- opt_jitter = 1;
- break;
- case 'V':
- puts(svn_id);
- ret = 0;
- goto done;
- default:
- logmsg(&rc, log_usage_err,
- "usage: %s [-c configfile] [-s] [-e] [-l loglevel] [-j jitter] [-V]",
- rc.jane);
- goto done;
- }
- }
-
- if ((cfg_handle = NCONF_new(NULL)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't create CONF opbject");
- goto done;
- }
-
- if (NCONF_load(cfg_handle, cfg_file, &eline) <= 0) {
- if (eline <= 0)
- logmsg(&rc, log_usage_err, "Couldn't load config file %s", cfg_file);
- else
- logmsg(&rc, log_usage_err, "Error on line %ld of config file %s", eline, cfg_file);
- goto done;
- }
-
- if (CONF_modules_load(cfg_handle, NULL, 0) <= 0) {
- logmsg(&rc, log_sys_err, "Couldn't configure OpenSSL");
- goto done;
- }
-
- if ((cfg_section = NCONF_get_section(cfg_handle, "rcynic")) == NULL) {
- logmsg(&rc, log_usage_err, "Couldn't load rcynic section from config file");
- goto done;
- }
-
- for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
- CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
-
- assert(val && val->name && val->value);
-
- if (!name_cmp(val->name, "authenticated") &&
- !set_directory(&rc, &rc.authenticated, val->value, 0))
- goto done;
-
- else if (!name_cmp(val->name, "unauthenticated") &&
- !set_directory(&rc, &rc.unauthenticated, val->value, 1))
- goto done;
-
- else if (!name_cmp(val->name, "rsync-timeout") &&
- !configure_integer(&rc, &rc.rsync_timeout, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "max-parallel-fetches") &&
- !configure_integer(&rc, &rc.max_parallel_fetches, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "rsync-program"))
- rc.rsync_program = strdup(val->value);
-
- else if (!name_cmp(val->name, "lockfile"))
- lockfile = strdup(val->value);
-
- else if (!opt_jitter &&
- !name_cmp(val->name, "jitter") &&
- !configure_integer(&rc, &jitter, val->value))
- goto done;
-
- else if (!opt_level &&
- !name_cmp(val->name, "log-level") &&
- !configure_logmsg(&rc, val->value))
- goto done;
-
- else if (!opt_syslog &&
- !name_cmp(val->name, "use-syslog") &&
- !configure_boolean(&rc, &use_syslog, val->value))
- goto done;
-
- else if (!opt_stderr &&
- !name_cmp(val->name, "use-stderr") &&
- !configure_boolean(&rc, &use_stderr, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "syslog-facility") &&
- !configure_syslog(&rc, &syslog_facility,
- facilitynames, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "xml-summary"))
- xmlfile = strdup(val->value);
-
- else if (!name_cmp(val->name, "allow-stale-crl") &&
- !configure_boolean(&rc, &rc.allow_stale_crl, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "allow-stale-manifest") &&
- !configure_boolean(&rc, &rc.allow_stale_manifest, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "allow-non-self-signed-trust-anchor") &&
- !configure_boolean(&rc, &rc.allow_non_self_signed_trust_anchor, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "require-crl-in-manifest") &&
- !configure_boolean(&rc, &rc.require_crl_in_manifest, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "allow-object-not-in-manifest") &&
- !configure_boolean(&rc, &rc.allow_object_not_in_manifest, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "use-links") &&
- !configure_boolean(&rc, &rc.use_links, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "prune") &&
- !configure_boolean(&rc, &prune, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "run-rsync") &&
- !configure_boolean(&rc, &rc.run_rsync, val->value))
- goto done;
-
- /*
- * Ugly, but the easiest way to handle all these strings.
- */
-
-#define QQ(x,y) \
- else if (!name_cmp(val->name, "syslog-priority-" #x) && \
- !configure_syslog(&rc, &rc.priority[x], \
- prioritynames, val->value)) \
- goto done;
-
- LOG_LEVELS; /* the semicolon is for emacs */
-
-#undef QQ
-
- }
-
- if ((rc.rsync_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate rsync_cache stack");
- goto done;
- }
-
- if ((rc.backup_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate backup_cache stack");
- goto done;
- }
-
- if ((rc.dead_host_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate dead_host_cache stack");
- goto done;
- }
-
- if (xmlfile != NULL) {
- if ((rc.validation_status = sk_validation_status_t_new(validation_status_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate validation_status stack");
- goto done;
- }
- }
-
- if ((rc.x509_store = X509_STORE_new()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate X509_STORE");
- goto done;
- }
-
- if ((rc.rsync_queue = sk_rsync_ctx_t_new_null()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate rsync_queue");
- goto done;
- }
-
- if ((rc.task_queue = sk_task_t_new_null()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate task_queue");
- goto done;
- }
-
- rc.use_syslog = use_syslog;
-
- if (use_syslog)
- openlog(rc.jane,
- LOG_PID | (use_stderr ? LOG_PERROR : 0),
- (syslog_facility ? syslog_facility : LOG_LOCAL0));
-
- if (jitter > 0) {
- if (RAND_bytes((unsigned char *) &delay, sizeof(delay)) <= 0) {
- logmsg(&rc, log_sys_err, "Couldn't read random bytes");
- goto done;
- }
- delay %= jitter;
- logmsg(&rc, log_telemetry, "Delaying %u seconds before startup", delay);
- while (delay > 0)
- delay = sleep(delay);
- }
-
- if (lockfile &&
- ((lockfd = open(lockfile, O_RDWR|O_CREAT|O_NONBLOCK, 0666)) < 0 ||
- lockf(lockfd, F_TLOCK, 0) < 0)) {
- if (lockfd >= 0 && errno == EAGAIN)
- logmsg(&rc, log_telemetry, "Lock %s held by another process", lockfile);
- else
- logmsg(&rc, log_sys_err, "Problem locking %s: %s", lockfile, strerror(errno));
- lockfd = -1;
- goto done;
- }
-
- start = time(0);
- logmsg(&rc, log_telemetry, "Starting");
-
- if (!construct_directory_names(&rc))
- goto done;
-
- if (!access(rc.new_authenticated.s, F_OK)) {
- logmsg(&rc, log_sys_err, "Timestamped output directory %s already exists! Clock went backwards?", rc.new_authenticated.s);
- goto done;
- }
-
- if (!mkdir_maybe(&rc, &rc.new_authenticated)) {
- logmsg(&rc, log_sys_err, "Couldn't prepare directory %s: %s",
- rc.new_authenticated.s, strerror(errno));
- goto done;
- }
-
- for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
- CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
- object_generation_t generation = object_generation_null;
- path_t path1, path2;
- certinfo_t ta_certinfo;
- uri_t uri;
- X509 *x = NULL;
-
- assert(val && val->name && val->value);
-
- if (!name_cmp(val->name, "trust-anchor-uri-with-key") ||
- !name_cmp(val->name, "indirect-trust-anchor")) {
- /*
- * Obsolete syntax. If you're reading this comment because you
- * had an old rcynic.conf and got this error message:
- *
- * "indirect-trust-anchor" is exactly the same as
- * "trust-anchor-locator", the name was changed to settle a
- * terminology fight in the IETF SIDR WG.
- *
- * "trust-anchor-uri-with-key" is semantically identical to
- * "trust-anchor-locator" (and was the original form of this
- * mechanism), but the syntax and local file format is
- * different.
- *
- * If you're seeing this error, you should just obtain current
- * TAL files. Also see the "make-tal.sh" script.
- */
- logmsg(&rc, log_usage_err,
- "Directive \"%s\" is obsolete -- please use \"trust-anchor-locator\" instead",
- val->name);
- goto done;
- }
-
- if (!name_cmp(val->name, "trust-anchor")) {
- /*
- * Local file trust anchor method.
- */
- logmsg(&rc, log_telemetry, "Processing trust anchor from local file %s", val->value);
- if (strlen(val->value) >= sizeof(path1.s)) {
- logmsg(&rc, log_usage_err, "Trust anchor path name too long %s", val->value);
- goto done;
- }
- strcpy(path1.s, val->value);
-
- /* Construct file:// URI for logging */
- assert(sizeof("file://") < sizeof(uri.s));
- strcpy(uri.s, "file://");
- if (path1.s[0] != '/') {
- if (getcwd(uri.s + strlen(uri.s), sizeof(uri.s) - strlen(uri.s)) == NULL ||
- (!endswith(uri.s, "/") && strlen(uri.s) >= sizeof(uri.s) - 1))
- uri.s[0] = '\0';
- else
- strcat(uri.s, "/");
- }
- if (uri.s[0] != '\0' && strlen(uri.s) + strlen(path1.s) < sizeof(uri.s))
- strcat(uri.s, path1.s);
- else
- uri.s[0] = '\0';
-
- if ((x = read_cert(&path1, NULL)) == NULL) {
- log_validation_status(&rc, &uri, unreadable_trust_anchor, generation);
- continue;
- }
- hash = X509_subject_name_hash(x);
- for (j = 0; j < INT_MAX; j++) {
- if (snprintf(path2.s, sizeof(path2.s), "%s%lx.%d.cer",
- rc.new_authenticated.s, hash, j) == sizeof(path2.s)) {
- logmsg(&rc, log_sys_err,
- "Couldn't construct path name for trust anchor %s", path1.s);
- goto done;
- }
- if (access(path2.s, F_OK))
- break;
- }
- if (j == INT_MAX) {
- logmsg(&rc, log_sys_err, "Couldn't find a free name for trust anchor %s", path1.s);
- goto done;
- }
- }
-
- if (!name_cmp(val->name, "trust-anchor-locator")) {
- /*
- * Trust anchor locator (URI + public key) method.
- */
- EVP_PKEY *pkey = NULL;
- char *fn;
- path_t path3;
-
- fn = val->value;
- bio = BIO_new_file(fn, "r");
- if (!bio || BIO_gets(bio, uri.s, sizeof(uri.s)) <= 0) {
- log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
- BIO_free(bio);
- bio = NULL;
- continue;
- }
- uri.s[strcspn(uri.s, " \t\r\n")] = '\0';
- bio = BIO_push(BIO_new(BIO_f_linebreak()), bio);
- bio = BIO_push(BIO_new(BIO_f_base64()), bio);
- if (!uri_to_filename(&rc, &uri, &path1, &rc.unauthenticated) ||
- !uri_to_filename(&rc, &uri, &path2, &rc.new_authenticated) ||
- !uri_to_filename(&rc, &uri, &path3, &rc.old_authenticated)) {
- log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
- BIO_free_all(bio);
- bio = NULL;
- continue;
- }
- logmsg(&rc, log_telemetry, "Processing trust anchor from URI %s", uri.s);
- rsync_file(&rc, &uri);
- while (sk_rsync_ctx_t_num(rc.rsync_queue) > 0)
- rsync_mgr(&rc);
- if (bio)
- pkey = d2i_PUBKEY_bio(bio, NULL);
- BIO_free_all(bio);
- bio = NULL;
- if (!pkey) {
- log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
- continue;
- }
- generation = object_generation_current;
- if ((x = read_ta(&rc, &uri, &path1, pkey, generation)) == NULL) {
- generation = object_generation_backup;
- path1 = path3;
- x = read_ta(&rc, &uri, &path1, pkey, generation);
- }
- EVP_PKEY_free(pkey);
- if (!x)
- continue;
- }
-
- if (!x)
- continue;
-
- logmsg(&rc, log_telemetry, "Copying trust anchor %s to %s", path1.s, path2.s);
-
- if (!mkdir_maybe(&rc, &path2) || !cp_ln(&rc, &path1, &path2))
- goto done;
-
- if ((wsk = walk_ctx_stack_new()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate walk context stack");
- goto done;
- }
-
- parse_cert(&rc, x, &ta_certinfo, &uri, generation);
- ta_certinfo.ta = 1;
-
- if ((w = walk_ctx_stack_push(wsk, x, &ta_certinfo)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't push walk context stack");
- goto done;
- }
-
- check_ta(&rc, wsk);
- wsk = NULL; /* Ownership of wsk passed to check_ta() */
- }
-
- if (!finalize_directories(&rc))
- goto done;
-
- if (prune && !prune_unauthenticated(&rc, &rc.unauthenticated,
- strlen(rc.unauthenticated.s))) {
- logmsg(&rc, log_sys_err, "Trouble pruning old unauthenticated data");
- goto done;
- }
-
- ret = 0;
-
- done:
- log_openssl_errors(&rc);
-
- if (xmlfile != NULL) {
-
- char tad[sizeof("2006-10-13T11:22:33Z") + 1];
- time_t tad_time = time(0);
- struct tm *tad_tm = gmtime(&tad_time);
- int ok = 1, use_stdout = !strcmp(xmlfile, "-");
- hostname_t hostname;
- mib_counter_t code;
- FILE *f = NULL;
-
- strftime(tad, sizeof(tad), "%Y-%m-%dT%H:%M:%SZ", tad_tm);
-
- ok &= gethostname(hostname.s, sizeof(hostname.s)) == 0;
-
- if (use_stdout)
- f = stdout;
- else if (ok)
- ok &= (f = fopen(xmlfile, "w")) != NULL;
-
- if (ok)
- logmsg(&rc, log_telemetry, "Writing XML summary to %s",
- (use_stdout ? "standard output" : xmlfile));
-
- if (ok)
- ok &= fprintf(f, "<?xml version=\"1.0\" ?>\n"
- "<rcynic-summary date=\"%s\" rcynic-version=\"%s\""
- " summary-version=\"%d\" reporting-hostname=\"%s\">\n"
- " <labels>\n",
- tad, svn_id, XML_SUMMARY_VERSION, hostname.s) != EOF;
-
- for (j = 0; ok && j < MIB_COUNTER_T_MAX; ++j)
- if (ok)
- ok &= fprintf(f, " <%s kind=\"%s\">%s</%s>\n",
- mib_counter_label[j], mib_counter_kind[j],
- (mib_counter_desc[j]
- ? mib_counter_desc[j]
- : X509_verify_cert_error_string(mib_counter_openssl[j])),
- mib_counter_label[j]) != EOF;
-
- if (ok)
- ok &= fprintf(f, " </labels>\n") != EOF;
-
- for (i = 0; ok && i < sk_validation_status_t_num(rc.validation_status); i++) {
- validation_status_t *v = sk_validation_status_t_value(rc.validation_status, i);
- assert(v);
-
- tad_tm = gmtime(&v->timestamp);
- strftime(tad, sizeof(tad), "%Y-%m-%dT%H:%M:%SZ", tad_tm);
-
- for (code = (mib_counter_t) 0; ok && code < MIB_COUNTER_T_MAX; code++) {
- if (validation_status_get_code(v, code)) {
- if (ok)
- ok &= fprintf(f, " <validation_status timestamp=\"%s\" status=\"%s\"",
- tad, mib_counter_label[code]) != EOF;
- if (ok && (v->generation == object_generation_current ||
- v->generation == object_generation_backup))
- ok &= fprintf(f, " generation=\"%s\"",
- object_generation_label[v->generation]) != EOF;
- if (ok)
- ok &= fprintf(f, ">%s</validation_status>\n", v->uri.s) != EOF;
- }
- }
- }
-
- if (ok)
- ok &= fprintf(f, "</rcynic-summary>\n") != EOF;
-
- if (f && !use_stdout)
- ok &= fclose(f) != EOF;
-
- if (!ok)
- logmsg(&rc, log_sys_err, "Couldn't write XML summary to %s: %s",
- xmlfile, strerror(errno));
-
- }
-
- /*
- * Do NOT free cfg_section, NCONF_free() takes care of that
- */
- sk_OPENSSL_STRING_pop_free(rc.rsync_cache, OPENSSL_STRING_free);
- sk_OPENSSL_STRING_pop_free(rc.backup_cache, OPENSSL_STRING_free);
- sk_OPENSSL_STRING_pop_free(rc.dead_host_cache, OPENSSL_STRING_free);
- sk_validation_status_t_pop_free(rc.validation_status, validation_status_t_free);
- X509_STORE_free(rc.x509_store);
- NCONF_free(cfg_handle);
- CONF_modules_free();
- BIO_free(bio);
- EVP_cleanup();
- ERR_free_strings();
- if (rc.rsync_program)
- free(rc.rsync_program);
- if (lockfile && lockfd >= 0)
- unlink(lockfile);
- if (lockfile)
- free(lockfile);
- if (xmlfile)
- free(xmlfile);
-
- if (start) {
- finish = time(0);
- logmsg(&rc, log_telemetry,
- "Finished, elapsed time %u:%02u:%02u",
- (unsigned) ((finish - start) / 3600),
- (unsigned) ((finish - start) / 60 % 60),
- (unsigned) ((finish - start) % 60));
- }
-
- return ret;
-}
diff --git a/rcynic-ng/rcynic.xsl b/rcynic-ng/rcynic.xsl
deleted file mode 100644
index 487cd2f6..00000000
--- a/rcynic-ng/rcynic.xsl
+++ /dev/null
@@ -1,312 +0,0 @@
-<?xml version="1.0"?>
-<!--
- - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
- -
- - Portions copyright (C) 2006 American Registry for Internet Numbers ("ARIN")
- -
- - Permission to use, copy, modify, and distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
- -->
-
-<!-- $Id$ -->
-
-<!--
- - XSL stylesheet to render rcynic's xml-summary output as basic (X)HTML.
- -
- - This is a bit more complicated than strictly necessary, because I wanted
- - the ability to drop out columns that are nothing but zeros.
- - There's probably some clever way of using XPath to simplify this,
- - but I don't expect the data sets to be large enough for performance
- - to be an issue here. Feel free to show me how to do better.
- -->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0"
- xmlns:com="http://exslt.org/common"
- xmlns:str="http://exslt.org/strings"
- exclude-result-prefixes="com str">
-
- <xsl:output omit-xml-declaration="yes" indent="yes" method="xml" encoding="US-ASCII"
- doctype-public="-//W3C//DTD XHTML 1.0 Strict//EN"
- doctype-system="http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"/>
-
- <xsl:param name="refresh" select="1800"/>
- <xsl:param name="suppress-zero-columns" select="1"/>
- <xsl:param name="show-total" select="1"/>
- <xsl:param name="use-colors" select="1"/>
- <xsl:param name="show-detailed-status" select="1"/>
- <xsl:param name="show-problems" select="0"/>
- <xsl:param name="show-summary" select="1"/>
-
- <xsl:template match="/">
- <xsl:comment>Generators</xsl:comment>
- <xsl:comment><xsl:value-of select="rcynic-summary/@rcynic-version"/></xsl:comment>
- <xsl:comment>$Id$</xsl:comment>
- <html>
- <xsl:variable name="title">
- <xsl:text>rcynic summary </xsl:text>
- <xsl:value-of select="rcynic-summary/@date"/>
- </xsl:variable>
- <head>
- <title>
- <xsl:value-of select="$title"/>
- </title>
- <xsl:if test="$refresh != 0">
- <meta http-equiv="Refresh" content="{$refresh}"/>
- </xsl:if>
- <style type="text/css">
- td { text-align: center; padding: 4px }
- td.uri { text-align: left }
- td.host { text-align: left }
- <xsl:if test="$use-colors != 0">
- tr.good,td.good { background-color: #77ff77 }
- tr.warn,td.warn { background-color: yellow }
- tr.bad,td.bad { background-color: #ff5500 }
- </xsl:if>
- </style>
- </head>
- <body>
- <h1><xsl:value-of select="$title"/></h1>
-
- <!-- Summary output, old host-oriented format -->
- <xsl:if test="$show-summary != 0">
-
- <!-- Collect data we need to display -->
- <xsl:variable name="host-data">
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:sort order="ascending" data-type="text" select="."/>
- <xsl:variable name="uri" select="string(.)"/>
- <xsl:if test="starts-with($uri, 'rsync://')">
- <xsl:variable name="hostname" select="str:tokenize($uri, ':/')[2]"/>
- <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = current()/@status]/@kind"/>
- <xsl:variable name="fn2">
- <xsl:if test="substring($uri, string-length($uri) - 3, 1) = '.' and @generation != ''">
- <xsl:value-of select="substring($uri, string-length($uri) - 3)"/>
- </xsl:if>
- </xsl:variable>
- <x hostname="{$hostname}" timestamp="{@timestamp}" uri="{$uri}" status="{@status}" mood="{$mood}" fn2="{$fn2}" generation="{@generation}"/>
- </xsl:if>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Calculate set of unique hostnames -->
- <xsl:variable name="unique-hostnames">
- <xsl:for-each select="com:node-set($host-data)/x[not(@hostname = following::x/@hostname)]">
- <x hostname="{@hostname}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Calculate set of unique filename types -->
- <xsl:variable name="unique-fn2s">
- <xsl:for-each select="com:node-set($host-data)/x[not(@fn2 = following::x/@fn2)]">
- <x fn2="{@fn2}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Generation names -->
- <xsl:variable name="unique-generations">
- <xsl:for-each select="com:node-set($host-data)/x[not(@generation = following::x/@generation)]">
- <x generation="{@generation}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Calculate grand totals, figure out which columns to display -->
- <xsl:variable name="totals">
- <xsl:for-each select="rcynic-summary/labels/*">
- <xsl:variable name="sum" select="count(com:node-set($host-data)/x[@status = name(current())])"/>
- <xsl:variable name="show">
- <xsl:choose>
- <xsl:when test="$suppress-zero-columns = 0 or $sum &gt; 0">
- <xsl:text>1</xsl:text>
- </xsl:when>
- <xsl:otherwise>
- <xsl:text>0</xsl:text>
- </xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <x name="{name(current())}" sum="{$sum}" text="{.}" show="{$show}" mood="{@kind}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Calculate how many columns we'll be displaying -->
- <xsl:variable name="columns" select="count(com:node-set($totals)/x[@show = 1])"/>
-
- <!-- Show the total -->
- <xsl:if test="$show-total != 0">
- <br/>
- <h2>Grand Totals</h2>
- <table class="summary" rules="all" border="1">
- <thead>
- <tr>
- <td/> <!-- was hostname -->
- <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
- <td><b><xsl:value-of select="@text"/></b></td>
- </xsl:for-each>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td><b>Total</b></td>
- <xsl:for-each select="com:node-set($totals)/x">
- <xsl:if test="$suppress-zero-columns = 0 or @sum &gt; 0">
- <td class="{@mood}"><xsl:value-of select="@sum"/></td>
- </xsl:if>
- </xsl:for-each>
- </tr>
- </tbody>
- </table>
- </xsl:if>
-
- <!-- Generate the HTML -->
- <br/>
- <h2>Summaries by Repository Host</h2>
- <xsl:for-each select="com:node-set($unique-hostnames)/x">
- <xsl:sort order="ascending" data-type="text" select="@hostname"/>
- <xsl:variable name="hostname" select="@hostname"/>
- <br/>
- <h3><xsl:value-of select="$hostname"/></h3>
- <table class="summary" rules="all" border="1">
- <thead>
- <tr>
- <td/> <!-- was hostname -->
- <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
- <td><b><xsl:value-of select="@text"/></b></td>
- </xsl:for-each>
- </tr>
- </thead>
- <tbody>
- <xsl:for-each select="com:node-set($unique-fn2s)/x">
- <xsl:sort order="ascending" data-type="text" select="@fn2"/>
- <xsl:variable name="fn2" select="@fn2"/>
- <xsl:for-each select="com:node-set($unique-generations)/x">
- <xsl:sort order="ascending" data-type="text" select="@generation"/>
- <xsl:variable name="generation" select="@generation"/>
- <xsl:if test="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation])">
- <tr>
- <td><xsl:value-of select="concat($generation, ' ', $fn2)"/></td>
- <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
- <xsl:variable name="label" select="@name"/>
- <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation and @status = $label])"/>
- <xsl:choose>
- <xsl:when test="$value != 0">
- <td class="{@mood}">
- <xsl:value-of select="$value"/>
- </td>
- </xsl:when>
- <xsl:otherwise>
- <td/>
- </xsl:otherwise>
- </xsl:choose>
- </xsl:for-each>
- </tr>
- </xsl:if>
- </xsl:for-each>
- </xsl:for-each>
- <tr>
- <td>Total</td>
- <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
- <xsl:variable name="label" select="@name"/>
- <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @status = $label])"/>
- <xsl:choose>
- <xsl:when test="$value != 0">
- <td class="{@mood}">
- <xsl:value-of select="$value"/>
- </td>
- </xsl:when>
- <xsl:otherwise>
- <td/>
- </xsl:otherwise>
- </xsl:choose>
- </xsl:for-each>
- </tr>
- </tbody>
- </table>
- </xsl:for-each>
-
- <!-- "Problems" display -->
- <xsl:if test="$show-problems != 0">
- <br/>
- <h2>Problems</h2>
- <table class="problems" rules="all" border="1" >
- <thead>
- <tr>
- <td class="status"><b>Status</b></td>
- <td class="uri"><b>URI</b></td>
- </tr>
- </thead>
- <tbody>
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:variable name="status" select="@status"/>
- <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
- <xsl:if test="$mood != 'good'">
- <tr class="{$mood}">
- <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
- <td class="uri"><xsl:value-of select="."/></td>
- </tr>
- </xsl:if>
- </xsl:for-each>
- </tbody>
- </table>
- </xsl:if>
- </xsl:if>
-
- <!-- Detailed status display -->
- <xsl:if test="$show-detailed-status != 0">
- <br/>
- <h2>Validation Status</h2>
- <table class="details" rules="all" border="1" >
- <thead>
- <tr>
- <td class="timestamp"><b>Timestamp</b></td>
- <td class="generation"><b>Generation</b></td>
- <td class="status"><b>Status</b></td>
- <td class="uri"><b>URI</b></td>
- </tr>
- </thead>
- <tbody>
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:variable name="status" select="@status"/>
- <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
- <tr class="{$mood}">
- <td class="timestamp"><xsl:value-of select="@timestamp"/></td>
- <td class="generation"><xsl:value-of select="@generation"/></td>
- <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
- <td class="uri"><xsl:value-of select="."/></td>
- </tr>
- </xsl:for-each>
- </tbody>
- </table>
- </xsl:if>
-
- </body>
- </html>
- </xsl:template>
-
-</xsl:stylesheet>
-
-<!--
- - Local variables:
- - mode: sgml
- - End:
- -->
diff --git a/rcynic-ng/sample-trust-anchors/README b/rcynic-ng/sample-trust-anchors/README
deleted file mode 100644
index b0c45dd3..00000000
--- a/rcynic-ng/sample-trust-anchors/README
+++ /dev/null
@@ -1,13 +0,0 @@
-$Id$
-
-These are SAMPLE trust anchors for testing rcynic. Any resemblance
-between these files and real trust anchors living, dead, or wandering
-the night in ghostly torment is purely coincidental.
-
-Remember: it's only a trust anchor if a relying party says it is.
-You're the relying party, so it's your call.
-
-The .cer files (if any) are self-signed X.509 trust anchors.
-
-The .tal files are "trust anchor locators", formerly known as
-"indirect trust anchors" -- see draft-ietf-sidr-ta.
diff --git a/rcynic-ng/sample-trust-anchors/afrinic.tal b/rcynic-ng/sample-trust-anchors/afrinic.tal
deleted file mode 100644
index 0f202c73..00000000
--- a/rcynic-ng/sample-trust-anchors/afrinic.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://rpki.afrinic.net/repository/AfriNIC.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxsAqAhWIO+ON2Ef9oRDM
-pKxv+AfmSLIdLWJtjrvUyDxJPBjgR+kVrOHUeTaujygFUp49tuN5H2C1rUuQavTH
-vve6xNF5fU3OkTcqEzMOZy+ctkbde2SRMVdvbO22+TH9gNhKDc9l7Vu01qU4LeJH
-k3X0f5uu5346YrGAOSv6AaYBXVgXxa0s9ZvgqFpim50pReQe/WI3QwFKNgpPzfQL
-6Y7fDPYdYaVOXPXSKtx7P4s4KLA/ZWmRL/bobw/i2fFviAGhDrjqqqum+/9w1hEl
-L/vqihVnV18saKTnLvkItA/Bf5i11Yhw2K7qv573YWxyuqCknO/iYLTR1DToBZcZ
-UQIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/apnic.tal b/rcynic-ng/sample-trust-anchors/apnic.tal
deleted file mode 100644
index 9e061061..00000000
--- a/rcynic-ng/sample-trust-anchors/apnic.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://rpki.apnic.net/repository/APNIC.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr36frxrKWB/g+b2F4TSN
-TJYm5vcq0QjSb6Sas3O84doujITy5FmAbcFlTUc5m/hhDrWO6OBowAFmLt8KowCj
-Gf0aBFGeXULYiqcZFCiP1LPqvS2M5WN5PLrz88f21EUpqsm+Fq/5fwdH6OHZvSbC
-BE2grVw1tQM5LCSqCs4vmrdvGi1tVcFF2FlDFaCpKN7p0Eg7YmLedf4ANkbpNxqk
-mjbDVybK1qL6p6j85uBTbPoGHHr/AUw2UXdnl1Z+mPj1lZv01/AQe7X4KobJW+Tc
-KljtK6Zccs6+/LiC1zh59VHdbIn1K4KyXVTeNAOHzu3ZOOlcTMMfO0dCnqtKoab/
-eQIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/bbn-testbed.tal b/rcynic-ng/sample-trust-anchors/bbn-testbed.tal
deleted file mode 100644
index 21302ea5..00000000
--- a/rcynic-ng/sample-trust-anchors/bbn-testbed.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://rpki.bbn.com/rpki/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvEjkk0oC2N8Ij+ruAkrX
-cI7lSOIG1sQLC8xJJqklMlOBLao+3If0AVKSEP5+pb/hJwpXPomt6Lm4+fuZ6vX/
-q15llQL3YWkHZ49t/X47yCqxvM0txRaZafffbk6lJrofKhKKrYEcrr/rHiG+IQXn
-U1MGaQ/a8DfRDWiCQxonh7CthrLi7tr+QI9s9fDvvHPCK9OmmhZvCUDOydf+/vMn
-VShQ57KsUFcuZ0EX9UwsaIGCYGyvsYwk54UtoIGovg2IavfZK8ai0/5XEVpvKQiR
-8AixZpg5bSe7555+LhzpL5+LdqX6/hVRah0JrdR8KiXvr16Kmcitj+JLqVc0Wkd8
-zwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/lacnic.tal b/rcynic-ng/sample-trust-anchors/lacnic.tal
deleted file mode 100644
index 77a142d2..00000000
--- a/rcynic-ng/sample-trust-anchors/lacnic.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://repository.lacnic.net/rpki/lacnic/RTA_LACNIC_RPKI.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1AuR49ZoKS59Vnpq8M0X
-djeV3ROqtElwx6sNmUXvWBFPQlZLs2tR5/0MwprIWRi91WnMBVWjsECcLBe7Pu+u
-V/tTvPMJRXm/c+l8nR+FhAj7pn4M5A2pHFBndCPc1UrFD+BLACx9DSNiUjzKr1t7
-wjHTW+F0NMnZ9g9hKdxDNCFi66BGx2f3TTW3uGns/IPfkxrRCeYtJcBpQ5mKoc8g
-QOndiEG/33uXDS9EOe1dycmnaw9EQqxqHp+Bj0TIVoFyfDNuT+soJ3uwtQr2g5Ys
-AIxJtmBAZrLj+acmLeQrYC0xQuK118dSAS9r6GSm476m2aGEYtb083fLodeYSEjM
-/wIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal b/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal
deleted file mode 100644
index ebdb9f45..00000000
--- a/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal
+++ /dev/null
@@ -1,9 +0,0 @@
-rsync://rpki.ripe.net/ta/ripe-ncc-ta.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0URYSGqUz2m
-yBsOzeW1jQ6NsxNvlLMyhWknvnl8NiBCs/T/S2XuNKQNZ+wBZxIgPPV
-2pFBFeQAvoH/WK83HwA26V2siwm/MY2nKZ+Olw+wlpzlZ1p3Ipj2eNc
-Krmit8BwBC8xImzuCGaV0jkRB0GZ0hoH6Ml03umLprRsn6v0xOP0+l6
-Qc1ZHMFVFb385IQ7FQQTcVIxrdeMsoyJq9eMkE6DoclHhF/NlSllXub
-ASQ9KUWqJ0+Ot3QCXr4LXECMfkpkVR2TZT+v5v658bHVs6ZxRD1b6Uk
-1uQKAyHUbn/tXvP8lrjAibGzVsXDT2L0x4Edx+QdixPgOji3gBMyL2V
-wIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-apnic.tal b/rcynic-ng/sample-trust-anchors/testbed-apnic.tal
deleted file mode 100644
index 3e57b05e..00000000
--- a/rcynic-ng/sample-trust-anchors/testbed-apnic.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://apnic.rpki.net/rpki/apnic/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
-GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
-Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
-nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
-BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
-ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
-aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal b/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal
deleted file mode 100644
index ae16a302..00000000
--- a/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://apnicrpki.rand.apnic.net/repository/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApQofhU9VeRGZxlNgkrFR
-ShSIkGtRUwg1mkISvieM2oxA6LFhHoqFV25BdK/3IhOh/0I/E9WX7uWPD72MFGBB
-LsFc6JpZFkvg8+9KUIHquk46wn4bEvA8xMsPbGo+mK0CAkqOdHQSemC7nqUlR9VH
-5zH1t8aYS4mrVN59LfClmiEE7QElgmVyvccfaEd+gMGaxsDvSylWIdvVxYfFG2rB
-LiEsmfXwdn2a8b1Zx5eaFD80XV9Z7h15ESP1epSScGzMp2XB0FYMC3f9k7l5sydX
-Rj5BYo1rWaM+Y6AoHe/d8G0xm6iX6b/JqT4Ywdt8/7DbsweaIk73VnpnXrYATCt9
-VwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-arin.tal b/rcynic-ng/sample-trust-anchors/testbed-arin.tal
deleted file mode 100644
index 5ec14f36..00000000
--- a/rcynic-ng/sample-trust-anchors/testbed-arin.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://arin.rpki.net/rpki/arin/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
-GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
-Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
-nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
-BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
-ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
-aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-ripe.tal b/rcynic-ng/sample-trust-anchors/testbed-ripe.tal
deleted file mode 100644
index d52aba62..00000000
--- a/rcynic-ng/sample-trust-anchors/testbed-ripe.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://ripe.rpki.net/rpki/ripe/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
-GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
-Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
-nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
-BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
-ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
-aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/show.awk b/rcynic-ng/show.awk
deleted file mode 100644
index 84d33cda..00000000
--- a/rcynic-ng/show.awk
+++ /dev/null
@@ -1,93 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# Prettyprint tab-delimited rcynic summary data. This is intended as
-# a postprocessor for show.xsl.
-
-BEGIN {
- FS = "\t";
- label[++nlabels] = "hostname";
- head["hostname", 1] = "Repository";
- head["hostname", 2] = "Host";
- width["hostname"] = length(head["hostname", 1]);
-}
-
-!NF {
- got_labels = 1;
- next;
-}
-
-!got_labels {
- label[++nlabels] = $1;
- nh = split($2, h, /[ \t]+/);
- for (i = 1; i <= nh; i++) {
- head[$1, i] = h[i];
- if (length(h[i]) > width[$1])
- width[$1] = length(h[i]);
- }
-}
-
-got_labels && $3 ~ /^rsync:\/\/.+/ {
- hostname = $3;
- sub(/^rsync:\/\//, "", hostname);
- sub(/\/.*$/, "", hostname);
- if (!seen[hostname])
- seen[hostname] = ++nhostnames;
- data[hostname, $2]++;
- total[$2]++;
- if (length(hostname) > width["hostname"])
- width["hostname"] = length(hostname);
-}
-
-END {
-
- for (i = 1;; i++) {
- blank = 1;
- for (j = 1; j <= nlabels; j++)
- if (head[label[j], i] && (j == 1 || total[label[j]] > 0))
- blank = 0;
- if (blank)
- break;
- for (j = 1; j <= nlabels; j++) {
- if (j == 1)
- printf "%-*s", width[label[j]], head[label[j], i];
- else if (total[label[j]] > 0)
- printf " %*s", width[label[j]], head[label[j], i];
- }
- print "";
- }
-
- for (i in seen)
- hostnames[seen[i]] = i;
-
- for (i = 1; i <= nhostnames; i++) {
- for (j = 1; j <= nlabels; j++) {
- if (j == 1)
- printf "%-*s", width[label[j]], hostnames[i];
- else if (total[label[j]] > 0)
- printf " %*s", width[label[j]], data[hostnames[i], label[j]];
- }
- print "";
- }
-
- for (j = 1; j <= nlabels; j++) {
- if (j == 1)
- printf "%-*s", width[label[j]], "Total";
- else if (total[label[j]] > 0)
- printf " %*s", width[label[j]], total[label[j]];
- }
- print "";
-}
diff --git a/rcynic-ng/show.sh.in b/rcynic-ng/show.sh.in
deleted file mode 100644
index c62a325b..00000000
--- a/rcynic-ng/show.sh.in
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# Prettyprint rcynic summary data as flat text.
-
-@XSLTPROC@ @abs_builddir@/show.xsl ${1-rcynic.xml} | @AWK@ -f @abs_builddir@/show.awk
diff --git a/rcynic-ng/show.xsl b/rcynic-ng/show.xsl
deleted file mode 100644
index 06d69893..00000000
--- a/rcynic-ng/show.xsl
+++ /dev/null
@@ -1,59 +0,0 @@
-<?xml version="1.0"?>
-<!--
- - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
- -->
-
-<!-- $Id$ -->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-
- <xsl:output method="text" encoding="US-ASCII"/>
-
- <!-- Translate rcynic XML into tab-delimited flat text -->
- <xsl:template match="/">
-
- <!-- Write labels as two columns: <label> <tab> <text> -->
- <xsl:for-each select="rcynic-summary/labels/*">
- <xsl:value-of select="name()"/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="."/>
- <xsl:text>&#10;</xsl:text>
- </xsl:for-each>
-
- <!-- Blank line between sections -->
- <xsl:text>&#10;</xsl:text>
-
- <!-- Write status as three columns: <timestamp> <tab> <status> <tab> <uri> -->
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:sort order="ascending" data-type="text" select="."/>
- <xsl:value-of select="@timestamp"/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="@status"/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="."/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="@generation"/>
- <xsl:text>&#10;</xsl:text>
- </xsl:for-each>
-
- </xsl:template>
-
-</xsl:stylesheet>
-
-<!--
- - Local variables:
- - mode: sgml
- - End:
- -->
diff --git a/rcynic-ng/static-rsync/Makefile.in b/rcynic-ng/static-rsync/Makefile.in
deleted file mode 100644
index 9c8556e0..00000000
--- a/rcynic-ng/static-rsync/Makefile.in
+++ /dev/null
@@ -1,41 +0,0 @@
-# $Id$
-
-VERSION = 2.6.9
-
-CFG_ENV = CFLAGS='@CFLAGS@' LDFLAGS='@LDFLAGS@ @LD_STATIC_FLAG@'
-CFG_ARG =
-
-TARBALL = rsync-${VERSION}.tar.gz
-DIRNAME = rsync-${VERSION}
-
-CFG_LOG = > ../config.log 2>&1
-BIN_LOG = > ../build.log 2>&1
-
-BIN = rsync
-
-all: ${BIN}
-
-${BIN}: ${DIRNAME}/${BIN}
- ln ${DIRNAME}/${BIN} $@
- file $@
-
-${DIRNAME}/${BIN}: configured.stamp
- cd ${DIRNAME} && ${MAKE} ${BIN_LOG}
-
-extracted.stamp: ${TARBALL}
- gzip -c -d ${TARBALL} | tar -xf -
- touch $@
-
-patched.stamp: extracted.stamp
- for i in patches/patch-*; do if test -f "$$i"; then patch -d ${DIRNAME} <"$$i"; else :; fi; done
- touch $@
-
-configured.stamp: patched.stamp
- cd ${DIRNAME} && ${CFG_ENV} ./configure ${CFG_ARG} ${CFG_LOG}
- touch $@
-
-clean:
- rm -rf ${BIN} ${DIRNAME} *.stamp *.log
-
-distclean: clean
- rm -f Makefile
diff --git a/rcynic-ng/static-rsync/README b/rcynic-ng/static-rsync/README
deleted file mode 100644
index 9ff5afa8..00000000
--- a/rcynic-ng/static-rsync/README
+++ /dev/null
@@ -1,15 +0,0 @@
-$Id$
-
-Hack to build a static rsync binary suitable for use in a chroot jail.
-
-The default configuration is for gcc, since that's the most widely
-used compiler on the platforms we use. I've provided hooks intended
-to make it simple to support other compilers just by overriding make
-variables on the command line: if you need to do something more
-drastic than this to get your compiler working, please tell me.
-
-If your platform doesn't support static binaries at all, you're on
-your own (and should whine at your OS vendor, as this is nuts).
-
-We try to stick with rsync release code, but apply security patches
-when necessary.
diff --git a/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091 b/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091
deleted file mode 100644
index 201af96a..00000000
--- a/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091
+++ /dev/null
@@ -1,60 +0,0 @@
---- sender.c 2006-09-20 03:53:32.000000000 +0200
-+++ sender.c 2007-07-25 15:33:05.000000000 +0200
-@@ -123,6 +123,7 @@
- char fname[MAXPATHLEN];
- struct file_struct *file;
- unsigned int offset;
-+ size_t l = 0;
-
- if (ndx < 0 || ndx >= the_file_list->count)
- return;
-@@ -133,6 +134,20 @@
- file->dir.root, "/", NULL);
- } else
- offset = 0;
-+
-+ l = offset + 1;
-+ if (file) {
-+ if (file->dirname)
-+ l += strlen(file->dirname);
-+ if (file->basename)
-+ l += strlen(file->basename);
-+ }
-+
-+ if (l >= sizeof(fname)) {
-+ rprintf(FERROR, "Overlong pathname\n");
-+ exit_cleanup(RERR_FILESELECT);
-+ }
-+
- f_name(file, fname + offset);
- if (remove_source_files) {
- if (do_unlink(fname) == 0) {
-@@ -224,6 +239,7 @@
- enum logcode log_code = log_before_transfer ? FLOG : FINFO;
- int f_xfer = write_batch < 0 ? batch_fd : f_out;
- int i, j;
-+ size_t l = 0;
-
- if (verbose > 2)
- rprintf(FINFO, "send_files starting\n");
-@@ -259,6 +275,20 @@
- fname[offset++] = '/';
- } else
- offset = 0;
-+
-+ l = offset + 1;
-+ if (file) {
-+ if (file->dirname)
-+ l += strlen(file->dirname);
-+ if (file->basename)
-+ l += strlen(file->basename);
-+ }
-+
-+ if (l >= sizeof(fname)) {
-+ rprintf(FERROR, "Overlong pathname\n");
-+ exit_cleanup(RERR_FILESELECT);
-+ }
-+
- fname2 = f_name(file, fname + offset);
-
- if (verbose > 2)
diff --git a/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz b/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz
deleted file mode 100644
index 6377f639..00000000
--- a/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz
+++ /dev/null
Binary files differ
diff --git a/rcynic/Doxyfile b/rcynic/Doxyfile
index c52ab27b..bac51c4f 100644
--- a/rcynic/Doxyfile
+++ b/rcynic/Doxyfile
@@ -1377,7 +1377,7 @@ ENABLE_PREPROCESSING = YES
# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.
-MACRO_EXPANSION = NO
+MACRO_EXPANSION = YES
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
# then the macro expansion is limited to the macros specified with the
diff --git a/rcynic/Makefile.in b/rcynic/Makefile.in
index 9bf7d38e..3b86a510 100644
--- a/rcynic/Makefile.in
+++ b/rcynic/Makefile.in
@@ -6,10 +6,18 @@ BIN = ${NAME}
SRC = ${NAME}.c
OBJ = ${NAME}.o
-CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations
+HDR = defasn1.h
+GEN = defstack.h
+
+OBJS = ${OBJ} bio_f_linebreak.o
+
+CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror-implicit-function-declaration
LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@
LIBS = @LIBS@
+AWK = @AWK@
+XSLTPROC = @XSLTPROC@
+
abs_top_srcdir = @abs_top_srcdir@
abs_top_builddir = @abs_top_builddir@
@@ -19,10 +27,15 @@ all: ${BIN}
clean:
cd static-rsync; ${MAKE} $@
- rm -f ${BIN} ${OBJ}
+ rm -f ${BIN} ${OBJS} ${GEN}
-${BIN}: ${SRC}
- ${CC} ${CFLAGS} -o $@ ${SRC} ${LDFLAGS} ${LIBS}
+${OBJ}: ${SRC} ${HDR} ${GEN}
+
+${BIN}: ${OBJS}
+ ${CC} ${CFLAGS} -o $@ ${OBJS} ${LDFLAGS} ${LIBS}
+
+defstack.h: defstack.awk ${SRC} ${HDR}
+ ${AWK} -f >$@ defstack.awk ${SRC} ${HDR}
test: ${BIN}
if test -r rcynic.conf; then ./${BIN} -j 0 && echo && ./show.sh; else echo No rcynic.conf, skipping test; fi
@@ -39,8 +52,11 @@ distclean: clean docclean
tags: TAGS
-TAGS: ${SRC}
- etags ${SRC}
+TAGS: ${SRC} ${HDR} ${GEN}
+ etags ${SRC} ${HDR} ${GEN}
+
+rcynic.html: rcynic.xml rcynic.xsl
+ ${XSLTPROC} -o $@ rcynic.xsl rcynic.xml
# Doc stuff right now is just internals doc, of interest only to
# programmers. Real doc for rcynic is still the README. This may
diff --git a/rcynic/README b/rcynic/README
index 27a2e455..fb810e6e 100644
--- a/rcynic/README
+++ b/rcynic/README
@@ -13,10 +13,8 @@ done a POSIX reference manual lookup for every function call. Please
report any portability problems.
All certificates and CRLs are in DER format, with filenames derived
-from the RPKI rsync URIs at which the data are published. At some
-point I'll probably write a companion program to convert a tree of DER
-into the hashed directory of PEM format that most OpenSSL applications
-expect.
+from the RPKI rsync URIs at which the data are published. See
+../utils/ and ../rtr-origin/ for tools that use rcynic's output.
All configuration is via an OpenSSL-style configuration file, except
for selection of the name of the configuration file itself. A few of
@@ -38,19 +36,54 @@ The one thing you MUST specify in the config file in order for the
program to do anything useful is the file name of one or more trust
anchors. Trust anchors for this program are represented as
DER-formatted X509 objects that look just like certificates, except
-that they're trust anchors. To date I have only tested this code with
-self-signed trust anchors; in theory, this is not required, in
-practice the code may require tweaks to support other trust anchors.
+that they're trust anchors.
+
+Strictly speaking, trust anchors do not need to be self-signed, but
+many programs (including OpenSSL) assume that trust anchors will be
+self-signed. See the allow-non-self-signed-trust-anchor configuration
+option if you need to use a non-self-signed trust anchor, but be
+warned that the results, while technically correct, may not be useful.
+
+There are two ways of specifying trust anchors:
+
+- Via the "trust-anchor" directive, to name a local file containing
+ the DER-encoded trust anchor.
+
+- Via the "trust-anchor-locator" directive, to name a local file
+ containing a "trust anchor locator" (TAL). See draft-ietf-sidr-ta
+ for details [update this once RFC has been issued].
+
+In most cases, except perhaps for testing, you will want to use trust
+anchor locators, since they allow the trust anchor itself to be
+updated without requiring reconfiguration of rcynic.
+
+See the make-tal.sh script in this directory if you need to generate
+your own TAL file for a trust anchor.
+
+As of when I write this documentation, there still is no global trust
+anchor for the RPKI system, so you have to specify separate trust
+anchors for each RIR that's publishing data:
Example of a minimal config file:
[rcynic]
- trust-anchor.0 = trust-anchors/apnic-trust-anchor.cer
- trust-anchor.1 = trust-anchors/ripe-ripe-trust-anchor.cer
- trust-anchor.2 = trust-anchors/ripe-arin-trust-anchor.cer
+ trust-anchor-locator.0 = trust-anchors/apnic.tal
+ trust-anchor-locator.1 = trust-anchors/ripe.tal
+ trust-anchor-locator.2 = trust-anchors/afrinic.tal
+ trust-anchor-locator.3 = trust-anchors/lacnic.tal
+
+Eventually, this should all be collapsed into a single trust anchor,
+so that relying parties (people running tools like rcynic) don't need
+to sort out this sort of issue, at which point the above
+configuration can become something like:
+
+ [rcynic]
+
+ trust-anchor-locator = trust-anchors/iana.tal
-By default, rcynic uses three writable directory trees:
+
+By default, rcynic uses two writable directory trees:
- unauthenticated Raw data fetched via rsync. In order to take
full advantage of rsync's optimized transfers,
@@ -61,25 +94,33 @@ By default, rcynic uses three writable directory trees:
- authenticated Data that rcynic has checked. This is the
real output of the process.
-- old_authenticated Saved results from immediately previous rcynic
- run, used when attempting to recover from
- certain kinds of errors.
-
-rcynic renames the authenticated tree to become the old_authenticated
-tree when it starts up, then builds a new authenticated tree.
-
-rcynic copies the trust anchors themselves into the top level
-directory of the authenticated tree xxxxxxxx.n.cer, where xxxxxxxx and
-n are the OpenSSL object name hash and index within the resulting
-virtual hash bucket (the same as the c_hash Perl script that comes
-with OpenSSL would produce), and ".cer" is the literal string ".cer".
-The reason for this is that trust anchors, by definition, are not
-fetched automatically, and thus do not really have publication URIs in
-the sense that every other object in these trees do. So rcynic uses a
-naming scheme which insures (a) that each trust anchor has a unique
-name within the output tree and (b) that trust anchors cannot be
-confusd with certificates: trust anchors always go in the top level of
-the tree, data fetched via rsync always go in subdirectories.
+authenticated is really a symbolic link to a directory with a name of
+the form authenticated.<timestamp>, where <timestamp> is an ISO 8601
+timestamp like 2001-04-01T01:23:45Z. rcynic creates a new timestamped
+directory every time it runs, and moves the symbolic link as an atomic
+operation when the validation process completes. The intent is that
+authenticated always points to the most recent usable validation
+results, so that programs which use rcynic's output don't need to
+worry about whether an rcynic run is in progress.
+
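+For example (the timestamp here is made up), after a successful run
+you might see something like this under rcynic-data/:
+
+  authenticated -> authenticated.2011-09-14T22:10:03Z
+  authenticated.2011-09-14T22:10:03Z/
+  unauthenticated/
+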
+rcynic stores trust anchors specified via the trust-anchor-locator
+directive in the unauthenticated tree just like any other fetched
+object, and copies them into the authenticated tree just like any
+other object once they pass rcynic's checks.
+
+rcynic copies trust anchors specified via the "trust-anchor" directive
+into the top level directory of the authenticated tree as
+xxxxxxxx.n.cer, where xxxxxxxx and n are the OpenSSL object name hash
+and index within the resulting virtual hash bucket (the same as the
+c_hash Perl script that comes with OpenSSL would produce), and ".cer"
+is the literal string ".cer". The reason for this is that these trust
+anchors, by definition, are not fetched automatically, and thus do not
+really have publication URIs in the sense that every other object in
+these trees does. So rcynic uses a naming scheme which ensures (a)
+that each trust anchor has a unique name within the output tree and
+(b) that trust anchors cannot be confused with certificates: trust
+anchors always go in the top level of the tree, while data fetched via
+rsync always go in subdirectories.
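+
+If you want to predict the name a particular "trust-anchor" file will
+get, something like the following (the filename is hypothetical)
+should print the xxxxxxxx portion, since it computes the same object
+name hash that c_hash produces:
+
+  $ openssl x509 -inform DER -in trust-anchors/ta.cer -noout -hash
+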
As currently implemented, rcynic does not attempt to maintain an
in-memory cache of objects it might need again later. It does keep an
@@ -90,7 +131,7 @@ freed immediately after use and read from disk again as needed. From
a database design standpoint, this is not very efficient, but as the
rcynic's main bottlenecks are expected to be crypto and network
operations, it seemed best to keep the design as simple as possible,
-at least until execution profiling demonstrates a real issue.
+at least until execution profiling demonstrates a real issue here.
Usage and configuration:
@@ -109,7 +150,7 @@ uses but customized to the specific task rcynic performs. Levels:
Command line options:
-c configfile Path to configuration file (default: rcynic.conf)
- -l loglevel Logging level (default: log_telemetry)
+ -l loglevel Logging level (default: log_data_err)
-s Log via syslog
-e Log via stderr when also using syslog
-j Start-up jitter interval (see below; default: 600)
@@ -128,15 +169,8 @@ Configuration variables:
authenticated Path to output directory (where rcynic should
place objects it has been able to validate).
- Default: rcynic-data/authenticated
-old-authenticated Path to which rcynic should rename the output
- directory (if any) from the previous rcynic
- run. rcynic preserves the previous run's
- output directory both as a backup data source
- for the current run and also so that you don't
- lose all your state if rcynic chokes and
- dies. Default: rcynic-data/authenticated.old
+ Default: rcynic-data/authenticated
unauthenticated Path to directory where rcynic should store
@@ -145,7 +179,10 @@ unauthenticated Path to directory where rcynic should store
rcynic to preserve and reuse this directory
across runs to minimize the network traffic
necessary to bring your repository mirror up
- to date. Default: rcynic-data/unauthenticated
+ to date.
+
+ Default: rcynic-data/unauthenticated
+
rsync-timeout How long (in seconds) to let rsync run before
terminating the rsync process, or zero for no
@@ -154,35 +191,83 @@ rsync-timeout How long (in seconds) to let rsync run before
prematurely. It's present to let you defend
against evil rsync server operators who try to
tarpit your connection as a form of denial of
- service attack on rcynic. Default: no timeout
- (but this may change, best set it explictly).
+ service attack on rcynic.
+
+ Default: 300 seconds.
+
+
+max-parallel-fetches Upper limit on the number of copies of rsync
+ that rcynic is allowed to run at once. Used
+ properly, this can speed up synchronization
+ considerably when fetching from repositories
+ built with sub-optimal tree layouts or when
+ dealing with unreachable repositories. Used
+ improperly, this option can generate excessive
+ load on repositories, cause synchronization to
+ be interrupted by firewalls, and generally
+ create a public nuisance. Use with
+ caution.
+
+ As of this writing, values in the range 2-4
+ are reasonably safe. At least one RIR
+ currently refuses service at settings above 4,
+ and another RIR appears to be running some
+ kind of firewall that silently blocks
+ connections when it decides that the
+ connection rate is excessive.
+ rcynic can't really detect all of the possible
+ problems created by excessive values of this
+ parameter, but if rcynic's report shows both
+ successful and skipped retrievals from the
+ same repository host,
+ that's a pretty good hint that something is
+ wrong, and an excessive value here is a good
+ first guess as to the cause.
+
+ Default: 1
+
+
+rsync-program Path to the rsync program.
+
+ Default: rsync, but you should probably set
+ this variable rather than just trusting the
+ PATH environment variable to be set correctly.
-rsync-program Path to the rsync program. Default: rsync,
- but you should probably set this variable
- rather than just trusting the PATH environment
- variable to be set correctly.
log-level Same as -l option on command line. Command
line setting overrides config file setting.
- Default: log_telemetry
+
+ Default: log_data_err
+
use-syslog Same as -s option on command line. Command
line setting overrides config file setting.
- Values: true or false. Default: false
+
+ Values: true or false.
+
+ Default: false
+
use-stderr Same as -e option on command line. Command
line setting overrides config file setting.
- Values: true or false. Default: false, but
- if neither use-syslog nor use-stderr is set,
- log output will go to stderr.
-syslog-facility Syslog facility to use. Default: local0
+ Values: true or false.
+
+ Default: false, but if neither use-syslog nor
+ use-stderr is set, log output goes to stderr.
+
+
+syslog-facility Syslog facility to use.
+
+ Default: local0
+
syslog-priority-xyz (where xyz is an rcynic logging level, above)
Override the syslog priority value to use when
logging messages at this rcynic level.
+
Defaults:
syslog-priority-log_sys_err: err
@@ -192,6 +277,7 @@ syslog-priority-xyz (where xyz is an rcynic logging level, above)
syslog-priority-log_verbose: info
syslog-priority-log_debug: debug
+
jitter Startup jitter interval, same as -j option on
command line. Jitter interval, specified in
number of seconds. rcynic will pick a random
@@ -202,12 +288,15 @@ jitter Startup jitter interval, same as -j option on
running under cron with synchronized clocks,
in particular to avoid hammering the RPKI
rsync servers into the ground at midnight UTC.
+
Default: 600
+
lockfile Name of lockfile, or empty for no lock. If
you run rcynic under cron, you should use this
parameter to set a lockfile so that successive
instances of rcynic don't stomp on each other.
+
Default: no lock
xml-summary Enable output of a per-host summary at the
@@ -216,13 +305,20 @@ xml-summary Enable output of a per-host summary at the
of logging, or just want it in addition to
logging. Value: filename to which XML summary
should be written; "-" will send XML summary
- to stdout. Default: no XML summary
+ to stdout.
+
+ Default: no XML summary
+
allow-stale-crl Allow use of CRLs which are past their
nextUpdate timestamp. This is probably
harmless, but since it may be an early warning
of problems, it's configurable.
- Values: true or false. Default: true
+
+ Values: true or false.
+
+ Default: true
+
prune Clean up old files corresponding to URIs that
rcynic did not see at all during this run.
@@ -232,39 +328,90 @@ prune Clean up old files corresponding to URIs that
rcynic never visits the old collection again,
old files will remain in the local mirror
indefinitely unless you enable this option.
- Values: true or false. Default: true
+
+ Values: true or false.
+
+ Default: true
+
allow-stale-manifest Allow use of manifests which are past their
nextUpdate timestamp. This is probably
harmless, but since it may be an early warning
of problems, it's configurable.
- Values: true or false. Default: true
+
+ Values: true or false.
+
+ Default: true
+
require-crl-in-manifest Reject manifests which don't list the CRL
covering the manifest EE certificate.
- Values: true or false. Default: false
+
+ Values: true or false.
+
+ Default: false
+
allow-non-self-signed-trust-anchor
Experimental. Attempts to work around OpenSSL's
strong preference for self-signed trust
anchors. Do not use this unless you really know
what you are doing.
- Values: true or false. Default: false
+
+ Values: true or false.
+
+ Default: false
+
+
+run-rsync
+
+ Whether to run rsync to fetch data. You don't
+ want to change this except when building
+ complex topologies where rcynic instances
+ running on one set of machines act as
+ aggregators for another set of validators.
+ A large ISP might want to build such a
+ topology so that it could have a local
+ validation cache in each
+ POP while minimizing load on the global
+ repository system and maintaining some degree
+ of internal consistency between POPs. In such
+ cases, one might want the rcynic instances in
+ the POPs to validate data fetched from the
+ aggregators via an external process, without
+ the POP rcynic instances attempting to fetch
+ anything themselves.
+
+ Don't touch this unless you really know what
+ you're doing.
+
+ Values: true or false.
+
+ Default: true
+
+
+use-links Whether to use hard links rather than copying
+ valid objects from the unauthenticated to
+ authenticated tree. Using links is slightly
+ more fragile (anything that stomps on the
+ unauthenticated file also stomps on the
+ authenticated file) but is a bit faster and
+ reduces the number of inodes consumed by a
+ large data collection. At the moment, copying
+ is the default behavior, but this may change
+ in the future.
+
+ Values: true or false.
+
+ Default: false
+
trust-anchor Specify one RPKI trust anchor, represented as
a local file containing an X.509 certificate
in DER format. Value of this option is the
- pathname of the file. No default.
+ pathname of the file.
+
+ No default.
-trust-anchor-uri-with-key
- Specify one RPKI trust anchor, represented as
- an rsync URI and a local file containing the
- RSA public key of the X.509 object specified
- by the URI. The RSA public key should be in
- DER format. Value for this option consists of
- the URI and the filename of the public key, in
- that order, separated by whitespace. No
- default.
trust-anchor-locator
Specify one RPKI trust anchor, represented as
@@ -273,17 +420,10 @@ trust-anchor-locator
by the URI. First line of the file is the
URI, remainder is the public key in Base64
encoded DER format. Value of this option
- is the pathname of the file. No default.
+ is the pathname of the file.
-indirect-trust-anchor
- An old name for trust-anchor-locator, kept for
- backwards compatability. See description of
- trust-anchor-locator. The name was changed
- to keep members of the IETF SIDR WG happy.
+ No default.
-trust-anchor-uri-with-key and indirect-trust-anchor are semantically
-identical, the difference is just how the URI and public key are
-stored.
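
Putting several of the options described above together, a fuller but
still hypothetical rcynic.conf (adjust paths and values to taste)
might look something like this:

  [rcynic]

  unauthenticated = /var/rcynic/data/unauthenticated
  authenticated = /var/rcynic/data/authenticated
  rsync-timeout = 300
  max-parallel-fetches = 2
  jitter = 600
  lockfile = /var/rcynic/data/lock
  use-syslog = true
  xml-summary = /var/rcynic/data/rcynic.xml
  trust-anchor-locator.0 = /var/rcynic/etc/trust-anchors/apnic.tal
  trust-anchor-locator.1 = /var/rcynic/etc/trust-anchors/ripe.tal

If you run rcynic from cron, a crontab entry along the lines of

  30 * * * *    /usr/local/bin/rcynic -c /var/rcynic/etc/rcynic.conf

(paths again hypothetical), combined with the jitter and lockfile
settings above, keeps periodic runs from piling up on each other.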
There's a companion XSLT template in rcynic.xsl, which translates what
the xml-summary option writes into HTML.
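
For example, if you have xsltproc (from libxslt) installed and told
rcynic to write its XML summary to rcynic.xml, something like

  $ xsltproc rcynic.xsl rcynic.xml > rcynic.html

should produce an HTML version of the summary (filenames hypothetical,
use whatever you gave the xml-summary option).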
@@ -292,11 +432,17 @@ the xml-summary option writes into HTML.
Running rcynic chrooted
-[This is only a sketch, needs details and finicky proofreading]
+This is an attempt to describe the process of setting up rcynic in a
+chrooted environment. The installation scripts that ship with rcynic
+try to do this automatically for the platforms we support, but the
+process is somewhat finicky, so some explanation seems in order. If
+you're running on one of the supported platforms, the following steps
+may be handled for you by the Makefiles, but you may still want to
+understand what all this is trying to do.
-rcynic does not include any direct support for running chrooted, but
-is designed to be (relatively) easy to run in a chroot jail. Here's
-how.
+rcynic itself does not include any direct support for running
+chrooted, but is designed to be (relatively) easy to run in a chroot
+jail. Here's how.
You'll either need staticly linked copies of rcynic and rsync, or
you'll need to figure out which shared libraries these programs need
@@ -345,7 +491,8 @@ writable by the rcynic userid. In particular, rcynic and rsync should
-not- be allowed to modify: their own binary images, any of the
configuration files, or your trust anchors. It's simplest just to
have root own all the files and directories that rcynic and rsync are
-not allowed to modify.
+not allowed to modify, and to make sure that the permissions on all of
+those directories and files allow writing only by root.
Sample jail tree, assuming that we're putting all of this under
/var/rcynic:
@@ -381,14 +528,13 @@ rcynic.conf to match this configuration:
[rcynic]
- trust-anchor.1 = /etc/trust-anchors/ta-1.cer
- trust-anchor.2 = /etc/trust-anchors/ta-2.cer
- trust-anchor.3 = /etc/trust-anchors/ta-3.cer
+ trust-anchor-locator.1 = /etc/trust-anchors/ta-1.tal
+ trust-anchor-locator.2 = /etc/trust-anchors/ta-2.tal
+ trust-anchor-locator.3 = /etc/trust-anchors/ta-3.tal
- rsync-program = /bin/rsync
- authenticated = /data/authenticated
- old-authenticated = /data/authenticated.old
- unauthenticated = /data/unauthenticated
+ rsync-program = /bin/rsync
+ authenticated = /data/authenticated
+ unauthenticated = /data/unauthenticated
Once you've got all this set up, you're ready to try running rcynic in
the jail. Try it from the command line first, then if that works, you
@@ -414,14 +560,16 @@ On FreeBSD, building a staticly linked rsync is easy: just set the
environment variable LDFLAGS='-static' before building the rsync port
and the right thing will happen. Since this is really just GNU
configure picking up the environment variable, the same trick should
-work on other platforms.
+work on other platforms...except that some compilers don't support
+-static, and some platforms are missing some or all of the non-shared
+libraries you'd need to link the resulting binary.
For simplicity, I've taken the same approach with rcynic, so
$ make LDFLAGS='-static'
should work. Except that you don't even have to do that: static
-linking is the default, because I run it jailed.
+linking is the default where supported, because I run it jailed.
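+
+If you're not sure whether a binary really did end up statically
+linked, ldd (on platforms that have it) gives a quick check: a
+statically linked binary should report no shared library
+dependencies, something like
+
+  $ ldd rcynic
+  rcynic: not a dynamic executable
+
+(exact wording varies from platform to platform).
+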
syslog:
@@ -432,21 +580,3 @@ is to add the following lines to /etc/rc.conf:
altlog_proglist="named rcynic"
rcynic_chrootdir="/var/rcynic"
rcynic_enable="YES"
-
-
-
-If you're using the experimental trust-anchor-uri-with-key trust
-anchor format, you'll need a copy of the public key in DER format.
-One can extract this from an X.509 format trust anchor using the
-OpenSSL command line tool, but the path is poorly documented. Try
-something like this:
-
- $ openssl x509 -inform DER -in foo.cer -pubkey -noout | openssl rsa -outform DER -pubin -out foo.key
-
-The important bits here are:
-
-a) You're asking the x509 command to extract the public key and send
- it (in PEM format) to stdout without the rest of the certificate
-
-b) You're asking the rsa command to read a public key (in PEM format)
- on stdin, convert it to DER format and write it out.
diff --git a/rcynic-ng/bio_f_linebreak.c b/rcynic/bio_f_linebreak.c
index 0376456e..0376456e 100644
--- a/rcynic-ng/bio_f_linebreak.c
+++ b/rcynic/bio_f_linebreak.c
diff --git a/rcynic-ng/bio_f_linebreak.h b/rcynic/bio_f_linebreak.h
index b5becfa6..b5becfa6 100644
--- a/rcynic-ng/bio_f_linebreak.h
+++ b/rcynic/bio_f_linebreak.h
diff --git a/rcynic-ng/defasn1.h b/rcynic/defasn1.h
index c14e0ce5..c14e0ce5 100644
--- a/rcynic-ng/defasn1.h
+++ b/rcynic/defasn1.h
diff --git a/rcynic-ng/defstack.awk b/rcynic/defstack.awk
index 4593cb33..4593cb33 100644
--- a/rcynic-ng/defstack.awk
+++ b/rcynic/defstack.awk
diff --git a/rcynic/rcynic.c b/rcynic/rcynic.c
index 6dfaee83..09fe1360 100644
--- a/rcynic/rcynic.c
+++ b/rcynic/rcynic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Internet Systems Consortium ("ISC")
+ * Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -60,6 +60,8 @@
#include <fcntl.h>
#include <signal.h>
#include <utime.h>
+#include <glob.h>
+#include <sys/param.h>
#define SYSLOG_NAMES /* defines CODE prioritynames[], facilitynames[] */
#include <syslog.h>
@@ -75,7 +77,14 @@
#include <openssl/asn1t.h>
#include <openssl/cms.h>
-#ifndef FILENAME_MAX
+#include "bio_f_linebreak.h"
+
+#include "defstack.h"
+#include "defasn1.h"
+
+#if !defined(FILENAME_MAX) && defined(PATH_MAX) && PATH_MAX > 1024
+#define FILENAME_MAX PATH_MAX
+#elif !defined(FILENAME_MAX)
#define FILENAME_MAX 1024
#endif
@@ -93,8 +102,8 @@
*/
#define KILL_MAX 10
-#ifndef HOST_NAME_MAX
-#define HOST_NAME_MAX 256
+#ifndef HOSTNAME_MAX
+#define HOSTNAME_MAX 256
#endif
/**
@@ -182,87 +191,79 @@ static const struct {
QV(X509_V_ERR_UNNESTED_RESOURCE)
/**
- * MIB counters specific to rcynic. "validation_ok" is not used as a
- * counter, but is used as a validation status code.
+ * MIB counters specific to rcynic.
*/
#define MIB_COUNTERS \
- QG(validation_ok, "OK") \
- QG(backup_cert_accepted, "Backup certificates accepted") \
- QB(backup_cert_rejected, "Backup certificates rejected") \
- QG(backup_crl_accepted, "Backup CRLs accepted") \
- QB(backup_crl_rejected, "Backup CRLs rejected") \
- QG(current_cert_accepted, "Current certificates accepted") \
- QB(current_cert_rejected, "Current certificates rejected") \
- QG(current_crl_accepted, "Current CRLs accepted") \
- QB(current_crl_rejected, "Current CRLs rejected") \
- QG(current_manifest_accepted, "Current Manifests accepted") \
- QB(current_manifest_rejected, "Current Manifests rejected") \
- QG(backup_manifest_accepted, "Backup Manifests accepted") \
- QB(backup_manifest_rejected, "Backup Manifests rejected") \
- QB(rsync_failed, "rsync transfers failed") \
- QG(rsync_succeeded, "rsync transfers succeeded") \
- QB(rsync_timed_out, "rsync transfers timed out") \
- QW(stale_crl, "Stale CRLs") \
- QB(malformed_sia, "Malformed SIA extensions") \
- QB(sia_missing, "SIA extensions missing") \
- QB(aia_missing, "AIA extensions missing") \
- QB(crldp_missing, "CRLDP extensions missing") \
- QB(aia_mismatch, "Mismatched AIA extensions") \
- QB(unknown_verify_error, "Unknown OpenSSL verify error") \
- QG(current_cert_recheck, "Certificates rechecked") \
- QB(manifest_invalid_ee, "Invalid manifest certificates") \
- QB(manifest_invalid_cms, "Manifest validation failures") \
- QB(manifest_decode_error, "Manifest decode errors") \
- QW(stale_manifest, "Stale manifests") \
- QB(manifest_not_yet_valid, "Manifests not yet valid") \
- QB(manifest_bad_econtenttype, "Bad manifest eContentType") \
- QB(manifest_missing_signer, "Missing manifest signers") \
- QB(manifest_missing_crldp, "Missing manifest CRLDP") \
- QB(manifest_malformed_crldp, "Malformed manifest CRLDP") \
- QB(certificate_digest_mismatch, "Certificate digest mismatches") \
- QB(crl_digest_mismatch, "CRL digest mismatches") \
+ MIB_COUNTERS_FROM_OPENSSL \
+ QB(aia_mismatch, "Mismatched AIA extension") \
+ QB(aia_missing, "AIA extension missing") \
+ QB(certificate_bad_crl, "Bad certificate CRL") \
+ QB(certificate_bad_signature, "Bad certificate signature") \
+ QB(certificate_digest_mismatch, "Certificate digest mismatch") \
+ QB(certificate_failed_validation, "Certificate failed validation") \
+ QB(crl_digest_mismatch, "CRL digest mismatch") \
QB(crl_not_in_manifest, "CRL not listed in manifest") \
- QB(roa_invalid_ee, "Invalid ROA certificates") \
- QB(roa_invalid_cms, "ROA validation failures") \
- QB(roa_decode_error, "ROA decode errors") \
- QB(roa_bad_econtenttype, "Bad ROA eContentType") \
- QB(roa_missing_signer, "Missing ROA signers") \
- QB(roa_digest_mismatch, "ROA digest mismatches") \
- QG(current_roa_accepted, "Current ROAs accepted") \
- QB(current_roa_rejected, "Current ROAs rejected") \
- QG(backup_roa_accepted, "Backup ROAs accepted") \
- QB(backup_roa_rejected, "Backup ROAs rejected") \
- QB(malformed_roa_addressfamily, "Malformed ROA addressFamilys") \
- QB(manifest_wrong_version, "Wrong manifest versions") \
- QB(roa_wrong_version, "Wrong ROA versions") \
- QW(trust_anchor_not_self_signed, "Trust anchor not self-signed") \
- QB(uri_too_long, "URI too long") \
+ QB(crl_not_yet_valid, "CRL not yet valid") \
+ QB(crldp_mismatch, "CRLDP doesn't match issuer's SIA") \
+ QB(crldp_missing, "CRLDP extension missing") \
+ QB(disallowed_extension, "Disallowed X.509v3 extension") \
+ QB(ghostbuster_bad_crl, "Ghostbuster EE has bad CRL") \
+ QB(ghostbuster_bad_econtenttype, "Bad Ghostbuster eContentType") \
+ QB(ghostbuster_digest_mismatch, "Ghostbuster digest mismatch") \
+ QB(ghostbuster_invalid_cms, "Ghostbuster validation failure") \
+ QB(ghostbuster_invalid_ee, "Invalid Ghostbuster certificate") \
+ QB(ghostbuster_missing_signer, "Missing Ghostbuster signer") \
+ QB(hash_too_long, "Hash value is too long") \
QB(malformed_crldp, "Malformed CRDLP extension") \
- QB(certificate_bad_signature, "Bad certificate signature") \
- QB(certificate_bad_crl, "Bad certificate CRL") \
- QB(manifest_bad_crl, "Manifest has bad CRL") \
- QB(roa_resources_malformed, "ROA resources malformed") \
+ QB(malformed_roa_addressfamily, "Malformed ROA addressFamily") \
+ QB(malformed_sia, "Malformed SIA extension") \
+ QB(manifest_bad_econtenttype, "Bad manifest eContentType") \
+ QB(manifest_decode_error, "Manifest decode error") \
+ QB(manifest_invalid_cms, "Manifest validation failure") \
+ QB(manifest_invalid_ee, "Invalid manifest certificate") \
+ QB(manifest_malformed_crldp, "Malformed manifest CRLDP") \
+ QB(manifest_mismatch, "Manifest doesn't match SIA") \
+ QB(manifest_missing, "Manifest pointer missing") \
+ QB(manifest_missing_crldp, "Missing manifest CRLDP") \
+ QB(manifest_missing_signer, "Missing manifest signer") \
+ QB(manifest_not_yet_valid, "Manifest not yet valid") \
+ QB(manifest_wrong_version, "Wrong manifest version") \
+ QB(object_rejected, "Object rejected") \
QB(roa_bad_afi, "ROA contains bad AFI value") \
- QB(roa_not_nested, "ROA resources not in EE") \
QB(roa_bad_crl, "ROA EE has bad CRL") \
- QB(ghostbuster_digest_mismatch, "Ghostbuster digest mismatches") \
- QB(ghostbuster_bad_econtenttype, "Bad Ghostbuster eContentType") \
- QB(ghostbuster_invalid_cms, "Ghostbuster validation failures") \
- QB(ghostbuster_missing_signer, "Missing Ghostbuster signers") \
- QB(ghostbuster_bad_crl, "Ghostbuster EE has bad CRL") \
- QB(ghostbuster_invalid_ee, "Invalid Ghostbuster certificates") \
- QG(current_ghostbuster_accepted, "Current Ghostbusters accepted") \
- QB(current_ghostbuster_rejected, "Current Ghostbusters rejected") \
- QG(backup_ghostbuster_accepted, "Backup Ghostbusters accepted") \
- QB(backup_ghostbuster_rejected, "Backup Ghostbusters rejected") \
- QB(disallowed_extension, "Disallowed X.509v3 extension") \
- QB(crldp_mismatch, "CRLDP doesn't match issuer's SIA") \
- QB(manifest_missing, "Manifest pointer missing") \
- QB(manifest_mismatch, "Manifest doesn't match SIA") \
+ QB(roa_bad_econtenttype, "Bad ROA eContentType") \
+ QB(roa_decode_error, "ROA decode error") \
+ QB(roa_digest_mismatch, "ROA digest mismatch") \
+ QB(roa_invalid_cms, "ROA validation failure") \
+ QB(roa_invalid_ee, "Invalid ROA certificate") \
+ QB(roa_missing_signer, "Missing ROA signer") \
+ QB(roa_not_nested, "ROA resource not in EE") \
+ QB(roa_resources_malformed, "ROA resources malformed") \
+ QB(roa_wrong_version, "Wrong ROA version") \
+ QB(rsync_failed, "rsync transfer failed") \
+ QB(rsync_timed_out, "rsync transfer timed out") \
+ QB(sia_missing, "SIA extension missing") \
+ QB(trust_anchor_key_mismatch, "Trust anchor key mismatch") \
QB(trust_anchor_with_crldp, "Trust anchor can't have CRLDP") \
- QW(object_not_in_manifest, "Object not in manifest") \
- MIB_COUNTERS_FROM_OPENSSL
+ QB(unknown_verify_error, "Unknown OpenSSL verify error") \
+ QB(unreadable_trust_anchor, "Unreadable trust anchor") \
+ QB(unreadable_trust_anchor_locator, "Unreadable trust anchor locator") \
+ QB(uri_too_long, "URI too long") \
+ QW(nonconformant_issuer_name, "Nonconformant X.509 issuer name") \
+ QW(nonconformant_subject_name, "Nonconformant X.509 subject name") \
+ QW(rsync_skipped, "rsync transfer skipped") \
+ QW(stale_crl, "Stale CRL") \
+ QW(stale_manifest, "Stale manifest") \
+ QW(tainted_by_stale_crl, "Tainted by stale CRL") \
+ QW(tainted_by_stale_manifest, "Tainted by stale manifest") \
+ QW(tainted_by_not_being_in_manifest, "Tainted by not being in manifest") \
+ QW(trust_anchor_not_self_signed, "Trust anchor not self-signed") \
+ QW(unknown_object_type_skipped, "Unknown object type skipped") \
+ QG(current_cert_recheck, "Certificate rechecked") \
+ QG(object_accepted, "Object accepted") \
+ QG(rsync_succeeded, "rsync transfer succeeded") \
+ QG(validation_ok, "OK")
#define QV(x) QB(mib_openssl_##x, 0)
@@ -304,50 +305,148 @@ static const long mib_counter_openssl[] = { MIB_COUNTERS 0 };
#undef QQ
/**
- * Per-host MIB counter object.
- * hostname[] must be first element.
+ * Object sources. We always try to get fresh copies of objects using
+ * rsync, but if that fails we try using backup copies from what
+ * worked the last time we were run. This means that a URI
+ * potentially represents two different objects, so we need to
+ * distinguish them for tracking purposes in our validation log.
*/
-typedef struct host_mib_counter {
- char hostname[URI_MAX];
- unsigned long counters[MIB_COUNTER_T_MAX];
-} HOST_MIB_COUNTER;
-DECLARE_STACK_OF(HOST_MIB_COUNTER)
+#define OBJECT_GENERATIONS \
+ QQ(null) \
+ QQ(current) \
+ QQ(backup)
+
+#define QQ(x) object_generation_##x ,
+typedef enum object_generation { OBJECT_GENERATIONS OBJECT_GENERATION_MAX } object_generation_t;
+#undef QQ
+
+#define QQ(x) #x ,
+static const char * const object_generation_label[] = { OBJECT_GENERATIONS NULL };
+#undef QQ
+
+/**
+ * Type-safe string wrapper for URIs.
+ */
+typedef struct { char s[URI_MAX]; } uri_t;
+
+/**
+ * Type-safe string wrapper for filename paths.
+ */
+typedef struct { char s[FILENAME_MAX]; } path_t;
+
+/**
+ * Type-safe string wrapper for hostnames.
+ */
+typedef struct { char s[HOSTNAME_MAX]; } hostname_t;
+
+/**
+ * Type-safe wrapper for hash buffers.
+ */
+typedef struct { unsigned char h[EVP_MAX_MD_SIZE]; } hashbuf_t;
/**
* Per-URI validation status object.
+ * uri must be first element.
*/
typedef struct validation_status {
- char uri[URI_MAX];
+ uri_t uri;
+ object_generation_t generation;
time_t timestamp;
- mib_counter_t code;
-} VALIDATION_STATUS;
+ unsigned char events[(MIB_COUNTER_T_MAX + 7) / 8];
+} validation_status_t;
-DECLARE_STACK_OF(VALIDATION_STATUS)
+DECLARE_STACK_OF(validation_status_t)
/**
* Structure to hold data parsed out of a certificate.
*/
typedef struct certinfo {
int ca, ta;
- char uri[URI_MAX], sia[URI_MAX], aia[URI_MAX], crldp[URI_MAX], manifest[URI_MAX];
+ object_generation_t generation;
+ uri_t uri, sia, aia, crldp, manifest;
} certinfo_t;
+typedef struct rcynic_ctx rcynic_ctx_t;
+
/**
- * Program context that would otherwise be a mess of global variables.
+ * States that a walk_ctx_t can be in.
*/
-typedef struct rcynic_ctx {
- char *authenticated, *old_authenticated, *unauthenticated;
- char *jane, *rsync_program;
- STACK_OF(OPENSSL_STRING) *rsync_cache, *backup_cache, *stale_cache;
- STACK_OF(HOST_MIB_COUNTER) *host_counters;
- STACK_OF(VALIDATION_STATUS) *validation_status;
- int indent, use_syslog, allow_stale_crl, allow_stale_manifest, use_links;
- int require_crl_in_manifest, rsync_timeout, priority[LOG_LEVEL_T_MAX];
- int allow_non_self_signed_trust_anchor, allow_object_not_in_manifest;
- log_level_t log_level;
- X509_STORE *x509_store;
-} rcynic_ctx_t;
+typedef enum {
+ walk_state_initial, /**< Initial state */
+ walk_state_rsync, /**< rsyncing certinfo.sia */
+ walk_state_ready, /**< Ready to traverse outputs */
+ walk_state_current, /**< prefix = rc->unauthenticated */
+ walk_state_backup, /**< prefix = rc->old_authenticated */
+ walk_state_done /**< Done walking this cert's outputs */
+} walk_state_t;
+
+/**
+ * Context for certificate tree walks. This includes all the stuff
+ * that we would keep as automatic variables on the call stack if we
+ * didn't have to use callbacks to support multiple rsync processes.
+ */
+typedef struct walk_ctx {
+ unsigned refcount;
+ certinfo_t certinfo;
+ X509 *cert;
+ Manifest *manifest;
+ STACK_OF(OPENSSL_STRING) *filenames;
+ int manifest_iteration, filename_iteration, stale_manifest;
+ walk_state_t state;
+} walk_ctx_t;
+
+DECLARE_STACK_OF(walk_ctx_t)
+
+/**
+ * Return codes from rsync functions.
+ */
+typedef enum {
+ rsync_status_done, /* Request completed */
+ rsync_status_failed, /* Request failed */
+ rsync_status_timed_out, /* Request timed out */
+ rsync_status_pending, /* Request in progress */
+ rsync_status_skipped /* Request not attempted */
+} rsync_status_t;
+
+/**
+ * Context for asynchronous rsync.
+ */
+typedef struct rsync_ctx {
+ uri_t uri;
+ void (*handler)(const rcynic_ctx_t *, const struct rsync_ctx *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *);
+ STACK_OF(walk_ctx_t) *wsk;
+ enum {
+ rsync_state_initial, /* Must be first */
+ rsync_state_running,
+ rsync_state_conflict_wait,
+ rsync_state_retry_wait,
+ rsync_state_terminating
+ } state;
+ enum {
+ rsync_problem_none, /* Must be first */
+ rsync_problem_timed_out,
+ rsync_problem_refused
+ } problem;
+ unsigned tries;
+ pid_t pid;
+ int fd;
+ time_t started, deadline;
+ char buffer[URI_MAX * 4];
+ size_t buflen;
+} rsync_ctx_t;
+
+DECLARE_STACK_OF(rsync_ctx_t)
+
+/**
+ * Deferred task.
+ */
+typedef struct task {
+ void (*handler)(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *);
+ STACK_OF(walk_ctx_t) *wsk;
+} task_t;
+
+DECLARE_STACK_OF(task_t)
/**
* Extended context for verify callbacks. This is a wrapper around
@@ -364,6 +463,25 @@ typedef struct rcynic_x509_store_ctx {
} rcynic_x509_store_ctx_t;
/**
+ * Program context that would otherwise be a mess of global variables.
+ */
+struct rcynic_ctx {
+ path_t authenticated, old_authenticated, new_authenticated, unauthenticated;
+ char *jane, *rsync_program;
+ STACK_OF(OPENSSL_STRING) *rsync_cache, *backup_cache, *dead_host_cache;
+ STACK_OF(validation_status_t) *validation_status;
+ STACK_OF(rsync_ctx_t) *rsync_queue;
+ STACK_OF(task_t) *task_queue;
+ int use_syslog, allow_stale_crl, allow_stale_manifest, use_links;
+ int require_crl_in_manifest, rsync_timeout, priority[LOG_LEVEL_T_MAX];
+ int allow_non_self_signed_trust_anchor, allow_object_not_in_manifest;
+ int max_parallel_fetches, max_retries, retry_wait_min, run_rsync;
+ log_level_t log_level;
+ X509_STORE *x509_store;
+};
+
+
+/**
* Subversion ID data.
*/
static const char svn_id[] = "$Id$";
@@ -406,6 +524,14 @@ static const unsigned char id_sha256[] =
*/
static const char rpki_policy_oid[] = "1.3.6.1.5.5.7.14.2";
+/**
+ * Suffix we use temporarily during the symlink shuffle. Could be
+ * almost anything, but we want to do the length check early, before
+ * we waste a lot of work we'll just have to throw away, so we just
+ * wire in something short and obvious.
+ */
+static const char authenticated_symlink_suffix[] = ".new";
+
/**
@@ -429,22 +555,11 @@ static void sk_OPENSSL_STRING_remove(STACK_OF(OPENSSL_STRING) *sk, const char *s
}
/**
- * Allocate a new HOST_MIB_COUNTER object.
+ * Allocate a new validation_status_t object.
*/
-static HOST_MIB_COUNTER *HOST_MIB_COUNTER_new(void)
+static validation_status_t *validation_status_t_new(void)
{
- HOST_MIB_COUNTER *h = malloc(sizeof(*h));
- if (h)
- memset(h, 0, sizeof(*h));
- return h;
-}
-
-/**
- * Allocate a new VALIDATION_STATUS object.
- */
-static VALIDATION_STATUS *VALIDATION_STATUS_new(void)
-{
- VALIDATION_STATUS *v = malloc(sizeof(*v));
+ validation_status_t *v = malloc(sizeof(*v));
if (v)
memset(v, 0, sizeof(*v));
return v;
@@ -453,235 +568,25 @@ static VALIDATION_STATUS *VALIDATION_STATUS_new(void)
/**
* Type-safe wrapper around free() to keep safestack macros happy.
*/
-static void HOST_MIB_COUNTER_free(HOST_MIB_COUNTER *h)
-{
- if (h)
- free(h);
-}
-
-/**
- * Type-safe wrapper around free() to keep safestack macros happy.
- */
-static void VALIDATION_STATUS_free(VALIDATION_STATUS *v)
+static void validation_status_t_free(validation_status_t *v)
{
if (v)
free(v);
}
-/*
- * Safestack macros for HOST_MIB_COUNTER.
- */
-
-#define sk_HOST_MIB_COUNTER_new(st) SKM_sk_new(HOST_MIB_COUNTER, (st))
-#define sk_HOST_MIB_COUNTER_new_null() SKM_sk_new_null(HOST_MIB_COUNTER)
-#define sk_HOST_MIB_COUNTER_free(st) SKM_sk_free(HOST_MIB_COUNTER, (st))
-#define sk_HOST_MIB_COUNTER_num(st) SKM_sk_num(HOST_MIB_COUNTER, (st))
-#define sk_HOST_MIB_COUNTER_value(st, i) SKM_sk_value(HOST_MIB_COUNTER, (st), (i))
-#define sk_HOST_MIB_COUNTER_set(st, i, val) SKM_sk_set(HOST_MIB_COUNTER, (st), (i), (val))
-#define sk_HOST_MIB_COUNTER_zero(st) SKM_sk_zero(HOST_MIB_COUNTER, (st))
-#define sk_HOST_MIB_COUNTER_push(st, val) SKM_sk_push(HOST_MIB_COUNTER, (st), (val))
-#define sk_HOST_MIB_COUNTER_unshift(st, val) SKM_sk_unshift(HOST_MIB_COUNTER, (st), (val))
-#define sk_HOST_MIB_COUNTER_find(st, val) SKM_sk_find(HOST_MIB_COUNTER, (st), (val))
-#define sk_HOST_MIB_COUNTER_find_ex(st, val) SKM_sk_find_ex(HOST_MIB_COUNTER, (st), (val))
-#define sk_HOST_MIB_COUNTER_delete(st, i) SKM_sk_delete(HOST_MIB_COUNTER, (st), (i))
-#define sk_HOST_MIB_COUNTER_delete_ptr(st, ptr) SKM_sk_delete_ptr(HOST_MIB_COUNTER, (st), (ptr))
-#define sk_HOST_MIB_COUNTER_insert(st, val, i) SKM_sk_insert(HOST_MIB_COUNTER, (st), (val), (i))
-#define sk_HOST_MIB_COUNTER_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(HOST_MIB_COUNTER, (st), (cmp))
-#define sk_HOST_MIB_COUNTER_dup(st) SKM_sk_dup(HOST_MIB_COUNTER, st)
-#define sk_HOST_MIB_COUNTER_pop_free(st, free_func) SKM_sk_pop_free(HOST_MIB_COUNTER, (st), (free_func))
-#define sk_HOST_MIB_COUNTER_shift(st) SKM_sk_shift(HOST_MIB_COUNTER, (st))
-#define sk_HOST_MIB_COUNTER_pop(st) SKM_sk_pop(HOST_MIB_COUNTER, (st))
-#define sk_HOST_MIB_COUNTER_sort(st) SKM_sk_sort(HOST_MIB_COUNTER, (st))
-#define sk_HOST_MIB_COUNTER_is_sorted(st) SKM_sk_is_sorted(HOST_MIB_COUNTER, (st))
-
-/*
- * Safestack macros for VALIDATION_STATUS.
- */
-
-#define sk_VALIDATION_STATUS_new(st) SKM_sk_new(VALIDATION_STATUS, (st))
-#define sk_VALIDATION_STATUS_new_null() SKM_sk_new_null(VALIDATION_STATUS)
-#define sk_VALIDATION_STATUS_free(st) SKM_sk_free(VALIDATION_STATUS, (st))
-#define sk_VALIDATION_STATUS_num(st) SKM_sk_num(VALIDATION_STATUS, (st))
-#define sk_VALIDATION_STATUS_value(st, i) SKM_sk_value(VALIDATION_STATUS, (st), (i))
-#define sk_VALIDATION_STATUS_set(st, i, val) SKM_sk_set(VALIDATION_STATUS, (st), (i), (val))
-#define sk_VALIDATION_STATUS_zero(st) SKM_sk_zero(VALIDATION_STATUS, (st))
-#define sk_VALIDATION_STATUS_push(st, val) SKM_sk_push(VALIDATION_STATUS, (st), (val))
-#define sk_VALIDATION_STATUS_unshift(st, val) SKM_sk_unshift(VALIDATION_STATUS, (st), (val))
-#define sk_VALIDATION_STATUS_find(st, val) SKM_sk_find(VALIDATION_STATUS, (st), (val))
-#define sk_VALIDATION_STATUS_find_ex(st, val) SKM_sk_find_ex(VALIDATION_STATUS, (st), (val))
-#define sk_VALIDATION_STATUS_delete(st, i) SKM_sk_delete(VALIDATION_STATUS, (st), (i))
-#define sk_VALIDATION_STATUS_delete_ptr(st, ptr) SKM_sk_delete_ptr(VALIDATION_STATUS, (st), (ptr))
-#define sk_VALIDATION_STATUS_insert(st, val, i) SKM_sk_insert(VALIDATION_STATUS, (st), (val), (i))
-#define sk_VALIDATION_STATUS_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(VALIDATION_STATUS, (st), (cmp))
-#define sk_VALIDATION_STATUS_dup(st) SKM_sk_dup(VALIDATION_STATUS, st)
-#define sk_VALIDATION_STATUS_pop_free(st, free_func) SKM_sk_pop_free(VALIDATION_STATUS, (st), (free_func))
-#define sk_VALIDATION_STATUS_shift(st) SKM_sk_shift(VALIDATION_STATUS, (st))
-#define sk_VALIDATION_STATUS_pop(st) SKM_sk_pop(VALIDATION_STATUS, (st))
-#define sk_VALIDATION_STATUS_sort(st) SKM_sk_sort(VALIDATION_STATUS, (st))
-#define sk_VALIDATION_STATUS_is_sorted(st) SKM_sk_is_sorted(VALIDATION_STATUS, (st))
-
/*
- * ASN.1 templates. Not sure that ASN1_EXP_OPT() is the right macro
- * for these defaulted "version" fields, but it's what the examples
- * for this construction use. Probably doesn't matter since this
- * program only decodes manifests, never encodes them.
- *
- * Putting this section under conditional compilation is a hack to
- * keep Doxygen's parser from becoming hopelessly confused by the
- * weird OpenSSL ASN.1 macros. Someday perhaps I'll have time to
- * track down the problem in Doxygen's parser, but this works for now.
- */
-
-#ifndef DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION
-
-typedef struct FileAndHash_st {
- ASN1_IA5STRING *file;
- ASN1_BIT_STRING *hash;
-} FileAndHash;
-
-DECLARE_STACK_OF(FileAndHash)
-
-ASN1_SEQUENCE(FileAndHash) = {
- ASN1_SIMPLE(FileAndHash, file, ASN1_IA5STRING),
- ASN1_SIMPLE(FileAndHash, hash, ASN1_BIT_STRING)
-} ASN1_SEQUENCE_END(FileAndHash)
-
-typedef struct Manifest_st {
- ASN1_INTEGER *version, *manifestNumber;
- ASN1_GENERALIZEDTIME *thisUpdate, *nextUpdate;
- ASN1_OBJECT *fileHashAlg;
- STACK_OF(FileAndHash) *fileList;
-} Manifest;
-
-ASN1_SEQUENCE(Manifest) = {
- ASN1_EXP_OPT(Manifest, version, ASN1_INTEGER, 0),
- ASN1_SIMPLE(Manifest, manifestNumber, ASN1_INTEGER),
- ASN1_SIMPLE(Manifest, thisUpdate, ASN1_GENERALIZEDTIME),
- ASN1_SIMPLE(Manifest, nextUpdate, ASN1_GENERALIZEDTIME),
- ASN1_SIMPLE(Manifest, fileHashAlg, ASN1_OBJECT),
- ASN1_SEQUENCE_OF(Manifest, fileList, FileAndHash)
-} ASN1_SEQUENCE_END(Manifest)
-
-DECLARE_ASN1_FUNCTIONS(FileAndHash)
-DECLARE_ASN1_FUNCTIONS(Manifest)
-
-IMPLEMENT_ASN1_FUNCTIONS(FileAndHash)
-IMPLEMENT_ASN1_FUNCTIONS(Manifest)
-
-#define sk_FileAndHash_new(st) SKM_sk_new(FileAndHash, (st))
-#define sk_FileAndHash_new_null() SKM_sk_new_null(FileAndHash)
-#define sk_FileAndHash_free(st) SKM_sk_free(FileAndHash, (st))
-#define sk_FileAndHash_num(st) SKM_sk_num(FileAndHash, (st))
-#define sk_FileAndHash_value(st, i) SKM_sk_value(FileAndHash, (st), (i))
-#define sk_FileAndHash_set(st, i, val) SKM_sk_set(FileAndHash, (st), (i), (val))
-#define sk_FileAndHash_zero(st) SKM_sk_zero(FileAndHash, (st))
-#define sk_FileAndHash_push(st, val) SKM_sk_push(FileAndHash, (st), (val))
-#define sk_FileAndHash_unshift(st, val) SKM_sk_unshift(FileAndHash, (st), (val))
-#define sk_FileAndHash_find(st, val) SKM_sk_find(FileAndHash, (st), (val))
-#define sk_FileAndHash_find_ex(st, val) SKM_sk_find_ex(FileAndHash, (st), (val))
-#define sk_FileAndHash_delete(st, i) SKM_sk_delete(FileAndHash, (st), (i))
-#define sk_FileAndHash_delete_ptr(st, ptr) SKM_sk_delete_ptr(FileAndHash, (st), (ptr))
-#define sk_FileAndHash_insert(st, val, i) SKM_sk_insert(FileAndHash, (st), (val), (i))
-#define sk_FileAndHash_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(FileAndHash, (st), (cmp))
-#define sk_FileAndHash_dup(st) SKM_sk_dup(FileAndHash, st)
-#define sk_FileAndHash_pop_free(st, free_func) SKM_sk_pop_free(FileAndHash, (st), (free_func))
-#define sk_FileAndHash_shift(st) SKM_sk_shift(FileAndHash, (st))
-#define sk_FileAndHash_pop(st) SKM_sk_pop(FileAndHash, (st))
-#define sk_FileAndHash_sort(st) SKM_sk_sort(FileAndHash, (st))
-#define sk_FileAndHash_is_sorted(st) SKM_sk_is_sorted(FileAndHash, (st))
-
-typedef struct ROAIPAddress_st {
- ASN1_BIT_STRING *IPAddress;
- ASN1_INTEGER *maxLength;
-} ROAIPAddress;
-
-DECLARE_STACK_OF(ROAIPAddress)
-
-ASN1_SEQUENCE(ROAIPAddress) = {
- ASN1_SIMPLE(ROAIPAddress, IPAddress, ASN1_BIT_STRING),
- ASN1_OPT(ROAIPAddress, maxLength, ASN1_INTEGER)
-} ASN1_SEQUENCE_END(ROAIPAddress)
-
-typedef struct ROAIPAddressFamily_st {
- ASN1_OCTET_STRING *addressFamily;
- STACK_OF(ROAIPAddress) *addresses;
-} ROAIPAddressFamily;
-
-DECLARE_STACK_OF(ROAIPAddressFamily)
-
-ASN1_SEQUENCE(ROAIPAddressFamily) = {
- ASN1_SIMPLE(ROAIPAddressFamily, addressFamily, ASN1_OCTET_STRING),
- ASN1_SEQUENCE_OF(ROAIPAddressFamily, addresses, ROAIPAddress)
-} ASN1_SEQUENCE_END(ROAIPAddressFamily)
-
-typedef struct ROA_st {
- ASN1_INTEGER *version, *asID;
- STACK_OF(ROAIPAddressFamily) *ipAddrBlocks;
-} ROA;
-
-ASN1_SEQUENCE(ROA) = {
- ASN1_EXP_OPT(ROA, version, ASN1_INTEGER, 0),
- ASN1_SIMPLE(ROA, asID, ASN1_INTEGER),
- ASN1_SEQUENCE_OF(ROA, ipAddrBlocks, ROAIPAddressFamily)
-} ASN1_SEQUENCE_END(ROA)
-
-DECLARE_ASN1_FUNCTIONS(ROAIPAddress)
-DECLARE_ASN1_FUNCTIONS(ROAIPAddressFamily)
-DECLARE_ASN1_FUNCTIONS(ROA)
-
-IMPLEMENT_ASN1_FUNCTIONS(ROAIPAddress)
-IMPLEMENT_ASN1_FUNCTIONS(ROAIPAddressFamily)
-IMPLEMENT_ASN1_FUNCTIONS(ROA)
-
-#define sk_ROAIPAddress_new(st) SKM_sk_new(ROAIPAddress, (st))
-#define sk_ROAIPAddress_new_null() SKM_sk_new_null(ROAIPAddress)
-#define sk_ROAIPAddress_free(st) SKM_sk_free(ROAIPAddress, (st))
-#define sk_ROAIPAddress_num(st) SKM_sk_num(ROAIPAddress, (st))
-#define sk_ROAIPAddress_value(st, i) SKM_sk_value(ROAIPAddress, (st), (i))
-#define sk_ROAIPAddress_set(st, i, val) SKM_sk_set(ROAIPAddress, (st), (i), (val))
-#define sk_ROAIPAddress_zero(st) SKM_sk_zero(ROAIPAddress, (st))
-#define sk_ROAIPAddress_push(st, val) SKM_sk_push(ROAIPAddress, (st), (val))
-#define sk_ROAIPAddress_unshift(st, val) SKM_sk_unshift(ROAIPAddress, (st), (val))
-#define sk_ROAIPAddress_find(st, val) SKM_sk_find(ROAIPAddress, (st), (val))
-#define sk_ROAIPAddress_find_ex(st, val) SKM_sk_find_ex(ROAIPAddress, (st), (val))
-#define sk_ROAIPAddress_delete(st, i) SKM_sk_delete(ROAIPAddress, (st), (i))
-#define sk_ROAIPAddress_delete_ptr(st, ptr) SKM_sk_delete_ptr(ROAIPAddress, (st), (ptr))
-#define sk_ROAIPAddress_insert(st, val, i) SKM_sk_insert(ROAIPAddress, (st), (val), (i))
-#define sk_ROAIPAddress_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ROAIPAddress, (st), (cmp))
-#define sk_ROAIPAddress_dup(st) SKM_sk_dup(ROAIPAddress, st)
-#define sk_ROAIPAddress_pop_free(st, free_func) SKM_sk_pop_free(ROAIPAddress, (st), (free_func))
-#define sk_ROAIPAddress_shift(st) SKM_sk_shift(ROAIPAddress, (st))
-#define sk_ROAIPAddress_pop(st) SKM_sk_pop(ROAIPAddress, (st))
-#define sk_ROAIPAddress_sort(st) SKM_sk_sort(ROAIPAddress, (st))
-#define sk_ROAIPAddress_is_sorted(st) SKM_sk_is_sorted(ROAIPAddress, (st))
-
-#define sk_ROAIPAddressFamily_new(st) SKM_sk_new(ROAIPAddressFamily, (st))
-#define sk_ROAIPAddressFamily_new_null() SKM_sk_new_null(ROAIPAddressFamily)
-#define sk_ROAIPAddressFamily_free(st) SKM_sk_free(ROAIPAddressFamily, (st))
-#define sk_ROAIPAddressFamily_num(st) SKM_sk_num(ROAIPAddressFamily, (st))
-#define sk_ROAIPAddressFamily_value(st, i) SKM_sk_value(ROAIPAddressFamily, (st), (i))
-#define sk_ROAIPAddressFamily_set(st, i, val) SKM_sk_set(ROAIPAddressFamily, (st), (i), (val))
-#define sk_ROAIPAddressFamily_zero(st) SKM_sk_zero(ROAIPAddressFamily, (st))
-#define sk_ROAIPAddressFamily_push(st, val) SKM_sk_push(ROAIPAddressFamily, (st), (val))
-#define sk_ROAIPAddressFamily_unshift(st, val) SKM_sk_unshift(ROAIPAddressFamily, (st), (val))
-#define sk_ROAIPAddressFamily_find(st, val) SKM_sk_find(ROAIPAddressFamily, (st), (val))
-#define sk_ROAIPAddressFamily_find_ex(st, val) SKM_sk_find_ex(ROAIPAddressFamily, (st), (val))
-#define sk_ROAIPAddressFamily_delete(st, i) SKM_sk_delete(ROAIPAddressFamily, (st), (i))
-#define sk_ROAIPAddressFamily_delete_ptr(st, ptr) SKM_sk_delete_ptr(ROAIPAddressFamily, (st), (ptr))
-#define sk_ROAIPAddressFamily_insert(st, val, i) SKM_sk_insert(ROAIPAddressFamily, (st), (val), (i))
-#define sk_ROAIPAddressFamily_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(ROAIPAddressFamily, (st), (cmp))
-#define sk_ROAIPAddressFamily_dup(st) SKM_sk_dup(ROAIPAddressFamily, st)
-#define sk_ROAIPAddressFamily_pop_free(st, free_func) SKM_sk_pop_free(ROAIPAddressFamily, (st), (free_func))
-#define sk_ROAIPAddressFamily_shift(st) SKM_sk_shift(ROAIPAddressFamily, (st))
-#define sk_ROAIPAddressFamily_pop(st) SKM_sk_pop(ROAIPAddressFamily, (st))
-#define sk_ROAIPAddressFamily_sort(st) SKM_sk_sort(ROAIPAddressFamily, (st))
-#define sk_ROAIPAddressFamily_is_sorted(st) SKM_sk_is_sorted(ROAIPAddressFamily, (st))
-
-#endif /* DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION */
+ * GCC attributes to help catch format string errors.
+ */
-
+#ifdef __GNUC__
+
+static void logmsg(const rcynic_ctx_t *rc,
+ const log_level_t level,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+#endif
/**
* Logging.
@@ -707,8 +612,6 @@ static void vlogmsg(const rcynic_ctx_t *rc,
fprintf(stderr, "%s: ", tad);
if (rc->jane)
fprintf(stderr, "%s: ", rc->jane);
- if (rc->indent)
- fprintf(stderr, "%*s", rc->indent, " ");
vfprintf(stderr, fmt, ap);
putc('\n', stderr);
}
@@ -743,9 +646,9 @@ static void log_openssl_errors(const rcynic_ctx_t *rc)
while ((code = ERR_get_error_line_data(&file, &line, &data, &flags))) {
ERR_error_string_n(code, error, sizeof(error));
if (data && (flags & ERR_TXT_STRING))
- logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s", file, line, error, data);
+ logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s: %s", file, line, error, data);
else
- logmsg(rc, log_sys_err, "OpenSSL error %s:%d", file, line, error);
+ logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s", file, line, error);
}
}
@@ -841,28 +744,29 @@ static int configure_integer(const rcynic_ctx_t *rc,
/**
* Make a directory if it doesn't already exist.
*/
-static int mkdir_maybe(const rcynic_ctx_t *rc, const char *name)
+static int mkdir_maybe(const rcynic_ctx_t *rc, const path_t *name)
{
- char *b, buffer[FILENAME_MAX];
+ path_t path;
+ char *s;
assert(name != NULL);
- if (strlen(name) >= sizeof(buffer)) {
- logmsg(rc, log_data_err, "Pathname %s too long", name);
+ if (strlen(name->s) >= sizeof(path.s)) {
+ logmsg(rc, log_data_err, "Pathname %s too long", name->s);
return 0;
}
- strcpy(buffer, name);
- b = buffer[0] == '/' ? buffer + 1 : buffer;
- if ((b = strrchr(b, '/')) == NULL)
+ strcpy(path.s, name->s);
+ s = path.s[0] == '/' ? path.s + 1 : path.s;
+ if ((s = strrchr(s, '/')) == NULL)
return 1;
- *b = '\0';
- if (!mkdir_maybe(rc, buffer)) {
- logmsg(rc, log_sys_err, "Failed to make directory %s", buffer);
+ *s = '\0';
+ if (!mkdir_maybe(rc, &path)) {
+ logmsg(rc, log_sys_err, "Failed to make directory %s", path.s);
return 0;
}
- if (!access(buffer, F_OK))
+ if (!access(path.s, F_OK))
return 1;
- logmsg(rc, log_verbose, "Creating directory %s", buffer);
- return mkdir(buffer, 0777) == 0;
+ logmsg(rc, log_verbose, "Creating directory %s", path.s);
+ return mkdir(path.s, 0777) == 0;
}
/**
@@ -903,49 +807,65 @@ static int is_rsync(const char *uri)
* the log, not the MIB.
*/
static int uri_to_filename(const rcynic_ctx_t *rc,
- const char *uri,
- char *buffer,
- const size_t buflen,
- const char *prefix)
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix)
{
const char *u;
size_t n;
- buffer[0] = '\0';
+ path->s[0] = '\0';
- if (!is_rsync(uri)) {
- logmsg(rc, log_telemetry, "%s is not an rsync URI, not converting to filename", uri);
+ if (!is_rsync(uri->s)) {
+ logmsg(rc, log_telemetry, "%s is not an rsync URI, not converting to filename", uri->s);
return 0;
}
- u = uri + SIZEOF_RSYNC;
+ u = uri->s + SIZEOF_RSYNC;
n = strlen(u);
if (u[0] == '/' || u[0] == '.' || strstr(u, "/../") ||
(n >= 3 && !strcmp(u + n - 3, "/.."))) {
- logmsg(rc, log_data_err, "Dangerous URI %s, not converting to filename", uri);
+ logmsg(rc, log_data_err, "Dangerous URI %s, not converting to filename", uri->s);
return 0;
}
if (prefix)
- n += strlen(prefix);
+ n += strlen(prefix->s);
- if (n >= buflen) {
- logmsg(rc, log_data_err, "URI %s too long, not converting to filename", uri);
+ if (n >= sizeof(path->s)) {
+ logmsg(rc, log_data_err, "URI %s too long, not converting to filename", uri->s);
return 0;
}
if (prefix) {
- strcpy(buffer, prefix);
- strcat(buffer, u);
+ strcpy(path->s, prefix->s);
+ strcat(path->s, u);
} else {
- strcpy(buffer, u);
+ strcpy(path->s, u);
}
return 1;
}
/**
+ * Extract a hostname from a URI.
+ */
+static int uri_to_hostname(const uri_t *uri,
+ hostname_t *hostname)
+{
+ size_t n;
+
+ if (!uri || !hostname || !is_rsync(uri->s) ||
+ (n = strcspn(uri->s + SIZEOF_RSYNC, "/")) >= sizeof(hostname->s))
+ return 0;
+
+ strncpy(hostname->s, uri->s + SIZEOF_RSYNC, n);
+ hostname->s[n] = '\0';
+ return 1;
+}
+
+/**
* OID comparison.
*/
static int oid_cmp(const ASN1_OBJECT *obj, const unsigned char *oid, const size_t oidlen)
@@ -958,198 +878,176 @@ static int oid_cmp(const ASN1_OBJECT *obj, const unsigned char *oid, const size_
}
/**
- * Host MIB counter comparision.
+ * Get value of code in a validation_status_t.
*/
-static int host_mib_counter_cmp(const HOST_MIB_COUNTER * const *a, const HOST_MIB_COUNTER * const *b)
+static int validation_status_get_code(const validation_status_t *v,
+ const mib_counter_t code)
{
- return strcasecmp((*a)->hostname, (*b)->hostname);
+ assert(v && code < MIB_COUNTER_T_MAX);
+ return (v->events[code / 8] & (1 << (code % 8))) != 0;
}
/**
- * MIB counter manipulation.
+ * Set value of code in a validation_status_t.
*/
-static void mib_increment(const rcynic_ctx_t *rc,
- const char *uri,
- const mib_counter_t counter)
+static void validation_status_set_code(validation_status_t *v,
+ const mib_counter_t code,
+ int value)
{
- HOST_MIB_COUNTER *h = NULL, hn;
- char *s;
-
- assert(rc && uri && strlen(uri) < URI_MAX);
-
- if (!rc->host_counters)
- return;
-
- memset(&hn, 0, sizeof(hn));
-
- if (!uri_to_filename(rc, uri, hn.hostname, sizeof(hn.hostname), NULL)) {
- logmsg(rc, log_data_err, "Couldn't convert URI %s to hostname", uri);
- return;
- }
-
- if ((s = strchr(hn.hostname, '/')) != NULL)
- *s = '\0';
-
- h = sk_HOST_MIB_COUNTER_value(rc->host_counters,
- sk_HOST_MIB_COUNTER_find(rc->host_counters,
- &hn));
- if (!h) {
- if ((h = HOST_MIB_COUNTER_new()) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate MIB counters for %s", uri);
- return;
- }
- strcpy(h->hostname, hn.hostname);
- if (!sk_HOST_MIB_COUNTER_push(rc->host_counters, h)) {
- logmsg(rc, log_sys_err, "Couldn't store MIB counters for %s", uri);
- free(h);
- return;
- }
- }
-
- h->counters[counter]++;
+ assert(v && code < MIB_COUNTER_T_MAX);
+ if (value)
+ v->events[code / 8] |= (1 << (code % 8));
+ else
+ v->events[code / 8] &= ~(1 << (code % 8));
}
/**
* Add a validation status entry to internal log.
*/
static void log_validation_status(const rcynic_ctx_t *rc,
- const char *uri,
- const mib_counter_t code)
+ const uri_t *uri,
+ const mib_counter_t code,
+ const object_generation_t generation)
{
- VALIDATION_STATUS *v = NULL;
+ validation_status_t v_, *v = NULL;
+ int was_set;
- assert(rc && uri && strlen(uri) < URI_MAX);
+ assert(rc && uri && code < MIB_COUNTER_T_MAX && generation < OBJECT_GENERATION_MAX);
if (!rc->validation_status)
return;
- if ((v = VALIDATION_STATUS_new()) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate validation status entry for %s", uri);
- goto punt;
- }
+ memset(&v_, 0, sizeof(v_));
+ v_.uri = *uri;
+ v_.generation = generation;
- strcpy(v->uri, uri);
- v->timestamp = time(0);
- v->code = code;
-
- if (!sk_VALIDATION_STATUS_push(rc->validation_status, v)) {
- logmsg(rc, log_sys_err, "Couldn't store validation status entry for %s", uri);
- goto punt;
+ v = sk_validation_status_t_value(rc->validation_status, sk_validation_status_t_find(rc->validation_status, &v_));
+ if (v == NULL) {
+ if ((v = validation_status_t_new()) == NULL) {
+ logmsg(rc, log_sys_err, "Couldn't allocate validation status entry for %s", uri->s);
+ return;
+ }
+ *v = v_;
+ if (!sk_validation_status_t_push(rc->validation_status, v)) {
+ logmsg(rc, log_sys_err, "Couldn't store validation status entry for %s", uri->s);
+ free(v);
+ return;
+ }
}
- v = NULL;
+ was_set = validation_status_get_code(v, code);
- punt:
- if (v)
- free(v);
+ v->timestamp = time(0);
+ validation_status_set_code(v, code, 1);
+
+ if (!was_set)
+ logmsg(rc, log_verbose, "Recording \"%s\" for %s%s%s",
+ (mib_counter_desc[code]
+ ? mib_counter_desc[code]
+ : X509_verify_cert_error_string(mib_counter_openssl[code])),
+ (generation != object_generation_null ? object_generation_label[generation] : ""),
+ (generation != object_generation_null ? " " : ""),
+ uri->s);
}
/**
- * Reject an object.
+ * Validation status object comparison.
*/
-static void reject(const rcynic_ctx_t *rc,
- const char *uri,
- const mib_counter_t code,
- const char *fmt, ...)
+static int validation_status_cmp(const validation_status_t * const *a, const validation_status_t * const *b)
{
- char format[URI_MAX * 2];
- va_list ap;
-
- assert(fmt && strlen(fmt) + sizeof("Rejected %s") < sizeof(format));
- snprintf(format, sizeof(format), "Rejected %s %s", uri, fmt);
- log_validation_status(rc, uri, code);
- va_start(ap, fmt);
- vlogmsg(rc, log_data_err, format, ap);
- va_end(ap);
+ int cmp = strcmp((*a)->uri.s, (*b)->uri.s);
+ if (cmp)
+ return cmp;
+ cmp = (int) ((*a)->generation) - (int) ((*b)->generation);
+ if (cmp)
+ return cmp;
+ return 0;
}
/**
- * Copy a file
+ * Copy or link a file, as the case may be.
*/
-static int cp(const rcynic_ctx_t *rc, const char *source, const char *target)
+static int cp_ln(const rcynic_ctx_t *rc, const path_t *source, const path_t *target)
{
struct stat statbuf;
struct utimbuf utimebuf;
FILE *in = NULL, *out = NULL;
- int c, ret = 0;
+ int c, ok = 0;
+
+ if (rc->use_links) {
+ (void) unlink(target->s);
+ ok = link(source->s, target->s) == 0;
+ if (!ok)
+ logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
+ source->s, target->s, strerror(errno));
+ return ok;
+ }
- if ((in = fopen(source, "rb")) == NULL ||
- (out = fopen(target, "wb")) == NULL)
+ if ((in = fopen(source->s, "rb")) == NULL ||
+ (out = fopen(target->s, "wb")) == NULL)
goto done;
while ((c = getc(in)) != EOF)
if (putc(c, out) == EOF)
goto done;
- ret = 1;
+ ok = 1;
done:
- ret &= !(in != NULL && fclose(in) == EOF);
- ret &= !(out != NULL && fclose(out) == EOF);
+ ok &= !(in != NULL && fclose(in) == EOF);
+ ok &= !(out != NULL && fclose(out) == EOF);
+
+ if (!ok) {
+ logmsg(rc, log_sys_err, "Couldn't copy %s to %s: %s",
+ source->s, target->s, strerror(errno));
+ return ok;
+ }
/*
* Perserve the file modification time to allow for detection of
* changed objects in the authenticated directory. Failure to reset
* the times is not optimal, but is also not critical, thus no
- * failure return. Errors are reported with log_sys_err because
- * there is no log type for warnings.
+ * failure return.
*/
- if (ret && (stat(source, &statbuf) < 0 ||
- (utimebuf.actime = statbuf.st_atime,
- utimebuf.modtime = statbuf.st_mtime,
- utime(target, &utimebuf) < 0)))
+ if (stat(source->s, &statbuf) < 0 ||
+ (utimebuf.actime = statbuf.st_atime,
+ utimebuf.modtime = statbuf.st_mtime,
+ utime(target->s, &utimebuf) < 0))
logmsg(rc, log_sys_err, "Couldn't copy inode timestamp from %s to %s: %s",
- source, target, strerror(errno));
+ source->s, target->s, strerror(errno));
- return ret;
-}
-
-/**
- * Link a file
- */
-static int ln(const char *source, const char *target)
-{
- unlink(target);
- return link(source, target) == 0;
+ return ok;
}
/**
- * Install an object. It'd be nice if we could just use link(), but
- * that would require us to trust rsync never to do anything bad. For
- * now we just copy in the simplest way possible. Come back to this
- * if profiling shows a hotspot here.
- *
- * Well, ok, profiling didn't show an issue, but inode exhaustion did.
- * So we now make copy vs link a configuration choice.
+ * Install an object.
*/
static int install_object(const rcynic_ctx_t *rc,
- const char *uri,
- const char *source)
+ const uri_t *uri,
+ const path_t *source,
+ const mib_counter_t code,
+ const object_generation_t generation)
{
- char target[FILENAME_MAX];
+ path_t target;
- if (!uri_to_filename(rc, uri, target, sizeof(target), rc->authenticated)) {
- logmsg(rc, log_data_err, "Couldn't generate installation name for %s", uri);
+ if (!uri_to_filename(rc, uri, &target, &rc->new_authenticated)) {
+ logmsg(rc, log_data_err, "Couldn't generate installation name for %s", uri->s);
return 0;
}
- if (!mkdir_maybe(rc, target)) {
- logmsg(rc, log_sys_err, "Couldn't create directory for %s", target);
+ if (!mkdir_maybe(rc, &target)) {
+ logmsg(rc, log_sys_err, "Couldn't create directory for %s", target.s);
return 0;
}
- if (rc->use_links ? !ln(source, target) : !cp(rc, source, target)) {
- logmsg(rc, log_sys_err, "Couldn't %s %s to %s",
- (rc->use_links ? "link" : "copy"), source, target);
+ if (!cp_ln(rc, source, &target))
return 0;
- }
- log_validation_status(rc, uri, validation_ok);
- logmsg(rc, log_telemetry, "Accepted %s", uri);
+ log_validation_status(rc, uri, code, generation);
return 1;
}
/**
- * Check str for a trailing suffix.
+ * Check str for a suffix.
*/
static int endswith(const char *str, const char *suffix)
{
@@ -1174,46 +1072,57 @@ static int startswith(const char *str, const char *prefix)
/**
- * Set a directory name, making sure it has the trailing slash we
- * require in various other routines.
+ * Set a directory name, adding or stripping a trailing slash as needed.
*/
-static void set_directory(char **out, const char *in)
+static int set_directory(const rcynic_ctx_t *rc, path_t *out, const char *in, const int want_slash)
{
- int need_slash;
+ int has_slash, need_slash;
size_t n;
- char *s;
- assert(in && out);
+ assert(rc && in && out);
+
n = strlen(in);
- assert(n > 0);
- need_slash = in[n - 1] != '/';
- s = malloc(n + need_slash + 1);
- assert(s != NULL);
- strcpy(s, in);
+
+ if (n == 0) {
+ logmsg(rc, log_usage_err, "Empty path");
+ return 0;
+ }
+
+ has_slash = in[n - 1] == '/';
+
+ need_slash = want_slash && !has_slash;
+
+ if (n + need_slash + 1 > sizeof(out->s)) {
+ logmsg(rc, log_usage_err, "Path \"%s\" too long", in);
+ return 0;
+ }
+
+ strcpy(out->s, in);
if (need_slash)
- strcat(s, "/");
- if (*out)
- free(*out);
- *out = s;
+ strcat(out->s, "/");
+ else if (has_slash && !want_slash)
+ out->s[n - 1] = '\0';
+
+ return 1;
}
/**
* Remove a directory tree, like rm -rf.
*/
-static int rm_rf(const char *name)
+static int rm_rf(const path_t *name)
{
- char path[FILENAME_MAX];
+ path_t path;
struct dirent *d;
size_t len;
DIR *dir;
int ret = 0, need_slash;
assert(name);
- len = strlen(name);
- assert(len > 0 && len < sizeof(path));
- need_slash = name[len - 1] != '/';
+ len = strlen(name->s);
+ assert(len > 0 && len < sizeof(path.s));
+ need_slash = name->s[len - 1] != '/';
- if (rmdir(name) == 0)
+ if (rmdir(name->s) == 0)
return 1;
switch (errno) {
@@ -1225,40 +1134,581 @@ static int rm_rf(const char *name)
return 0;
}
- if ((dir = opendir(name)) == NULL)
+ if ((dir = opendir(name->s)) == NULL)
return 0;
while ((d = readdir(dir)) != NULL) {
if (d->d_name[0] == '.' && (d->d_name[1] == '\0' || (d->d_name[1] == '.' && d->d_name[2] == '\0')))
continue;
- if (len + strlen(d->d_name) + need_slash >= sizeof(path))
+ if (len + strlen(d->d_name) + need_slash >= sizeof(path.s))
goto done;
- strcpy(path, name);
+ strcpy(path.s, name->s);
if (need_slash)
- strcat(path, "/");
- strcat(path, d->d_name);
+ strcat(path.s, "/");
+ strcat(path.s, d->d_name);
switch (d->d_type) {
case DT_DIR:
- if (!rm_rf(path))
+ if (!rm_rf(&path))
goto done;
continue;
default:
- if (unlink(path) < 0)
+ if (unlink(path.s) < 0)
goto done;
continue;
}
}
- ret = rmdir(name) == 0;
+ ret = rmdir(name->s) == 0;
done:
closedir(dir);
return ret;
}
+/**
+ * Construct names for the directories not directly settable by the
+ * user.
+ *
+ * This function also checks for an old-style rc->authenticated
+ * directory, to simplify upgrade from older versions of rcynic.
+ */
+static int construct_directory_names(rcynic_ctx_t *rc)
+{
+ struct stat st;
+ ssize_t n;
+ path_t p;
+ time_t t = time(0);
+
+ p = rc->authenticated;
+
+ n = strlen(p.s);
+
+ if (n + sizeof(authenticated_symlink_suffix) >= sizeof(p.s)) {
+ logmsg(rc, log_usage_err, "Symlink name would be too long");
+ return 0;
+ }
+
+ if (strftime(p.s + n, sizeof(p.s) - n - 1, ".%Y-%m-%dT%H:%M:%SZ", gmtime(&t)) == 0) {
+ logmsg(rc, log_usage_err, "Generated path with timestamp would be too long");
+ return 0;
+ }
+
+ if (!set_directory(rc, &rc->new_authenticated, p.s, 1))
+ return 0;
+
+ if (!set_directory(rc, &rc->old_authenticated, rc->authenticated.s, 1))
+ return 0;
+
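+  /*
+   * Upgrade path: an old-style rc->authenticated directory (a real
+   * directory rather than a symlink to a timestamped tree) gets moved
+   * aside as <authenticated>.old; if something is still in the way
+   * after that, we give up below.
+   */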
+ if (lstat(rc->authenticated.s, &st) == 0 && (st.st_mode & S_IFDIR) != 0 &&
+ strlen(rc->authenticated.s) + sizeof(".old") < sizeof(p.s)) {
+ p = rc->authenticated;
+ strcat(p.s, ".old");
+ rm_rf(&p);
+ (void) rename(rc->authenticated.s, p.s);
+ }
+
+ if (lstat(rc->authenticated.s, &st) == 0 && (st.st_mode & S_IFDIR) != 0) {
+ logmsg(rc, log_usage_err,
+ "Existing %s directory is in the way, please remove it",
+ rc->authenticated.s);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Do final symlink shuffle and cleanup of output directories.
+ */
+static int finalize_directories(const rcynic_ctx_t *rc)
+{
+ path_t path, sym, real_old, real_new;
+ const char *dir;
+ size_t n;
+ glob_t g;
+ int i;
+
+ if (!realpath(rc->old_authenticated.s, real_old.s))
+ real_old.s[0] = '\0';
+
+ if (!realpath(rc->new_authenticated.s, real_new.s))
+    real_new.s[0] = '\0';
+
+ path = rc->new_authenticated;
+
+ n = strlen(path.s);
+ assert(n > 1 && path.s[n - 1] == '/');
+ path.s[n - 1] = '\0';
+
+ if ((dir = strrchr(path.s, '/')) == NULL)
+ dir = path.s;
+ else
+ dir++;
+
+ sym = rc->authenticated;
+
+ assert(strlen(sym.s) + sizeof(authenticated_symlink_suffix) < sizeof(sym.s));
+ strcat(sym.s, authenticated_symlink_suffix);
+
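+  /*
+   * Point a temporary symlink at the new tree, then rename() it over
+   * rc->authenticated, so readers always see either the old tree or
+   * the new one, never a missing link.
+   */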
+ (void) unlink(sym.s);
+
+ if (symlink(dir, sym.s) < 0) {
+ logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
+ sym.s, dir, strerror(errno));
+ return 0;
+ }
+
+ if (rename(sym.s, rc->authenticated.s) < 0) {
+ logmsg(rc, log_sys_err, "Couldn't rename %s to %s: %s",
+ sym.s, rc->authenticated.s, strerror(errno));
+ return 0;
+ }
+
+ path = rc->authenticated;
+ assert(strlen(path.s) + sizeof(".*") < sizeof(path.s));
+ strcat(path.s, ".*");
+
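+  /*
+   * Clean up stragglers: remove any <authenticated>.* tree that is
+   * neither the tree we just published nor the previous run's tree.
+   */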
+ memset(&g, 0, sizeof(g));
+
+ if (real_new.s[0] && glob(path.s, 0, 0, &g) == 0)
+ for (i = 0; i < g.gl_pathc; i++)
+ if (realpath(g.gl_pathv[i], path.s) &&
+ strcmp(path.s, real_old.s) &&
+ strcmp(path.s, real_new.s))
+ rm_rf(&path);
+
+ return 1;
+}
+
+
+
+/**
+ * Check to see whether a hostname is in the dead host cache.
+ */
+static int dead_host_check(const rcynic_ctx_t *rc, const uri_t *uri)
+{
+ hostname_t hostname;
+
+ assert(rc && uri && rc->dead_host_cache);
+
+ return (uri_to_hostname(uri, &hostname) &&
+ sk_OPENSSL_STRING_find(rc->dead_host_cache, hostname.s) >= 0);
+}
+
+
+/**
+ * Add an entry to the dead host cache.
+ */
+static void dead_host_add(const rcynic_ctx_t *rc, const uri_t *uri)
+{
+ hostname_t hostname;
+
+ assert(rc && uri && rc->dead_host_cache);
+
+ if (dead_host_check(rc, uri))
+ return;
+
+ if (!uri_to_hostname(uri, &hostname))
+ return;
+
+ (void) sk_OPENSSL_STRING_push_strdup(rc->dead_host_cache, hostname.s);
+}
+
+
+
+/**
+ * Test whether a pair of URIs "conflict", that is, whether attempting
+ * to rsync both of them at the same time in parallel might cause
+ * unpredictable behavior. Might need a better name for this test.
+ *
+ * Returns non-zero iff the two URIs "conflict".
+ */
+static int conflicting_uris(const uri_t *a, const uri_t *b)
+{
+ size_t len_a, len_b;
+
+ assert(a && is_rsync(a->s) && b && is_rsync(b->s));
+
+ len_a = strlen(a->s);
+ len_b = strlen(b->s);
+
+ assert(len_a < sizeof(a->s) && len_b < sizeof(b->s));
+
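+  /*
+   * Deliberately conservative test: the URIs conflict if either is a
+   * prefix of the other.
+   */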
+ return !strncmp(a->s, b->s, len_a < len_b ? len_a : len_b);
+}
+
+
+
+/**
+ * Read non-directory filenames from a directory, so we can check to
+ * see what's missing from a manifest.
+ */
+static STACK_OF(OPENSSL_STRING) *directory_filenames(const rcynic_ctx_t *rc,
+ const walk_state_t state,
+ const uri_t *uri)
+{
+ STACK_OF(OPENSSL_STRING) *result = NULL;
+ path_t path;
+ const path_t *prefix = NULL;
+ DIR *dir = NULL;
+ struct dirent *d;
+ int ok = 0;
+
+ assert(rc && uri);
+
+ switch (state) {
+ case walk_state_current:
+ prefix = &rc->unauthenticated;
+ break;
+ case walk_state_backup:
+ prefix = &rc->old_authenticated;
+ break;
+ default:
+ goto done;
+ }
+
+ if (!uri_to_filename(rc, uri, &path, prefix) ||
+ (dir = opendir(path.s)) == NULL ||
+ (result = sk_OPENSSL_STRING_new(uri_cmp)) == NULL)
+ goto done;
+
+ while ((d = readdir(dir)) != NULL)
+ if (d->d_type != DT_DIR && !sk_OPENSSL_STRING_push_strdup(result, d->d_name))
+ goto done;
+
+ ok = 1;
+
+ done:
+ if (dir != NULL)
+ closedir(dir);
+
+ if (ok)
+ return result;
+
+ sk_OPENSSL_STRING_pop_free(result, OPENSSL_STRING_free);
+ return NULL;
+}
+
+
+
+/**
+ * Increment walk context reference count.
+ */
+static void walk_ctx_attach(walk_ctx_t *w)
+{
+ if (w != NULL) {
+ w->refcount++;
+ assert(w->refcount != 0);
+ }
+}
+
+/**
+ * Decrement walk context reference count; freeing the context if the
+ * reference count is now zero.
+ */
+static void walk_ctx_detach(walk_ctx_t *w)
+{
+ if (w != NULL && --(w->refcount) == 0) {
+ assert(w->refcount == 0);
+ X509_free(w->cert);
+ Manifest_free(w->manifest);
+ sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free);
+ free(w);
+ }
+}
+
+/**
+ * Return top context of a walk context stack.
+ */
+static walk_ctx_t *walk_ctx_stack_head(STACK_OF(walk_ctx_t) *wsk)
+{
+ return sk_walk_ctx_t_value(wsk, sk_walk_ctx_t_num(wsk) - 1);
+}
+
+/**
+ * Walk context iterator. Think of this as the thing you call in the
+ * third clause of a conceptual "for" loop: this reinitializes as
+ * necessary for the next pass through the loop.
+ *
+ * This is still under construction, but the general idea is that we have
+ * several state variables in a walk context which collectively define
+ * the current pass, product URI, etc, and we want to be able to
+ * iterate through this sequence via the event system. So we need a
+ * function which steps to the next state.
+ */
+static void walk_ctx_loop_next(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
+{
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+
+ assert(rc && wsk && w);
+
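+  /*
+   * Try, in order: the next manifest entry, the next leftover
+   * directory filename, and finally the next walk state (reloading
+   * the directory listing when the state changes).
+   */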
+ if (w->manifest && w->manifest_iteration + 1 < sk_FileAndHash_num(w->manifest->fileList)) {
+ w->manifest_iteration++;
+ return;
+ }
+
+ if (w->filenames && w->filename_iteration + 1 < sk_OPENSSL_STRING_num(w->filenames)) {
+ w->filename_iteration++;
+ return;
+ }
+
+ if (w->state < walk_state_done) {
+ w->state++;
+ w->manifest_iteration = 0;
+ w->filename_iteration = 0;
+ sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free);
+ w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia);
+ }
+}
+
+/**
+ * Whether we're done iterating over a walk context. Think of this as
+ * the thing you call (negated) in the second clause of a conceptual
+ * "for" loop.
+ */
+static int walk_ctx_loop_done(STACK_OF(walk_ctx_t) *wsk)
+{
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ return wsk == NULL || w == NULL || w->state >= walk_state_done;
+}
+
+static Manifest *check_manifest(const rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk);
+
+/**
+ * Loop initializer for walk context. Think of this as the thing you
+ * call in the first clause of a conceptual "for" loop.
+ */
+static void walk_ctx_loop_init(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
+{
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+
+ assert(rc && wsk && w && w->state == walk_state_ready);
+
+ assert(w->manifest == NULL);
+ if ((w->manifest = check_manifest(rc, wsk)) == NULL)
+ logmsg(rc, log_telemetry, "Couldn't get manifest %s, blundering onward", w->certinfo.manifest.s);
+
+ assert(w->filenames == NULL);
+ w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia);
+
+ w->stale_manifest = w->manifest != NULL && X509_cmp_current_time(w->manifest->nextUpdate) < 0;
+
+ w->manifest_iteration = 0;
+ w->filename_iteration = 0;
+ w->state++;
+
+ assert(w->state == walk_state_current);
+
+ while (!walk_ctx_loop_done(wsk) &&
+ (w->manifest == NULL || w->manifest_iteration >= sk_FileAndHash_num(w->manifest->fileList)) &&
+ (w->filenames == NULL || w->filename_iteration >= sk_OPENSSL_STRING_num(w->filenames)))
+ walk_ctx_loop_next(rc, wsk);
+}
+
+/**
+ * Extract URI and hash values from walk context.
+ */
+static int walk_ctx_loop_this(const rcynic_ctx_t *rc,
+ STACK_OF(walk_ctx_t) *wsk,
+ uri_t *uri,
+ const unsigned char **hash,
+ size_t *hashlen)
+{
+ const walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ const char *name = NULL;
+ FileAndHash *fah = NULL;
+
+ assert(rc && wsk && w && uri && hash && hashlen);
+
+ if (w->manifest != NULL && w->manifest_iteration < sk_FileAndHash_num(w->manifest->fileList)) {
+ fah = sk_FileAndHash_value(w->manifest->fileList, w->manifest_iteration);
+ name = (const char *) fah->file->data;
+ } else if (w->filenames != NULL && w->filename_iteration < sk_OPENSSL_STRING_num(w->filenames)) {
+ name = sk_OPENSSL_STRING_value(w->filenames, w->filename_iteration);
+ }
+
+ if (name == NULL) {
+ logmsg(rc, log_sys_err, "Can't find a URI in walk context, this shouldn't happen: state %d, manifest_iteration %d, filename_iteration %d",
+ (int) w->state, w->manifest_iteration, w->filename_iteration);
+ return 0;
+ }
+
+ if (strlen(w->certinfo.sia.s) + strlen(name) >= sizeof(uri->s)) {
+ logmsg(rc, log_data_err, "URI %s%s too long, skipping", w->certinfo.sia.s, uri->s);
+ return 0;
+ }
+
+ strcpy(uri->s, w->certinfo.sia.s);
+ strcat(uri->s, name);
+
+ if (fah != NULL) {
+ sk_OPENSSL_STRING_remove(w->filenames, name);
+ *hash = fah->hash->data;
+ *hashlen = fah->hash->length;
+ } else {
+ *hash = NULL;
+ *hashlen = 0;
+ }
+
+ return 1;
+}
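+
+/*
+ * Illustrative sketch only, not part of the real (event-driven)
+ * control flow: taken together, the functions above compose into the
+ * conceptual loop
+ *
+ *   for (walk_ctx_loop_init(rc, wsk);
+ *        !walk_ctx_loop_done(wsk);
+ *        walk_ctx_loop_next(rc, wsk)) {
+ *     if (walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen))
+ *       ... process one product URI ...
+ *   }
+ */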
+
+/**
+ * Create a new walk context stack.
+ */
+static STACK_OF(walk_ctx_t) *walk_ctx_stack_new(void)
+{
+ return sk_walk_ctx_t_new_null();
+}
+
+/**
+ * Push a walk context onto a walk context stack, return the new context.
+ */
+static walk_ctx_t *walk_ctx_stack_push(STACK_OF(walk_ctx_t) *wsk,
+ X509 *x,
+ const certinfo_t *certinfo)
+{
+ walk_ctx_t *w;
+
+ if (x == NULL || certinfo == NULL)
+ return NULL;
+
+ if ((w = malloc(sizeof(*w))) == NULL)
+ return NULL;
+
+ memset(w, 0, sizeof(*w));
+ w->cert = x;
+ w->certinfo = *certinfo;
+
+ if (!sk_walk_ctx_t_push(wsk, w)) {
+ free(w);
+ return NULL;
+ }
+
+ walk_ctx_attach(w);
+ return w;
+}
+
+/**
+ * Pop and discard a walk context from a walk context stack.
+ */
+static void walk_ctx_stack_pop(STACK_OF(walk_ctx_t) *wsk)
+{
+ walk_ctx_detach(sk_walk_ctx_t_pop(wsk));
+}
+
+/**
+ * Clone a stack of walk contexts.
+ */
+static STACK_OF(walk_ctx_t) *walk_ctx_stack_clone(STACK_OF(walk_ctx_t) *old_wsk)
+{
+ STACK_OF(walk_ctx_t) *new_wsk;
+ int i;
+ if (old_wsk == NULL || (new_wsk = sk_walk_ctx_t_dup(old_wsk)) == NULL)
+ return NULL;
+ for (i = 0; i < sk_walk_ctx_t_num(new_wsk); i++)
+ walk_ctx_attach(sk_walk_ctx_t_value(new_wsk, i));
+ return new_wsk;
+}
+
+/**
+ * Extract certificate stack from walk context stack. Returns a newly
+ * created STACK_OF(X509) pointing to the existing cert objects (ie,
+ * this is a shallow copy, so only free the STACK_OF(X509), not the
+ * certificates themselves).
+ */
+static STACK_OF(X509) *walk_ctx_stack_certs(STACK_OF(walk_ctx_t) *wsk)
+{
+ STACK_OF(X509) *xsk = sk_X509_new_null();
+ walk_ctx_t *w;
+ int i;
+
+ for (i = 0; i < sk_walk_ctx_t_num(wsk); i++)
+ if ((w = sk_walk_ctx_t_value(wsk, i)) == NULL ||
+ (w->cert != NULL && !sk_X509_push(xsk, w->cert)))
+ goto fail;
+
+ return xsk;
+
+ fail:
+ sk_X509_free(xsk);
+ return NULL;
+}
+
+/**
+ * Free a walk context stack, decrementing reference counts of each
+ * frame on it.
+ */
+static void walk_ctx_stack_free(STACK_OF(walk_ctx_t) *wsk)
+{
+ sk_walk_ctx_t_pop_free(wsk, walk_ctx_detach);
+}
+
+
+
+static int rsync_count_running(const rcynic_ctx_t *);
+
+/**
+ * Add a task to the task queue.
+ */
+static int task_add(const rcynic_ctx_t *rc,
+ void (*handler)(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *),
+ STACK_OF(walk_ctx_t) *wsk)
+{
+ task_t *t = malloc(sizeof(*t));
+
+ assert(rc && rc->task_queue && handler);
+
+ assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+ if (!t)
+ return 0;
+
+ t->handler = handler;
+ t->wsk = wsk;
+
+ if (sk_task_t_push(rc->task_queue, t))
+ return 1;
+
+ free(t);
+ return 0;
+}
+
+/**
+ * Run tasks until queue is empty.
+ */
+static void task_run_q(rcynic_ctx_t *rc)
+{
+ task_t *t;
+ assert(rc && rc->task_queue);
+ while ((t = sk_task_t_shift(rc->task_queue)) != NULL) {
+ t->handler(rc, t->wsk);
+ free(t);
+ }
+}
+
/**
+ * Record that we've already synced a particular rsync URI.
+ */
+
+static void rsync_cache_add(const rcynic_ctx_t *rc, const uri_t *uri)
+{
+ uri_t uribuf;
+ char *s;
+
+ assert(rc && uri && rc->rsync_cache);
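+  /*
+   * Strip any trailing slashes, then cache the URI without its
+   * rsync:// prefix, which is the form rsync_cached_string() expects.
+   */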
+ uribuf = *uri;
+ while ((s = strrchr(uribuf.s, '/')) != NULL && s[1] == '\0')
+ *s = '\0';
+ assert(strlen(uribuf.s) > SIZEOF_RSYNC);
+ if (!sk_OPENSSL_STRING_push_strdup(rc->rsync_cache, uribuf.s + SIZEOF_RSYNC))
+ logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri->s);
+}
+
+/**
* Maintain a cache of URIs we've already fetched.
*/
static int rsync_cached_string(const rcynic_ctx_t *rc,
@@ -1282,235 +1732,610 @@ static int rsync_cached_string(const rcynic_ctx_t *rc,
* Check whether a particular URI has been cached.
*/
static int rsync_cached_uri(const rcynic_ctx_t *rc,
- const char *uri)
+ const uri_t *uri)
{
- return is_rsync(uri) && rsync_cached_string(rc, uri + SIZEOF_RSYNC);
+ return is_rsync(uri->s) && rsync_cached_string(rc, uri->s + SIZEOF_RSYNC);
}
+/**
+ * Return count of how many rsync contexts are currently running.
+ */
+static int rsync_count_running(const rcynic_ctx_t *rc)
+{
+ const rsync_ctx_t *ctx;
+ int i, n = 0;
+
+ assert(rc && rc->rsync_queue);
+
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+ switch (ctx->state) {
+ case rsync_state_running:
+ case rsync_state_terminating:
+ n++;
+ default:
+ continue;
+ }
+ }
+
+ return n;
+}
/**
- * Run rsync. This is fairly nasty, because we need to:
- *
- * @li Construct the argument list for rsync;
- *
- * @li Run rsync in a child process;
- *
- * @li Sit listening to rsync's output, logging whatever we get;
- *
- * @li Impose an optional time limit on rsync's execution time
- *
- * @li Clean up from (b), (c), and (d); and
- *
- * @li Keep track of which URIs we've already fetched, so we don't
- * have to do it again.
- *
- * Taken all together, this is pretty icky. Breaking it into separate
- * functions wouldn't help much. Don't read this on a full stomach.
+ * Test whether an rsync context conflicts with anything that's
+ * currently runable.
+ */
+static int rsync_conflicts(const rcynic_ctx_t *rc,
+ const rsync_ctx_t *ctx)
+{
+ const rsync_ctx_t *c;
+ int i;
+
+ assert(rc && ctx && rc->rsync_queue);
+
+ for (i = 0; (c = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
+ if (c != ctx &&
+ (c->state == rsync_state_initial ||
+ c->state == rsync_state_running) &&
+ conflicting_uris(&c->uri, &ctx->uri))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Test whether an rsync context is runnable at this time.
+ */
+static int rsync_runable(const rcynic_ctx_t *rc,
+ const rsync_ctx_t *ctx)
+{
+ assert(rc && ctx);
+
+ switch (ctx->state) {
+
+ case rsync_state_initial:
+ case rsync_state_running:
+ return 1;
+
+ case rsync_state_retry_wait:
+ return ctx->deadline <= time(0);
+
+ case rsync_state_terminating:
+ return 0;
+
+ case rsync_state_conflict_wait:
+ return !rsync_conflicts(rc, ctx);
+ }
+
+ return 0;
+}
+
+/**
+ * Return count of runnable rsync contexts.
*/
-static int rsync(const rcynic_ctx_t *rc,
- const char * const *args,
- const char *uri)
+static int rsync_count_runable(const rcynic_ctx_t *rc)
{
- static const char *rsync_cmd[] = {
- "rsync", "--update", "--times", "--copy-links", "--itemize-changes", NULL
+ const rsync_ctx_t *ctx;
+ int i, n = 0;
+
+ assert(rc && rc->rsync_queue);
+
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
+ if (rsync_runable(rc, ctx))
+ n++;
+
+ return n;
+}
+
+/**
+ * Run an rsync process.
+ */
+static void rsync_run(const rcynic_ctx_t *rc,
+ rsync_ctx_t *ctx)
+{
+ static const char * const rsync_cmd[] = {
+ "rsync", "--update", "--times", "--copy-links", "--itemize-changes"
+ };
+ static const char * const rsync_tree_args[] = {
+ "--recursive", "--delete"
};
- const char *argv[100];
- char *s, *b, buffer[URI_MAX * 4], path[FILENAME_MAX];
- int i, n, ret, pipe_fds[2], argc = 0, pid_status = -1;
- time_t now, deadline;
- struct timeval tv;
- pid_t pid, wpid;
- fd_set rfds;
+ const char *argv[10];
+ path_t path;
+ int i, argc = 0, flags, pipe_fds[2];
- assert(rc && uri);
+ pipe_fds[0] = pipe_fds[1] = -1;
+
+ assert(rc && ctx && ctx->pid == 0 && ctx->state != rsync_state_running && rsync_runable(rc, ctx));
+
+ assert(rsync_count_running(rc) < rc->max_parallel_fetches);
+
+ logmsg(rc, log_telemetry, "Fetching %s", ctx->uri.s);
memset(argv, 0, sizeof(argv));
- for (i = 0; rsync_cmd[i]; i++) {
+ for (i = 0; i < sizeof(rsync_cmd)/sizeof(*rsync_cmd); i++) {
assert(argc < sizeof(argv)/sizeof(*argv));
argv[argc++] = rsync_cmd[i];
}
- if (args) {
- for (i = 0; args[i]; i++) {
+ if (endswith(ctx->uri.s, "/")) {
+ for (i = 0; i < sizeof(rsync_tree_args)/sizeof(*rsync_tree_args); i++) {
assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = args[i];
+ argv[argc++] = rsync_tree_args[i];
}
}
if (rc->rsync_program)
argv[0] = rc->rsync_program;
- if (!uri_to_filename(rc, uri, path, sizeof(path), rc->unauthenticated)) {
- logmsg(rc, log_data_err, "Couldn't extract filename from URI: %s", uri);
- return 0;
+ if (!uri_to_filename(rc, &ctx->uri, &path, &rc->unauthenticated)) {
+ logmsg(rc, log_data_err, "Couldn't extract filename from URI: %s", ctx->uri.s);
+ goto lose;
}
assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = uri;
+ argv[argc++] = ctx->uri.s;
assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = path;
+ argv[argc++] = path.s;
- assert(strlen(uri) > SIZEOF_RSYNC);
- if (rsync_cached_uri(rc, uri)) {
- logmsg(rc, log_verbose, "rsync cache hit for %s", uri);
- return 1;
+ if (!mkdir_maybe(rc, &path)) {
+ logmsg(rc, log_sys_err, "Couldn't make target directory: %s", path.s);
+ goto lose;
}
- if (!mkdir_maybe(rc, path)) {
- logmsg(rc, log_sys_err, "Couldn't make target directory: %s", path);
- return 0;
- }
-
- logmsg(rc, log_telemetry, "Fetching %s", uri);
-
for (i = 0; i < argc; i++)
logmsg(rc, log_verbose, "rsync argv[%d]: %s", i, argv[i]);
if (pipe(pipe_fds) < 0) {
logmsg(rc, log_sys_err, "pipe() failed: %s", strerror(errno));
- return 0;
+ goto lose;
}
+ ctx->fd = pipe_fds[0];
- if ((i = fcntl(pipe_fds[0], F_GETFL, 0)) == -1 ||
- fcntl(pipe_fds[0], F_SETFL, i | O_NONBLOCK) == -1) {
- logmsg(rc, log_sys_err,
- "Couldn't set rsync's output stream non-blocking: %s",
+ if ((flags = fcntl(ctx->fd, F_GETFL, 0)) == -1) {
+ logmsg(rc, log_sys_err, "fcntl(F_GETFL) failed: %s",
strerror(errno));
- close(pipe_fds[0]);
- close(pipe_fds[1]);
- return 0;
+ goto lose;
+ }
+ flags |= O_NONBLOCK;
+ if (fcntl(ctx->fd, F_SETFL, flags) == -1) {
+ logmsg(rc, log_sys_err, "fcntl(F_SETFL) failed: %s",
+ strerror(errno));
+ goto lose;
}
- switch ((pid = vfork())) {
+ switch ((ctx->pid = vfork())) {
+
case -1:
logmsg(rc, log_sys_err, "vfork() failed: %s", strerror(errno));
- close(pipe_fds[0]);
- close(pipe_fds[1]);
- return 0;
+ goto lose;
+
case 0:
-#define whine(msg) write(2, msg, sizeof(msg) - 1)
- close(pipe_fds[0]);
- if (dup2(pipe_fds[1], 1) < 0)
- whine("dup2(1) failed\n");
+ /*
+ * Child
+ */
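+  /*
+   * After vfork() the child shares the parent's address space until
+   * it execs, so we stick to write(2) for diagnostics and _exit()
+   * on failure rather than touching stdio.
+   */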
+#define whine(msg) ((void) write(2, msg, sizeof(msg) - 1))
+ if (close(pipe_fds[0]) < 0)
+ whine("close(pipe_fds[0]) failed\n");
+ else if (dup2(pipe_fds[1], 1) < 0)
+ whine("dup2(pipe_fds[1], 1) failed\n");
else if (dup2(pipe_fds[1], 2) < 0)
- whine("dup2(2) failed\n");
+ whine("dup2(pipe_fds[1], 2) failed\n");
+ else if (close(pipe_fds[1]) < 0)
+ whine("close(pipe_fds[1]) failed\n");
else if (execvp(argv[0], (char * const *) argv) < 0)
- whine("execvp() failed\n");
+ whine("execvp(argv[0], (char * const *) argv) failed\n");
whine("last system error: ");
write(2, strerror(errno), strlen(strerror(errno)));
whine("\n");
_exit(1);
#undef whine
+
+ default:
+ /*
+ * Parent
+ */
+ (void) close(pipe_fds[1]);
+ pipe_fds[1] = -1;
+ ctx->state = rsync_state_running;
+ ctx->problem = rsync_problem_none;
+ if (rc->rsync_timeout)
+ ctx->deadline = time(0) + rc->rsync_timeout;
+ logmsg(rc, log_debug, "Subprocess %u started, queued %d, runable %d, running %d, max %d, URI %s",
+ (unsigned) ctx->pid, sk_rsync_ctx_t_num(rc->rsync_queue), rsync_count_runable(rc), rsync_count_running(rc), rc->max_parallel_fetches, ctx->uri.s);
+ if (ctx->handler)
+ ctx->handler(rc, ctx, rsync_status_pending, &ctx->uri, ctx->wsk);
+ return;
+
}
- close(pipe_fds[1]);
+ lose:
+ if (pipe_fds[0] != -1)
+ (void) close(pipe_fds[0]);
+ if (pipe_fds[1] != -1)
+ (void) close(pipe_fds[1]);
+ if (rc->rsync_queue && ctx)
+ (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
+ if (ctx && ctx->handler)
+ ctx->handler(rc, ctx, rsync_status_failed, &ctx->uri, ctx->wsk);
+ if (ctx)
+ free(ctx);
+}
+
+/**
+ * Process one line of rsync's output. This is a separate function
+ * primarily to centralize scraping for magic error strings.
+ */
+static void do_one_rsync_log_line(const rcynic_ctx_t *rc,
+ rsync_ctx_t *ctx)
+{
+ unsigned u;
+ char *s;
- now = time(0);
- deadline = now + rc->rsync_timeout;
+ /*
+ * Send line to our log unless it's empty.
+ */
+ if (ctx->buffer[strspn(ctx->buffer, " \t\n\r")] != '\0')
+ logmsg(rc, log_telemetry, "rsync[%u]: %s", ctx->pid, ctx->buffer);
- n = -1;
- i = 0;
- while ((wpid = waitpid(pid, &pid_status, WNOHANG)) == 0 &&
- (!rc->rsync_timeout || (now = time(0)) < deadline)) {
- FD_ZERO(&rfds);
- FD_SET(pipe_fds[0], &rfds);
- if (rc->rsync_timeout) {
- tv.tv_sec = deadline - now;
- tv.tv_usec = 0;
- n = select(pipe_fds[0] + 1, &rfds, NULL, NULL, &tv);
- } else {
- n = select(pipe_fds[0] + 1, &rfds, NULL, NULL, NULL);
+ /*
+ * Check for magic error strings
+ */
+ if ((s = strstr(ctx->buffer, "@ERROR: max connections")) != NULL) {
+ ctx->problem = rsync_problem_refused;
+ if (sscanf(s, "@ERROR: max connections (%u) reached -- try again later", &u) == 1)
+ logmsg(rc, log_debug, "Subprocess %u reported limit of %u for %s", ctx->pid, u, ctx->uri.s);
+ }
+}
+
+/**
+ * Construct select() arguments.
+ */
+static int rsync_construct_select(const rcynic_ctx_t *rc,
+ const time_t now,
+ fd_set *rfds,
+ struct timeval *tv)
+{
+ rsync_ctx_t *ctx;
+ time_t when = 0;
+ int i, n = 0;
+
+ assert(rc && rc->rsync_queue && rfds && tv);
+
+ FD_ZERO(rfds);
+
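+  /*
+   * Running contexts contribute their pipe fds to the read set;
+   * running contexts (when an rsync timeout is configured) and
+   * retry-wait contexts contribute their deadlines, the earliest of
+   * which becomes the select() timeout.
+   */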
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+ switch (ctx->state) {
+
+ case rsync_state_running:
+ if (ctx->fd >= 0) {
+ FD_SET(ctx->fd, rfds);
+ if (ctx->fd > n)
+ n = ctx->fd;
+ }
+ if (!rc->rsync_timeout)
+ continue;
+ /* Fall through */
+
+ case rsync_state_retry_wait:
+ if (when == 0 || ctx->deadline < when)
+ when = ctx->deadline;
+ /* Fall through */
+
+ default:
+ continue;
}
- if (n == 0 || (n < 0 && errno == EINTR))
+ }
+
+ tv->tv_sec = when ? when - now : 0;
+ tv->tv_usec = 0;
+ return n;
+}
+
+/**
+ * Manager for queue of rsync tasks in progress.
+ *
+ * General plan here is to process one completed child, or output
+ * accumulated from children, or block if there is absolutely nothing
+ * to do, on the theory that caller had nothing to do either or would
+ * not have called us. Once we've done something allegedly useful, we
+ * return, because this is not the event loop; if and when the event
+ * loop has nothing more important to do, we'll be called again.
+ *
+ * So this is the only place where the program blocks waiting for
+ * children, but we only do it when we know there's nothing else
+ * useful that we could be doing while we wait.
+ */
+static void rsync_mgr(const rcynic_ctx_t *rc)
+{
+ time_t now = time(0);
+ int i, n, pid_status = -1;
+ rsync_ctx_t *ctx = NULL;
+ struct timeval tv;
+ fd_set rfds;
+ pid_t pid;
+ char *s;
+
+ assert(rc && rc->rsync_queue);
+
+ /*
+ * Check for exited subprocesses.
+ */
+
+ while ((pid = waitpid(-1, &pid_status, WNOHANG)) > 0) {
+
+ /*
+ * Child exited, handle it.
+ */
+
+ logmsg(rc, log_debug, "Subprocess %d exited with status %d", pid, WEXITSTATUS(pid_status));
+
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
+ if (ctx->pid == pid)
+ break;
+ if (ctx == NULL) {
+ assert(i == sk_rsync_ctx_t_num(rc->rsync_queue));
+ logmsg(rc, log_sys_err, "Couldn't find rsync context for pid %d", pid);
continue;
- if (n < 0)
+ }
+
+ close(ctx->fd);
+ ctx->fd = -1;
+
+ if (ctx->buflen > 0) {
+ assert(ctx->buflen < sizeof(ctx->buffer));
+ ctx->buffer[ctx->buflen] = '\0';
+ do_one_rsync_log_line(rc, ctx);
+ ctx->buflen = 0;
+ }
+
+ switch (WEXITSTATUS(pid_status)) {
+
+ case 0:
+ log_validation_status(rc, &ctx->uri,
+ (ctx->problem == rsync_problem_timed_out
+ ? rsync_timed_out
+ : rsync_succeeded),
+ object_generation_null);
+ break;
+
+ case 5: /* "Error starting client-server protocol" */
+ /*
+ * Handle remote rsyncd refusing to talk to us because we've
+ * exceeded its connection limit. Back off for a short
+ * interval, then retry.
+ */
+ if (ctx->problem == rsync_problem_refused && ctx->tries < rc->max_retries) {
+ unsigned char r;
+ if (!RAND_bytes(&r, sizeof(r)))
+ r = 60;
+ ctx->deadline = time(0) + rc->retry_wait_min + r;
+ ctx->state = rsync_state_retry_wait;
+ ctx->problem = rsync_problem_none;
+ ctx->pid = 0;
+ ctx->tries++;
+ logmsg(rc, log_telemetry, "Scheduling retry for %s", ctx->uri.s);
+ continue;
+ }
+
+ /* Otherwise, fall through */
+
+ case 2: /* "Protocol incompatibility" */
+ case 4: /* "Requested action not supported" */
+ case 10: /* "Error in socket I/O" */
+ case 11: /* "Error in file I/O" */
+ case 12: /* "Error in rsync protocol data stream" */
+ case 21: /* "Some error returned by waitpid()" */
+ case 30: /* "Timeout in data send/receive" */
+ case 35: /* "Timeout waiting for daemon connection" */
+ logmsg(rc, log_telemetry, "Adding %s to dead host cache", ctx->uri.s);
+ dead_host_add(rc, &ctx->uri);
+
+ /* Fall through */
+
+ default:
+ logmsg(rc, log_data_err, "rsync %u exited with status %d fetching %s",
+ (unsigned) pid, WEXITSTATUS(pid_status), ctx->uri.s);
+ log_validation_status(rc, &ctx->uri,
+ (rc->rsync_timeout && now >= ctx->deadline
+ ? rsync_timed_out
+ : rsync_failed),
+ object_generation_null);
break;
- while ((n = read(pipe_fds[0], buffer + i, sizeof(buffer) - i - 1)) > 0) {
- n += i;
- assert(n < sizeof(buffer));
- buffer[n] = '\0';
- for (b = buffer; (s = strchr(b, '\n')) != NULL; b = s) {
- *s++ = '\0';
- logmsg(rc, log_telemetry, "%s", b);
+ }
+
+ rsync_cache_add(rc, &ctx->uri);
+ if (ctx->handler)
+ ctx->handler(rc, ctx, (ctx->problem == rsync_problem_timed_out
+ ? rsync_status_timed_out
+ : WEXITSTATUS(pid_status) != 0
+ ? rsync_status_failed
+ : rsync_status_done),
+ &ctx->uri, ctx->wsk);
+ (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
+ free(ctx);
+ ctx = NULL;
+ }
+
+ if (pid == -1 && errno != EINTR && errno != ECHILD)
+ logmsg(rc, log_sys_err, "waitpid() returned error: %s", strerror(errno));
+
+ assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+ /*
+ * Look for rsync contexts that have become runable.
+ */
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
+ if (ctx->state != rsync_state_running &&
+ rsync_runable(rc, ctx) &&
+ rsync_count_running(rc) < rc->max_parallel_fetches)
+ rsync_run(rc, ctx);
+
+ assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+ /*
+ * Check for log text from subprocesses.
+ */
+
+ n = rsync_construct_select(rc, now, &rfds, &tv);
+
+ if (n > 0 || tv.tv_sec)
+ n = select(n + 1, &rfds, NULL, NULL, tv.tv_sec ? &tv : NULL);
+
+ if (n > 0) {
+
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+ if (ctx->fd <= 0 || !FD_ISSET(ctx->fd, &rfds))
+ continue;
+
+ assert(ctx->buflen < sizeof(ctx->buffer) - 1);
+
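+      /*
+       * Drain the pipe, logging each complete line; anything after
+       * the last newline stays buffered for the next pass.  If the
+       * buffer fills without a newline, flush it as a single line.
+       */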
+ while ((n = read(ctx->fd, ctx->buffer + ctx->buflen, sizeof(ctx->buffer) - 1 - ctx->buflen)) > 0) {
+ ctx->buflen += n;
+ assert(ctx->buflen < sizeof(ctx->buffer));
+ ctx->buffer[ctx->buflen] = '\0';
+
+ while ((s = strchr(ctx->buffer, '\n')) != NULL) {
+ *s++ = '\0';
+ do_one_rsync_log_line(rc, ctx);
+ assert(s > ctx->buffer && s < ctx->buffer + sizeof(ctx->buffer));
+ ctx->buflen -= s - ctx->buffer;
+ assert(ctx->buflen < sizeof(ctx->buffer));
+ if (ctx->buflen > 0)
+ memmove(ctx->buffer, s, ctx->buflen);
+ ctx->buffer[ctx->buflen] = '\0';
+ }
+
+ if (ctx->buflen == sizeof(ctx->buffer) - 1) {
+ ctx->buffer[sizeof(ctx->buffer) - 1] = '\0';
+ do_one_rsync_log_line(rc, ctx);
+ ctx->buflen = 0;
+ }
}
- i = strlen(b);
- assert(i < sizeof(buffer) && b + i < buffer + sizeof(buffer));
- if (b == buffer && i == sizeof(buffer) - 1) {
- logmsg(rc, log_telemetry, "%s\\", b);
- i = 0;
+
+ if (n == 0) {
+ (void) close(ctx->fd);
+ ctx->fd = -1;
}
- if (i > 0) {
- memmove(buffer, b, i);
+ }
+ }
+
+ assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+ /*
+ * Deal with children that have been running too long.
+ */
+ if (rc->rsync_timeout) {
+ for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+ int sig;
+ if (ctx->pid <= 0 || now < ctx->deadline)
+ continue;
+ sig = ctx->tries++ < KILL_MAX ? SIGTERM : SIGKILL;
+ if (ctx->state != rsync_state_terminating) {
+ ctx->problem = rsync_problem_timed_out;
+ ctx->state = rsync_state_terminating;
+ ctx->tries = 0;
+ logmsg(rc, log_telemetry, "Subprocess %u is taking too long fetching %s, whacking it", (unsigned) ctx->pid, ctx->uri.s);
+ dead_host_add(rc, &ctx->uri);
+ } else if (sig == SIGTERM) {
+ logmsg(rc, log_telemetry, "Whacking subprocess %u again", (unsigned) ctx->pid);
+ } else {
+ logmsg(rc, log_telemetry, "Whacking subprocess %u with big hammer", (unsigned) ctx->pid);
}
+ (void) kill(ctx->pid, sig);
+ ctx->deadline = now + 1;
}
- if (n == 0 || (n < 0 && errno != EAGAIN))
- break;
}
-
- close(pipe_fds[0]);
+}
+
+/**
+ * Set up rsync context and attempt to start it.
+ */
+static void rsync_init(const rcynic_ctx_t *rc,
+ const uri_t *uri,
+ STACK_OF(walk_ctx_t) *wsk,
+ void (*handler)(const rcynic_ctx_t *, const rsync_ctx_t *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *))
+{
+ rsync_ctx_t *ctx = NULL;
- assert(i >= 0 && i < sizeof(buffer));
- if (i) {
- buffer[i] = '\0';
- logmsg(rc, log_telemetry, "%s", buffer);
+ assert(rc && uri && strlen(uri->s) > SIZEOF_RSYNC);
+
+ if (!rc->run_rsync) {
+ logmsg(rc, log_verbose, "rsync disabled, skipping %s", uri->s);
+ rsync_cache_add(rc, uri);
+ if (handler)
+ handler(rc, NULL, rsync_status_skipped, uri, wsk);
+ return;
}
- if (n < 0 && errno != EAGAIN)
- logmsg(rc, log_sys_err, "Problem reading rsync's output: %s",
- strerror(errno));
+ if (rsync_cached_uri(rc, uri)) {
+ logmsg(rc, log_verbose, "rsync cache hit for %s", uri->s);
+ if (handler)
+ handler(rc, NULL, rsync_status_done, uri, wsk);
+ return;
+ }
+
+ if (dead_host_check(rc, uri)) {
+ logmsg(rc, log_verbose, "Dead host cache hit for %s", uri->s);
+ rsync_cache_add(rc, uri);
+ if (handler)
+ handler(rc, NULL, rsync_status_skipped, uri, wsk);
+ return;
+ }
- if (rc->rsync_timeout && now >= deadline)
- logmsg(rc, log_data_err,
- "Fetch of %s took longer than %d seconds, terminating fetch",
- uri, rc->rsync_timeout);
+ if ((ctx = malloc(sizeof(*ctx))) == NULL) {
+ logmsg(rc, log_sys_err, "malloc(rsync_ctxt_t) failed");
+ if (handler)
+ handler(rc, NULL, rsync_status_failed, uri, wsk);
+ return;
+ }
- assert(pid > 0);
- for (i = 0; i < KILL_MAX && wpid == 0; i++) {
- if ((wpid = waitpid(pid, &pid_status, WNOHANG)) != 0 && WIFEXITED(pid_status))
- break;
- kill(pid, SIGTERM);
- sleep(1);
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->uri = *uri;
+ ctx->handler = handler;
+ ctx->wsk = wsk;
+ ctx->fd = -1;
+
+ if (!sk_rsync_ctx_t_push(rc->rsync_queue, ctx)) {
+ logmsg(rc, log_sys_err, "Couldn't push rsync state object onto queue, punting %s", ctx->uri.s);
+ if (handler)
+ handler(rc, ctx, rsync_status_failed, uri, wsk);
+ free(ctx);
+ return;
}
- if (WEXITSTATUS(pid_status)) {
- logmsg(rc, log_data_err, "rsync exited with status %d fetching %s",
- WEXITSTATUS(pid_status), uri);
- ret = 0;
- mib_increment(rc, uri, (rc->rsync_timeout && now >= deadline
- ? rsync_timed_out
- : rsync_failed));
- } else {
- ret = 1;
- mib_increment(rc, uri, rsync_succeeded);
+ if (rsync_conflicts(rc, ctx)) {
+ logmsg(rc, log_debug, "New rsync context %s is feeling conflicted", ctx->uri.s);
+ ctx->state = rsync_state_conflict_wait;
}
- assert(strlen(uri) > SIZEOF_RSYNC);
- strcpy(buffer, uri + SIZEOF_RSYNC);
- if ((s = strrchr(buffer, '/')) != NULL && s[1] == '\0')
- *s = '\0';
- if (!sk_OPENSSL_STRING_push_strdup(rc->rsync_cache, buffer))
- logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri);
- return ret;
+#if 0
+  if (rsync_runable(rc, ctx) && rsync_count_running(rc) < rc->max_parallel_fetches)
+ rsync_run(rc, ctx);
+#endif
}
/**
- * rsync a single file (CRL, manifest, ROA, whatever).
+ * rsync a single file (trust anchor, CRL, manifest, ROA, whatever).
*/
-static int rsync_file(const rcynic_ctx_t *rc, const char *uri)
+static void rsync_file(const rcynic_ctx_t *rc,
+ const uri_t *uri)
{
- return rsync(rc, NULL, uri);
+ assert(!endswith(uri->s, "/"));
+ rsync_init(rc, uri, NULL, NULL);
}
/**
* rsync an entire subtree, generally rooted at a SIA collection.
*/
-static int rsync_tree(const rcynic_ctx_t *rc, const char *uri)
+static void rsync_tree(const rcynic_ctx_t *rc,
+ const uri_t *uri,
+ STACK_OF(walk_ctx_t) *wsk,
+ void (*handler)(const rcynic_ctx_t *, const rsync_ctx_t *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *))
{
- static const char * const rsync_args[] = { "--recursive", "--delete", NULL };
- return rsync(rc, rsync_args, uri);
+ assert(endswith(uri->s, "/"));
+ rsync_init(rc, uri, wsk, handler);
}
@@ -1520,76 +2345,76 @@ static int rsync_tree(const rcynic_ctx_t *rc, const char *uri)
* if the URI changes and we never visit the old URI again.
*/
static int prune_unauthenticated(const rcynic_ctx_t *rc,
- const char *name,
+ const path_t *name,
const size_t baselen)
{
- char path[FILENAME_MAX];
+ path_t path;
struct dirent *d;
size_t len;
DIR *dir;
int need_slash;
assert(rc && name && baselen > 0);
- len = strlen(name);
- assert(len >= baselen && len < sizeof(path));
- need_slash = name[len - 1] != '/';
+ len = strlen(name->s);
+ assert(len >= baselen && len < sizeof(path.s));
+ need_slash = name->s[len - 1] != '/';
- if (rsync_cached_string(rc, name + baselen)) {
- logmsg(rc, log_debug, "prune: cache hit for %s, not cleaning", name);
+ if (rsync_cached_string(rc, name->s + baselen)) {
+ logmsg(rc, log_debug, "prune: cache hit for %s, not cleaning", name->s);
return 1;
}
- if (rmdir(name) == 0) {
- logmsg(rc, log_debug, "prune: removed %s", name);
+ if (rmdir(name->s) == 0) {
+ logmsg(rc, log_debug, "prune: removed %s", name->s);
return 1;
}
switch (errno) {
case ENOENT:
- logmsg(rc, log_debug, "prune: nonexistant %s", name);
+ logmsg(rc, log_debug, "prune: nonexistant %s", name->s);
return 1;
case ENOTEMPTY:
break;
default:
- logmsg(rc, log_debug, "prune: other error %s: %s", name, strerror(errno));
+ logmsg(rc, log_debug, "prune: other error %s: %s", name->s, strerror(errno));
return 0;
}
- if ((dir = opendir(name)) == NULL)
+ if ((dir = opendir(name->s)) == NULL)
return 0;
while ((d = readdir(dir)) != NULL) {
if (d->d_name[0] == '.' && (d->d_name[1] == '\0' || (d->d_name[1] == '.' && d->d_name[2] == '\0')))
continue;
if (len + strlen(d->d_name) + need_slash >= sizeof(path)) {
- logmsg(rc, log_debug, "prune: %s%s%s too long", name, (need_slash ? "/" : ""), d->d_name);
+ logmsg(rc, log_debug, "prune: %s%s%s too long", name->s, (need_slash ? "/" : ""), d->d_name);
goto done;
}
- strcpy(path, name);
+ strcpy(path.s, name->s);
if (need_slash)
- strcat(path, "/");
- strcat(path, d->d_name);
+ strcat(path.s, "/");
+ strcat(path.s, d->d_name);
switch (d->d_type) {
case DT_DIR:
- if (!prune_unauthenticated(rc, path, baselen))
+ if (!prune_unauthenticated(rc, &path, baselen))
goto done;
continue;
default:
- if (rsync_cached_string(rc, path + baselen)) {
- logmsg(rc, log_debug, "prune: cache hit %s", path);
+ if (rsync_cached_string(rc, path.s + baselen)) {
+ logmsg(rc, log_debug, "prune: cache hit %s", path.s);
continue;
}
- if (unlink(path) < 0) {
- logmsg(rc, log_debug, "prune: removing %s failed: %s", path, strerror(errno));
+ if (unlink(path.s) < 0) {
+ logmsg(rc, log_debug, "prune: removing %s failed: %s", path.s, strerror(errno));
goto done;
}
- logmsg(rc, log_debug, "prune: removed %s", path);
+ logmsg(rc, log_debug, "prune: removed %s", path.s);
continue;
}
}
- if (rmdir(name) < 0 && errno != ENOTEMPTY)
- logmsg(rc, log_debug, "prune: couldn't remove %s: %s", name, strerror(errno));
+ if (rmdir(name->s) < 0 && errno != ENOTEMPTY)
+ logmsg(rc, log_debug, "prune: couldn't remove %s: %s", name->s, strerror(errno));
done:
closedir(dir);
@@ -1604,16 +2429,15 @@ static int prune_unauthenticated(const rcynic_ctx_t *rc,
* sets the hash buffer (if specified) as a side effect. The default
* hash algorithm is SHA-256.
*/
-static void *read_file_with_hash(const char *filename,
+static void *read_file_with_hash(const path_t *filename,
const ASN1_ITEM *it,
const EVP_MD *md,
- unsigned char *hash,
- const size_t hashlen)
+ hashbuf_t *hash)
{
void *result = NULL;
BIO *b;
- if ((b = BIO_new_file(filename, "rb")) == NULL)
+ if ((b = BIO_new_file(filename->s, "rb")) == NULL)
goto error;
if (hash != NULL) {
@@ -1634,8 +2458,8 @@ static void *read_file_with_hash(const char *filename,
goto error;
if (hash != NULL) {
- memset(hash, 0, hashlen);
- BIO_gets(b, (char *) hash, hashlen);
+ memset(hash, 0, sizeof(*hash));
+ BIO_gets(b, (char *) hash, sizeof(hash->h));
}
error:
@@ -1646,25 +2470,25 @@ static void *read_file_with_hash(const char *filename,
/**
* Read and hash a certificate.
*/
-static X509 *read_cert(const char *filename, unsigned char *hash, const size_t hashlen)
+static X509 *read_cert(const path_t *filename, hashbuf_t *hash)
{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(X509), NULL, hash, hashlen);
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(X509), NULL, hash);
}
/**
* Read and hash a CRL.
*/
-static X509_CRL *read_crl(const char *filename, unsigned char *hash, const size_t hashlen)
+static X509_CRL *read_crl(const path_t *filename, hashbuf_t *hash)
{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(X509_CRL), NULL, hash, hashlen);
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(X509_CRL), NULL, hash);
}
/**
* Read and hash a CMS message.
*/
-static CMS_ContentInfo *read_cms(const char *filename, unsigned char *hash, const size_t hashlen)
+static CMS_ContentInfo *read_cms(const path_t *filename, hashbuf_t *hash)
{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(CMS_ContentInfo), NULL, hash, hashlen);
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(CMS_ContentInfo), NULL, hash);
}
@@ -1673,10 +2497,10 @@ static CMS_ContentInfo *read_cms(const char *filename, unsigned char *hash, cons
* Extract CRLDP data from a certificate.
*/
static void extract_crldp_uri(const rcynic_ctx_t *rc,
- const char *uri,
+ const uri_t *uri,
+ const object_generation_t generation,
const STACK_OF(DIST_POINT) *crldp,
- char *result,
- const int resultlen)
+ uri_t *result)
{
DIST_POINT *d;
int i;
@@ -1684,17 +2508,14 @@ static void extract_crldp_uri(const rcynic_ctx_t *rc,
assert(crldp);
if (sk_DIST_POINT_num(crldp) != 1) {
- logmsg(rc, log_data_err, "CRLDistributionPoints sequence length is %d (should be 1) for %s",
- sk_DIST_POINT_num(crldp), uri);
- mib_increment(rc, uri, malformed_crldp);
+ log_validation_status(rc, uri, malformed_crldp, generation);
return;
}
d = sk_DIST_POINT_value(crldp, 0);
if (d->reasons || d->CRLissuer || !d->distpoint || d->distpoint->type != 0) {
- logmsg(rc, log_data_err, "CRLDP does not match RPKI certificate profile for %s", uri);
- mib_increment(rc, uri, malformed_crldp);
+ log_validation_status(rc, uri, malformed_crldp, generation);
return;
}
@@ -1702,22 +2523,19 @@ static void extract_crldp_uri(const rcynic_ctx_t *rc,
GENERAL_NAME *n = sk_GENERAL_NAME_value(d->distpoint->name.fullname, i);
assert(n != NULL);
if (n->type != GEN_URI) {
- logmsg(rc, log_data_err, "CRLDP contains non-URI GeneralName for %s", uri);
- mib_increment(rc, uri, malformed_crldp);
+ log_validation_status(rc, uri, malformed_crldp, generation);
return;
}
if (!is_rsync((char *) n->d.uniformResourceIdentifier->data)) {
logmsg(rc, log_verbose, "Skipping non-rsync URI %s for %s",
- (char *) n->d.uniformResourceIdentifier->data, uri);
+ (char *) n->d.uniformResourceIdentifier->data, uri->s);
continue;
}
- if (resultlen <= n->d.uniformResourceIdentifier->length) {
- logmsg(rc, log_data_err, "Skipping improbably long URI %s for %s",
- (char *) n->d.uniformResourceIdentifier->data, uri);
- mib_increment(rc, uri, uri_too_long);
+ if (sizeof(result->s) <= n->d.uniformResourceIdentifier->length) {
+ log_validation_status(rc, uri, uri_too_long, generation);
continue;
}
- strcpy(result, (char *) n->d.uniformResourceIdentifier->data);
+ strcpy(result->s, (char *) n->d.uniformResourceIdentifier->data);
return;
}
}
@@ -1726,12 +2544,12 @@ static void extract_crldp_uri(const rcynic_ctx_t *rc,
* Extract SIA or AIA data from a certificate.
*/
static void extract_access_uri(const rcynic_ctx_t *rc,
- const char *uri,
+ const uri_t *uri,
+ const object_generation_t generation,
const AUTHORITY_INFO_ACCESS *xia,
const unsigned char *oid,
const int oidlen,
- char *result,
- const int resultlen)
+ uri_t *result)
{
int i;
@@ -1747,16 +2565,14 @@ static void extract_access_uri(const rcynic_ctx_t *rc,
continue;
if (!is_rsync((char *) a->location->d.uniformResourceIdentifier->data)) {
logmsg(rc, log_verbose, "Skipping non-rsync URI %s for %s",
- a->location->d.uniformResourceIdentifier->data, uri);
+ a->location->d.uniformResourceIdentifier->data, uri->s);
continue;
}
- if (resultlen <= a->location->d.uniformResourceIdentifier->length) {
- logmsg(rc, log_data_err, "Skipping improbably long URI %s for %s",
- a->location->d.uniformResourceIdentifier->data, uri);
- mib_increment(rc, uri, uri_too_long);
+ if (sizeof(result->s) <= a->location->d.uniformResourceIdentifier->length) {
+ log_validation_status(rc, uri, uri_too_long, generation);
continue;
}
- strcpy(result, (char *) a->location->d.uniformResourceIdentifier->data);
+ strcpy(result->s, (char *) a->location->d.uniformResourceIdentifier->data);
return;
}
}
@@ -1764,7 +2580,7 @@ static void extract_access_uri(const rcynic_ctx_t *rc,
/**
* Parse interesting stuff from a certificate.
*/
-static void parse_cert(const rcynic_ctx_t *rc, X509 *x, certinfo_t *c, const char *uri)
+static void parse_cert(const rcynic_ctx_t *rc, X509 *x, certinfo_t *c, const uri_t *uri, const object_generation_t generation)
{
STACK_OF(DIST_POINT) *crldp;
AUTHORITY_INFO_ACCESS *xia;
@@ -1773,23 +2589,22 @@ static void parse_cert(const rcynic_ctx_t *rc, X509 *x, certinfo_t *c, const cha
memset(c, 0, sizeof(*c));
c->ca = X509_check_ca(x) == 1;
-
- assert(strlen(uri) < sizeof(c->uri));
- strcpy(c->uri, uri);
+ c->uri = *uri;
+ c->generation = generation;
if ((xia = X509_get_ext_d2i(x, NID_info_access, NULL, NULL)) != NULL) {
- extract_access_uri(rc, uri, xia, id_ad_caIssuers, sizeof(id_ad_caIssuers), c->aia, sizeof(c->aia));
+ extract_access_uri(rc, uri, generation, xia, id_ad_caIssuers, sizeof(id_ad_caIssuers), &c->aia);
sk_ACCESS_DESCRIPTION_pop_free(xia, ACCESS_DESCRIPTION_free);
}
if ((xia = X509_get_ext_d2i(x, NID_sinfo_access, NULL, NULL)) != NULL) {
- extract_access_uri(rc, uri, xia, id_ad_caRepository, sizeof(id_ad_caRepository), c->sia, sizeof(c->sia));
- extract_access_uri(rc, uri, xia, id_ad_rpkiManifest, sizeof(id_ad_rpkiManifest), c->manifest, sizeof(c->manifest));
+ extract_access_uri(rc, uri, generation, xia, id_ad_caRepository, sizeof(id_ad_caRepository), &c->sia);
+ extract_access_uri(rc, uri, generation, xia, id_ad_rpkiManifest, sizeof(id_ad_rpkiManifest), &c->manifest);
sk_ACCESS_DESCRIPTION_pop_free(xia, ACCESS_DESCRIPTION_free);
}
if ((crldp = X509_get_ext_d2i(x, NID_crl_distribution_points, NULL, NULL)) != NULL) {
- extract_crldp_uri(rc, uri, crldp, c->crldp, sizeof(c->crldp));
+ extract_crldp_uri(rc, uri, generation, crldp, &c->crldp);
sk_DIST_POINT_pop_free(crldp, DIST_POINT_free);
}
}
@@ -1801,37 +2616,53 @@ static void parse_cert(const rcynic_ctx_t *rc, X509 *x, certinfo_t *c, const cha
*/
static X509_CRL *check_crl_1(const rcynic_ctx_t *rc,
- const char *uri,
- char *path, const int pathlen,
- const char *prefix,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
X509 *issuer,
const unsigned char *hash,
- const size_t hashlen)
+ const size_t hashlen,
+ const object_generation_t generation)
{
- unsigned char hashbuf[EVP_MAX_MD_SIZE];
+ hashbuf_t hashbuf;
X509_CRL *crl = NULL;
EVP_PKEY *pkey;
int ret;
- assert(uri && path && issuer && hashlen <= sizeof(hashbuf));
+ assert(uri && path && issuer);
- if (!uri_to_filename(rc, uri, path, pathlen, prefix))
+ if (!uri_to_filename(rc, uri, path, prefix))
goto punt;
+ if (hashlen > sizeof(hashbuf.h)) {
+ log_validation_status(rc, uri, hash_too_long, generation);
+ goto punt;
+ }
+
if (hash)
- crl = read_crl(path, hashbuf, sizeof(hashbuf));
+ crl = read_crl(path, &hashbuf);
else
- crl = read_crl(path, NULL, 0);
+ crl = read_crl(path, NULL);
if (!crl)
goto punt;
- if (hash && memcmp(hashbuf, hash, hashlen)) {
- reject(rc, uri, crl_digest_mismatch,
- "because digest of CRL did not match value from manifest");
+ if (hash && memcmp(hashbuf.h, hash, hashlen)) {
+ log_validation_status(rc, uri, crl_digest_mismatch, generation);
+ goto punt;
+ }
+
+ if (X509_cmp_current_time(X509_CRL_get_lastUpdate(crl)) > 0) {
+ log_validation_status(rc, uri, crl_not_yet_valid, generation);
goto punt;
}
+ if (X509_cmp_current_time(X509_CRL_get_nextUpdate(crl)) < 0) {
+ log_validation_status(rc, uri, stale_crl, generation);
+ if (!rc->allow_stale_crl)
+ goto punt;
+ }
+
if ((pkey = X509_get_pubkey(issuer)) == NULL)
goto punt;
ret = X509_CRL_verify(crl, pkey);
@@ -1850,38 +2681,34 @@ static X509_CRL *check_crl_1(const rcynic_ctx_t *rc,
* and check issuer's signature if we don't.
*/
static X509_CRL *check_crl(const rcynic_ctx_t *rc,
- const char *uri,
+ const uri_t *uri,
X509 *issuer,
const unsigned char *hash,
const size_t hashlen)
{
- char path[FILENAME_MAX];
+ path_t path;
X509_CRL *crl;
- if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
- (crl = read_crl(path, NULL, 0)) != NULL)
+ if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
+ (crl = read_crl(&path, NULL)) != NULL)
return crl;
- logmsg(rc, log_telemetry, "Checking CRL %s", uri);
-
- assert(rsync_cached_uri(rc, uri));
+ logmsg(rc, log_telemetry, "Checking CRL %s", uri->s);
- if ((crl = check_crl_1(rc, uri, path, sizeof(path), rc->unauthenticated,
- issuer, hash, hashlen))) {
- install_object(rc, uri, path);
- mib_increment(rc, uri, current_crl_accepted);
+ if ((crl = check_crl_1(rc, uri, &path, &rc->unauthenticated,
+ issuer, hash, hashlen, object_generation_current))) {
+ install_object(rc, uri, &path, object_accepted, object_generation_current);
return crl;
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri, current_crl_rejected);
+ } else if (!access(path.s, F_OK)) {
+ log_validation_status(rc, uri, object_rejected, object_generation_current);
}
- if ((crl = check_crl_1(rc, uri, path, sizeof(path), rc->old_authenticated,
- issuer, hash, hashlen))) {
- install_object(rc, uri, path);
- mib_increment(rc, uri, backup_crl_accepted);
+ if ((crl = check_crl_1(rc, uri, &path, &rc->old_authenticated,
+ issuer, hash, hashlen, object_generation_backup))) {
+ install_object(rc, uri, &path, object_accepted, object_generation_backup);
return crl;
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri, backup_crl_rejected);
+ } else if (!access(path.s, F_OK)) {
+ log_validation_status(rc, uri, object_rejected, object_generation_backup);
}
return NULL;
@@ -1890,12 +2717,93 @@ static X509_CRL *check_crl(const rcynic_ctx_t *rc,
/**
+ * Check whether extensions in a certificate are allowed by profile.
+ * Also returns failure in a few null-pointer cases that can't
+ * possibly conform to profile.
+ */
+static int check_allowed_extensions(const X509 *x, const int allow_eku)
+{
+ int i;
+
+ if (x == NULL || x->cert_info == NULL || x->cert_info->extensions == NULL)
+ return 0;
+
+ for (i = 0; i < sk_X509_EXTENSION_num(x->cert_info->extensions); i++) {
+ switch (OBJ_obj2nid(sk_X509_EXTENSION_value(x->cert_info->extensions,
+ i)->object)) {
+ case NID_basic_constraints:
+ case NID_subject_key_identifier:
+ case NID_authority_key_identifier:
+ case NID_key_usage:
+ case NID_crl_distribution_points:
+ case NID_info_access:
+ case NID_sinfo_access:
+ case NID_certificate_policies:
+ case NID_sbgp_ipAddrBlock:
+ case NID_sbgp_autonomousSysNum:
+ continue;
+ case NID_ext_key_usage:
+ if (allow_eku)
+ continue;
+ else
+ return 0;
+ default:
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * Check whether a Distinguished Name conforms to the rescert profile.
+ * The profile is very restrictive: it only allows one mandatory
+ * CommonName field and one optional SerialNumber field, both of which
+ * must be of type PrintableString.
+ */
+static int check_allowed_dn(X509_NAME *dn)
+{
+ X509_NAME_ENTRY *ne;
+ ASN1_STRING *s;
+ int loc;
+
+ if (dn == NULL)
+ return 0;
+
+ switch (X509_NAME_entry_count(dn)) {
+
+ case 2:
+ if ((loc = X509_NAME_get_index_by_NID(dn, NID_serialNumber, -1)) < 0 ||
+ (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
+ (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
+ ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
+ return 0;
+
+ /* Fall through */
+
+ case 1:
+ if ((loc = X509_NAME_get_index_by_NID(dn, NID_commonName, -1)) < 0 ||
+ (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
+ (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
+ ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
+ return 0;
+
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+
+
+/**
* Validation callback function for use with x509_verify_cert().
*/
static int check_x509_cb(int ok, X509_STORE_CTX *ctx)
{
rcynic_x509_store_ctx_t *rctx = (rcynic_x509_store_ctx_t *) ctx;
- mib_counter_t counter;
+ mib_counter_t code;
assert(rctx != NULL);
@@ -1913,25 +2821,20 @@ static int check_x509_cb(int ok, X509_STORE_CTX *ctx)
case X509_V_ERR_CRL_HAS_EXPIRED:
/*
- * This may not be an error at all. CRLs don't really "expire",
- * although the signatures over them do. What OpenSSL really
- * means by this error is just "it's now later than this source
- * said it intended to publish a new CRL. Unclear whether this
- * should be an error; current theory is that it should not be.
+ * This isn't really an error, exactly. CRLs don't really
+ * "expire". What OpenSSL really means by this error is just
+ * "it's now later than the issuer said it intended to publish a
+ * new CRL". Whether we treat this as an error or not is
+ * configurable, see the allow_stale_crl parameter.
+ *
+ * Deciding whether to allow stale CRLs is check_crl_1()'s job,
+ * not ours. By the time this callback occurs, we've already
+ * accepted the CRL; this callback is just notifying us that the
+ * object being checked is tainted by a stale CRL. So we mark the
+ * object as tainted and carry on.
*/
- if (rctx->rc->allow_stale_crl) {
- ok = 1;
- if (sk_OPENSSL_STRING_find(rctx->rc->stale_cache, rctx->subject->crldp) >= 0)
- return ok;
- if (!sk_OPENSSL_STRING_push_strdup(rctx->rc->stale_cache, rctx->subject->crldp))
- logmsg(rctx->rc, log_sys_err,
- "Couldn't cache stale CRLDP %s, blundering onward", rctx->subject->crldp);
- }
- logmsg(rctx->rc, log_data_err, "Stale CRL %s", rctx->subject->crldp);
- if (ok)
- mib_increment(rctx->rc, rctx->subject->uri, stale_crl);
- else
- reject(rctx->rc, rctx->subject->uri, stale_crl, "due to stale CRL %s", rctx->subject->crldp);
+ log_validation_status(rctx->rc, &rctx->subject->uri, tainted_by_stale_crl, rctx->subject->generation);
+ ok = 1;
return ok;
case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
@@ -1950,11 +2853,7 @@ static int check_x509_cb(int ok, X509_STORE_CTX *ctx)
*/
if (rctx->rc->allow_non_self_signed_trust_anchor)
ok = 1;
- if (ok)
- mib_increment(rctx->rc, rctx->subject->uri, trust_anchor_not_self_signed);
- else
- reject(rctx->rc, rctx->subject->uri, trust_anchor_not_self_signed,
- "because trust anchor was not self-signed");
+ log_validation_status(rctx->rc, &rctx->subject->uri, trust_anchor_not_self_signed, rctx->subject->generation);
return ok;
/*
@@ -1964,106 +2863,144 @@ static int check_x509_cb(int ok, X509_STORE_CTX *ctx)
*/
#define QV(x) \
case x: \
- counter = mib_openssl_##x; \
+ code = mib_openssl_##x; \
break;
MIB_COUNTERS_FROM_OPENSSL;
#undef QV
default:
- counter = unknown_verify_error;
+ code = unknown_verify_error;
break;
}
- if (ok)
- mib_increment(rctx->rc, rctx->subject->uri, counter);
- else
- reject(rctx->rc, rctx->subject->uri, counter,
- "due to validation failure at depth %d: %s",
- ctx->error_depth,
- X509_verify_cert_error_string(ctx->error));
-
+ log_validation_status(rctx->rc, &rctx->subject->uri, code, rctx->subject->generation);
return ok;
}
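
/*
 * Editorial sketch, not part of this patch: the QV() switch above is
 * the usual X-macro idiom.  MIB_COUNTERS_FROM_OPENSSL is assumed to
 * expand to a list of QV(<OpenSSL verify error>) entries, so one list
 * can generate both the mib_openssl_* counter names and the switch
 * cases that map OpenSSL errors onto them.  A toy version of the
 * idiom (names here are made up for illustration):
 */
#define TOY_OPENSSL_ERRORS \
  QV(X509_V_ERR_CERT_HAS_EXPIRED) \
  QV(X509_V_ERR_CERT_NOT_YET_VALID)

enum toy_counter {
#define QV(x) toy_##x,
  TOY_OPENSSL_ERRORS
#undef QV
  toy_counter_max
};

/* A switch arm like the one above would be generated from the same
   list with "#define QV(x) case x: code = toy_##x; break;". */
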
/**
- * Check crypto aspects of a certificate, including policy checks
- * and RFC 3779 path validation.
+ * Check crypto aspects of a certificate, policy OID, RFC 3779 path
+ * validation, and conformance to the RPKI certificate profile.
*/
static int check_x509(const rcynic_ctx_t *rc,
STACK_OF(X509) *certs,
X509 *x,
- const certinfo_t *subject)
+ const certinfo_t *subject,
+ const certinfo_t *issuer_certinfo)
{
rcynic_x509_store_ctx_t rctx;
STACK_OF(X509_CRL) *crls = NULL;
EVP_PKEY *pkey = NULL;
X509_CRL *crl = NULL;
+ unsigned long flags = (X509_V_FLAG_POLICY_CHECK | X509_V_FLAG_EXPLICIT_POLICY | X509_V_FLAG_X509_STRICT);
X509 *issuer;
int ret = 0;
- assert(rc && certs && x && subject && subject->crldp[0]);
-
- issuer = sk_X509_value(certs, sk_X509_num(certs) - 1);
- assert(issuer != NULL);
+ assert(rc && certs && x && subject);
if (!X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, x, NULL))
return 0;
rctx.rc = rc;
rctx.subject = subject;
+ issuer = sk_X509_value(certs, sk_X509_num(certs) - 1);
+ assert(issuer != NULL);
+
+ if (subject->sia.s[0] && subject->sia.s[strlen(subject->sia.s) - 1] != '/') {
+ log_validation_status(rc, &subject->uri, malformed_sia, subject->generation);
+ goto done;
+ }
+
+ if (!subject->ta && !subject->aia.s[0]) {
+ log_validation_status(rc, &subject->uri, aia_missing, subject->generation);
+ goto done;
+ }
+
+ if (!issuer_certinfo->ta && strcmp(issuer_certinfo->uri.s, subject->aia.s)) {
+ log_validation_status(rc, &subject->uri, aia_mismatch, subject->generation);
+ goto done;
+ }
+
+ if (subject->ca && !subject->sia.s[0]) {
+ log_validation_status(rc, &subject->uri, sia_missing, subject->generation);
+ goto done;
+ }
+
+ if (subject->ca && !subject->manifest.s[0]) {
+ log_validation_status(rc, &subject->uri, manifest_missing, subject->generation);
+ goto done;
+ }
+
+ if (subject->ca && !startswith(subject->manifest.s, subject->sia.s)) {
+ log_validation_status(rc, &subject->uri, manifest_mismatch, subject->generation);
+ goto done;
+ }
+
+ if (!check_allowed_extensions(x, !subject->ca)) {
+ log_validation_status(rc, &subject->uri, disallowed_extension, subject->generation);
+ goto done;
+ }
+
+ if (!check_allowed_dn(X509_get_subject_name(x)))
+ log_validation_status(rc, &subject->uri, nonconformant_subject_name, subject->generation);
+
+ if (!check_allowed_dn(X509_get_issuer_name(x)))
+ log_validation_status(rc, &subject->uri, nonconformant_issuer_name, subject->generation);
+
if (subject->ta) {
- if (subject->crldp[0]) {
- reject(rc, subject->uri, trust_anchor_with_crldp,
- "because it's a trust anchor but has a CRLDP extension");
+ if (subject->crldp.s[0]) {
+ log_validation_status(rc, &subject->uri, trust_anchor_with_crldp, subject->generation);
goto done;
}
} else {
+ if (!subject->crldp.s[0]) {
+ log_validation_status(rc, &subject->uri, crldp_missing, subject->generation);
+ goto done;
+ }
+
+ if (!subject->ca && !startswith(subject->crldp.s, issuer_certinfo->sia.s)) {
+ log_validation_status(rc, &subject->uri, crldp_mismatch, subject->generation);
+ goto done;
+ }
+
+ flags |= X509_V_FLAG_CRL_CHECK;
+
if ((pkey = X509_get_pubkey(issuer)) == NULL || X509_verify(x, pkey) <= 0) {
- reject(rc, subject->uri, certificate_bad_signature,
- "because it failed signature check prior to CRL fetch");
+ log_validation_status(rc, &subject->uri, certificate_bad_signature, subject->generation);
goto done;
}
- if ((crl = check_crl(rc, subject->crldp, issuer, NULL, 0)) == NULL) {
- reject(rc, subject->uri, certificate_bad_crl,
- "due to bad CRL %s", subject->crldp);
+ if ((crl = check_crl(rc, &subject->crldp, issuer, NULL, 0)) == NULL) {
+ log_validation_status(rc, &subject->uri, certificate_bad_crl, subject->generation);
goto done;
}
- }
- if ((crls = sk_X509_CRL_new_null()) == NULL ||
- !sk_X509_CRL_push(crls, crl)) {
- logmsg(rc, log_sys_err,
- "Internal allocation error setting up CRL for validation");
- goto done;
+ if ((crls = sk_X509_CRL_new_null()) == NULL || !sk_X509_CRL_push(crls, crl)) {
+ logmsg(rc, log_sys_err,
+ "Internal allocation error setting up CRL for validation");
+ goto done;
+ }
+ crl = NULL;
+
+ X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
+
}
- crl = NULL;
X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
- X509_V_FLAG_CRL_CHECK |
- X509_V_FLAG_POLICY_CHECK |
- X509_V_FLAG_EXPLICIT_POLICY |
- X509_V_FLAG_X509_STRICT);
+ X509_VERIFY_PARAM_set_flags(rctx.ctx.param, flags);
X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- /*
- * Redundant error message?
- */
- logmsg(rc, log_data_err, "Validation failure for %s",
- subject->uri[0] ? subject->uri : subject->ta ? "[Trust anchor]" : "[???]");
+ if (X509_verify_cert(&rctx.ctx) <= 0) {
+ log_validation_status(rc, &subject->uri, certificate_failed_validation, subject->generation);
goto done;
}
- ret = 1;
+ ret = 1;
done:
sk_X509_CRL_pop_free(crls, X509_CRL_free);
@@ -2075,152 +3012,55 @@ static int check_x509(const rcynic_ctx_t *rc,
}
/**
- * Check whether extensions in a certificate are allowed by profile.
- * Also returns failure in a few null-pointer cases that can't
- * possibly conform to profile.
- */
-static int check_cert_only_allowed_extensions(const X509 *x, const int allow_eku)
-{
- int i;
-
- if (x == NULL || x->cert_info == NULL || x->cert_info->extensions == NULL)
- return 0;
-
- for (i = 0; i < sk_X509_EXTENSION_num(x->cert_info->extensions); i++) {
- switch (OBJ_obj2nid(sk_X509_EXTENSION_value(x->cert_info->extensions,
- i)->object)) {
- case NID_basic_constraints:
- case NID_subject_key_identifier:
- case NID_authority_key_identifier:
- case NID_key_usage:
- case NID_crl_distribution_points:
- case NID_info_access:
- case NID_sinfo_access:
- case NID_certificate_policies:
- case NID_sbgp_ipAddrBlock:
- case NID_sbgp_autonomousSysNum:
- continue;
- case NID_ext_key_usage:
- if (allow_eku)
- continue;
- else
- return 0;
- default:
- return 0;
- }
- }
-
- return 1;
-}
-
-/**
- * Check a certificate for conformance to the RPKI certificate profile.
+ * Load certificate, check against manifest, then run it through all
+ * the check_x509() tests.
*/
static X509 *check_cert_1(const rcynic_ctx_t *rc,
- const char *uri,
- char *path,
- const int pathlen,
- const char *prefix,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
STACK_OF(X509) *certs,
const certinfo_t *issuer,
certinfo_t *subject,
const unsigned char *hash,
- const size_t hashlen)
+ const size_t hashlen,
+ object_generation_t generation)
{
- unsigned char hashbuf[EVP_MAX_MD_SIZE];
+ hashbuf_t hashbuf;
X509 *x = NULL;
assert(uri && path && certs && issuer && subject);
- if (!uri_to_filename(rc, uri, path, pathlen, prefix)) {
- logmsg(rc, log_data_err, "Can't convert URI %s to filename", uri);
+ if (!uri_to_filename(rc, uri, path, prefix))
return NULL;
- }
- if (access(path, R_OK))
+ if (access(path->s, R_OK))
return NULL;
- if (hash)
- x = read_cert(path, hashbuf, sizeof(hashbuf));
- else
- x = read_cert(path, NULL, 0);
-
- if (!x) {
- logmsg(rc, log_sys_err, "Can't read certificate %s", path);
- goto punt;
- }
-
- if (hash && memcmp(hashbuf, hash, hashlen)) {
- reject(rc, uri, certificate_digest_mismatch,
- "because digest did not match value in manifest");
- goto punt;
- }
-
- parse_cert(rc, x, subject, uri);
-
- if (subject->sia[0] && subject->sia[strlen(subject->sia) - 1] != '/') {
- reject(rc, uri, malformed_sia,
- "due to malformed SIA %s", subject->sia);
- goto punt;
- }
-
- if (!subject->aia[0]) {
- reject(rc, uri, aia_missing, "due to missing AIA extension");
- goto punt;
- }
-
- if (!issuer->ta && strcmp(issuer->uri, subject->aia)) {
- reject(rc, uri, aia_mismatch,
- "because AIA %s doesn't match parent", subject->aia);
- goto punt;
- }
-
- if (subject->ca && !subject->sia[0]) {
- reject(rc, uri, sia_missing,
- "because SIA extension repository pointer is missing");
- goto punt;
- }
-
- if (!subject->crldp[0]) {
- reject(rc, uri, crldp_missing, "because CRLDP extension is missing");
+ if (hashlen > sizeof(hashbuf.h)) {
+ log_validation_status(rc, uri, hash_too_long, generation);
goto punt;
}
- if (subject->ca && !startswith(subject->crldp, issuer->sia)) {
- reject(rc, uri, crldp_mismatch,
- "because CRLDP %s points outside issuer's publication point %s",
- subject->crldp, issuer->sia);
- goto punt;
- }
+ if (hash)
+ x = read_cert(path, &hashbuf);
+ else
+ x = read_cert(path, NULL);
- if (subject->ca && !subject->manifest[0]) {
- reject(rc, uri, manifest_missing,
- "because SIA extension manifest pointer is missing");
+ if (!x) {
+ logmsg(rc, log_sys_err, "Can't read certificate %s", path->s);
goto punt;
}
- if (subject->ca && !startswith(subject->manifest, subject->sia)) {
- reject(rc, uri, manifest_mismatch,
- "because SIA manifest %s points outside publication point %s",
- subject->manifest, subject->sia);
+ if (hash && memcmp(hashbuf.h, hash, hashlen)) {
+ log_validation_status(rc, uri, certificate_digest_mismatch, generation);
goto punt;
}
- if (!check_cert_only_allowed_extensions(x, !subject->ca)) {
- reject(rc, uri, disallowed_extension,
- "due to disallowed X.509v3 extension");
- goto punt;
- }
+ parse_cert(rc, x, subject, uri, generation);
- if (!check_x509(rc, certs, x, subject)) {
- /*
- * Redundant error message?
- */
- logmsg(rc, log_data_err, "Certificate %s failed validation", uri);
- goto punt;
- }
-
- return x;
+ if (check_x509(rc, certs, x, subject, issuer))
+ return x;
punt:
X509_free(x);
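
/*
 * Editorial sketch, not part of this patch: hash/hashlen in
 * check_cert_1() above are assumed to carry the SHA-256 digest that
 * the manifest lists for this file, so the digest-mismatch check is
 * conceptually equivalent to the helper below (name made up for
 * illustration).
 */
#include <string.h>
#include <openssl/evp.h>

static int manifest_digest_matches(const unsigned char *contents,
                                   size_t content_len,
                                   const unsigned char *hash,
                                   size_t hashlen)
{
  unsigned char md[EVP_MAX_MD_SIZE];
  unsigned int md_len = 0;

  if (!EVP_Digest(contents, content_len, md, &md_len, EVP_sha256(), NULL))
    return 0;

  return md_len == hashlen && memcmp(md, hash, hashlen) == 0;
}
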
@@ -2232,53 +3072,69 @@ static X509 *check_cert_1(const rcynic_ctx_t *rc,
* backup data from a previous run of this program.
*/
static X509 *check_cert(rcynic_ctx_t *rc,
- char *uri,
- STACK_OF(X509) *certs,
- const certinfo_t *issuer,
+ uri_t *uri,
+ STACK_OF(walk_ctx_t) *wsk,
certinfo_t *subject,
- const char *prefix,
- const int backup,
const unsigned char *hash,
const size_t hashlen)
{
- char path[FILENAME_MAX];
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ object_generation_t generation;
+ const certinfo_t *issuer = NULL;
+ STACK_OF(X509) *certs = NULL;
+ const path_t *prefix = NULL;
+ path_t path;
X509 *x;
- assert(rc && uri && certs && issuer && subject && prefix);
+ assert(rc && uri && wsk && w && subject);
+
+ issuer = &w->certinfo;
+
+ switch (w->state) {
+ case walk_state_current:
+ prefix = &rc->unauthenticated;
+ generation = object_generation_current;
+ break;
+ case walk_state_backup:
+ prefix = &rc->old_authenticated;
+ generation = object_generation_backup;
+ break;
+ default:
+ return NULL;
+ }
/*
* If target file already exists and we're not here to recheck with
* better data, just get out now.
*/
- if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
- !access(path, R_OK)) {
- if (backup || sk_OPENSSL_STRING_find(rc->backup_cache, uri) < 0)
+ if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
+ !access(path.s, R_OK)) {
+ if (w->state == walk_state_backup || sk_OPENSSL_STRING_find(rc->backup_cache, uri->s) < 0)
return NULL;
- mib_increment(rc, uri, current_cert_recheck);
- logmsg(rc, log_telemetry, "Rechecking %s", uri);
+ assert(generation == object_generation_current);
+ log_validation_status(rc, uri, current_cert_recheck, generation);
+ logmsg(rc, log_telemetry, "Rechecking %s", uri->s);
} else {
- logmsg(rc, log_telemetry, "Checking %s", uri);
+ logmsg(rc, log_telemetry, "Checking %s", uri->s);
}
- rc->indent++;
+ if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
+ return NULL;
- if ((x = check_cert_1(rc, uri, path, sizeof(path), prefix,
- certs, issuer, subject, hash, hashlen)) != NULL) {
- install_object(rc, uri, path);
- mib_increment(rc, uri,
- (backup ? backup_cert_accepted : current_cert_accepted));
- if (!backup)
- sk_OPENSSL_STRING_remove(rc->backup_cache, uri);
- else if (!sk_OPENSSL_STRING_push_strdup(rc->backup_cache, uri))
- logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri);
+ if ((x = check_cert_1(rc, uri, &path, prefix, certs, issuer, subject, hash, hashlen, generation)) != NULL) {
+ install_object(rc, uri, &path, object_accepted, generation);
+ if (w->state == walk_state_current)
+ sk_OPENSSL_STRING_remove(rc->backup_cache, uri->s);
+ else if (!sk_OPENSSL_STRING_push_strdup(rc->backup_cache, uri->s))
+ logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri->s);
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri,
- (backup ? backup_cert_rejected : current_cert_rejected));
+ } else if (!access(path.s, F_OK)) {
+ log_validation_status(rc, uri, object_rejected, generation);
}
- rc->indent--;
+ sk_X509_free(certs);
+ certs = NULL;
return x;
}
@@ -2289,11 +3145,11 @@ static X509 *check_cert(rcynic_ctx_t *rc,
* Read and check one manifest from disk.
*/
static Manifest *check_manifest_1(const rcynic_ctx_t *rc,
- const char *uri,
- char *path,
- const int pathlen,
- const char *prefix,
- STACK_OF(X509) *certs)
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
+ STACK_OF(X509) *certs,
+ const object_generation_t generation)
{
CMS_ContentInfo *cms = NULL;
const ASN1_OBJECT *eContentType = NULL;
@@ -2310,78 +3166,63 @@ static Manifest *check_manifest_1(const rcynic_ctx_t *rc,
assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
- if (!uri_to_filename(rc, uri, path, pathlen, prefix) ||
- (cms = read_cms(path, NULL, 0)) == NULL)
+ if (!uri_to_filename(rc, uri, path, prefix) ||
+ (cms = read_cms(path, NULL)) == NULL)
goto done;
if ((eContentType = CMS_get0_eContentType(cms)) == NULL ||
oid_cmp(eContentType, id_ct_rpkiManifest, sizeof(id_ct_rpkiManifest))) {
- reject(rc, uri, manifest_bad_econtenttype,
- "due to bad manifest eContentType");
+ log_validation_status(rc, uri, manifest_bad_econtenttype, generation);
goto done;
}
if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for manifest %s", uri);
+ logmsg(rc, log_sys_err, "Couldn't allocate BIO for manifest %s", uri->s);
goto done;
}
if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- reject(rc, uri, manifest_invalid_cms,
- "due to validation failure for manifest CMS message");
+ log_validation_status(rc, uri, manifest_invalid_cms, generation);
goto done;
}
if ((signers = CMS_get0_signers(cms)) == NULL || sk_X509_num(signers) != 1) {
- reject(rc, uri, manifest_missing_signer,
- "because could not couldn't extract manifest EE certificate from CMS");
+ log_validation_status(rc, uri, manifest_missing_signer, generation);
goto done;
}
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri);
+ parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
- if (!certinfo.crldp[0]) {
- reject(rc, uri, manifest_missing_crldp,
- "due to missing CRLDP in manifest EE certificate");
+ if (!certinfo.crldp.s[0]) {
+ log_validation_status(rc, uri, manifest_missing_crldp, generation);
goto done;
}
- if ((crl_tail = strrchr(certinfo.crldp, '/')) == NULL) {
- reject(rc, uri, manifest_malformed_crldp,
- "due to malformed CRLDP %s in manifest EE certificate",
- certinfo.crldp);
+ if ((crl_tail = strrchr(certinfo.crldp.s, '/')) == NULL) {
+ log_validation_status(rc, uri, manifest_malformed_crldp, generation);
goto done;
}
crl_tail++;
if ((manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL)) == NULL) {
- reject(rc, uri, manifest_decode_error, "because unable to decode manifest");
+ log_validation_status(rc, uri, manifest_decode_error, generation);
goto done;
}
if (manifest->version) {
- reject(rc, uri, manifest_wrong_version,
- "because manifest version should be defaulted zero, not %ld",
- ASN1_INTEGER_get(manifest->version));
+ log_validation_status(rc, uri, manifest_wrong_version, generation);
goto done;
}
if (X509_cmp_current_time(manifest->thisUpdate) > 0) {
- reject(rc, uri, manifest_not_yet_valid, "because manifest not yet valid");
+ log_validation_status(rc, uri, manifest_not_yet_valid, generation);
goto done;
}
- if (X509_cmp_current_time(manifest->nextUpdate) < 0 &&
- sk_OPENSSL_STRING_find(rc->stale_cache, uri) < 0) {
- if (!sk_OPENSSL_STRING_push_strdup(rc->stale_cache, uri))
- logmsg(rc, log_sys_err, "Couldn't cache stale manifest %s, blundering onward", uri);
- if (!rc->allow_stale_manifest) {
- reject(rc, uri, stale_manifest,
- "because it is a stale manifest");
+ if (X509_cmp_current_time(manifest->nextUpdate) < 0) {
+ log_validation_status(rc, uri, stale_manifest, generation);
+ if (!rc->allow_stale_manifest)
goto done;
- }
- logmsg(rc, log_data_err, "Stale manifest %s", uri);
- mib_increment(rc, uri, stale_manifest);
}
if (manifest->fileHashAlg == NULL ||
@@ -2393,23 +3234,20 @@ static Manifest *check_manifest_1(const rcynic_ctx_t *rc,
break;
if (fah) {
- crl = check_crl(rc, certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1),
+ crl = check_crl(rc, &certinfo.crldp,
+ sk_X509_value(certs, sk_X509_num(certs) - 1),
fah->hash->data, fah->hash->length);
- } else if (rc->require_crl_in_manifest) {
- reject(rc, uri, crl_not_in_manifest,
- "because CRL %s missing from manifest", certinfo.crldp);
- goto done;
} else {
- logmsg(rc, log_data_err, "Manifest %s is missing entry for CRL %s", uri, certinfo.crldp);
- mib_increment(rc, uri, crl_not_in_manifest);
- crl = check_crl(rc, certinfo.crldp,
- sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0);
+ log_validation_status(rc, uri, crl_not_in_manifest, generation);
+ if (rc->require_crl_in_manifest)
+ goto done;
+ crl = check_crl(rc, &certinfo.crldp,
+ sk_X509_value(certs, sk_X509_num(certs) - 1),
+ NULL, 0);
}
- if (!crl) {
- reject(rc, uri, manifest_bad_crl, "due to bad manifest CRL %s", certinfo.crldp);
+ if (!crl)
goto done;
- }
if ((crls = sk_X509_CRL_new_null()) == NULL || !sk_X509_CRL_push(crls, crl))
goto done;
@@ -2437,8 +3275,7 @@ static Manifest *check_manifest_1(const rcynic_ctx_t *rc,
/*
* Redundant error message?
*/
- logmsg(rc, log_data_err, "Validation failure for manifest %s EE certificate",uri);
- mib_increment(rc, uri, manifest_invalid_ee);
+ log_validation_status(rc, uri, manifest_invalid_ee, generation);
goto done;
}
@@ -2462,16 +3299,22 @@ static Manifest *check_manifest_1(const rcynic_ctx_t *rc,
* and check issuer's signature if we don't.
*/
static Manifest *check_manifest(const rcynic_ctx_t *rc,
- const char *uri,
- STACK_OF(X509) *certs)
+ STACK_OF(walk_ctx_t) *wsk)
{
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
CMS_ContentInfo *cms = NULL;
Manifest *manifest = NULL;
- char path[FILENAME_MAX];
+ STACK_OF(X509) *certs = NULL;
BIO *bio = NULL;
+ path_t path;
+ uri_t *uri;
+
+ assert(rc && wsk && w);
- if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
- (cms = read_cms(path, NULL, 0)) != NULL &&
+ uri = &w->certinfo.manifest;
+
+ if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
+ (cms = read_cms(&path, NULL)) != NULL &&
(bio = BIO_new(BIO_s_mem()))!= NULL &&
CMS_verify(cms, NULL, NULL, NULL, bio,
CMS_NO_SIGNER_CERT_VERIFY |
@@ -2485,29 +3328,31 @@ static Manifest *check_manifest(const rcynic_ctx_t *rc,
if (manifest != NULL)
return manifest;
- logmsg(rc, log_telemetry, "Checking manifest %s", uri);
+ logmsg(rc, log_telemetry, "Checking manifest %s", uri->s);
- assert(rsync_cached_uri(rc, uri));
+ if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
+ return NULL;
- if ((manifest = check_manifest_1(rc, uri, path, sizeof(path),
- rc->unauthenticated, certs))) {
- install_object(rc, uri, path);
- mib_increment(rc, uri, current_manifest_accepted);
- return manifest;
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri, current_manifest_rejected);
+ if (manifest == NULL) {
+ if ((manifest = check_manifest_1(rc, uri, &path,
+ &rc->unauthenticated, certs, object_generation_current)) != NULL)
+ install_object(rc, uri, &path, object_accepted, object_generation_current);
+ else if (!access(path.s, F_OK))
+ log_validation_status(rc, uri, object_rejected, object_generation_current);
}
- if ((manifest = check_manifest_1(rc, uri, path, sizeof(path),
- rc->old_authenticated, certs))) {
- install_object(rc, uri, path);
- mib_increment(rc, uri, backup_manifest_accepted);
- return manifest;
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri, backup_manifest_rejected);
+ if (manifest == NULL) {
+ if ((manifest = check_manifest_1(rc, uri, &path,
+ &rc->old_authenticated, certs, object_generation_backup)) != NULL)
+ install_object(rc, uri, &path, object_accepted, object_generation_backup);
+ else if (!access(path.s, F_OK))
+ log_validation_status(rc, uri, object_rejected, object_generation_backup);
}
- return NULL;
+ sk_X509_free(certs);
+ certs = NULL;
+
+ return manifest;
}
@@ -2550,21 +3395,22 @@ static int extract_roa_prefix(unsigned char *addr,
* Read and check one ROA from disk.
*/
static int check_roa_1(const rcynic_ctx_t *rc,
- const char *uri,
- char *path,
- const int pathlen,
- const char *prefix,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
STACK_OF(X509) *certs,
const unsigned char *hash,
- const size_t hashlen)
+ const size_t hashlen,
+ const object_generation_t generation)
{
- unsigned char hashbuf[EVP_MAX_MD_SIZE], addrbuf[ADDR_RAW_BUF_LEN];
+ unsigned char addrbuf[ADDR_RAW_BUF_LEN];
const ASN1_OBJECT *eContentType = NULL;
STACK_OF(IPAddressFamily) *roa_resources = NULL, *ee_resources = NULL;
STACK_OF(X509_CRL) *crls = NULL;
STACK_OF(X509) *signers = NULL;
CMS_ContentInfo *cms = NULL;
X509_CRL *crl = NULL;
+ hashbuf_t hashbuf;
ROA *roa = NULL;
BIO *bio = NULL;
rcynic_x509_store_ctx_t rctx;
@@ -2576,58 +3422,58 @@ static int check_roa_1(const rcynic_ctx_t *rc,
assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
- if (!uri_to_filename(rc, uri, path, pathlen, prefix))
+ if (!uri_to_filename(rc, uri, path, prefix))
goto error;
+ if (hashlen > sizeof(hashbuf.h)) {
+ log_validation_status(rc, uri, hash_too_long, generation);
+ goto error;
+ }
+
if (hash)
- cms = read_cms(path, hashbuf, sizeof(hashbuf));
+ cms = read_cms(path, &hashbuf);
else
- cms = read_cms(path, NULL, 0);
+ cms = read_cms(path, NULL);
if (!cms)
goto error;
- if (hash && memcmp(hashbuf, hash, hashlen)) {
- reject(rc, uri, roa_digest_mismatch,
- "because ROA does not match manifest digest");
+ if (hash && memcmp(hashbuf.h, hash, hashlen)) {
+ log_validation_status(rc, uri, roa_digest_mismatch, generation);
goto error;
}
if (!(eContentType = CMS_get0_eContentType(cms)) ||
oid_cmp(eContentType, id_ct_routeOriginAttestation,
sizeof(id_ct_routeOriginAttestation))) {
- reject(rc, uri, roa_bad_econtenttype,
- "because ROA has bad eContentType");
+ log_validation_status(rc, uri, roa_bad_econtenttype, generation);
goto error;
}
if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for ROA %s", uri);
+ logmsg(rc, log_sys_err, "Couldn't allocate BIO for ROA %s", uri->s);
goto error;
}
if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- reject(rc, uri, roa_invalid_cms, "because ROA CMS failed validation");
+ log_validation_status(rc, uri, roa_invalid_cms, generation);
goto error;
}
if (!(signers = CMS_get0_signers(cms)) || sk_X509_num(signers) != 1) {
- reject(rc, uri, roa_missing_signer,
- "because couldn't extract CMS signer from ROA");
+ log_validation_status(rc, uri, roa_missing_signer, generation);
goto error;
}
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri);
+ parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
if (!(roa = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), bio, NULL))) {
- reject(rc, uri, roa_decode_error, "because could not decode ROA");
+ log_validation_status(rc, uri, roa_decode_error, generation);
goto error;
}
if (roa->version) {
- reject(rc, uri, roa_wrong_version,
- "because ROA version should be defaulted zero, not %ld",
- ASN1_INTEGER_get(roa->version));
+ log_validation_status(rc, uri, roa_wrong_version, generation);
goto error;
}
@@ -2648,9 +3494,7 @@ static int check_roa_1(const rcynic_ctx_t *rc,
for (i = 0; i < sk_ROAIPAddressFamily_num(roa->ipAddrBlocks); i++) {
rf = sk_ROAIPAddressFamily_value(roa->ipAddrBlocks, i);
if (!rf || !rf->addressFamily || rf->addressFamily->length < 2 || rf->addressFamily->length > 3) {
- reject(rc, uri, malformed_roa_addressfamily,
- "because ROA addressFamily length should be 2 or 3, not %lu",
- (unsigned long) rf->addressFamily->length);
+ log_validation_status(rc, uri, malformed_roa_addressfamily, generation);
goto error;
}
afi = (rf->addressFamily->data[0] << 8) | (rf->addressFamily->data[1]);
@@ -2661,8 +3505,7 @@ static int check_roa_1(const rcynic_ctx_t *rc,
if (!ra ||
!extract_roa_prefix(addrbuf, &prefixlen, ra->IPAddress, afi) ||
!v3_addr_add_prefix(roa_resources, afi, safi, addrbuf, prefixlen)) {
- reject(rc, uri, roa_resources_malformed,
- "because ROA resources appear malformed");
+ log_validation_status(rc, uri, roa_resources_malformed, generation);
goto error;
}
}
@@ -2682,8 +3525,7 @@ static int check_roa_1(const rcynic_ctx_t *rc,
IPAddressFamily *f = sk_IPAddressFamily_value(roa_resources, i);
if ((afi = v3_addr_get_afi(f)) == 0) {
- reject(rc, uri, roa_bad_afi,
- "because found bad AFI while extracting data from ROA");
+ log_validation_status(rc, uri, roa_bad_afi, generation);
goto error;
}
@@ -2701,7 +3543,7 @@ static int check_roa_1(const rcynic_ctx_t *rc,
if ((length = v3_addr_get_range(a, afi, a_min, a_max, ADDR_RAW_BUF_LEN)) == 0 ||
(length = v3_addr_get_range(b, afi, b_min, b_max, ADDR_RAW_BUF_LEN)) == 0) {
- reject(rc, uri, roa_resources_malformed, "because ROA resources appear malformed");
+ log_validation_status(rc, uri, roa_resources_malformed, generation);
goto error;
}
@@ -2715,18 +3557,17 @@ static int check_roa_1(const rcynic_ctx_t *rc,
}
if (!v3_addr_canonize(roa_resources)) {
- reject(rc, uri, roa_resources_malformed, "because ROA resources appear malformed");
+ log_validation_status(rc, uri, roa_resources_malformed, generation);
goto error;
}
if (!v3_addr_subset(roa_resources, ee_resources)) {
- reject(rc, uri, roa_not_nested,
- "because ROA's resources are not a subset of its signing EE certificate's resources");
+ log_validation_status(rc, uri, roa_not_nested, generation);
goto error;
}
- if (!(crl = check_crl(rc, certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
- reject(rc, uri, roa_bad_crl, "because ROA EE certificate has bad CRL %s", certinfo.crldp);
+ if (!(crl = check_crl(rc, &certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
+ log_validation_status(rc, uri, roa_bad_crl, generation);
goto error;
}
@@ -2756,8 +3597,7 @@ static int check_roa_1(const rcynic_ctx_t *rc,
/*
* Redundant error message?
*/
- logmsg(rc, log_data_err, "Validation failure for ROA %s EE certificate",uri);
- mib_increment(rc, uri, roa_invalid_ee);
+ log_validation_status(rc, uri, roa_invalid_ee, generation);
goto error;
}
@@ -2782,38 +3622,43 @@ static int check_roa_1(const rcynic_ctx_t *rc,
* and check issuer's signature if we don't.
*/
static void check_roa(const rcynic_ctx_t *rc,
- const char *uri,
- STACK_OF(X509) *certs,
+ const uri_t *uri,
+ STACK_OF(walk_ctx_t) *wsk,
const unsigned char *hash,
const size_t hashlen)
{
- char path[FILENAME_MAX];
+ STACK_OF(X509) *certs = NULL;
+ path_t path;
- if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
- !access(path, F_OK))
- return;
+ assert(rc && uri && wsk);
- logmsg(rc, log_telemetry, "Checking ROA %s", uri);
+ if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
+ !access(path.s, F_OK))
+ return;
- assert(rsync_cached_uri(rc, uri));
+ logmsg(rc, log_telemetry, "Checking ROA %s", uri->s);
- if (check_roa_1(rc, uri, path, sizeof(path), rc->unauthenticated,
- certs, hash, hashlen)) {
- install_object(rc, uri, path);
- mib_increment(rc, uri, current_roa_accepted);
+ if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
return;
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri, current_roa_rejected);
+
+ if (check_roa_1(rc, uri, &path, &rc->unauthenticated,
+ certs, hash, hashlen, object_generation_current)) {
+ install_object(rc, uri, &path, object_accepted, object_generation_current);
+ goto done;
+ } else if (!access(path.s, F_OK)) {
+ log_validation_status(rc, uri, object_rejected, object_generation_current);
}
- if (check_roa_1(rc, uri, path, sizeof(path), rc->old_authenticated,
- certs, hash, hashlen)) {
- install_object(rc, uri, path);
- mib_increment(rc, uri, backup_roa_accepted);
- return;
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri, backup_roa_rejected);
+ if (check_roa_1(rc, uri, &path, &rc->old_authenticated,
+ certs, hash, hashlen, object_generation_backup)) {
+ install_object(rc, uri, &path, object_accepted, object_generation_backup);
+ goto done;
+ } else if (!access(path.s, F_OK)) {
+ log_validation_status(rc, uri, object_rejected, object_generation_backup);
}
+
+ done:
+ sk_X509_free(certs);
}
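
/*
 * Editorial sketch, not part of this patch: the addressFamily OCTET
 * STRING in a ROA is a two-byte AFI (1 = IPv4, 2 = IPv6) optionally
 * followed by a one-byte SAFI, which is why check_roa_1() above
 * insists on a length of 2 or 3 before assembling the AFI with a
 * shift-and-or.  Helper name made up for illustration.
 */
static int parse_roa_afi_safi(const unsigned char *data, int len,
                              unsigned *afi, unsigned *safi)
{
  if (data == NULL || len < 2 || len > 3)
    return 0;
  *afi  = (data[0] << 8) | data[1];
  *safi = (len == 3) ? data[2] : 0;
  return 1;
}
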
@@ -2822,20 +3667,20 @@ static void check_roa(const rcynic_ctx_t *rc,
* Read and check one Ghostbuster record from disk.
*/
static int check_ghostbuster_1(const rcynic_ctx_t *rc,
- const char *uri,
- char *path,
- const int pathlen,
- const char *prefix,
+ const uri_t *uri,
+ path_t *path,
+ const path_t *prefix,
STACK_OF(X509) *certs,
const unsigned char *hash,
- const size_t hashlen)
+ const size_t hashlen,
+ const object_generation_t generation)
{
- unsigned char hashbuf[EVP_MAX_MD_SIZE];
const ASN1_OBJECT *eContentType = NULL;
STACK_OF(X509_CRL) *crls = NULL;
STACK_OF(X509) *signers = NULL;
CMS_ContentInfo *cms = NULL;
X509_CRL *crl = NULL;
+ hashbuf_t hashbuf;
BIO *bio = NULL;
rcynic_x509_store_ctx_t rctx;
certinfo_t certinfo;
@@ -2843,28 +3688,31 @@ static int check_ghostbuster_1(const rcynic_ctx_t *rc,
assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
- if (!uri_to_filename(rc, uri, path, pathlen, prefix))
+ if (!uri_to_filename(rc, uri, path, prefix))
+ goto error;
+
+ if (hashlen > sizeof(hashbuf.h)) {
+ log_validation_status(rc, uri, hash_too_long, generation);
goto error;
+ }
if (hash)
- cms = read_cms(path, hashbuf, sizeof(hashbuf));
+ cms = read_cms(path, &hashbuf);
else
- cms = read_cms(path, NULL, 0);
+ cms = read_cms(path, NULL);
if (!cms)
goto error;
- if (hash && memcmp(hashbuf, hash, hashlen)) {
- reject(rc, uri, ghostbuster_digest_mismatch,
- "because Ghostbuster record does not match manifest digest");
+ if (hash && memcmp(hashbuf.h, hash, hashlen)) {
+ log_validation_status(rc, uri, ghostbuster_digest_mismatch, generation);
goto error;
}
if (!(eContentType = CMS_get0_eContentType(cms)) ||
oid_cmp(eContentType, id_ct_rpkiGhostbusters,
sizeof(id_ct_rpkiGhostbusters))) {
- reject(rc, uri, ghostbuster_bad_econtenttype,
- "because Ghostbuster record has bad eContentType");
+ log_validation_status(rc, uri, ghostbuster_bad_econtenttype, generation);
goto error;
}
@@ -2874,23 +3722,22 @@ static int check_ghostbuster_1(const rcynic_ctx_t *rc,
* just leave this NULL and the right thing should happen.
*/
if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for Ghostbuster record %s", uri);
+ logmsg(rc, log_sys_err, "Couldn't allocate BIO for Ghostbuster record %s", uri->s);
goto error;
}
#endif
if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- reject(rc, uri, ghostbuster_invalid_cms, "because Ghostbuster record CMS failed validation");
+ log_validation_status(rc, uri, ghostbuster_invalid_cms, generation);
goto error;
}
if (!(signers = CMS_get0_signers(cms)) || sk_X509_num(signers) != 1) {
- reject(rc, uri, ghostbuster_missing_signer,
- "because couldn't extract CMS signer from Ghostbuster record");
+ log_validation_status(rc, uri, ghostbuster_missing_signer, generation);
goto error;
}
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri);
+ parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
#if 0
/*
@@ -2899,8 +3746,8 @@ static int check_ghostbuster_1(const rcynic_ctx_t *rc,
*/
#endif
- if (!(crl = check_crl(rc, certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
- reject(rc, uri, ghostbuster_bad_crl, "because Ghostbuster record EE certificate has bad CRL %s", certinfo.crldp);
+ if (!(crl = check_crl(rc, &certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
+ log_validation_status(rc, uri, ghostbuster_bad_crl, generation);
goto error;
}
@@ -2927,11 +3774,7 @@ static int check_ghostbuster_1(const rcynic_ctx_t *rc,
X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
if (X509_verify_cert(&rctx.ctx) <= 0) {
- /*
- * Redundant error message?
- */
- logmsg(rc, log_data_err, "Validation failure for Ghostbuster record %s EE certificate", uri);
- mib_increment(rc, uri, ghostbuster_invalid_ee);
+ log_validation_status(rc, uri, ghostbuster_invalid_ee, generation);
goto error;
}
@@ -2953,217 +3796,277 @@ static int check_ghostbuster_1(const rcynic_ctx_t *rc,
* attempt to fetch it and check issuer's signature if we don't.
*/
static void check_ghostbuster(const rcynic_ctx_t *rc,
- const char *uri,
- STACK_OF(X509) *certs,
- const unsigned char *hash,
- const size_t hashlen)
+ const uri_t *uri,
+ STACK_OF(walk_ctx_t) *wsk,
+ const unsigned char *hash,
+ const size_t hashlen)
{
- char path[FILENAME_MAX];
+ STACK_OF(X509) *certs = NULL;
+ path_t path;
- if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
- !access(path, F_OK))
- return;
+ assert(rc && uri && wsk);
- logmsg(rc, log_telemetry, "Checking Ghostbuster record %s", uri);
+ if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
+ !access(path.s, F_OK))
+ return;
- assert(rsync_cached_uri(rc, uri));
+ logmsg(rc, log_telemetry, "Checking Ghostbuster record %s", uri->s);
- if (check_ghostbuster_1(rc, uri, path, sizeof(path), rc->unauthenticated,
- certs, hash, hashlen)) {
- install_object(rc, uri, path);
- mib_increment(rc, uri, current_ghostbuster_accepted);
+ if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
return;
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri, current_ghostbuster_rejected);
+
+ if (check_ghostbuster_1(rc, uri, &path, &rc->unauthenticated,
+ certs, hash, hashlen, object_generation_current)) {
+ install_object(rc, uri, &path, object_accepted, object_generation_current);
+ goto done;
+ } else if (!access(path.s, F_OK)) {
+ log_validation_status(rc, uri, object_rejected, object_generation_current);
}
- if (check_ghostbuster_1(rc, uri, path, sizeof(path), rc->old_authenticated,
- certs, hash, hashlen)) {
- install_object(rc, uri, path);
- mib_increment(rc, uri, backup_ghostbuster_accepted);
- return;
- } else if (!access(path, F_OK)) {
- mib_increment(rc, uri, backup_ghostbuster_rejected);
+ if (check_ghostbuster_1(rc, uri, &path, &rc->old_authenticated,
+ certs, hash, hashlen, object_generation_backup)) {
+ install_object(rc, uri, &path, object_accepted, object_generation_backup);
+ goto done;
+ } else if (!access(path.s, F_OK)) {
+ log_validation_status(rc, uri, object_rejected, object_generation_backup);
}
+
+ done:
+ sk_X509_free(certs);
}
-static void walk_cert(rcynic_ctx_t *rc,
- const certinfo_t *parent,
- STACK_OF(X509) *certs);
+static void walk_cert(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *);
/**
- * Recursive walk of certificate hierarchy (core of the program). The
- * daisy chain recursion is to avoid having to duplicate the stack
- * manipulation and error handling.
+ * rsync callback for fetching SIA tree.
*/
-static void walk_cert_1(rcynic_ctx_t *rc,
- char *uri,
- STACK_OF(X509) *certs,
- const certinfo_t *parent,
- const char *prefix,
- const int backup,
- const unsigned char *hash,
- const size_t hashlen)
+static void rsync_sia_callback(const rcynic_ctx_t *rc,
+ const rsync_ctx_t *ctx,
+ const rsync_status_t status,
+ const uri_t *uri,
+ STACK_OF(walk_ctx_t) *wsk)
{
- certinfo_t child;
- X509 *x;
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
- if ((x = check_cert(rc, uri, certs, parent, &child, prefix, backup, hash, hashlen)) == NULL)
- return;
+ assert(rc && wsk);
- if (!sk_X509_push(certs, x)) {
- logmsg(rc, log_sys_err,
- "Internal allocation failure recursing over certificate");
+ switch (status) {
+
+ case rsync_status_pending:
+ if (rsync_count_runable(rc) >= rc->max_parallel_fetches)
+ return;
+
+ assert(rsync_count_running(rc) < rc->max_parallel_fetches);
+
+ if ((wsk = walk_ctx_stack_clone(wsk)) == NULL) {
+ logmsg(rc, log_sys_err, "walk_ctx_stack_clone() failed, probably memory exhaustion, blundering onwards without forking stack");
+ return;
+ }
+
+ walk_ctx_stack_pop(wsk);
+ task_add(rc, walk_cert, wsk);
return;
+
+ case rsync_status_failed:
+ log_validation_status(rc, uri, rsync_failed, object_generation_null);
+ break;
+
+ case rsync_status_timed_out:
+ log_validation_status(rc, uri, rsync_timed_out, object_generation_null);
+ break;
+
+ case rsync_status_skipped:
+ log_validation_status(rc, uri, rsync_skipped, object_generation_null);
+ break;
+
+ case rsync_status_done:
+ break;
}
- walk_cert(rc, &child, certs);
- X509_free(sk_X509_pop(certs));
+ w->state++;
+ task_add(rc, walk_cert, wsk);
}
/**
- * Recursive walk of certificate hierarchy (core of the program). The
- * daisy chain recursion is to avoid having to duplicate the stack
- * manipulation and error handling.
+ * Recursive walk of certificate hierarchy (core of the program).
+ *
+ * Walk all products of the current certificate, starting with the
+ * ones named in the manifest and continuing with any that we find in
+ * the publication directory but which are not named in the manifest.
+ *
+ * Dispatch to the correct checking code for the object named by the
+ * URI, based on its filename extension.  CRLs are a special
+ * case because we've already checked them by the time we get here, so
+ * we just ignore them. Other objects are either certificates or
+ * CMS-signed objects of one kind or another.
*/
-static void walk_cert_2(rcynic_ctx_t *rc,
- char *uri,
- STACK_OF(X509) *certs,
- const certinfo_t *parent,
- const char *prefix,
- const int backup,
- const unsigned char *hash,
- const size_t hashlen)
+static void walk_cert(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
{
- if (endswith(uri, ".cer"))
- walk_cert_1(rc, uri, certs, parent, prefix, backup, hash, hashlen);
- else if (endswith(uri, ".roa"))
- check_roa(rc, uri, certs, hash, hashlen);
- else if (endswith(uri, ".gbr"))
- check_ghostbuster(rc, uri, certs, hash, hashlen);
- else if (!endswith(uri, ".crl"))
- logmsg(rc, log_telemetry, "Don't know how to check object %s, ignoring", uri);
-}
+ const unsigned char *hash = NULL;
+ object_generation_t generation;
+ size_t hashlen;
+ walk_ctx_t *w;
+ uri_t uri;
-/**
- * Recursive walk of certificate hierarchy (core of the program). The
- * daisy chain recursion is to avoid having to duplicate the stack
- * manipulation and error handling.
- */
-static void walk_cert_3(rcynic_ctx_t *rc,
- STACK_OF(X509) *certs,
- const certinfo_t *parent,
- const char *prefix,
- const int backup,
- Manifest *manifest)
-{
- char uri[URI_MAX], path[FILENAME_MAX];
- FileAndHash *fah;
- STACK_OF(OPENSSL_STRING) *stray_ducks = NULL;
- DIR *dir = NULL;
- struct dirent *d;
- int i;
+ assert(rc && wsk);
- /*
- * Pull all non-directory filenames from the publication point directory.
- */
- if ((stray_ducks = sk_OPENSSL_STRING_new(uri_cmp)) == NULL)
- logmsg(rc, log_sys_err, "Couldn't allocate stray_ducks stack");
- else if (!uri_to_filename(rc, parent->sia, path, sizeof(path), prefix) || (dir = opendir(path)) == NULL)
- logmsg(rc, log_data_err, "Couldn't list directory %s, skipping check for out-of-manifest data", path);
- else
- while ((d = readdir(dir)) != NULL)
- if (d->d_type != DT_DIR && !sk_OPENSSL_STRING_push_strdup(stray_ducks, d->d_name))
- logmsg(rc, log_sys_err, "Couldn't strdup() string \"%s\", blundering onwards", d->d_name);
+ while ((w = walk_ctx_stack_head(wsk)) != NULL) {
- if (dir != NULL)
- closedir(dir);
-
- /*
- * Loop over manifest, checking everything it lists. Remove any
- * filenames we find in the manifest from our list of objects found
- * in the publication point directory, so we don't check stuff twice.
- */
- for (i = 0; (fah = sk_FileAndHash_value(manifest->fileList, i)) != NULL; i++) {
- sk_OPENSSL_STRING_remove(stray_ducks, (char *) fah->file->data);
- if (strlen(parent->sia) + strlen((char *) fah->file->data) >= sizeof(uri)) {
- logmsg(rc, log_data_err, "URI %s%s too long, skipping", parent->sia, fah->file->data);
- } else {
- strcpy(uri, parent->sia);
- strcat(uri, (char *) fah->file->data);
- walk_cert_2(rc, uri, certs, parent, prefix, backup, fah->hash->data, fah->hash->length);
+ switch (w->state) {
+ case walk_state_current:
+ generation = object_generation_current;
+ break;
+ case walk_state_backup:
+ generation = object_generation_backup;
+ break;
+ default:
+ generation = object_generation_null;
+ break;
}
- }
- /*
- * Whine about and maybe check any object that was in the directory
- * but not in the manifest, except for the manifest itself.
- */
- for (i = 0; i < sk_OPENSSL_STRING_num(stray_ducks); i++) {
- char *s = sk_OPENSSL_STRING_value(stray_ducks, i);
- if (strlen(parent->sia) + strlen(s) >= sizeof(uri)) {
- logmsg(rc, log_data_err, "URI %s%s too long, skipping", parent->sia, s);
+ switch (w->state) {
+
+ case walk_state_initial:
+
+ if (!w->certinfo.sia.s[0] || !w->certinfo.ca) {
+ w->state = walk_state_done;
+ continue;
+ }
+
+ if (!w->certinfo.manifest.s[0]) {
+ log_validation_status(rc, &w->certinfo.uri, manifest_missing, w->certinfo.generation);
+ w->state = walk_state_done;
+ continue;
+ }
+
+ w->state++;
continue;
- }
- strcpy(uri, parent->sia);
- strcat(uri, s);
- if (!strcmp(uri, parent->manifest))
+
+ case walk_state_rsync:
+
+ rsync_tree(rc, &w->certinfo.sia, wsk, rsync_sia_callback);
+ return;
+
+ case walk_state_ready:
+
+ walk_ctx_loop_init(rc, wsk); /* sets w->state */
+ continue;
+
+ case walk_state_current:
+ case walk_state_backup:
+
+ if (!walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen)) {
+ walk_ctx_loop_next(rc, wsk);
+ continue;
+ }
+
+ if (endswith(uri.s, ".crl") || endswith(uri.s, ".mft") || endswith(uri.s, ".mnf")) {
+ walk_ctx_loop_next(rc, wsk);
+ continue; /* CRLs and manifests checked elsewhere */
+ }
+
+ if (hash == NULL)
+ log_validation_status(rc, &uri, tainted_by_not_being_in_manifest, generation);
+ else if (w->stale_manifest)
+ log_validation_status(rc, &uri, tainted_by_stale_manifest, generation);
+
+ if (hash == NULL && !rc->allow_object_not_in_manifest) {
+ walk_ctx_loop_next(rc, wsk);
+ continue;
+ }
+
+ if (endswith(uri.s, ".roa")) {
+ check_roa(rc, &uri, wsk, hash, hashlen);
+ walk_ctx_loop_next(rc, wsk);
+ continue;
+ }
+
+ if (endswith(uri.s, ".gbr")) {
+ check_ghostbuster(rc, &uri, wsk, hash, hashlen);
+ walk_ctx_loop_next(rc, wsk);
+ continue;
+ }
+
+ if (endswith(uri.s, ".cer")) {
+ certinfo_t subject;
+ X509 *x = check_cert(rc, &uri, wsk, &subject, hash, hashlen);
+ if (!walk_ctx_stack_push(wsk, x, &subject))
+ walk_ctx_loop_next(rc, wsk);
+ continue;
+ }
+
+ log_validation_status(rc, &uri, unknown_object_type_skipped, object_generation_null);
+ walk_ctx_loop_next(rc, wsk);
+ continue;
+
+ case walk_state_done:
+
+ walk_ctx_stack_pop(wsk); /* Resume our issuer's state */
continue;
- logmsg(rc, log_telemetry, "Object %s present in publication directory but not in manifest", uri);
- mib_increment(rc, uri, object_not_in_manifest);
- if (rc->allow_object_not_in_manifest)
- walk_cert_2(rc, uri, certs, parent, prefix, backup, NULL, 0);
+
+ }
}
- sk_OPENSSL_STRING_pop_free(stray_ducks, OPENSSL_STRING_free);
+ assert(walk_ctx_stack_head(wsk) == NULL);
+ walk_ctx_stack_free(wsk);
}
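
/*
 * Editorial sketch, not part of this patch: the walk_state values
 * used by walk_cert() and rsync_sia_callback() are assumed to be
 * declared as a simple linear progression along these lines, which
 * is why "w->state++" is enough to advance a walk context to its
 * next phase:
 */
typedef enum {
  walk_state_initial,           /* Nothing done yet */
  walk_state_rsync,             /* rsync of the SIA tree in progress */
  walk_state_ready,             /* rsync done, ready to loop over objects */
  walk_state_current,           /* Checking freshly fetched objects */
  walk_state_backup,            /* Checking objects saved from the last run */
  walk_state_done               /* Finished, pop back to the issuer */
} example_walk_state_t;
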
/**
- * Recursive walk of certificate hierarchy (core of the program). The
- * daisy chain recursion is to avoid having to duplicate the stack
- * manipulation and error handling.
+ * Check a trust anchor. Yes, we trust it, by definition, but it
+ * still needs to conform to the certificate profile, have a correct
+ * self-signature, and so on.
*/
-static void walk_cert(rcynic_ctx_t *rc,
- const certinfo_t *parent,
- STACK_OF(X509) *certs)
+static void check_ta(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
{
- assert(parent && certs);
-
- if (parent->sia[0] && parent->ca) {
- int n_cert = sk_X509_num(certs);
- Manifest *manifest = NULL;
+ STACK_OF(X509) *certs = walk_ctx_stack_certs(wsk);
+ walk_ctx_t *w = walk_ctx_stack_head(wsk);
+ int ok = 0;
- rc->indent++;
+ if (certs != NULL && w != NULL)
+ ok = check_x509(rc, certs, w->cert, &w->certinfo, &w->certinfo);
- rsync_tree(rc, parent->sia);
+ sk_X509_free(certs);
- if (!parent->manifest[0]) {
-
- logmsg(rc, log_data_err, "Parent certificate does not specify a manifest, skipping collection");
-
- } else if ((manifest = check_manifest(rc, parent->manifest, certs)) == NULL) {
-
- logmsg(rc, log_data_err, "Couldn't get manifest %s, skipping collection", parent->manifest);
+ if (!ok)
+ return;
- } else {
+ task_add(rc, walk_cert, wsk);
- logmsg(rc, log_debug, "Walking unauthenticated store");
- walk_cert_3(rc, certs, parent, rc->unauthenticated, 0, manifest);
- logmsg(rc, log_debug, "Done walking unauthenticated store");
+ while (sk_task_t_num(rc->task_queue) > 0 || sk_rsync_ctx_t_num(rc->rsync_queue) > 0) {
+ task_run_q(rc);
+ rsync_mgr(rc);
+ }
+}
- logmsg(rc, log_debug, "Walking old authenticated store");
- walk_cert_3(rc, certs, parent, rc->old_authenticated, 1, manifest);
- logmsg(rc, log_debug, "Done walking old authenticated store");
+
- Manifest_free(manifest);
- }
+/**
+ * Read a trust anchor from disk and compare with known public key.
+ * NB: EVP_PKEY_cmp() returns 1 for match, not 0 like every other
+ * xyz_cmp() function in the entire OpenSSL library. Go figure.
+ */
+static X509 *read_ta(const rcynic_ctx_t *rc, const uri_t *uri, const path_t *path, const EVP_PKEY *pkey, object_generation_t generation)
- assert(sk_X509_num(certs) == n_cert);
+{
+ EVP_PKEY *xpkey = NULL;
+ X509 *x = NULL;
+ int match = 0;
- rc->indent--;
+ if ((x = read_cert(path, NULL)) == NULL || (xpkey = X509_get_pubkey(x)) == NULL) {
+ log_validation_status(rc, uri, unreadable_trust_anchor, generation);
+ } else {
+ match = EVP_PKEY_cmp(pkey, xpkey) == 1;
+ if (!match)
+ log_validation_status(rc, uri, trust_anchor_key_mismatch, generation);
}
+
+ EVP_PKEY_free(xpkey);
+ if (match)
+ return x;
+ X509_free(x);
+ return NULL;
}
@@ -3181,8 +4084,9 @@ int main(int argc, char *argv[])
char *lockfile = NULL, *xmlfile = NULL;
int c, i, j, ret = 1, jitter = 600, lockfd = -1;
STACK_OF(CONF_VALUE) *cfg_section = NULL;
- STACK_OF(X509) *certs = NULL;
+ STACK_OF(walk_ctx_t) *wsk = NULL;
CONF *cfg_handle = NULL;
+ walk_ctx_t *w = NULL;
time_t start = 0, finish;
unsigned long hash;
rcynic_ctx_t rc;
@@ -3197,17 +4101,23 @@ int main(int argc, char *argv[])
else
rc.jane++;
- set_directory(&rc.authenticated, "rcynic-data/authenticated/");
- set_directory(&rc.old_authenticated, "rcynic-data/authenticated.old/");
- set_directory(&rc.unauthenticated, "rcynic-data/unauthenticated/");
- rc.log_level = log_telemetry;
+ rc.log_level = log_data_err;
rc.allow_stale_crl = 1;
rc.allow_stale_manifest = 1;
+ rc.max_parallel_fetches = 1;
+ rc.max_retries = 3;
+ rc.retry_wait_min = 30;
+ rc.run_rsync = 1;
+ rc.rsync_timeout = 300;
#define QQ(x,y) rc.priority[x] = y;
LOG_LEVELS;
#undef QQ
+ if (!set_directory(&rc, &rc.authenticated, "rcynic-data/authenticated", 0) ||
+ !set_directory(&rc, &rc.unauthenticated, "rcynic-data/unauthenticated/", 1))
+ goto done;
+
OpenSSL_add_all_algorithms();
ERR_load_crypto_strings();
@@ -3272,19 +4182,22 @@ int main(int argc, char *argv[])
assert(val && val->name && val->value);
- if (!name_cmp(val->name, "authenticated"))
- set_directory(&rc.authenticated, val->value);
-
- else if (!name_cmp(val->name, "old-authenticated"))
- set_directory(&rc.old_authenticated, val->value);
+ if (!name_cmp(val->name, "authenticated") &&
+ !set_directory(&rc, &rc.authenticated, val->value, 0))
+ goto done;
- else if (!name_cmp(val->name, "unauthenticated"))
- set_directory(&rc.unauthenticated, val->value);
+ else if (!name_cmp(val->name, "unauthenticated") &&
+ !set_directory(&rc, &rc.unauthenticated, val->value, 1))
+ goto done;
else if (!name_cmp(val->name, "rsync-timeout") &&
!configure_integer(&rc, &rc.rsync_timeout, val->value))
goto done;
+ else if (!name_cmp(val->name, "max-parallel-fetches") &&
+ !configure_integer(&rc, &rc.max_parallel_fetches, val->value))
+ goto done;
+
else if (!name_cmp(val->name, "rsync-program"))
rc.rsync_program = strdup(val->value);
@@ -3347,6 +4260,10 @@ int main(int argc, char *argv[])
!configure_boolean(&rc, &prune, val->value))
goto done;
+ else if (!name_cmp(val->name, "run-rsync") &&
+ !configure_boolean(&rc, &rc.run_rsync, val->value))
+ goto done;
+
/*
* Ugly, but the easiest way to handle all these strings.
*/
@@ -3373,29 +4290,30 @@ int main(int argc, char *argv[])
goto done;
}
- if ((rc.stale_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate stale_cache stack");
+ if ((rc.dead_host_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate dead_host_cache stack");
goto done;
}
if (xmlfile != NULL) {
- if ((rc.host_counters = sk_HOST_MIB_COUNTER_new(host_mib_counter_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate host_counters stack");
- goto done;
- }
- if ((rc.validation_status = sk_VALIDATION_STATUS_new_null()) == NULL) {
+ if ((rc.validation_status = sk_validation_status_t_new(validation_status_cmp)) == NULL) {
logmsg(&rc, log_sys_err, "Couldn't allocate validation_status stack");
goto done;
}
}
- if ((certs = sk_X509_new_null()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate certificate stack");
+ if ((rc.x509_store = X509_STORE_new()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate X509_STORE");
goto done;
}
- if ((rc.x509_store = X509_STORE_new()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate X509_STORE");
+ if ((rc.rsync_queue = sk_rsync_ctx_t_new_null()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate rsync_queue");
+ goto done;
+ }
+
+ if ((rc.task_queue = sk_task_t_new_null()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate task_queue");
goto done;
}
@@ -3431,177 +4349,181 @@ int main(int argc, char *argv[])
start = time(0);
logmsg(&rc, log_telemetry, "Starting");
- if (!rm_rf(rc.old_authenticated)) {
- logmsg(&rc, log_sys_err, "Couldn't remove %s: %s",
- rc.old_authenticated, strerror(errno));
+ if (!construct_directory_names(&rc))
goto done;
- }
- if (rename(rc.authenticated, rc.old_authenticated) < 0 &&
- errno != ENOENT) {
- logmsg(&rc, log_sys_err, "Couldn't rename %s to %s: %s",
- rc.old_authenticated, rc.authenticated, strerror(errno));
+ if (!access(rc.new_authenticated.s, F_OK)) {
+ logmsg(&rc, log_sys_err, "Timestamped output directory %s already exists! Clock went backwards?", rc.new_authenticated.s);
goto done;
}
- if (!access(rc.authenticated, F_OK) || !mkdir_maybe(&rc, rc.authenticated)) {
+ if (!mkdir_maybe(&rc, &rc.new_authenticated)) {
logmsg(&rc, log_sys_err, "Couldn't prepare directory %s: %s",
- rc.authenticated, strerror(errno));
+ rc.new_authenticated.s, strerror(errno));
goto done;
}
for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
- char path1[FILENAME_MAX], path2[FILENAME_MAX], uri[URI_MAX];
- certinfo_t ta_info;
+ object_generation_t generation = object_generation_null;
+ path_t path1, path2;
+ certinfo_t ta_certinfo;
+ uri_t uri;
X509 *x = NULL;
assert(val && val->name && val->value);
- uri[0] = '\0';
+ if (!name_cmp(val->name, "trust-anchor-uri-with-key") ||
+ !name_cmp(val->name, "indirect-trust-anchor")) {
+ /*
+ * Obsolete syntax. If you're reading this comment because you
+ * had an old rcynic.conf and got this error message:
+ *
+ * "indirect-trust-anchor" is exactly the same as
+       * "trust-anchor-locator"; the name was changed to settle a
+ * terminology fight in the IETF SIDR WG.
+ *
+ * "trust-anchor-uri-with-key" is semantically identical to
+ * "trust-anchor-locator" (and was the original form of this
+ * mechanism), but the syntax and local file format is
+ * different.
+ *
+ * If you're seeing this error, you should just obtain current
+ * TAL files. Also see the "make-tal.sh" script.
+ */
+ logmsg(&rc, log_usage_err,
+ "Directive \"%s\" is obsolete -- please use \"trust-anchor-locator\" instead",
+ val->name);
+ goto done;
+ }
if (!name_cmp(val->name, "trust-anchor")) {
/*
- * Old local file trust anchor method.
+ * Local file trust anchor method.
*/
logmsg(&rc, log_telemetry, "Processing trust anchor from local file %s", val->value);
- if (strlen(val->value) >= sizeof(path1)) {
+ if (strlen(val->value) >= sizeof(path1.s)) {
logmsg(&rc, log_usage_err, "Trust anchor path name too long %s", val->value);
goto done;
}
- strcpy(path1, val->value);
- if ((x = read_cert(path1, NULL, 0)) == NULL) {
- logmsg(&rc, log_usage_err, "Couldn't read trust anchor %s", path1);
- goto done;
+ strcpy(path1.s, val->value);
+
+ /* Construct file:// URI for logging */
+ assert(sizeof("file://") < sizeof(uri.s));
+ strcpy(uri.s, "file://");
+ if (path1.s[0] != '/') {
+ if (getcwd(uri.s + strlen(uri.s), sizeof(uri.s) - strlen(uri.s)) == NULL ||
+ (!endswith(uri.s, "/") && strlen(uri.s) >= sizeof(uri.s) - 1))
+ uri.s[0] = '\0';
+ else
+ strcat(uri.s, "/");
+ }
+ if (uri.s[0] != '\0' && strlen(uri.s) + strlen(path1.s) < sizeof(uri.s))
+ strcat(uri.s, path1.s);
+ else
+ uri.s[0] = '\0';
+
+ if ((x = read_cert(&path1, NULL)) == NULL) {
+ log_validation_status(&rc, &uri, unreadable_trust_anchor, generation);
+ continue;
}
hash = X509_subject_name_hash(x);
for (j = 0; j < INT_MAX; j++) {
- if (snprintf(path2, sizeof(path2), "%s%lx.%d.cer",
- rc.authenticated, hash, j) == sizeof(path2)) {
+ if (snprintf(path2.s, sizeof(path2.s), "%s%lx.%d.cer",
+ rc.new_authenticated.s, hash, j) == sizeof(path2.s)) {
logmsg(&rc, log_sys_err,
- "Couldn't construct path name for trust anchor %s", path1);
+ "Couldn't construct path name for trust anchor %s", path1.s);
goto done;
}
- if (access(path2, F_OK))
+ if (access(path2.s, F_OK))
break;
}
if (j == INT_MAX) {
- logmsg(&rc, log_sys_err, "Couldn't find a free name for trust anchor %s", path1);
+ logmsg(&rc, log_sys_err, "Couldn't find a free name for trust anchor %s", path1.s);
goto done;
}
- assert(sizeof("file://") < sizeof(uri));
- strcpy(uri, "file://");
- if (path1[0] != '/') {
- if (getcwd(uri + strlen(uri), sizeof(uri) - strlen(uri)) == NULL ||
- (!endswith(uri, "/") && strlen(uri) >= sizeof(uri) - 1))
- uri[0] = '\0';
- else
- strcat(uri, "/");
- }
- if (uri[0] != '\0' && strlen(uri) + strlen(path1) < sizeof(uri))
- strcat(uri, path1);
- else
- uri[0] = '\0';
}
- if (!name_cmp(val->name, "trust-anchor-uri-with-key") ||
- !name_cmp(val->name, "indirect-trust-anchor") ||
- !name_cmp(val->name, "trust-anchor-locator")) {
+ if (!name_cmp(val->name, "trust-anchor-locator")) {
/*
- * Newfangled URI + public key method. Two different versions
- * of essentially the same mechanism.
- *
- * NB: EVP_PKEY_cmp() returns 1 for success, not 0 like every
- * other xyz_cmp() function in the entire OpenSSL library.
- * Go figure.
+ * Trust anchor locator (URI + public key) method.
*/
- int unified = (!name_cmp(val->name, "indirect-trust-anchor") ||
- !name_cmp(val->name, "trust-anchor-locator"));
- EVP_PKEY *pkey = NULL, *xpkey = NULL;
+ EVP_PKEY *pkey = NULL;
char *fn;
- if (unified) {
- fn = val->value;
- bio = BIO_new_file(fn, "r");
- if (!bio || BIO_gets(bio, uri, sizeof(uri)) <= 0) {
- logmsg(&rc, log_usage_err, "Couldn't read trust anchor URI from %s", fn);
- goto done;
- }
- uri[strcspn(uri, " \t\r\n")] = '\0';
- bio = BIO_push(BIO_new(BIO_f_base64()), bio);
- } else {
- j = strcspn(val->value, " \t");
- if (j >= sizeof(uri)) {
- logmsg(&rc, log_usage_err, "Trust anchor URI too long %s", val->value);
- goto done;
- }
- memcpy(uri, val->value, j);
- uri[j] = '\0';
- j += strspn(val->value + j, " \t");
- fn = val->value + j;
- bio = BIO_new_file(fn, "rb");
- }
- if (!uri_to_filename(&rc, uri, path1, sizeof(path1), rc.unauthenticated) ||
- !uri_to_filename(&rc, uri, path2, sizeof(path2), rc.authenticated)) {
- logmsg(&rc, log_usage_err, "Couldn't convert trust anchor URI %s to filename", uri);
- goto done;
+ path_t path3;
+
+ fn = val->value;
+ bio = BIO_new_file(fn, "r");
+ if (!bio || BIO_gets(bio, uri.s, sizeof(uri.s)) <= 0) {
+ log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
+ BIO_free(bio);
+ bio = NULL;
+ continue;
}
- logmsg(&rc, log_telemetry, "Processing trust anchor from URI %s", uri);
- if (!rsync_file(&rc, uri)) {
- logmsg(&rc, log_data_err, "Could not fetch trust anchor from %s", uri);
+ uri.s[strcspn(uri.s, " \t\r\n")] = '\0';
+ bio = BIO_push(BIO_new(BIO_f_linebreak()), bio);
+ bio = BIO_push(BIO_new(BIO_f_base64()), bio);
+ if (!uri_to_filename(&rc, &uri, &path1, &rc.unauthenticated) ||
+ !uri_to_filename(&rc, &uri, &path2, &rc.new_authenticated) ||
+ !uri_to_filename(&rc, &uri, &path3, &rc.old_authenticated)) {
+ log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
+ BIO_free_all(bio);
+ bio = NULL;
continue;
}
+ logmsg(&rc, log_telemetry, "Processing trust anchor from URI %s", uri.s);
+ rsync_file(&rc, &uri);
+ while (sk_rsync_ctx_t_num(rc.rsync_queue) > 0)
+ rsync_mgr(&rc);
if (bio)
pkey = d2i_PUBKEY_bio(bio, NULL);
BIO_free_all(bio);
bio = NULL;
if (!pkey) {
- logmsg(&rc, log_usage_err, "Couldn't read trust anchor public key for %s from %s", uri, fn);
- goto done;
+ log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
+ continue;
+ }
+ generation = object_generation_current;
+ if ((x = read_ta(&rc, &uri, &path1, pkey, generation)) == NULL) {
+ generation = object_generation_backup;
+ path1 = path3;
+ x = read_ta(&rc, &uri, &path1, pkey, generation);
}
- if ((x = read_cert(path1, NULL, 0)) == NULL)
- logmsg(&rc, log_data_err, "Couldn't read trust anchor %s", path1);
- if (x && (xpkey = X509_get_pubkey(x)) == NULL)
- logmsg(&rc, log_data_err, "Rejected %s because couldn't read public key from trust anchor locator", uri);
- j = (xpkey && EVP_PKEY_cmp(pkey, xpkey) == 1);
EVP_PKEY_free(pkey);
- EVP_PKEY_free(xpkey);
- if (!j) {
- logmsg(&rc, log_data_err, "Rejected %s because known public key didn't match trust anchor locator", uri);
- X509_free(x);
+ if (!x)
continue;
- }
}
if (!x)
continue;
- logmsg(&rc, log_telemetry, "Copying trust anchor %s to %s", path1, path2);
+ logmsg(&rc, log_telemetry, "Copying trust anchor %s to %s", path1.s, path2.s);
- if (!mkdir_maybe(&rc, path2) ||
- !(rc.use_links ? ln(path1, path2) : cp(&rc, path1, path2))) {
- logmsg(&rc, log_sys_err, "Couldn't %s trust anchor %s",
- (rc.use_links ? "link" : "copy"), path1);
+ if (!mkdir_maybe(&rc, &path2) || !cp_ln(&rc, &path1, &path2))
+ goto done;
+
+ if ((wsk = walk_ctx_stack_new()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate walk context stack");
goto done;
}
- parse_cert(&rc, x, &ta_info, uri);
- ta_info.ta = 1;
- sk_X509_push(certs, x);
+ parse_cert(&rc, x, &ta_certinfo, &uri, generation);
+ ta_certinfo.ta = 1;
- if (ta_info.crldp[0] && !check_x509(&rc, certs, x, &ta_info)) {
- logmsg(&rc, log_data_err, "Couldn't get CRL for trust anchor %s", path1);
- } else {
- if (*uri)
- log_validation_status(&rc, uri, validation_ok);
- walk_cert(&rc, &ta_info, certs);
+ if ((w = walk_ctx_stack_push(wsk, x, &ta_certinfo)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't push walk context stack");
+ goto done;
}
- X509_free(sk_X509_pop(certs));
- assert(sk_X509_num(certs) == 0);
+ check_ta(&rc, wsk);
+ wsk = NULL; /* Ownership of wsk passed to check_ta() */
}
- if (prune && !prune_unauthenticated(&rc, rc.unauthenticated,
- strlen(rc.unauthenticated))) {
+ if (!finalize_directories(&rc))
+ goto done;
+
+ if (prune && !prune_unauthenticated(&rc, &rc.unauthenticated,
+ strlen(rc.unauthenticated.s))) {
logmsg(&rc, log_sys_err, "Trouble pruning old unauthenticated data");
goto done;
}
@@ -3614,15 +4536,16 @@ int main(int argc, char *argv[])
if (xmlfile != NULL) {
char tad[sizeof("2006-10-13T11:22:33Z") + 1];
- char hostname[HOST_NAME_MAX];
time_t tad_time = time(0);
struct tm *tad_tm = gmtime(&tad_time);
int ok = 1, use_stdout = !strcmp(xmlfile, "-");
+ hostname_t hostname;
+ mib_counter_t code;
FILE *f = NULL;
strftime(tad, sizeof(tad), "%Y-%m-%dT%H:%M:%SZ", tad_tm);
- ok &= gethostname(hostname, sizeof(hostname)) == 0;
+ ok &= gethostname(hostname.s, sizeof(hostname.s)) == 0;
if (use_stdout)
f = stdout;
@@ -3637,47 +4560,41 @@ int main(int argc, char *argv[])
ok &= fprintf(f, "<?xml version=\"1.0\" ?>\n"
"<rcynic-summary date=\"%s\" rcynic-version=\"%s\""
" summary-version=\"%d\" reporting-hostname=\"%s\">\n"
- " <labels>\n"
- " <hostname>Publication Repository</hostname>\n",
- tad, svn_id, XML_SUMMARY_VERSION, hostname) != EOF;
+ " <labels>\n",
+ tad, svn_id, XML_SUMMARY_VERSION, hostname.s) != EOF;
for (j = 0; ok && j < MIB_COUNTER_T_MAX; ++j)
- ok &= fprintf(f, " <%s kind=\"%s\">%s</%s>\n",
- mib_counter_label[j], mib_counter_kind[j],
- (mib_counter_desc[j]
- ? mib_counter_desc[j]
- : X509_verify_cert_error_string(mib_counter_openssl[j])),
- mib_counter_label[j]) != EOF;
+ if (ok)
+ ok &= fprintf(f, " <%s kind=\"%s\">%s</%s>\n",
+ mib_counter_label[j], mib_counter_kind[j],
+ (mib_counter_desc[j]
+ ? mib_counter_desc[j]
+ : X509_verify_cert_error_string(mib_counter_openssl[j])),
+ mib_counter_label[j]) != EOF;
if (ok)
ok &= fprintf(f, " </labels>\n") != EOF;
- for (i = 0; ok && i < sk_HOST_MIB_COUNTER_num(rc.host_counters); i++) {
- HOST_MIB_COUNTER *h = sk_HOST_MIB_COUNTER_value(rc.host_counters, i);
- assert(h);
-
- if (ok)
- ok &= fprintf(f, " <host>\n <hostname>%s</hostname>\n",
- h->hostname) != EOF;
-
- for (j = 0; ok && j < MIB_COUNTER_T_MAX; ++j)
- ok &= fprintf(f, " <%s>%lu</%s>\n", mib_counter_label[j],
- h->counters[j], mib_counter_label[j]) != EOF;
-
- if (ok)
- ok &= fprintf(f, " </host>\n") != EOF;
- }
-
-
- for (i = 0; ok && i < sk_VALIDATION_STATUS_num(rc.validation_status); i++) {
- VALIDATION_STATUS *v = sk_VALIDATION_STATUS_value(rc.validation_status, i);
+ for (i = 0; ok && i < sk_validation_status_t_num(rc.validation_status); i++) {
+ validation_status_t *v = sk_validation_status_t_value(rc.validation_status, i);
assert(v);
tad_tm = gmtime(&v->timestamp);
strftime(tad, sizeof(tad), "%Y-%m-%dT%H:%M:%SZ", tad_tm);
- ok &= fprintf(f, " <validation_status timestamp=\"%s\" status=\"%s\">%s</validation_status>\n",
- tad, mib_counter_label[v->code], v->uri) != EOF;
+ for (code = (mib_counter_t) 0; ok && code < MIB_COUNTER_T_MAX; code++) {
+ if (validation_status_get_code(v, code)) {
+ if (ok)
+ ok &= fprintf(f, " <validation_status timestamp=\"%s\" status=\"%s\"",
+ tad, mib_counter_label[code]) != EOF;
+ if (ok && (v->generation == object_generation_current ||
+ v->generation == object_generation_backup))
+ ok &= fprintf(f, " generation=\"%s\"",
+ object_generation_label[v->generation]) != EOF;
+ if (ok)
+ ok &= fprintf(f, ">%s</validation_status>\n", v->uri.s) != EOF;
+ }
+ }
}
if (ok)
@@ -3695,21 +4612,16 @@ int main(int argc, char *argv[])
/*
* Do NOT free cfg_section, NCONF_free() takes care of that
*/
- sk_X509_pop_free(certs, X509_free);
sk_OPENSSL_STRING_pop_free(rc.rsync_cache, OPENSSL_STRING_free);
sk_OPENSSL_STRING_pop_free(rc.backup_cache, OPENSSL_STRING_free);
- sk_OPENSSL_STRING_pop_free(rc.stale_cache, OPENSSL_STRING_free);
- sk_HOST_MIB_COUNTER_pop_free(rc.host_counters, HOST_MIB_COUNTER_free);
- sk_VALIDATION_STATUS_pop_free(rc.validation_status, VALIDATION_STATUS_free);
+ sk_OPENSSL_STRING_pop_free(rc.dead_host_cache, OPENSSL_STRING_free);
+ sk_validation_status_t_pop_free(rc.validation_status, validation_status_t_free);
X509_STORE_free(rc.x509_store);
NCONF_free(cfg_handle);
CONF_modules_free();
BIO_free(bio);
EVP_cleanup();
ERR_free_strings();
- free(rc.authenticated);
- free(rc.old_authenticated);
- free(rc.unauthenticated);
if (rc.rsync_program)
free(rc.rsync_program);
if (lockfile && lockfd >= 0)
@@ -3722,10 +4634,10 @@ int main(int argc, char *argv[])
if (start) {
finish = time(0);
logmsg(&rc, log_telemetry,
- "Finished, elapsed time %d:%02d:%02d",
- (finish - start) / 3600,
- (finish - start) / 60 % 60,
- (finish - start) % 60);
+ "Finished, elapsed time %u:%02u:%02u",
+ (unsigned) ((finish - start) / 3600),
+ (unsigned) ((finish - start) / 60 % 60),
+ (unsigned) ((finish - start) % 60));
}
return ret;
diff --git a/rcynic/rcynic.xsl b/rcynic/rcynic.xsl
index 25aa7b2c..487cd2f6 100644
--- a/rcynic/rcynic.xsl
+++ b/rcynic/rcynic.xsl
@@ -1,6 +1,6 @@
<?xml version="1.0"?>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -43,8 +43,9 @@
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0"
- xmlns:exslt="http://exslt.org/common"
- exclude-result-prefixes="exslt">
+ xmlns:com="http://exslt.org/common"
+ xmlns:str="http://exslt.org/strings"
+ exclude-result-prefixes="com str">
<xsl:output omit-xml-declaration="yes" indent="yes" method="xml" encoding="US-ASCII"
doctype-public="-//W3C//DTD XHTML 1.0 Strict//EN"
@@ -54,29 +55,10 @@
<xsl:param name="suppress-zero-columns" select="1"/>
<xsl:param name="show-total" select="1"/>
<xsl:param name="use-colors" select="1"/>
- <xsl:param name="show-detailed-status" select="0"/>
- <xsl:param name="show-problems" select="1"/>
+ <xsl:param name="show-detailed-status" select="1"/>
+ <xsl:param name="show-problems" select="0"/>
<xsl:param name="show-summary" select="1"/>
- <xsl:variable name="sums">
- <xsl:for-each select="rcynic-summary/labels/*">
- <x sum="{sum(/rcynic-summary/host/*[name() = name(current()) and . != 0])}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <xsl:variable name="moods">
- <xsl:for-each select="rcynic-summary/host">
- <xsl:variable name="hostname" select="hostname"/>
- <xsl:for-each select="*[not(self::hostname)]">
- <xsl:variable name="label" select="name()"/>
- <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $label]/@kind"/>
- <x hostname="{$hostname}" mood="{$mood}">
- <xsl:value-of select="."/>
- </x>
- </xsl:for-each>
- </xsl:for-each>
- </xsl:variable>
-
<xsl:template match="/">
<xsl:comment>Generators</xsl:comment>
<xsl:comment><xsl:value-of select="rcynic-summary/@rcynic-version"/></xsl:comment>
@@ -94,102 +76,210 @@
<meta http-equiv="Refresh" content="{$refresh}"/>
</xsl:if>
<style type="text/css">
- td { text-align: center; padding: 4px }
- td.uri { text-align: left }
+ td { text-align: center; padding: 4px }
+ td.uri { text-align: left }
+ td.host { text-align: left }
<xsl:if test="$use-colors != 0">
- tr.good { background-color: #77ff77 }
- tr.warn { background-color: yellow }
- tr.bad { background-color: #ff5500 }
+ tr.good,td.good { background-color: #77ff77 }
+ tr.warn,td.warn { background-color: yellow }
+ tr.bad,td.bad { background-color: #ff5500 }
</xsl:if>
</style>
</head>
<body>
+ <h1><xsl:value-of select="$title"/></h1>
+ <!-- Summary output, old host-oriented format -->
<xsl:if test="$show-summary != 0">
- <h1>
- <xsl:value-of select="$title"/>
- </h1>
- <table class="summary" rules="all">
- <thead>
- <tr>
- <xsl:for-each select="rcynic-summary/labels/*">
- <xsl:variable name="p" select="position()"/>
- <xsl:if test="$suppress-zero-columns = 0 or position() = 1 or exslt:node-set($sums)/x[$p]/@sum &gt; 0">
- <td><b><xsl:apply-templates/></b></td>
+
+ <!-- Collect data we need to display -->
+ <xsl:variable name="host-data">
+ <xsl:for-each select="rcynic-summary/validation_status">
+ <xsl:sort order="ascending" data-type="text" select="."/>
+ <xsl:variable name="uri" select="string(.)"/>
+ <xsl:if test="starts-with($uri, 'rsync://')">
+ <xsl:variable name="hostname" select="str:tokenize($uri, ':/')[2]"/>
+ <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = current()/@status]/@kind"/>
+ <xsl:variable name="fn2">
+ <xsl:if test="substring($uri, string-length($uri) - 3, 1) = '.' and @generation != ''">
+ <xsl:value-of select="substring($uri, string-length($uri) - 3)"/>
</xsl:if>
- </xsl:for-each>
- </tr>
- </thead>
- <tbody>
- <xsl:for-each select="rcynic-summary/host">
- <xsl:sort order="descending" data-type="number" select="sum(*[not(self::hostname)])"/>
- <xsl:sort order="ascending" data-type="text" select="hostname"/>
- <xsl:variable name="hostname" select="hostname"/>
- <xsl:variable name="goodness" select="sum(exslt:node-set($moods)/x[@hostname = $hostname and @mood = 'good'])"/>
- <xsl:variable name="badness" select="sum(exslt:node-set($moods)/x[@hostname = $hostname and @mood = 'bad'])"/>
- <xsl:variable name="warnings" select="sum(exslt:node-set($moods)/x[@hostname = $hostname and @mood = 'warn'])"/>
- <xsl:variable name="mood">
- <xsl:choose>
- <xsl:when test="$goodness != 0 and $warnings = 0 and $badness = 0">good</xsl:when>
- <xsl:when test="$goodness + $warnings != 0">warn</xsl:when>
- <xsl:otherwise>bad</xsl:otherwise>
- </xsl:choose>
</xsl:variable>
- <tr class="{$mood}">
- <xsl:for-each select="*">
- <xsl:variable name="p" select="position()"/>
- <xsl:if test="$suppress-zero-columns = 0 or position() = 1 or exslt:node-set($sums)/x[$p]/@sum &gt; 0">
- <td><xsl:if test=". != 0"><xsl:apply-templates/></xsl:if></td>
- </xsl:if>
+ <x hostname="{$hostname}" timestamp="{@timestamp}" uri="{$uri}" status="{@status}" mood="{$mood}" fn2="{$fn2}" generation="{@generation}"/>
+ </xsl:if>
+ </xsl:for-each>
+ </xsl:variable>
+
+ <!-- Calculate set of unique hostnames -->
+ <xsl:variable name="unique-hostnames">
+ <xsl:for-each select="com:node-set($host-data)/x[not(@hostname = following::x/@hostname)]">
+ <x hostname="{@hostname}"/>
+ </xsl:for-each>
+ </xsl:variable>
+
+ <!-- Calculate set of unique filename types -->
+ <xsl:variable name="unique-fn2s">
+ <xsl:for-each select="com:node-set($host-data)/x[not(@fn2 = following::x/@fn2)]">
+ <x fn2="{@fn2}"/>
+ </xsl:for-each>
+ </xsl:variable>
+
+ <!-- Generation names -->
+ <xsl:variable name="unique-generations">
+ <xsl:for-each select="com:node-set($host-data)/x[not(@generation = following::x/@generation)]">
+ <x generation="{@generation}"/>
+ </xsl:for-each>
+ </xsl:variable>
+
+ <!-- Calculate grand totals, figure out which columns to display -->
+ <xsl:variable name="totals">
+ <xsl:for-each select="rcynic-summary/labels/*">
+ <xsl:variable name="sum" select="count(com:node-set($host-data)/x[@status = name(current())])"/>
+ <xsl:variable name="show">
+ <xsl:choose>
+ <xsl:when test="$suppress-zero-columns = 0 or $sum &gt; 0">
+ <xsl:text>1</xsl:text>
+ </xsl:when>
+ <xsl:otherwise>
+ <xsl:text>0</xsl:text>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:variable>
+ <x name="{name(current())}" sum="{$sum}" text="{.}" show="{$show}" mood="{@kind}"/>
+ </xsl:for-each>
+ </xsl:variable>
+
+ <!-- Calculate how many columns we'll be displaying -->
+ <xsl:variable name="columns" select="count(com:node-set($totals)/x[@show = 1])"/>
+
+ <!-- Show the total -->
+ <xsl:if test="$show-total != 0">
+ <br/>
+ <h2>Grand Totals</h2>
+ <table class="summary" rules="all" border="1">
+ <thead>
+ <tr>
+ <td/> <!-- was hostname -->
+ <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
+ <td><b><xsl:value-of select="@text"/></b></td>
</xsl:for-each>
</tr>
- </xsl:for-each>
- <xsl:if test="$show-total != 0">
+ </thead>
+ <tbody>
<tr>
<td><b>Total</b></td>
- <xsl:for-each select="exslt:node-set($sums)/x[position() &gt; 1]">
+ <xsl:for-each select="com:node-set($totals)/x">
<xsl:if test="$suppress-zero-columns = 0 or @sum &gt; 0">
- <td><b><xsl:value-of select="@sum"/></b></td>
+ <td class="{@mood}"><xsl:value-of select="@sum"/></td>
</xsl:if>
</xsl:for-each>
</tr>
- </xsl:if>
- </tbody>
- </table>
- </xsl:if>
+ </tbody>
+ </table>
+ </xsl:if>
- <xsl:if test="$show-problems != 0">
+ <!-- Generate the HTML -->
<br/>
- <h1>Problems</h1>
- <table class="problems" rules="all" >
- <thead>
- <tr>
- <td class="status"><b>Status</b></td>
- <td class="uri"><b>URI</b></td>
- </tr>
- </thead>
- <tbody>
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:variable name="status" select="@status"/>
- <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
- <xsl:if test="$mood != 'good'">
- <tr>
- <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
- <td class="uri"><xsl:value-of select="."/></td>
- </tr>
- </xsl:if>
- </xsl:for-each>
- </tbody>
- </table>
+ <h2>Summaries by Repository Host</h2>
+ <xsl:for-each select="com:node-set($unique-hostnames)/x">
+ <xsl:sort order="ascending" data-type="text" select="@hostname"/>
+ <xsl:variable name="hostname" select="@hostname"/>
+ <br/>
+ <h3><xsl:value-of select="$hostname"/></h3>
+ <table class="summary" rules="all" border="1">
+ <thead>
+ <tr>
+ <td/> <!-- was hostname -->
+ <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
+ <td><b><xsl:value-of select="@text"/></b></td>
+ </xsl:for-each>
+ </tr>
+ </thead>
+ <tbody>
+ <xsl:for-each select="com:node-set($unique-fn2s)/x">
+ <xsl:sort order="ascending" data-type="text" select="@fn2"/>
+ <xsl:variable name="fn2" select="@fn2"/>
+ <xsl:for-each select="com:node-set($unique-generations)/x">
+ <xsl:sort order="ascending" data-type="text" select="@generation"/>
+ <xsl:variable name="generation" select="@generation"/>
+ <xsl:if test="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation])">
+ <tr>
+ <td><xsl:value-of select="concat($generation, ' ', $fn2)"/></td>
+ <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
+ <xsl:variable name="label" select="@name"/>
+ <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation and @status = $label])"/>
+ <xsl:choose>
+ <xsl:when test="$value != 0">
+ <td class="{@mood}">
+ <xsl:value-of select="$value"/>
+ </td>
+ </xsl:when>
+ <xsl:otherwise>
+ <td/>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:for-each>
+ </tr>
+ </xsl:if>
+ </xsl:for-each>
+ </xsl:for-each>
+ <tr>
+ <td>Total</td>
+ <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
+ <xsl:variable name="label" select="@name"/>
+ <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @status = $label])"/>
+ <xsl:choose>
+ <xsl:when test="$value != 0">
+ <td class="{@mood}">
+ <xsl:value-of select="$value"/>
+ </td>
+ </xsl:when>
+ <xsl:otherwise>
+ <td/>
+ </xsl:otherwise>
+ </xsl:choose>
+ </xsl:for-each>
+ </tr>
+ </tbody>
+ </table>
+ </xsl:for-each>
+
+ <!-- "Problems" display -->
+ <xsl:if test="$show-problems != 0">
+ <br/>
+ <h2>Problems</h2>
+ <table class="problems" rules="all" border="1" >
+ <thead>
+ <tr>
+ <td class="status"><b>Status</b></td>
+ <td class="uri"><b>URI</b></td>
+ </tr>
+ </thead>
+ <tbody>
+ <xsl:for-each select="rcynic-summary/validation_status">
+ <xsl:variable name="status" select="@status"/>
+ <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
+ <xsl:if test="$mood != 'good'">
+ <tr class="{$mood}">
+ <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
+ <td class="uri"><xsl:value-of select="."/></td>
+ </tr>
+ </xsl:if>
+ </xsl:for-each>
+ </tbody>
+ </table>
+ </xsl:if>
</xsl:if>
+ <!-- Detailed status display -->
<xsl:if test="$show-detailed-status != 0">
<br/>
- <h1>Validation Status</h1>
- <table class="details" rules="all" >
+ <h2>Validation Status</h2>
+ <table class="details" rules="all" border="1" >
<thead>
<tr>
<td class="timestamp"><b>Timestamp</b></td>
+ <td class="generation"><b>Generation</b></td>
<td class="status"><b>Status</b></td>
<td class="uri"><b>URI</b></td>
</tr>
@@ -200,6 +290,7 @@
<xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
<tr class="{$mood}">
<td class="timestamp"><xsl:value-of select="@timestamp"/></td>
+ <td class="generation"><xsl:value-of select="@generation"/></td>
<td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
<td class="uri"><xsl:value-of select="."/></td>
</tr>
diff --git a/rcynic/show.awk b/rcynic/show.awk
index 2e13dce2..84d33cda 100644
--- a/rcynic/show.awk
+++ b/rcynic/show.awk
@@ -1,6 +1,6 @@
# $Id$
#
-# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
@@ -19,57 +19,75 @@
BEGIN {
FS = "\t";
+ label[++nlabels] = "hostname";
+ head["hostname", 1] = "Repository";
+ head["hostname", 2] = "Host";
+ width["hostname"] = length(head["hostname", 1]);
}
-NR == 1 {
- nf = NF;
- for (i = 1; i <= nf; i++) {
- nh = split($i, h, /[ \t]+/);
- for (j = 1; j <= nh; j++) {
- head[i, j] = h[j];
- if (length(h[j]) > width[i])
- width[i] = length(h[j]);
- }
- }
+!NF {
+ got_labels = 1;
+ next;
}
-NR > 1 {
- nr = NR - 1;
- for (j = 1; j <= NF; j++) {
- data[nr, j] = $j;
- sum[j] += $j;
- if (length($j) > width[j])
- width[j] = length($j);
+!got_labels {
+ label[++nlabels] = $1;
+ nh = split($2, h, /[ \t]+/);
+ for (i = 1; i <= nh; i++) {
+ head[$1, i] = h[i];
+ if (length(h[i]) > width[$1])
+ width[$1] = length(h[i]);
}
}
+got_labels && $3 ~ /^rsync:\/\/.+/ {
+ hostname = $3;
+ sub(/^rsync:\/\//, "", hostname);
+ sub(/\/.*$/, "", hostname);
+ if (!seen[hostname])
+ seen[hostname] = ++nhostnames;
+ data[hostname, $2]++;
+ total[$2]++;
+ if (length(hostname) > width["hostname"])
+ width["hostname"] = length(hostname);
+}
+
END {
+
for (i = 1;; i++) {
blank = 1;
- for (j = 2; j <= nf; j++)
- if (head[j, i] && sum[j] > 0)
+ for (j = 1; j <= nlabels; j++)
+ if (head[label[j], i] && (j == 1 || total[label[j]] > 0))
blank = 0;
if (blank)
break;
- for (j = 1; j <= nf; j++)
+ for (j = 1; j <= nlabels; j++) {
if (j == 1)
- printf "%*s", width[j], head[j, i];
- else if (sum[j] > 0)
- printf " %*s", width[j], head[j, i];
+ printf "%-*s", width[label[j]], head[label[j], i];
+ else if (total[label[j]] > 0)
+ printf " %*s", width[label[j]], head[label[j], i];
+ }
print "";
}
- for (i = 1; i <= nr; i++) {
- for (j = 1; j <= nf; j++)
+
+ for (i in seen)
+ hostnames[seen[i]] = i;
+
+ for (i = 1; i <= nhostnames; i++) {
+ for (j = 1; j <= nlabels; j++) {
if (j == 1)
- printf "%*s", width[j], data[i, j];
- else if (sum[j] > 0)
- printf " %*s", width[j], data[i, j];
+ printf "%-*s", width[label[j]], hostnames[i];
+ else if (total[label[j]] > 0)
+ printf " %*s", width[label[j]], data[hostnames[i], label[j]];
+ }
print "";
}
- for (j = 1; j <= nf; j++)
+
+ for (j = 1; j <= nlabels; j++) {
if (j == 1)
- printf "%*s", width[j], "Total";
- else if (sum[j] > 0)
- printf " %*s", width[j], sum[j];
+ printf "%-*s", width[label[j]], "Total";
+ else if (total[label[j]] > 0)
+ printf " %*s", width[label[j]], total[label[j]];
+ }
print "";
}
diff --git a/rcynic/show.xsl b/rcynic/show.xsl
index c6362f78..06d69893 100644
--- a/rcynic/show.xsl
+++ b/rcynic/show.xsl
@@ -1,6 +1,6 @@
<?xml version="1.0"?>
<!--
- - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted, provided that the above
@@ -17,33 +17,37 @@
<!-- $Id$ -->
-<!--
- - XSL stylesheet to render rcynic's xml-summary output as tab-delimited text.
- -->
-
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="text" encoding="US-ASCII"/>
+ <!-- Translate rcynic XML into tab-delimited flat text -->
<xsl:template match="/">
+
+ <!-- Write labels as two columns: <label> <tab> <text> -->
<xsl:for-each select="rcynic-summary/labels/*">
- <xsl:if test="position() != 1">
- <xsl:text>&#9;</xsl:text>
- </xsl:if>
- <xsl:apply-templates/>
+ <xsl:value-of select="name()"/>
+ <xsl:text>&#9;</xsl:text>
+ <xsl:value-of select="."/>
+ <xsl:text>&#10;</xsl:text>
</xsl:for-each>
+
+ <!-- Blank line between sections -->
<xsl:text>&#10;</xsl:text>
- <xsl:for-each select="rcynic-summary/host">
- <xsl:sort order="descending" data-type="number" select="sum(*[not(self::hostname)])"/>
- <xsl:sort order="ascending" data-type="text" select="hostname"/>
- <xsl:for-each select="*">
- <xsl:if test="position() != 1">
- <xsl:text>&#9;</xsl:text>
- </xsl:if>
- <xsl:apply-templates/>
- </xsl:for-each>
+
+  <!-- Write status as four columns: <timestamp> <tab> <status> <tab> <uri> <tab> <generation> -->
+ <xsl:for-each select="rcynic-summary/validation_status">
+ <xsl:sort order="ascending" data-type="text" select="."/>
+ <xsl:value-of select="@timestamp"/>
+ <xsl:text>&#9;</xsl:text>
+ <xsl:value-of select="@status"/>
+ <xsl:text>&#9;</xsl:text>
+ <xsl:value-of select="."/>
+ <xsl:text>&#9;</xsl:text>
+ <xsl:value-of select="@generation"/>
<xsl:text>&#10;</xsl:text>
</xsl:for-each>
+
</xsl:template>
</xsl:stylesheet>
diff --git a/rcynic-ng/validation_status.xsl b/rcynic/validation_status.xsl
index 737b1467..737b1467 100644
--- a/rcynic-ng/validation_status.xsl
+++ b/rcynic/validation_status.xsl