Diffstat (limited to 'rp/rcynic')
42 files changed, 11256 insertions, 0 deletions
diff --git a/rp/rcynic/Doxyfile b/rp/rcynic/Doxyfile
new file mode 100644
index 00000000..aa183715
--- /dev/null
+++ b/rp/rcynic/Doxyfile
@@ -0,0 +1,1679 @@
+# Doxyfile 1.7.3
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+#       TAG = value [value, ...]
+# For lists, items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = rcynic
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 1.0
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the
+# viewer a quick idea about the purpose of the project. Keep the description
+# short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed
+# description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and
+# assignment operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before each file's name in the file list and in the header files. If
+# set to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names.
+# This can be useful if your file system doesn't support long names like
+# on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = YES
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files
+# it parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend
+# it using this tag.
+# The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp,
+# C, C++, D, PHP, Objective-C, Python, Fortran, and VHDL. For instance to
+# make doxygen treat .inc files as Fortran files (default is PHP), and .f
+# files as C (default is Fortran), use: inc=Fortran f=C. Note that for custom
+# extensions you also need to set FILE_PATTERNS otherwise the files are not
+# read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string) vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES
+# to enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use
+# public instead of private inheritance when no explicit protection keyword
+# is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES (the
+# default) will make doxygen replace the get and set methods by a property in
+# the documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache used to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough.
+# For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve
+# the performance by keeping more symbols in memory. Note that the value works
+# on a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the
+# documentation of that file.
+
+SHOW_INCLUDE_FILES = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do
+# proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function
+# even if there is only one candidate or it is obvious which candidate to
+# choose by doing a simple string match.
+# By disabling STRICT_PROTO_MATCHING doxygen will still accept a match
+# between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory
+# hierarchy in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically
+# from the version control system). Doxygen will invoke the program by
+# executing (via popen()) the command <command> <input-file>, where <command>
+# is the value of the FILE_VERSION_FILTER tag, and <input-file> is the name
+# of an input file provided by doxygen. Whatever the program writes to
+# standard output is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
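[Editor's illustration, not part of the committed file: FILE_VERSION_FILTER accepts any command that prints a version string for the file name doxygen appends to it. For a tree kept in git, one plausible setting would be

    FILE_VERSION_FILTER = "git log -1 --pretty=format:%h --"

which stamps each documented file with the abbreviated hash of the last commit touching it; the trailing "--" keeps the appended file name from being read as a revision.]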
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be
+# parsed by doxygen. The layout file controls the global structure of the
+# generated output files in an output format independent way. To create the
+# layout file that represents doxygen's defaults, run doxygen with the -l
+# option. You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their
+# parameters or return value. If set to NO (the default) doxygen will only
+# warn about wrong or incomplete parameter documentation, but not about the
+# absence of documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that
+# contain documented source files. You may enter file names like "myfile.cpp"
+# or directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = .
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories.
+# If left blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS = *.[ch]
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT
+# tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are
+# excluded from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file
+# pattern basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
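[Editor's illustration, not part of the committed file: each pattern=filter pair is applied per file, so a tree that mixed these C sources with, say, Python helpers could run only the Python files through a converter script — my_py_filter is a hypothetical name:

    FILTER_PATTERNS = *.py=my_py_filter

Files matching no pattern fall back to INPUT_FILTER, or to no filtering at all.]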
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per
+# file pattern. A pattern will override the setting for FILTER_PATTERNS (if
+# any) and it is also possible to disable source filtering for a specific
+# pattern using *.ext= (so without naming a filter). This option only has
+# effect when FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these
+# sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated
+# HTML page will contain the date and time when the page was generated.
+# Setting this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product
+# suite) can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely
+# identify the documentation publisher. This should be a reverse domain-name
+# style string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [0,1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+# Note that a value of 0 will completely suppress the enum values from
+# appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the
+# HTML output. When enabled you also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the
+# mathjax directory is located at the same level as the HTML output directory,
+# then MATHJAX_RELPATH should be ../mathjax. The default value points to the
+# mathjax.org site, so you can quickly see the result without installing
+# MathJax, but it is strongly recommended to install a local copy of MathJax
+# before deployment.
+
+MATHJAX_RELPATH = http://www.mathjax.org/mathjax
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to setup
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = YES
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = letter
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = YES
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = YES
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
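[Editor's illustration, not part of the committed file: MACRO_EXPANSION cooperates with the tags documented below. With EXPAND_ONLY_PREDEF set to YES, only macros named in PREDEFINED or EXPAND_AS_DEFINED are expanded; a hypothetical configuration that hides an export decoration and expands one wrapper macro might read

    MACRO_EXPANSION = YES
    EXPAND_ONLY_PREDEF = YES
    PREDEFINED = MY_EXPORT=
    EXPAND_AS_DEFINED = MY_WRAPPER

where MY_EXPORT is defined away and MY_WRAPPER is expanded using its definition from the sources.]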
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be
+# expanded. The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition
+# that overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path).
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with
+# base or super classes. Setting the tag to NO turns the diagrams off. Note
+# that this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = NO
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default).
+
+HAVE_DOT = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will write a font called Helvetica to the output
+# directory and reference it in all dot files that doxygen generates.
+# When you want a differently looking font you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class reference variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct group dependencies.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = YES
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, gif or svg.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the number
+# of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
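+# (Editor's note, not part of the stock template: when debugging graph
+# generation it can be useful to keep the intermediate files by setting
+#   DOT_CLEANUP = NO
+# and inspecting the .dot sources that doxygen hands to Graphviz.)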
+ +DOT_CLEANUP = YES diff --git a/rp/rcynic/Makefile.in b/rp/rcynic/Makefile.in new file mode 100644 index 00000000..06f7d9cd --- /dev/null +++ b/rp/rcynic/Makefile.in @@ -0,0 +1,237 @@ +# $Id$ + +NAME = rcynic + +BIN = ${NAME} +SRC = ${NAME}.c +OBJ = ${NAME}.o + +GEN = defstack.h + +OBJS = ${OBJ} bio_f_linebreak.o + +CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror-implicit-function-declaration +LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@ +LIBS = @LIBS@ + +AWK = @AWK@ +SORT = @SORT@ +PYTHON = @PYTHON@ +RRDTOOL = @RRDTOOL@ +INSTALL = @INSTALL@ + +abs_top_srcdir = @abs_top_srcdir@ +abs_top_builddir = @abs_top_builddir@ + +prefix = @prefix@ +exec_prefix = @exec_prefix@ +datarootdir = @datarootdir@ +datadir = @datadir@ +localstatedir = @localstatedir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ +bindir = @bindir@ +sbindir = @sbindir@ +libexecdir = @libexecdir@ +sysconfdir = @sysconfdir@ + +abs_builddir = @abs_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +abs_top_builddir = @abs_top_builddir@ +srcdir = @srcdir@ + +RCYNIC_BIN_RCYNIC = @RCYNIC_BIN_RCYNIC@ +RCYNIC_CONF_DATA = @RCYNIC_CONF_DATA@ +RCYNIC_CONF_FILE = @RCYNIC_CONF_FILE@ +RCYNIC_CONF_RSYNC = @RCYNIC_CONF_RSYNC@ +RCYNIC_CONF_TA_DIR = @RCYNIC_CONF_TA_DIR@ +RCYNIC_CRON_USER = @RCYNIC_CRON_USER@ +RCYNIC_DATA_DIR = ${RCYNIC_DIR}/data +RCYNIC_DIR = @RCYNIC_DIR@ +RCYNIC_DIRS = ${RCYNIC_TA_DIR} ${RCYNIC_JAIL_DIRS} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets +RCYNIC_GECOS = RPKI Validation System +RCYNIC_GROUP = rcynic +RCYNIC_HTML_DIR = @RCYNIC_HTML_DIR@ +RCYNIC_INSTALL_TARGETS = @RCYNIC_INSTALL_TARGETS@ +RCYNIC_JAIL_DIRS = @RCYNIC_JAIL_DIRS@ +RCYNIC_STATIC_RSYNC = @RCYNIC_STATIC_RSYNC@ +RCYNIC_TA_DIR = @RCYNIC_TA_DIR@ +RCYNIC_USER = rcynic +RPKIRTR_DIR = ${RCYNIC_DIR}/rpki-rtr +RPKIRTR_GECOS = RPKI router server +RPKIRTR_GROUP = rpkirtr +RPKIRTR_MODE = 775 +RPKIRTR_USER = rpkirtr + +SCRIPTS = rcynic-text rcynic-html rcynic-svn validation_status rcynic-cron + +all: ${BIN} ${SCRIPTS} ${RCYNIC_STATIC_RSYNC} + +clean: + if test -r static-rsync/Makefile; then cd static-rsync; ${MAKE} $@; fi + rm -f ${BIN} ${OBJS} ${SCRIPTS} + +${OBJ}: ${SRC} ${GEN} + +${BIN}: ${OBJS} + ${CC} ${CFLAGS} -o $@ ${OBJS} ${LDFLAGS} ${LIBS} + +${GEN}: ${SRC} + ${PYTHON} ${abs_top_srcdir}/buildtools/defstack.py ${SRC} >$@.tmp + mv $@.tmp $@ + +COMPILE_PYTHON = \ + AC_PYTHON_INTERPRETER='${PYTHON}' \ + AC_RRDTOOL_BINARY='${RRDTOOL}' \ + ${PYTHON} ${abs_top_srcdir}/buildtools/make-rcynic-script.py <$? >$@; \ + chmod 755 $@ + +COMPILE_PYTHON_CRON = \ + AC_PYTHON_INTERPRETER='${PYTHON}' \ + AC_RCYNIC_USER='${RCYNIC_USER}' \ + AC_RCYNIC_DIR='${RCYNIC_DIR}' \ + AC_bindir='${bindir}' \ + AC_sbindir='${sbindir}' \ + AC_sysconfdir='${sysconfdir}' \ + AC_libexecdir='${libexecdir}' \ + AC_RCYNIC_HTML_DIR='${RCYNIC_HTML_DIR}' \ + ${PYTHON} ${abs_top_srcdir}/buildtools/make-rcynic-script.py <$? 
>$@; \ + chmod 755 $@ + +rcynic-text: rcynic-text.py + ${COMPILE_PYTHON} + +rcynic-html: rcynic-html.py + ${COMPILE_PYTHON} + +rcynic-svn: rcynic-svn.py + ${COMPILE_PYTHON} + +validation_status: validation_status.py + ${COMPILE_PYTHON} + +rcynic-cron: rcynic-cron.py + ${COMPILE_PYTHON_CRON} + +tags: TAGS + +TAGS: ${SRC} ${GEN} + etags ${SRC} ${GEN} + +test: ${BIN} + if test -r rcynic.conf; \ + then \ + ./${BIN} -j 0 && \ + test -r rcynic.xml && \ + echo && \ + ./rcynic-text rcynic.xml; \ + else \ + echo No rcynic.conf, skipping test; \ + fi + +uninstall deinstall: + @echo Sorry, automated deinstallation of rcynic is not implemented yet + +distclean: clean + if test -r static-rsync/Makefile; then cd static-rsync; ${MAKE} $@; fi + rm -f Makefile + +static-rsync/rsync: + @echo "Building static rsync for use in chroot jail" + cd static-rsync; ${MAKE} all + +install: all ${RCYNIC_INSTALL_TARGETS} + +install-always: \ + install-directories install-scripts install-rcynic install-rcynic-conf + +install-postconf: \ + install-user-and-group install-directory-ownership install-crontab + +install-jailed: \ + install-static-rsync install-shared-libraries install-rc-scripts + +install-directories: ${RCYNIC_DIRS} + +${RCYNIC_DIRS} ${DESTDIR}${bindir} ${DESTDIR}${sysconfdir}: + ${INSTALL} -v -d $@ + +install-directory-ownership: ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} ${RPKIRTR_DIR}/sockets + chown ${RCYNIC_USER}:${RCYNIC_GROUP} ${RCYNIC_DATA_DIR} ${RPKIRTR_DIR} + chown ${RPKIRTR_USER}:${RCYNIC_GROUP} ${RPKIRTR_DIR}/sockets + chmod ${RPKIRTR_MODE} ${RPKIRTR_DIR}/sockets + +install-rcynic-conf: ${RCYNIC_CONF_FILE} + +${RCYNIC_CONF_FILE}: + @echo + @echo Found no ${RCYNIC_CONF_FILE}, creating basic config and installing default trust anchor locators. + @echo You might want to edit this. + @echo + ${INSTALL} -v -d ${RCYNIC_TA_DIR} + ${INSTALL} -v -p -m 444 sample-trust-anchors/*.tal ${RCYNIC_TA_DIR} + @echo > $@.tmp '# Basic rcynic configuration file with default trust anchors.' + @echo >>$@.tmp '# See documentation for details.' 
+	@echo >>$@.tmp ''
+	@echo >>$@.tmp '[rcynic]'
+	@echo >>$@.tmp 'rsync-program = ${RCYNIC_CONF_RSYNC}'
+	@echo >>$@.tmp 'authenticated = ${RCYNIC_CONF_DATA}/authenticated'
+	@echo >>$@.tmp 'unauthenticated = ${RCYNIC_CONF_DATA}/unauthenticated'
+	@echo >>$@.tmp 'xml-summary = ${RCYNIC_CONF_DATA}/rcynic.xml'
+	@echo >>$@.tmp 'jitter = 600'
+	@echo >>$@.tmp 'max-parallel-fetches = 8'
+	@echo >>$@.tmp 'use-syslog = true'
+	@echo >>$@.tmp 'log-level = log_usage_err'
+	@echo >>$@.tmp 'trust-anchor-directory = ${RCYNIC_CONF_TA_DIR}'
+	@chmod 444 $@.tmp
+	@mv -f $@.tmp $@
+
+install-rcynic: ${RCYNIC_BIN_RCYNIC}
+
+${RCYNIC_BIN_RCYNIC}: ${BIN}
+	${INSTALL} -p -m 555 ${BIN} $@
+
+install-static-rsync: ${RCYNIC_DIR}/bin/rsync
+
+${RCYNIC_DIR}/bin/rsync: static-rsync/rsync
+	${INSTALL} -p -m 555 static-rsync/rsync $@
+
+install-scripts: \
+	${DESTDIR}${bindir} \
+	${DESTDIR}${bindir}/rcynic-text \
+	${DESTDIR}${bindir}/rcynic-html \
+	${DESTDIR}${bindir}/rcynic-svn \
+	${DESTDIR}${bindir}/rcynic-cron \
+	${DESTDIR}${bindir}/validation_status
+
+${DESTDIR}${bindir}/rcynic-text: rcynic-text
+	${INSTALL} -p -m 555 rcynic-text $@
+
+${DESTDIR}${bindir}/rcynic-html: rcynic-html
+	${INSTALL} -p -m 555 rcynic-html $@
+
+${DESTDIR}${bindir}/rcynic-svn: rcynic-svn
+	${INSTALL} -p -m 555 rcynic-svn $@
+
+${DESTDIR}${bindir}/rcynic-cron: rcynic-cron
+	${INSTALL} -p -m 555 rcynic-cron $@
+
+${DESTDIR}${bindir}/validation_status: validation_status
+	${INSTALL} -p -m 555 validation_status $@
+
+.FORCE:
+
+install-crontab: .FORCE
+	@if test "X`/usr/bin/crontab -l -u ${RCYNIC_CRON_USER} 2>/dev/null`" != "X"; \
+	then \
+		echo "${RCYNIC_CRON_USER} already has a crontab, leaving it alone"; \
+	else \
+		echo "Setting up ${RCYNIC_CRON_USER}'s crontab to run rcynic-cron script"; \
+		${AWK} -v t=`hexdump -n 2 -e '"%u\n"' /dev/random` '\
+			BEGIN {printf "MAILTO=root\n%u * * * *\texec ${bindir}/rcynic-cron\n", t % 60}' | \
+		/usr/bin/crontab -u ${RCYNIC_CRON_USER} -; \
+	fi
+
+# Platform-specific rules below here.
+
+@RCYNIC_MAKE_RULES@
diff --git a/rp/rcynic/README b/rp/rcynic/README
new file mode 100644
index 00000000..ecc92ac5
--- /dev/null
+++ b/rp/rcynic/README
@@ -0,0 +1,13 @@
+$Id$
+
+"Cynical rsync" -- fetch and validate RPKI certificates.
+
+This is the primary RPKI relying party validation tool.
+
+See:
+
+- The primary documentation at http://trac.rpki.net/
+
+- The PDF manual in ../doc/manual.pdf, or
+
+- The flat text page ../doc/doc.RPKI.RP.rcynic
diff --git a/rp/rcynic/bio_f_linebreak.c b/rp/rcynic/bio_f_linebreak.c
new file mode 100644
index 00000000..c2d9fb4d
--- /dev/null
+++ b/rp/rcynic/bio_f_linebreak.c
@@ -0,0 +1,268 @@
+/* $Id$ */
+
+/** @file bio_f_linebreak.c
+ *
+ * This implements a trivial filter BIO (see OpenSSL manual) which
+ * does one rather silly thing: on read, it inserts line breaks into
+ * the input stream at regular intervals.
+ *
+ * You might reasonably ask why anyone would want such a thing. The
+ * answer is that OpenSSL's Base64 filter BIO has two input modes,
+ * neither of which is really useful for reading generalized Base64
+ * input. In one mode, it requires line breaks at most every 79
+ * characters; in the other mode, it requires that there be no
+ * whitespace of any kind at all. These modes work for the things
+ * that OpenSSL itself does with Base64 decoding, but fail miserably
+ * when used to read free-form Base64 text.
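+ *
+ * (Editor's sketch, not from the original author: typical use is
+ * presumably to stack this filter under OpenSSL's Base64 BIO, e.g.
+ *
+ *   BIO *b64 = BIO_new(BIO_f_base64());
+ *   BIO *in = BIO_new_fd(0, 1);
+ *   in = BIO_push(b64, BIO_push(BIO_new(BIO_f_linebreak()), in));
+ *
+ * so that free-form Base64 arrives at the Base64 BIO already broken
+ * into lines it is willing to parse.)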
+ * + * The real solution would be to rewrite OpenSSL's Base64 filter to + * support a third mode in which it accepts generalized Base64 text, + * but that's been suggested before and nothing has been done about + * it, probably because OpenSSL's Base64 implementation is completely + * line-oriented and rather nasty. + * + * So this filter is a stop-gap to let us get the job done. Since it + * uses a (relatively) well-defined OpenSSL internal API, it should be + * reasonably stable. + * + * 98% of the code in this module is derived from "null filter" BIO + * that ships with OpenSSL (BIO_TYPE_NULL_FILTER), so I consider this + * to be a derivative work, thus am leaving it under OpenSSL's license. + */ + +/* Original crypto/bio/bf_null.c code was: + * + * Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#include <stdio.h> +#include <errno.h> +#include <string.h> +#include <openssl/bio.h> + +#include "bio_f_linebreak.h" + +#ifndef BIO_TYPE_LINEBREAK_FILTER +#define BIO_TYPE_LINEBREAK_FILTER (99 | BIO_TYPE_FILTER) +#endif + +#ifndef LINEBREAK_MAX_LINE +#define LINEBREAK_MAX_LINE 72 /* We break anything longer than this */ +#endif + +static int linebreak_new(BIO *b) +{ + b->init = 1; + b->ptr = NULL; + b->flags = 0; + b->num = 0; + return 1; +} + +static int linebreak_free(BIO *b) +{ + return b != NULL; +} + +static int linebreak_read(BIO *b, char *out, int outl) +{ + int ret = 0, want, n, i; + + if (out == NULL || b->next_bio == NULL || outl <= 0) + return 0; + + while (outl > 0) { + + if (b->num >= LINEBREAK_MAX_LINE) { + b->num = 0; + *out++ = '\n'; + outl--; + ret++; + continue; + } + + want = LINEBREAK_MAX_LINE - b->num; + if (want > outl) + want = outl; + + n = BIO_read(b->next_bio, out, want); + + BIO_clear_retry_flags(b); + BIO_copy_next_retry(b); + + if (n > 0) { + for (i = n - 1; i >= 0; i--) + if (out[i] == '\n') + break; + if (i >= 0) + b->num = n - i - 1; + else + b->num += n; + out += n; + outl -= n; + ret += n; + continue; + } + + if (ret == 0) + ret = n; + break; + } + + return ret; +} + +static int linebreak_write(BIO *b, const char *in, int inl) +{ + int ret = 0; + + if (in == NULL || inl <= 0 || b->next_bio == NULL) + return 0; + + ret = BIO_write(b->next_bio, in, inl); + + BIO_clear_retry_flags(b); + BIO_copy_next_retry(b); + + return ret; +} + +static long linebreak_ctrl(BIO *b, int cmd, long num, void *ptr) +{ + long ret; + + if (b->next_bio == NULL) + return 0; + + switch (cmd) { + + case BIO_C_DO_STATE_MACHINE: + BIO_clear_retry_flags(b); + ret = BIO_ctrl(b->next_bio, cmd, num, ptr); + BIO_copy_next_retry(b); + return ret; + + case BIO_CTRL_DUP: + return 0; + + default: + return BIO_ctrl(b->next_bio, cmd, num, ptr); + + } +} + +static long linebreak_callback_ctrl(BIO *b, int cmd, bio_info_cb *cb) +{ + if (b->next_bio == NULL) + return 0; + else + return BIO_callback_ctrl(b->next_bio, cmd, cb); +} + +static int linebreak_puts(BIO *b, const char *str) +{ + if (b->next_bio == NULL) + return 0; + else + return BIO_puts(b->next_bio, str); +} + +static BIO_METHOD methods_linebreak = { + BIO_TYPE_LINEBREAK_FILTER, + "Linebreak filter", + linebreak_write, + linebreak_read, + linebreak_puts, + NULL, /* No linebreak_gets() */ + linebreak_ctrl, + linebreak_new, + linebreak_free, + linebreak_callback_ctrl, +}; + +BIO_METHOD *BIO_f_linebreak(void) +{ + return &methods_linebreak; +} + + +#ifdef __BIO_F_LINEBREAK_UNIT_TEST__ + +int main (int argc, char *argv[]) +{ + BIO *ich = BIO_new_fd(0, 1); + BIO *och = BIO_new_fd(1, 1); + BIO *fch = BIO_new(BIO_f_linebreak()); + char buffer[4098]; + int n; + + if 
(ich == NULL || och == NULL || fch == NULL) + return 1; + + BIO_push(fch, ich); + ich = fch; + fch = NULL; + + while ((n = BIO_read(ich, buffer, sizeof(buffer))) > 0) + BIO_write(och, buffer, n); + + BIO_free_all(ich); + BIO_free_all(och); + return 0; +} + +#endif diff --git a/rp/rcynic/bio_f_linebreak.h b/rp/rcynic/bio_f_linebreak.h new file mode 100644 index 00000000..b5becfa6 --- /dev/null +++ b/rp/rcynic/bio_f_linebreak.h @@ -0,0 +1,10 @@ +/* $Id$ */ + +#ifndef __BIO_F_LINEBREAK__ +#define __BIO_F_LINEBREAK__ + +#include <openssl/bio.h> + +BIO_METHOD *BIO_f_linebreak(void); + +#endif /* __BIO_F_LINEBREAK__ */ diff --git a/rp/rcynic/defstack.h b/rp/rcynic/defstack.h new file mode 100644 index 00000000..370c8129 --- /dev/null +++ b/rp/rcynic/defstack.h @@ -0,0 +1,134 @@ +/* + * Automatically generated, do not edit. + * Generator $Id: defstack.py 4878 2012-11-15 22:13:53Z sra $ + */ + +#ifndef __RCYNIC_C__DEFSTACK_H__ +#define __RCYNIC_C__DEFSTACK_H__ + +/* + * Safestack macros for validation_status_t. + */ +#define sk_validation_status_t_new(st) SKM_sk_new(validation_status_t, (st)) +#define sk_validation_status_t_new_null() SKM_sk_new_null(validation_status_t) +#define sk_validation_status_t_free(st) SKM_sk_free(validation_status_t, (st)) +#define sk_validation_status_t_num(st) SKM_sk_num(validation_status_t, (st)) +#define sk_validation_status_t_value(st, i) SKM_sk_value(validation_status_t, (st), (i)) +#define sk_validation_status_t_set(st, i, val) SKM_sk_set(validation_status_t, (st), (i), (val)) +#define sk_validation_status_t_zero(st) SKM_sk_zero(validation_status_t, (st)) +#define sk_validation_status_t_push(st, val) SKM_sk_push(validation_status_t, (st), (val)) +#define sk_validation_status_t_unshift(st, val) SKM_sk_unshift(validation_status_t, (st), (val)) +#define sk_validation_status_t_find(st, val) SKM_sk_find(validation_status_t, (st), (val)) +#define sk_validation_status_t_find_ex(st, val) SKM_sk_find_ex(validation_status_t, (st), (val)) +#define sk_validation_status_t_delete(st, i) SKM_sk_delete(validation_status_t, (st), (i)) +#define sk_validation_status_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(validation_status_t, (st), (ptr)) +#define sk_validation_status_t_insert(st, val, i) SKM_sk_insert(validation_status_t, (st), (val), (i)) +#define sk_validation_status_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(validation_status_t, (st), (cmp)) +#define sk_validation_status_t_dup(st) SKM_sk_dup(validation_status_t, st) +#define sk_validation_status_t_pop_free(st, free_func) SKM_sk_pop_free(validation_status_t, (st), (free_func)) +#define sk_validation_status_t_shift(st) SKM_sk_shift(validation_status_t, (st)) +#define sk_validation_status_t_pop(st) SKM_sk_pop(validation_status_t, (st)) +#define sk_validation_status_t_sort(st) SKM_sk_sort(validation_status_t, (st)) +#define sk_validation_status_t_is_sorted(st) SKM_sk_is_sorted(validation_status_t, (st)) + +/* + * Safestack macros for walk_ctx_t. 
+ */ +#define sk_walk_ctx_t_new(st) SKM_sk_new(walk_ctx_t, (st)) +#define sk_walk_ctx_t_new_null() SKM_sk_new_null(walk_ctx_t) +#define sk_walk_ctx_t_free(st) SKM_sk_free(walk_ctx_t, (st)) +#define sk_walk_ctx_t_num(st) SKM_sk_num(walk_ctx_t, (st)) +#define sk_walk_ctx_t_value(st, i) SKM_sk_value(walk_ctx_t, (st), (i)) +#define sk_walk_ctx_t_set(st, i, val) SKM_sk_set(walk_ctx_t, (st), (i), (val)) +#define sk_walk_ctx_t_zero(st) SKM_sk_zero(walk_ctx_t, (st)) +#define sk_walk_ctx_t_push(st, val) SKM_sk_push(walk_ctx_t, (st), (val)) +#define sk_walk_ctx_t_unshift(st, val) SKM_sk_unshift(walk_ctx_t, (st), (val)) +#define sk_walk_ctx_t_find(st, val) SKM_sk_find(walk_ctx_t, (st), (val)) +#define sk_walk_ctx_t_find_ex(st, val) SKM_sk_find_ex(walk_ctx_t, (st), (val)) +#define sk_walk_ctx_t_delete(st, i) SKM_sk_delete(walk_ctx_t, (st), (i)) +#define sk_walk_ctx_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(walk_ctx_t, (st), (ptr)) +#define sk_walk_ctx_t_insert(st, val, i) SKM_sk_insert(walk_ctx_t, (st), (val), (i)) +#define sk_walk_ctx_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(walk_ctx_t, (st), (cmp)) +#define sk_walk_ctx_t_dup(st) SKM_sk_dup(walk_ctx_t, st) +#define sk_walk_ctx_t_pop_free(st, free_func) SKM_sk_pop_free(walk_ctx_t, (st), (free_func)) +#define sk_walk_ctx_t_shift(st) SKM_sk_shift(walk_ctx_t, (st)) +#define sk_walk_ctx_t_pop(st) SKM_sk_pop(walk_ctx_t, (st)) +#define sk_walk_ctx_t_sort(st) SKM_sk_sort(walk_ctx_t, (st)) +#define sk_walk_ctx_t_is_sorted(st) SKM_sk_is_sorted(walk_ctx_t, (st)) + +/* + * Safestack macros for rsync_ctx_t. + */ +#define sk_rsync_ctx_t_new(st) SKM_sk_new(rsync_ctx_t, (st)) +#define sk_rsync_ctx_t_new_null() SKM_sk_new_null(rsync_ctx_t) +#define sk_rsync_ctx_t_free(st) SKM_sk_free(rsync_ctx_t, (st)) +#define sk_rsync_ctx_t_num(st) SKM_sk_num(rsync_ctx_t, (st)) +#define sk_rsync_ctx_t_value(st, i) SKM_sk_value(rsync_ctx_t, (st), (i)) +#define sk_rsync_ctx_t_set(st, i, val) SKM_sk_set(rsync_ctx_t, (st), (i), (val)) +#define sk_rsync_ctx_t_zero(st) SKM_sk_zero(rsync_ctx_t, (st)) +#define sk_rsync_ctx_t_push(st, val) SKM_sk_push(rsync_ctx_t, (st), (val)) +#define sk_rsync_ctx_t_unshift(st, val) SKM_sk_unshift(rsync_ctx_t, (st), (val)) +#define sk_rsync_ctx_t_find(st, val) SKM_sk_find(rsync_ctx_t, (st), (val)) +#define sk_rsync_ctx_t_find_ex(st, val) SKM_sk_find_ex(rsync_ctx_t, (st), (val)) +#define sk_rsync_ctx_t_delete(st, i) SKM_sk_delete(rsync_ctx_t, (st), (i)) +#define sk_rsync_ctx_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(rsync_ctx_t, (st), (ptr)) +#define sk_rsync_ctx_t_insert(st, val, i) SKM_sk_insert(rsync_ctx_t, (st), (val), (i)) +#define sk_rsync_ctx_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(rsync_ctx_t, (st), (cmp)) +#define sk_rsync_ctx_t_dup(st) SKM_sk_dup(rsync_ctx_t, st) +#define sk_rsync_ctx_t_pop_free(st, free_func) SKM_sk_pop_free(rsync_ctx_t, (st), (free_func)) +#define sk_rsync_ctx_t_shift(st) SKM_sk_shift(rsync_ctx_t, (st)) +#define sk_rsync_ctx_t_pop(st) SKM_sk_pop(rsync_ctx_t, (st)) +#define sk_rsync_ctx_t_sort(st) SKM_sk_sort(rsync_ctx_t, (st)) +#define sk_rsync_ctx_t_is_sorted(st) SKM_sk_is_sorted(rsync_ctx_t, (st)) + +/* + * Safestack macros for rsync_history_t. 
+ */ +#define sk_rsync_history_t_new(st) SKM_sk_new(rsync_history_t, (st)) +#define sk_rsync_history_t_new_null() SKM_sk_new_null(rsync_history_t) +#define sk_rsync_history_t_free(st) SKM_sk_free(rsync_history_t, (st)) +#define sk_rsync_history_t_num(st) SKM_sk_num(rsync_history_t, (st)) +#define sk_rsync_history_t_value(st, i) SKM_sk_value(rsync_history_t, (st), (i)) +#define sk_rsync_history_t_set(st, i, val) SKM_sk_set(rsync_history_t, (st), (i), (val)) +#define sk_rsync_history_t_zero(st) SKM_sk_zero(rsync_history_t, (st)) +#define sk_rsync_history_t_push(st, val) SKM_sk_push(rsync_history_t, (st), (val)) +#define sk_rsync_history_t_unshift(st, val) SKM_sk_unshift(rsync_history_t, (st), (val)) +#define sk_rsync_history_t_find(st, val) SKM_sk_find(rsync_history_t, (st), (val)) +#define sk_rsync_history_t_find_ex(st, val) SKM_sk_find_ex(rsync_history_t, (st), (val)) +#define sk_rsync_history_t_delete(st, i) SKM_sk_delete(rsync_history_t, (st), (i)) +#define sk_rsync_history_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(rsync_history_t, (st), (ptr)) +#define sk_rsync_history_t_insert(st, val, i) SKM_sk_insert(rsync_history_t, (st), (val), (i)) +#define sk_rsync_history_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(rsync_history_t, (st), (cmp)) +#define sk_rsync_history_t_dup(st) SKM_sk_dup(rsync_history_t, st) +#define sk_rsync_history_t_pop_free(st, free_func) SKM_sk_pop_free(rsync_history_t, (st), (free_func)) +#define sk_rsync_history_t_shift(st) SKM_sk_shift(rsync_history_t, (st)) +#define sk_rsync_history_t_pop(st) SKM_sk_pop(rsync_history_t, (st)) +#define sk_rsync_history_t_sort(st) SKM_sk_sort(rsync_history_t, (st)) +#define sk_rsync_history_t_is_sorted(st) SKM_sk_is_sorted(rsync_history_t, (st)) + +/* + * Safestack macros for task_t. + */ +#define sk_task_t_new(st) SKM_sk_new(task_t, (st)) +#define sk_task_t_new_null() SKM_sk_new_null(task_t) +#define sk_task_t_free(st) SKM_sk_free(task_t, (st)) +#define sk_task_t_num(st) SKM_sk_num(task_t, (st)) +#define sk_task_t_value(st, i) SKM_sk_value(task_t, (st), (i)) +#define sk_task_t_set(st, i, val) SKM_sk_set(task_t, (st), (i), (val)) +#define sk_task_t_zero(st) SKM_sk_zero(task_t, (st)) +#define sk_task_t_push(st, val) SKM_sk_push(task_t, (st), (val)) +#define sk_task_t_unshift(st, val) SKM_sk_unshift(task_t, (st), (val)) +#define sk_task_t_find(st, val) SKM_sk_find(task_t, (st), (val)) +#define sk_task_t_find_ex(st, val) SKM_sk_find_ex(task_t, (st), (val)) +#define sk_task_t_delete(st, i) SKM_sk_delete(task_t, (st), (i)) +#define sk_task_t_delete_ptr(st, ptr) SKM_sk_delete_ptr(task_t, (st), (ptr)) +#define sk_task_t_insert(st, val, i) SKM_sk_insert(task_t, (st), (val), (i)) +#define sk_task_t_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(task_t, (st), (cmp)) +#define sk_task_t_dup(st) SKM_sk_dup(task_t, st) +#define sk_task_t_pop_free(st, free_func) SKM_sk_pop_free(task_t, (st), (free_func)) +#define sk_task_t_shift(st) SKM_sk_shift(task_t, (st)) +#define sk_task_t_pop(st) SKM_sk_pop(task_t, (st)) +#define sk_task_t_sort(st) SKM_sk_sort(task_t, (st)) +#define sk_task_t_is_sorted(st) SKM_sk_is_sorted(task_t, (st)) + +#endif /* __RCYNIC_C__DEFSTACK_H__ */ diff --git a/rp/rcynic/make-tal.sh b/rp/rcynic/make-tal.sh new file mode 100755 index 00000000..854a76b9 --- /dev/null +++ b/rp/rcynic/make-tal.sh @@ -0,0 +1,42 @@ +#!/bin/sh - +# $Id$ +# +# Copyright (C) 2010 Internet Systems Consortium, Inc. 
("ISC") +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH +# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, +# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE +# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. + +# Generate a trust anchor locator (TAL) given the rsync URI for a +# self-signed RFC 3779 certificate. +# +# Usage: make-tal.sh uri [local_copy_of_certificate] +# +# The optional second parameter is the name of a local copy of the +# certificate to be checked against the copy retrieved from the URI; +# if present, this should be a local X.509 file in DER format. + +case "$1" in rsync://*) :;; *) echo 1>&2 "\"$1\" is not a rsync URI"; exit 1;; esac + +tmpfile="make-tal.tmp.$$" +trap "rm -f $tmpfile" 0 1 2 15 + +rsync "$1" "$tmpfile" || exit + +if test -n "$2" +then + diff -q "$tmpfile" "$2" || exit +fi + +echo "$1" +echo +openssl x509 -inform DER -in "$tmpfile" -pubkey -noout | +awk '!/-----(BEGIN|END)/' diff --git a/rp/rcynic/rc-scripts/darwin/RCynic b/rp/rcynic/rc-scripts/darwin/RCynic new file mode 100755 index 00000000..d486a3c3 --- /dev/null +++ b/rp/rcynic/rc-scripts/darwin/RCynic @@ -0,0 +1,42 @@ +#!/bin/sh - +# +# $Id$ +# +. /etc/rc.common + +name="rcynic" +start_cmd="rcynic_start" +stop_cmd="rcynic_stop" + +: ${rcynic_dir="/var/rcynic"} + +StartService() +{ + /sbin/umount "${rcynic_dir}/dev" 2>/dev/null + + if ! /sbin/mount_devfs devfs "${rcynic_dir}/dev"; then + echo "Mounting devfs on ${rcynic_dir}/dev failed..." + exit 1 + fi + + for i in /etc/localtime /etc/resolv.conf; do + j="${rcynic_dir}${i}" + if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then + /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j" + fi + done + + /bin/ln -f /var/run/mDNSResponder "${rcynic_dir}/var/run/mDNSResponder" +} + +StopService() +{ + /sbin/umount "${rcynic_dir}/dev" 2>/dev/null +} + +RestartService() +{ + StartService +} + +RunService "$1" diff --git a/rp/rcynic/rc-scripts/darwin/StartupParameters.plist b/rp/rcynic/rc-scripts/darwin/StartupParameters.plist new file mode 100644 index 00000000..ca46b676 --- /dev/null +++ b/rp/rcynic/rc-scripts/darwin/StartupParameters.plist @@ -0,0 +1,19 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>Description</key> + <string>RCynic Setup</string> + <key>OrderPreference</key> + <string>None</string> + <key>Provides</key> + <array> + <string>RCynic</string> + </array> + <key>Uses</key> + <array> + <string>Network</string> + <string>Resolver</string> + </array> + </dict> +</plist> diff --git a/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic b/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic new file mode 100755 index 00000000..9b7aa545 --- /dev/null +++ b/rp/rcynic/rc-scripts/freebsd/rc.d.rcynic @@ -0,0 +1,44 @@ +#!/bin/sh - +# +# $Id$ +# +# PROVIDE: rcynic +# REQUIRE: DAEMON +# KEYWORD: nojail + +. 
/etc/rc.subr + +name="rcynic" +start_cmd="rcynic_start" +stop_cmd="rcynic_stop" + +: ${rcynic_dir="/var/rcynic"} + +rcynic_start() +{ + /sbin/umount "${rcynic_dir}/dev" 2>/dev/null + + if ! /sbin/mount -t devfs dev "${rcynic_dir}/dev"; then + echo "Mounting devfs on ${rcynic_dir}/dev failed..." + exit 1 + fi + + /sbin/devfs -m "${rcynic_dir}/dev" rule apply hide + /sbin/devfs -m "${rcynic_dir}/dev" rule apply path null unhide + /sbin/devfs -m "${rcynic_dir}/dev" rule apply path random unhide + + for i in /etc/localtime /etc/resolv.conf; do + j="${rcynic_dir}${i}" + if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then + /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j" + fi + done +} + +rcynic_stop() +{ + /sbin/umount "${rcynic_dir}/dev" 2>/dev/null +} + +load_rc_config $name +run_rc_command "$1" diff --git a/rp/rcynic/rcynic-cron b/rp/rcynic/rcynic-cron new file mode 100755 index 00000000..fbe1ebeb --- /dev/null +++ b/rp/rcynic/rcynic-cron @@ -0,0 +1,106 @@ +# $Id$ +# +# Copyright (C) 2014 Dragon Research Labs ("DRL") +# Portions copyright (C) 2013 Internet Systems Consortium ("ISC") +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notices and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL +# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR +# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA +# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. + +""" +Cron job for rcynic and rtr-origin in stock configuration. + +Locking code here works like FreeBSD's lockf(1) utility given -k and +-t 0 options, which is both the sanest and simplest combination for +our purposes. In theory this is portable to any Unix-like system. 
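+
+What the code below does boils down to this sketch (the real lock path
+is substituted into the script at install time):
+
+    lock = os.open("data/lock", os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
+    fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)  # EAGAIN: already running
+
+The descriptor is deliberately kept open so that the lock is held for
+the lifetime of the process.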
+""" + +import os +import sys +import pwd +import fcntl +import errno +import argparse + +def run(*cmd, **kwargs): + chroot_this = kwargs.pop("chroot_this", False) + cwd = kwargs.pop("cwd", None) + pid = os.fork() + if pid == 0: + if chroot_this: + os.chdir(ac_rcynic_dir) + elif cwd is not None: + os.chdir(cwd) + if we_are_root: + os.initgroups(pw.pw_name, pw.pw_gid) + if chroot_this: + os.chroot(ac_rcynic_dir) + if we_are_root: + os.setgid(pw.pw_gid) + os.setuid(pw.pw_uid) + os.closerange(3, os.sysconf("SC_OPEN_MAX")) + os.execvp(cmd[0], cmd) + os._exit(1) + else: + status = os.waitpid(pid, 0)[1] + if status == 0: + return + elif os.WIFSIGNALED(status): + sys.exit("Process %s exited with signal %s" % (" ".join(cmd), os.WTERMSIG(status))) + elif os.WIFEXITED(status): + sys.exit("Program %s exited with status %s" % (" ".join(cmd), os.WEXITSTATUS(status))) + else: + sys.exit("Program %s exited for unknown reason %s" % (" ".join(cmd), status)) + +parser = argparse.ArgumentParser(description = __doc__) +parser.add_argument("--chroot", action = "store_true", help = "run chrooted") +args = parser.parse_args() + +we_are_root = os.getuid() == 0 + +if args.chroot and not we_are_root: + sys.exit("Only root can --chroot") + +try: + pw = pwd.getpwnam(ac_rcynic_user) +except KeyError: + sys.exit("Could not find passwd entry for user %s" % ac_rcynic_user) + +try: + lock = os.open(os.path.join(ac_rcynic_dir, "data/lock"), os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666) + fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB) + if we_are_root: + os.fchown(lock, pw.pw_uid, pw.pw_gid) +except (IOError, OSError), e: + if e.errno == errno.EAGAIN: + sys.exit(0) # Another instance of this script is already running, exit silently + else: + sys.exit("Error %r opening lock %r" % (e.strerror, os.path.join(ac_rcynic_dir, "data/lock"))) + +if args.chroot: + run("/bin/rcynic", "-c", "/etc/rcynic.conf", chroot_this = True) +else: + run(os.path.join(ac_bindir, "rcynic"), "-c", os.path.join(ac_sysconfdir, "rcynic.conf")) + +run(os.path.join(ac_bindir, "rtr-origin"), + "--cronjob", + os.path.join(ac_rcynic_dir, "data/authenticated"), + cwd = os.path.join(ac_rcynic_dir, "rpki-rtr")) + +prog = os.path.join(ac_libexecdir, "rpkigui-rcynic") +if os.path.exists(prog): + run(prog) + +if ac_rcynic_html_dir and os.path.exists(os.path.dirname(ac_rcynic_html_dir)): + run(os.path.join(ac_bindir, "rcynic-html"), + os.path.join(ac_rcynic_dir, "data/rcynic.xml"), + ac_rcynic_html_dir) diff --git a/rp/rcynic/rcynic-html b/rp/rcynic/rcynic-html new file mode 100755 index 00000000..58e65dde --- /dev/null +++ b/rp/rcynic/rcynic-html @@ -0,0 +1,658 @@ +# $Id$ +# +# Copyright (C) 2013--2014 Dragon Research Labs ("DRL") +# Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC") +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notices and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL +# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR +# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA +# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. 
+ +""" +Render rcynic's XML output to basic (X)HTML with some rrdtool graphics. +""" + +import sys +import urlparse +import os +import argparse +import time +import subprocess +import copy + +try: + from lxml.etree import (ElementTree, Element, SubElement, Comment) +except ImportError: + from xml.etree.ElementTree import (ElementTree, Element, SubElement, Comment) + +session = None +args = None + +def parse_options(): + + global args + + try: + default_rrdtool_binary = ac_rrdtool_binary + except NameError: + default_rrdtool_binary = "rrdtool" + + parser = argparse.ArgumentParser(description = __doc__) + parser.add_argument("--refresh", type = int, default = 1800, + help = "refresh interval for generated HTML") + parser.add_argument("--hide-problems", action = "store_true", + help = "don't generate \"problems\" page") + parser.add_argument("--hide-graphs", action = "store_true", + help = "don't generate graphs") + parser.add_argument("--hide-object-counts", action = "store_true", + help = "don't display object counts") + parser.add_argument("--dont-update-rrds", action = "store_true", + help = "don't add new data to RRD databases") + parser.add_argument("--png-height", type = int, default = 190, + help = "height of PNG images") + parser.add_argument("--png-width", type = int, default = 1350, + help = "width of PNG images") + parser.add_argument("--svg-height", type = int, default = 600, + help = "height of SVG images") + parser.add_argument("--svg-width", type = int, default = 1200, + help = "width of SVG images") + parser.add_argument("--eps-height", type = int, default = 0, + help = "height of EPS images") + parser.add_argument("--eps-width", type = int, default = 0, + help = "width of EPS images") + parser.add_argument("--rrdtool-binary", default = default_rrdtool_binary, + help = "location of rrdtool binary") + parser.add_argument("input_file", type = argparse.FileType("r"), + help = "XML input file") + parser.add_argument("output_directory", + help = "output directory") + args = parser.parse_args() + + +def parse_utc(s): + return int(time.mktime(time.strptime(s, "%Y-%m-%dT%H:%M:%SZ"))) + +class Label(object): + + moods = ["bad", "warn", "good"] + + def __init__(self, elt): + self.code = elt.tag + self.mood = elt.get("kind") + self.text = elt.text.strip() + self.count = 0 + + def get_count(self): + return self.count + + @property + def sort_key(self): + try: + return self.moods.index(self.mood) + except ValueError: + return len(self.moods) + +class Validation_Status(object): + + def __init__(self, elt, label_map): + self.uri = elt.text.strip() + self.timestamp = elt.get("timestamp") + self.generation = elt.get("generation") + self.hostname = urlparse.urlparse(self.uri).hostname or "[None]" + self.fn2 = os.path.splitext(self.uri)[1] or None if self.generation else None + self.label = label_map[elt.get("status")] + + def sort_key(self): + return (self.label.sort_key, self.timestamp, self.hostname, self.fn2, self.generation) + + @property + def code(self): + return self.label.code + + @property + def mood(self): + return self.label.mood + + @property + def accepted(self): + return self.label.code == "object_accepted" + + @property + def rejected(self): + return self.label.code == "object_rejected" + + @property + def is_current(self): + return self.generation == "current" + + @property + def is_backup(self): + return self.generation == "backup" + + @property + def is_problem(self): + return self.label.mood != "good" + + @property + def is_connection_problem(self): + return self.label.mood 
!= "good" and self.label.code.startswith("rsync_transfer_") + + @property + def is_object_problem(self): + return self.label.mood != "good" and not self.label.code.startswith("rsync_transfer_") + + @property + def is_connection_detail(self): + return self.label.code.startswith("rsync_transfer_") + + @property + def is_object_detail(self): + return not self.label.code.startswith("rsync_transfer_") + +class Problem_Mixin(object): + + @property + def connection_problems(self): + result = [v for v in self.validation_status if v.is_connection_problem] + result.sort(key = Validation_Status.sort_key) + return result + + @property + def object_problems(self): + result = [v for v in self.validation_status if v.is_object_problem] + result.sort(key = Validation_Status.sort_key) + return result + +class Host(Problem_Mixin): + + def __init__(self, hostname, timestamp): + self.hostname = hostname + self.timestamp = timestamp + self.elapsed = 0 + self.connections = 0 + self.failures = 0 + self.uris = set() + self.graph = None + self.counters = {} + self.totals = {} + self.validation_status = [] + + def add_connection(self, elt): + self.elapsed += parse_utc(elt.get("finished")) - parse_utc(elt.get("started")) + self.connections += 1 + if elt.get("error") is not None: + self.failures += 1 + + def add_validation_status(self, v): + self.validation_status.append(v) + if v.generation == "current": + self.uris.add(v.uri) + self.counters[(v.fn2, v.generation, v.label)] = self.get_counter(v.fn2, v.generation, v.label) + 1 + self.totals[v.label] = self.get_total(v.label) + 1 + v.label.count += 1 + + def get_counter(self, fn2, generation, label): + return self.counters.get((fn2, generation, label), 0) + + def get_total(self, label): + return self.totals.get(label, 0) + + @property + def failed(self): + return 1 if self.failures > 0 else 0 + + @property + def objects(self): + return len(self.uris) + + field_table = (("connections", "GAUGE"), + ("objects", "GAUGE"), + ("elapsed", "GAUGE"), + ("failed", "ABSOLUTE")) + + rras = tuple("RRA:AVERAGE:0.5:%s:9600" % steps + for steps in (1, 4, 24)) + + @classmethod + def field_ds_specifiers(cls, heartbeat = 24 * 60 * 60, minimum = 0, maximum = "U"): + return ["DS:%s:%s:%s:%s:%s" % (field[0], field[1], heartbeat, minimum, maximum) + for field in cls.field_table] + + @property + def field_values(self): + return tuple(str(getattr(self, field[0])) for field in self.field_table) + + @classmethod + def field_defs(cls, filebase): + return ["DEF:%s=%s.rrd:%s:AVERAGE" % (field[0], filebase, field[0]) + for field in cls.field_table] + + graph_opts = ( + "--vertical-label", "Sync time (seconds)", + "--right-axis-label", "Objects (count)", + "--lower-limit", "0", + "--right-axis", "1:0", + "--full-size-mode" ) + + graph_cmds = ( + + # Split elapsed into separate data sets, so we can color + # differently to indicate how succesful transfer was. Intent is + # that exactly one of these be defined for every value in elapsed. + + "CDEF:success=failed,UNKN,elapsed,IF", + "CDEF:failure=connections,1,EQ,failed,*,elapsed,UNKN,IF", + "CDEF:partial=connections,1,NE,failed,*,elapsed,UNKN,IF", + + # Show connection timing first, as color-coded semi-transparent + # areas with opaque borders. Intent is to make the colors stand + # out, since they're a major health indicator. Transparency is + # handled via an alpha channel (fourth octet of color code). We + # draw this stuff first so that later lines can overwrite it. 
+ + "AREA:success#00FF0080:Sync time (success)", + "AREA:partial#FFA50080:Sync time (partial failure)", + "AREA:failure#FF000080:Sync time (total failure)", + + "LINE1:success#00FF00", # Green + "LINE1:partial#FFA500", # Orange + "LINE1:failure#FF0000", # Red + + # Now show object counts, as a simple black line. + + "LINE1:objects#000000:Objects", # Black + + # Add averages over period to chart legend. + + "VDEF:avg_elapsed=elapsed,AVERAGE", + "VDEF:avg_connections=connections,AVERAGE", + "VDEF:avg_objects=objects,AVERAGE", + "COMMENT:\j", + "GPRINT:avg_elapsed:Average sync time (seconds)\: %5.2lf", + "GPRINT:avg_connections:Average connection count\: %5.2lf", + "GPRINT:avg_objects:Average object count\: %5.2lf" ) + + graph_periods = (("week", "-1w"), + ("month", "-31d"), + ("year", "-1y")) + + def rrd_run(self, cmd): + try: + cmd = [str(i) for i in cmd] + cmd.insert(0, args.rrdtool_binary) + subprocess.check_call(cmd, stdout = open("/dev/null", "w")) + except OSError, e: + sys.exit("Problem running %s, perhaps you need to set --rrdtool-binary? (%s)" % (args.rrdtool_binary, e)) + except subprocess.CalledProcessError, e: + sys.exit("Failure running %s: %s" % (args.rrdtool_binary, e)) + + def rrd_update(self): + filename = os.path.join(args.output_directory, self.hostname) + ".rrd" + if not os.path.exists(filename): + cmd = ["create", filename, "--start", self.timestamp - 1, "--step", "3600"] + cmd.extend(self.field_ds_specifiers()) + cmd.extend(self.rras) + self.rrd_run(cmd) + self.rrd_run(["update", filename, + "%s:%s" % (self.timestamp, ":".join(str(v) for v in self.field_values))]) + + def rrd_graph(self, html): + filebase = os.path.join(args.output_directory, self.hostname) + formats = [format for format in ("png", "svg", "eps") + if getattr(args, format + "_width") and getattr(args, format + "_height")] + for period, start in self.graph_periods: + for format in formats: + cmds = [ "graph", "%s_%s.%s" % (filebase, period, format), + "--title", "%s last %s" % (self.hostname, period), + "--start", start, + "--width", getattr(args, format + "_width"), + "--height", getattr(args, format + "_height"), + "--imgformat", format.upper() ] + cmds.extend(self.graph_opts) + cmds.extend(self.field_defs(filebase)) + cmds.extend(self.graph_cmds) + self.rrd_run(cmds) + img = Element("img", src = "%s_%s.png" % (self.hostname, period), + width = str(args.png_width), + height = str(args.png_height)) + if self.graph is None: + self.graph = copy.copy(img) + html.BodyElement("h2").text = "%s over last %s" % (self.hostname, period) + html.BodyElement("a", href = "%s_%s_svg.html" % (self.hostname, period)).append(img) + html.BodyElement("br") + svg_html = HTML("%s over last %s" % (self.hostname, period), + "%s_%s_svg" % (self.hostname, period)) + svg_html.BodyElement("img", src = "%s_%s.svg" % (self.hostname, period)) + svg_html.close() + + +class Session(Problem_Mixin): + + def __init__(self): + self.hosts = {} + + self.root = ElementTree(file = args.input_file).getroot() + + self.rcynic_version = self.root.get("rcynic-version") + self.rcynic_date = self.root.get("date") + self.timestamp = parse_utc(self.rcynic_date) + + self.labels = [Label(elt) for elt in self.root.find("labels")] + self.load_validation_status() + + for elt in self.root.findall("rsync_history"): + self.get_host(urlparse.urlparse(elt.text.strip()).hostname).add_connection(elt) + + generations = set() + fn2s = set() + + for v in self.validation_status: + self.get_host(v.hostname).add_validation_status(v) + generations.add(v.generation) + 
fn2s.add(v.fn2) + + self.labels = [l for l in self.labels if l.count > 0] + + self.hostnames = sorted(self.hosts) + self.generations = sorted(generations) + self.fn2s = sorted(fn2s) + + def load_validation_status(self): + label_map = dict((label.code, label) for label in self.labels) + full_validation_status = [Validation_Status(elt, label_map) + for elt in self.root.findall("validation_status")] + accepted_current = set(v.uri for v in full_validation_status + if v.is_current and v.accepted) + self.validation_status = [v for v in full_validation_status + if not v.is_backup + or v.uri not in accepted_current] + + def get_host(self, hostname): + if hostname not in self.hosts: + self.hosts[hostname] = Host(hostname, self.timestamp) + return self.hosts[hostname] + + def get_sum(self, fn2, generation, label): + return sum(h.get_counter(fn2, generation, label) + for h in self.hosts.itervalues()) + + def rrd_update(self): + if not args.dont_update_rrds: + for h in self.hosts.itervalues(): + h.rrd_update() + +css = ''' + th, td { + text-align: center; padding: 4px; + } + + td.uri { + text-align: left; + } + + thead tr th, tfoot tr td { + font-weight: bold; + } + + .good { + background-color: #77ff77; + } + + .warn { + background-color: yellow; + } + + .bad { + background-color: #ff5500; + } + + body { + font-family: arial, helvetica, serif; + } + + /* Make background-color inherit like color does. */ + #nav { + background-color: inherit; + } + + #nav, #nav ul { + float: left; + width: 100%; + list-style: none; + line-height: 1; + font-weight: normal; + padding: 0; + border-color: black; + border-style: solid; + border-width: 1px 0; + margin: 0 0 1em 0; + } + + #nav a, #nav span { + display: block; + background-color: white; + color: black; + text-decoration: none; + padding: 0.25em 0.75em; + } + + #nav li { + float: left; + padding: 0; + } + + /* Use <ul style="width: ..."> to set submenu width. */ + #nav li ul { + position: absolute; + display: none; + height: auto; + border-width: 1px; + margin: 0; + } + + #nav li li { + width: 100%; + } + + /* Display submenu when hovering. */ + #nav li:hover ul { + display: block; + } + + /* Reverse video when hovering. 
*/ + #nav a:hover, #nav span:hover { + color: white; + background-color: black; + } +''' + +class HTML(object): + + def __init__(self, title, filebase): + + self.filename = os.path.join(args.output_directory, filebase + ".html") + + self.html = Element("html") + self.html.append(Comment(" Generators:\n" + + " " + session.rcynic_version + "\n" + + " $Id$\n")) + self.head = SubElement(self.html, "head") + self.body = SubElement(self.html, "body") + + title += " " + session.rcynic_date + SubElement(self.head, "title").text = title + SubElement(self.body, "h1").text = title + SubElement(self.head, "style", type = "text/css").text = css + + if args.refresh: + SubElement(self.head, "meta", { "http-equiv" : "Refresh", "content" : str(args.refresh) }) + + hostwidth = max(len(hostname) for hostname in session.hostnames) + + toc = SubElement(self.body, "ul", id = "nav") + SubElement(SubElement(toc, "li"), "a", href = "index.html").text = "Overview" + li = SubElement(toc, "li") + SubElement(li, "span").text = "Repositories" + ul = SubElement(li, "ul", style = "width: %sem" % hostwidth) + for hostname in session.hostnames: + SubElement(SubElement(ul, "li"), "a", href = "%s.html" % hostname).text = hostname + SubElement(SubElement(toc, "li"), "a", href = "problems.html").text = "Problems" + li = SubElement(toc, "li") + SubElement(li, "span").text = "All Details" + ul = SubElement(li, "ul", style = "width: 15em") + SubElement(SubElement(ul, "li"), "a", href = "connections.html").text = "All Connections" + SubElement(SubElement(ul, "li"), "a", href = "objects.html").text = "All Objects" + SubElement(self.body, "br") + + def close(self): + ElementTree(element = self.html).write(self.filename) + + def BodyElement(self, tag, **attrib): + return SubElement(self.body, tag, **attrib) + + def counter_table(self, data_func, total_func): + table = self.BodyElement("table", rules = "all", border = "1") + thead = SubElement(table, "thead") + tfoot = SubElement(table, "tfoot") + tbody = SubElement(table, "tbody") + tr = SubElement(thead, "tr") + SubElement(tr, "th") + for label in session.labels: + SubElement(tr, "th").text = label.text + for fn2 in session.fn2s: + for generation in session.generations: + counters = [data_func(fn2, generation, label) for label in session.labels] + if sum(counters) > 0: + tr = SubElement(tbody, "tr") + SubElement(tr, "td").text = ((generation or "") + " " + (fn2 or "")).strip() + for label, count in zip(session.labels, counters): + td = SubElement(tr, "td") + if count > 0: + td.set("class", label.mood) + td.text = str(count) + tr = SubElement(tfoot, "tr") + SubElement(tr, "td").text = "Total" + counters = [total_func(label) for label in session.labels] + for label, count in zip(session.labels, counters): + td = SubElement(tr, "td") + if count > 0: + td.set("class", label.mood) + td.text = str(count) + return table + + def object_count_table(self, session): + table = self.BodyElement("table", rules = "all", border = "1") + thead = SubElement(table, "thead") + tbody = SubElement(table, "tbody") + tfoot = SubElement(table, "tfoot") + fn2s = [fn2 for fn2 in session.fn2s if fn2 is not None] + total = dict((fn2, 0) for fn2 in fn2s) + for hostname in session.hostnames: + tr = SubElement(tbody, "tr") + SubElement(tr, "td").text = hostname + for fn2 in fn2s: + td = SubElement(tr, "td") + count = sum(uri.endswith(fn2) for uri in session.hosts[hostname].uris) + total[fn2] += count + if count > 0: + td.text = str(count) + trhead = SubElement(thead, "tr") + trfoot = SubElement(tfoot, "tr") + 
SubElement(trhead, "th").text = "Repository" + SubElement(trfoot, "td").text = "Total" + for fn2 in fn2s: + SubElement(trhead, "th").text = fn2 + SubElement(trfoot, "td").text = str(total[fn2]) + return table + + def detail_table(self, records): + if records: + table = self.BodyElement("table", rules = "all", border = "1") + thead = SubElement(table, "thead") + tbody = SubElement(table, "tbody") + tr = SubElement(thead, "tr") + SubElement(tr, "th").text = "Timestamp" + SubElement(tr, "th").text = "Generation" + SubElement(tr, "th").text = "Status" + SubElement(tr, "th").text = "URI" + for v in records: + tr = SubElement(tbody, "tr", { "class" : v.mood }) + SubElement(tr, "td").text = v.timestamp + SubElement(tr, "td").text = v.generation + SubElement(tr, "td").text = v.label.text + SubElement(tr, "td", { "class" : "uri"}).text = v.uri + return table + else: + self.BodyElement("p").text = "None found" + return None + +def main(): + + global session + + os.putenv("TZ", "UTC") + time.tzset() + + parse_options() + + session = Session() + session.rrd_update() + + for hostname in session.hostnames: + html = HTML("Repository details for %s" % hostname, hostname) + html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total) + if not args.hide_graphs: + session.hosts[hostname].rrd_graph(html) + if not args.hide_problems: + html.BodyElement("h2").text = "Connection Problems" + html.detail_table(session.hosts[hostname].connection_problems) + html.BodyElement("h2").text = "Object Problems" + html.detail_table(session.hosts[hostname].object_problems) + html.close() + + html = HTML("rcynic summary", "index") + html.BodyElement("h2").text = "Grand totals for all repositories" + html.counter_table(session.get_sum, Label.get_count) + if not args.hide_object_counts: + html.BodyElement("br") + html.BodyElement("hr") + html.BodyElement("br") + html.BodyElement("h2").text = "Current total object counts (distinct URIs)" + html.object_count_table(session) + for hostname in session.hostnames: + html.BodyElement("br") + html.BodyElement("hr") + html.BodyElement("br") + html.BodyElement("h2").text = "Overview for repository %s" % hostname + html.counter_table(session.hosts[hostname].get_counter, session.hosts[hostname].get_total) + if not args.hide_graphs: + html.BodyElement("br") + html.BodyElement("a", href = "%s.html" % hostname).append(session.hosts[hostname].graph) + html.close() + + html = HTML("Problems", "problems") + html.BodyElement("h2").text = "Connection Problems" + html.detail_table(session.connection_problems) + html.BodyElement("h2").text = "Object Problems" + html.detail_table(session.object_problems) + html.close() + + html = HTML("All connections", "connections") + html.detail_table([v for v in session.validation_status if v.is_connection_detail]) + html.close() + + html = HTML("All objects", "objects") + html.detail_table([v for v in session.validation_status if v.is_object_detail]) + html.close() + + +if __name__ == "__main__": + main() diff --git a/rp/rcynic/rcynic-svn b/rp/rcynic/rcynic-svn new file mode 100755 index 00000000..fd0df500 --- /dev/null +++ b/rp/rcynic/rcynic-svn @@ -0,0 +1,190 @@ +# $Id$ +# +# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC") +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. 
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Archive rcynic output in a Subversion repository.
+"""
+
+import subprocess
+import argparse
+import datetime
+import fcntl
+import glob
+import os
+
+try:
+  from lxml.etree import ElementTree
+except ImportError:
+  from xml.etree.ElementTree import ElementTree
+
+
+mime_types = (
+  ("html", "application/xhtml+xml"),
+  ("cer",  "application/pkix-cert"),
+  ("crl",  "application/pkix-crl"),
+  ("mft",  "application/rpki-manifest"),
+  ("mnf",  "application/rpki-manifest"),
+  ("roa",  "application/rpki-roa"),
+  ("gbr",  "application/rpki-ghostbusters"))
+
+
+def run(*cmd, **kwargs):
+  """
+  Run a program, displaying timing data when appropriate.
+  """
+
+  t = datetime.datetime.utcnow()
+  subprocess.check_call(cmd, **kwargs)
+  if args.show_timing:
+    now = datetime.datetime.utcnow()
+    print now, (now - t), " ".join(cmd)
+
+
+def runxml(*cmd):
+  """
+  Run a program which produces XML output, displaying timing data when
+  appropriate and returning an ElementTree constructed from the
+  program's output.
+  """
+  t = datetime.datetime.utcnow()
+  p = subprocess.Popen(cmd, stdout = subprocess.PIPE)
+  x = ElementTree(file = p.stdout)
+  s = p.wait()
+  if s:
+    raise subprocess.CalledProcessError(s, cmd[0])
+  if args.show_timing:
+    now = datetime.datetime.utcnow()
+    print now, (now - t), " ".join(cmd)
+  return x
+
+
+# Main program.
+
+parser = argparse.ArgumentParser(description = __doc__)
+
+parser.add_argument("--show_timing", action = "store_true", help = \
+                    """
+                    Show timing data on programs we run.
+                    """)
+
+parser.add_argument("--verbatim", action = "store_true", help = \
+                    """
+                    Whether to archive rcynic's data output exactly as
+                    rcynic writes it or map it into a directory
+                    structure which makes more sense when used with
+                    Subversion.  True means archive exactly as rcynic
+                    writes it, interpreting file and directory names
+                    as rsync would, transient directories and all.
+                    False means map the current authenticated/ tree in
+                    rcynic's output to a stable authenticated/ subtree
+                    in the subversion repository, with file and
+                    directory names from the command line shortened to
+                    their last component.
+                    """)
+
+parser.add_argument("--lockfile", default = "rcynic-svn.lock", help = \
+                    """
+                    Lock file to prevent multiple copies of this
+                    program (e.g., running under cron) from stepping
+                    on each other while modifying the working
+                    directory.
+                    """)
+
+parser.add_argument("files_to_archive", nargs = "*", help = \
+                    """
+                    Files to archive using Subversion.  If omitted, we
+                    assume that some other process has already
+                    modified the Subversion working directory.
+                    """)
+
+parser.add_argument("working_directory", help = \
+                    """
+                    Subversion working directory to use (must already
+                    exist).
+                    """)
+
+args = parser.parse_args()
+
+if args.show_timing:
+  t0 = datetime.datetime.utcnow()
+  print t0, "Starting"
+
+# Lock out other instances of this program.  We may want some more
+# sophisticated approach when combining this with other programs, but
+# this should minimize the risk of multiple copies of this program
+# trying to modify the same subversion working directory at the same
+# time and messing each other up.  We leave the lock file in place
+# because doing so removes a potential race condition.
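+#
+# (Leaving it in place matters because flock() locks the file's inode:
+# if we unlinked the lock file on exit, a second instance could still
+# hold a lock on the old, unlinked inode while a third creates and
+# locks a fresh file under the same name, and the two would not
+# exclude each other.)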
+lock = os.open(args.lockfile, os.O_RDONLY | os.O_CREAT | os.O_NONBLOCK, 0666)
+fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
+
+# Make sure working tree is up to date.
+
+run("svn", "update", "--quiet", args.working_directory)
+
+# Copy rcynic's output as appropriate.
+
+if args.files_to_archive:
+
+  if args.verbatim:
+    cmd = ["rsync", "--archive", "--quiet", "--delete"]
+    cmd.extend(args.files_to_archive)
+    cmd.append(args.working_directory)
+    run(*cmd)
+
+  else:
+    for src in args.files_to_archive:
+      cmd = ["rsync", "--archive", "--quiet", "--delete", "--copy-links"]
+      cmd.append(src.rstrip("/"))
+      cmd.append(args.working_directory.rstrip("/") + "/")
+      run(*cmd)
+
+# Ask Subversion to add any new files, trying hard to get the MIME
+# types right.
+
+cmd = ["svn", "add", "--quiet", "--force", "--auto-props"]
+
+for fn2, mime_type in mime_types:
+  cmd.append("--config-option")
+  cmd.append("config:auto-props:*.%s=svn:mime-type=%s" % (fn2, mime_type))
+
+cmd.append(".")
+
+run(*cmd, cwd = args.working_directory)
+
+# Parse XML version of Subversion's status output to figure out what
+# files have been deleted, and tell Subversion that we deleted them
+# intentionally.
+
+missing = sorted(entry.get("path")
+                 for entry in runxml("svn", "status", "--xml", args.working_directory).find("target").findall("entry")
+                 if entry.find("wc-status").get("item") == "missing")
+deleted = []
+
+for path in missing:
+  if not any(path.startswith(r) for r in deleted):
+    run("svn", "delete", "--quiet", path)
+    deleted.append(path + "/")
+
+# Commit our changes and update the working tree.
+
+run("svn", "commit", "--quiet", "--message", "Auto update.", args.working_directory)
+run("svn", "update", "--quiet", args.working_directory)
+
+if args.show_timing:
+  now = datetime.datetime.utcnow()
+  print now, now - t0, "total runtime"
diff --git a/rp/rcynic/rcynic-text b/rp/rcynic/rcynic-text new file mode 100755 index 00000000..a8e56dac --- /dev/null +++ b/rp/rcynic/rcynic-text @@ -0,0 +1,118 @@
+# $Id$
+#
+# Copyright (C) 2012  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Flat text summary of rcynic.xml.
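+
+Reads rcynic.xml from standard input if no files are named on the
+command line, otherwise reads each named file, and prints a table of
+validation status counts per repository hostname for each input.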
+""" + +import sys +import urlparse +import textwrap + +try: + from lxml.etree import ElementTree +except ImportError: + from xml.etree.ElementTree import ElementTree + +class Label(object): + + def __init__(self, elt): + self.tag = elt.tag + self.width = max(len(s) for s in elt.text.split()) + self.lines = textwrap.wrap(elt.text.strip(), width = self.width) + self.counter = 0 + + def line(self, n): + try: + return " " + self.lines[n].center(self.width) + " " + except IndexError: + return " " * (self.width + 2) + + def add(self): + self.counter += 1 + + @property + def total(self): + return " " + str(self.counter).rjust(self.width) + " " + + @property + def visible(self): + return self.counter > 0 + +class Host(object): + + def __init__(self): + self.counters = {} + + def add(self, label): + self.counters[label] = self.counters.get(label, 0) + 1 + label.add() + + def total(self, label): + if label in self.counters: + return " " + str(self.counters[label]).rjust(label.width) + " " + else: + return " " * (label.width + 2) + +class Session(object): + + def __init__(self, labels): + self.hosts = {} + self.labels = labels + self.map = dict((label.tag, label) for label in labels) + + def add(self, elt): + label = self.map[elt.get("status")] + hostname = urlparse.urlparse(elt.text.strip()).hostname + if hostname not in self.hosts: + self.hosts[hostname] = Host() + self.hosts[hostname].add(label) + + def show(self): + visible = [label for label in self.labels if label.visible] + hostnames = sorted(hostname for hostname in self.hosts if hostname is not None) + hostwidth = max(len(hostname) for hostname in hostnames + ["Hostname"]) + separator = "+-%s-+-%s-+" % ( + "-" * hostwidth, + "-+-".join("-" * label.width for label in visible)) + print separator + for i in xrange(max(len(label.lines) for label in visible)): + print "| %s |%s|" % ( + ("Hostname" if i == 0 else "").ljust(hostwidth), + "|".join(label.line(i) for label in visible)) + print separator + for hostname in hostnames: + print "| %s |%s|" % ( + hostname.ljust(hostwidth), + "|".join(self.hosts[hostname].total(label) for label in visible)) + if hostnames: + print separator + print "| %s |%s|" % ( + "Total".ljust(hostwidth), + "|".join(label.total for label in visible)) + print separator + + +def main(): + for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]): + etree = ElementTree(file = filename) + session = Session([Label(elt) for elt in etree.find("labels")]) + for elt in etree.findall("validation_status"): + session.add(elt) + session.show() + +if __name__ == "__main__": + main() diff --git a/rp/rcynic/rcynic.c b/rp/rcynic/rcynic.c new file mode 100644 index 00000000..dea9c48f --- /dev/null +++ b/rp/rcynic/rcynic.c @@ -0,0 +1,6070 @@ +/* + * Copyright (C) 2013--2014 Dragon Research Labs ("DRL") + * Portions copyright (C) 2009--2012 Internet Systems Consortium ("ISC") + * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN") + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notices and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND DRL, ISC, AND ARIN DISCLAIM ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL DRL,
+ * ISC, OR ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+ * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+/**
+ * @mainpage
+ *
+ * "Cynical rsync": Recursively walk RPKI tree using rsync to pull
+ * data from remote sites, validating certificates and CRLs as we go.
+ *
+ * Doxygen doesn't quite know what to make of a one-file C program,
+ * and ends up putting most of the interesting data @link rcynic.c
+ * here. @endlink
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/file.h>
+#include <errno.h>
+#include <sys/signal.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <utime.h>
+#include <glob.h>
+#include <sys/param.h>
+#include <getopt.h>
+
+#define SYSLOG_NAMES		/* defines CODE prioritynames[], facilitynames[] */
+#include <syslog.h>
+
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/safestack.h>
+#include <openssl/conf.h>
+#include <openssl/rand.h>
+#include <openssl/asn1t.h>
+#include <openssl/cms.h>
+
+#include <rpki/roa.h>
+#include <rpki/manifest.h>
+
+#include "bio_f_linebreak.h"
+
+#include "defstack.h"
+
+#if !defined(FILENAME_MAX) && defined(PATH_MAX) && PATH_MAX > 1024
+#define FILENAME_MAX PATH_MAX
+#elif !defined(FILENAME_MAX)
+#define FILENAME_MAX 1024
+#endif
+
+#define SCHEME_RSYNC ("rsync://")
+#define SIZEOF_RSYNC (sizeof(SCHEME_RSYNC) - 1)
+
+/**
+ * Maximum length of a hostname.
+ */
+#ifndef HOSTNAME_MAX
+#define HOSTNAME_MAX 256
+#endif
+
+/**
+ * Maximum length of a URI.
+ */
+#define URI_MAX (SIZEOF_RSYNC + HOSTNAME_MAX + 1 + FILENAME_MAX)
+
+/**
+ * Maximum number of times we try to kill an inferior process before
+ * giving up.
+ */
+#define KILL_MAX 10
+
+/**
+ * Version number of XML summary output.
+ */
+#define XML_SUMMARY_VERSION 1
+
+/**
+ * How much buffer space do we need for a raw address?
+ */
+#define ADDR_RAW_BUF_LEN 16
+
+/**
+ * How many bytes is a SHA256 digest?
+ */
+#define HASH_SHA256_LEN 32
+
+/**
+ * Logging levels.  Same general idea as syslog(), but our own
+ * categories based on what makes sense for this program.  Default
+ * mappings to syslog() priorities are here because it's the easiest
+ * way to make sure that we assign a syslog level to each of ours.
+ */
+
+#define LOG_LEVELS \
+  QQ(log_sys_err, LOG_ERR) /* Error from OS or library */ \
+  QQ(log_usage_err, LOG_ERR) /* Bad usage (local error) */ \
+  QQ(log_data_err, LOG_NOTICE) /* Bad data, no biscuit */ \
+  QQ(log_telemetry, LOG_INFO) /* Normal progress chatter */ \
+  QQ(log_verbose, LOG_INFO) /* Extra chatter */ \
+  QQ(log_debug, LOG_DEBUG) /* Only useful when debugging */
+
+#define QQ(x,y) x ,
+typedef enum log_level { LOG_LEVELS LOG_LEVEL_T_MAX } log_level_t;
+#undef QQ
+
+#define QQ(x,y) { #x , x },
+static const struct {
+  const char *name;
+  log_level_t value;
+} log_levels[] = {
+  LOG_LEVELS
+};
+#undef QQ
+
+/**
+ * MIB counters derived from OpenSSL.  Long list of validation failure
+ * codes from OpenSSL (crypto/x509/x509_vfy.h).
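+ *
+ * (A note on the QQ()/QB()/QW()/QG()/QV() convention used here and
+ * above, in case the "X-macro" trick is unfamiliar: each list is a
+ * macro expanded several times with different definitions of these
+ * helper macros, deriving parallel enums and tables from a single
+ * list.  Written out by hand, the two LOG_LEVELS expansions above
+ * would read roughly:
+ *
+ *   typedef enum log_level { log_sys_err, log_usage_err, ...,
+ *                            LOG_LEVEL_T_MAX } log_level_t;
+ *
+ *   static const struct { const char *name; log_level_t value; }
+ *   log_levels[] = { { "log_sys_err", log_sys_err }, ... };
+ *
+ * which keeps each enum and its companion tables in sync from one
+ * master list.)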
+ */ + +#define MIB_COUNTERS_FROM_OPENSSL \ + QV(X509_V_ERR_UNABLE_TO_GET_CRL) \ + QV(X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE) \ + QV(X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE) \ + QV(X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY) \ + QV(X509_V_ERR_CERT_SIGNATURE_FAILURE) \ + QV(X509_V_ERR_CRL_SIGNATURE_FAILURE) \ + QV(X509_V_ERR_CERT_NOT_YET_VALID) \ + QV(X509_V_ERR_CERT_HAS_EXPIRED) \ + QV(X509_V_ERR_CRL_NOT_YET_VALID) \ + QV(X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD) \ + QV(X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD) \ + QV(X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD) \ + QV(X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD) \ + QV(X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) \ + QV(X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN) \ + QV(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY) \ + QV(X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE) \ + QV(X509_V_ERR_CERT_CHAIN_TOO_LONG) \ + QV(X509_V_ERR_CERT_REVOKED) \ + QV(X509_V_ERR_INVALID_CA) \ + QV(X509_V_ERR_PATH_LENGTH_EXCEEDED) \ + QV(X509_V_ERR_INVALID_PURPOSE) \ + QV(X509_V_ERR_CERT_UNTRUSTED) \ + QV(X509_V_ERR_CERT_REJECTED) \ + QV(X509_V_ERR_AKID_SKID_MISMATCH) \ + QV(X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH) \ + QV(X509_V_ERR_KEYUSAGE_NO_CERTSIGN) \ + QV(X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER) \ + QV(X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION) \ + QV(X509_V_ERR_KEYUSAGE_NO_CRL_SIGN) \ + QV(X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION) \ + QV(X509_V_ERR_INVALID_NON_CA) \ + QV(X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED) \ + QV(X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE) \ + QV(X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED) \ + QV(X509_V_ERR_INVALID_EXTENSION) \ + QV(X509_V_ERR_INVALID_POLICY_EXTENSION) \ + QV(X509_V_ERR_NO_EXPLICIT_POLICY) \ + QV(X509_V_ERR_UNNESTED_RESOURCE) + +/** + * MIB counters specific to rcynic. + */ + +#define MIB_COUNTERS \ + MIB_COUNTERS_FROM_OPENSSL \ + QB(aia_extension_missing, "AIA extension missing") \ + QB(aia_extension_forbidden, "AIA extension forbidden") \ + QB(aia_uri_missing, "AIA URI missing") \ + QB(aki_extension_issuer_mismatch, "AKI extension issuer mismatch") \ + QB(aki_extension_missing, "AKI extension missing") \ + QB(aki_extension_wrong_format, "AKI extension is wrong format") \ + QB(bad_asidentifiers, "Bad ASIdentifiers extension") \ + QB(bad_certificate_policy, "Bad certificate policy") \ + QB(bad_cms_econtenttype, "Bad CMS eContentType") \ + QB(bad_cms_si_contenttype, "Bad CMS SI ContentType") \ + QB(bad_cms_signer, "Bad CMS signer") \ + QB(bad_cms_signer_infos, "Bad CMS signerInfos") \ + QB(bad_crl, "Bad CRL") \ + QB(bad_ipaddrblocks, "Bad IPAddrBlocks extension") \ + QB(bad_key_usage, "Bad keyUsage") \ + QB(bad_manifest_digest_length, "Bad manifest digest length") \ + QB(bad_public_key, "Bad public key") \ + QB(bad_roa_asID, "Bad ROA asID") \ + QB(bad_certificate_serial_number, "Bad certificate serialNumber") \ + QB(bad_manifest_number, "Bad manifestNumber") \ + QB(certificate_bad_signature, "Bad certificate signature") \ + QB(certificate_failed_validation, "Certificate failed validation") \ + QB(cms_econtent_decode_error, "CMS eContent decode error") \ + QB(cms_includes_crls, "CMS includes CRLs") \ + QB(cms_signer_missing, "CMS signer missing") \ + QB(cms_ski_mismatch, "CMS SKI mismatch") \ + QB(cms_validation_failure, "CMS validation failure") \ + QB(crl_issuer_name_mismatch, "CRL issuer name mismatch") \ + QB(crl_not_in_manifest, "CRL not listed in manifest") \ + QB(crl_not_yet_valid, "CRL not yet valid") \ + QB(crl_number_extension_missing, "CRL number extension missing") \ + QB(crl_number_is_negative, "CRL number is negative") \ 
+  QB(crl_number_out_of_range, "CRL number out of range") \
+  QB(crldp_doesnt_match_issuer_sia, "CRLDP doesn't match issuer's SIA") \
+  QB(crldp_uri_missing, "CRLDP URI missing") \
+  QB(disallowed_x509v3_extension, "Disallowed X.509v3 extension") \
+  QB(duplicate_name_in_manifest, "Duplicate name in manifest") \
+  QB(inappropriate_eku_extension, "Inappropriate EKU extension") \
+  QB(malformed_aia_extension, "Malformed AIA extension") \
+  QB(malformed_sia_extension, "Malformed SIA extension") \
+  QB(malformed_basic_constraints, "Malformed basicConstraints") \
+  QB(malformed_trust_anchor, "Malformed trust anchor") \
+  QB(malformed_cadirectory_uri, "Malformed caDirectory URI") \
+  QB(malformed_crldp_extension, "Malformed CRLDP extension") \
+  QB(malformed_crldp_uri, "Malformed CRLDP URI") \
+  QB(malformed_roa_addressfamily, "Malformed ROA addressFamily") \
+  QB(malformed_tal_uri, "Malformed TAL URI") \
+  QB(manifest_carepository_mismatch, "Manifest caRepository mismatch") \
+  QB(manifest_interval_overruns_cert, "Manifest interval overruns certificate") \
+  QB(manifest_lists_missing_object, "Manifest lists missing object") \
+  QB(manifest_not_yet_valid, "Manifest not yet valid") \
+  QB(missing_resources, "Missing resources") \
+  QB(nonconformant_asn1_time_value, "Nonconformant ASN.1 time value") \
+  QB(nonconformant_public_key_algorithm,"Nonconformant public key algorithm")\
+  QB(nonconformant_signature_algorithm, "Nonconformant signature algorithm")\
+  QB(nonconformant_digest_algorithm, "Nonconformant digest algorithm") \
+  QB(nonconformant_certificate_uid, "Nonconformant certificate UID") \
+  QB(object_rejected, "Object rejected") \
+  QB(rfc3779_inheritance_required, "RFC 3779 inheritance required") \
+  QB(roa_contains_bad_afi_value, "ROA contains bad AFI value") \
+  QB(roa_max_prefixlen_too_short, "ROA maxPrefixlen too short") \
+  QB(roa_resource_not_in_ee, "ROA resource not in EE") \
+  QB(roa_resources_malformed, "ROA resources malformed") \
+  QB(rsync_transfer_failed, "rsync transfer failed") \
+  QB(rsync_transfer_timed_out, "rsync transfer timed out") \
+  QB(safi_not_allowed, "SAFI not allowed") \
+  QB(sia_cadirectory_uri_missing, "SIA caDirectory URI missing") \
+  QB(sia_extension_missing, "SIA extension missing") \
+  QB(sia_manifest_uri_missing, "SIA manifest URI missing") \
+  QB(ski_extension_missing, "SKI extension missing") \
+  QB(ski_public_key_mismatch, "SKI public key mismatch") \
+  QB(trust_anchor_key_mismatch, "Trust anchor key mismatch") \
+  QB(trust_anchor_with_crldp, "Trust anchor can't have CRLDP") \
+  QB(unknown_afi, "Unknown AFI") \
+  QB(unknown_openssl_verify_error, "Unknown OpenSSL verify error") \
+  QB(unreadable_trust_anchor, "Unreadable trust anchor") \
+  QB(unreadable_trust_anchor_locator, "Unreadable trust anchor locator") \
+  QB(wrong_object_version, "Wrong object version") \
+  QW(aia_doesnt_match_issuer, "AIA doesn't match issuer") \
+  QW(backup_thisupdate_newer_than_current, "Backup thisUpdate newer than current") \
+  QW(backup_number_higher_than_current, "Backup number higher than current") \
+  QW(bad_thisupdate, "Bad CRL thisUpdate") \
+  QW(bad_cms_si_signed_attributes, "Bad CMS SI signed attributes") \
+  QW(bad_signed_object_uri, "Bad signedObject URI") \
+  QW(crldp_names_newer_crl, "CRLDP names newer CRL") \
+  QW(digest_mismatch, "Digest mismatch") \
+  QW(ee_certificate_with_1024_bit_key, "EE certificate with 1024 bit key") \
+  QW(issuer_uses_multiple_crldp_values, "Issuer uses multiple CRLDP values")\
+  QW(multiple_rsync_uris_in_extension, "Multiple rsync
URIs in extension") \ + QW(nonconformant_issuer_name, "Nonconformant X.509 issuer name") \ + QW(nonconformant_subject_name, "Nonconformant X.509 subject name") \ + QW(policy_qualifier_cps, "Policy Qualifier CPS") \ + QW(rsync_partial_transfer, "rsync partial transfer") \ + QW(rsync_transfer_skipped, "rsync transfer skipped") \ + QW(sia_extension_missing_from_ee, "SIA extension missing from EE") \ + QW(skipped_because_not_in_manifest, "Skipped because not in manifest") \ + QW(stale_crl_or_manifest, "Stale CRL or manifest") \ + QW(tainted_by_stale_crl, "Tainted by stale CRL") \ + QW(tainted_by_stale_manifest, "Tainted by stale manifest") \ + QW(tainted_by_not_being_in_manifest, "Tainted by not being in manifest") \ + QW(trust_anchor_not_self_signed, "Trust anchor not self-signed") \ + QW(trust_anchor_skipped, "Trust anchor skipped") \ + QW(unknown_object_type_skipped, "Unknown object type skipped") \ + QW(uri_too_long, "URI too long") \ + QW(wrong_cms_si_signature_algorithm, "Wrong CMS SI signature algorithm") \ + QW(wrong_cms_si_digest_algorithm, "Wrong CMS SI digest algorithm") \ + QG(non_rsync_uri_in_extension, "Non-rsync URI in extension") \ + QG(object_accepted, "Object accepted") \ + QG(rechecking_object, "Rechecking object") \ + QG(rsync_transfer_succeeded, "rsync transfer succeeded") \ + QG(validation_ok, "OK") + +#define QV(x) QB(mib_openssl_##x, 0) + +static const char + mib_counter_kind_good[] = "good", + mib_counter_kind_warn[] = "warn", + mib_counter_kind_bad[] = "bad"; + +#define QG(x,y) mib_counter_kind_good , +#define QW(x,y) mib_counter_kind_warn , +#define QB(x,y) mib_counter_kind_bad , +static const char * const mib_counter_kind[] = { MIB_COUNTERS NULL }; +#undef QB +#undef QW +#undef QG + +#define QG(x,y) QQ(x,y) +#define QW(x,y) QQ(x,y) +#define QB(x,y) QQ(x,y) + +#define QQ(x,y) x , +typedef enum mib_counter { MIB_COUNTERS MIB_COUNTER_T_MAX } mib_counter_t; +#undef QQ + +#define QQ(x,y) y , +static const char * const mib_counter_desc[] = { MIB_COUNTERS NULL }; +#undef QQ + +#define QQ(x,y) #x , +static const char * const mib_counter_label[] = { MIB_COUNTERS NULL }; +#undef QQ + +#undef QV + +#define QQ(x,y) 0 , +#define QV(x) x , +static const long mib_counter_openssl[] = { MIB_COUNTERS 0 }; +#undef QV +#undef QQ + +/** + * Object sources. We always try to get fresh copies of objects using + * rsync, but if that fails we try using backup copies from what + * worked the last time we were run. This means that a URI + * potentially represents two different objects, so we need to + * distinguish them for tracking purposes in our validation log. + */ + +#define OBJECT_GENERATIONS \ + QQ(null) \ + QQ(current) \ + QQ(backup) + +#define QQ(x) object_generation_##x , +typedef enum object_generation { OBJECT_GENERATIONS OBJECT_GENERATION_MAX } object_generation_t; +#undef QQ + +#define QQ(x) #x , +static const char * const object_generation_label[] = { OBJECT_GENERATIONS NULL }; +#undef QQ + +/** + * Type-safe string wrapper for URIs. + */ +typedef struct { char s[URI_MAX]; } uri_t; + +/** + * Type-safe string wrapper for filename paths. + */ +typedef struct { char s[FILENAME_MAX]; } path_t; + +/** + * Type-safe wrapper for hash buffers. + */ +typedef struct { unsigned char h[EVP_MAX_MD_SIZE]; } hashbuf_t; + +/** + * Type-safe wrapper for timestamp strings. + */ +typedef struct { char s[sizeof("2001-01-01T00:00:00Z") + 1]; } timestamp_t; + +/** + * Per-URI validation status object. + * uri must be first element. 
+ */
+typedef struct validation_status {
+  uri_t uri;
+  object_generation_t generation;
+  time_t timestamp;
+  unsigned char events[(MIB_COUNTER_T_MAX + 7) / 8];
+  short balance;
+  struct validation_status *left_child;
+  struct validation_status *right_child;
+} validation_status_t;
+
+DECLARE_STACK_OF(validation_status_t)
+
+/**
+ * Structure to hold data parsed out of a certificate.
+ */
+typedef struct certinfo {
+  int ca, ta;
+  object_generation_t generation;
+  uri_t uri, sia, aia, crldp, manifest, signedobject;
+} certinfo_t;
+
+typedef struct rcynic_ctx rcynic_ctx_t;
+
+/**
+ * States that a walk_ctx_t can be in.
+ */
+typedef enum {
+  walk_state_initial,		/**< Initial state */
+  walk_state_rsync,		/**< rsyncing certinfo.sia */
+  walk_state_ready,		/**< Ready to traverse outputs */
+  walk_state_current,		/**< prefix = rc->unauthenticated */
+  walk_state_backup,		/**< prefix = rc->old_authenticated */
+  walk_state_done		/**< Done walking this cert's outputs */
+} walk_state_t;
+
+/**
+ * Context for certificate tree walks.  This includes all the stuff
+ * that we would keep as automatic variables on the call stack if we
+ * didn't have to use callbacks to support multiple rsync processes.
+ */
+typedef struct walk_ctx {
+  unsigned refcount;
+  certinfo_t certinfo;
+  X509 *cert;
+  Manifest *manifest;
+  object_generation_t manifest_generation;
+  STACK_OF(OPENSSL_STRING) *filenames;
+  int manifest_iteration, filename_iteration, stale_manifest;
+  walk_state_t state;
+  uri_t crldp;
+  STACK_OF(X509) *certs;
+  STACK_OF(X509_CRL) *crls;
+} walk_ctx_t;
+
+DECLARE_STACK_OF(walk_ctx_t)
+
+/**
+ * Return codes from rsync functions.
+ */
+typedef enum {
+  rsync_status_done,		/* Request completed */
+  rsync_status_failed,		/* Request failed */
+  rsync_status_timed_out,	/* Request timed out */
+  rsync_status_pending,		/* Request in progress */
+  rsync_status_skipped		/* Request not attempted */
+} rsync_status_t;
+
+/**
+ * States for asynchronous rsync.
+ * "initial" must be first.
+ */
+
+#define RSYNC_STATES \
+  QQ(initial) \
+  QQ(running) \
+  QQ(conflict_wait) \
+  QQ(retry_wait) \
+  QQ(closed) \
+  QQ(terminating)
+
+#define QQ(x) rsync_state_##x,
+typedef enum { RSYNC_STATES RSYNC_STATE_T_MAX } rsync_state_t;
+#undef QQ
+
+#define QQ(x) #x ,
+static const char * const rsync_state_label[] = { RSYNC_STATES NULL };
+#undef QQ
+
+/**
+ * Context for asynchronous rsync.
+ */
+typedef struct rsync_ctx {
+  uri_t uri;
+  void (*handler)(rcynic_ctx_t *, const struct rsync_ctx *, const rsync_status_t, const uri_t *, void *);
+  void *cookie;
+  rsync_state_t state;
+  enum {
+    rsync_problem_none,		/* Must be first */
+    rsync_problem_timed_out,
+    rsync_problem_refused
+  } problem;
+  unsigned tries;
+  pid_t pid;
+  int fd;
+  time_t started, deadline;
+  char buffer[URI_MAX * 4];
+  size_t buflen;
+} rsync_ctx_t;
+
+DECLARE_STACK_OF(rsync_ctx_t)
+
+/**
+ * Record of rsync attempts.
+ */
+typedef struct rsync_history {
+  uri_t uri;
+  time_t started, finished;
+  rsync_status_t status;
+  int final_slash;
+} rsync_history_t;
+
+DECLARE_STACK_OF(rsync_history_t)
+
+/**
+ * Deferred task.
+ */
+typedef struct task {
+  void (*handler)(rcynic_ctx_t *, void *);
+  void *cookie;
+} task_t;
+
+DECLARE_STACK_OF(task_t)
+
+/**
+ * Trust anchor locator (TAL) fetch context.
+ */
+typedef struct tal_ctx {
+  uri_t uri;
+  path_t path;
+  EVP_PKEY *pkey;
+} tal_ctx_t;
+
+/**
+ * Extended context for verify callbacks. 
This is a wrapper around + * OpenSSL's X509_STORE_CTX, and the embedded X509_STORE_CTX @em must be + * the first element of this structure in order for the evil cast to + * do the right thing. This is ugly but safe, as the C language + * promises us that the address of the first element of a structure is + * the same as the address of the structure. + */ +typedef struct rcynic_x509_store_ctx { + X509_STORE_CTX ctx; /* Must be first */ + rcynic_ctx_t *rc; + const certinfo_t *subject; +} rcynic_x509_store_ctx_t; + +/** + * Program context that would otherwise be a mess of global variables. + */ +struct rcynic_ctx { + path_t authenticated, old_authenticated, new_authenticated, unauthenticated; + char *jane, *rsync_program; + STACK_OF(validation_status_t) *validation_status; + STACK_OF(rsync_history_t) *rsync_history; + STACK_OF(rsync_ctx_t) *rsync_queue; + STACK_OF(task_t) *task_queue; + int use_syslog, allow_stale_crl, allow_stale_manifest, use_links; + int require_crl_in_manifest, rsync_timeout, priority[LOG_LEVEL_T_MAX]; + int allow_non_self_signed_trust_anchor, allow_object_not_in_manifest; + int max_parallel_fetches, max_retries, retry_wait_min, run_rsync; + int allow_digest_mismatch, allow_crl_digest_mismatch; + int allow_nonconformant_name, allow_ee_without_signedObject; + int allow_1024_bit_ee_key, allow_wrong_cms_si_attributes; + int rsync_early; + unsigned max_select_time; + validation_status_t *validation_status_in_waiting; + validation_status_t *validation_status_root; + log_level_t log_level; + X509_STORE *x509_store; +}; + + + +/* + * Handle NIDs we wish OpenSSL knew about. This is carefully (we + * hope) written to do nothing at all for any NID that OpenSSL knows + * about; the intent is just to add definitions for things OpenSSL + * doesn't know about yet. Of necessity, this is a bit gross, since + * it confounds runtime static variables with predefined macro names, + * but we try to put all the magic associated with this in one place. + * + * In the long run it might be cleaner to generate this with a trivial + * script and put the result in a shared .h file, but this will do for + * the moment. + */ + +#ifndef NID_ad_rpkiManifest +static int NID_ad_rpkiManifest; +#endif + +#ifndef NID_ad_signedObject +static int NID_ad_signedObject; +#endif + +#ifndef NID_ct_ROA +static int NID_ct_ROA; +#endif + +#ifndef NID_ct_rpkiManifest +static int NID_ct_rpkiManifest; +#endif + +#ifndef NID_ct_rpkiGhostbusters +static int NID_ct_rpkiGhostbusters; +#endif + +#ifndef NID_cp_ipAddr_asNumber +static int NID_cp_ipAddr_asNumber; +#endif + +#ifndef NID_id_kp_bgpsec_router +static int NID_id_kp_bgpsec_router; +#endif + +/** + * Missing NIDs, if any. 
+ */
+static const struct {
+  int *nid;
+  const char *oid;
+  const char *sn;
+  const char *ln;
+} missing_nids[] = {
+
+#ifndef NID_ad_rpkiManifest
+  {&NID_ad_rpkiManifest, "1.3.6.1.5.5.7.48.10", "id-ad-rpkiManifest", "RPKI Manifest"},
+#endif
+
+#ifndef NID_ad_signedObject
+  {&NID_ad_signedObject, "1.3.6.1.5.5.7.48.11", "id-ad-signedObject", "Signed Object"},
+#endif
+
+#ifndef NID_ct_ROA
+  {&NID_ct_ROA, "1.2.840.113549.1.9.16.1.24", "id-ct-routeOriginAttestation", "ROA eContent"},
+#endif
+
+#ifndef NID_ct_rpkiManifest
+  {&NID_ct_rpkiManifest, "1.2.840.113549.1.9.16.1.26", "id-ct-rpkiManifest", "RPKI Manifest eContent"},
+#endif
+
+#ifndef NID_ct_rpkiGhostbusters
+  {&NID_ct_rpkiGhostbusters, "1.2.840.113549.1.9.16.1.35", "id-ct-rpkiGhostbusters", "RPKI Ghostbusters eContent"},
+#endif
+
+#ifndef NID_cp_ipAddr_asNumber
+  {&NID_cp_ipAddr_asNumber, "1.3.6.1.5.5.7.14.2", "id-cp-ipAddr-asNumber", "RPKI Certificate Policy"},
+#endif
+
+#ifndef NID_id_kp_bgpsec_router
+  {&NID_id_kp_bgpsec_router, "1.3.6.1.5.5.7.3.30", "id-kp-bgpsec-router", "BGPSEC Router Certificate"},
+#endif
+
+};
+
+
+
+/**
+ * Subversion ID data.
+ */
+static const char svn_id[] = "$Id$";
+
+/**
+ * Suffix we use temporarily during the symlink shuffle.  Could be
+ * almost anything, but we want to do the length check early, before
+ * we waste a lot of work we'll just have to throw away, so we just
+ * wire in something short and obvious.
+ */
+static const char authenticated_symlink_suffix[] = ".new";
+
+/**
+ * Constants for comparisons.  We can't build these at compile time,
+ * so they can't be const, but treat them as if they were once
+ * allocated.
+ *
+ * We probably need both a better scheme for naming NID_ replacements
+ * and a more comprehensive rewrite of how we handle OIDs OpenSSL
+ * doesn't know about, so that we neither conflict with defined
+ * symbols nor duplicate effort nor explode if and when OpenSSL adds
+ * new OIDs (with or without the names we would have used).
+ */
+
+static const ASN1_INTEGER *asn1_zero, *asn1_four_octets, *asn1_twenty_octets;
+static int NID_binary_signing_time;
+
+
+
+/**
+ * Handle missing NIDs.
+ */
+static int
+create_missing_nids(void)
+{
+  int i;
+
+  for (i = 0; i < (int) (sizeof(missing_nids) / sizeof(*missing_nids)); i++)
+    if ((*missing_nids[i].nid = OBJ_txt2nid(missing_nids[i].oid)) == NID_undef &&
+        (*missing_nids[i].nid = OBJ_create(missing_nids[i].oid,
+                                           missing_nids[i].sn,
+                                           missing_nids[i].ln)) == NID_undef)
+      return 0;
+
+  return 1;
+}
+
+
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
+ */
+static void OPENSSL_STRING_free(OPENSSL_STRING s)
+{
+  if (s)
+    free(s);
+}
+
+/**
+ * Wrapper around an idiom we use with OPENSSL_STRING stacks.  There's
+ * a bug in the current sk_OPENSSL_STRING_delete() macro that casts
+ * the return value to the wrong type, so we cast it to something
+ * innocuous here and avoid using that macro elsewhere.
+ */
+static void sk_OPENSSL_STRING_remove(STACK_OF(OPENSSL_STRING) *sk, const char *str)
+{
+  OPENSSL_STRING_free((void *) sk_OPENSSL_STRING_delete(sk, sk_OPENSSL_STRING_find(sk, str)));
+}
+
+/**
+ * Allocate a new validation_status_t object.
+ */
+static validation_status_t *validation_status_t_new(void)
+{
+  validation_status_t *v = malloc(sizeof(*v));
+  if (v)
+    memset(v, 0, sizeof(*v));
+  return v;
+}
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
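+ *
+ * (These wrappers exist because the stack operations generated by
+ * defstack.h expect plain functions for element cleanup; free()
+ * itself has the wrong prototype.  A hypothetical cleanup call, with
+ * both the operation name and the stack variable assumed here for
+ * illustration, might read:
+ *
+ *   sk_validation_status_t_pop_free(sk, validation_status_t_free);
+ *
+ * so each element type gets its own free wrapper.)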
+ */
+static void validation_status_t_free(validation_status_t *v)
+{
+  if (v)
+    free(v);
+}
+
+
+
+/**
+ * Allocate a new rsync_history_t object.
+ */
+static rsync_history_t *rsync_history_t_new(void)
+{
+  rsync_history_t *h = malloc(sizeof(*h));
+  if (h)
+    memset(h, 0, sizeof(*h));
+  return h;
+}
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
+ */
+static void rsync_history_t_free(rsync_history_t *h)
+{
+  if (h)
+    free(h);
+}
+
+/**
+ * Compare two rsync_history_t objects.
+ */
+static int rsync_history_cmp(const rsync_history_t * const *a, const rsync_history_t * const *b)
+{
+  return strcmp((*a)->uri.s, (*b)->uri.s);
+}
+
+
+
+/**
+ * Convert a time_t to a printable string in UTC format.
+ */
+static const char *time_to_string(timestamp_t *ts, const time_t *t)
+{
+  time_t now;
+  size_t n;
+
+  assert(ts != NULL);
+
+  if (t == NULL) {
+    now = time(0);
+    t = &now;
+  }
+
+  n = strftime(ts->s, sizeof(ts->s), "%Y-%m-%dT%H:%M:%SZ", gmtime(t));
+  assert(n > 0);
+
+  return ts->s;
+}
+
+/*
+ * GCC attributes to help catch format string errors.
+ */
+
+#ifdef __GNUC__
+
+static void logmsg(const rcynic_ctx_t *rc,
+                   const log_level_t level,
+                   const char *fmt, ...)
+     __attribute__ ((format (printf, 3, 4)));
+#endif
+
+/**
+ * Logging.
+ */
+static void vlogmsg(const rcynic_ctx_t *rc,
+                    const log_level_t level,
+                    const char *fmt,
+                    va_list ap)
+{
+  assert(rc && fmt);
+
+  if (rc->log_level < level)
+    return;
+
+  if (rc->use_syslog) {
+    vsyslog(rc->priority[level], fmt, ap);
+  } else {
+    char ts[sizeof("00:00:00")+1];
+    time_t t = time(0);
+    strftime(ts, sizeof(ts), "%H:%M:%S", localtime(&t));
+    fprintf(stderr, "%s: ", ts);
+    if (rc->jane)
+      fprintf(stderr, "%s: ", rc->jane);
+    vfprintf(stderr, fmt, ap);
+    putc('\n', stderr);
+  }
+}
+
+/**
+ * Logging.
+ */
+static void logmsg(const rcynic_ctx_t *rc,
+                   const log_level_t level,
+                   const char *fmt, ...)
+{
+  va_list ap;
+  va_start(ap, fmt);
+  vlogmsg(rc, level, fmt, ap);
+  va_end(ap);
+}
+
+/**
+ * Print OpenSSL library errors.
+ */
+static void log_openssl_errors(const rcynic_ctx_t *rc)
+{
+  const char *data, *file;
+  unsigned long code;
+  char error[256];
+  int flags, line;
+
+  if (rc->log_level < log_verbose)
+    return;
+
+  while ((code = ERR_get_error_line_data(&file, &line, &data, &flags))) {
+    ERR_error_string_n(code, error, sizeof(error));
+    if (data && (flags & ERR_TXT_STRING))
+      logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s: %s", file, line, error, data);
+    else
+      logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s", file, line, error);
+  }
+}
+
+/**
+ * Configure logging.
+ */
+static int configure_logmsg(rcynic_ctx_t *rc, const char *name)
+{
+  int i;
+
+  assert(rc && name);
+
+  for (i = 0; i < sizeof(log_levels)/sizeof(*log_levels); i++) {
+    if (!strcmp(name, log_levels[i].name)) {
+      rc->log_level = log_levels[i].value;
+      return 1;
+    }
+  }
+
+  logmsg(rc, log_usage_err, "Bad log level %s", name);
+  return 0;
+}
+
+/**
+ * Configure syslog.
+ */
+static int configure_syslog(const rcynic_ctx_t *rc,
+                            int *result,
+                            const CODE *table,
+                            const char *name)
+{
+  assert(result && table && name);
+
+  while (table->c_name && strcmp(table->c_name, name))
+    table++;
+
+  if (table->c_name) {
+    *result = table->c_val;
+    return 1;
+  } else {
+    logmsg(rc, log_usage_err, "Bad syslog code %s", name);
+    return 0;
+  }
+}
+
+/**
+ * Configure boolean variable. 
+ */ +static int configure_boolean(const rcynic_ctx_t *rc, + int *result, + const char *val) +{ + assert(rc && result && val); + + switch (*val) { + case 'y': case 'Y': case 't': case 'T': case '1': + *result = 1; + return 1; + case 'n': case 'N': case 'f': case 'F': case '0': + *result = 0; + return 1; + default: + logmsg(rc, log_usage_err, "Bad boolean value %s", val); + return 0; + } +} + +/** + * Configure integer variable. + */ +static int configure_integer(const rcynic_ctx_t *rc, + int *result, + const char *val) +{ + long res; + char *p; + + assert(rc && result && val); + + res = strtol(val, &p, 10); + + if (*val != '\0' && *p == '\0') { + *result = (int) res; + return 1; + } else { + logmsg(rc, log_usage_err, "Bad integer value %s", val); + return 0; + } +} + +/** + * Configure unsigned integer variable. + */ +static int configure_unsigned_integer(const rcynic_ctx_t *rc, + unsigned *result, + const char *val) +{ + unsigned long res; + char *p; + + assert(rc && result && val); + + res = strtoul(val, &p, 10); + + if (*val != '\0' && *p == '\0') { + *result = (unsigned) res; + return 1; + } else { + logmsg(rc, log_usage_err, "Bad integer value %s", val); + return 0; + } +} + + + +/** + * Make a directory if it doesn't already exist. + */ +static int mkdir_maybe(const rcynic_ctx_t *rc, const path_t *name) +{ + path_t path; + char *s; + + assert(name != NULL); + if (strlen(name->s) >= sizeof(path.s)) { + logmsg(rc, log_data_err, "Pathname %s too long", name->s); + return 0; + } + strcpy(path.s, name->s); + s = path.s[0] == '/' ? path.s + 1 : path.s; + if ((s = strrchr(s, '/')) == NULL) + return 1; + *s = '\0'; + if (!mkdir_maybe(rc, &path)) { + logmsg(rc, log_sys_err, "Failed to make directory %s", path.s); + return 0; + } + if (!access(path.s, F_OK)) + return 1; + logmsg(rc, log_verbose, "Creating directory %s", path.s); + return mkdir(path.s, 0777) == 0; +} + +/** + * strdup() a string and push it onto a stack. + */ +static int sk_OPENSSL_STRING_push_strdup(STACK_OF(OPENSSL_STRING) *sk, const char *str) +{ + OPENSSL_STRING s = strdup(str); + + if (s && sk_OPENSSL_STRING_push(sk, s)) + return 1; + if (s) + free(s); + return 0; +} + +/** + * Compare two URI strings, for OpenSSL STACK operations. + */ + +static int uri_cmp(const char * const *a, const char * const *b) +{ + return strcmp(*a, *b); +} + +/** + * Is string an rsync URI? + */ +static int is_rsync(const char *uri) +{ + return uri && !strncmp(uri, SCHEME_RSYNC, SIZEOF_RSYNC); +} + +/** + * Convert an rsync URI to a filename, checking for evil character + * sequences. NB: This routine can't call mib_increment(), because + * mib_increment() calls it, so errors detected here only go into + * the log, not the MIB. + */ +static int uri_to_filename(const rcynic_ctx_t *rc, + const uri_t *uri, + path_t *path, + const path_t *prefix) +{ + const char *u; + size_t n; + + path->s[0] = '\0'; + + if (!is_rsync(uri->s)) { + logmsg(rc, log_telemetry, "%s is not an rsync URI, not converting to filename", uri->s); + return 0; + } + + u = uri->s + SIZEOF_RSYNC; + n = strlen(u); + + if (u[0] == '/' || u[0] == '.' 
|| strstr(u, "/../") ||
+      (n >= 3 && !strcmp(u + n - 3, "/.."))) {
+    logmsg(rc, log_data_err, "Dangerous URI %s, not converting to filename", uri->s);
+    return 0;
+  }
+
+  if (prefix)
+    n += strlen(prefix->s);
+
+  if (n >= sizeof(path->s)) {
+    logmsg(rc, log_data_err, "URI %s too long, not converting to filename", uri->s);
+    return 0;
+  }
+
+  if (prefix) {
+    strcpy(path->s, prefix->s);
+    strcat(path->s, u);
+  } else {
+    strcpy(path->s, u);
+  }
+
+  return 1;
+}
+
+/**
+ * Compare filename fields of two FileAndHash structures.
+ */
+static int FileAndHash_name_cmp(const FileAndHash * const *a, const FileAndHash * const *b)
+{
+  return strcmp((char *) (*a)->file->data, (char *) (*b)->file->data);
+}
+
+/**
+ * Get value of code in a validation_status_t.
+ */
+static int validation_status_get_code(const validation_status_t *v,
+                                      const mib_counter_t code)
+{
+  assert(v && code < MIB_COUNTER_T_MAX);
+  return (v->events[code / 8] & (1 << (code % 8))) != 0;
+}
+
+/**
+ * Set value of code in a validation_status_t.
+ */
+static void validation_status_set_code(validation_status_t *v,
+                                       const mib_counter_t code,
+                                       int value)
+{
+  assert(v && code < MIB_COUNTER_T_MAX);
+  if (value)
+    v->events[code / 8] |= (1 << (code % 8));
+  else
+    v->events[code / 8] &= ~(1 << (code % 8));
+}
+
+/**
+ * validation_status object comparison, for AVL tree rather than
+ * OpenSSL stacks.
+ */
+static int
+validation_status_cmp(const validation_status_t *node,
+                      const uri_t *uri,
+                      const object_generation_t generation)
+{
+  int cmp = ((int) node->generation) - ((int) generation);
+  if (cmp)
+    return cmp;
+  else
+    return strcmp(uri->s, node->uri.s);
+}
+
+/**
+ * validation_status AVL tree insertion.  Adapted from code written by
+ * Paul Vixie and explicitly placed in the public domain using examples
+ * from the book: "Algorithms & Data Structures," Niklaus Wirth,
+ * Prentice-Hall, 1986, ISBN 0-13-022005-1.  Thanks, Paul!
+ */
+static validation_status_t *
+validation_status_sprout(validation_status_t **node,
+                         int *needs_balancing,
+                         validation_status_t *new_node)
+{
+#ifdef AVL_DEBUG
+#define AVL_MSG(msg) fprintf(stderr, "AVL_DEBUG: '%s'\n", msg)
+#else
+#define AVL_MSG(msg)
+#endif
+
+  validation_status_t *p1, *p2, *result;
+  int cmp;
+
+  /*
+   * Are we grounded?  If so, add the node "here" and set the
+   * rebalance flag, then exit.
+   */
+  if (*node == NULL) {
+    AVL_MSG("Grounded, adding new node");
+    new_node->left_child = NULL;
+    new_node->right_child = NULL;
+    new_node->balance = 0;
+    *node = new_node;
+    *needs_balancing = 1;
+    return *node;
+  }
+
+  /*
+   * Compare the data.
+   */
+  cmp = validation_status_cmp(*node, &new_node->uri, new_node->generation);
+
+  /*
+   * If LESS, prepare to move to the left.
+   */
+  if (cmp < 0) {
+
+    AVL_MSG("LESS. sprouting left.");
+    result = validation_status_sprout(&(*node)->left_child, needs_balancing, new_node);
+
+    if (*needs_balancing) {
+      AVL_MSG("LESS: left branch has grown longer");
+
+      switch ((*node)->balance) {
+
+      case 1:
+	/*
+	 * Right branch WAS longer; balance is ok now.
+	 */
+	AVL_MSG("LESS: case 1.. balance restored implicitly");
+	(*node)->balance = 0;
+	*needs_balancing = 0;
+	break;
+
+      case 0:
+	/*
+	 * Balance WAS okay; now left branch longer.
+	 */
+	AVL_MSG("LESS: case 0.. balance bad but still ok");
+	(*node)->balance = -1;
+	break;
+
+      case -1:
+	/*
+	 * Left branch was already too long.  Rebalance. 
+ */ + AVL_MSG("LESS: case -1: rebalancing"); + p1 = (*node)->left_child; + + if (p1->balance == -1) { + AVL_MSG("LESS: single LL"); + (*node)->left_child = p1->right_child; + p1->right_child = *node; + (*node)->balance = 0; + *node = p1; + } + + else { + AVL_MSG("LESS: double LR"); + + p2 = p1->right_child; + p1->right_child = p2->left_child; + p2->left_child = p1; + + (*node)->left_child = p2->right_child; + p2->right_child = *node; + + if (p2->balance == -1) + (*node)->balance = 1; + else + (*node)->balance = 0; + + if (p2->balance == 1) + p1->balance = -1; + else + p1->balance = 0; + *node = p2; + } + + (*node)->balance = 0; + *needs_balancing = 0; + } + } + return result; + } + + /* + * If MORE, prepare to move to the right. + */ + if (cmp > 0) { + + AVL_MSG("MORE: sprouting to the right"); + result = validation_status_sprout(&(*node)->right_child, needs_balancing, new_node); + + if (*needs_balancing) { + AVL_MSG("MORE: right branch has grown longer"); + + switch ((*node)->balance) { + + case -1:AVL_MSG("MORE: balance was off, fixed implicitly"); + (*node)->balance = 0; + *needs_balancing = 0; + break; + + case 0: AVL_MSG("MORE: balance was okay, now off but ok"); + (*node)->balance = 1; + break; + + case 1: AVL_MSG("MORE: balance was off, need to rebalance"); + p1 = (*node)->right_child; + + if (p1->balance == 1) { + AVL_MSG("MORE: single RR"); + (*node)->right_child = p1->left_child; + p1->left_child = *node; + (*node)->balance = 0; + *node = p1; + } + + else { + AVL_MSG("MORE: double RL"); + + p2 = p1->left_child; + p1->left_child = p2->right_child; + p2->right_child = p1; + + (*node)->right_child = p2->left_child; + p2->left_child = *node; + + if (p2->balance == 1) + (*node)->balance = -1; + else + (*node)->balance = 0; + + if (p2->balance == -1) + p1->balance = 1; + else + p1->balance = 0; + + *node = p2; + } /*else*/ + (*node)->balance = 0; + *needs_balancing = 0; + } + } + return result; + } + + /* + * Neither more nor less, found existing node matching key, return it. + */ + AVL_MSG("I found it!"); + *needs_balancing = 0; + return *node; + +#undef AVL_MSG +} + +/** + * Add a validation status entry to internal log. + */ +static void log_validation_status(rcynic_ctx_t *rc, + const uri_t *uri, + const mib_counter_t code, + const object_generation_t generation) +{ + validation_status_t *v = NULL; + int needs_balancing = 0; + + assert(rc && uri && code < MIB_COUNTER_T_MAX && generation < OBJECT_GENERATION_MAX); + + if (!rc->validation_status) + return; + + if (code == rsync_transfer_skipped && !rc->run_rsync) + return; + + if (rc->validation_status_in_waiting == NULL && + (rc->validation_status_in_waiting = validation_status_t_new()) == NULL) { + logmsg(rc, log_sys_err, "Couldn't allocate validation status entry for %s", uri->s); + return; + } + + v = rc->validation_status_in_waiting; + memset(v, 0, sizeof(*v)); + v->uri = *uri; + v->generation = generation; + + v = validation_status_sprout(&rc->validation_status_root, &needs_balancing, v); + if (v == rc->validation_status_in_waiting) + rc->validation_status_in_waiting = NULL; + + if (rc->validation_status_in_waiting == NULL && + !sk_validation_status_t_push(rc->validation_status, v)) { + logmsg(rc, log_sys_err, "Couldn't store validation status entry for %s", uri->s); + return; + } + + v->timestamp = time(0); + + if (validation_status_get_code(v, code)) + return; + + validation_status_set_code(v, code, 1); + + logmsg(rc, log_verbose, "Recording \"%s\" for %s%s%s", + (mib_counter_desc[code] + ? 
mib_counter_desc[code]
+	  : X509_verify_cert_error_string(mib_counter_openssl[code])),
+	 (generation != object_generation_null ? object_generation_label[generation] : ""),
+	 (generation != object_generation_null ? " " : ""),
+	 uri->s);
+}
+
+/**
+ * Copy or link a file, as the case may be.
+ */
+static int cp_ln(const rcynic_ctx_t *rc, const path_t *source, const path_t *target)
+{
+  struct stat statbuf;
+  struct utimbuf utimebuf;
+  FILE *in = NULL, *out = NULL;
+  int c, ok = 0;
+
+  if (rc->use_links) {
+    (void) unlink(target->s);
+    ok = link(source->s, target->s) == 0;
+    if (!ok)
+      logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
+             source->s, target->s, strerror(errno));
+    return ok;
+  }
+
+  if ((in = fopen(source->s, "rb")) == NULL ||
+      (out = fopen(target->s, "wb")) == NULL)
+    goto done;
+
+  while ((c = getc(in)) != EOF)
+    if (putc(c, out) == EOF)
+      goto done;
+
+  ok = 1;
+
+ done:
+  ok &= !(in != NULL && fclose(in) == EOF);
+  ok &= !(out != NULL && fclose(out) == EOF);
+
+  if (!ok) {
+    logmsg(rc, log_sys_err, "Couldn't copy %s to %s: %s",
+           source->s, target->s, strerror(errno));
+    return ok;
+  }
+
+  /*
+   * Preserve the file modification time to allow for detection of
+   * changed objects in the authenticated directory.  Failure to reset
+   * the times is not optimal, but is also not critical, thus no
+   * failure return.
+   */
+  if (stat(source->s, &statbuf) < 0 ||
+      (utimebuf.actime = statbuf.st_atime,
+       utimebuf.modtime = statbuf.st_mtime,
+       utime(target->s, &utimebuf) < 0))
+    logmsg(rc, log_sys_err, "Couldn't copy inode timestamp from %s to %s: %s",
+           source->s, target->s, strerror(errno));
+
+  return ok;
+}
+
+/**
+ * Install an object.
+ */
+static int install_object(rcynic_ctx_t *rc,
+                          const uri_t *uri,
+                          const path_t *source,
+                          const object_generation_t generation)
+{
+  path_t target;
+
+  if (!uri_to_filename(rc, uri, &target, &rc->new_authenticated)) {
+    logmsg(rc, log_data_err, "Couldn't generate installation name for %s", uri->s);
+    return 0;
+  }
+
+  if (!mkdir_maybe(rc, &target)) {
+    logmsg(rc, log_sys_err, "Couldn't create directory for %s", target.s);
+    return 0;
+  }
+
+  if (!cp_ln(rc, source, &target))
+    return 0;
+  log_validation_status(rc, uri, object_accepted, generation);
+  return 1;
+}
+
+/**
+ * AVL tree lookup for validation status objects.
+ */
+static validation_status_t *
+validation_status_find(validation_status_t *node,
+                       const uri_t *uri,
+                       const object_generation_t generation)
+{
+  int cmp;
+
+  while (node != NULL && (cmp = validation_status_cmp(node, uri, generation)) != 0)
+    node = cmp < 0 ? node->left_child : node->right_child;
+
+  return node;
+}
+
+/**
+ * Check whether we have a validation status entry corresponding to a
+ * given filename.  This is intended for use while pruning the
+ * unauthenticated tree, so it only checks the current generation.
+ */
+static int
+validation_status_find_filename(const rcynic_ctx_t *rc,
+                                const char *filename)
+{
+  uri_t uri;
+
+  if (strlen(filename) + SIZEOF_RSYNC >= sizeof(uri.s))
+    return 0;
+
+  strcpy(uri.s, SCHEME_RSYNC);
+  strcat(uri.s, filename);
+
+  return validation_status_find(rc->validation_status_root, &uri, object_generation_current) != NULL;
+}
+
+/**
+ * Figure out whether we already have a good copy of an object.  This
+ * is a little more complicated than it sounds, because we might have
+ * failed the current generation and accepted the backup due to having
+ * followed the old CA certificate chain first during a key rollover. 
+ * So if this check is of the current object and we have not already + * accepted the current object for this URI, we need to recheck. + * + * We also handle logging when we decide that we do need to check, so + * that the caller doesn't need to concern itself with why we thought + * the check was necessary. + */ +static int skip_checking_this_object(rcynic_ctx_t *rc, + const uri_t *uri, + const object_generation_t generation) +{ + validation_status_t *v = NULL; + path_t path; + + assert(rc && uri && rc->validation_status); + + if (!uri_to_filename(rc, uri, &path, &rc->new_authenticated)) + return 1; + + if (access(path.s, R_OK)) { + logmsg(rc, log_telemetry, "Checking %s", uri->s); + return 0; + } + + if (generation != object_generation_current) + return 1; + + v = validation_status_find(rc->validation_status_root, uri, generation); + + if (v != NULL && validation_status_get_code(v, object_accepted)) + return 1; + + log_validation_status(rc, uri, rechecking_object, generation); + logmsg(rc, log_telemetry, "Rechecking %s", uri->s); + return 0; +} + + + +/** + * Check str for a suffix. + */ +static int endswith(const char *str, const char *suffix) +{ + size_t len_str, len_suffix; + assert(str != NULL && suffix != NULL); + len_str = strlen(str); + len_suffix = strlen(suffix); + return len_str >= len_suffix && !strcmp(str + len_str - len_suffix, suffix); +} + +/** + * Check str for a prefix. + */ +static int startswith(const char *str, const char *prefix) +{ + size_t len_str, len_prefix; + assert(str != NULL && prefix != NULL); + len_str = strlen(str); + len_prefix = strlen(prefix); + return len_str >= len_prefix && !strncmp(str, prefix, len_prefix); +} + +/** + * Convert a filename to a file:// URI, for logging. + */ +static void filename_to_uri(uri_t *uri, + const char *fn) +{ + assert(sizeof("file://") < sizeof(uri->s)); + strcpy(uri->s, "file://"); + if (*fn != '/') { + if (getcwd(uri->s + strlen(uri->s), sizeof(uri->s) - strlen(uri->s)) == NULL || + (!endswith(uri->s, "/") && strlen(uri->s) >= sizeof(uri->s) - 1)) + uri->s[0] = '\0'; + else + strcat(uri->s, "/"); + } + if (uri->s[0] != '\0' && strlen(uri->s) + strlen(fn) < sizeof(uri->s)) + strcat(uri->s, fn); + else + uri->s[0] = '\0'; +} + +/** + * Set a directory name, adding or stripping trailing slash as needed. + */ +static int set_directory(const rcynic_ctx_t *rc, path_t *out, const char *in, const int want_slash) +{ + int has_slash, need_slash; + size_t n; + + assert(rc && in && out); + + n = strlen(in); + + if (n == 0) { + logmsg(rc, log_usage_err, "Empty path"); + return 0; + } + + has_slash = in[n - 1] == '/'; + + need_slash = want_slash && !has_slash; + + if (n + need_slash + 1 > sizeof(out->s)) { + logmsg(rc, log_usage_err, "Path \"%s\" too long", in); + return 0; + } + + strcpy(out->s, in); + if (need_slash) + strcat(out->s, "/"); + else if (has_slash && !want_slash) + out->s[n - 1] = '\0'; + + return 1; +} + +/** + * Test whether a filesystem path points to a directory. + */ +static int is_directory(const path_t *name) +{ + struct stat st; + + assert(name); + return lstat(name->s, &st) == 0 && S_ISDIR(st.st_mode); +} + +/** + * Remove a directory tree, like rm -rf. 
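+ *
+ * Minimal usage sketch (the path shown is hypothetical, rc is assumed
+ * to be the usual rcynic_ctx_t):
+ *
+ *   path_t victim;
+ *   strcpy(victim.s, "/some/scratch/tree");
+ *   if (!rm_rf(&victim))
+ *     logmsg(rc, log_sys_err, "Couldn't remove %s", victim.s);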
+ */ +static int rm_rf(const path_t *name) +{ + path_t path; + struct dirent *d; + DIR *dir; + int ret = 0; + + assert(name); + + if (!is_directory(name)) + return unlink(name->s) == 0; + + if ((dir = opendir(name->s)) == NULL) + return 0; + + while ((d = readdir(dir)) != NULL) { + if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) + continue; + if (snprintf(path.s, sizeof(path.s), "%s/%s", name->s, d->d_name) >= sizeof(path.s)) + goto done; + if (unlink(path.s) == 0) + continue; + else if (rm_rf(&path)) + continue; + else + goto done; + } + + ret = rmdir(name->s) == 0; + + done: + closedir(dir); + return ret; +} + +/** + * Construct names for the directories not directly settable by the + * user. + * + * This function also checks for an old-style rc->authenticated + * directory, to simplify upgrade from older versions of rcynic. + */ +static int construct_directory_names(rcynic_ctx_t *rc) +{ + struct stat st; + ssize_t n; + path_t p; + time_t t = time(0); + + p = rc->authenticated; + + n = strlen(p.s); + + if (n + sizeof(authenticated_symlink_suffix) >= sizeof(p.s)) { + logmsg(rc, log_usage_err, "Symlink name would be too long"); + return 0; + } + + if (strftime(p.s + n, sizeof(p.s) - n - 1, ".%Y-%m-%dT%H:%M:%SZ", gmtime(&t)) == 0) { + logmsg(rc, log_usage_err, "Generated path with timestamp would be too long"); + return 0; + } + + if (!set_directory(rc, &rc->new_authenticated, p.s, 1)) + return 0; + + if (!set_directory(rc, &rc->old_authenticated, rc->authenticated.s, 1)) + return 0; + + if (lstat(rc->authenticated.s, &st) == 0 && S_ISDIR((st.st_mode)) && + strlen(rc->authenticated.s) + sizeof(".old") < sizeof(p.s)) { + p = rc->authenticated; + strcat(p.s, ".old"); + rm_rf(&p); + (void) rename(rc->authenticated.s, p.s); + } + + if (lstat(rc->authenticated.s, &st) == 0 && S_ISDIR(st.st_mode)) { + logmsg(rc, log_usage_err, + "Existing %s directory is in the way, please remove it", + rc->authenticated.s); + return 0; + } + + return 1; +} + +/** + * Do final symlink shuffle and cleanup of output directories. 
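+ *
+ * The pivot below is the usual POSIX idiom for atomically repointing
+ * a symlink: make the new link under a scratch name, then rename() it
+ * over the old name.  Sketch, with illustrative names:
+ *
+ *   symlink("authenticated.2021-07-04T00:00:00Z", "authenticated.new");
+ *   rename("authenticated.new", "authenticated");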
+ */ +static int finalize_directories(const rcynic_ctx_t *rc) +{ + path_t path, real_old, real_new; + const char *dir; + glob_t g; + int i; + + if (!realpath(rc->old_authenticated.s, real_old.s)) + real_old.s[0] = '\0'; + + if (!realpath(rc->new_authenticated.s, real_new.s)) + real_new.s[0] = '\0'; + + assert(real_new.s[0] && real_new.s[strlen(real_new.s) - 1] != '/'); + + if ((dir = strrchr(real_new.s, '/')) == NULL) + dir = real_new.s; + else + dir++; + + path = rc->authenticated; + + if (strlen(path.s) + sizeof(authenticated_symlink_suffix) >= sizeof(path.s)) + return 0; + strcat(path.s, authenticated_symlink_suffix); + + (void) unlink(path.s); + + if (symlink(dir, path.s) < 0) { + logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s", + path.s, dir, strerror(errno)); + return 0; + } + + if (rename(path.s, rc->authenticated.s) < 0) { + logmsg(rc, log_sys_err, "Couldn't rename %s to %s: %s", + path.s, rc->authenticated.s, strerror(errno)); + return 0; + } + + if (real_old.s[0] && strlen(rc->authenticated.s) + sizeof(".old") < sizeof(path.s)) { + assert(real_old.s[strlen(real_old.s) - 1] != '/'); + + path = rc->authenticated; + strcat(path.s, ".old"); + + (void) unlink(path.s); + + if ((dir = strrchr(real_old.s, '/')) == NULL) + dir = real_old.s; + else + dir++; + + (void) symlink(dir, path.s); + } + + path = rc->authenticated; + assert(strlen(path.s) + sizeof(".*") < sizeof(path.s)); + strcat(path.s, ".*"); + + memset(&g, 0, sizeof(g)); + + if (real_new.s[0] && glob(path.s, 0, 0, &g) == 0) { + for (i = 0; i < g.gl_pathc; i++) + if (realpath(g.gl_pathv[i], path.s) && + strcmp(path.s, real_old.s) && + strcmp(path.s, real_new.s)) + rm_rf(&path); + globfree(&g); + } + + return 1; +} + + + +/** + * Test whether a pair of URIs "conflict", that is, whether attempting + * to rsync both of them at the same time in parallel might cause + * unpredictable behavior. Might need a better name for this test. + * + * Returns non-zero iff the two URIs "conflict". + */ +static int conflicting_uris(const uri_t *a, const uri_t *b) +{ + size_t len_a, len_b; + + assert(a && is_rsync(a->s) && b && is_rsync(b->s)); + + len_a = strlen(a->s); + len_b = strlen(b->s); + + assert(len_a < sizeof(a->s) && len_b < sizeof(b->s)); + + return !strncmp(a->s, b->s, len_a < len_b ? len_a : len_b); +} + + + +/** + * Read non-directory filenames from a directory, so we can check to + * see what's missing from a manifest. 
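+ *
+ * Caller sketch, as the walk machinery below uses it:
+ *
+ *   w->filenames = directory_filenames(rc, walk_state_current,
+ *                                      &w->certinfo.sia);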
+ */ +static STACK_OF(OPENSSL_STRING) *directory_filenames(const rcynic_ctx_t *rc, + const walk_state_t state, + const uri_t *uri) +{ + STACK_OF(OPENSSL_STRING) *result = NULL; + path_t dpath, fpath; + const path_t *prefix = NULL; + DIR *dir = NULL; + struct dirent *d; + int ok = 0; + + assert(rc && uri); + + switch (state) { + case walk_state_current: + prefix = &rc->unauthenticated; + break; + case walk_state_backup: + prefix = &rc->old_authenticated; + break; + default: + goto done; + } + + if (!uri_to_filename(rc, uri, &dpath, prefix) || + (dir = opendir(dpath.s)) == NULL || + (result = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) + goto done; + + while ((d = readdir(dir)) != NULL) + if (snprintf(fpath.s, sizeof(fpath.s), "%s/%s", dpath.s, d->d_name) >= sizeof(fpath.s)) { + logmsg(rc, log_data_err, "Local path name %s/%s too long", dpath.s, d->d_name); + goto done; + } + else if (!is_directory(&fpath) && !sk_OPENSSL_STRING_push_strdup(result, d->d_name)) { + logmsg(rc, log_sys_err, "sk_OPENSSL_STRING_push_strdup() failed, probably memory exhaustion"); + goto done; + } + + ok = 1; + + done: + if (dir != NULL) + closedir(dir); + + if (ok) + return result; + + sk_OPENSSL_STRING_pop_free(result, OPENSSL_STRING_free); + return NULL; +} + + + +/** + * Increment walk context reference count. + */ +static void walk_ctx_attach(walk_ctx_t *w) +{ + if (w != NULL) { + w->refcount++; + assert(w->refcount != 0); + } +} + +/** + * Decrement walk context reference count; freeing the context if the + * reference count is now zero. + */ +static void walk_ctx_detach(walk_ctx_t *w) +{ + if (w != NULL && --(w->refcount) == 0) { + assert(w->refcount == 0); + X509_free(w->cert); + Manifest_free(w->manifest); + sk_X509_free(w->certs); + sk_X509_CRL_pop_free(w->crls, X509_CRL_free); + sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free); + free(w); + } +} + +/** + * Return top context of a walk context stack. + */ +static walk_ctx_t *walk_ctx_stack_head(STACK_OF(walk_ctx_t) *wsk) +{ + return sk_walk_ctx_t_value(wsk, sk_walk_ctx_t_num(wsk) - 1); +} + +/** + * Whether we're done iterating over a walk context. Think of this as + * the thing you call (negated) in the second clause of a conceptual + * "for" loop. + */ +static int walk_ctx_loop_done(STACK_OF(walk_ctx_t) *wsk) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + return wsk == NULL || w == NULL || w->state >= walk_state_done; +} + +/** + * Walk context iterator. Think of this as the thing you call in the + * third clause of a conceptual "for" loop: this reinitializes as + * necessary for the next pass through the loop. + * + * General idea here is that we have several state variables in a walk + * context which collectively define the current pass, product URI, + * etc, and we want to be able to iterate through this sequence via + * the event system. So this function steps to the next state. + * + * Conceptually, w->manifest->fileList and w->filenames form a single + * array with index w->manifest_iteration + w->filename_iteration. + * Beware of fencepost errors, I've gotten this wrong once already. + * Slightly odd coding here is to make it easier to check this. + */ +static void walk_ctx_loop_next(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + int n_manifest, n_filenames; + + assert(rc && wsk && w); + + assert(w->manifest_iteration >= 0 && w->filename_iteration >= 0); + + n_manifest = w->manifest ? sk_FileAndHash_num(w->manifest->fileList) : 0; + n_filenames = w->filenames ? 
sk_OPENSSL_STRING_num(w->filenames) : 0; + + if (w->manifest_iteration + w->filename_iteration < n_manifest + n_filenames) { + if (w->manifest_iteration < n_manifest) + w->manifest_iteration++; + else + w->filename_iteration++; + } + + assert(w->manifest_iteration <= n_manifest && w->filename_iteration <= n_filenames); + + if (w->manifest_iteration + w->filename_iteration < n_manifest + n_filenames) + return; + + while (!walk_ctx_loop_done(wsk)) { + w->state++; + w->manifest_iteration = 0; + w->filename_iteration = 0; + sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free); + w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia); + if (w->manifest != NULL || w->filenames != NULL) + return; + } +} + +static int check_manifest(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk); + +/** + * Loop initializer for walk context. Think of this as the thing you + * call in the first clause of a conceptual "for" loop. + */ +static void walk_ctx_loop_init(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + + assert(rc && wsk && w && w->state == walk_state_ready); + + if (!w->manifest && !check_manifest(rc, wsk)) { + /* + * Simple failure to find a manifest doesn't get here. This is + * for manifest failures that cause us to reject all of this + * certificate's products due to policy knob settings. + */ + w->state = walk_state_done; + return; + } + + if (!w->manifest) + logmsg(rc, log_telemetry, "Couldn't get manifest %s, blundering onward", w->certinfo.manifest.s); + + w->manifest_iteration = 0; + w->filename_iteration = 0; + w->state++; + assert(w->state == walk_state_current); + + assert(w->filenames == NULL); + w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia); + + w->stale_manifest = w->manifest != NULL && X509_cmp_current_time(w->manifest->nextUpdate) < 0; + + while (!walk_ctx_loop_done(wsk) && + (w->manifest == NULL || w->manifest_iteration >= sk_FileAndHash_num(w->manifest->fileList)) && + (w->filenames == NULL || w->filename_iteration >= sk_OPENSSL_STRING_num(w->filenames))) + walk_ctx_loop_next(rc, wsk); +} + +/** + * Extract URI and hash values from walk context. 
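+ *
+ * Putting the loop primitives together, the conceptual "for" loop
+ * looks roughly like this (sketch; check_object() is hypothetical):
+ *
+ *   uri_t uri;
+ *   const unsigned char *hash;
+ *   size_t hashlen;
+ *
+ *   for (walk_ctx_loop_init(rc, wsk);
+ *        !walk_ctx_loop_done(wsk);
+ *        walk_ctx_loop_next(rc, wsk))
+ *     if (walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen))
+ *       check_object(rc, &uri, hash, hashlen);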
+ */ +static int walk_ctx_loop_this(const rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + uri_t *uri, + const unsigned char **hash, + size_t *hashlen) +{ + const walk_ctx_t *w = walk_ctx_stack_head(wsk); + const char *name = NULL; + FileAndHash *fah = NULL; + + assert(rc && wsk && w && uri && hash && hashlen); + + if (w->manifest != NULL && w->manifest_iteration < sk_FileAndHash_num(w->manifest->fileList)) { + fah = sk_FileAndHash_value(w->manifest->fileList, w->manifest_iteration); + name = (const char *) fah->file->data; + } else if (w->filenames != NULL && w->filename_iteration < sk_OPENSSL_STRING_num(w->filenames)) { + name = sk_OPENSSL_STRING_value(w->filenames, w->filename_iteration); + } + + if (name == NULL) { + logmsg(rc, log_sys_err, "Can't find a URI in walk context, this shouldn't happen: state %d, manifest_iteration %d, filename_iteration %d", + (int) w->state, w->manifest_iteration, w->filename_iteration); + return 0; + } + + if (strlen(w->certinfo.sia.s) + strlen(name) >= sizeof(uri->s)) { + logmsg(rc, log_data_err, "URI %s%s too long, skipping", w->certinfo.sia.s, uri->s); + return 0; + } + + strcpy(uri->s, w->certinfo.sia.s); + strcat(uri->s, name); + + if (fah != NULL) { + sk_OPENSSL_STRING_remove(w->filenames, name); + *hash = fah->hash->data; + *hashlen = fah->hash->length; + } else { + *hash = NULL; + *hashlen = 0; + } + + return 1; +} + +/** + * Create a new walk context stack. + */ +static STACK_OF(walk_ctx_t) *walk_ctx_stack_new(void) +{ + return sk_walk_ctx_t_new_null(); +} + +/** + * Push a walk context onto a walk context stack, return the new context. + */ +static walk_ctx_t *walk_ctx_stack_push(STACK_OF(walk_ctx_t) *wsk, + X509 *x, + const certinfo_t *certinfo) +{ + walk_ctx_t *w; + + if (x == NULL || + (certinfo == NULL) != (sk_walk_ctx_t_num(wsk) == 0) || + (w = malloc(sizeof(*w))) == NULL) + return NULL; + + memset(w, 0, sizeof(*w)); + w->cert = x; + if (certinfo != NULL) + w->certinfo = *certinfo; + else + memset(&w->certinfo, 0, sizeof(w->certinfo)); + + if (!sk_walk_ctx_t_push(wsk, w)) { + free(w); + return NULL; + } + + walk_ctx_attach(w); + return w; +} + +/** + * Pop and discard a walk context from a walk context stack. + */ +static void walk_ctx_stack_pop(STACK_OF(walk_ctx_t) *wsk) +{ + walk_ctx_detach(sk_walk_ctx_t_pop(wsk)); +} + +/** + * Clone a stack of walk contexts. + */ +static STACK_OF(walk_ctx_t) *walk_ctx_stack_clone(STACK_OF(walk_ctx_t) *old_wsk) +{ + STACK_OF(walk_ctx_t) *new_wsk; + int i; + if (old_wsk == NULL || (new_wsk = sk_walk_ctx_t_dup(old_wsk)) == NULL) + return NULL; + for (i = 0; i < sk_walk_ctx_t_num(new_wsk); i++) + walk_ctx_attach(sk_walk_ctx_t_value(new_wsk, i)); + return new_wsk; +} + +/** + * Extract certificate stack from walk context stack. Returns a newly + * created STACK_OF(X509) pointing to the existing cert objects. + * + * NB: This is a shallow copy, so use sk_X509_free() to free it, not + * sk_X509_pop_free(). + */ +static STACK_OF(X509) *walk_ctx_stack_certs(const rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk) +{ + STACK_OF(X509) *xsk = sk_X509_new_null(); + walk_ctx_t *w; + int i; + + assert(rc); + + for (i = 0; i < sk_walk_ctx_t_num(wsk); i++) + if ((w = sk_walk_ctx_t_value(wsk, i)) == NULL || + (w->cert != NULL && !sk_X509_push(xsk, w->cert))) + goto fail; + + return xsk; + + fail: + logmsg(rc, log_sys_err, "Couldn't clone walk_ctx_stack, memory exhausted?"); + sk_X509_free(xsk); + return NULL; +} + +/** + * Free a walk context stack, decrementing reference counts of each + * frame on it. 
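+ *
+ * Lifecycle sketch: a frame pushed once and then cloned has a
+ * reference count of two, so both stacks must be freed before the
+ * frame itself goes away:
+ *
+ *   STACK_OF(walk_ctx_t) *wsk = walk_ctx_stack_new();
+ *   walk_ctx_stack_push(wsk, x, NULL);                       refcount 1
+ *   STACK_OF(walk_ctx_t) *dup = walk_ctx_stack_clone(wsk);   refcount 2
+ *   walk_ctx_stack_free(dup);                                refcount 1
+ *   walk_ctx_stack_free(wsk);                                frame freed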
+ */
+static void walk_ctx_stack_free(STACK_OF(walk_ctx_t) *wsk)
+{
+  sk_walk_ctx_t_pop_free(wsk, walk_ctx_detach);
+}
+
+
+
+static int rsync_count_running(const rcynic_ctx_t *);
+
+/**
+ * Add a task to the task queue.
+ */
+static int task_add(const rcynic_ctx_t *rc,
+                    void (*handler)(rcynic_ctx_t *, void *),
+                    void *cookie)
+{
+  task_t *t = malloc(sizeof(*t));
+
+  assert(rc && rc->task_queue && handler);
+
+  assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
+
+  if (!t)
+    return 0;
+
+  t->handler = handler;
+  t->cookie = cookie;
+
+  if (sk_task_t_push(rc->task_queue, t))
+    return 1;
+
+  free(t);
+  return 0;
+}
+
+/**
+ * Run tasks until queue is empty.
+ */
+static void task_run_q(rcynic_ctx_t *rc)
+{
+  task_t *t;
+  assert(rc && rc->task_queue);
+  while ((t = sk_task_t_shift(rc->task_queue)) != NULL) {
+    t->handler(rc, t->cookie);
+    free(t);
+  }
+}
+
+
+
+/**
+ * Check cache of whether we've already fetched a particular URI.
+ */
+static rsync_history_t *rsync_history_uri(const rcynic_ctx_t *rc,
+                                          const uri_t *uri)
+{
+  rsync_history_t h;
+  char *s;
+  int i;
+
+  assert(rc && uri && rc->rsync_history);
+
+  if (!is_rsync(uri->s))
+    return NULL;
+
+  h.uri = *uri;
+
+  while ((s = strrchr(h.uri.s, '/')) != NULL && s[1] == '\0')
+    *s = '\0';
+
+  while ((i = sk_rsync_history_t_find(rc->rsync_history, &h)) < 0) {
+    if ((s = strrchr(h.uri.s, '/')) == NULL ||
+        (s - h.uri.s) < SIZEOF_RSYNC)
+      return NULL;
+    *s = '\0';
+  }
+
+  return sk_rsync_history_t_value(rc->rsync_history, i);
+}
+
+/**
+ * Record that we've already attempted to synchronize a particular
+ * rsync URI.
+ */
+static void rsync_history_add(const rcynic_ctx_t *rc,
+                              const rsync_ctx_t *ctx,
+                              const rsync_status_t status)
+{
+  int final_slash = 0;
+  rsync_history_t *h;
+  uri_t uri;
+  size_t n;
+  char *s;
+
+  assert(rc && ctx && rc->rsync_history && is_rsync(ctx->uri.s));
+
+  uri = ctx->uri;
+
+  while ((s = strrchr(uri.s, '/')) != NULL && s[1] == '\0') {
+    final_slash = 1;
+    *s = '\0';
+  }
+
+  if (status != rsync_status_done) {
+
+    n = SIZEOF_RSYNC + strcspn(uri.s + SIZEOF_RSYNC, "/");
+    assert(n < sizeof(uri.s));
+    uri.s[n] = '\0';
+    final_slash = 1;
+
+    if ((h = rsync_history_uri(rc, &uri)) != NULL) {
+      assert(h->status != rsync_status_done);
+      return;
+    }
+  }
+
+  if ((h = rsync_history_t_new()) != NULL) {
+    h->uri = uri;
+    h->status = status;
+    h->started = ctx->started;
+    h->finished = time(0);
+    h->final_slash = final_slash;
+  }
+
+  if (h == NULL || !sk_rsync_history_t_push(rc->rsync_history, h)) {
+    rsync_history_t_free(h);
+    logmsg(rc, log_sys_err,
+           "Couldn't add %s to rsync_history, blundering onwards", uri.s);
+  }
+}
+
+
+
+/**
+ * Return count of rsync contexts that are in a running state
+ * (running, closed, or terminating); these are the ones that count
+ * against the parallel fetch limit.
+ */
+static int rsync_count_running(const rcynic_ctx_t *rc)
+{
+  const rsync_ctx_t *ctx;
+  int i, n = 0;
+
+  assert(rc && rc->rsync_queue);
+
+  for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
+    switch (ctx->state) {
+    case rsync_state_running:
+    case rsync_state_closed:
+    case rsync_state_terminating:
+      n++;
+    default:
+      continue;
+    }
+  }
+
+  return n;
+}
+
+/**
+ * Test whether an rsync context conflicts with anything that's
+ * currently runable.
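+ *
+ * Per conflicting_uris(), two URIs conflict when either is a prefix
+ * of the other.  Illustrative URIs:
+ *
+ *   rsync://example.org/repo/  vs  rsync://example.org/repo/ca/   conflict
+ *   rsync://example.org/a/     vs  rsync://example.org/b/         disjoint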
+ */ +static int rsync_conflicts(const rcynic_ctx_t *rc, + const rsync_ctx_t *ctx) +{ + const rsync_ctx_t *c; + int i; + + assert(rc && ctx && rc->rsync_queue); + + for (i = 0; (c = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) + if (c != ctx && + (c->state == rsync_state_initial || + c->state == rsync_state_running) && + conflicting_uris(&c->uri, &ctx->uri)) + return 1; + + return 0; +} + +/** + * Test whether a rsync context is runable at this time. + */ +static int rsync_runable(const rcynic_ctx_t *rc, + const rsync_ctx_t *ctx) +{ + assert(rc && ctx); + + switch (ctx->state) { + + case rsync_state_initial: + case rsync_state_running: + return 1; + + case rsync_state_retry_wait: + return ctx->deadline <= time(0); + + case rsync_state_closed: + case rsync_state_terminating: + return 0; + + case rsync_state_conflict_wait: + return !rsync_conflicts(rc, ctx); + + default: + break; + } + + return 0; +} + +/** + * Return count of runable rsync contexts. + */ +static int rsync_count_runable(const rcynic_ctx_t *rc) +{ + const rsync_ctx_t *ctx; + int i, n = 0; + + assert(rc && rc->rsync_queue); + + for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) + if (rsync_runable(rc, ctx)) + n++; + + return n; +} + +/** + * Call rsync context handler, if one is set. + */ +static void rsync_call_handler(rcynic_ctx_t *rc, + rsync_ctx_t *ctx, + const rsync_status_t status) +{ + if (!ctx) + return; + + switch (status) { + + case rsync_status_pending: + case rsync_status_done: + break; + + case rsync_status_failed: + log_validation_status(rc, &ctx->uri, rsync_transfer_failed, object_generation_null); + break; + + case rsync_status_timed_out: + log_validation_status(rc, &ctx->uri, rsync_transfer_timed_out, object_generation_null); + break; + + case rsync_status_skipped: + log_validation_status(rc, &ctx->uri, rsync_transfer_skipped, object_generation_null); + break; + } + + if (ctx->handler) + ctx->handler(rc, ctx, status, &ctx->uri, ctx->cookie); +} + +/** + * Run an rsync process. 
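+ *
+ * For a tree fetch (URI ending in "/"), the argv assembled below
+ * comes out roughly as follows, with an illustrative URI and the
+ * target path derived from rc->unauthenticated:
+ *
+ *   rsync --update --times --copy-links --itemize-changes
+ *         --recursive --delete rsync://example.org/repo/
+ *         <unauthenticated>/example.org/repo/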
+ */ +static void rsync_run(rcynic_ctx_t *rc, + rsync_ctx_t *ctx) +{ + static const char * const rsync_cmd[] = { + "rsync", "--update", "--times", "--copy-links", "--itemize-changes" + }; + static const char * const rsync_tree_args[] = { + "--recursive", "--delete" + }; + + const char *argv[10]; + path_t path; + int i, argc = 0, flags, pipe_fds[2]; + + pipe_fds[0] = pipe_fds[1] = -1; + + assert(rc && ctx && ctx->pid == 0 && ctx->state != rsync_state_running && rsync_runable(rc, ctx)); + + if (rsync_history_uri(rc, &ctx->uri)) { + logmsg(rc, log_verbose, "Late rsync cache hit for %s", ctx->uri.s); + rsync_call_handler(rc, ctx, rsync_status_done); + (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx); + free(ctx); + return; + } + + assert(rsync_count_running(rc) < rc->max_parallel_fetches); + + logmsg(rc, log_telemetry, "Fetching %s", ctx->uri.s); + + memset(argv, 0, sizeof(argv)); + + for (i = 0; i < sizeof(rsync_cmd)/sizeof(*rsync_cmd); i++) { + assert(argc < sizeof(argv)/sizeof(*argv)); + argv[argc++] = rsync_cmd[i]; + } + if (endswith(ctx->uri.s, "/")) { + for (i = 0; i < sizeof(rsync_tree_args)/sizeof(*rsync_tree_args); i++) { + assert(argc < sizeof(argv)/sizeof(*argv)); + argv[argc++] = rsync_tree_args[i]; + } + } + + if (rc->rsync_program) + argv[0] = rc->rsync_program; + + if (!uri_to_filename(rc, &ctx->uri, &path, &rc->unauthenticated)) { + logmsg(rc, log_data_err, "Couldn't extract filename from URI: %s", ctx->uri.s); + goto lose; + } + + assert(argc < sizeof(argv)/sizeof(*argv)); + argv[argc++] = ctx->uri.s; + + assert(argc < sizeof(argv)/sizeof(*argv)); + argv[argc++] = path.s; + + if (!mkdir_maybe(rc, &path)) { + logmsg(rc, log_sys_err, "Couldn't make target directory: %s", path.s); + goto lose; + } + + for (i = 0; i < argc; i++) + logmsg(rc, log_debug, "rsync argv[%d]: %s", i, argv[i]); + + if (pipe(pipe_fds) < 0) { + logmsg(rc, log_sys_err, "pipe() failed: %s", strerror(errno)); + goto lose; + } + + switch ((ctx->pid = vfork())) { + + case -1: + logmsg(rc, log_sys_err, "vfork() failed: %s", strerror(errno)); + goto lose; + + case 0: + /* + * Child + */ +#define whine(msg) ((void) write(2, msg, sizeof(msg) - 1)) + if (close(pipe_fds[0]) < 0) + whine("close(pipe_fds[0]) failed\n"); + else if (dup2(pipe_fds[1], 1) < 0) + whine("dup2(pipe_fds[1], 1) failed\n"); + else if (dup2(pipe_fds[1], 2) < 0) + whine("dup2(pipe_fds[1], 2) failed\n"); + else if (close(pipe_fds[1]) < 0) + whine("close(pipe_fds[1]) failed\n"); + else if (execvp(argv[0], (char * const *) argv) < 0) + whine("execvp(argv[0], (char * const *) argv) failed\n"); + whine("last system error: "); + write(2, strerror(errno), strlen(strerror(errno))); + whine("\n"); + _exit(1); +#undef whine + + default: + /* + * Parent + */ + ctx->fd = pipe_fds[0]; + if ((flags = fcntl(ctx->fd, F_GETFL, 0)) == -1 || + fcntl(ctx->fd, F_SETFL, flags | O_NONBLOCK) == -1) { + logmsg(rc, log_sys_err, "fcntl(ctx->fd, F_[GS]ETFL, O_NONBLOCK) failed: %s", + strerror(errno)); + goto lose; + } + (void) close(pipe_fds[1]); + ctx->state = rsync_state_running; + ctx->problem = rsync_problem_none; + if (!ctx->started) + ctx->started = time(0); + if (rc->rsync_timeout) + ctx->deadline = time(0) + rc->rsync_timeout; + logmsg(rc, log_verbose, "Subprocess %u started, queued %d, runable %d, running %d, max %d, URI %s", + (unsigned) ctx->pid, sk_rsync_ctx_t_num(rc->rsync_queue), rsync_count_runable(rc), rsync_count_running(rc), rc->max_parallel_fetches, ctx->uri.s); + rsync_call_handler(rc, ctx, rsync_status_pending); + return; + + } + + lose: + if 
(pipe_fds[0] != -1) + (void) close(pipe_fds[0]); + if (pipe_fds[1] != -1) + (void) close(pipe_fds[1]); + if (rc->rsync_queue && ctx) + (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx); + rsync_call_handler(rc, ctx, rsync_status_failed); + if (ctx->pid > 0) { + (void) kill(ctx->pid, SIGKILL); + ctx->pid = 0; + } +} + +/** + * Process one line of rsync's output. This is a separate function + * primarily to centralize scraping for magic error strings. + */ +static void do_one_rsync_log_line(const rcynic_ctx_t *rc, + rsync_ctx_t *ctx) +{ + unsigned u; + char *s; + + /* + * Send line to our log unless it's empty. + */ + if (ctx->buffer[strspn(ctx->buffer, " \t\n\r")] != '\0') + logmsg(rc, log_telemetry, "rsync[%u]: %s", ctx->pid, ctx->buffer); + + /* + * Check for magic error strings + */ + if ((s = strstr(ctx->buffer, "@ERROR: max connections")) != NULL) { + ctx->problem = rsync_problem_refused; + if (sscanf(s, "@ERROR: max connections (%u) reached -- try again later", &u) == 1) + logmsg(rc, log_verbose, "Subprocess %u reported limit of %u for %s", ctx->pid, u, ctx->uri.s); + } +} + +/** + * Construct select() arguments. + */ +static int rsync_construct_select(const rcynic_ctx_t *rc, + const time_t now, + fd_set *rfds, + struct timeval *tv) +{ + rsync_ctx_t *ctx; + time_t when = 0; + int i, n = 0; + + assert(rc && rc->rsync_queue && rfds && tv && rc->max_select_time >= 0); + + FD_ZERO(rfds); + + for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) { + +#if 0 + logmsg(rc, log_debug, "+++ ctx[%d] pid %d fd %d state %s started %lu deadline %lu", + i, ctx->pid, ctx->fd, rsync_state_label[ctx->state], + (unsigned long) ctx->started, (unsigned long) ctx->deadline); +#endif + + switch (ctx->state) { + + case rsync_state_running: + assert(ctx->fd >= 0); + FD_SET(ctx->fd, rfds); + if (ctx->fd > n) + n = ctx->fd; + if (!rc->rsync_timeout) + continue; + /* Fall through */ + + case rsync_state_retry_wait: + if (when == 0 || ctx->deadline < when) + when = ctx->deadline; + /* Fall through */ + + default: + continue; + } + } + + if (!when) + tv->tv_sec = rc->max_select_time; + else if (when < now) + tv->tv_sec = 0; + else if (when < now + rc->max_select_time) + tv->tv_sec = when - now; + else + tv->tv_sec = rc->max_select_time; + tv->tv_usec = 0; + return n; +} + +/** + * Convert rsync_status_t to mib_counter_t. + * + * Maybe some day this will go away and we won't be carrying + * essentially the same information in two different databases, but + * for now I'll settle for cleaning up the duplicate code logic. + */ +static mib_counter_t rsync_status_to_mib_counter(rsync_status_t status) +{ + switch (status) { + case rsync_status_done: return rsync_transfer_succeeded; + case rsync_status_timed_out: return rsync_transfer_timed_out; + case rsync_status_failed: return rsync_transfer_failed; + case rsync_status_skipped: return rsync_transfer_skipped; + default: + /* + * Keep GCC from whining about untested cases. + */ + assert(status == rsync_status_done || + status == rsync_status_timed_out || + status == rsync_status_failed || + status == rsync_status_skipped); + return rsync_transfer_failed; + } +} + +/** + * Manager for queue of rsync tasks in progress. + * + * General plan here is to process one completed child, or output + * accumulated from children, or block if there is absolutely nothing + * to do, on the theory that caller had nothing to do either or would + * not have called us. 
Once we've done something allegedly useful, we + * return, because this is not the event loop; if and when the event + * loop has nothing more important to do, we'll be called again. + * + * So this is the only place where the program blocks waiting for + * children, but we only do it when we know there's nothing else + * useful that we could be doing while we wait. + */ +static void rsync_mgr(rcynic_ctx_t *rc) +{ + rsync_status_t rsync_status; + int i, n, pid_status = -1; + rsync_ctx_t *ctx = NULL; + time_t now = time(0); + struct timeval tv; + fd_set rfds; + pid_t pid; + char *s; + + assert(rc && rc->rsync_queue); + + /* + * Check for exited subprocesses. + */ + + while ((pid = waitpid(-1, &pid_status, WNOHANG)) > 0) { + + /* + * Child exited, handle it. + */ + + logmsg(rc, log_verbose, "Subprocess %u exited with status %d", + (unsigned) pid, WEXITSTATUS(pid_status)); + + for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) + if (ctx->pid == pid) + break; + if (ctx == NULL) { + assert(i == sk_rsync_ctx_t_num(rc->rsync_queue)); + logmsg(rc, log_sys_err, "Couldn't find rsync context for pid %d", pid); + continue; + } + + close(ctx->fd); + ctx->fd = -1; + + if (ctx->buflen > 0) { + assert(ctx->buflen < sizeof(ctx->buffer)); + ctx->buffer[ctx->buflen] = '\0'; + do_one_rsync_log_line(rc, ctx); + ctx->buflen = 0; + } + + switch (WEXITSTATUS(pid_status)) { + + case 0: + rsync_status = rsync_status_done; + break; + + case 5: /* "Error starting client-server protocol" */ + /* + * Handle remote rsyncd refusing to talk to us because we've + * exceeded its connection limit. Back off for a short + * interval, then retry. + */ + if (ctx->problem == rsync_problem_refused && ctx->tries < rc->max_retries) { + unsigned char r; + if (!RAND_bytes(&r, sizeof(r))) + r = 60; + ctx->deadline = time(0) + rc->retry_wait_min + r; + ctx->state = rsync_state_retry_wait; + ctx->problem = rsync_problem_none; + ctx->pid = 0; + ctx->tries++; + logmsg(rc, log_telemetry, "Scheduling retry for %s", ctx->uri.s); + continue; + } + goto failure; + + case 23: /* "Partial transfer due to error" */ + /* + * This appears to be a catch-all for "something bad happened + * trying to do what you asked me to do". In the cases I've + * seen to date, this is things like "the directory you + * requested isn't there" or "NFS exploded when I tried to touch + * the directory". These aren't network layer failures, so we + * (probably) shouldn't give up on the repository host. + */ + rsync_status = rsync_status_done; + log_validation_status(rc, &ctx->uri, rsync_partial_transfer, object_generation_null); + break; + + default: + failure: + rsync_status = rsync_status_failed; + logmsg(rc, log_data_err, "rsync %u exited with status %d fetching %s", + (unsigned) pid, WEXITSTATUS(pid_status), ctx->uri.s); + break; + } + + if (rc->rsync_timeout && now >= ctx->deadline) + rsync_status = rsync_status_timed_out; + log_validation_status(rc, &ctx->uri, + rsync_status_to_mib_counter(rsync_status), + object_generation_null); + rsync_history_add(rc, ctx, rsync_status); + rsync_call_handler(rc, ctx, rsync_status); + (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx); + free(ctx); + ctx = NULL; + } + + if (pid == -1 && errno != EINTR && errno != ECHILD) + logmsg(rc, log_sys_err, "waitpid() returned error: %s", strerror(errno)); + + assert(rsync_count_running(rc) <= rc->max_parallel_fetches); + + /* + * Look for rsync contexts that have become runable. 
Odd loop + * structure is because rsync_run() might decide to remove the + * specified rsync task from the queue instead of running it. + */ + for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; i++) { + n = sk_rsync_ctx_t_num(rc->rsync_queue); + if (ctx->state != rsync_state_running && + rsync_runable(rc, ctx) && + rsync_count_running(rc) < rc->max_parallel_fetches) + rsync_run(rc, ctx); + if (n > sk_rsync_ctx_t_num(rc->rsync_queue)) + i--; + } + + assert(rsync_count_running(rc) <= rc->max_parallel_fetches); + + /* + * Check for log text from subprocesses. + */ + + n = rsync_construct_select(rc, now, &rfds, &tv); + + if (n > 0 && tv.tv_sec) + logmsg(rc, log_verbose, "Waiting up to %u seconds for rsync, queued %d, runable %d, running %d, max %d", + (unsigned) tv.tv_sec, sk_rsync_ctx_t_num(rc->rsync_queue), rsync_count_runable(rc), + rsync_count_running(rc), rc->max_parallel_fetches); + + if (n > 0) { +#if 0 + logmsg(rc, log_debug, "++ select(%d, %u)", n, tv.tv_sec); +#endif + n = select(n + 1, &rfds, NULL, NULL, &tv); + } + + if (n > 0) { + + for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) { + if (ctx->fd <= 0 || !FD_ISSET(ctx->fd, &rfds)) + continue; + + assert(ctx->buflen < sizeof(ctx->buffer) - 1); + + while ((n = read(ctx->fd, ctx->buffer + ctx->buflen, sizeof(ctx->buffer) - 1 - ctx->buflen)) > 0) { + ctx->buflen += n; + assert(ctx->buflen < sizeof(ctx->buffer)); + ctx->buffer[ctx->buflen] = '\0'; + + while ((s = strchr(ctx->buffer, '\n')) != NULL) { + *s++ = '\0'; + do_one_rsync_log_line(rc, ctx); + assert(s > ctx->buffer && s < ctx->buffer + sizeof(ctx->buffer)); + ctx->buflen -= s - ctx->buffer; + assert(ctx->buflen < sizeof(ctx->buffer)); + if (ctx->buflen > 0) + memmove(ctx->buffer, s, ctx->buflen); + ctx->buffer[ctx->buflen] = '\0'; + } + + if (ctx->buflen == sizeof(ctx->buffer) - 1) { + ctx->buffer[sizeof(ctx->buffer) - 1] = '\0'; + do_one_rsync_log_line(rc, ctx); + ctx->buflen = 0; + } + } + + if (n == 0) { + (void) close(ctx->fd); + ctx->fd = -1; + ctx->state = rsync_state_closed; + } + } + } + + assert(rsync_count_running(rc) <= rc->max_parallel_fetches); + + /* + * Deal with children that have been running too long. + */ + if (rc->rsync_timeout) { + for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) { + int sig; + if (ctx->pid <= 0 || now < ctx->deadline) + continue; + sig = ctx->tries++ < KILL_MAX ? SIGTERM : SIGKILL; + if (ctx->state != rsync_state_terminating) { + ctx->problem = rsync_problem_timed_out; + ctx->state = rsync_state_terminating; + ctx->tries = 0; + logmsg(rc, log_telemetry, "Subprocess %u is taking too long fetching %s, whacking it", (unsigned) ctx->pid, ctx->uri.s); + rsync_history_add(rc, ctx, rsync_status_timed_out); + } else if (sig == SIGTERM) { + logmsg(rc, log_verbose, "Whacking subprocess %u again", (unsigned) ctx->pid); + } else { + logmsg(rc, log_verbose, "Whacking subprocess %u with big hammer", (unsigned) ctx->pid); + } + (void) kill(ctx->pid, sig); + ctx->deadline = now + 1; + } + } +} + +/** + * Set up rsync context and attempt to start it. 
+ */ +static void rsync_init(rcynic_ctx_t *rc, + const uri_t *uri, + void *cookie, + void (*handler)(rcynic_ctx_t *, const rsync_ctx_t *, const rsync_status_t, const uri_t *, void *)) +{ + rsync_ctx_t *ctx = NULL; + + assert(rc && uri && strlen(uri->s) > SIZEOF_RSYNC); + + if (!rc->run_rsync) { + logmsg(rc, log_verbose, "rsync disabled, skipping %s", uri->s); + if (handler) + handler(rc, NULL, rsync_status_skipped, uri, cookie); + return; + } + + if (rsync_history_uri(rc, uri)) { + logmsg(rc, log_verbose, "rsync cache hit for %s", uri->s); + if (handler) + handler(rc, NULL, rsync_status_done, uri, cookie); + return; + } + + if ((ctx = malloc(sizeof(*ctx))) == NULL) { + logmsg(rc, log_sys_err, "malloc(rsync_ctxt_t) failed"); + if (handler) + handler(rc, NULL, rsync_status_failed, uri, cookie); + return; + } + + memset(ctx, 0, sizeof(*ctx)); + ctx->uri = *uri; + ctx->handler = handler; + ctx->cookie = cookie; + ctx->fd = -1; + + if (!sk_rsync_ctx_t_push(rc->rsync_queue, ctx)) { + logmsg(rc, log_sys_err, "Couldn't push rsync state object onto queue, punting %s", ctx->uri.s); + rsync_call_handler(rc, ctx, rsync_status_failed); + free(ctx); + return; + } + + if (rsync_conflicts(rc, ctx)) { + logmsg(rc, log_debug, "New rsync context %s is feeling conflicted", ctx->uri.s); + ctx->state = rsync_state_conflict_wait; + } +} + +/** + * rsync a trust anchor. + */ +static void rsync_ta(rcynic_ctx_t *rc, + const uri_t *uri, + tal_ctx_t *tctx, + void (*handler)(rcynic_ctx_t *, const rsync_ctx_t *, + const rsync_status_t, const uri_t *, void *)) +{ + assert(endswith(uri->s, ".cer")); + rsync_init(rc, uri, tctx, handler); +} + +/** + * rsync an entire subtree, generally rooted at a SIA collection. + */ +static void rsync_tree(rcynic_ctx_t *rc, + const uri_t *uri, + STACK_OF(walk_ctx_t) *wsk, + void (*handler)(rcynic_ctx_t *, const rsync_ctx_t *, + const rsync_status_t, const uri_t *, void *)) +{ + assert(endswith(uri->s, "/")); + rsync_init(rc, uri, wsk, handler); +} + + + +/** + * Clean up old stuff from previous rsync runs. --delete doesn't help + * if the URI changes and we never visit the old URI again. + */ +static int prune_unauthenticated(const rcynic_ctx_t *rc, + const path_t *name, + const size_t baselen) +{ + path_t path; + struct dirent *d; + DIR *dir; + const char *slash; + + assert(rc && name && baselen > 0 && strlen(name->s) >= baselen); + + if (!is_directory(name)) { + logmsg(rc, log_usage_err, "prune: %s is not a directory", name->s); + return 0; + } + + if ((dir = opendir(name->s)) == NULL) { + logmsg(rc, log_sys_err, "prune: opendir() failed on %s: %s", name->s, strerror(errno)); + return 0; + } + + slash = endswith(name->s, "/") ? 
"" : "/"; + + while ((d = readdir(dir)) != NULL) { + if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, "..")) + continue; + + if (snprintf(path.s, sizeof(path.s), "%s%s%s", name->s, slash, d->d_name) >= sizeof(path.s)) { + logmsg(rc, log_debug, "prune: %s%s%s too long", name->s, slash, d->d_name); + goto done; + } + + if (validation_status_find_filename(rc, path.s + baselen)) { + logmsg(rc, log_debug, "prune: cache hit %s", path.s); + continue; + } + + if (unlink(path.s) == 0) { + logmsg(rc, log_debug, "prune: removed %s", path.s); + continue; + } + + if (prune_unauthenticated(rc, &path, baselen)) + continue; + + logmsg(rc, log_sys_err, "prune: removing %s failed: %s", path.s, strerror(errno)); + goto done; + } + + if (rmdir(name->s) == 0) + logmsg(rc, log_debug, "prune: removed %s", name->s); + else if (errno != ENOTEMPTY) + logmsg(rc, log_sys_err, "prune: couldn't remove %s: %s", name->s, strerror(errno)); + + done: + closedir(dir); + return !d; +} + + + +/** + * Read a DER object using a BIO pipeline that hashes the file content + * as we read it. Returns the internal form of the parsed DER object, + * sets the hash buffer (if specified) as a side effect. The default + * hash algorithm is SHA-256. + */ +static void *read_file_with_hash(const path_t *filename, + const ASN1_ITEM *it, + const EVP_MD *md, + hashbuf_t *hash) +{ + void *result = NULL; + BIO *b; + + if ((b = BIO_new_file(filename->s, "rb")) == NULL) + goto error; + + if (hash != NULL) { + BIO *b2 = BIO_new(BIO_f_md()); + if (b2 == NULL) + goto error; + if (md == NULL) + md = EVP_sha256(); + if (!BIO_set_md(b2, md)) { + BIO_free(b2); + goto error; + } + BIO_push(b2, b); + b = b2; + } + + if ((result = ASN1_item_d2i_bio(it, b, NULL)) == NULL) + goto error; + + if (hash != NULL) { + memset(hash, 0, sizeof(*hash)); + BIO_gets(b, (char *) hash, sizeof(hash->h)); + } + + error: + BIO_free_all(b); + return result; +} + +/** + * Read and hash a certificate. + */ +static X509 *read_cert(const path_t *filename, hashbuf_t *hash) +{ + return read_file_with_hash(filename, ASN1_ITEM_rptr(X509), NULL, hash); +} + +/** + * Read and hash a CRL. + */ +static X509_CRL *read_crl(const path_t *filename, hashbuf_t *hash) +{ + return read_file_with_hash(filename, ASN1_ITEM_rptr(X509_CRL), NULL, hash); +} + +/** + * Read and hash a CMS message. + */ +static CMS_ContentInfo *read_cms(const path_t *filename, hashbuf_t *hash) +{ + return read_file_with_hash(filename, ASN1_ITEM_rptr(CMS_ContentInfo), NULL, hash); +} + + + +/** + * Extract CRLDP data from a certificate. Stops looking after finding + * the first rsync URI. 
+ */ +static int extract_crldp_uri(rcynic_ctx_t *rc, + const uri_t *uri, + const object_generation_t generation, + const STACK_OF(DIST_POINT) *crldp, + uri_t *result) +{ + DIST_POINT *d; + int i; + + assert(rc && uri && crldp && result); + + if (sk_DIST_POINT_num(crldp) != 1) + goto bad; + + d = sk_DIST_POINT_value(crldp, 0); + + if (d->reasons || d->CRLissuer || !d->distpoint || d->distpoint->type != 0) + goto bad; + + for (i = 0; i < sk_GENERAL_NAME_num(d->distpoint->name.fullname); i++) { + GENERAL_NAME *n = sk_GENERAL_NAME_value(d->distpoint->name.fullname, i); + if (n == NULL || n->type != GEN_URI) + goto bad; + if (!is_rsync((char *) n->d.uniformResourceIdentifier->data)) + log_validation_status(rc, uri, non_rsync_uri_in_extension, generation); + else if (sizeof(result->s) <= n->d.uniformResourceIdentifier->length) + log_validation_status(rc, uri, uri_too_long, generation); + else if (result->s[0]) + log_validation_status(rc, uri, multiple_rsync_uris_in_extension, generation); + else + strcpy(result->s, (char *) n->d.uniformResourceIdentifier->data); + } + + return result->s[0]; + + bad: + log_validation_status(rc, uri, malformed_crldp_extension, generation); + return 0; +} + +/** + * Extract SIA or AIA data from a certificate. + */ +static int extract_access_uri(rcynic_ctx_t *rc, + const uri_t *uri, + const object_generation_t generation, + const AUTHORITY_INFO_ACCESS *xia, + const int nid, + uri_t *result, + int *count) +{ + int i; + + assert(rc && uri && xia && result && count); + + for (i = 0; i < sk_ACCESS_DESCRIPTION_num(xia); i++) { + ACCESS_DESCRIPTION *a = sk_ACCESS_DESCRIPTION_value(xia, i); + if (a == NULL || a->location->type != GEN_URI) + return 0; + if (OBJ_obj2nid(a->method) != nid) + continue; + ++*count; + if (!is_rsync((char *) a->location->d.uniformResourceIdentifier->data)) + log_validation_status(rc, uri, non_rsync_uri_in_extension, generation); + else if (sizeof(result->s) <= a->location->d.uniformResourceIdentifier->length) + log_validation_status(rc, uri, uri_too_long, generation); + else if (result->s[0]) + log_validation_status(rc, uri, multiple_rsync_uris_in_extension, generation); + else + strcpy(result->s, (char *) a->location->d.uniformResourceIdentifier->data); + } + return 1; +} + + + +/** + * Check to see whether an AKI extension is present, is of the right + * form, and matches the issuer. + */ +static int check_aki(rcynic_ctx_t *rc, + const uri_t *uri, + const X509 *issuer, + const AUTHORITY_KEYID *aki, + const object_generation_t generation) +{ + assert(rc && uri && issuer && issuer->skid); + + if (aki == NULL) { + log_validation_status(rc, uri, aki_extension_missing, generation); + return 0; + } + + if (!aki->keyid || aki->serial || aki->issuer) { + log_validation_status(rc, uri, aki_extension_wrong_format, generation); + return 0; + } + + if (ASN1_OCTET_STRING_cmp(aki->keyid, issuer->skid)) { + log_validation_status(rc, uri, aki_extension_issuer_mismatch, generation); + return 0; + } + + return 1; +} + + + +/** + * Check whether a Distinguished Name conforms to the rescert profile. + * The profile is very restrictive: it only allows one mandatory + * CommonName field and one optional SerialNumber field, both of which + * must be of type PrintableString. 
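+ *
+ * Illustrative subject names:
+ *
+ *   CN=foo                    (PrintableString)        accepted
+ *   CN=foo, serialNumber=42   (both PrintableString)   accepted
+ *   CN=foo                    (UTF8String)             rejected
+ *   CN=foo, O=bar                                      rejected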
+ */ +static int check_allowed_dn(X509_NAME *dn) +{ + X509_NAME_ENTRY *ne; + ASN1_STRING *s; + int loc; + + if (dn == NULL) + return 0; + + switch (X509_NAME_entry_count(dn)) { + + case 2: + if ((loc = X509_NAME_get_index_by_NID(dn, NID_serialNumber, -1)) < 0 || + (ne = X509_NAME_get_entry(dn, loc)) == NULL || + (s = X509_NAME_ENTRY_get_data(ne)) == NULL || + ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING) + return 0; + + /* Fall through */ + + case 1: + if ((loc = X509_NAME_get_index_by_NID(dn, NID_commonName, -1)) < 0 || + (ne = X509_NAME_get_entry(dn, loc)) == NULL || + (s = X509_NAME_ENTRY_get_data(ne)) == NULL || + ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING) + return 0; + + return 1; + + default: + return 0; + } +} + +/** + * Check whether an ASN.1 TIME value conforms to RFC 5280 4.1.2.5. + */ +static int check_allowed_time_encoding(ASN1_TIME *t) +{ + switch (t->type) { + + case V_ASN1_UTCTIME: + return t->length == sizeof("yymmddHHMMSSZ") - 1; + + case V_ASN1_GENERALIZEDTIME: + return (t->length == sizeof("yyyymmddHHMMSSZ") - 1 && + strcmp("205", (char *) t->data) <= 0); + + } + return 0; +} + +/** + * Compare ASN1_TIME values. + */ +static int asn1_time_cmp(ASN1_TIME *t1, ASN1_TIME *t2) +{ + ASN1_GENERALIZEDTIME *g1 = ASN1_TIME_to_generalizedtime(t1, NULL); + ASN1_GENERALIZEDTIME *g2 = ASN1_TIME_to_generalizedtime(t2, NULL); + + int cmp = ASN1_STRING_cmp(g1, g2); + + ASN1_GENERALIZEDTIME_free(g1); + ASN1_GENERALIZEDTIME_free(g2); + + return cmp; +} + + + +/** + * Attempt to read and check one CRL from disk. + */ + +static X509_CRL *check_crl_1(rcynic_ctx_t *rc, + const uri_t *uri, + path_t *path, + const path_t *prefix, + X509 *issuer, + const object_generation_t generation) +{ + STACK_OF(X509_REVOKED) *revoked; + X509_CRL *crl = NULL; + EVP_PKEY *pkey; + int i, ret; + + assert(uri && path && issuer); + + if (!uri_to_filename(rc, uri, path, prefix) || + (crl = read_crl(path, NULL)) == NULL) + goto punt; + + if (X509_CRL_get_version(crl) != 1) { + log_validation_status(rc, uri, wrong_object_version, generation); + goto punt; + } + + if (!crl->crl || !crl->crl->sig_alg || !crl->crl->sig_alg->algorithm || + OBJ_obj2nid(crl->crl->sig_alg->algorithm) != NID_sha256WithRSAEncryption) { + log_validation_status(rc, uri, nonconformant_signature_algorithm, generation); + goto punt; + } + + if (!check_allowed_time_encoding(X509_CRL_get_lastUpdate(crl)) || + !check_allowed_time_encoding(X509_CRL_get_nextUpdate(crl))) { + log_validation_status(rc, uri, nonconformant_asn1_time_value, generation); + goto punt; + } + + if (X509_cmp_current_time(X509_CRL_get_lastUpdate(crl)) > 0) { + log_validation_status(rc, uri, crl_not_yet_valid, generation); + goto punt; + } + + if (X509_cmp_current_time(X509_CRL_get_nextUpdate(crl)) < 0) { + log_validation_status(rc, uri, stale_crl_or_manifest, generation); + if (!rc->allow_stale_crl) + goto punt; + } + + if (!check_aki(rc, uri, issuer, crl->akid, generation)) + goto punt; + + if (crl->crl_number == NULL) { + log_validation_status(rc, uri, crl_number_extension_missing, generation); + goto punt; + } + + if (ASN1_INTEGER_cmp(crl->crl_number, asn1_zero) < 0) { + log_validation_status(rc, uri, crl_number_is_negative, generation); + goto punt; + } + + if (ASN1_INTEGER_cmp(crl->crl_number, asn1_twenty_octets) > 0) { + log_validation_status(rc, uri, crl_number_out_of_range, generation); + goto punt; + } + + if (X509_CRL_get_ext_count(crl) != 2) { + log_validation_status(rc, uri, disallowed_x509v3_extension, generation); + goto punt; + } + + if 
(X509_NAME_cmp(X509_CRL_get_issuer(crl), X509_get_subject_name(issuer))) { + log_validation_status(rc, uri, crl_issuer_name_mismatch, generation); + goto punt; + } + + if (!check_allowed_dn(X509_CRL_get_issuer(crl))) { + log_validation_status(rc, uri, nonconformant_issuer_name, generation); + if (!rc->allow_nonconformant_name) + goto punt; + } + + if ((revoked = X509_CRL_get_REVOKED(crl)) != NULL) { + for (i = sk_X509_REVOKED_num(revoked) - 1; i >= 0; --i) { + if (X509_REVOKED_get_ext_count(sk_X509_REVOKED_value(revoked, i)) > 0) { + log_validation_status(rc, uri, disallowed_x509v3_extension, generation); + goto punt; + } + } + } + + if ((pkey = X509_get_pubkey(issuer)) == NULL) + goto punt; + ret = X509_CRL_verify(crl, pkey); + EVP_PKEY_free(pkey); + + if (ret > 0) + return crl; + + punt: + X509_CRL_free(crl); + return NULL; +} + +/** + * Check whether we already have a particular CRL, attempt to fetch it + * and check issuer's signature if we don't. + * + * General plan here is to do basic checks on both current and backup + * generation CRLs, then, if both generations pass all of our other + * tests, pick the generation with the highest CRL number, to protect + * against replay attacks. + */ +static X509_CRL *check_crl(rcynic_ctx_t *rc, + const uri_t *uri, + X509 *issuer) +{ + X509_CRL *old_crl, *new_crl, *result = NULL; + path_t old_path, new_path; + + if (uri_to_filename(rc, uri, &new_path, &rc->new_authenticated) && + (new_crl = read_crl(&new_path, NULL)) != NULL) + return new_crl; + + logmsg(rc, log_telemetry, "Checking CRL %s", uri->s); + + new_crl = check_crl_1(rc, uri, &new_path, &rc->unauthenticated, + issuer, object_generation_current); + + old_crl = check_crl_1(rc, uri, &old_path, &rc->old_authenticated, + issuer, object_generation_backup); + + if (!new_crl) + result = old_crl; + + else if (!old_crl) + result = new_crl; + + else { + ASN1_GENERALIZEDTIME *g_old = ASN1_TIME_to_generalizedtime(X509_CRL_get_lastUpdate(old_crl), NULL); + ASN1_GENERALIZEDTIME *g_new = ASN1_TIME_to_generalizedtime(X509_CRL_get_lastUpdate(new_crl), NULL); + int num_cmp = ASN1_INTEGER_cmp(old_crl->crl_number, new_crl->crl_number); + int date_cmp = (!g_old || !g_new) ? 0 : ASN1_STRING_cmp(g_old, g_new); + + if (!g_old) + log_validation_status(rc, uri, bad_thisupdate, object_generation_backup); + if (!g_new) + log_validation_status(rc, uri, bad_thisupdate, object_generation_current); + if (num_cmp > 0) + log_validation_status(rc, uri, backup_number_higher_than_current, object_generation_current); + if (g_old && g_new && date_cmp > 0) + log_validation_status(rc, uri, backup_thisupdate_newer_than_current, object_generation_current); + + if (num_cmp > 0 && (!g_old || !g_new || date_cmp > 0)) + result = old_crl; + else + result = new_crl; + + ASN1_GENERALIZEDTIME_free(g_old); + ASN1_GENERALIZEDTIME_free(g_new); + } + + if (result && result == new_crl) + install_object(rc, uri, &new_path, object_generation_current); + else if (!access(new_path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, object_generation_current); + + if (result && result == old_crl) + install_object(rc, uri, &old_path, object_generation_backup); + else if (!result && !access(old_path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, object_generation_backup); + + if (result != new_crl) + X509_CRL_free(new_crl); + + if (result != old_crl) + X509_CRL_free(old_crl); + + return result; +} + + +/** + * Check digest of a CRL we've already accepted. 
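+ *
+ * The hash and length come from the corresponding FileAndHash entry
+ * on the manifest; caller sketch:
+ *
+ *   FileAndHash *fah = sk_FileAndHash_value(manifest->fileList, i);
+ *   if (!check_crl_digest(rc, &crl_uri, fah->hash->data,
+ *                         fah->hash->length))
+ *     the CRL we accepted no longer matches the manifest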
+ */ +static int check_crl_digest(const rcynic_ctx_t *rc, + const uri_t *uri, + const unsigned char *hash, + const size_t hashlen) +{ + X509_CRL *crl = NULL; + hashbuf_t hashbuf; + path_t path; + int result; + + assert(rc && uri && hash); + + if (!uri_to_filename(rc, uri, &path, &rc->new_authenticated) || + (crl = read_crl(&path, &hashbuf)) == NULL) + return 0; + + result = hashlen <= sizeof(hashbuf.h) && !memcmp(hashbuf.h, hash, hashlen); + + X509_CRL_free(crl); + + return result; +} + + + +/** + * Validation callback function for use with x509_verify_cert(). + */ +static int check_x509_cb(int ok, X509_STORE_CTX *ctx) +{ + rcynic_x509_store_ctx_t *rctx = (rcynic_x509_store_ctx_t *) ctx; + mib_counter_t code; + + assert(rctx != NULL); + + switch (ctx->error) { + case X509_V_OK: + return ok; + + case X509_V_ERR_SUBJECT_ISSUER_MISMATCH: + /* + * Informational events, not really errors. ctx->check_issued() + * is called in many places where failure to find an issuer is not + * a failure for the calling function. Just leave these alone. + */ + return ok; + + case X509_V_ERR_CRL_HAS_EXPIRED: + /* + * This isn't really an error, exactly. CRLs don't really + * "expire". What OpenSSL really means by this error is just + * "it's now later than the issuer said it intended to publish a + * new CRL". Whether we treat this as an error or not is + * configurable, see the allow_stale_crl parameter. + * + * Deciding whether to allow stale CRLs is check_crl_1()'s job, + * not ours. By the time this callback occurs, we've already + * accepted the CRL; this callback is just notifying us that the + * object being checked is tainted by a stale CRL. So we mark the + * object as tainted and carry on. + */ + log_validation_status(rctx->rc, &rctx->subject->uri, tainted_by_stale_crl, rctx->subject->generation); + ok = 1; + return ok; + + case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: + /* + * This is another error that's only an error in the strange world + * of OpenSSL, but a more serious one. By default, OpenSSL + * expects all trust anchors to be self-signed. This is not a + * PKIX requirement, it's just an OpenSSL thing, but one violates + * it at one's peril, because the only way to convince OpenSSL to + * allow a non-self-signed trust anchor is to intercept this + * "error" in the verify callback handler. + * + * So this program supports non-self-signed trust anchors, but be + * warned that enabling this feature may cause this program's + * output not to work with other OpenSSL-based applications. + */ + if (rctx->rc->allow_non_self_signed_trust_anchor) + ok = 1; + log_validation_status(rctx->rc, &rctx->subject->uri, trust_anchor_not_self_signed, rctx->subject->generation); + return ok; + + /* + * Select correct MIB counter for every known OpenSSL verify errors + * except the ones we handle explicitly above, then fall through to + * common handling for all of these. + */ +#define QV(x) \ + case x: \ + code = mib_openssl_##x; \ + break; + MIB_COUNTERS_FROM_OPENSSL; +#undef QV + + default: + code = unknown_openssl_verify_error; + break; + } + + log_validation_status(rctx->rc, &rctx->subject->uri, code, rctx->subject->generation); + return ok; +} + +/** + * Check crypto aspects of a certificate, policy OID, RFC 3779 path + * validation, and conformance to the RPKI certificate profile. 
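+ *
+ * The X.509 path validation itself is delegated to OpenSSL, driven
+ * with the flags
+ *
+ *   X509_V_FLAG_POLICY_CHECK | X509_V_FLAG_EXPLICIT_POLICY |
+ *   X509_V_FLAG_X509_STRICT
+ *
+ * and with check_x509_cb() above mapping OpenSSL verify errors onto
+ * this program's MIB counters.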
+ */ +static int check_x509(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + const uri_t *uri, + X509 *x, + certinfo_t *certinfo, + const object_generation_t generation) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + rcynic_x509_store_ctx_t rctx; + EVP_PKEY *issuer_pkey = NULL, *subject_pkey = NULL; + unsigned long flags = (X509_V_FLAG_POLICY_CHECK | X509_V_FLAG_EXPLICIT_POLICY | X509_V_FLAG_X509_STRICT); + AUTHORITY_INFO_ACCESS *sia = NULL, *aia = NULL; + STACK_OF(POLICYINFO) *policies = NULL; + ASN1_BIT_STRING *ski_pubkey = NULL; + STACK_OF(DIST_POINT) *crldp = NULL; + EXTENDED_KEY_USAGE *eku = NULL; + BASIC_CONSTRAINTS *bc = NULL; + hashbuf_t ski_hashbuf; + unsigned ski_hashlen, afi; + int i, ok, crit, loc, ex_count, routercert = 0, ret = 0; + + assert(rc && wsk && w && uri && x && w->cert); + + /* + * Cleanup logic will explode if rctx.ctx hasn't been initialized, + * so we need to do this before running any test that can fail. + */ + if (!X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, x, NULL)) + return 0; + + /* + * certinfo == NULL means x is a self-signed trust anchor. + */ + if (certinfo == NULL) + certinfo = &w->certinfo; + + memset(certinfo, 0, sizeof(*certinfo)); + + certinfo->uri = *uri; + certinfo->generation = generation; + + if (ASN1_INTEGER_cmp(X509_get_serialNumber(x), asn1_zero) <= 0 || + ASN1_INTEGER_cmp(X509_get_serialNumber(x), asn1_twenty_octets) > 0) { + log_validation_status(rc, uri, bad_certificate_serial_number, generation); + goto done; + } + + if (!check_allowed_time_encoding(X509_get_notBefore(x)) || + !check_allowed_time_encoding(X509_get_notAfter(x))) { + log_validation_status(rc, uri, nonconformant_asn1_time_value, generation); + goto done; + } + + /* + * Apparently nothing ever looks at these fields, so there are no + * API functions for them. We wouldn't bother either if they + * weren't forbidden by the RPKI certificate profile. + */ + if (!x->cert_info || x->cert_info->issuerUID || x->cert_info->subjectUID) { + log_validation_status(rc, uri, nonconformant_certificate_uid, generation); + goto done; + } + + /* + * Keep track of allowed extensions we've seen. Once we've + * processed all the ones we expect, anything left is an error. + */ + ex_count = X509_get_ext_count(x); + + /* + * We don't use X509_check_ca() to set certinfo->ca anymore, because + * it's not paranoid enough to enforce the RPKI certificate profile, + * but we still call it because we need it (or something) to invoke + * x509v3_cache_extensions() for us. 
+ */ + (void) X509_check_ca(x); + + if ((bc = X509_get_ext_d2i(x, NID_basic_constraints, &crit, NULL)) != NULL) { + ex_count--; + if (!crit || bc->ca <= 0 || bc->pathlen != NULL) { + log_validation_status(rc, uri, malformed_basic_constraints, generation); + goto done; + } + } + + certinfo->ca = bc != NULL; + + if (certinfo == &w->certinfo) { + certinfo->ta = 1; + if (!certinfo->ca) { + log_validation_status(rc, uri, malformed_trust_anchor, generation); + goto done; + } + } + + if ((aia = X509_get_ext_d2i(x, NID_info_access, NULL, NULL)) != NULL) { + int n_caIssuers = 0; + ex_count--; + if (!extract_access_uri(rc, uri, generation, aia, NID_ad_ca_issuers, + &certinfo->aia, &n_caIssuers) || + !certinfo->aia.s[0] || + sk_ACCESS_DESCRIPTION_num(aia) != n_caIssuers) { + log_validation_status(rc, uri, malformed_aia_extension, generation); + goto done; + } + } + + if (certinfo->ta && aia) { + log_validation_status(rc, uri, aia_extension_forbidden, generation); + goto done; + } + + if (!certinfo->ta && !aia) { + log_validation_status(rc, uri, aia_extension_missing, generation); + goto done; + } + + if ((eku = X509_get_ext_d2i(x, NID_ext_key_usage, &crit, NULL)) != NULL) { + ex_count--; + if (crit || certinfo->ca || !endswith(uri->s, ".cer") || sk_ASN1_OBJECT_num(eku) == 0) { + log_validation_status(rc, uri, inappropriate_eku_extension, generation); + goto done; + } + for (i = 0; i < sk_ASN1_OBJECT_num(eku); i++) + routercert |= OBJ_obj2nid(sk_ASN1_OBJECT_value(eku, i)) == NID_id_kp_bgpsec_router; + } + + if ((sia = X509_get_ext_d2i(x, NID_sinfo_access, NULL, NULL)) != NULL) { + int got_caDirectory, got_rpkiManifest, got_signedObject; + int n_caDirectory = 0, n_rpkiManifest = 0, n_signedObject = 0; + ex_count--; + ok = (extract_access_uri(rc, uri, generation, sia, NID_caRepository, + &certinfo->sia, &n_caDirectory) && + extract_access_uri(rc, uri, generation, sia, NID_ad_rpkiManifest, + &certinfo->manifest, &n_rpkiManifest) && + extract_access_uri(rc, uri, generation, sia, NID_ad_signedObject, + &certinfo->signedobject, &n_signedObject)); + got_caDirectory = certinfo->sia.s[0] != '\0'; + got_rpkiManifest = certinfo->manifest.s[0] != '\0'; + got_signedObject = certinfo->signedobject.s[0] != '\0'; + ok &= sk_ACCESS_DESCRIPTION_num(sia) == n_caDirectory + n_rpkiManifest + n_signedObject; + if (certinfo->ca) + ok &= got_caDirectory && got_rpkiManifest && !got_signedObject; + else if (rc->allow_ee_without_signedObject) + ok &= !got_caDirectory && !got_rpkiManifest; + else + ok &= !got_caDirectory && !got_rpkiManifest && got_signedObject; + if (!ok) { + log_validation_status(rc, uri, malformed_sia_extension, generation); + goto done; + } + } else if (certinfo->ca || !rc->allow_ee_without_signedObject) { + log_validation_status(rc, uri, sia_extension_missing, generation); + goto done; + } else if (!routercert) { + log_validation_status(rc, uri, sia_extension_missing_from_ee, generation); + } + + if (certinfo->signedobject.s[0] && strcmp(uri->s, certinfo->signedobject.s)) + log_validation_status(rc, uri, bad_signed_object_uri, generation); + + if ((crldp = X509_get_ext_d2i(x, NID_crl_distribution_points, NULL, NULL)) != NULL) { + ex_count--; + if (!extract_crldp_uri(rc, uri, generation, crldp, &certinfo->crldp)) + goto done; + } + + rctx.rc = rc; + rctx.subject = certinfo; + + if (w->certs == NULL && (w->certs = walk_ctx_stack_certs(rc, wsk)) == NULL) + goto done; + + if (X509_get_version(x) != 2) { + log_validation_status(rc, uri, wrong_object_version, generation); + goto done; + } + + if (!x->cert_info || 
!x->cert_info->signature || !x->cert_info->signature->algorithm || + OBJ_obj2nid(x->cert_info->signature->algorithm) != NID_sha256WithRSAEncryption) { + log_validation_status(rc, uri, nonconformant_signature_algorithm, generation); + goto done; + } + + if (certinfo->sia.s[0] && certinfo->sia.s[strlen(certinfo->sia.s) - 1] != '/') { + log_validation_status(rc, uri, malformed_cadirectory_uri, generation); + goto done; + } + + if (!w->certinfo.ta && strcmp(w->certinfo.uri.s, certinfo->aia.s)) + log_validation_status(rc, uri, aia_doesnt_match_issuer, generation); + + if (certinfo->ca && !certinfo->sia.s[0]) { + log_validation_status(rc, uri, sia_cadirectory_uri_missing, generation); + goto done; + } + + if (certinfo->ca && !certinfo->manifest.s[0]) { + log_validation_status(rc, uri, sia_manifest_uri_missing, generation); + goto done; + } + + if (certinfo->ca && !startswith(certinfo->manifest.s, certinfo->sia.s)) { + log_validation_status(rc, uri, manifest_carepository_mismatch, generation); + goto done; + } + + if (x->skid) { + ex_count--; + } else { + log_validation_status(rc, uri, ski_extension_missing, generation); + goto done; + } + + if (!check_allowed_dn(X509_get_subject_name(x))) { + log_validation_status(rc, uri, nonconformant_subject_name, generation); + if (!rc->allow_nonconformant_name) + goto done; + } + + if (!check_allowed_dn(X509_get_issuer_name(x))) { + log_validation_status(rc, uri, nonconformant_issuer_name, generation); + if (!rc->allow_nonconformant_name) + goto done; + } + + if ((policies = X509_get_ext_d2i(x, NID_certificate_policies, &crit, NULL)) != NULL) { + POLICYQUALINFO *qualifier = NULL; + POLICYINFO *policy = NULL; + ex_count--; + if (!crit || sk_POLICYINFO_num(policies) != 1 || + (policy = sk_POLICYINFO_value(policies, 0)) == NULL || + OBJ_obj2nid(policy->policyid) != NID_cp_ipAddr_asNumber || + sk_POLICYQUALINFO_num(policy->qualifiers) > 1 || + (sk_POLICYQUALINFO_num(policy->qualifiers) == 1 && + ((qualifier = sk_POLICYQUALINFO_value(policy->qualifiers, 0)) == NULL || + OBJ_obj2nid(qualifier->pqualid) != NID_id_qt_cps))) { + log_validation_status(rc, uri, bad_certificate_policy, generation); + goto done; + } + if (qualifier) + log_validation_status(rc, uri, policy_qualifier_cps, generation); + } + + if (!X509_EXTENSION_get_critical(X509_get_ext(x, X509_get_ext_by_NID(x, NID_key_usage, -1))) || + (x->ex_flags & EXFLAG_KUSAGE) == 0 || + x->ex_kusage != (certinfo->ca ? 
KU_KEY_CERT_SIGN | KU_CRL_SIGN : KU_DIGITAL_SIGNATURE)) { + log_validation_status(rc, uri, bad_key_usage, generation); + goto done; + } + ex_count--; + + if (x->rfc3779_addr) { + ex_count--; + if (routercert || + (loc = X509_get_ext_by_NID(x, NID_sbgp_ipAddrBlock, -1)) < 0 || + !X509_EXTENSION_get_critical(X509_get_ext(x, loc)) || + !v3_addr_is_canonical(x->rfc3779_addr) || + sk_IPAddressFamily_num(x->rfc3779_addr) == 0) { + log_validation_status(rc, uri, bad_ipaddrblocks, generation); + goto done; + } + for (i = 0; i < sk_IPAddressFamily_num(x->rfc3779_addr); i++) { + IPAddressFamily *f = sk_IPAddressFamily_value(x->rfc3779_addr, i); + afi = v3_addr_get_afi(f); + if (afi != IANA_AFI_IPV4 && afi != IANA_AFI_IPV6) { + log_validation_status(rc, uri, unknown_afi, generation); + goto done; + } + if (f->addressFamily->length != 2) { + log_validation_status(rc, uri, safi_not_allowed, generation); + goto done; + } + } + } + + if (x->rfc3779_asid) { + ex_count--; + if ((loc = X509_get_ext_by_NID(x, NID_sbgp_autonomousSysNum, -1)) < 0 || + !X509_EXTENSION_get_critical(X509_get_ext(x, loc)) || + !v3_asid_is_canonical(x->rfc3779_asid) || + x->rfc3779_asid->asnum == NULL || + x->rfc3779_asid->rdi != NULL || + (routercert && x->rfc3779_asid->asnum->type == ASIdentifierChoice_inherit)) { + log_validation_status(rc, uri, bad_asidentifiers, generation); + goto done; + } + } + + if (!x->rfc3779_addr && !x->rfc3779_asid) { + log_validation_status(rc, uri, missing_resources, generation); + goto done; + } + + subject_pkey = X509_get_pubkey(x); + ok = subject_pkey != NULL; + if (ok) { + ASN1_OBJECT *algorithm; + + (void) X509_PUBKEY_get0_param(&algorithm, NULL, NULL, NULL, X509_get_X509_PUBKEY(x)); + + switch (OBJ_obj2nid(algorithm)) { + + case NID_rsaEncryption: + ok = (EVP_PKEY_type(subject_pkey->type) == EVP_PKEY_RSA && + BN_get_word(subject_pkey->pkey.rsa->e) == 65537); + if (!ok) + break; + if (!certinfo->ca && rc->allow_1024_bit_ee_key && + BN_num_bits(subject_pkey->pkey.rsa->n) == 1024) + log_validation_status(rc, uri, ee_certificate_with_1024_bit_key, generation); + else + ok = BN_num_bits(subject_pkey->pkey.rsa->n) == 2048; + break; + + case NID_X9_62_id_ecPublicKey: + ok = !certinfo->ca && routercert; + break; + + default: + ok = 0; + } + } + if (!ok) { + log_validation_status(rc, uri, bad_public_key, generation); + goto done; + } + + if (x->skid == NULL || + (ski_pubkey = X509_get0_pubkey_bitstr(x)) == NULL || + !EVP_Digest(ski_pubkey->data, ski_pubkey->length, + ski_hashbuf.h, &ski_hashlen, EVP_sha1(), NULL) || + ski_hashlen != 20 || + ski_hashlen != x->skid->length || + memcmp(ski_hashbuf.h, x->skid->data, ski_hashlen)) { + log_validation_status(rc, uri, ski_public_key_mismatch, generation); + goto done; + } + + if (x->akid) { + ex_count--; + if (!check_aki(rc, uri, w->cert, x->akid, generation)) + goto done; + } + + if (!x->akid && !certinfo->ta) { + log_validation_status(rc, uri, aki_extension_missing, generation); + goto done; + } + + if ((issuer_pkey = X509_get_pubkey(w->cert)) == NULL || X509_verify(x, issuer_pkey) <= 0) { + log_validation_status(rc, uri, certificate_bad_signature, generation); + goto done; + } + + if (certinfo->ta) { + + if (certinfo->crldp.s[0]) { + log_validation_status(rc, uri, trust_anchor_with_crldp, generation); + goto done; + } + + } else { + + if (!certinfo->crldp.s[0]) { + log_validation_status(rc, uri, crldp_uri_missing, generation); + goto done; + } + + if (!certinfo->ca && !startswith(certinfo->crldp.s, w->certinfo.sia.s)) { + log_validation_status(rc, uri, 
crldp_doesnt_match_issuer_sia, generation); + goto done; + } + + if (w->crls == NULL && ((w->crls = sk_X509_CRL_new_null()) == NULL || + !sk_X509_CRL_push(w->crls, NULL))) { + logmsg(rc, log_sys_err, "Internal allocation error setting up CRL for validation"); + goto done; + } + + assert(sk_X509_CRL_num(w->crls) == 1); + assert((w->crldp.s[0] == '\0') == (sk_X509_CRL_value(w->crls, 0) == NULL)); + + if (strcmp(w->crldp.s, certinfo->crldp.s)) { + X509_CRL *old_crl = sk_X509_CRL_value(w->crls, 0); + X509_CRL *new_crl = check_crl(rc, &certinfo->crldp, w->cert); + + if (w->crldp.s[0]) + log_validation_status(rc, uri, issuer_uses_multiple_crldp_values, generation); + + if (new_crl == NULL) { + log_validation_status(rc, uri, bad_crl, generation); + goto done; + } + + if (old_crl && new_crl && ASN1_INTEGER_cmp(old_crl->crl_number, new_crl->crl_number) < 0) { + log_validation_status(rc, uri, crldp_names_newer_crl, generation); + X509_CRL_free(old_crl); + old_crl = NULL; + } + + if (old_crl == NULL) { + sk_X509_CRL_set(w->crls, 0, new_crl); + w->crldp = certinfo->crldp; + } else { + X509_CRL_free(new_crl); + } + } + + assert(sk_X509_CRL_value(w->crls, 0)); + flags |= X509_V_FLAG_CRL_CHECK; + X509_STORE_CTX_set0_crls(&rctx.ctx, w->crls); + } + + if (ex_count > 0) { + log_validation_status(rc, uri, disallowed_x509v3_extension, generation); + goto done; + } + + assert(w->certs != NULL); + X509_STORE_CTX_trusted_stack(&rctx.ctx, w->certs); + X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb); + + X509_VERIFY_PARAM_set_flags(rctx.ctx.param, flags); + + X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_nid2obj(NID_cp_ipAddr_asNumber)); + + if (X509_verify_cert(&rctx.ctx) <= 0) { + log_validation_status(rc, uri, certificate_failed_validation, generation); + goto done; + } + + ret = 1; + + done: + X509_STORE_CTX_cleanup(&rctx.ctx); + EVP_PKEY_free(issuer_pkey); + EVP_PKEY_free(subject_pkey); + BASIC_CONSTRAINTS_free(bc); + sk_ACCESS_DESCRIPTION_pop_free(sia, ACCESS_DESCRIPTION_free); + sk_ACCESS_DESCRIPTION_pop_free(aia, ACCESS_DESCRIPTION_free); + sk_DIST_POINT_pop_free(crldp, DIST_POINT_free); + sk_POLICYINFO_pop_free(policies, POLICYINFO_free); + sk_ASN1_OBJECT_pop_free(eku, ASN1_OBJECT_free); + + return ret; +} + +/** + * Extract one datum from a CMS_SignerInfo. + */ +static void *extract_si_datum(CMS_SignerInfo *si, + int *n, + const int optional, + const int nid, + const int asn1_type) +{ + int i = CMS_signed_get_attr_by_NID(si, nid, -1); + void *result = NULL; + X509_ATTRIBUTE *a; + + assert(si && n); + + if (i < 0 && optional) + return NULL; + + if (i >= 0 && + CMS_signed_get_attr_by_NID(si, nid, i) < 0 && + (a = CMS_signed_get_attr(si, i)) != NULL && + X509_ATTRIBUTE_count(a) == 1 && + (result = X509_ATTRIBUTE_get0_data(a, 0, asn1_type, NULL)) != NULL) + --*n; + else + *n = -1; + + return result; +} + +/** + * Check a signed CMS object. 
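+ *
+ * The signed-attribute accounting done with extract_si_datum() above
+ * works by counting down: start from CMS_signed_get_attr_count(si),
+ * decrement once per recognized attribute, and demand that the count
+ * land on exactly zero.  A hedged sketch of the idea ("complain" is a
+ * hypothetical stand-in for the real logging):
+ *
+ *   int n = CMS_signed_get_attr_count(si);
+ *   (void) extract_si_datum(si, &n, 1, NID_pkcs9_signingTime, V_ASN1_UTCTIME);
+ *   oid = extract_si_datum(si, &n, 0, NID_pkcs9_contentType, V_ASN1_OBJECT);
+ *   if (n != 0)
+ *     complain();      /* missing, duplicated, or unexpected attributes */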
+ */ +static int check_cms(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + const uri_t *uri, + path_t *path, + const path_t *prefix, + CMS_ContentInfo **pcms, + X509 **px, + certinfo_t *certinfo, + BIO *bio, + const unsigned char *hash, + const size_t hashlen, + const int expected_eContentType_nid, + const int require_inheritance, + const object_generation_t generation) +{ + STACK_OF(CMS_SignerInfo) *signer_infos = NULL; + CMS_ContentInfo *cms = NULL; + CMS_SignerInfo *si = NULL; + ASN1_OCTET_STRING *sid = NULL; + X509_NAME *si_issuer = NULL; + ASN1_INTEGER *si_serial = NULL; + STACK_OF(X509_CRL) *crls = NULL; + STACK_OF(X509) *certs = NULL; + X509_ALGOR *signature_alg = NULL, *digest_alg = NULL; + ASN1_OBJECT *oid = NULL; + hashbuf_t hashbuf; + X509 *x = NULL; + certinfo_t certinfo_; + int i, result = 0; + + assert(rc && wsk && uri && path && prefix); + + if (!certinfo) + certinfo = &certinfo_; + + if (!uri_to_filename(rc, uri, path, prefix)) + goto error; + + if (hash) + cms = read_cms(path, &hashbuf); + else + cms = read_cms(path, NULL); + + if (!cms) + goto error; + + if (hash && (hashlen > sizeof(hashbuf.h) || + memcmp(hashbuf.h, hash, hashlen))) { + log_validation_status(rc, uri, digest_mismatch, generation); + if (!rc->allow_digest_mismatch) + goto error; + } + + if (OBJ_obj2nid(CMS_get0_eContentType(cms)) != expected_eContentType_nid) { + log_validation_status(rc, uri, bad_cms_econtenttype, generation); + goto error; + } + + if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) { + log_validation_status(rc, uri, cms_validation_failure, generation); + goto error; + } + + if ((crls = CMS_get1_crls(cms)) != NULL) { + log_validation_status(rc, uri, cms_includes_crls, generation); + goto error; + } + + if ((signer_infos = CMS_get0_SignerInfos(cms)) == NULL || + sk_CMS_SignerInfo_num(signer_infos) != 1 || + (si = sk_CMS_SignerInfo_value(signer_infos, 0)) == NULL || + !CMS_SignerInfo_get0_signer_id(si, &sid, &si_issuer, &si_serial) || + sid == NULL || si_issuer != NULL || si_serial != NULL || + CMS_unsigned_get_attr_count(si) != -1) { + log_validation_status(rc, uri, bad_cms_signer_infos, generation); + goto error; + } + + CMS_SignerInfo_get0_algs(si, NULL, &x, &digest_alg, &signature_alg); + + if (x == NULL) { + log_validation_status(rc, uri, cms_signer_missing, generation); + goto error; + } + + if ((certs = CMS_get1_certs(cms)) == NULL || + sk_X509_num(certs) != 1 || + X509_cmp(x, sk_X509_value(certs, 0))) { + log_validation_status(rc, uri, bad_cms_signer, generation); + goto error; + } + + X509_ALGOR_get0(&oid, NULL, NULL, signature_alg); + i = OBJ_obj2nid(oid); + if (i != NID_sha256WithRSAEncryption && i != NID_rsaEncryption) { + log_validation_status(rc, uri, wrong_cms_si_signature_algorithm, generation); + goto error; + } + + X509_ALGOR_get0(&oid, NULL, NULL, digest_alg); + if (OBJ_obj2nid(oid) != NID_sha256) { + log_validation_status(rc, uri, wrong_cms_si_digest_algorithm, generation); + goto error; + } + + i = CMS_signed_get_attr_count(si); + + (void) extract_si_datum(si, &i, 1, NID_pkcs9_signingTime, V_ASN1_UTCTIME); + (void) extract_si_datum(si, &i, 1, NID_binary_signing_time, V_ASN1_INTEGER); + oid = extract_si_datum(si, &i, 0, NID_pkcs9_contentType, V_ASN1_OBJECT); + (void) extract_si_datum(si, &i, 0, NID_pkcs9_messageDigest, V_ASN1_OCTET_STRING); + + if (i != 0) { + log_validation_status(rc, uri, bad_cms_si_signed_attributes, generation); + if (!rc->allow_wrong_cms_si_attributes) + goto error; + } + + if (OBJ_obj2nid(oid) != expected_eContentType_nid) 
{ + log_validation_status(rc, uri, bad_cms_si_contenttype, generation); + goto error; + } + + if (CMS_SignerInfo_cert_cmp(si, x)) { + log_validation_status(rc, uri, cms_ski_mismatch, generation); + goto error; + } + + if (!check_x509(rc, wsk, uri, x, certinfo, generation)) + goto error; + + if (require_inheritance && x->rfc3779_addr) { + for (i = 0; i < sk_IPAddressFamily_num(x->rfc3779_addr); i++) { + IPAddressFamily *f = sk_IPAddressFamily_value(x->rfc3779_addr, i); + if (f->ipAddressChoice->type != IPAddressChoice_inherit) { + log_validation_status(rc, uri, rfc3779_inheritance_required, generation); + goto error; + } + } + } + + if (require_inheritance && x->rfc3779_asid && x->rfc3779_asid->asnum && + x->rfc3779_asid->asnum->type != ASIdentifierChoice_inherit) { + log_validation_status(rc, uri, rfc3779_inheritance_required, generation); + goto error; + } + + if (pcms) { + *pcms = cms; + cms = NULL; + } + + if (px) + *px = x; + + result = 1; + + error: + CMS_ContentInfo_free(cms); + sk_X509_CRL_pop_free(crls, X509_CRL_free); + sk_X509_pop_free(certs, X509_free); + + return result; +} + + + +/** + * Load certificate, check against manifest, then run it through all + * the check_x509() tests. + */ +static X509 *check_cert_1(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + const uri_t *uri, + path_t *path, + const path_t *prefix, + certinfo_t *certinfo, + const unsigned char *hash, + const size_t hashlen, + object_generation_t generation) +{ + hashbuf_t hashbuf; + X509 *x = NULL; + + assert(uri && path && wsk && certinfo); + + if (!uri_to_filename(rc, uri, path, prefix)) + return NULL; + + if (access(path->s, R_OK)) + return NULL; + + if (hash) + x = read_cert(path, &hashbuf); + else + x = read_cert(path, NULL); + + if (!x) { + logmsg(rc, log_sys_err, "Can't read certificate %s", path->s); + goto punt; + } + + if (hash && (hashlen > sizeof(hashbuf.h) || + memcmp(hashbuf.h, hash, hashlen))) { + log_validation_status(rc, uri, digest_mismatch, generation); + if (!rc->allow_digest_mismatch) + goto punt; + } + + if (check_x509(rc, wsk, uri, x, certinfo, generation)) + return x; + + punt: + X509_free(x); + return NULL; +} + +/** + * Try to find a good copy of a certificate either in fresh data or in + * backup data from a previous run of this program. + */ +static X509 *check_cert(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + uri_t *uri, + certinfo_t *certinfo, + const unsigned char *hash, + const size_t hashlen) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + object_generation_t generation; + const path_t *prefix = NULL; + path_t path; + X509 *x; + + assert(rc && uri && wsk && w && certinfo); + + switch (w->state) { + case walk_state_current: + prefix = &rc->unauthenticated; + generation = object_generation_current; + break; + case walk_state_backup: + prefix = &rc->old_authenticated; + generation = object_generation_backup; + break; + default: + return NULL; + } + + if (skip_checking_this_object(rc, uri, generation)) + return NULL; + + if ((x = check_cert_1(rc, wsk, uri, &path, prefix, certinfo, + hash, hashlen, generation)) != NULL) + install_object(rc, uri, &path, generation); + else if (!access(path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, generation); + else if (hash && generation == w->manifest_generation) + log_validation_status(rc, uri, manifest_lists_missing_object, generation); + + return x; +} + + + +/** + * Read and check one manifest from disk. 
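+ *
+ * A note on the validity-window tests below: X509_cmp_current_time()
+ * returns a positive value when its argument is later than "now" and a
+ * negative value when it is earlier, so (writing m for the decoded
+ * manifest, as a sketch):
+ *
+ *   if (X509_cmp_current_time(m->thisUpdate) > 0)   /* not yet valid */
+ *     ...
+ *   if (X509_cmp_current_time(m->nextUpdate) < 0)   /* stale */
+ *     ...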
+ */ +static Manifest *check_manifest_1(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + const uri_t *uri, + path_t *path, + const path_t *prefix, + certinfo_t *certinfo, + const object_generation_t generation) +{ + STACK_OF(FileAndHash) *sorted_fileList = NULL; + Manifest *manifest = NULL, *result = NULL; + CMS_ContentInfo *cms = NULL; + FileAndHash *fah = NULL, *fah2 = NULL; + BIO *bio = NULL; + X509 *x; + int i; + + assert(rc && wsk && uri && path && prefix); + + if ((bio = BIO_new(BIO_s_mem())) == NULL) { + logmsg(rc, log_sys_err, "Couldn't allocate BIO for manifest %s", uri->s); + goto done; + } + + if (!check_cms(rc, wsk, uri, path, prefix, &cms, &x, certinfo, bio, NULL, 0, + NID_ct_rpkiManifest, 1, generation)) + goto done; + + if ((manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL)) == NULL) { + log_validation_status(rc, uri, cms_econtent_decode_error, generation); + goto done; + } + + if (manifest->version) { + log_validation_status(rc, uri, wrong_object_version, generation); + goto done; + } + + if (X509_cmp_current_time(manifest->thisUpdate) > 0) { + log_validation_status(rc, uri, manifest_not_yet_valid, generation); + goto done; + } + + if (X509_cmp_current_time(manifest->nextUpdate) < 0) { + log_validation_status(rc, uri, stale_crl_or_manifest, generation); + if (!rc->allow_stale_manifest) + goto done; + } + + if (asn1_time_cmp(manifest->thisUpdate, X509_get_notBefore(x)) < 0 || + asn1_time_cmp(manifest->nextUpdate, X509_get_notAfter(x)) > 0) { + log_validation_status(rc, uri, manifest_interval_overruns_cert, generation); + goto done; + } + + if (ASN1_INTEGER_cmp(manifest->manifestNumber, asn1_zero) < 0 || + ASN1_INTEGER_cmp(manifest->manifestNumber, asn1_twenty_octets) > 0) { + log_validation_status(rc, uri, bad_manifest_number, generation); + goto done; + } + + if (OBJ_obj2nid(manifest->fileHashAlg) != NID_sha256) { + log_validation_status(rc, uri, nonconformant_digest_algorithm, generation); + goto done; + } + + if ((sorted_fileList = sk_FileAndHash_dup(manifest->fileList)) == NULL) { + logmsg(rc, log_sys_err, "Couldn't allocate shallow copy of fileList for manifest %s", uri->s); + goto done; + } + + (void) sk_FileAndHash_set_cmp_func(sorted_fileList, FileAndHash_name_cmp); + sk_FileAndHash_sort(sorted_fileList); + + for (i = 0; (fah = sk_FileAndHash_value(sorted_fileList, i)) != NULL && (fah2 = sk_FileAndHash_value(sorted_fileList, i + 1)) != NULL; i++) { + if (!strcmp((char *) fah->file->data, (char *) fah2->file->data)) { + log_validation_status(rc, uri, duplicate_name_in_manifest, generation); + goto done; + } + } + + for (i = 0; (fah = sk_FileAndHash_value(manifest->fileList, i)) != NULL; i++) { + if (fah->hash->length != HASH_SHA256_LEN || + (fah->hash->flags & (ASN1_STRING_FLAG_BITS_LEFT | 7)) > ASN1_STRING_FLAG_BITS_LEFT) { + log_validation_status(rc, uri, bad_manifest_digest_length, generation); + goto done; + } + } + + result = manifest; + manifest = NULL; + + done: + BIO_free(bio); + Manifest_free(manifest); + CMS_ContentInfo_free(cms); + sk_FileAndHash_free(sorted_fileList); + return result; +} + +/** + * Check whether we already have a particular manifest, attempt to fetch it + * and check issuer's signature if we don't. + * + * General plan here is to do basic checks on both current and backup + * generation manifests, then, if both generations pass all of our + * other tests, pick the generation with the highest manifest number, + * to protect against replay attacks. 
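+ *
+ * As a sketch of that selection rule (a hypothetical helper, not code
+ * this file actually defines), the backup generation wins only when
+ * both its manifestNumber and its thisUpdate are strictly greater:
+ *
+ *   static int backup_wins(const Manifest *old_m, const Manifest *new_m)
+ *   {
+ *     return ASN1_INTEGER_cmp(old_m->manifestNumber, new_m->manifestNumber) > 0 &&
+ *            ASN1_STRING_cmp(old_m->thisUpdate, new_m->thisUpdate) > 0;
+ *   }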
+ * + * Once we've picked the manifest we're going to use, we need to check + * it against the CRL we've chosen. Not much we can do if they don't + * match besides whine about it, but we do need to whine in this case. + */ +static int check_manifest(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + Manifest *old_manifest, *new_manifest, *result = NULL; + certinfo_t old_certinfo, new_certinfo; + const uri_t *uri, *crldp = NULL; + object_generation_t generation = object_generation_null; + path_t old_path, new_path; + FileAndHash *fah = NULL; + const char *crl_tail; + int i, ok = 1; + + assert(rc && wsk && w && !w->manifest); + + uri = &w->certinfo.manifest; + + logmsg(rc, log_telemetry, "Checking manifest %s", uri->s); + + new_manifest = check_manifest_1(rc, wsk, uri, &new_path, + &rc->unauthenticated, &new_certinfo, + object_generation_current); + + old_manifest = check_manifest_1(rc, wsk, uri, &old_path, + &rc->old_authenticated, &old_certinfo, + object_generation_backup); + + if (!new_manifest) + result = old_manifest; + + else if (!old_manifest) + result = new_manifest; + + else { + int num_cmp = ASN1_INTEGER_cmp(old_manifest->manifestNumber, new_manifest->manifestNumber); + int date_cmp = ASN1_STRING_cmp(old_manifest->thisUpdate, new_manifest->thisUpdate); + + if (num_cmp > 0) + log_validation_status(rc, uri, backup_number_higher_than_current, object_generation_current); + if (date_cmp > 0) + log_validation_status(rc, uri, backup_thisupdate_newer_than_current, object_generation_current); + + if (num_cmp > 0 && date_cmp > 0) + result = old_manifest; + else + result = new_manifest; + } + + if (result && result == new_manifest) { + generation = object_generation_current; + install_object(rc, uri, &new_path, generation); + crldp = &new_certinfo.crldp; + } + + if (result && result == old_manifest) { + generation = object_generation_backup; + install_object(rc, uri, &old_path, generation); + crldp = &old_certinfo.crldp; + } + + if (result) { + crl_tail = strrchr(crldp->s, '/'); + assert(crl_tail != NULL); + crl_tail++; + + for (i = 0; (fah = sk_FileAndHash_value(result->fileList, i)) != NULL; i++) + if (!strcmp((char *) fah->file->data, crl_tail)) + break; + + if (!fah) { + log_validation_status(rc, uri, crl_not_in_manifest, generation); + if (rc->require_crl_in_manifest) + ok = 0; + } + + else if (!check_crl_digest(rc, crldp, fah->hash->data, fah->hash->length)) { + log_validation_status(rc, uri, digest_mismatch, generation); + if (!rc->allow_crl_digest_mismatch) + ok = 0; + } + } + + if ((!result || result != new_manifest) && !access(new_path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, object_generation_current); + + if (!result && !access(old_path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, object_generation_backup); + + if (result != new_manifest) + Manifest_free(new_manifest); + + if (result != old_manifest) + Manifest_free(old_manifest); + + w->manifest = result; + if (crldp) + w->crldp = *crldp; + w->manifest_generation = generation; + + return ok; +} + + + +/** + * Mark CRL or manifest that we're rechecking so XML report makes more sense. 
+ */ +static void rsync_needed_mark_recheck(rcynic_ctx_t *rc, + const uri_t *uri) +{ + validation_status_t *v = NULL; + + if (uri->s[0] != '\0') + v = validation_status_find(rc->validation_status_root, + uri, object_generation_current); + + if (v) { + validation_status_set_code(v, stale_crl_or_manifest, 0); + log_validation_status(rc, uri, rechecking_object, + object_generation_current); + } +} + +/** + * Check whether we need to rsync a particular tree. This depends on + * the setting of rc->rsync_early, whether we have a valid manifest on + * file, and whether that manifest is stale yet. + */ +static int rsync_needed(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + int needed; + + assert(rc && wsk && w); + + needed = (rc->rsync_early || + !check_manifest(rc, wsk) || + w->manifest == NULL || + X509_cmp_current_time(w->manifest->nextUpdate) < 0); + + if (needed && w->manifest != NULL) { + rsync_needed_mark_recheck(rc, &w->certinfo.manifest); + rsync_needed_mark_recheck(rc, &w->certinfo.crldp); + Manifest_free(w->manifest); + w->manifest = NULL; + } + + return needed; +} + + + +/** + * Extract a ROA prefix from the ASN.1 bitstring encoding. + */ +static int extract_roa_prefix(const ROAIPAddress *ra, + const unsigned afi, + unsigned char *addr, + unsigned *prefixlen, + unsigned *max_prefixlen) +{ + unsigned length; + long maxlen; + + assert(ra && addr && prefixlen && max_prefixlen); + + maxlen = ASN1_INTEGER_get(ra->maxLength); + + switch (afi) { + case IANA_AFI_IPV4: length = 4; break; + case IANA_AFI_IPV6: length = 16; break; + default: return 0; + } + + if (ra->IPAddress->length < 0 || ra->IPAddress->length > length || + maxlen < 0 || maxlen > (long) length * 8) + return 0; + + if (ra->IPAddress->length > 0) { + memcpy(addr, ra->IPAddress->data, ra->IPAddress->length); + if ((ra->IPAddress->flags & 7) != 0) { + unsigned char mask = 0xFF >> (8 - (ra->IPAddress->flags & 7)); + addr[ra->IPAddress->length - 1] &= ~mask; + } + } + + memset(addr + ra->IPAddress->length, 0, length - ra->IPAddress->length); + *prefixlen = (ra->IPAddress->length * 8) - (ra->IPAddress->flags & 7); + *max_prefixlen = ra->maxLength ? (unsigned) maxlen : *prefixlen; + + return 1; +} + +/** + * Read and check one ROA from disk. 
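+ *
+ * Worked example of the prefix encoding that extract_roa_prefix()
+ * above unpacks: 10.64.0.0/10 travels as a two-octet BIT STRING
+ * { 0x0a, 0x40 } whose low three flag bits record 6 unused bits, so
+ * prefixlen = 2 * 8 - 6 = 10; when maxLength is absent, max_prefixlen
+ * simply defaults to the prefix length itself.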
+ */ +static int check_roa_1(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + const uri_t *uri, + path_t *path, + const path_t *prefix, + const unsigned char *hash, + const size_t hashlen, + const object_generation_t generation) +{ + STACK_OF(IPAddressFamily) *roa_resources = NULL, *ee_resources = NULL; + unsigned char addrbuf[ADDR_RAW_BUF_LEN]; + CMS_ContentInfo *cms = NULL; + BIO *bio = NULL; + ROA *roa = NULL; + X509 *x = NULL; + int i, j, result = 0; + unsigned afi, *safi = NULL, safi_, prefixlen, max_prefixlen; + ROAIPAddressFamily *rf; + ROAIPAddress *ra; + + assert(rc && wsk && uri && path && prefix); + + if ((bio = BIO_new(BIO_s_mem())) == NULL) { + logmsg(rc, log_sys_err, "Couldn't allocate BIO for ROA %s", uri->s); + goto error; + } + + if (!check_cms(rc, wsk, uri, path, prefix, &cms, &x, NULL, bio, NULL, 0, + NID_ct_ROA, 0, generation)) + goto error; + + if (!(roa = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), bio, NULL))) { + log_validation_status(rc, uri, cms_econtent_decode_error, generation); + goto error; + } + + if (roa->version) { + log_validation_status(rc, uri, wrong_object_version, generation); + goto error; + } + + if (ASN1_INTEGER_cmp(roa->asID, asn1_zero) < 0 || + ASN1_INTEGER_cmp(roa->asID, asn1_four_octets) > 0) { + log_validation_status(rc, uri, bad_roa_asID, generation); + goto error; + } + + ee_resources = X509_get_ext_d2i(x, NID_sbgp_ipAddrBlock, NULL, NULL); + + /* + * Extract prefixes from ROA and convert them into a resource set. + */ + + if (!(roa_resources = sk_IPAddressFamily_new_null())) + goto error; + + for (i = 0; i < sk_ROAIPAddressFamily_num(roa->ipAddrBlocks); i++) { + rf = sk_ROAIPAddressFamily_value(roa->ipAddrBlocks, i); + if (!rf || !rf->addressFamily || rf->addressFamily->length < 2 || rf->addressFamily->length > 3) { + log_validation_status(rc, uri, malformed_roa_addressfamily, generation); + goto error; + } + afi = (rf->addressFamily->data[0] << 8) | (rf->addressFamily->data[1]); + if (rf->addressFamily->length == 3) + *(safi = &safi_) = rf->addressFamily->data[2]; + for (j = 0; j < sk_ROAIPAddress_num(rf->addresses); j++) { + ra = sk_ROAIPAddress_value(rf->addresses, j); + if (!ra || + !extract_roa_prefix(ra, afi, addrbuf, &prefixlen, &max_prefixlen) || + !v3_addr_add_prefix(roa_resources, afi, safi, addrbuf, prefixlen)) { + log_validation_status(rc, uri, roa_resources_malformed, generation); + goto error; + } + if (max_prefixlen < prefixlen) { + log_validation_status(rc, uri, roa_max_prefixlen_too_short, generation); + goto error; + } + } + } + + /* + * ROAs can include nested prefixes, so direct translation to + * resource sets could include overlapping ranges, which is illegal. + * So we have to remove nested stuff before whacking into canonical + * form. Fortunately, this is relatively easy, since we know these + * are just prefixes, not ranges: in a list of prefixes sorted by + * the RFC 3779 rules, the first element of a set of nested prefixes + * will always be the least specific. 
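+ *
+ * For example, in the sorted list { 10.0.0.0/16, 10.0.0.0/24,
+ * 10.1.0.0/16 } the /24 sorts immediately after the /16 that covers
+ * it; its maximum address is not above the /16's, so the loop below
+ * deletes it and re-examines the same position against 10.1.0.0/16,
+ * which survives because its maximum address is higher.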
+ */ + + for (i = 0; i < sk_IPAddressFamily_num(roa_resources); i++) { + IPAddressFamily *f = sk_IPAddressFamily_value(roa_resources, i); + + if ((afi = v3_addr_get_afi(f)) == 0) { + log_validation_status(rc, uri, roa_contains_bad_afi_value, generation); + goto error; + } + + if (f->ipAddressChoice->type == IPAddressChoice_addressesOrRanges) { + IPAddressOrRanges *aors = f->ipAddressChoice->u.addressesOrRanges; + + sk_IPAddressOrRange_sort(aors); + + for (j = 0; j < sk_IPAddressOrRange_num(aors) - 1; j++) { + IPAddressOrRange *a = sk_IPAddressOrRange_value(aors, j); + IPAddressOrRange *b = sk_IPAddressOrRange_value(aors, j + 1); + unsigned char a_min[ADDR_RAW_BUF_LEN], a_max[ADDR_RAW_BUF_LEN]; + unsigned char b_min[ADDR_RAW_BUF_LEN], b_max[ADDR_RAW_BUF_LEN]; + int length; + + if ((length = v3_addr_get_range(a, afi, a_min, a_max, ADDR_RAW_BUF_LEN)) == 0 || + (length = v3_addr_get_range(b, afi, b_min, b_max, ADDR_RAW_BUF_LEN)) == 0) { + log_validation_status(rc, uri, roa_resources_malformed, generation); + goto error; + } + + if (memcmp(a_max, b_max, length) >= 0) { + (void) sk_IPAddressOrRange_delete(aors, j + 1); + IPAddressOrRange_free(b); + --j; + } + } + } + } + + if (!v3_addr_canonize(roa_resources)) { + log_validation_status(rc, uri, roa_resources_malformed, generation); + goto error; + } + + if (!v3_addr_subset(roa_resources, ee_resources)) { + log_validation_status(rc, uri, roa_resource_not_in_ee, generation); + goto error; + } + + result = 1; + + error: + BIO_free(bio); + ROA_free(roa); + CMS_ContentInfo_free(cms); + sk_IPAddressFamily_pop_free(roa_resources, IPAddressFamily_free); + sk_IPAddressFamily_pop_free(ee_resources, IPAddressFamily_free); + + return result; +} + +/** + * Check whether we already have a particular ROA, attempt to fetch it + * and check issuer's signature if we don't. + */ +static void check_roa(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + const uri_t *uri, + const unsigned char *hash, + const size_t hashlen) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + path_t path; + + assert(rc && wsk && w && uri); + + if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) && + !access(path.s, F_OK)) + return; + + logmsg(rc, log_telemetry, "Checking ROA %s", uri->s); + + if (check_roa_1(rc, wsk, uri, &path, &rc->unauthenticated, + hash, hashlen, object_generation_current)) { + install_object(rc, uri, &path, object_generation_current); + return; + } + + if (!access(path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, object_generation_current); + else if (hash) + log_validation_status(rc, uri, manifest_lists_missing_object, object_generation_current); + + if (check_roa_1(rc, wsk, uri, &path, &rc->old_authenticated, + hash, hashlen, object_generation_backup)) { + install_object(rc, uri, &path, object_generation_backup); + return; + } + + if (!access(path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, object_generation_backup); + else if (hash && w->manifest_generation == object_generation_backup) + log_validation_status(rc, uri, manifest_lists_missing_object, object_generation_backup); +} + + + +/** + * Read and check one Ghostbuster record from disk. 
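+ *
+ * Should we ever want to inspect the vCard payload (RFC 6493), a
+ * hedged sketch of how it could be fished out of the memory BIO that
+ * check_cms() fills in (this assumes the #if 0 BIO allocation in the
+ * function below were enabled):
+ *
+ *   char *vcard = NULL;
+ *   long vcard_len = BIO_get_mem_data(bio, &vcard);
+ *   /* vcard_len bytes of eContent, not NUL-terminated */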
+ */ +static int check_ghostbuster_1(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + const uri_t *uri, + path_t *path, + const path_t *prefix, + const unsigned char *hash, + const size_t hashlen, + const object_generation_t generation) +{ + CMS_ContentInfo *cms = NULL; + BIO *bio = NULL; + X509 *x; + int result = 0; + + assert(rc && wsk && uri && path && prefix); + +#if 0 + /* + * May want this later if we're going to inspect the VCard. For now, + * just leave this NULL and the right thing should happen. + */ + if ((bio = BIO_new(BIO_s_mem())) == NULL) { + logmsg(rc, log_sys_err, "Couldn't allocate BIO for Ghostbuster record %s", uri->s); + goto error; + } +#endif + + if (!check_cms(rc, wsk, uri, path, prefix, &cms, &x, NULL, bio, NULL, 0, + NID_ct_rpkiGhostbusters, 1, generation)) + goto error; + +#if 0 + /* + * Here is where we would read the VCard from the bio returned by + * CMS_verify() so that we could check the VCard. + */ +#endif + + result = 1; + + error: + BIO_free(bio); + CMS_ContentInfo_free(cms); + + return result; +} + +/** + * Check whether we already have a particular Ghostbuster record, + * attempt to fetch it and check issuer's signature if we don't. + */ +static void check_ghostbuster(rcynic_ctx_t *rc, + STACK_OF(walk_ctx_t) *wsk, + const uri_t *uri, + const unsigned char *hash, + const size_t hashlen) +{ + walk_ctx_t *w = walk_ctx_stack_head(wsk); + path_t path; + + assert(rc && wsk && w && uri); + + if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) && + !access(path.s, F_OK)) + return; + + logmsg(rc, log_telemetry, "Checking Ghostbuster record %s", uri->s); + + if (check_ghostbuster_1(rc, wsk, uri, &path, &rc->unauthenticated, + hash, hashlen, object_generation_current)) { + install_object(rc, uri, &path, object_generation_current); + return; + } + + if (!access(path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, object_generation_current); + else if (hash) + log_validation_status(rc, uri, manifest_lists_missing_object, object_generation_current); + + if (check_ghostbuster_1(rc, wsk, uri, &path, &rc->old_authenticated, + hash, hashlen, object_generation_backup)) { + install_object(rc, uri, &path, object_generation_backup); + return; + } + + if (!access(path.s, F_OK)) + log_validation_status(rc, uri, object_rejected, object_generation_backup); + else if (hash && w->manifest_generation == object_generation_backup) + log_validation_status(rc, uri, manifest_lists_missing_object, object_generation_backup); +} + + + +static void walk_cert(rcynic_ctx_t *, void *); + +/** + * rsync callback for fetching SIA tree. + */ +static void rsync_sia_callback(rcynic_ctx_t *rc, + const rsync_ctx_t *ctx, + const rsync_status_t status, + const uri_t *uri, + void *cookie) +{ + STACK_OF(walk_ctx_t) *wsk = cookie; + walk_ctx_t *w = walk_ctx_stack_head(wsk); + + assert(rc && wsk); + + if (status != rsync_status_pending) { + w->state++; + task_add(rc, walk_cert, wsk); + return; + } + + if (rsync_count_runable(rc) >= rc->max_parallel_fetches) + return; + + if ((wsk = walk_ctx_stack_clone(wsk)) == NULL) { + logmsg(rc, log_sys_err, + "walk_ctx_stack_clone() failed, probably memory exhaustion, blundering onwards without forking stack"); + return; + } + + walk_ctx_stack_pop(wsk); + task_add(rc, walk_cert, wsk); +} + +/** + * Recursive walk of certificate hierarchy (core of the program). 
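+ *
+ * (Each stack entry steps through walk_state_initial, walk_state_rsync,
+ * walk_state_ready, walk_state_current, walk_state_backup, and
+ * walk_state_done; the loop below is a hand-rolled state machine over
+ * that progression.)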
+ * + * Walk all products of the current certificate, starting with the + * ones named in the manifest and continuing with any that we find in + * the publication directory but which are not named in the manifest. + * + * Dispatch to correct checking code for the object named by URI, + * based on the filename extension in the uri. CRLs are a special + * case because we've already checked them by the time we get here, so + * we just ignore them. Other objects are either certificates or + * CMS-signed objects of one kind or another. + */ +static void walk_cert(rcynic_ctx_t *rc, void *cookie) +{ + STACK_OF(walk_ctx_t) *wsk = cookie; + const unsigned char *hash = NULL; + object_generation_t generation; + size_t hashlen; + walk_ctx_t *w; + uri_t uri; + + assert(rc && wsk); + + while ((w = walk_ctx_stack_head(wsk)) != NULL) { + + switch (w->state) { + case walk_state_current: + generation = object_generation_current; + break; + case walk_state_backup: + generation = object_generation_backup; + break; + default: + generation = object_generation_null; + break; + } + + switch (w->state) { + + case walk_state_initial: + + if (!w->certinfo.sia.s[0] || !w->certinfo.ca) { + w->state = walk_state_done; + continue; + } + + if (!w->certinfo.manifest.s[0]) { + log_validation_status(rc, &w->certinfo.uri, sia_manifest_uri_missing, w->certinfo.generation); + w->state = walk_state_done; + continue; + } + + w->state++; + continue; + + case walk_state_rsync: + + if (rsync_needed(rc, wsk)) { + rsync_tree(rc, &w->certinfo.sia, wsk, rsync_sia_callback); + return; + } + log_validation_status(rc, &w->certinfo.sia, rsync_transfer_skipped, object_generation_null); + w->state++; + continue; + + case walk_state_ready: + + walk_ctx_loop_init(rc, wsk); /* sets w->state */ + continue; + + case walk_state_current: + case walk_state_backup: + + if (!walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen)) { + walk_ctx_loop_next(rc, wsk); + continue; + } + + if (endswith(uri.s, ".crl") || endswith(uri.s, ".mft") || endswith(uri.s, ".mnf")) { + walk_ctx_loop_next(rc, wsk); + continue; /* CRLs and manifests checked elsewhere */ + } + + if (hash == NULL && !rc->allow_object_not_in_manifest) { + log_validation_status(rc, &uri, skipped_because_not_in_manifest, generation); + walk_ctx_loop_next(rc, wsk); + continue; + } + + if (hash == NULL) + log_validation_status(rc, &uri, tainted_by_not_being_in_manifest, generation); + else if (w->stale_manifest) + log_validation_status(rc, &uri, tainted_by_stale_manifest, generation); + + if (endswith(uri.s, ".roa")) { + check_roa(rc, wsk, &uri, hash, hashlen); + walk_ctx_loop_next(rc, wsk); + continue; + } + + if (endswith(uri.s, ".gbr")) { + check_ghostbuster(rc, wsk, &uri, hash, hashlen); + walk_ctx_loop_next(rc, wsk); + continue; + } + + if (endswith(uri.s, ".cer")) { + certinfo_t certinfo; + X509 *x = check_cert(rc, wsk, &uri, &certinfo, hash, hashlen); + if (!walk_ctx_stack_push(wsk, x, &certinfo)) + walk_ctx_loop_next(rc, wsk); + continue; + } + + log_validation_status(rc, &uri, unknown_object_type_skipped, object_generation_null); + walk_ctx_loop_next(rc, wsk); + continue; + + case walk_state_done: + + walk_ctx_stack_pop(wsk); /* Resume our issuer's state */ + continue; + + } + } + + assert(walk_ctx_stack_head(wsk) == NULL); + walk_ctx_stack_free(wsk); +} + +/** + * Check a trust anchor. Yes, we trust it, by definition, but it + * still needs to conform to the certificate profile, the + * self-signature must be correct, etcetera. 
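+ *
+ * The self-signature check itself reduces to verifying the certificate
+ * against its own key; as a hedged sketch:
+ *
+ *   EVP_PKEY *pkey = X509_get_pubkey(x);
+ *   int self_signed_ok = pkey != NULL && X509_verify(x, pkey) > 0;
+ *   EVP_PKEY_free(pkey);
+ *
+ * check_x509() gets this effect by letting the trust anchor serve as
+ * its own issuer on the walk context stack.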
+ *
+ * Ownership of the TA certificate object passes to this function when
+ * called (ie, freeing "x" is our responsibility).
+ */
+static int check_ta(rcynic_ctx_t *rc, X509 *x, const uri_t *uri,
+                    const path_t *path1, const path_t *path2,
+                    const object_generation_t generation)
+{
+  STACK_OF(walk_ctx_t) *wsk = NULL;
+  walk_ctx_t *w = NULL;
+
+  assert(rc && uri && path1 && path2);
+
+  if (x == NULL)
+    return 1;
+
+  if ((wsk = walk_ctx_stack_new()) == NULL) {
+    logmsg(rc, log_sys_err, "Couldn't allocate walk context stack");
+    X509_free(x);
+    return 0;
+  }
+
+  if ((w = walk_ctx_stack_push(wsk, x, NULL)) == NULL) {
+    logmsg(rc, log_sys_err, "Couldn't push walk context stack");
+    walk_ctx_stack_free(wsk);
+    X509_free(x);
+    return 0;
+  }
+
+  if (!check_x509(rc, wsk, uri, x, NULL, generation)) {
+    log_validation_status(rc, uri, object_rejected, generation);
+    walk_ctx_stack_free(wsk);
+    return 1;
+  }
+
+  logmsg(rc, log_telemetry, "Copying trust anchor %s to %s", path1->s, path2->s);
+
+  if (!mkdir_maybe(rc, path2) || !cp_ln(rc, path1, path2)) {
+    walk_ctx_stack_free(wsk);
+    return 0;
+  }
+
+  log_validation_status(rc, uri, object_accepted, generation);
+  task_add(rc, walk_cert, wsk);
+  return 1;
+}
+
+
+
+/**
+ * Check a trust anchor read from a local file.
+ */
+static int check_ta_cer(rcynic_ctx_t *rc,
+                        const char *fn)
+{
+  path_t path1, path2;
+  unsigned long hash;
+  X509 *x = NULL;
+  uri_t uri;
+  int i;
+
+  assert(rc && fn);
+
+  logmsg(rc, log_telemetry, "Processing trust anchor from file %s", fn);
+
+  if (strlen(fn) >= sizeof(path1.s)) {
+    logmsg(rc, log_usage_err, "Trust anchor path name too long %s", fn);
+    return 0;
+  }
+  strcpy(path1.s, fn);
+  filename_to_uri(&uri, path1.s);
+
+  if ((x = read_cert(&path1, NULL)) == NULL) {
+    logmsg(rc, log_usage_err, "Couldn't read trust anchor from file %s", fn);
+    log_validation_status(rc, &uri, unreadable_trust_anchor, object_generation_null);
+    goto lose;
+  }
+
+  hash = X509_subject_name_hash(x);
+
+  for (i = 0; i < INT_MAX; i++) {
+    if (snprintf(path2.s, sizeof(path2.s), "%s%lx.%d.cer",
+                 rc->new_authenticated.s, hash, i) >= sizeof(path2.s)) {
+      logmsg(rc, log_sys_err,
+             "Couldn't construct path name for trust anchor %s", path1.s);
+      goto lose;
+    }
+    if (access(path2.s, F_OK))
+      break;
+  }
+  if (i == INT_MAX) {
+    logmsg(rc, log_sys_err, "Couldn't find a free name for trust anchor %s", path1.s);
+    goto lose;
+  }
+
+  return check_ta(rc, x, &uri, &path1, &path2, object_generation_null);
+
+ lose:
+  log_validation_status(rc, &uri, trust_anchor_skipped, object_generation_null);
+  X509_free(x);
+  return 0;
+}
+
+
+
+/**
+ * Allocate a new tal_ctx_t.
+ */
+static tal_ctx_t *tal_ctx_t_new(void)
+{
+  tal_ctx_t *tctx = malloc(sizeof(*tctx));
+  if (tctx)
+    memset(tctx, 0, sizeof(*tctx));
+  return tctx;
+}
+
+/**
+ * Free a tal_ctx_t.
+ */
+static void tal_ctx_t_free(tal_ctx_t *tctx)
+{
+  if (tctx) {
+    EVP_PKEY_free(tctx->pkey);
+    free(tctx);
+  }
+}
+
+/**
+ * Read a trust anchor from disk and compare with known public key.
+ *
+ * NB: EVP_PKEY_cmp() returns 1 for match, not 0 like every other
+ * xyz_cmp() function in the entire OpenSSL library. Go figure.
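+ *
+ * The safe idiom is therefore an explicit comparison against 1, as in
+ * the code below (names illustrative):
+ *
+ *   if (EVP_PKEY_cmp(known_pkey, cert_pkey) != 1)
+ *     mismatch();   /* 0 means the keys differ, negative means no idea */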
+ */
+static int check_ta_tal_callback_1(rcynic_ctx_t *rc,
+                                   const tal_ctx_t *tctx,
+                                   object_generation_t generation)
+{
+  const path_t *prefix = NULL;
+  EVP_PKEY *pkey = NULL;
+  X509 *x = NULL;
+  path_t path;
+  int ret = 0;
+
+  switch (generation) {
+  case object_generation_current:
+    prefix = &rc->unauthenticated;
+    break;
+  case object_generation_backup:
+    prefix = &rc->old_authenticated;
+    break;
+  default:
+    goto done;
+  }
+
+  if (!uri_to_filename(rc, &tctx->uri, &path, prefix)) {
+    log_validation_status(rc, &tctx->uri, unreadable_trust_anchor_locator, generation);
+    goto done;
+  }
+
+  if ((x = read_cert(&path, NULL)) == NULL || (pkey = X509_get_pubkey(x)) == NULL) {
+    log_validation_status(rc, &tctx->uri, unreadable_trust_anchor, generation);
+    goto done;
+  }
+
+  if (EVP_PKEY_cmp(tctx->pkey, pkey) != 1) {
+    log_validation_status(rc, &tctx->uri, trust_anchor_key_mismatch, generation);
+    goto done;
+  }
+
+  ret = check_ta(rc, x, &tctx->uri, &path, &tctx->path, generation);
+  x = NULL;
+
+ done:
+  if (!ret)
+    log_validation_status(rc, &tctx->uri, object_rejected, generation);
+  EVP_PKEY_free(pkey);
+  X509_free(x);
+  return ret;
+}
+
+/**
+ * rsync callback for fetching a TAL.
+ */
+static void rsync_tal_callback(rcynic_ctx_t *rc,
+                               const rsync_ctx_t *ctx,
+                               const rsync_status_t status,
+                               const uri_t *uri,
+                               void *cookie)
+{
+  tal_ctx_t *tctx = cookie;
+
+  assert(rc && tctx);
+
+  if (status == rsync_status_pending)
+    return;
+
+  if (!check_ta_tal_callback_1(rc, tctx, object_generation_current) &&
+      !check_ta_tal_callback_1(rc, tctx, object_generation_backup))
+    log_validation_status(rc, &tctx->uri, trust_anchor_skipped, object_generation_null);
+
+  tal_ctx_t_free(tctx);
+}
+
+/**
+ * Check a trust anchor read from a trust anchor locator (TAL).
+ */
+static int check_ta_tal(rcynic_ctx_t *rc,
+                        const char *fn)
+{
+  tal_ctx_t *tctx = NULL;
+  BIO *bio = NULL;
+  int ret = 1;
+
+  assert(rc && fn);
+
+  logmsg(rc, log_telemetry, "Processing trust anchor locator from file %s", fn);
+
+  if ((tctx = tal_ctx_t_new()) == NULL) {
+    logmsg(rc, log_sys_err, "malloc(tal_ctx_t) failed");
+    goto done;
+  }
+
+  bio = BIO_new_file(fn, "r");
+
+  if (!bio)
+    logmsg(rc, log_usage_err, "Couldn't open trust anchor locator file %s", fn);
+
+  if (!bio || BIO_gets(bio, tctx->uri.s, sizeof(tctx->uri.s)) <= 0) {
+    uri_t furi;
+    filename_to_uri(&furi, fn);
+    log_validation_status(rc, &furi, unreadable_trust_anchor_locator, object_generation_null);
+    goto done;
+  }
+
+  tctx->uri.s[strcspn(tctx->uri.s, " \t\r\n")] = '\0';
+
+  if (!uri_to_filename(rc, &tctx->uri, &tctx->path, &rc->new_authenticated)) {
+    log_validation_status(rc, &tctx->uri, unreadable_trust_anchor_locator, object_generation_null);
+    goto done;
+  }
+
+  if (!endswith(tctx->uri.s, ".cer")) {
+    log_validation_status(rc, &tctx->uri, malformed_tal_uri, object_generation_null);
+    goto done;
+  }
+
+  bio = BIO_push(BIO_new(BIO_f_linebreak()), bio);
+  bio = BIO_push(BIO_new(BIO_f_base64()), bio);
+  if (bio)
+    tctx->pkey = d2i_PUBKEY_bio(bio, NULL);
+  if (!tctx->pkey) {
+    log_validation_status(rc, &tctx->uri, unreadable_trust_anchor_locator, object_generation_null);
+    goto done;
+  }
+
+  logmsg(rc, log_telemetry, "Processing trust anchor from URI %s", tctx->uri.s);
+
+  rsync_ta(rc, &tctx->uri, tctx, rsync_tal_callback);
+  tctx = NULL;                  /* Control has passed */
+
+ done:
+  tal_ctx_t_free(tctx);
+  BIO_free_all(bio);
+  return ret;
+}
+
+/**
+ * Check a directory of trust anchors and trust anchor locators.
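+ *
+ * Files ending in ".cer" are handled as raw trust anchors and files
+ * ending in ".tal" as trust anchor locators.  A TAL (RFC 6490) is just
+ * an rsync URI on the first line followed by the base64 DER encoding
+ * of the anchor's public key, e.g. (values illustrative):
+ *
+ *   rsync://ta.example.net/rpki/root.cer
+ *   MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A...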
+ */
+static int check_ta_dir(rcynic_ctx_t *rc,
+                        const char *dn)
+{
+  DIR *dir = NULL;
+  struct dirent *d;
+  path_t path;
+  int is_cer, is_tal;
+
+  assert(rc && dn);
+
+  if ((dir = opendir(dn)) == NULL) {
+    logmsg(rc, log_sys_err, "Couldn't open trust anchor directory %s: %s",
+           dn, strerror(errno));
+    return 0;
+  }
+
+  while ((d = readdir(dir)) != NULL) {
+    if (snprintf(path.s, sizeof(path.s), "%s/%s", dn, d->d_name) >= sizeof(path.s)) {
+      logmsg(rc, log_data_err, "Pathname %s/%s too long", dn, d->d_name);
+      break;
+    }
+    is_cer = endswith(path.s, ".cer");
+    is_tal = endswith(path.s, ".tal");
+    if (is_cer && !check_ta_cer(rc, path.s))
+      break;
+    if (is_tal && !check_ta_tal(rc, path.s))
+      break;
+    if (!is_cer && !is_tal)
+      logmsg(rc, log_verbose, "Skipping non-trust-anchor %s", path.s);
+  }
+
+  if (dir != NULL)
+    closedir(dir);
+
+  return !d;
+}
+
+
+
+/**
+ * Write detailed log of what we've done as an XML file.
+ */
+static int write_xml_file(const rcynic_ctx_t *rc,
+                          const char *xmlfile)
+{
+  int i, j, use_stdout, ok;
+  char hostname[HOSTNAME_MAX];
+  mib_counter_t code;
+  timestamp_t ts;
+  FILE *f = NULL;
+  path_t xmltemp;
+
+  if (xmlfile == NULL)
+    return 1;
+
+  use_stdout = !strcmp(xmlfile, "-");
+
+  logmsg(rc, log_telemetry, "Writing XML summary to %s",
+         (use_stdout ? "standard output" : xmlfile));
+
+  if (use_stdout) {
+    f = stdout;
+    ok = 1;
+  } else if (snprintf(xmltemp.s, sizeof(xmltemp.s), "%s.%u.tmp", xmlfile, (unsigned) getpid()) >= sizeof(xmltemp.s)) {
+    logmsg(rc, log_usage_err, "Filename \"%s\" is too long, not writing XML", xmlfile);
+    return 0;
+  } else {
+    ok = (f = fopen(xmltemp.s, "w")) != NULL;
+  }
+
+  ok &= gethostname(hostname, sizeof(hostname)) == 0;
+
+  if (ok)
+    ok &= fprintf(f, "<?xml version=\"1.0\" ?>\n"
+                  "<rcynic-summary date=\"%s\" rcynic-version=\"%s\""
+                  " summary-version=\"%d\" reporting-hostname=\"%s\">\n"
+                  "  <labels>\n",
+                  time_to_string(&ts, NULL),
+                  svn_id, XML_SUMMARY_VERSION, hostname) != EOF;
+
+  for (j = 0; ok && j < MIB_COUNTER_T_MAX; ++j)
+    ok &= fprintf(f, "    <%s kind=\"%s\">%s</%s>\n",
+                  mib_counter_label[j], mib_counter_kind[j],
+                  (mib_counter_desc[j]
+                   ? 
mib_counter_desc[j]
+                   : X509_verify_cert_error_string(mib_counter_openssl[j])),
+                  mib_counter_label[j]) != EOF;
+
+  if (ok)
+    ok &= fprintf(f, "  </labels>\n") != EOF;
+
+  for (i = 0; ok && i < sk_validation_status_t_num(rc->validation_status); i++) {
+    validation_status_t *v = sk_validation_status_t_value(rc->validation_status, i);
+    assert(v);
+
+    (void) time_to_string(&ts, &v->timestamp);
+
+    for (code = (mib_counter_t) 0; ok && code < MIB_COUNTER_T_MAX; code++) {
+      if (validation_status_get_code(v, code)) {
+        if (ok)
+          ok &= fprintf(f, "  <validation_status timestamp=\"%s\" status=\"%s\"",
+                        ts.s, mib_counter_label[code]) != EOF;
+        if (ok && (v->generation == object_generation_current ||
+                   v->generation == object_generation_backup))
+          ok &= fprintf(f, " generation=\"%s\"",
+                        object_generation_label[v->generation]) != EOF;
+        if (ok)
+          ok &= fprintf(f, ">%s</validation_status>\n", v->uri.s) != EOF;
+      }
+    }
+  }
+
+  for (i = 0; ok && i < sk_rsync_history_t_num(rc->rsync_history); i++) {
+    rsync_history_t *h = sk_rsync_history_t_value(rc->rsync_history, i);
+    assert(h);
+
+    if (ok)
+      ok &= fprintf(f, "  <rsync_history") != EOF;
+    if (ok && h->started)
+      ok &= fprintf(f, " started=\"%s\"",
+                    time_to_string(&ts, &h->started)) != EOF;
+    if (ok && h->finished)
+      ok &= fprintf(f, " finished=\"%s\"",
+                    time_to_string(&ts, &h->finished)) != EOF;
+    if (ok && h->status != rsync_status_done)
+      ok &= fprintf(f, " error=\"%u\"", (unsigned) h->status) != EOF;
+    if (ok)
+      ok &= fprintf(f, ">%s%s</rsync_history>\n",
+                    h->uri.s, (h->final_slash ? "/" : "")) != EOF;
+  }
+
+  if (ok)
+    ok &= fprintf(f, "</rcynic-summary>\n") != EOF;
+
+  if (f && !use_stdout)
+    ok &= fclose(f) != EOF;
+
+  if (ok && !use_stdout)
+    ok &= rename(xmltemp.s, xmlfile) == 0;
+
+  if (!ok)
+    logmsg(rc, log_sys_err, "Couldn't write XML summary to %s: %s",
+           (use_stdout ? "standard output" : xmlfile), strerror(errno));
+
+  if (!ok && !use_stdout)
+    (void) unlink(xmltemp.s);
+
+  return ok;
+}
+
+
+
+/**
+ * Long options.
+ */
+#define OPTIONS                                                         \
+  QA('a', "authenticated",   "root of authenticated data tree")        \
+  QA('c', "config",          "override default name of config file")   \
+  QF('h', "help",            "print this help message")                \
+  QA('j', "jitter",          "set jitter value")                       \
+  QA('l', "log-level",       "set log level")                          \
+  QA('u', "unauthenticated", "root of unauthenticated data tree")      \
+  QF('e', "use-stderr",      "log to stderr")                          \
+  QF('s', "use-syslog",      "log to syslog")                          \
+  QF('V', "version",         "print program version")                  \
+  QA('x', "xml-file",        "set XML output file location")
+
+const static struct option longopts[] = {
+  { "authenticated",   required_argument, NULL, 'a' },
+  { "config",          required_argument, NULL, 'c' },
+  { "help",            no_argument,       NULL, 'h' },
+  { "jitter",          required_argument, NULL, 'j' },
+  { "log-level",       required_argument, NULL, 'l' },
+  { "unauthenticated", required_argument, NULL, 'u' },
+  { "use-stderr",      no_argument,       NULL, 'e' },
+  { "use-syslog",      no_argument,       NULL, 's' },
+  { "version",         no_argument,       NULL, 'V' },
+  { "xml-file",        required_argument, NULL, 'x' },
+  { NULL }
+};
+
+/**
+ * Wrapper around printf() to take arguments like logmsg().
+ * If C had closures, usage() would use them instead of this silliness.
+ */
+static void logmsg_printf(const rcynic_ctx_t *rc,
+                          const log_level_t level,
+                          const char *fmt, ...)
+{
+  va_list ap;
+  va_start(ap, fmt);
+  vprintf(fmt, ap);
+  putchar('\n');
+  va_end(ap);
+}
+
+/**
+ * Log usage message, either to stdout (for --help) or via logmsg().
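+ *
+ * The QF()/QA() macros are redefined just below so that each OPTIONS
+ * entry expands to one formatted help line; for example,
+ * QA('a', "authenticated", "root of authenticated data tree") comes
+ * out roughly as:
+ *
+ *   -a ARG --authenticated ARG           root of authenticated data tree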
+ */ +static void usage (const rcynic_ctx_t *rc, const char *jane) +{ + void (*log)(const rcynic_ctx_t *, const log_level_t, const char *, ...) = rc ? logmsg : logmsg_printf; + char left[80]; + + if (rc && !jane) + jane = rc->jane; + + log(rc, log_usage_err, "usage: %s [options]", jane); + log(rc, log_usage_err, "options:"); + +#define QF(_s_, _l_, _d_) \ + (void) snprintf(left, sizeof(left), "-%c --%-32s", _s_, _l_); \ + log(rc, log_usage_err, " %s%s", left, _d_); + +#define QA(_s_, _l_, _d_) \ + (void) snprintf(left, sizeof(left), "-%c ARG --%-32s", _s_, _l_ " ARG"); \ + log(rc, log_usage_err, " %s%s", left, _d_); + + OPTIONS; + +#undef QA +#undef QF +} + +/** + * Main program. Parse command line, read config file, iterate over + * trust anchors found via config file and do a tree walk for each + * trust anchor. + */ +int main(int argc, char *argv[]) +{ + int opt_jitter = 0, use_syslog = 0, use_stderr = 0, syslog_facility = 0; + int opt_syslog = 0, opt_stderr = 0, opt_level = 0, prune = 1; + int opt_auth = 0, opt_unauth = 0, keep_lockfile = 0; + char *lockfile = NULL, *xmlfile = NULL; + char *cfg_file = "rcynic.conf"; + int c, i, ret = 1, jitter = 600, lockfd = -1; + STACK_OF(CONF_VALUE) *cfg_section = NULL; + CONF *cfg_handle = NULL; + time_t start = 0, finish; + rcynic_ctx_t rc; + unsigned delay; + long eline = 0; + path_t ta_dir; + +#define QF(_s_, _l_, _d_) _s_, +#define QA(_s_, _l_, _d_) _s_, ':', + + const static char short_opts[] = { OPTIONS '\0' }; + +#undef QA +#undef QF + +#define QF(_s_, _l_, _d_) { _l_, no_argument, NULL, _s_ }, +#define QA(_s_, _l_, _d_) { _l_, required_argument, NULL, _s_ }, + + static struct option long_opts[] = { OPTIONS { NULL } }; + +#undef QA +#undef QF + + memset(&rc, 0, sizeof(rc)); + + if ((rc.jane = strrchr(argv[0], '/')) == NULL) + rc.jane = argv[0]; + else + rc.jane++; + + rc.log_level = log_data_err; + rc.allow_stale_crl = 1; + rc.allow_stale_manifest = 1; + rc.allow_digest_mismatch = 1; + rc.allow_crl_digest_mismatch = 1; + rc.allow_nonconformant_name = 1; + rc.allow_ee_without_signedObject = 1; + rc.allow_1024_bit_ee_key = 1; + rc.allow_wrong_cms_si_attributes = 1; + rc.max_parallel_fetches = 1; + rc.max_retries = 3; + rc.retry_wait_min = 30; + rc.run_rsync = 1; + rc.rsync_timeout = 300; + rc.max_select_time = 30; + rc.rsync_early = 1; + +#define QQ(x,y) rc.priority[x] = y; + LOG_LEVELS; +#undef QQ + + if (!set_directory(&rc, &rc.authenticated, "rcynic-data/authenticated", 0) || + !set_directory(&rc, &rc.unauthenticated, "rcynic-data/unauthenticated/", 1)) + goto done; + + OpenSSL_add_all_algorithms(); + ERR_load_crypto_strings(); + + if (!create_missing_nids()) { + logmsg(&rc, log_sys_err, "Couldn't initialize missing OIDs!"); + goto done; + } + + memset(&ta_dir, 0, sizeof(ta_dir)); + + opterr = 0; + + while ((c = getopt_long(argc, argv, short_opts, long_opts, NULL)) > 0) { + switch (c) { + case 'a': + opt_auth = 1; + if (!set_directory(&rc, &rc.authenticated, optarg, 0)) + goto done; + break; + case 'c': + cfg_file = optarg; + break; + case 'l': + opt_level = 1; + if (!configure_logmsg(&rc, optarg)) + goto done; + break; + case 's': + use_syslog = opt_syslog = 1; + break; + case 'e': + use_stderr = opt_stderr = 1; + break; + case 'h': + usage(NULL, rc.jane); + ret = 0; + goto done; + case 'j': + if (!configure_integer(&rc, &jitter, optarg)) + goto done; + opt_jitter = 1; + break; + case 'u': + opt_unauth = 1; + if (!set_directory(&rc, &rc.unauthenticated, optarg, 1)) + goto done; + break; + case 'V': + puts(svn_id); + ret = 0; + goto done; + 
case 'x':
+ xmlfile = strdup(optarg);
+ break;
+ default:
+ usage(&rc, NULL);
+ goto done;
+ }
+ }
+
+ if (!(asn1_zero = s2i_ASN1_INTEGER(NULL, "0x0")) ||
+ !(asn1_four_octets = s2i_ASN1_INTEGER(NULL, "0xFFFFFFFF")) ||
+ !(asn1_twenty_octets = s2i_ASN1_INTEGER(NULL, "0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")) ||
+ !(NID_binary_signing_time = OBJ_create("1.2.840.113549.1.9.16.2.46",
+ "id-aa-binarySigningTime",
+ "id-aa-binarySigningTime"))) {
+ logmsg(&rc, log_sys_err, "Couldn't initialize ASN.1 constants!");
+ goto done;
+ }
+
+ if ((cfg_handle = NCONF_new(NULL)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't create CONF object");
+ goto done;
+ }
+
+ if (NCONF_load(cfg_handle, cfg_file, &eline) <= 0) {
+ if (eline <= 0)
+ logmsg(&rc, log_usage_err, "Couldn't load config file %s", cfg_file);
+ else
+ logmsg(&rc, log_usage_err, "Error on line %ld of config file %s", eline, cfg_file);
+ goto done;
+ }
+
+ if (CONF_modules_load(cfg_handle, NULL, 0) <= 0) {
+ logmsg(&rc, log_sys_err, "Couldn't configure OpenSSL");
+ goto done;
+ }
+
+ if ((cfg_section = NCONF_get_section(cfg_handle, "rcynic")) == NULL) {
+ logmsg(&rc, log_usage_err, "Couldn't load rcynic section from config file");
+ goto done;
+ }
+
+ for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
+ CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
+
+ assert(val && val->name && val->value);
+
+ if (!opt_auth &&
+ !name_cmp(val->name, "authenticated") &&
+ !set_directory(&rc, &rc.authenticated, val->value, 0))
+ goto done;
+
+ else if (!opt_unauth &&
+ !name_cmp(val->name, "unauthenticated") &&
+ !set_directory(&rc, &rc.unauthenticated, val->value, 1))
+ goto done;
+
+ else if (!name_cmp(val->name, "trust-anchor-directory") &&
+ !set_directory(&rc, &ta_dir, val->value, 0))
+ goto done;
+
+ else if (!name_cmp(val->name, "rsync-timeout") &&
+ !configure_integer(&rc, &rc.rsync_timeout, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "max-parallel-fetches") &&
+ !configure_integer(&rc, &rc.max_parallel_fetches, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "max-select-time") &&
+ !configure_unsigned_integer(&rc, &rc.max_select_time, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "rsync-program"))
+ rc.rsync_program = strdup(val->value);
+
+ else if (!name_cmp(val->name, "lockfile"))
+ lockfile = strdup(val->value);
+
+ else if (!name_cmp(val->name, "keep-lockfile") &&
+ !configure_boolean(&rc, &keep_lockfile, val->value))
+ goto done;
+
+ else if (!opt_jitter &&
+ !name_cmp(val->name, "jitter") &&
+ !configure_integer(&rc, &jitter, val->value))
+ goto done;
+
+ else if (!opt_level &&
+ !name_cmp(val->name, "log-level") &&
+ !configure_logmsg(&rc, val->value))
+ goto done;
+
+ else if (!opt_syslog &&
+ !name_cmp(val->name, "use-syslog") &&
+ !configure_boolean(&rc, &use_syslog, val->value))
+ goto done;
+
+ else if (!opt_stderr &&
+ !name_cmp(val->name, "use-stderr") &&
+ !configure_boolean(&rc, &use_stderr, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "syslog-facility") &&
+ !configure_syslog(&rc, &syslog_facility,
+ facilitynames, val->value))
+ goto done;
+
+ else if (!xmlfile &&
+ (!name_cmp(val->name, "xml-file") ||
+ !name_cmp(val->name, "xml-summary")))
+ xmlfile = strdup(val->value);
+
+ else if (!name_cmp(val->name, "allow-stale-crl") &&
+ !configure_boolean(&rc, &rc.allow_stale_crl, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-stale-manifest") &&
+ !configure_boolean(&rc, &rc.allow_stale_manifest, val->value))
+ goto done;
+
+ else if
(!name_cmp(val->name, "allow-non-self-signed-trust-anchor") && + !configure_boolean(&rc, &rc.allow_non_self_signed_trust_anchor, val->value)) + goto done; + + else if (!name_cmp(val->name, "require-crl-in-manifest") && + !configure_boolean(&rc, &rc.require_crl_in_manifest, val->value)) + goto done; + + else if (!name_cmp(val->name, "allow-object-not-in-manifest") && + !configure_boolean(&rc, &rc.allow_object_not_in_manifest, val->value)) + goto done; + + else if (!name_cmp(val->name, "allow-digest-mismatch") && + !configure_boolean(&rc, &rc.allow_digest_mismatch, val->value)) + goto done; + + else if (!name_cmp(val->name, "allow-crl-digest-mismatch") && + !configure_boolean(&rc, &rc.allow_crl_digest_mismatch, val->value)) + goto done; + + else if (!name_cmp(val->name, "use-links") && + !configure_boolean(&rc, &rc.use_links, val->value)) + goto done; + + else if (!name_cmp(val->name, "prune") && + !configure_boolean(&rc, &prune, val->value)) + goto done; + + else if (!name_cmp(val->name, "run-rsync") && + !configure_boolean(&rc, &rc.run_rsync, val->value)) + goto done; + + else if (!name_cmp(val->name, "allow-nonconformant-name") && + !configure_boolean(&rc, &rc.allow_nonconformant_name, val->value)) + goto done; + + else if (!name_cmp(val->name, "allow-ee-without-signedObject") && + !configure_boolean(&rc, &rc.allow_ee_without_signedObject, val->value)) + goto done; + + else if (!name_cmp(val->name, "allow-1024-bit-ee-key") && + !configure_boolean(&rc, &rc.allow_1024_bit_ee_key, val->value)) + goto done; + + else if (!name_cmp(val->name, "allow-wrong-cms-si-attributes") && + !configure_boolean(&rc, &rc.allow_wrong_cms_si_attributes, val->value)) + goto done; + + else if (!name_cmp(val->name, "rsync-early") && + !configure_boolean(&rc, &rc.rsync_early, val->value)) + goto done; + + /* + * Ugly, but the easiest way to handle all these strings. + */ + +#define QQ(x,y) \ + else if (!name_cmp(val->name, "syslog-priority-" #x) && \ + !configure_syslog(&rc, &rc.priority[x], \ + prioritynames, val->value)) \ + goto done; + + LOG_LEVELS; /* the semicolon is for emacs */ + +#undef QQ + + } + + if ((rc.rsync_history = sk_rsync_history_t_new(rsync_history_cmp)) == NULL) { + logmsg(&rc, log_sys_err, "Couldn't allocate rsync_history stack"); + goto done; + } + + if ((rc.validation_status = sk_validation_status_t_new_null()) == NULL) { + logmsg(&rc, log_sys_err, "Couldn't allocate validation_status stack"); + goto done; + } + + if ((rc.x509_store = X509_STORE_new()) == NULL) { + logmsg(&rc, log_sys_err, "Couldn't allocate X509_STORE"); + goto done; + } + + if ((rc.rsync_queue = sk_rsync_ctx_t_new_null()) == NULL) { + logmsg(&rc, log_sys_err, "Couldn't allocate rsync_queue"); + goto done; + } + + if ((rc.task_queue = sk_task_t_new_null()) == NULL) { + logmsg(&rc, log_sys_err, "Couldn't allocate task_queue"); + goto done; + } + + rc.use_syslog = use_syslog; + + if (use_syslog) + openlog(rc.jane, + LOG_PID | (use_stderr ? LOG_PERROR : 0), + (syslog_facility ? 
syslog_facility : LOG_LOCAL0)); + + if (jitter > 0) { + if (RAND_bytes((unsigned char *) &delay, sizeof(delay)) <= 0) { + logmsg(&rc, log_sys_err, "Couldn't read random bytes"); + goto done; + } + delay %= jitter; + logmsg(&rc, log_telemetry, "Delaying %u seconds before startup", delay); + while (delay > 0) + delay = sleep(delay); + } + + if (lockfile && + ((lockfd = open(lockfile, O_RDWR|O_CREAT|O_NONBLOCK, 0666)) < 0 || + lockf(lockfd, F_TLOCK, 0) < 0)) { + if (lockfd >= 0 && errno == EAGAIN) + logmsg(&rc, log_telemetry, "Lock %s held by another process", lockfile); + else + logmsg(&rc, log_sys_err, "Problem locking %s: %s", lockfile, strerror(errno)); + lockfd = -1; + goto done; + } + + start = time(0); + logmsg(&rc, log_telemetry, "Starting"); + + if (!construct_directory_names(&rc)) + goto done; + + if (!access(rc.new_authenticated.s, F_OK)) { + logmsg(&rc, log_sys_err, + "Timestamped output directory %s already exists! Clock went backwards?", + rc.new_authenticated.s); + goto done; + } + + if (!mkdir_maybe(&rc, &rc.new_authenticated)) { + logmsg(&rc, log_sys_err, "Couldn't prepare directory %s: %s", + rc.new_authenticated.s, strerror(errno)); + goto done; + } + + for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) { + CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i); + + assert(val && val->name && val->value); + + if (!name_cmp(val->name, "trust-anchor-uri-with-key") || + !name_cmp(val->name, "indirect-trust-anchor")) { + logmsg(&rc, log_usage_err, + "Directive \"%s\" is obsolete -- please use \"trust-anchor-locator\" instead", + val->name); + goto done; + } + + if ((!name_cmp(val->name, "trust-anchor") && !check_ta_cer(&rc, val->value)) || + (!name_cmp(val->name, "trust-anchor-locator") && !check_ta_tal(&rc, val->value))) + goto done; + } + + if (*ta_dir.s != '\0' && !check_ta_dir(&rc, ta_dir.s)) + goto done; + + while (sk_task_t_num(rc.task_queue) > 0 || sk_rsync_ctx_t_num(rc.rsync_queue) > 0) { + task_run_q(&rc); + rsync_mgr(&rc); + } + + logmsg(&rc, log_telemetry, "Event loop done, beginning final output and cleanup"); + + if (!finalize_directories(&rc)) + goto done; + + if (prune && rc.run_rsync && + !prune_unauthenticated(&rc, &rc.unauthenticated, + strlen(rc.unauthenticated.s))) { + logmsg(&rc, log_sys_err, "Trouble pruning old unauthenticated data"); + goto done; + } + + if (!write_xml_file(&rc, xmlfile)) + goto done; + + ret = 0; + + done: + log_openssl_errors(&rc); + + /* + * Do NOT free cfg_section, NCONF_free() takes care of that + */ + sk_validation_status_t_pop_free(rc.validation_status, validation_status_t_free); + sk_rsync_history_t_pop_free(rc.rsync_history, rsync_history_t_free); + validation_status_t_free(rc.validation_status_in_waiting); + X509_STORE_free(rc.x509_store); + NCONF_free(cfg_handle); + CONF_modules_free(); + EVP_cleanup(); + ERR_free_strings(); + if (rc.rsync_program) + free(rc.rsync_program); + if (lockfile && lockfd >= 0 && !keep_lockfile) + unlink(lockfile); + if (lockfile) + free(lockfile); + if (xmlfile) + free(xmlfile); + + if (start) { + finish = time(0); + logmsg(&rc, log_telemetry, + "Finished, elapsed time %u:%02u:%02u", + (unsigned) ((finish - start) / 3600), + (unsigned) ((finish - start) / 60 % 60), + (unsigned) ((finish - start) % 60)); + } + + return ret; +} diff --git a/rp/rcynic/rcynic.xsl b/rp/rcynic/rcynic.xsl new file mode 100644 index 00000000..487cd2f6 --- /dev/null +++ b/rp/rcynic/rcynic.xsl @@ -0,0 +1,312 @@ +<?xml version="1.0"?> +<!-- + - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. 
("ISC") + - + - Permission to use, copy, modify, and/or distribute this software for any + - purpose with or without fee is hereby granted, provided that the above + - copyright notice and this permission notice appear in all copies. + - + - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH + - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, + - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + - PERFORMANCE OF THIS SOFTWARE. + - + - Portions copyright (C) 2006 American Registry for Internet Numbers ("ARIN") + - + - Permission to use, copy, modify, and distribute this software for any + - purpose with or without fee is hereby granted, provided that the above + - copyright notice and this permission notice appear in all copies. + - + - THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH + - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY + - AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT, + - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE + - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + - PERFORMANCE OF THIS SOFTWARE. + --> + +<!-- $Id$ --> + +<!-- + - XSL stylesheet to render rcynic's xml-summary output as basic (X)HTML. + - + - This is a bit more complicated than strictly necessary, because I wanted + - the ability to drop out columns that are nothing but zeros. + - There's probably some clever way of using XPath to simplify this, + - but I don't expect the data sets to be large enough for performance + - to be an issue here. Feel free to show me how to do better. 
+ --> + +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" + version="1.0" + xmlns:com="http://exslt.org/common" + xmlns:str="http://exslt.org/strings" + exclude-result-prefixes="com str"> + + <xsl:output omit-xml-declaration="yes" indent="yes" method="xml" encoding="US-ASCII" + doctype-public="-//W3C//DTD XHTML 1.0 Strict//EN" + doctype-system="http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"/> + + <xsl:param name="refresh" select="1800"/> + <xsl:param name="suppress-zero-columns" select="1"/> + <xsl:param name="show-total" select="1"/> + <xsl:param name="use-colors" select="1"/> + <xsl:param name="show-detailed-status" select="1"/> + <xsl:param name="show-problems" select="0"/> + <xsl:param name="show-summary" select="1"/> + + <xsl:template match="/"> + <xsl:comment>Generators</xsl:comment> + <xsl:comment><xsl:value-of select="rcynic-summary/@rcynic-version"/></xsl:comment> + <xsl:comment>$Id$</xsl:comment> + <html> + <xsl:variable name="title"> + <xsl:text>rcynic summary </xsl:text> + <xsl:value-of select="rcynic-summary/@date"/> + </xsl:variable> + <head> + <title> + <xsl:value-of select="$title"/> + </title> + <xsl:if test="$refresh != 0"> + <meta http-equiv="Refresh" content="{$refresh}"/> + </xsl:if> + <style type="text/css"> + td { text-align: center; padding: 4px } + td.uri { text-align: left } + td.host { text-align: left } + <xsl:if test="$use-colors != 0"> + tr.good,td.good { background-color: #77ff77 } + tr.warn,td.warn { background-color: yellow } + tr.bad,td.bad { background-color: #ff5500 } + </xsl:if> + </style> + </head> + <body> + <h1><xsl:value-of select="$title"/></h1> + + <!-- Summary output, old host-oriented format --> + <xsl:if test="$show-summary != 0"> + + <!-- Collect data we need to display --> + <xsl:variable name="host-data"> + <xsl:for-each select="rcynic-summary/validation_status"> + <xsl:sort order="ascending" data-type="text" select="."/> + <xsl:variable name="uri" select="string(.)"/> + <xsl:if test="starts-with($uri, 'rsync://')"> + <xsl:variable name="hostname" select="str:tokenize($uri, ':/')[2]"/> + <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = current()/@status]/@kind"/> + <xsl:variable name="fn2"> + <xsl:if test="substring($uri, string-length($uri) - 3, 1) = '.' 
and @generation != ''"> + <xsl:value-of select="substring($uri, string-length($uri) - 3)"/> + </xsl:if> + </xsl:variable> + <x hostname="{$hostname}" timestamp="{@timestamp}" uri="{$uri}" status="{@status}" mood="{$mood}" fn2="{$fn2}" generation="{@generation}"/> + </xsl:if> + </xsl:for-each> + </xsl:variable> + + <!-- Calculate set of unique hostnames --> + <xsl:variable name="unique-hostnames"> + <xsl:for-each select="com:node-set($host-data)/x[not(@hostname = following::x/@hostname)]"> + <x hostname="{@hostname}"/> + </xsl:for-each> + </xsl:variable> + + <!-- Calculate set of unique filename types --> + <xsl:variable name="unique-fn2s"> + <xsl:for-each select="com:node-set($host-data)/x[not(@fn2 = following::x/@fn2)]"> + <x fn2="{@fn2}"/> + </xsl:for-each> + </xsl:variable> + + <!-- Generation names --> + <xsl:variable name="unique-generations"> + <xsl:for-each select="com:node-set($host-data)/x[not(@generation = following::x/@generation)]"> + <x generation="{@generation}"/> + </xsl:for-each> + </xsl:variable> + + <!-- Calculate grand totals, figure out which columns to display --> + <xsl:variable name="totals"> + <xsl:for-each select="rcynic-summary/labels/*"> + <xsl:variable name="sum" select="count(com:node-set($host-data)/x[@status = name(current())])"/> + <xsl:variable name="show"> + <xsl:choose> + <xsl:when test="$suppress-zero-columns = 0 or $sum > 0"> + <xsl:text>1</xsl:text> + </xsl:when> + <xsl:otherwise> + <xsl:text>0</xsl:text> + </xsl:otherwise> + </xsl:choose> + </xsl:variable> + <x name="{name(current())}" sum="{$sum}" text="{.}" show="{$show}" mood="{@kind}"/> + </xsl:for-each> + </xsl:variable> + + <!-- Calculate how many columns we'll be displaying --> + <xsl:variable name="columns" select="count(com:node-set($totals)/x[@show = 1])"/> + + <!-- Show the total --> + <xsl:if test="$show-total != 0"> + <br/> + <h2>Grand Totals</h2> + <table class="summary" rules="all" border="1"> + <thead> + <tr> + <td/> <!-- was hostname --> + <xsl:for-each select="com:node-set($totals)/x[@show = 1]"> + <td><b><xsl:value-of select="@text"/></b></td> + </xsl:for-each> + </tr> + </thead> + <tbody> + <tr> + <td><b>Total</b></td> + <xsl:for-each select="com:node-set($totals)/x"> + <xsl:if test="$suppress-zero-columns = 0 or @sum > 0"> + <td class="{@mood}"><xsl:value-of select="@sum"/></td> + </xsl:if> + </xsl:for-each> + </tr> + </tbody> + </table> + </xsl:if> + + <!-- Generate the HTML --> + <br/> + <h2>Summaries by Repository Host</h2> + <xsl:for-each select="com:node-set($unique-hostnames)/x"> + <xsl:sort order="ascending" data-type="text" select="@hostname"/> + <xsl:variable name="hostname" select="@hostname"/> + <br/> + <h3><xsl:value-of select="$hostname"/></h3> + <table class="summary" rules="all" border="1"> + <thead> + <tr> + <td/> <!-- was hostname --> + <xsl:for-each select="com:node-set($totals)/x[@show = 1]"> + <td><b><xsl:value-of select="@text"/></b></td> + </xsl:for-each> + </tr> + </thead> + <tbody> + <xsl:for-each select="com:node-set($unique-fn2s)/x"> + <xsl:sort order="ascending" data-type="text" select="@fn2"/> + <xsl:variable name="fn2" select="@fn2"/> + <xsl:for-each select="com:node-set($unique-generations)/x"> + <xsl:sort order="ascending" data-type="text" select="@generation"/> + <xsl:variable name="generation" select="@generation"/> + <xsl:if test="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation])"> + <tr> + <td><xsl:value-of select="concat($generation, ' ', $fn2)"/></td> + <xsl:for-each 
select="com:node-set($totals)/x[@show = 1]"> + <xsl:variable name="label" select="@name"/> + <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation and @status = $label])"/> + <xsl:choose> + <xsl:when test="$value != 0"> + <td class="{@mood}"> + <xsl:value-of select="$value"/> + </td> + </xsl:when> + <xsl:otherwise> + <td/> + </xsl:otherwise> + </xsl:choose> + </xsl:for-each> + </tr> + </xsl:if> + </xsl:for-each> + </xsl:for-each> + <tr> + <td>Total</td> + <xsl:for-each select="com:node-set($totals)/x[@show = 1]"> + <xsl:variable name="label" select="@name"/> + <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @status = $label])"/> + <xsl:choose> + <xsl:when test="$value != 0"> + <td class="{@mood}"> + <xsl:value-of select="$value"/> + </td> + </xsl:when> + <xsl:otherwise> + <td/> + </xsl:otherwise> + </xsl:choose> + </xsl:for-each> + </tr> + </tbody> + </table> + </xsl:for-each> + + <!-- "Problems" display --> + <xsl:if test="$show-problems != 0"> + <br/> + <h2>Problems</h2> + <table class="problems" rules="all" border="1" > + <thead> + <tr> + <td class="status"><b>Status</b></td> + <td class="uri"><b>URI</b></td> + </tr> + </thead> + <tbody> + <xsl:for-each select="rcynic-summary/validation_status"> + <xsl:variable name="status" select="@status"/> + <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/> + <xsl:if test="$mood != 'good'"> + <tr class="{$mood}"> + <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td> + <td class="uri"><xsl:value-of select="."/></td> + </tr> + </xsl:if> + </xsl:for-each> + </tbody> + </table> + </xsl:if> + </xsl:if> + + <!-- Detailed status display --> + <xsl:if test="$show-detailed-status != 0"> + <br/> + <h2>Validation Status</h2> + <table class="details" rules="all" border="1" > + <thead> + <tr> + <td class="timestamp"><b>Timestamp</b></td> + <td class="generation"><b>Generation</b></td> + <td class="status"><b>Status</b></td> + <td class="uri"><b>URI</b></td> + </tr> + </thead> + <tbody> + <xsl:for-each select="rcynic-summary/validation_status"> + <xsl:variable name="status" select="@status"/> + <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/> + <tr class="{$mood}"> + <td class="timestamp"><xsl:value-of select="@timestamp"/></td> + <td class="generation"><xsl:value-of select="@generation"/></td> + <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td> + <td class="uri"><xsl:value-of select="."/></td> + </tr> + </xsl:for-each> + </tbody> + </table> + </xsl:if> + + </body> + </html> + </xsl:template> + +</xsl:stylesheet> + +<!-- + - Local variables: + - mode: sgml + - End: + --> diff --git a/rp/rcynic/rpki-torrent.py b/rp/rcynic/rpki-torrent.py new file mode 100644 index 00000000..9b97f298 --- /dev/null +++ b/rp/rcynic/rpki-torrent.py @@ -0,0 +1,721 @@ +#!/usr/local/bin/python + +# $Id$ +# +# Copyright (C) 2013--2014 Dragon Research Labs ("DRL") +# Portions copyright (C) 2012 Internet Systems Consortium ("ISC") +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notices and this permission notice appear in all copies. 
+# +# THE SOFTWARE IS PROVIDED "AS IS" AND DRL AND ISC DISCLAIM ALL +# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DRL OR +# ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL +# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA +# OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +# PERFORMANCE OF THIS SOFTWARE. + +import urllib2 +import httplib +import socket +import ssl +import urlparse +import zipfile +import sys +import os +import email.utils +import base64 +import hashlib +import subprocess +import syslog +import traceback +import ConfigParser +import stat +import time +import errno +import fcntl +import argparse +import smtplib +import email.mime.text + +import transmissionrpc + +tr_env_vars = ("TR_TORRENT_DIR", "TR_TORRENT_ID", "TR_TORRENT_NAME") + +class WrongServer(Exception): + "Hostname not in X.509v3 subjectAltName extension." + +class UnexpectedRedirect(Exception): + "Unexpected HTTP redirect." + +class WrongMode(Exception): + "Wrong operation for mode." + +class BadFormat(Exception): + "Zip file does not match our expectations." + +class InconsistentEnvironment(Exception): + "Environment variables received from Transmission aren't consistent." + +class TorrentNotReady(Exception): + "Torrent is not ready for checking." + +class TorrentDoesNotMatchManifest(Exception): + "Retrieved torrent does not match manifest." + +class TorrentNameDoesNotMatchURL(Exception): + "Torrent name doesn't uniquely match a URL." + +class CouldNotFindTorrents(Exception): + "Could not find torrent(s) with given name(s)." + +class UseTheSourceLuke(Exception): + "Use The Source, Luke." 
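+
+# Rough map of the operating modes dispatched in main() below (a
+# summary only; as the UseTheSourceLuke exception above suggests, the
+# source is the real documentation):
+#
+#   rpki-torrent.py generate -- run as generator: sync data, build
+#     torrent and manifest, upload the zip
+#   rpki-torrent.py mirror -- re-seed zip/torrent pairs fetched from
+#     somebody else's generator
+#   rpki-torrent.py poll -- poll zip URLs, (re)start torrents as needed
+#   rpki-torrent.py (no arguments) -- completion hook run by
+#     Transmission, which supplies the TR_* environment variables above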
+
+def main():
+ try:
+ syslog_flags = syslog.LOG_PID
+ if os.isatty(sys.stderr.fileno()):
+ syslog_flags |= syslog.LOG_PERROR
+ syslog.openlog("rpki-torrent", syslog_flags)
+
+ parser = argparse.ArgumentParser(description = __doc__)
+ parser.add_argument("-c", "--config",
+ help = "configuration file")
+ parser.add_argument("argv", nargs = "*",
+ help = "operating mode: generate, mirror, or poll")
+ args = parser.parse_args()
+ argv = args.argv
+
+ global cfg
+ cfg = MyConfigParser()
+ cfg.read(args.config or
+ [os.path.join(dn, fn)
+ for fn in ("rcynic.conf", "rpki.conf")
+ for dn in ("/var/rcynic/etc", "/usr/local/etc", "/etc")])
+
+ if cfg.act_as_generator:
+ if len(argv) == 1 and argv[0] == "generate":
+ generator_main()
+ elif len(argv) == 1 and argv[0] == "mirror":
+ mirror_main()
+ else:
+ raise UseTheSourceLuke
+
+ else:
+ if len(argv) == 0 and all(v in os.environ for v in tr_env_vars):
+ torrent_completion_main()
+ elif len(argv) == 1 and argv[0] == "poll":
+ poll_main()
+ else:
+ raise UseTheSourceLuke
+
+ except Exception, e:
+ for line in traceback.format_exc().splitlines():
+ syslog.syslog(line)
+ sys.exit(1)
+
+
+def generator_main():
+ import paramiko
+
+ class SFTPClient(paramiko.SFTPClient):
+ def atomic_rename(self, oldpath, newpath):
+ oldpath = self._adjust_cwd(oldpath)
+ newpath = self._adjust_cwd(newpath)
+ self._log(paramiko.common.DEBUG, 'atomic_rename(%r, %r)' % (oldpath, newpath))
+ self._request(paramiko.sftp.CMD_EXTENDED, "posix-rename@openssh.com", oldpath, newpath)
+
+ z = ZipFile(url = cfg.generate_url, dir = cfg.zip_dir)
+ client = TransmissionClient()
+
+ client.remove_torrents(z.torrent_name)
+
+ download_dir = client.get_session().download_dir
+ torrent_dir = os.path.join(download_dir, z.torrent_name)
+ torrent_file = os.path.join(cfg.zip_dir, z.torrent_name + ".torrent")
+
+
+ syslog.syslog("Synchronizing local data from %s to %s" % (cfg.unauthenticated, torrent_dir))
+ subprocess.check_call((cfg.rsync_prog, "--archive", "--delete",
+ os.path.normpath(cfg.unauthenticated) + "/",
+ os.path.normpath(torrent_dir) + "/"))
+
+ syslog.syslog("Creating %s" % torrent_file)
+ try:
+ os.unlink(torrent_file)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ ignore_output_for_now = subprocess.check_output(
+ (cfg.mktorrent_prog,
+ "-a", cfg.tracker_url,
+ "-c", "RPKI unauthenticated data snapshot generated by rpki-torrent",
+ "-o", torrent_file,
+ torrent_dir))
+
+ syslog.syslog("Generating manifest")
+ manifest = create_manifest(download_dir, z.torrent_name)
+
+ syslog.syslog("Loading %s with unlimited seeding" % torrent_file)
+ f = open(torrent_file, "rb")
+ client.add(base64.b64encode(f.read()))
+ f.close()
+ client.unlimited_seeding(z.torrent_name)
+
+ syslog.syslog("Creating upload connection")
+ ssh = paramiko.Transport((cfg.sftp_host, cfg.sftp_port))
+ try:
+ hostkeys = paramiko.util.load_host_keys(cfg.sftp_hostkey_file)[cfg.sftp_host]["ssh-rsa"]
+ except ConfigParser.Error:
+ hostkeys = None
+ ssh.connect(
+ username = cfg.sftp_user,
+ hostkey = hostkeys,
+ pkey = paramiko.RSAKey.from_private_key_file(cfg.sftp_private_key_file))
+ sftp = SFTPClient.from_transport(ssh)
+
+ zip_filename = os.path.join("data", os.path.basename(z.filename))
+ zip_tempname = zip_filename + ".new"
+
+ syslog.syslog("Creating %s" % zip_tempname)
+ f = sftp.open(zip_tempname, "wb")
+ z.set_output_stream(f)
+
+ syslog.syslog("Writing %s to zip" % torrent_file)
+ z.write(
+ torrent_file,
+ arcname = os.path.basename(torrent_file),
+ compress_type = zipfile.ZIP_DEFLATED)
+
+ manifest_name = z.torrent_name + ".manifest"
+
+ syslog.syslog("Writing %s to zip" % manifest_name)
+ zi =
zipfile.ZipInfo(manifest_name, time.gmtime()[:6]) + zi.external_attr = (stat.S_IFREG | 0644) << 16 + zi.internal_attr = 1 # Text, not binary + z.writestr(zi, + "".join("%s %s\n" % (v, k) for k, v in manifest.iteritems()), + zipfile.ZIP_DEFLATED) + + syslog.syslog("Closing %s and renaming to %s" % (zip_tempname, zip_filename)) + z.close() + f.close() + sftp.atomic_rename(zip_tempname, zip_filename) + + syslog.syslog("Closing upload connection") + ssh.close() + +def mirror_main(): + client = TransmissionClient() + torrent_names = [] + + for zip_url in cfg.zip_urls: + if zip_url != cfg.generate_url: + z = ZipFile(url = zip_url, dir = cfg.zip_dir, ta = cfg.zip_ta) + if z.fetch(): + client.remove_torrents(z.torrent_name) + syslog.syslog("Mirroring torrent %s" % z.torrent_name) + client.add(z.get_torrent()) + torrent_names.append(z.torrent_name) + + if torrent_names: + client.unlimited_seeding(*torrent_names) + + +def poll_main(): + for zip_url in cfg.zip_urls: + + z = ZipFile(url = zip_url, dir = cfg.zip_dir, ta = cfg.zip_ta) + client = TransmissionClient() + + if z.fetch(): + client.remove_torrents(z.torrent_name) + syslog.syslog("Adding torrent %s" % z.torrent_name) + client.add(z.get_torrent()) + + elif cfg.run_rcynic_anyway: + run_rcynic(client, z) + + +def torrent_completion_main(): + torrent_name = os.getenv("TR_TORRENT_NAME") + torrent_id = int(os.getenv("TR_TORRENT_ID")) + + z = ZipFile(url = cfg.find_url(torrent_name), dir = cfg.zip_dir, ta = cfg.zip_ta) + client = TransmissionClient() + torrent = client.info([torrent_id]).popitem()[1] + + if torrent.name != torrent_name: + raise InconsistentEnvironment("Torrent name %s does not match ID %d" % (torrent_name, torrent_id)) + + if z.torrent_name != torrent_name: + raise InconsistentEnvironment("Torrent name %s does not match torrent name in zip file %s" % (torrent_name, z.torrent_name)) + + if torrent is None or torrent.progress != 100: + raise TorrentNotReady("Torrent %s not ready for checking, how did I get here?" % torrent_name) + + log_email("Download complete %s" % z.url) + + run_rcynic(client, z) + + +def run_rcynic(client, z): + """ + Run rcynic and any post-processing we might want. 
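+
+ Post-processing commands come from the post_rcynic_command options
+ in the [rpki-torrent] section of the config file, using the same
+ numbered multioption convention as zip_url (see
+ MyConfigParser.multioption_iter()); eg, something like (commands
+ hypothetical):
+
+ post_rcynic_command = /usr/local/bin/publish-rcynic-output
+ post_rcynic_command.1 = /usr/local/bin/notify-rpki-rtr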
+ """ + + if cfg.lockfile is not None: + syslog.syslog("Acquiring lock %s" % cfg.lockfile) + lock = os.open(cfg.lockfile, os.O_WRONLY | os.O_CREAT, 0600) + fcntl.flock(lock, fcntl.LOCK_EX) + else: + lock = None + + syslog.syslog("Checking manifest against disk") + + download_dir = client.get_session().download_dir + + manifest_from_disk = create_manifest(download_dir, z.torrent_name) + manifest_from_zip = z.get_manifest() + + excess_files = set(manifest_from_disk) - set(manifest_from_zip) + for fn in excess_files: + del manifest_from_disk[fn] + + if manifest_from_disk != manifest_from_zip: + raise TorrentDoesNotMatchManifest("Manifest for torrent %s does not match what we got" % + z.torrent_name) + + if excess_files: + syslog.syslog("Cleaning up excess files") + for fn in excess_files: + os.unlink(os.path.join(download_dir, fn)) + + syslog.syslog("Running rcynic") + log_email("Starting rcynic %s" % z.url) + subprocess.check_call((cfg.rcynic_prog, + "-c", cfg.rcynic_conf, + "-u", os.path.join(client.get_session().download_dir, z.torrent_name))) + log_email("Completed rcynic %s" % z.url) + + for cmd in cfg.post_rcynic_commands: + syslog.syslog("Running post-rcynic command: %s" % cmd) + subprocess.check_call(cmd, shell = True) + + if lock is not None: + syslog.syslog("Releasing lock %s" % cfg.lockfile) + os.close(lock) + +# See http://www.minstrel.org.uk/papers/sftp/ for details on how to +# set up safe upload-only SFTP directories on the server. In +# particular http://www.minstrel.org.uk/papers/sftp/builtin/ is likely +# to be the right path. + + +class ZipFile(object): + """ + Augmented version of standard python zipfile.ZipFile class, with + some extra methods and specialized capabilities. + + All methods of the standard zipfile.ZipFile class are supported, but + the constructor arguments are different, and opening the zip file + itself is deferred until a call which requires this, since the file + may first need to be fetched via HTTPS. + """ + + def __init__(self, url, dir, ta = None, verbose = True): + self.url = url + self.dir = dir + self.ta = ta + self.verbose = verbose + self.filename = os.path.join(dir, os.path.basename(url)) + self.changed = False + self.zf = None + self.peercert = None + self.torrent_name, zip_ext = os.path.splitext(os.path.basename(url)) + if zip_ext != ".zip": + raise BadFormat + + + def __getattr__(self, name): + if self.zf is None: + self.zf = zipfile.ZipFile(self.filename) + return getattr(self.zf, name) + + + def build_opener(self): + """ + Voodoo to create a urllib2.OpenerDirector object with TLS + certificate checking enabled and a hook to set self.peercert so + our caller can check the subjectAltName field. + + You probably don't want to look at this if you can avoid it. + """ + + assert self.ta is not None + + # Yes, we're constructing one-off classes. Look away, look away. 
+
+ class HTTPSConnection(httplib.HTTPSConnection):
+ zip = self
+ def connect(self):
+ sock = socket.create_connection((self.host, self.port), self.timeout)
+ if getattr(self, "_tunnel_host", None):
+ self.sock = sock
+ self._tunnel()
+ self.sock = ssl.wrap_socket(sock,
+ keyfile = self.key_file,
+ certfile = self.cert_file,
+ cert_reqs = ssl.CERT_REQUIRED,
+ ssl_version = ssl.PROTOCOL_TLSv1,
+ ca_certs = self.zip.ta)
+ self.zip.peercert = self.sock.getpeercert()
+
+ class HTTPSHandler(urllib2.HTTPSHandler):
+ def https_open(self, req):
+ return self.do_open(HTTPSConnection, req)
+
+ return urllib2.build_opener(HTTPSHandler)
+
+
+ def check_subjectAltNames(self):
+ """
+ Check self.peercert against URL to make sure we were talking to
+ the right HTTPS server.
+ """
+
+ hostname = urlparse.urlparse(self.url).hostname
+ subjectAltNames = set(i[1]
+ for i in self.peercert.get("subjectAltName", ())
+ if i[0] == "DNS")
+ if hostname not in subjectAltNames:
+ raise WrongServer
+
+
+ def download_file(self, r, bufsize = 4096):
+ """
+ Download file to disk.
+ """
+
+ tempname = self.filename + ".new"
+ f = open(tempname, "wb")
+ n = int(r.info()["Content-Length"])
+ for i in xrange(0, n - bufsize, bufsize):
+ f.write(r.read(bufsize))
+ f.write(r.read())
+ f.close()
+ mtime = email.utils.mktime_tz(email.utils.parsedate_tz(r.info()["Last-Modified"]))
+ os.utime(tempname, (mtime, mtime))
+ os.rename(tempname, self.filename)
+
+
+ def set_output_stream(self, stream):
+ """
+ Set up this zip file for writing to a network stream.
+ """
+
+ assert self.zf is None
+ self.zf = zipfile.ZipFile(stream, "w")
+
+
+ def fetch(self):
+ """
+ Fetch zip file from URL given to constructor.
+ """
+
+ headers = { "User-Agent" : "rpki-torrent" }
+ try:
+ headers["If-Modified-Since"] = email.utils.formatdate(
+ os.path.getmtime(self.filename), False, True)
+ except OSError:
+ pass
+
+ syslog.syslog("Checking %s..." % self.url)
+ try:
+ r = self.build_opener().open(urllib2.Request(self.url, None, headers))
+ syslog.syslog("%s has changed, starting download" % self.url)
+ self.changed = True
+ log_email("Downloading %s" % self.url)
+ except urllib2.HTTPError, e:
+ if e.code == 304:
+ syslog.syslog("%s has not changed" % self.url)
+ elif e.code == 404:
+ syslog.syslog("%s does not exist" % self.url)
+ else:
+ raise
+ r = None
+
+ self.check_subjectAltNames()
+
+ if r is not None and r.geturl() != self.url:
+ raise UnexpectedRedirect
+
+ if r is not None:
+ self.download_file(r)
+ r.close()
+
+ return self.changed
+
+
+ def check_format(self):
+ """
+ Make sure that format of zip file matches our preconceptions: it
+ should contain two files, one of which is the .torrent file, the
+ other is the manifest, with names derived from the torrent name
+ inferred from the URL.
+ """
+
+ if set(self.namelist()) != set((self.torrent_name + ".torrent", self.torrent_name + ".manifest")):
+ raise BadFormat
+
+
+ def get_torrent(self):
+ """
+ Extract torrent file from zip file, encoded in Base64 because
+ that's what the transmissionrpc library says it wants.
+ """
+
+ self.check_format()
+ return base64.b64encode(self.read(self.torrent_name + ".torrent"))
+
+
+ def get_manifest(self):
+ """
+ Extract manifest from zip file, as a dictionary.
+
+ For the moment we're fixing up the internal file names from the
+ format that the existing shell-script prototype uses, but this
+ should go away once this program both generates and checks the
+ manifests.
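+
+ Each manifest line is a whitespace-separated "hash filename" pair:
+ a SHA-256 digest in hex, then a path relative to the download
+ directory, along these (hypothetical) lines:
+
+ <sha256-hexdigest> <torrent-name>/path/to/object.cer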
+ """ + + self.check_format() + result = {} + for line in self.open(self.torrent_name + ".manifest"): + h, fn = line.split() + # + # Fixup for earlier manifest format, this should go away + if not fn.startswith(self.torrent_name): + fn = os.path.normpath(os.path.join(self.torrent_name, fn)) + # + result[fn] = h + return result + + +def create_manifest(topdir, torrent_name): + """ + Generate a manifest, expressed as a dictionary. + """ + + result = {} + topdir = os.path.abspath(topdir) + for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, torrent_name)): + for filename in filenames: + filename = os.path.join(dirpath, filename) + f = open(filename, "rb") + result[os.path.relpath(filename, topdir)] = hashlib.sha256(f.read()).hexdigest() + f.close() + return result + + +def log_email(msg, subj = None): + try: + if not msg.endswith("\n"): + msg += "\n" + if subj is None: + subj = msg.partition("\n")[0] + m = email.mime.text.MIMEText(msg) + m["Date"] = time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime()) + m["From"] = cfg.log_email + m["To"] = cfg.log_email + m["Subject"] = subj + s = smtplib.SMTP("localhost") + s.sendmail(cfg.log_email, [cfg.log_email], m.as_string()) + s.quit() + except ConfigParser.Error: + pass + + +class TransmissionClient(transmissionrpc.client.Client): + """ + Extension of transmissionrpc.client.Client. + """ + + def __init__(self, **kwargs): + kwargs.setdefault("address", "127.0.0.1") + kwargs.setdefault("user", cfg.transmission_username) + kwargs.setdefault("password", cfg.transmission_password) + transmissionrpc.client.Client.__init__(self, **kwargs) + + + def find_torrents(self, *names): + """ + Find torrents with given name(s), return id(s). + """ + + result = [i for i, t in self.list().iteritems() if t.name in names] + if not result: + raise CouldNotFindTorrents + return result + + + def remove_torrents(self, *names): + """ + Remove any torrents with the given name(s). + """ + + try: + ids = self.find_torrents(*names) + except CouldNotFindTorrents: + pass + else: + syslog.syslog("Removing torrent%s %s (%s)" % ( + "" if len(ids) == 1 else "s", + ", ".join(names), + ", ".join("#%s" % i for i in ids))) + self.remove(ids) + + def unlimited_seeding(self, *names): + """ + Set unlimited seeding for specified torrents. 
+ """ + + # Apparently seedRatioMode = 2 means "no limit" + try: + self.change(self.find_torrents(*names), seedRatioMode = 2) + except CouldNotFindTorrents: + syslog.syslog("Couldn't tweak seedRatioMode, blundering onwards") + + +class MyConfigParser(ConfigParser.RawConfigParser): + + rpki_torrent_section = "rpki-torrent" + + @property + def zip_dir(self): + return self.get(self.rpki_torrent_section, "zip_dir") + + @property + def zip_ta(self): + return self.get(self.rpki_torrent_section, "zip_ta") + + @property + def rcynic_prog(self): + return self.get(self.rpki_torrent_section, "rcynic_prog") + + @property + def rcynic_conf(self): + return self.get(self.rpki_torrent_section, "rcynic_conf") + + @property + def run_rcynic_anyway(self): + return self.getboolean(self.rpki_torrent_section, "run_rcynic_anyway") + + @property + def generate_url(self): + return self.get(self.rpki_torrent_section, "generate_url") + + @property + def act_as_generator(self): + try: + return self.get(self.rpki_torrent_section, "generate_url") != "" + except ConfigParser.Error: + return False + + @property + def rsync_prog(self): + return self.get(self.rpki_torrent_section, "rsync_prog") + + @property + def mktorrent_prog(self): + return self.get(self.rpki_torrent_section, "mktorrent_prog") + + @property + def tracker_url(self): + return self.get(self.rpki_torrent_section, "tracker_url") + + @property + def sftp_host(self): + return self.get(self.rpki_torrent_section, "sftp_host") + + @property + def sftp_port(self): + try: + return self.getint(self.rpki_torrent_section, "sftp_port") + except ConfigParser.Error: + return 22 + + @property + def sftp_user(self): + return self.get(self.rpki_torrent_section, "sftp_user") + + @property + def sftp_hostkey_file(self): + return self.get(self.rpki_torrent_section, "sftp_hostkey_file") + + @property + def sftp_private_key_file(self): + return self.get(self.rpki_torrent_section, "sftp_private_key_file") + + @property + def lockfile(self): + try: + return self.get(self.rpki_torrent_section, "lockfile") + except ConfigParser.Error: + return None + + @property + def unauthenticated(self): + try: + return self.get(self.rpki_torrent_section, "unauthenticated") + except ConfigParser.Error: + return self.get("rcynic", "unauthenticated") + + @property + def log_email(self): + return self.get(self.rpki_torrent_section, "log_email") + + @property + def transmission_username(self): + try: + return self.get(self.rpki_torrent_section, "transmission_username") + except ConfigParser.Error: + return None + + @property + def transmission_password(self): + try: + return self.get(self.rpki_torrent_section, "transmission_password") + except ConfigParser.Error: + return None + + def multioption_iter(self, name, getter = None): + if getter is None: + getter = self.get + if self.has_option(self.rpki_torrent_section, name): + yield getter(self.rpki_torrent_section, name) + name += "." 
+ names = [i for i in self.options(self.rpki_torrent_section) if i.startswith(name) and i[len(name):].isdigit()] + names.sort(key = lambda s: int(s[len(name):])) + for name in names: + yield getter(self.rpki_torrent_section, name) + + @property + def zip_urls(self): + return self.multioption_iter("zip_url") + + @property + def post_rcynic_commands(self): + return self.multioption_iter("post_rcynic_command") + + def find_url(self, torrent_name): + urls = [u for u in self.zip_urls + if os.path.splitext(os.path.basename(u))[0] == torrent_name] + if len(urls) != 1: + raise TorrentNameDoesNotMatchURL("Can't find URL matching torrent name %s" % torrent_name) + return urls[0] + + +if __name__ == "__main__": + main() diff --git a/rp/rcynic/rules.darwin.mk b/rp/rcynic/rules.darwin.mk new file mode 100644 index 00000000..d37b0e75 --- /dev/null +++ b/rp/rcynic/rules.darwin.mk @@ -0,0 +1,108 @@ +# $Id$ + +install-user-and-group: .FORCE + @if /usr/bin/dscl . -read "/Groups/${RCYNIC_GROUP}" >/dev/null 2>&1; \ + then \ + echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \ + elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \ + /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" && \ + /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" RealName "${RCYNIC_GECOS}" && \ + /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" PrimaryGroupID "$$gid" && \ + /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ + /usr/bin/dscl . -create "/Groups/${RCYNIC_GROUP}" Password "*"; \ + then \ + echo "Added group \"${RCYNIC_GROUP}\"."; \ + else \ + echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi; \ + if /usr/bin/dscl . -read "/Users/${RCYNIC_USER}" >/dev/null 2>&1; \ + then \ + echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \ + elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \ + /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" && \ + /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UserShell "/usr/bin/false" && \ + /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" RealName "${RCYNIC_GECOS}" && \ + /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" UniqueID "$$uid" && \ + /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" PrimaryGroupID "$$gid" && \ + /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" NFSHomeDirectory "/var/empty" && \ + /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ + /usr/bin/dscl . -create "/Users/${RCYNIC_USER}" Password "*"; \ + then \ + echo "Added user \"${RCYNIC_USER}\"."; \ + else \ + echo "Adding user \"${RCYNIC_USER}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + @if /usr/bin/dscl . -read "/Groups/${RPKIRTR_GROUP}" >/dev/null 2>&1; \ + then \ + echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \ + elif gid="$$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $$2 >= gid {gid = 1 + $$2} END {print gid}')" && \ + /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" && \ + /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" RealName "${RPKIRTR_GECOS}" && \ + /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" PrimaryGroupID "$$gid" && \ + /usr/bin/dscl . -create "/Groups/${RPKIRTR_GROUP}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ + /usr/bin/dscl . 
-create "/Groups/${RPKIRTR_GROUP}" Password "*"; \ + then \ + echo "Added group \"${RPKIRTR_GROUP}\"."; \ + else \ + echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi; \ + if /usr/bin/dscl . -read "/Users/${RPKIRTR_USER}" >/dev/null 2>&1; \ + then \ + echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \ + elif uid="$$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $$2 >= uid {uid = 1 + $$2} END {print uid}')" && \ + /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" && \ + /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UserShell "/usr/bin/false" && \ + /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" RealName "${RPKIRTR_GECOS}" && \ + /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" UniqueID "$$uid" && \ + /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" PrimaryGroupID "$$gid" && \ + /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" NFSHomeDirectory "/var/empty" && \ + /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" GeneratedUID "$$(/usr/bin/uuidgen)" && \ + /usr/bin/dscl . -create "/Users/${RPKIRTR_USER}" Password "*"; \ + then \ + echo "Added user \"${RPKIRTR_USER}\"."; \ + else \ + echo "Adding user \"${RPKIRTR_USER}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + + +install-shared-libraries: .FORCE + @echo "Copying required shared libraries" + @shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync"; \ + while true; \ + do \ + closure="$$(/usr/bin/otool -L $${shared_libraries} | /usr/bin/awk '/:$$/ {next} {print $$1}' | /usr/bin/sort -u)"; \ + if test "x$$shared_libraries" = "x$$closure"; + then \ + break; \ + else \ + shared_libraries="$$closure"; \ + fi; \ + done; \ + for shared in /usr/lib/dyld $$shared_libraries; \ + do \ + if /bin/test -r "${RCYNIC_DIR}/$${shared}"; \ + then \ + echo "You already have a \"${RCYNIC_DIR}/$${shared}\", so I will use it"; \ + elif /usr/bin/install -m 555 -o root -g wheel -p "$${shared}" "${RCYNIC_DIR}/$${shared}"; \ + then \ + echo "Copied $${shared} into ${RCYNIC_DIR}"; \ + else \ + echo "Unable to copy $${shared} into ${RCYNIC_DIR}"; \ + exit 1; \ + fi; \ + done + +install-rc-scripts: + ${INSTALL} -o root -g wheel -d ${DESTDIR}/Library/StartupItems/RCynic + ${INSTALL} -o root -g wheel -m 555 \ + rc-scripts/darwin/RCynic \ + rc-scripts/darwin/StartupParameters.plist \ + ${DESTDIR}/Library/Startup/RCynic diff --git a/rp/rcynic/rules.freebsd.mk b/rp/rcynic/rules.freebsd.mk new file mode 100644 index 00000000..5233386e --- /dev/null +++ b/rp/rcynic/rules.freebsd.mk @@ -0,0 +1,56 @@ +# $Id$ + +install-user-and-group: .FORCE + @if /usr/sbin/pw groupshow "${RCYNIC_GROUP}" 2>/dev/null; \ + then \ + echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \ + elif /usr/sbin/pw groupadd ${RCYNIC_GROUP}; \ + then \ + echo "Added group \"${RCYNIC_GROUP}\"."; \ + else \ + echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + @if /usr/sbin/pw groupshow "${RPKIRTR_GROUP}" 2>/dev/null; \ + then \ + echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \ + elif /usr/sbin/pw groupadd ${RPKIRTR_GROUP}; \ + then \ + echo "Added group \"${RPKIRTR_GROUP}\"."; \ + else \ + echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + @if /usr/sbin/pw usershow "${RCYNIC_USER}" 2>/dev/null; \ + then \ + echo "You already have a user \"${RCYNIC_USER}\", so I will use 
it."; \ + elif /usr/sbin/pw useradd ${RCYNIC_USER} -g ${RCYNIC_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RCYNIC_GECOS}" -G "${RPKIRTR_GROUP}"; \ + then \ + echo "Added user \"${RCYNIC_USER}\"."; \ + else \ + echo "Adding user \"${RCYNIC_USER}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + @if /usr/sbin/pw usershow "${RPKIRTR_USER}" 2>/dev/null; \ + then \ + echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \ + elif /usr/sbin/pw useradd ${RPKIRTR_USER} -g ${RPKIRTR_GROUP} -h - -d /nonexistant -s /usr/sbin/nologin -c "${RPKIRTR_GECOS}"; \ + then \ + echo "Added user \"${RPKIRTR_USER}\"."; \ + else \ + echo "Adding user \"${RPKIRTR_USER}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + + +# We use static compilation on FreeBSD, so no need for shared libraries + +install-shared-libraries: + @true + +install-rc-scripts: + ${INSTALL} -m 555 -o root -g wheel -p rc-scripts/freebsd/rc.d.rcynic ${DESTDIR}/usr/local/etc/rc.d/rcynic diff --git a/rp/rcynic/rules.linux.mk b/rp/rcynic/rules.linux.mk new file mode 100644 index 00000000..6a962cef --- /dev/null +++ b/rp/rcynic/rules.linux.mk @@ -0,0 +1,92 @@ +# $Id$ + +install-user-and-group: .FORCE + @if getent group ${RCYNIC_GROUP} >/dev/null; \ + then \ + echo "You already have a group \"${RCYNIC_GROUP}\", so I will use it."; \ + elif /usr/sbin/groupadd ${RCYNIC_GROUP}; \ + then \ + echo "Added group \"${RCYNIC_GROUP}\"."; \ + else \ + echo "Adding group \"${RCYNIC_GROUP}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + @nogroup='-N'; \ + if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \ + if getent passwd ${RCYNIC_USER} >/dev/null; \ + then \ + echo "You already have a user \"${RCYNIC_USER}\", so I will use it."; \ + elif /usr/sbin/useradd -g ${RCYNIC_GROUP} -M $$nogroup -d "${RCYNIC_DIR}" -s /sbin/nologin -c "${RCYNIC_GECOS}" ${RCYNIC_USER}; \ + then \ + echo "Added user \"${RCYNIC_USER}\"."; \ + else \ + echo "Adding user \"${RCYNIC_USER}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + @if getent group ${RPKIRTR_GROUP} >/dev/null; \ + then \ + echo "You already have a group \"${RPKIRTR_GROUP}\", so I will use it."; \ + elif /usr/sbin/groupadd ${RPKIRTR_GROUP}; \ + then \ + echo "Added group \"${RPKIRTR_GROUP}\"."; \ + else \ + echo "Adding group \"${RPKIRTR_GROUP}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + @nogroup='-N'; \ + if test -f /etc/redhat-release; then read vendor release version < /etc/redhat-release; if test $$vendor = CentOS; then nogroup='-n'; fi; fi; \ + if getent passwd ${RPKIRTR_USER} >/dev/null; \ + then \ + echo "You already have a user \"${RPKIRTR_USER}\", so I will use it."; \ + elif /usr/sbin/useradd -g ${RPKIRTR_GROUP} -M $$nogroup -d "${RPKIRTR_DIR}" -s /sbin/nologin -c "${RPKIRTR_GECOS}" ${RPKIRTR_USER}; \ + then \ + echo "Added user \"${RPKIRTR_USER}\"."; \ + else \ + echo "Adding user \"${RPKIRTR_USER}\" failed..."; \ + echo "Please create it, then try again."; \ + exit 1; \ + fi + usermod -a -G ${RPKIRTR_GROUP} ${RCYNIC_USER} + +install-shared-libraries: .FORCE + @echo "Copying required shared libraries" + @if test -d /lib64; then libdir=/lib64; else libdir=/lib; fi; \ + shared_libraries="${RCYNIC_DIR}/bin/rcynic ${RCYNIC_DIR}/bin/rsync $$(/usr/bin/find $${libdir} -name 'libnss*.so*' -print)"; \ + while true; \ + do \ + 
closure="$$(/usr/bin/ldd $${shared_libraries} | \ + ${AWK} ' \ + { sub(/:$/, "") } \ + $$0 == "${RCYNIC_DIR}/bin/rcynic" { next } \ + $$0 == "${RCYNIC_DIR}/bin/rsync" { next } \ + $$1 ~ /\/ld-linux\.so/ { next } \ + { for (i = 1; i <= NF; i++) if ($$i ~ /^\//) print $$i } \ + ' | \ + ${SORT} -u)"; \ + if test "X$$shared_libraries" = "X$$closure"; \ + then \ + break; \ + else \ + shared_libraries="$$closure"; \ + fi; \ + done; \ + if test -f $${libdir}/libresolv.so.2; \ + then \ + shared_libraries="$${shared_libraries} $${libdir}/libresolv.so.2"; + fi; \ + for shared in $${libdir}/*ld*.so* $$shared_libraries; \ + do \ + if test ! -r "${RCYNIC_DIR}/$${shared}"; \ + then \ + ${INSTALL} -m 555 -d `dirname "${RCYNIC_DIR}$${shared}"` && \ + ${INSTALL} -m 555 -p "$${shared}" "${RCYNIC_DIR}$${shared}"; \ + fi; \ + done + +# No devfs, so no rc script + +install-rc-scripts: + @true diff --git a/rp/rcynic/rules.unknown.mk b/rp/rcynic/rules.unknown.mk new file mode 100644 index 00000000..6ce3ea18 --- /dev/null +++ b/rp/rcynic/rules.unknown.mk @@ -0,0 +1,4 @@ +# $Id$ + +install-user-and-group install-shared-libraries install-rc-scripts: .FORCE + @echo "Don't know how to make $@ on this platform"; exit 1 diff --git a/rp/rcynic/sample-rcynic.conf b/rp/rcynic/sample-rcynic.conf new file mode 100644 index 00000000..6fa49b9c --- /dev/null +++ b/rp/rcynic/sample-rcynic.conf @@ -0,0 +1,39 @@ +# $Id$ +# +# Sample rcynic configuration file for jailed environment. + +[rcynic] +rsync-program = /bin/rsync +authenticated = /data/authenticated +unauthenticated = /data/unauthenticated +lockfile = /data/lock +xml-summary = /data/rcynic.xml +jitter = 600 +use-syslog = true +log-level = log_usage_err + +# You need to specify some trust anchors here, eg: + +#trust-anchor.1 = /etc/trust-anchors/ta-1.cer +#trust-anchor.2 = /etc/trust-anchors/ta-2.cer + +# or, using the "Trust Anchor Locator" form: + +#trust-anchor-locator.1 = /etc/trust-anchors/ta-1.tal +#trust-anchor-locator.2 = /etc/trust-anchors/ta-2.tal + +# The choice between these two formats depends largely on the policies +# of the entity generating the corresponding trust anchor, ie, will +# probably be made for you by the generating entity. + +# If you already keep all your TAs and TALs in a single directory +# anyway, you can also use the trust-anchor-directory directive to +# name that directory and let rcynic track whatever TAs and TALs you +# have there at the moment: + +#trust-anchor-directory = /etc/trust-anchors + +# Note that the order in which rcynic will read TAs and TALs from such +# a directory is undefined. In general this is not a problem, but if +# you really care about this for some reason, stick to the other +# directives. diff --git a/rp/rcynic/sample-trust-anchors/README b/rp/rcynic/sample-trust-anchors/README new file mode 100644 index 00000000..485d0485 --- /dev/null +++ b/rp/rcynic/sample-trust-anchors/README @@ -0,0 +1,26 @@ +$Id$ + +These are SAMPLE trust anchors for testing rcynic. Any resemblance +between these files and real trust anchors living, dead, or wandering +the night in ghostly torment is purely coincidental. + +Remember: it's only a trust anchor if a replying party says it is. +You are the relying party, so it's your call. + +The .cer files (if any) are self-signed X.509 trust anchors. + +The .tal files are "trust anchor locators", see RFC 6490. + +See the rcynic documentation for further details. 
+ +== + +afrinic.tal AfriNIC +apnic.tal APNIC +arin.tal ARIN testbed +bbn-testbed.tal.disabled Only useful for certain tests +lacnic.tal LACNIC +ripe-ncc-root.tal RIPE NCC +ripe-pilot.tal A RIPE testbed +rpki.net-testbed.tal rpki.net testbed +testbed-apnicrpki.tal An APNIC testbed diff --git a/rp/rcynic/sample-trust-anchors/afrinic.tal b/rp/rcynic/sample-trust-anchors/afrinic.tal new file mode 100644 index 00000000..0f202c73 --- /dev/null +++ b/rp/rcynic/sample-trust-anchors/afrinic.tal @@ -0,0 +1,8 @@ +rsync://rpki.afrinic.net/repository/AfriNIC.cer +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxsAqAhWIO+ON2Ef9oRDM +pKxv+AfmSLIdLWJtjrvUyDxJPBjgR+kVrOHUeTaujygFUp49tuN5H2C1rUuQavTH +vve6xNF5fU3OkTcqEzMOZy+ctkbde2SRMVdvbO22+TH9gNhKDc9l7Vu01qU4LeJH +k3X0f5uu5346YrGAOSv6AaYBXVgXxa0s9ZvgqFpim50pReQe/WI3QwFKNgpPzfQL +6Y7fDPYdYaVOXPXSKtx7P4s4KLA/ZWmRL/bobw/i2fFviAGhDrjqqqum+/9w1hEl +L/vqihVnV18saKTnLvkItA/Bf5i11Yhw2K7qv573YWxyuqCknO/iYLTR1DToBZcZ +UQIDAQAB diff --git a/rp/rcynic/sample-trust-anchors/altca.tal b/rp/rcynic/sample-trust-anchors/altca.tal new file mode 100644 index 00000000..2dbc7413 --- /dev/null +++ b/rp/rcynic/sample-trust-anchors/altca.tal @@ -0,0 +1,9 @@ +rsync://ca0.rpki.net/tal/root.cer + +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzPSPpQxa0rxz9sbbvYGr +UlpgyBVxSIt+k/WDKbr+VW7CjUoz6cc5KMFANkQWw3D6ER4kCwX4WJkD58AGGbw/ +WeAe6m3aHcORUVRkr45a4qSrYiG7Wq9RAXtwbhlXofB3zo+O90IlXDaVP2U9bw+Q +yoJBJuAmZONt0bRgrktv8QhVtKvuYkH5ZIe7DkXJcJzBn6gv09dZsdwZm3xV3soX +HEKrz5pY6Sb2xoL1CyPqzGOfVFxl0G5+dmcD/degPKxrEycAzjnHUzN1gus2jg26 +dtkix7KG/Mn1h/k53j0FdQD+zqPwakgwqjvCOdSdHMRmsikj0EF9WrZIOjZUXV6q +6wIDAQAB diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-afrinic-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-afrinic-origin.tal new file mode 100644 index 00000000..d254ec44 --- /dev/null +++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-afrinic-origin.tal @@ -0,0 +1,8 @@ +rsync://rpki.apnic.net/repository/apnic-rpki-root-afrinic-origin.cer +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuMLL96YV9pf0rZ4Ow/bk +cgpoPfsRzkcgmisyCuMUdotHwrp8pepujhohatScRK09ILRrZYCdpX4121MJhqXC +P3u3hy9fF0CeARKX/Q82nJccD4dtUp23UcFys8hwJgNYZI910ajkAxwNT//H/TFw +oUYbzZGBR7o2awMc7GdQl/j6dgOkV6AfYy5DyDEgOUNHnUxED2rreefL/E2Fr2ST +Esar6bTR4Tg4+nVF1PjAkgN0tKZYe4wZ6VmtqV/VTngSLysim6av7ki+JR3cVgVU +OqXeh1vPjH2tNu6u9bX37ZrdVb6NBRer9I99IDbKvyhELb6nzo8+Q74zga9HI+Pf +QwIDAQAB diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-arin-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-arin-origin.tal new file mode 100644 index 00000000..b82a136f --- /dev/null +++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-arin-origin.tal @@ -0,0 +1,8 @@ +rsync://rpki.apnic.net/repository/apnic-rpki-root-arin-origin.cer +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp6vscYtzhe0CfFk5Ro44 +llPhsInXtfAxqfYmK7m9V3khkqK3d3/ZAW6pcJm7qW8XhEGl+F5mUeeLIm5JoIhr +kT5B5M6uL0VlCCkZJH4h76ybOa83vWITNZEDy9L3c3nK4S+Basu3vYoE4ICXGG+J +7zg5Iw9saV+p03E2w1g16pt1QI3Cnggp6edkeWClEz3aPw/ULOIHb7YmatWwdERl +tL9LsuMSKszQLUY7F4XVpxey/rJYAZgzDUh+b6813WAClCkkydNjsbviuekAWJbx +sW7Mcw53u30K4g8MP03CjkDOubyoR4Qo99R1UQJCdrRsFKbSSfN/fOA4y7ikc3xs +jQIDAQAB diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal new file mode 100644 index 00000000..a4e714c0 --- /dev/null +++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal @@ -0,0 +1,8 @@ +rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer 
diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal
new file mode 100644
index 00000000..a4e714c0
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-iana-origin.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/apnic-rpki-root-iana-origin.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx9RWSL61YAAYumEiU8z8
+qH2ETVIL01ilxZlzIL9JYSORMN5Cmtf8V2JblIealSqgOTGjvSjEsiV73s67zYQI
+7C/iSOb96uf3/s86NqbxDiFQGN8qG7RNcdgVuUlAidl8WxvLNI8VhqbAB5uSg/Mr
+LeSOvXRja041VptAxIhcGzDMvlAJRwkrYK/Mo8P4E2rSQgwqCgae0ebY1CsJ3Cjf
+i67C1nw7oXqJJovvXJ4apGmEv8az23OLC6Ki54Ul/E6xk227BFttqFV3YMtKx42H
+cCcDVZZy01n7JjzvO8ccaXmHIgR7utnqhBRNNq5Xc5ZhbkrUsNtiJmrZzVlgU6Ou
+0wIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-lacnic-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-lacnic-origin.tal
new file mode 100644
index 00000000..a4d56802
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-lacnic-origin.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/apnic-rpki-root-lacnic-origin.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyoYPp3l3DWyPtLWrmRn4
+Oux9hQ5bxd0SX/f6ygHxik+I3eMJP5J0Pr2e500tyXb2uKsX9kDqu/kckr+TUMhV
+BHd5yAv8OAE3YYEvpz/7uTX7cYy2yUeA76OEP75Y88OIQEzGpPLNpIzDxMggxuDh
+IhkA5xMiUJgVoEgmWSzR+MuRBjv2422wAGB5GpLgYsOjpwvG0VPmhnE+39+10ucQ
+CLt0Ny5kOR4an2tkvHjm7rzKDnFm8MWxPzAWESdf+8g7ITzSglqxDNiK5E5rdzNt
+h1Kvp+9RwaFArw6Ky1A4HhnoplN4EfKwxq0YamuKV0ZTTpWyT2+qDuE6sOfHRbJ0
+5QIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-rpki-root-ripe-origin.tal b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-ripe-origin.tal
new file mode 100644
index 00000000..d64df3ae
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-rpki-root-ripe-origin.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/apnic-rpki-root-ripe-origin.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwsQlXmEklLYApoDo7GEa
+NNTEGFPU5wJpi04iXuga2xn+g/TMLOlyJbjuPYRtRm/7VbRnN3m9Ta+WETy03+Fm
+EbXzB4xxhJKVik/ARHBnrBWhLyURy8Q5/XplE9cJein37IE1mIsbKM7o/90S225w
+7GuvW7T4kjPWYmBFOywHWsfQO1EdsgiJrkz+Ab67ZkdSIiKHkf2UE6/MrbDEj+QK
+9+s/vKH8BtDhaLmTWY+bVvfJ3+AWDH6roo1ozbl5yamQFbLOl3ns30f3yOJcNSNu
+/qgMQRRyp2sXXQovhTy8yqm3LFspaCWnTmQtBieWZwibuOa4Z27A1FzTMst2T4wY
+/wIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/apnic-testbed.tal b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal
new file mode 100644
index 00000000..f87a3bf3
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/apnic-testbed.tal
@@ -0,0 +1,9 @@
+rsync://rpki-testbed.apnic.net/repository/rpki-testbed.apnic.net.cer
+
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyiVmZgr+aN0xZfh6yrJS
+qmsX2Q58UzjPaTnhsP88zdSqmRCVAegpRMjBjwF6Ga8tR9RP89W+tRzLf2x883Et
+vmNAax939gilN4VxGRgKL2YZO27w8Vk6aVpSJ0+CCQI497eataRFpG9eLiPfsRfo
+Fuha75MGa5m1cmJFVk0Jj0sRa9RmB/ZVxZDdwHgxAB7L875JSUnVCOLCP4FejoYt
+71wJfzFw3fZmXSPeXQ+wtxVPfh8VIOyLTyYbq3AA1Gn+herzvCsiLXZ41lcCCRZR
+ObwIGptj7KnqLoJ/MOGL6eoSY+v1B85z2Il3Nl8px3ohsApDM/MANDacxl0EgLwX
+2QIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/bbn-testbed.tal.disabled b/rp/rcynic/sample-trust-anchors/bbn-testbed.tal.disabled
new file mode 100644
index 00000000..21302ea5
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/bbn-testbed.tal.disabled
@@ -0,0 +1,8 @@
+rsync://rpki.bbn.com/rpki/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvEjkk0oC2N8Ij+ruAkrX
+cI7lSOIG1sQLC8xJJqklMlOBLao+3If0AVKSEP5+pb/hJwpXPomt6Lm4+fuZ6vX/
+q15llQL3YWkHZ49t/X47yCqxvM0txRaZafffbk6lJrofKhKKrYEcrr/rHiG+IQXn
+U1MGaQ/a8DfRDWiCQxonh7CthrLi7tr+QI9s9fDvvHPCK9OmmhZvCUDOydf+/vMn
+VShQ57KsUFcuZ0EX9UwsaIGCYGyvsYwk54UtoIGovg2IavfZK8ai0/5XEVpvKQiR
+8AixZpg5bSe7555+LhzpL5+LdqX6/hVRah0JrdR8KiXvr16Kmcitj+JLqVc0Wkd8
+zwIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/lacnic.tal b/rp/rcynic/sample-trust-anchors/lacnic.tal
new file mode 100644
index 00000000..29220d37
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/lacnic.tal
@@ -0,0 +1,8 @@
+rsync://repository.lacnic.net/rpki/lacnic/rta-lacnic-rpki.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqZEzhYK0+PtDOPfub/KR
+c3MeWx3neXx4/wbnJWGbNAtbYqXg3uU5J4HFzPgk/VIppgSKAhlO0H60DRP48by9
+gr5/yDHu2KXhOmnMg46sYsUIpfgtBS9+VtrqWziJfb+pkGtuOWeTnj6zBmBNZKK+
+5AlMCW1WPhrylIcB+XSZx8tk9GS/3SMQ+YfMVwwAyYjsex14Uzto4GjONALE5oh1
+M3+glRQduD6vzSwOD+WahMbc9vCOTED+2McLHRKgNaQf0YJ9a1jG9oJIvDkKXEqd
+fqDRktwyoD74cV57bW3tBAexB7GglITbInyQAsmdngtfg2LUMrcROHHP86QPZINj
+DQIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/ripe-ncc-root.tal b/rp/rcynic/sample-trust-anchors/ripe-ncc-root.tal
new file mode 100644
index 00000000..ebdb9f45
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/ripe-ncc-root.tal
@@ -0,0 +1,9 @@
+rsync://rpki.ripe.net/ta/ripe-ncc-ta.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0URYSGqUz2m
+yBsOzeW1jQ6NsxNvlLMyhWknvnl8NiBCs/T/S2XuNKQNZ+wBZxIgPPV
+2pFBFeQAvoH/WK83HwA26V2siwm/MY2nKZ+Olw+wlpzlZ1p3Ipj2eNc
+Krmit8BwBC8xImzuCGaV0jkRB0GZ0hoH6Ml03umLprRsn6v0xOP0+l6
+Qc1ZHMFVFb385IQ7FQQTcVIxrdeMsoyJq9eMkE6DoclHhF/NlSllXub
+ASQ9KUWqJ0+Ot3QCXr4LXECMfkpkVR2TZT+v5v658bHVs6ZxRD1b6Uk
+1uQKAyHUbn/tXvP8lrjAibGzVsXDT2L0x4Edx+QdixPgOji3gBMyL2V
+wIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/ripe-pilot.tal b/rp/rcynic/sample-trust-anchors/ripe-pilot.tal
new file mode 100644
index 00000000..971128e0
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/ripe-pilot.tal
@@ -0,0 +1,7 @@
+rsync://localcert.ripe.net/ta/ripe-ncc-pilot.cer
+
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApIXenLOBfyo7cOnm4mGKmYxsoWCp28dw3XJAoZNW
+PDK8i9MxYACpwfz7bjyGma1BWPBJuievNd6nriFI+3WG+wt2bnO2ZmiLenCwMtm8bu7BeldpWRwlAnRp4t4I
+L6sZ7T9bF+4sTrv1qiEANqam0mhtLtUfbWXV5Z4mjgnNur7fJH2lIOm7Oc2/tok1rid8WsPe18zuvgwA3M0f
+KQ/Oa4SMXKnHr3fg2cHAm1cfEEvhMKa3rUAvsKGVEYeTJNg6rh3IRnjWhZ8GmE1ywl/9qMa2z4YsUi9Bx9U+
+/zMS8qpJn/q6XBbZ8XYTTFvSWfXd6b82jSfABa4ukIDCUF/QFwIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal
new file mode 100644
index 00000000..1e466300
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/rpki.net-testbed.tal
@@ -0,0 +1,9 @@
+rsync://repo0.rpki.net/rpki/root.cer
+
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDxGUG5hbt
+CXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6Kfa5ygmqQ+xOZO
+wTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9nbtxmLRW7B67SJCBSzfa5
+XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIaBfPcs80ICMgHQX+fphvute9XLxjf
+JKJWkhZqZ0v7pZm2uhkcPx1PMGcrGee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8v
+qz45H+DKmYKvPSstZjCCq9aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGK
+twIDAQAB
diff --git a/rp/rcynic/sample-trust-anchors/testbed-apnicrpki.tal.disabled b/rp/rcynic/sample-trust-anchors/testbed-apnicrpki.tal.disabled
new file mode 100644
index 00000000..ae16a302
--- /dev/null
+++ b/rp/rcynic/sample-trust-anchors/testbed-apnicrpki.tal.disabled
@@ -0,0 +1,8 @@
+rsync://apnicrpki.rand.apnic.net/repository/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApQofhU9VeRGZxlNgkrFR
+ShSIkGtRUwg1mkISvieM2oxA6LFhHoqFV25BdK/3IhOh/0I/E9WX7uWPD72MFGBB
+LsFc6JpZFkvg8+9KUIHquk46wn4bEvA8xMsPbGo+mK0CAkqOdHQSemC7nqUlR9VH
+5zH1t8aYS4mrVN59LfClmiEE7QElgmVyvccfaEd+gMGaxsDvSylWIdvVxYfFG2rB
+LiEsmfXwdn2a8b1Zx5eaFD80XV9Z7h15ESP1epSScGzMp2XB0FYMC3f9k7l5sydX
+Rj5BYo1rWaM+Y6AoHe/d8G0xm6iX6b/JqT4Ywdt8/7DbsweaIk73VnpnXrYATCt9
+VwIDAQAB
diff --git a/rp/rcynic/static-rsync/Makefile.in b/rp/rcynic/static-rsync/Makefile.in
new file mode 100644
index 00000000..8a433c7b
--- /dev/null
+++ b/rp/rcynic/static-rsync/Makefile.in
@@ -0,0 +1,44 @@
+# $Id$
+
+VERSION = 2.6.9
+
+CFG_ENV = CFLAGS='@CFLAGS@' LDFLAGS='@LDFLAGS@ @LD_STATIC_FLAG@'
+CFG_ARG =
+
+TARBALL = rsync-${VERSION}.tar.gz
+DIRNAME = rsync-${VERSION}
+
+CFG_LOG = > ../config.log 2>&1
+BIN_LOG = > ../build.log 2>&1
+
+BIN = rsync
+
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir = @abs_top_builddir@
+
+all: ${BIN}
+
+${BIN}: ${DIRNAME}/${BIN}
+	ln ${DIRNAME}/${BIN} $@
+	file $@
+
+${DIRNAME}/${BIN}: configured.stamp
+	cd ${DIRNAME} && ${MAKE} ${BIN_LOG}
+
+extracted.stamp: ${TARBALL}
+	gzip -c -d ${TARBALL} | tar -xf -
+	touch $@
+
+patched.stamp: extracted.stamp
+	for i in patches/patch-*; do if test -f "$$i"; then patch -d ${DIRNAME} <"$$i"; else :; fi; done
+	touch $@
+
+configured.stamp: patched.stamp
+	cd ${DIRNAME} && ${CFG_ENV} ./configure ${CFG_ARG} ${CFG_LOG}
+	touch $@
+
+clean:
+	rm -rf ${BIN} ${DIRNAME} *.stamp *.log
+
+distclean: clean
+	rm -f Makefile
diff --git a/rp/rcynic/static-rsync/README b/rp/rcynic/static-rsync/README
new file mode 100644
index 00000000..9ff5afa8
--- /dev/null
+++ b/rp/rcynic/static-rsync/README
@@ -0,0 +1,15 @@
+$Id$
+
+Hack to build a static rsync binary suitable for use in a chroot jail.
+
+The default configuration is for gcc, since that's the most widely
+used compiler on the platforms we use.  I've provided hooks intended
+to make it simple to support other compilers just by overriding make
+variables on the command line; if you need to do something more
+drastic than this to get your compiler working, please tell me.
+
+If your platform doesn't support static binaries at all, you're on
+your own (and should whine at your OS vendor, as this is nuts).
+
+We try to stick with rsync release code, but apply security patches
+when necessary.
diff --git a/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091 b/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091
new file mode 100644
index 00000000..201af96a
--- /dev/null
+++ b/rp/rcynic/static-rsync/patches/patch-CVE-2007-4091
@@ -0,0 +1,60 @@
+--- sender.c	2006-09-20 03:53:32.000000000 +0200
++++ sender.c	2007-07-25 15:33:05.000000000 +0200
+@@ -123,6 +123,7 @@
+ 	char fname[MAXPATHLEN];
+ 	struct file_struct *file;
+ 	unsigned int offset;
++	size_t l = 0;
+ 
+ 	if (ndx < 0 || ndx >= the_file_list->count)
+ 		return;
+@@ -133,6 +134,20 @@
+ 			file->dir.root, "/", NULL);
+ 	} else
+ 		offset = 0;
++
++	l = offset + 1;
++	if (file) {
++		if (file->dirname)
++			l += strlen(file->dirname);
++		if (file->basename)
++			l += strlen(file->basename);
++	}
++
++	if (l >= sizeof(fname)) {
++		rprintf(FERROR, "Overlong pathname\n");
++		exit_cleanup(RERR_FILESELECT);
++	}
++
+ 	f_name(file, fname + offset);
+ 	if (remove_source_files) {
+ 		if (do_unlink(fname) == 0) {
+@@ -224,6 +239,7 @@
+ 	enum logcode log_code = log_before_transfer ? FLOG : FINFO;
+ 	int f_xfer = write_batch < 0 ? batch_fd : f_out;
+ 	int i, j;
++	size_t l = 0;
+ 
+ 	if (verbose > 2)
+ 		rprintf(FINFO, "send_files starting\n");
+@@ -259,6 +275,20 @@
+ 			fname[offset++] = '/';
+ 		} else
+ 			offset = 0;
++
++	l = offset + 1;
++	if (file) {
++		if (file->dirname)
++			l += strlen(file->dirname);
++		if (file->basename)
++			l += strlen(file->basename);
++	}
++
++	if (l >= sizeof(fname)) {
++		rprintf(FERROR, "Overlong pathname\n");
++		exit_cleanup(RERR_FILESELECT);
++	}
++
+ 	fname2 = f_name(file, fname + offset);
+ 
+ 	if (verbose > 2)
diff --git a/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz b/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz
new file mode 100644
index 00000000..6377f639
Binary files /dev/null and b/rp/rcynic/static-rsync/rsync-2.6.9.tar.gz differ
diff --git a/rp/rcynic/validation_status b/rp/rcynic/validation_status
new file mode 100755
index 00000000..1f7a704d
--- /dev/null
+++ b/rp/rcynic/validation_status
@@ -0,0 +1,34 @@
+# $Id$
+#
+# Copyright (C) 2012 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Flat text listing of <validation_status/> elements from rcynic.xml.
+"""
+
+import sys
+
+try:
+    from lxml.etree import ElementTree
+except ImportError:
+    from xml.etree.ElementTree import ElementTree
+
+for filename in ([sys.stdin] if len(sys.argv) < 2 else sys.argv[1:]):
+    for elt in ElementTree(file = filename).findall("validation_status"):
+        print "%s %8s %-40s %s" % (
+            elt.get("timestamp"),
+            elt.get("generation"),
+            elt.get("status"),
+            elt.text.strip())
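
For a concrete sense of the input and output (the element below is invented for illustration; real rcynic.xml files contain many such elements under the root), this sketch feeds one validation_status element through the same format string the script uses:

    from xml.etree.ElementTree import fromstring

    # Invented sample element in the shape the script consumes.
    elt = fromstring('<validation_status timestamp="2012-03-01T12:00:00Z"'
                     ' generation="current" status="object_accepted">'
                     'rsync://rpki.example.net/repository/root.cer'
                     '</validation_status>')
    print("%s %8s %-40s %s" % (elt.get("timestamp"), elt.get("generation"),
                               elt.get("status"), elt.text.strip()))

The result is one line per element, with the timestamp, the generation right-aligned in 8 columns, the status left-aligned in 40 columns, and the object URI, a format that is easy to grep or sort.
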