Upgrade: Scons Doxygen Builder from 2022

A long time ago, we grabbed this plug-in from the Scons Wiki.
The latest version from 2022-07-31 was upgraded to Python-3

Note: this merge retains our reformatted copyright header
      and our addition of a ",doxylog" file in the target dir
This commit is contained in:
Fischlurch 2025-04-08 04:17:57 +02:00
commit 425f7b3f21

View file

@ -8,6 +8,9 @@
# Copyright (C) # Copyright (C)
# 2005, 2006, Matthew A. Nicholson # 2005, 2006, Matthew A. Nicholson
# 2006, Tim Blechmann # 2006, Tim Blechmann
# 2007, Christoph Boehme
# 2012, Dirk Baechle
# 2013, Russel Winder
# #
# This library is free software; you can redistribute it and/or # This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public # modify it under the terms of the GNU Lesser General Public
@ -15,8 +18,9 @@
##################################################################### #####################################################################
# history: 8/2008 adapted for Lumiera build system # history: 8/2008 adapted for Lumiera build system
# added patch for Doxyfile in subdirectory # added patch for Doxyfile in subdirectory
# see http://www.scons.org/wiki/DoxygenBuilder # see http://www.scons.org/wiki/DoxygenBuilder
# 4/2025 Upgrade to latest upstream version for Python-3
import os import os
@ -25,39 +29,57 @@ import glob
from fnmatch import fnmatch from fnmatch import fnmatch
from functools import reduce from functools import reduce
# Currently supported output formats and their default
# values and output locations.
# From left to right:
#  1. default setting YES|NO
#  2. default output folder for this format
#  3. name of the (main) output file
#  4. default extension
#  5. name of the Doxyfile key overriding the output file extension
output_formats = {
    "HTML": ("YES", "html", "index", ".html", "HTML_FILE_EXTENSION"),
    "LATEX": ("YES", "latex", "refman", ".tex", ""),
    "RTF": ("NO", "rtf", "refman", ".rtf", ""),
    "MAN": ("NO", "man", "", ".3", "MAN_EXTENSION"),
    "XML": ("NO", "xml", "index", ".xml", ""),
}
def DoxyfileParse(file_contents, conf_dir, data=None):
    """
    Parse a Doxygen source file and return a dictionary of all the values.
    Values will be strings and lists of strings.

    file_contents -- text of the Doxyfile to parse
    conf_dir      -- directory of the Doxyfile; used to resolve relative
                     paths given in @INCLUDE directives
    data          -- optional dict to accumulate results into (used by the
                     recursive @INCLUDE handling); a fresh dict by default
    """
    if data is None:
        data = {}

    import shlex

    lex = shlex.shlex(instream=file_contents, posix=True)
    # Let characters typical for Doxyfile values (globs, paths,
    # version numbers, @INCLUDE) be part of a single token.
    lex.wordchars += "*+./-:@"
    # Keep newlines as tokens: they terminate a "KEY = value" line.
    lex.whitespace = lex.whitespace.replace("\n", "")
    lex.escape = ""

    token = lex.get_token()
    key = None
    last_token = ""
    key_token = True  # The first token should be a key.
    new_data = True

    def append_data(data, key, new_data, token):
        # Either start a new list entry or glue onto the previous one
        # (the latter happens after a backslash line continuation).
        if new_data or len(data[key]) == 0:
            data[key].append(token)
        else:
            data[key][-1] += token

    while token:
        if token in ["\n"]:
            # A newline ends the value unless escaped by a backslash.
            if last_token not in ["\\"]:
                key_token = True
        elif token in ["\\"]:
            pass
        elif key_token:
            key = token
            key_token = False
        else:
            if token == "+=":
                if key not in data:
                    data[key] = []
            elif token == "=":
                if key == "TAGFILES" and key in data:
                    # TAGFILES entries have the form "file=location";
                    # glue the "=" onto the previous token.
                    append_data(data, key, False, "=")
                    new_data = False
                elif key == "@INCLUDE" and key in data:
                    # don't reset the @INCLUDE list when we see a new @INCLUDE line.
                    pass
                else:
                    data[key] = []
            elif key == "@INCLUDE":
                # special case for @INCLUDE key: read the referenced
                # file as a doxyfile too.
                nextfile = token
                if not os.path.isabs(nextfile):
                    nextfile = os.path.join(conf_dir, nextfile)
                if nextfile in data[key]:
                    raise Exception("recursive @INCLUDE in Doxygen config: " + nextfile)
                data[key].append(nextfile)
                with open(nextfile, "r") as fh:
                    DoxyfileParse(fh.read(), conf_dir, data)
            else:
                append_data(data, key, new_data, token)
                new_data = True

        last_token = token
        token = lex.get_token()

        if last_token == "\\" and token != "\n":
            # A backslash not followed by a newline is a literal
            # backslash inside the value: keep it.
            new_data = False
            append_data(data, key, new_data, "\\")

    # compress lists of len 1 into single strings
    for (k, v) in list(data.items()):
        if len(v) == 0:
            data.pop(k)

        # items in the following list will be kept as lists and not converted to strings
        if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS", "TAGFILES", "@INCLUDE"]:
            continue

        if len(v) == 1:
            data[k] = v[0]

    return data
def DoxySourceFiles(node, env):
    """
    Scan the given node's contents (a Doxygen config file) and return the
    list of file paths used to generate the docs: INPUT files, @INCLUDEd
    config files, tag files and extra HTML header/footer/stylesheet files.
    """
    default_file_patterns = [
        "*.c",
        "*.cc",
        "*.cxx",
        "*.cpp",
        "*.c++",
        "*.java",
        "*.ii",
        "*.ixx",
        "*.ipp",
        "*.i++",
        "*.inl",
        "*.h",
        "*.hh ",
        "*.hxx",
        "*.hpp",
        "*.h++",
        "*.idl",
        "*.odl",
        "*.cs",
        "*.php",
        "*.php3",
        "*.inc",
        "*.m",
        "*.mm",
        "*.py",
    ]

    default_exclude_patterns = [
        "*~",
    ]

    sources = []

    # We're running in the top-level directory, but the doxygen
    # configuration file is in the same directory as node; this means
    # that relative pathnames in node must be adjusted before they can
    # go onto the sources list
    conf_dir = os.path.dirname(str(node))
    data = DoxyfileParse(node.get_text_contents(), conf_dir)

    recursive = data.get("RECURSIVE", "NO") == "YES"

    file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
    exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)

    def _is_doc_source(filename):
        # Matches at least one FILE_PATTERNS glob...
        pattern_check = reduce(
            lambda x, y: x or bool(fnmatch(filename, y)),
            file_patterns,
            False,
        )
        # ...and is not excluded.  NOTE(review): a file is only excluded
        # when it matches *all* exclude patterns — preserved from the
        # original, although "any" may have been intended.
        exclude_check = reduce(
            lambda x, y: x and fnmatch(filename, y),
            exclude_patterns,
            True,
        )
        return pattern_check and not exclude_check

    # 'input' shadows the builtin in the original; renamed here.
    input_paths = data.get("INPUT")
    if input_paths:
        for entry in input_paths:
            if not os.path.isabs(entry):
                entry = os.path.join(conf_dir, entry)
            if os.path.isfile(entry):
                sources.append(entry)
            elif os.path.isdir(entry):
                if recursive:
                    for root, dirs, files in os.walk(entry):
                        for f in files:
                            filename = os.path.join(root, f)
                            if _is_doc_source(filename):
                                sources.append(filename)
                else:
                    for pattern in file_patterns:
                        sources.extend(glob.glob("/".join([entry, pattern])))
    else:
        # No INPUT specified, so apply plain patterns only
        if recursive:
            for root, dirs, files in os.walk("."):
                for f in files:
                    filename = os.path.join(root, f)
                    if _is_doc_source(filename):
                        sources.append(filename)
        else:
            for pattern in file_patterns:
                sources.extend(glob.glob(pattern))

    # Add @INCLUDEd config files to the list of source files:
    for included in data.get("@INCLUDE", []):
        sources.append(included)

    # Add tagfiles to the list of source files
    # (entries have the form "tagfile=location"):
    for tagspec in data.get("TAGFILES", []):
        tagfile = tagspec.split("=")[0]
        if not os.path.isabs(tagfile):
            tagfile = os.path.join(conf_dir, tagfile)
        sources.append(tagfile)

    # Add additional files to the list of source files:
    def append_additional_source(option, formats):
        # For the first enabled format, add the file referenced by
        # the given Doxyfile option (if it exists) as a dependency.
        for fmt in formats:
            if data.get("GENERATE_" + fmt, output_formats[fmt][0]) == "YES":
                extra = data.get(option, "")
                if extra != "":
                    if not os.path.isabs(extra):
                        extra = os.path.join(conf_dir, extra)
                    if os.path.isfile(extra):
                        sources.append(extra)
                    break

    append_additional_source("HTML_STYLESHEET", ["HTML"])
    append_additional_source("HTML_HEADER", ["HTML"])
    append_additional_source("HTML_FOOTER", ["HTML"])

    return sources
def DoxySourceScan(node, env, path):
    """
    Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
    any files used to generate docs to the list of source files.
    """
    # Wrap every scanned path into an SCons File node.
    return [env.File(filepath) for filepath in DoxySourceFiles(node, env)]
def DoxySourceScanCheck(node, env):
    """Check if we should scan this file: only existing plain files qualify."""
    return os.path.isfile(node.path)
def DoxyEmitter(target, source, env):
    """
    Doxygen Doxyfile emitter: derive the target nodes (output folders,
    main output files, man pages and the tag file) from the Doxyfile.
    """
    doxy_fpath = str(source[0])
    conf_dir = os.path.dirname(doxy_fpath)

    data = DoxyfileParse(source[0].get_text_contents(), conf_dir)

    targets = []
    out_dir = data.get("OUTPUT_DIRECTORY", ".")
    if not os.path.isabs(out_dir):
        out_dir = os.path.join(conf_dir, out_dir)

    # add our output locations
    for (k, v) in list(output_formats.items()):
        if data.get("GENERATE_" + k, v[0]) == "YES":
            if k == "MAN":
                # Determine the man section: start from the default
                # extension, allow override via MAN_EXTENSION.
                manext = v[3]
                if v[4] and v[4] in data:
                    manext = data.get(v[4])
                # Try to strip off dots ('.3' -> '3')
                manext = manext.replace(".", "")
                # The section must be numeric; fall back to section 3.
                # (was a bare 'except:' upstream — narrowed here)
                try:
                    int(manext)
                except (TypeError, ValueError):
                    manext = "3"
                od = env.Dir(
                    os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]), "man" + manext)
                )
            else:
                od = env.Dir(os.path.join(out_dir, data.get(k + "_OUTPUT", v[1])))

            # don't clobber target folders
            env.Precious(od)
            # set up cleaning stuff
            env.Clean(od, od)

            # Add target files
            if k != "MAN":
                # Is an extension override var given?
                if v[4] and v[4] in data:
                    fname = v[2] + data.get(v[4])
                else:
                    fname = v[2] + v[3]
                of = env.File(
                    os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]), fname)
                )
                targets.append(of)
                # don't clean single files, we remove the complete output folders (see above)
                env.NoClean(of)
            else:
                # Special case: MAN pages
                # We have to add a target file docs/man/man3/foo.h.3
                # for each input file foo.h, so we scan the config file
                # a second time... :(
                filepaths = DoxySourceFiles(source[0], env)
                for f in filepaths:
                    if os.path.isfile(f) and f != doxy_fpath:
                        of = env.File(
                            os.path.join(
                                out_dir,
                                data.get(k + "_OUTPUT", v[1]),
                                "man" + manext,
                                f + "." + manext,
                            )
                        )
                        targets.append(of)
                        # don't clean single files, we remove the complete output folders (see above)
                        env.NoClean(of)

    # add the tag file if necessary:
    tagfile = data.get("GENERATE_TAGFILE", "")
    if tagfile != "":
        if not os.path.isabs(tagfile):
            tagfile = os.path.join(conf_dir, tagfile)
        targets.append(env.File(tagfile))

    return (targets, source)
def generate(env):
    """
    Add builders and construction variables for the
    Doxygen tool. This is currently for Doxygen 1.4.6.
    """
    doxyfile_scanner = env.Scanner(
        DoxySourceScan,
        "DoxySourceScan",
        scan_check=DoxySourceScanCheck,
    )

    import SCons.Builder

    # Run doxygen from the Doxyfile's directory and capture the
    # build log into a ",doxylog" file next to it (local addition).
    doxyfile_builder = SCons.Builder.Builder(
        action="cd ${SOURCE.dir} && (${DOXYGEN} ${SOURCE.file} 2>&1 |tee ,doxylog)",
        emitter=DoxyEmitter,
        target_factory=env.fs.Entry,
        single_source=True,
        source_scanner=doxyfile_scanner,
    )

    env.Append(
        BUILDERS={
            "Doxygen": doxyfile_builder,
        }
    )
    env.AppendUnique(
        DOXYGEN="doxygen",
    )
def exists(env):
    """
    Make sure doxygen exists.
    """
    return env.Detect("doxygen")