Copyright: clarify and simplify the file headers

* Lumiera source code has always been copyrighted by its individual contributors
* there is no entity "Lumiera.org" which holds any copyrights
* Lumiera source code is provided under the GPL Version 2+

== Explanations ==

Lumiera as a whole is distributed under Copyleft, the GNU General Public License Version 2 or above.
For this to become legally effective, the ''file COPYING in the root directory is sufficient.''
The licensing header in each file is not strictly necessary, yet considered good practice;
attaching a licence notice increases the likelihood that this information is retained
in case someone extracts individual code files. However, legally binding licensing terms
do not become effective merely through the presence of some text; what matters is the fact
that a given piece of code was provably copyrighted and published under a license. Even
reformatting the code, renaming some variables or deleting parts of the code will not alter
this legal situation, but rather creates a derivative work, which is likewise covered by the GPL!

The most relevant information in the file header is the notice regarding the
time of the first individual copyright claim. By virtue of this initial copyright,
the first author is entitled to choose the terms of licensing. All further
modifications are permitted and covered by the License. The specific wording
or format of the copyright header is not legally relevant, as long as the
intention to publish under the GPL remains clear. The extended wording was
based on a recommendation by the FSF; it can be shortened, because the full terms
of the license are provided alongside the distribution, in the file COPYING.

# coding: utf-8
##
## BuilderDoxygen.py - SCons builder for generating Doxygen documentation
##
#
# Astxx, the Asterisk C++ API and Utility Library.
# Copyright (C)
#   2005, 2006, Matthew A. Nicholson
#   2006, Tim Blechmann
#   2007, Christoph Boehme
#   2012, Dirk Baechle
#   2013, Russel Winder
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License version 2.1 as published by the Free Software Foundation.
#####################################################################

# history: 8/2008 adapted for Lumiera build system
#                 added patch for Doxyfile in subdirectory
#                 see http://www.scons.org/wiki/DoxygenBuilder
#          4/2025 Upgrade to latest upstream version for Python-3
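#
# Usage sketch (illustrative only, not part of the upstream header): assuming
# this module is available on the SCons toolpath as 'BuilderDoxygen' and a
# Doxyfile lives in doc/devel/, the builder registered below could be used as:
#
#     env = Environment(tools=['default', 'BuilderDoxygen'], toolpath=['admin/scons'])
#     docs = env.Doxygen('doc/devel/Doxyfile')
#     env.Alias('doc', docs)
#
# The tool name, toolpath and Doxyfile location above are assumptions.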

import os
import os.path
import glob
from fnmatch import fnmatch
from functools import reduce

# Currently supported output formats and their default
# values and output locations.
# From left to right:
#  1. default setting YES|NO
#  2. default output folder for this format
#  3. name of the (main) output file
#  4. default extension
#  5. field for overriding the output file extension
output_formats = {
    "HTML": ("YES", "html", "index", ".html", "HTML_FILE_EXTENSION"),
    "LATEX": ("YES", "latex", "refman", ".tex", ""),
    "RTF": ("NO", "rtf", "refman", ".rtf", ""),
    "MAN": ("NO", "man", "", ".3", "MAN_EXTENSION"),
    "XML": ("NO", "xml", "index", ".xml", ""),
}
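
# For illustration (a hedged reading of the table above): with these defaults,
# GENERATE_HTML defaults to YES, so DoxyEmitter registers
# <OUTPUT_DIRECTORY>/html/index.html as a build target, and the ".html"
# extension can be overridden through the HTML_FILE_EXTENSION setting.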


def DoxyfileParse(file_contents, conf_dir, data=None):
    """
    Parse a Doxygen source file and return a dictionary of all the values.
    Values will be strings and lists of strings.
    """
    if data is None:
        data = {}

    import shlex

    lex = shlex.shlex(instream=file_contents, posix=True)
    lex.wordchars += "*+./-:@"
    lex.whitespace = lex.whitespace.replace("\n", "")
    lex.escape = ""

    lineno = lex.lineno
    token = lex.get_token()
    key = None
    last_token = ""
    key_token = True  # The first token should be a key.
    next_key = False
    new_data = True

    def append_data(data, key, new_data, token):
        if new_data or len(data[key]) == 0:
            data[key].append(token)
        else:
            data[key][-1] += token

    while token:
        if token in ["\n"]:
            if last_token not in ["\\"]:
                key_token = True
        elif token in ["\\"]:
            pass
        elif key_token:
            key = token
            key_token = False
        else:
            if token == "+=":
                if key not in data:
                    data[key] = []
            elif token == "=":
                if key == "TAGFILES" and key in data:
                    append_data(data, key, False, "=")
                    new_data = False
                elif key == "@INCLUDE" and key in data:
                    # don't reset the @INCLUDE list when we see a new @INCLUDE line.
                    pass
                else:
                    data[key] = []
            elif key == "@INCLUDE":
                # special case for @INCLUDE key: read the referenced
                # file as a doxyfile too.
                nextfile = token
                if not os.path.isabs(nextfile):
                    nextfile = os.path.join(conf_dir, nextfile)
                if nextfile in data[key]:
                    raise Exception("recursive @INCLUDE in Doxygen config: " + nextfile)
                data[key].append(nextfile)
                with open(nextfile, "r") as fh:
                    DoxyfileParse(fh.read(), conf_dir, data)
            else:
                append_data(data, key, new_data, token)
                new_data = True

        last_token = token
        token = lex.get_token()

        if last_token == "\\" and token != "\n":
            new_data = False
            append_data(data, key, new_data, "\\")

    # compress lists of len 1 into single strings
    for (k, v) in list(data.items()):
        if len(v) == 0:
            data.pop(k)

        # items in the following list will be kept as lists and not converted to strings
        if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS", "TAGFILES", "@INCLUDE"]:
            continue

        if len(v) == 1:
            data[k] = v[0]

    return data


def DoxySourceFiles(node, env):
    """
    Scan the given node's contents (a Doxygen file) and add
    any files used to generate docs to the list of source files.
    """
    default_file_patterns = [
        "*.c",
        "*.cc",
        "*.cxx",
        "*.cpp",
        "*.c++",
        "*.java",
        "*.ii",
        "*.ixx",
        "*.ipp",
        "*.i++",
        "*.inl",
        "*.h",
        "*.hh",
        "*.hxx",
        "*.hpp",
        "*.h++",
        "*.idl",
        "*.odl",
        "*.cs",
        "*.php",
        "*.php3",
        "*.inc",
        "*.m",
        "*.mm",
        "*.py",
    ]

    default_exclude_patterns = [
        "*~",
    ]

    sources = []

    # We're running in the top-level directory, but the doxygen
    # configuration file is in the same directory as node; this means
    # that relative pathnames in node must be adjusted before they can
    # go onto the sources list
    conf_dir = os.path.dirname(str(node))

    data = DoxyfileParse(node.get_text_contents(), conf_dir)

    if data.get("RECURSIVE", "NO") == "YES":
        recursive = True
    else:
        recursive = False

    file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
    exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)

    input = data.get("INPUT")
    if input:
        for node in data.get("INPUT", []):
            if not os.path.isabs(node):
                node = os.path.join(conf_dir, node)
            if os.path.isfile(node):
                sources.append(node)
            elif os.path.isdir(node):
                if recursive:
                    for root, dirs, files in os.walk(node):
                        for f in files:
                            filename = os.path.join(root, f)

                            pattern_check = reduce(
                                lambda x, y: x or bool(fnmatch(filename, y)),
                                file_patterns,
                                False,
                            )
                            exclude_check = reduce(
                                lambda x, y: x and fnmatch(filename, y),
                                exclude_patterns,
                                True,
                            )

                            if pattern_check and not exclude_check:
                                sources.append(filename)
                else:
                    for pattern in file_patterns:
                        sources.extend(glob.glob("/".join([node, pattern])))
    else:
        # No INPUT specified, so apply plain patterns only
        if recursive:
            for root, dirs, files in os.walk("."):
                for f in files:
                    filename = os.path.join(root, f)

                    pattern_check = reduce(
                        lambda x, y: x or bool(fnmatch(filename, y)),
                        file_patterns,
                        False,
                    )
                    exclude_check = reduce(
                        lambda x, y: x and fnmatch(filename, y), exclude_patterns, True
                    )

                    if pattern_check and not exclude_check:
                        sources.append(filename)
        else:
            for pattern in file_patterns:
                sources.extend(glob.glob(pattern))

    # Add @INCLUDEd files to the list of source files:
    for node in data.get("@INCLUDE", []):
        sources.append(node)

    # Add tagfiles to the list of source files:
    for node in data.get("TAGFILES", []):
        file = node.split("=")[0]
        if not os.path.isabs(file):
            file = os.path.join(conf_dir, file)
        sources.append(file)

    # Add additional files to the list of source files:
    def append_additional_source(option, formats):
        for f in formats:
            if data.get("GENERATE_" + f, output_formats[f][0]) == "YES":
                file = data.get(option, "")
                if file != "":
                    if not os.path.isabs(file):
                        file = os.path.join(conf_dir, file)
                    if os.path.isfile(file):
                        sources.append(file)
                    break

    append_additional_source("HTML_STYLESHEET", ["HTML"])
    append_additional_source("HTML_HEADER", ["HTML"])
    append_additional_source("HTML_FOOTER", ["HTML"])

    return sources


def DoxySourceScan(node, env, path):
    """
    Doxygen Doxyfile source scanner. This should scan the Doxygen file and add
    any files used to generate docs to the list of source files.
    """
    filepaths = DoxySourceFiles(node, env)
    sources = [env.File(path) for path in filepaths]
    return sources


def DoxySourceScanCheck(node, env):
    """Check if we should scan this file"""
    return os.path.isfile(node.path)


def DoxyEmitter(target, source, env):
    """Doxygen Doxyfile emitter"""
    doxy_fpath = str(source[0])
    conf_dir = os.path.dirname(doxy_fpath)

    data = DoxyfileParse(source[0].get_text_contents(), conf_dir)

    targets = []
    out_dir = data.get("OUTPUT_DIRECTORY", ".")
    if not os.path.isabs(out_dir):
        out_dir = os.path.join(conf_dir, out_dir)

    # add our output locations
    for (k, v) in list(output_formats.items()):
        if data.get("GENERATE_" + k, v[0]) == "YES":
            # Initialize output file extension for MAN pages
            if k == "MAN":
                # Is the given extension valid?
                manext = v[3]
                if v[4] and v[4] in data:
                    manext = data.get(v[4])
                # Try to strip off dots
                manext = manext.replace(".", "")
                # Can we convert it to an int?
                try:
                    int(manext)
                except ValueError:
                    # No, so set back to default
                    manext = "3"

                od = env.Dir(
                    os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]), "man" + manext)
                )
            else:
                od = env.Dir(os.path.join(out_dir, data.get(k + "_OUTPUT", v[1])))
            # don't clobber target folders
            env.Precious(od)
            # set up cleaning stuff
            env.Clean(od, od)

            # Add target files
            if k != "MAN":
                # Is an extension override var given?
                if v[4] and v[4] in data:
                    fname = v[2] + data.get(v[4])
                else:
                    fname = v[2] + v[3]
                of = env.File(
                    os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]), fname)
                )
                targets.append(of)
                # don't clean single files, we remove the complete output folders (see above)
                env.NoClean(of)
            else:
                # Special case: MAN pages
                # We have to add a target file docs/man/man3/foo.h.3
                # for each input file foo.h, so we scan the config file
                # a second time... :(
                filepaths = DoxySourceFiles(source[0], env)
                for f in filepaths:
                    if os.path.isfile(f) and f != doxy_fpath:
                        of = env.File(
                            os.path.join(
                                out_dir,
                                data.get(k + "_OUTPUT", v[1]),
                                "man" + manext,
                                f + "." + manext,
                            )
                        )
                        targets.append(of)
                        # don't clean single files, we remove the complete output folders (see above)
                        env.NoClean(of)

    # add the tag file if necessary:
    tagfile = data.get("GENERATE_TAGFILE", "")
    if tagfile != "":
        if not os.path.isabs(tagfile):
            tagfile = os.path.join(conf_dir, tagfile)
        targets.append(env.File(tagfile))

    return (targets, source)


def generate(env):
    """
    Add builders and construction variables for the
    Doxygen tool. This is currently for Doxygen 1.4.6.
    """
    doxyfile_scanner = env.Scanner(
        DoxySourceScan,
        "DoxySourceScan",
        scan_check=DoxySourceScanCheck,
    )

    import SCons.Builder

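    # Note (descriptive comment, added): the action below changes into the
    # directory holding the Doxyfile, runs doxygen on it, and tees all output
    # into a ',doxylog' file next to the Doxyfile so warnings stay inspectable.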
    doxyfile_builder = SCons.Builder.Builder(
        action="cd ${SOURCE.dir} && (${DOXYGEN} ${SOURCE.file} 2>&1 |tee ,doxylog)",
        emitter=DoxyEmitter,
        target_factory=env.fs.Entry,
        single_source=True,
        source_scanner=doxyfile_scanner,
    )

    env.Append(
        BUILDERS={
            "Doxygen": doxyfile_builder,
        }
    )

    env.AppendUnique(
        DOXYGEN="doxygen",
    )


def exists(env):
    """
    Make sure doxygen exists.
    """
    return env.Detect("doxygen")