Copyright: clarify and simplify the file headers
* Lumiera source code always was copyrighted by individual contributors
* there is no entity "Lumiera.org" which holds any copyrights
* Lumiera source code is provided under the GPL Version 2+
== Explanations ==
Lumiera as a whole is distributed under Copyleft, GNU General Public License Version 2 or above.
For this to become legally effective, the ''File COPYING in the root directory is sufficient.''
The licensing header in each file is not strictly necessary, yet considered good practice;
attaching a license notice increases the likelihood that this information is retained
in case someone extracts individual code files. However, it is not by the presence of some
text, that legally binding licensing terms become effective; rather the fact matters that a
given piece of code was provably copyrighted and published under a license. Even reformatting
the code, renaming some variables or deleting parts of the code will not alter this legal
situation, but rather creates a derivative work, which is likewise covered by the GPL!
The most relevant information in the file header is the notice regarding the
time of the first individual copyright claim. By virtue of this initial copyright,
the first author is entitled to choose the terms of licensing. All further
modifications are permitted and covered by the License. The specific wording
or format of the copyright header is not legally relevant, as long as the
intention to publish under the GPL remains clear. The extended wording was
based on a recommendation by the FSF. It can be shortened, because the full terms
of the license are provided alongside the distribution, in the file COPYING.
2024-11-17 23:42:55 +01:00
|
|
|
# coding: utf-8
|
2008-08-28 02:57:12 +02:00
|
|
|
##
|
|
|
|
|
## BuilderDoxygen.py - SCons builder for generating Doxygen documentation
|
|
|
|
|
##
|
|
|
|
|
|
|
|
|
|
#
|
|
|
|
|
# Astxx, the Asterisk C++ API and Utility Library.
|
Copyright: clarify and simplify the file headers
* Lumiera source code always was copyrighted by individual contributors
* there is no entity "Lumiera.org" which holds any copyrights
* Lumiera source code is provided under the GPL Version 2+
== Explanations ==
Lumiera as a whole is distributed under Copyleft, GNU General Public License Version 2 or above.
For this to become legally effective, the ''File COPYING in the root directory is sufficient.''
The licensing header in each file is not strictly necessary, yet considered good practice;
attaching a license notice increases the likelihood that this information is retained
in case someone extracts individual code files. However, it is not by the presence of some
text, that legally binding licensing terms become effective; rather the fact matters that a
given piece of code was provably copyrighted and published under a license. Even reformatting
the code, renaming some variables or deleting parts of the code will not alter this legal
situation, but rather creates a derivative work, which is likewise covered by the GPL!
The most relevant information in the file header is the notice regarding the
time of the first individual copyright claim. By virtue of this initial copyright,
the first author is entitled to choose the terms of licensing. All further
modifications are permitted and covered by the License. The specific wording
or format of the copyright header is not legally relevant, as long as the
intention to publish under the GPL remains clear. The extended wording was
based on a recommendation by the FSF. It can be shortened, because the full terms
of the license are provided alongside the distribution, in the file COPYING.
2024-11-17 23:42:55 +01:00
|
|
|
# Copyright (C)
|
|
|
|
|
# 2005, 2006, Matthew A. Nicholson
|
|
|
|
|
# 2006, Tim Blechmann
|
2008-08-28 02:57:12 +02:00
|
|
|
#
|
|
|
|
|
# This library is free software; you can redistribute it and/or
|
|
|
|
|
# modify it under the terms of the GNU Lesser General Public
|
|
|
|
|
# License version 2.1 as published by the Free Software Foundation.
|
|
|
|
|
#####################################################################
|
|
|
|
|
|
|
|
|
|
# history: 8/2008 adapted for Lumiera build system
|
|
|
|
|
# added patch for Doxyfile in subdirectory
|
|
|
|
|
# see http://www.scons.org/wiki/DoxygenBuilder
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
|
|
|
|
|
import os.path
|
|
|
|
|
import glob
|
|
|
|
|
from fnmatch import fnmatch
|
2022-09-05 23:02:29 +02:00
|
|
|
from functools import reduce
|
2008-08-28 02:57:12 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def DoxyfileParse(file_contents):
    """ Parse a Doxygen source file and return a dictionary of all the values.
        Values will be strings and lists of strings.

        `file_contents` may be str or bytes (SCons' node.get_contents()
        returns bytes on Python 3); bytes are decoded before parsing.
    """
    import shlex

    # node.get_contents() yields bytes under Python 3 -- shlex needs text
    if isinstance(file_contents, bytes):
        file_contents = file_contents.decode('utf-8', errors='replace')

    data = {}

    lex = shlex.shlex(instream = file_contents, posix = True)
    lex.wordchars += "*+./-:"                            # typical Doxyfile value characters
    lex.whitespace = lex.whitespace.replace("\n", "")    # newlines are significant (end of assignment)
    lex.escape = ""

    token = lex.get_token()
    key = token               # the first token should be a key
    last_token = ""
    key_token = False
    new_data = True

    def append_data(data, key, new_data, token):
        # start a new list entry, or extend the last one (continuation lines);
        # setdefault guards against a value token arriving before any '=' was seen
        values = data.setdefault(key, [])
        if new_data or len(values) == 0:
            values.append(token)
        else:
            values[-1] += token

    while token:
        if token in ['\n']:
            if last_token not in ['\\']:
                key_token = True      # non-continued newline: next token is a key
        elif token in ['\\']:
            pass                      # line-continuation marker
        elif key_token:
            key = token
            key_token = False
        else:
            if token == "+=":
                # '+=' appends to an existing value, so only initialise if unseen
                if key not in data:
                    data[key] = list()
            elif token == "=":
                data[key] = list()
            else:
                append_data(data, key, new_data, token)
                new_data = True

        last_token = token
        token = lex.get_token()

        # shlex tokenises '+=' as '+' followed by '=': reassemble it, otherwise
        # the '+=' branch above is unreachable and '+=' degrades to '='
        if token == "+":
            follow = lex.get_token()
            if follow == "=":
                token = "+="
            elif follow is not None:
                lex.push_token(follow)

        # a backslash not followed by a newline is a literal one: glue it on
        if last_token == '\\' and token != '\n':
            new_data = False
            append_data(data, key, new_data, '\\')

    # compress lists of len 1 into single strings
    for (k, v) in list(data.items()):
        if len(v) == 0:
            data.pop(k)
            continue          # key is gone; nothing further to normalise

        # items in the following list will be kept as lists and not converted to strings
        if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
            continue

        if len(v) == 1:
            data[k] = v[0]

    return data
|
2008-08-28 02:57:12 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def DoxySourceScan(node, env, path):
    """ Doxygen Doxyfile source scanner.
        This should scan the Doxygen file and add any files
        used to generate docs to the list of source files.

        `node` is the Doxyfile; `path` is part of the SCons scanner
        signature and is unused here.
    """
    # defaults mirroring Doxygen's own FILE_PATTERNS default set
    # (fixed: '*.hh' previously had a trailing space and never matched)
    default_file_patterns = [
        '*.c', '*.cc', '*.cxx', '*.cpp', '*.c++', '*.java', '*.ii', '*.ixx',
        '*.ipp', '*.i++', '*.inl', '*.h', '*.hh', '*.hxx', '*.hpp', '*.h++',
        '*.idl', '*.odl', '*.cs', '*.php', '*.php3', '*.inc', '*.m', '*.mm',
        '*.py',
    ]

    default_exclude_patterns = [
        '*~',
    ]

    sources = []

    data = DoxyfileParse(node.get_contents())

    recursive = (data.get("RECURSIVE", "NO") == "YES")

    file_patterns = data.get("FILE_PATTERNS", default_file_patterns)
    exclude_patterns = data.get("EXCLUDE_PATTERNS", default_exclude_patterns)

    #
    # We're running in the top-level directory, but the doxygen configuration file
    # is in the same directory as node; this means that relative pathnames in node
    # must be adjusted before they can go onto the sources list
    conf_dir = os.path.dirname(str(node))

    for entry in data.get("INPUT", []):
        if not os.path.isabs(entry):
            entry = os.path.join(conf_dir, entry)
        if os.path.isfile(entry):
            sources.append(entry)
        elif os.path.isdir(entry):
            if recursive:
                for root, dirs, files in os.walk(entry):
                    for f in files:
                        filename = os.path.join(root, f)

                        # keep a file when it matches at least one include pattern
                        # and none of the exclude patterns (fixed: the old reduce
                        # with 'and' excluded a file only when it matched ALL
                        # exclude patterns, which is wrong for more than one)
                        included = any(fnmatch(filename, p) for p in file_patterns)
                        excluded = any(fnmatch(filename, p) for p in exclude_patterns)

                        if included and not excluded:
                            sources.append(filename)
            else:
                for pattern in file_patterns:
                    sources.extend(glob.glob("/".join([entry, pattern])))

    sources = [env.File(name) for name in sources]
    return sources
|
2008-08-28 02:57:12 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def DoxySourceScanCheck(node, env):
    """ Check if we should scan this file: only a Doxyfile that
        actually exists on disk can be parsed for sources. """
    exists_on_disk = os.path.isfile(node.path)
    return exists_on_disk
|
2008-08-28 02:57:12 +02:00
|
|
|
|
|
|
|
|
|
2008-12-19 20:17:23 +01:00
|
|
|
def DoxyEmitter(source, target, env):
    """ Doxygen Doxyfile emitter: derive the documentation output
        directories from the Doxyfile and register them as targets. """
    # map each generator flag to its (default setting, default output location)
    format_defaults = {
        "HTML":  ("YES", "html"),
        "LATEX": ("YES", "latex"),
        "RTF":   ("NO",  "rtf"),
        "MAN":   ("NO",  "man"),
        "XML":   ("NO",  "xml"),
    }

    conf = DoxyfileParse(source[0].get_contents())
    base_dir = conf.get("OUTPUT_DIRECTORY", ".")

    # one target directory per enabled output format
    targets = []
    for fmt, (enabled_default, subdir_default) in format_defaults.items():
        if conf.get("GENERATE_" + fmt, enabled_default) == "YES":
            subdir = conf.get(fmt + "_OUTPUT", subdir_default)
            targets.append(env.Dir(os.path.join(base_dir, subdir)))

    # don't clobber targets
    for tgt in targets:
        env.Precious(tgt)

    # set up cleaning stuff
    for tgt in targets:
        env.Clean(tgt, tgt)

    return (targets, source)
|
2008-08-28 02:57:12 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate(env):
    """ Add builders and construction variables for the
        Doxygen tool. This is currently for Doxygen 1.4.6.
    """
    # scanner that pulls the documented sources out of the Doxyfile
    doxyfile_scanner = env.Scanner(
        DoxySourceScan,
        "DoxySourceScan",
        scan_check = DoxySourceScanCheck,
    )

    import SCons.Builder
    # run doxygen next to the Doxyfile, teeing its output into ,doxylog
    doxyfile_builder = SCons.Builder.Builder(
        action = "cd ${SOURCE.dir} && (${DOXYGEN} ${SOURCE.file} 2>&1 |tee ,doxylog)",
        emitter = DoxyEmitter,
        target_factory = env.fs.Entry,
        single_source = True,
        source_scanner = doxyfile_scanner,
    )

    env.Append(BUILDERS = {'Doxygen': doxyfile_builder})

    # NOTE(review): Replace() clobbers a $DOXYGEN the user may have set
    # before loading this tool; SetDefault() might be intended -- confirm
    env.Replace(DOXYGEN = 'doxygen')
|
|
|
|
|
|
2008-08-28 02:57:12 +02:00
|
|
|
|
|
|
|
|
def exists(env):
    """ Make sure doxygen exists: report what SCons detects for
        the 'doxygen' executable, if anything. """
    detected = env.Detect("doxygen")
    return detected
|
2008-08-28 02:57:12 +02:00
|
|
|
|