import argparse import hashlib import html import json import re import xml.sax.saxutils as xml from abc import abstractmethod from collections.abc import Mapping, Sequence from pathlib import Path from typing import Any, Callable, cast, ClassVar, Generic, get_args, NamedTuple from markdown_it.token import Token from . import md, options from .html import HTMLRenderer, UnresolvedXrefError from .manual_structure import check_structure, FragmentType, is_include, make_xml_id, TocEntry, TocEntryType, XrefTarget from .md import Converter, Renderer class BaseConverter(Converter[md.TR], Generic[md.TR]): # per-converter configuration for ns:arg=value arguments to include blocks, following # the include type. html converters need something like this to support chunking, or # another external method like the chunktocs docbook uses (but block options seem like # a much nicer of doing this). INCLUDE_ARGS_NS: ClassVar[str] INCLUDE_FRAGMENT_ALLOWED_ARGS: ClassVar[set[str]] = set() INCLUDE_OPTIONS_ALLOWED_ARGS: ClassVar[set[str]] = set() _base_paths: list[Path] _current_type: list[TocEntryType] def convert(self, infile: Path, outfile: Path) -> None: self._base_paths = [ infile ] self._current_type = ['book'] try: tokens = self._parse(infile.read_text()) self._postprocess(infile, outfile, tokens) converted = self._renderer.render(tokens) outfile.write_text(converted) except Exception as e: raise RuntimeError(f"failed to render manual {infile}") from e def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> None: pass def _handle_headings(self, tokens: list[Token], *, on_heading: Callable[[Token,str],None]) -> None: # Headings in a globally numbered order # h1 to h6 curr_heading_pos: list[int] = [] for token in tokens: if token.type == "heading_open": if token.tag not in ["h1", "h2", "h3", "h4", "h5", "h6"]: raise RuntimeError(f"Got invalid heading tag {token.tag} in line {token.map[0] + 1 if token.map else 'NOT FOUND'}. 
Only h1 to h6 headings are allowed.") idx = int(token.tag[1:]) - 1 if idx >= len(curr_heading_pos): # extend the list if necessary curr_heading_pos.extend([0 for _i in range(idx+1 - len(curr_heading_pos))]) curr_heading_pos = curr_heading_pos[:idx+1] curr_heading_pos[-1] += 1 ident = ".".join(f"{a}" for a in curr_heading_pos) on_heading(token,ident) def _parse(self, src: str, *, auto_id_prefix: None | str = None) -> list[Token]: tokens = super()._parse(src) if auto_id_prefix: def set_token_ident(token: Token, ident: str) -> None: if "id" not in token.attrs: token.attrs["id"] = f"{auto_id_prefix}-{ident}" self._handle_headings(tokens, on_heading=set_token_ident) check_structure(self._current_type[-1], tokens) for token in tokens: if not is_include(token): continue directive = token.info[12:].split() if not directive: continue args = { k: v for k, _sep, v in map(lambda s: s.partition('='), directive[1:]) } typ = directive[0] if typ == 'options': token.type = 'included_options' self._process_include_args(token, args, self.INCLUDE_OPTIONS_ALLOWED_ARGS) self._parse_options(token, args) else: fragment_type = typ.removesuffix('s') if fragment_type not in get_args(FragmentType): raise RuntimeError(f"unsupported structural include type '{typ}'") self._current_type.append(cast(FragmentType, fragment_type)) token.type = 'included_' + typ self._process_include_args(token, args, self.INCLUDE_FRAGMENT_ALLOWED_ARGS) self._parse_included_blocks(token, args) self._current_type.pop() return tokens def _process_include_args(self, token: Token, args: dict[str, str], allowed: set[str]) -> None: ns = self.INCLUDE_ARGS_NS + ":" args = { k[len(ns):]: v for k, v in args.items() if k.startswith(ns) } if unknown := set(args.keys()) - allowed: assert token.map raise RuntimeError(f"unrecognized include argument in line {token.map[0] + 1}", unknown) token.meta['include-args'] = args def _parse_included_blocks(self, token: Token, block_args: dict[str, str]) -> None: assert token.map included = 
token.meta['included'] = [] for (lnum, line) in enumerate(token.content.splitlines(), token.map[0] + 2): line = line.strip() path = self._base_paths[-1].parent / line if path in self._base_paths: raise RuntimeError(f"circular include found in line {lnum}") try: self._base_paths.append(path) with open(path, 'r') as f: prefix = None if "auto-id-prefix" in block_args: # include the current file number to prevent duplicate ids within include blocks prefix = f"{block_args.get('auto-id-prefix')}-{lnum}" tokens = self._parse(f.read(), auto_id_prefix=prefix) included.append((tokens, path)) self._base_paths.pop() except Exception as e: raise RuntimeError(f"processing included file {path} from line {lnum}") from e def _parse_options(self, token: Token, block_args: dict[str, str]) -> None: assert token.map items = {} for (lnum, line) in enumerate(token.content.splitlines(), token.map[0] + 2): if len(args := line.split(":", 1)) != 2: raise RuntimeError(f"options directive with no argument in line {lnum}") (k, v) = (args[0].strip(), args[1].strip()) if k in items: raise RuntimeError(f"duplicate options directive {k} in line {lnum}") items[k] = v try: id_prefix = items.pop('id-prefix') varlist_id = items.pop('list-id') source = items.pop('source') except KeyError as e: raise RuntimeError(f"options directive {e} missing in block at line {token.map[0] + 1}") if items.keys(): raise RuntimeError( f"unsupported options directives in block at line {token.map[0] + 1}", " ".join(items.keys())) try: with open(self._base_paths[-1].parent / source, 'r') as f: token.meta['id-prefix'] = id_prefix token.meta['list-id'] = varlist_id token.meta['source'] = json.load(f) except Exception as e: raise RuntimeError(f"processing options block in line {token.map[0] + 1}") from e class RendererMixin(Renderer): _toplevel_tag: str _revision: str def __init__(self, toplevel_tag: str, revision: str, *args: Any, **kwargs: Any): super().__init__(*args, **kwargs) self._toplevel_tag = toplevel_tag 
self._revision = revision self.rules |= { 'included_sections': lambda *args: self._included_thing("section", *args), 'included_chapters': lambda *args: self._included_thing("chapter", *args), 'included_preface': lambda *args: self._included_thing("preface", *args), 'included_parts': lambda *args: self._included_thing("part", *args), 'included_appendix': lambda *args: self._included_thing("appendix", *args), 'included_options': self.included_options, } def render(self, tokens: Sequence[Token]) -> str: # books get special handling because they have *two* title tags. doing this with # generic code is more complicated than it's worth. the checks above have verified # that both titles actually exist. if self._toplevel_tag == 'book': return self._render_book(tokens) return super().render(tokens) @abstractmethod def _render_book(self, tokens: Sequence[Token]) -> str: raise NotImplementedError() @abstractmethod def _included_thing(self, tag: str, token: Token, tokens: Sequence[Token], i: int) -> str: raise NotImplementedError() @abstractmethod def included_options(self, token: Token, tokens: Sequence[Token], i: int) -> str: raise NotImplementedError() class HTMLParameters(NamedTuple): generator: str stylesheets: Sequence[str] scripts: Sequence[str] # number of levels in the rendered table of contents. tables are prepended to # the content they apply to (entire document / document chunk / top-level section # of a chapter), setting a depth of 0 omits the respective table. 
toc_depth: int chunk_toc_depth: int section_toc_depth: int media_dir: Path class ManualHTMLRenderer(RendererMixin, HTMLRenderer): _base_path: Path _in_dir: Path _html_params: HTMLParameters def __init__(self, toplevel_tag: str, revision: str, html_params: HTMLParameters, manpage_urls: Mapping[str, str], xref_targets: dict[str, XrefTarget], in_dir: Path, base_path: Path): super().__init__(toplevel_tag, revision, manpage_urls, xref_targets) self._in_dir = in_dir self._base_path = base_path.absolute() self._html_params = html_params def _pull_image(self, src: str) -> str: src_path = Path(src) content = (self._in_dir / src_path).read_bytes() # images may be used more than once, but we want to store them only once and # in an easily accessible (ie, not input-file-path-dependent) location without # having to maintain a mapping structure. hashing the file and using the hash # as both the path of the final image provides both. content_hash = hashlib.sha3_256(content).hexdigest() target_name = f"{content_hash}{src_path.suffix}" target_path = self._base_path / self._html_params.media_dir / target_name target_path.write_bytes(content) return f"./{self._html_params.media_dir}/{target_name}" def _push(self, tag: str, hlevel_offset: int) -> Any: result = (self._toplevel_tag, self._headings, self._attrspans, self._hlevel_offset, self._in_dir) self._hlevel_offset += hlevel_offset self._toplevel_tag, self._headings, self._attrspans = tag, [], [] return result def _pop(self, state: Any) -> None: (self._toplevel_tag, self._headings, self._attrspans, self._hlevel_offset, self._in_dir) = state def _render_book(self, tokens: Sequence[Token]) -> str: assert tokens[4].children title_id = cast(str, tokens[0].attrs.get('id', "")) title = self._xref_targets[title_id].title # subtitles don't have IDs, so we can't use xrefs to get them subtitle = self.renderInline(tokens[4].children) toc = TocEntry.of(tokens[0]) return "\n".join([ self._file_header(toc), '
List of {kind}
' f'Table of Contents
' if print_title else "", f'{name}
', name, None, target_file))
elif bt.type.startswith('included_'):
sub_file = bt.meta['include-args'].get('into-file', target_file)
subtyp = bt.type.removeprefix('included_').removesuffix('s')
for si, (sub, _path) in enumerate(bt.meta['included']):
result += self._collect_ids(sub, sub_file, subtyp, si == 0 and sub_file != target_file)
elif bt.type == 'example_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append((id, 'example', tokens[i + 2], target_file, False))
elif bt.type == 'figure_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append((id, 'figure', tokens[i + 2], target_file, False))
elif bt.type == 'footnote_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append(XrefTarget(id, "???", None, None, target_file))
elif bt.type == 'footnote_ref' and (id := cast(str, bt.attrs.get('id', ''))):
result.append(XrefTarget(id, "???", None, None, target_file))
elif bt.type == 'inline':
assert bt.children is not None
result += self._collect_ids(bt.children, target_file, typ, False)
elif id := cast(str, bt.attrs.get('id', '')):
# anchors and examples have no titles we could use, but we'll have to put
# *something* here to communicate that there's no title.
result.append(XrefTarget(id, "???", None, None, target_file))
return result
def _render_xref(self, id: str, typ: str, inlines: Token, path: str, drop_fragment: bool) -> XrefTarget:
    """Resolve a collected id into a concrete XrefTarget.

    ``inlines`` is the title inline token; ``typ`` selects docbook-compatible
    formatting for appendices, examples/figures, chapters, books/parts, and
    plain sections. The plain-text ``title`` is derived by stripping tags from
    the rendered title HTML. Raises UnresolvedXrefError (via renderInline) if
    the title references an id that is not resolved yet.
    """
    assert inlines.children
    title_html = self._renderer.renderInline(inlines.children)
    if typ == 'appendix':
        # NOTE the docbook compat is strong here
        n = self._next_appendix_id()
        prefix = f"Appendix\u00A0{n}.\u00A0"
        # HACK for docbook compat: prefix the title inlines with appendix id if
        # necessary. the alternative is to mess with titlepage rendering in headings,
        # which seems just a lot worse than this
        prefix_tokens = [Token(type='text', tag='', nesting=0, content=prefix)]
        inlines.children = prefix_tokens + list(inlines.children)
        title = prefix + title_html
        toc_html = f"{n}. {title_html}"
        title_html = f"Appendix {n}"
    elif typ in ['example', 'figure']:
        # skip the prepended `{Example,Figure} N. ` from numbering
        toc_html, title = self._renderer.renderInline(inlines.children[2:]), title_html
        # xref title wants only the prepended text, sans the trailing colon and space
        title_html = self._renderer.renderInline(inlines.children[0:1])
    else:
        toc_html, title = title_html, title_html
        # NOTE(review): the chapter branch below wraps title_html in an f-string
        # that adds nothing as written — surrounding markup may have been lost
        # in transit; confirm against the originally rendered output.
        title_html = (
            f"{title_html}"
            if typ == 'chapter'
            else title_html if typ in [ 'book', 'part' ]
            else f'the section called “{title_html}”'
        )
    return XrefTarget(id, title_html, toc_html, re.sub('<.*?>', '', title), path, drop_fragment)
def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> None:
    """Number examples/figures, then resolve all xref targets.

    Resolution runs in rounds because an xref title may itself reference
    another target; items that fail with UnresolvedXrefError are deferred to
    the next round. When a whole round makes no progress we do one more round
    with ``failed`` set so the first remaining error is reported. If every
    target ends up in a single output file, fragments-only links suffice and
    all targets are rewritten with ``drop_target=True``.
    """
    self._number_block('example', "Example", tokens)
    self._number_block('figure', "Figure", tokens)
    xref_queue = self._collect_ids(tokens, outfile.name, 'book', True)

    failed = False
    deferred = []
    while xref_queue:
        for item in xref_queue:
            try:
                target = item if isinstance(item, XrefTarget) else self._render_xref(*item)
            except UnresolvedXrefError:
                if failed:
                    raise
                deferred.append(item)
                continue
            if target.id in self._xref_targets:
                raise RuntimeError(f"found duplicate id #{target.id}")
            self._xref_targets[target.id] = target
        if len(deferred) == len(xref_queue):
            failed = True # do another round and report the first error
        # BUG FIX: the original did `xref_queue = deferred`, aliasing the two
        # lists. after the first round the progress check above then compared a
        # list against itself (always equal, so `failed` was set prematurely),
        # and later rounds appended to the very list being iterated. swap in a
        # fresh deferred list for each round instead.
        xref_queue, deferred = deferred, []

    paths_seen = set()
    for t in self._xref_targets.values():
        paths_seen.add(t.path)
    if len(paths_seen) == 1:
        # only one output file: links never need the file part, only fragments
        for (k, t) in self._xref_targets.items():
            self._xref_targets[k] = XrefTarget(
                t.id,
                t.title_html,
                t.toc_html,
                t.title,
                t.path,
                t.drop_fragment,
                drop_target=True
            )
    TocEntry.collect_and_link(self._xref_targets, tokens)
def _build_cli_html(p: argparse.ArgumentParser) -> None:
    """Register the ``html`` subcommand's arguments on parser ``p``.

    ``--stylesheet`` and ``--script`` are repeatable (``action='append'``);
    the toc depth options mirror the HTMLParameters fields.
    """
    p.add_argument('--manpage-urls', required=True)
    p.add_argument('--revision', required=True)
    p.add_argument('--generator', default='nixos-render-docs')
    p.add_argument('--stylesheet', default=[], action='append')
    p.add_argument('--script', default=[], action='append')
    p.add_argument('--toc-depth', default=1, type=int)
    p.add_argument('--chunk-toc-depth', default=1, type=int)
    p.add_argument('--section-toc-depth', default=0, type=int)
    p.add_argument('--media-dir', default="media", type=Path)
    p.add_argument('infile', type=Path)
    p.add_argument('outfile', type=Path)
def _run_cli_html(args: argparse.Namespace) -> None:
    """Run the ``html`` subcommand: build an HTMLConverter from the parsed
    CLI arguments and convert infile to outfile."""
    with open(args.manpage_urls, 'r') as manpage_urls:
        # renamed local from `md` to avoid shadowing the module-level `md` import
        converter = HTMLConverter(
            args.revision,
            HTMLParameters(args.generator, args.stylesheet, args.script, args.toc_depth,
                           args.chunk_toc_depth, args.section_toc_depth, args.media_dir),
            json.load(manpage_urls))
        converter.convert(args.infile, args.outfile)
def build_cli(p: argparse.ArgumentParser) -> None:
    """Attach the output-format subcommands (currently only ``html``) to ``p``."""
    formats = p.add_subparsers(dest='format', required=True)
    _build_cli_html(formats.add_parser('html'))
def run_cli(args: argparse.Namespace) -> None:
    """Dispatch on ``args.format``; raises RuntimeError for formats that the
    CLI advertises but this module does not implement."""
    if args.format == 'html':
        _run_cli_html(args)
    else:
        raise RuntimeError('format not hooked up', args)