Commit 577b80c
Merge pull request #6977 from tk0miya/refactor_glossary2
Fix #6559: Wrong node-ids are generated in glossary directive
tk0miya committed Jan 2, 2020
2 parents efe1866 + 1c5a5bb commit 577b80c
Showing 7 changed files with 89 additions and 26 deletions.
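
For context, a minimal illustration (not part of this commit, assuming only docutils) of the collision behind #6559: the pre-fix glossary code derived a term's node-id straight from nodes.make_id('term-' + termtext), so a glossary term "foo" and an explicit ``.. _term-foo:`` label compete for the same id — the case exercised by test_glossary_conflicted_labels below.

# Illustration only -- not part of this diff.
from docutils import nodes

label_id = nodes.make_id('term-foo')           # id claimed by an explicit ``.. _term-foo:`` target
old_term_id = nodes.make_id('term-' + 'foo')   # id the pre-fix glossary code built for the term "foo"
assert label_id == old_term_id == 'term-foo'   # two different targets, one id -> wrong node-ids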
2 changes: 2 additions & 0 deletions CHANGES
@@ -12,6 +12,7 @@ Deprecated

* The ``decode`` argument of ``sphinx.pycode.ModuleAnalyzer()``
* ``sphinx.directives.other.Index``
* ``sphinx.environment.temp_data['gloss_entries']``
* ``sphinx.environment.BuildEnvironment.indexentries``
* ``sphinx.environment.collectors.indexentries.IndexEntriesCollector``
* ``sphinx.io.FiletypeNotFoundError``
@@ -36,6 +37,7 @@ Bugs fixed
* #6925: html: Remove redundant type="text/javascript" from <script> elements
* #6906, #6907: autodoc: failed to read the source codes encoded in cp1251
* #6961: latex: warning for babel shown twice
* #6559: Wrong node-ids are generated in glossary directive

Testing
--------
5 changes: 5 additions & 0 deletions doc/extdev/deprecated.rst
@@ -36,6 +36,11 @@ The following is a list of deprecated interfaces.
- 4.0
- ``sphinx.domains.index.IndexDirective``

* - ``sphinx.environment.temp_data['gloss_entries']``
- 2.4
- 4.0
- ``documents.nameids``

* - ``sphinx.environment.BuildEnvironment.indexentries``
- 2.4
- 4.0
45 changes: 28 additions & 17 deletions sphinx/domains/std.py
@@ -30,7 +30,7 @@
from sphinx.roles import XRefRole
from sphinx.util import ws_re, logging, docname_join
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import clean_astext, make_refnode
from sphinx.util.nodes import clean_astext, make_id, make_refnode
from sphinx.util.typing import RoleFunction

if False:
@@ -243,34 +243,44 @@ def split_term_classifiers(line: str) -> List[Optional[str]]:


def make_glossary_term(env: "BuildEnvironment", textnodes: Iterable[Node], index_key: str,
source: str, lineno: int, new_id: str = None) -> nodes.term:
source: str, lineno: int, node_id: str = None,
document: nodes.document = None) -> nodes.term:
# get a text-only representation of the term and register it
# as a cross-reference target
term = nodes.term('', '', *textnodes)
term.source = source
term.line = lineno

gloss_entries = env.temp_data.setdefault('gloss_entries', set())
termtext = term.astext()
if new_id is None:
new_id = nodes.make_id('term-' + termtext)
if new_id == 'term':
# the term is not good for node_id. Generate it by sequence number instead.
new_id = 'term-%d' % env.new_serialno('glossary')
while new_id in gloss_entries:
new_id = 'term-%d' % env.new_serialno('glossary')
gloss_entries.add(new_id)

if node_id:
# node_id is given from outside (mainly i18n module), use it forcedly
pass
elif document:
node_id = make_id(env, document, 'term', termtext)
term['ids'].append(node_id)
document.note_explicit_target(term)
else:
warnings.warn('make_glossary_term() expects document is passed as an argument.',
RemovedInSphinx40Warning)
gloss_entries = env.temp_data.setdefault('gloss_entries', set())
node_id = nodes.make_id('term-' + termtext)
if node_id == 'term':
# "term" is not good for node_id. Generate it by sequence number instead.
node_id = 'term-%d' % env.new_serialno('glossary')

while node_id in gloss_entries:
node_id = 'term-%d' % env.new_serialno('glossary')
gloss_entries.add(node_id)
term['ids'].append(node_id)

std = cast(StandardDomain, env.get_domain('std'))
std.add_object('term', termtext.lower(), env.docname, new_id)
std.add_object('term', termtext.lower(), env.docname, node_id)

# add an index entry too
indexnode = addnodes.index()
indexnode['entries'] = [('single', termtext, new_id, 'main', index_key)]
indexnode['entries'] = [('single', termtext, node_id, 'main', index_key)]
indexnode.source, indexnode.line = term.source, term.line
term.append(indexnode)
term['ids'].append(new_id)
term['names'].append(new_id)

return term

@@ -368,7 +378,8 @@ def run(self) -> List[Node]:
textnodes, sysmsg = self.state.inline_text(parts[0], lineno)

# use first classifier as a index key
term = make_glossary_term(self.env, textnodes, parts[1], source, lineno)
term = make_glossary_term(self.env, textnodes, parts[1], source, lineno,
document=self.state.document)
term.rawsource = line
system_messages.extend(sysmsg)
termtexts.append(term.astext())
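The new code path above hands node-id allocation to the make_id() helper added in sphinx/util/nodes.py later in this diff. As a rough sketch of that fallback — a simplified stand-in, not the actual implementation, with a plain set and counter replacing document.ids and env.new_serialno():

# Simplified sketch of the fallback make_id() provides; see sphinx/util/nodes.py below.
import itertools
from docutils import nodes

serial = itertools.count()   # stands in for env.new_serialno('term')

def sketch_make_id(taken_ids, prefix, term):
    node_id = nodes.make_id('%s-%s' % (prefix, term))
    if node_id == prefix:                    # the term yielded no usable characters
        node_id = None
    while node_id is None or node_id in taken_ids:
        node_id = '%s-%d' % (prefix, next(serial))
    # in Sphinx the caller reserves the id via document.note_explicit_target()
    return node_id

taken = {'term-foo'}                          # already claimed by an explicit ``.. _term-foo:`` label
print(sketch_make_id(taken, 'term', 'foo'))   # -> 'term-0'  (serial-number fallback)
print(sketch_make_id(taken, 'term', 'bar'))   # -> 'term-bar'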
11 changes: 3 additions & 8 deletions sphinx/transforms/i18n.py
@@ -202,18 +202,13 @@ def apply(self, **kwargs: Any) -> None:

# glossary terms update refid
if isinstance(node, nodes.term):
gloss_entries = self.env.temp_data.setdefault('gloss_entries', set())
for _id in node['names']:
if _id in gloss_entries:
gloss_entries.remove(_id)

for _id in node['ids']:
parts = split_term_classifiers(msgstr)
patch = publish_msgstr(self.app, parts[0], source,
node.line, self.config, settings)
patch = make_glossary_term(self.env, patch, parts[1],
source, node.line, _id)
node['ids'] = patch['ids']
node['names'] = patch['names']
source, node.line, _id,
self.document)
processed = True

# update leaves with processed nodes
23 changes: 23 additions & 0 deletions sphinx/util/nodes.py
@@ -28,6 +28,7 @@
# For type annotation
from typing import Type # for python3.5.1
from sphinx.builders import Builder
from sphinx.environment import BuildEnvironment
from sphinx.utils.tags import Tags

logger = logging.getLogger(__name__)
@@ -435,6 +436,28 @@ def inline_all_toctrees(builder: "Builder", docnameset: Set[str], docname: str,
return tree


def make_id(env: "BuildEnvironment", document: nodes.document,
prefix: str = '', term: str = None) -> str:
"""Generate an appropriate node_id for given *prefix* and *term*."""
node_id = None
if prefix:
idformat = prefix + "-%s"
else:
idformat = document.settings.id_prefix + "%s"

# try to generate node_id by *term*
if prefix and term:
node_id = nodes.make_id(idformat % term)
if node_id == prefix:
# *term* is not good to generate a node_id.
node_id = None

while node_id is None or node_id in document.ids:
node_id = idformat % env.new_serialno(prefix)

return node_id


def make_refnode(builder: "Builder", fromdocname: str, todocname: str, targetid: str,
child: Node, title: str = None) -> nodes.reference:
"""Shortcut to create a reference node."""
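To see the new helper in action outside a full Sphinx build, here is a small example along the lines of test_make_id below; FakeEnv is a hypothetical stand-in for BuildEnvironment that provides only the new_serialno() method make_id() actually uses.

# Exercising sphinx.util.nodes.make_id() with a fake environment (illustration only).
from docutils import frontend
from docutils.parsers import rst
from docutils.utils import new_document

from sphinx.util.nodes import make_id

class FakeEnv:
    def __init__(self):
        self.serials = {}
    def new_serialno(self, category=''):
        n = self.serials.get(category, 0)
        self.serials[category] = n + 1
        return n

settings = frontend.OptionParser(components=(rst.Parser,)).get_default_values()
settings.id_prefix = 'id'
document = new_document('dummy.rst', settings)

env = FakeEnv()
assert make_id(env, document, 'term', 'Sphinx') == 'term-sphinx'
document.ids['term-sphinx'] = True                            # the id is now taken in this document
assert make_id(env, document, 'term', 'Sphinx') == 'term-0'   # falls back to a serial number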
10 changes: 10 additions & 0 deletions tests/test_domain_std.py
@@ -255,6 +255,16 @@ def test_glossary_alphanumeric(app):
assert ("/", "/", "term", "index", "term-0", -1) in objects


def test_glossary_conflicted_labels(app):
text = (".. _term-foo:\n"
".. glossary::\n"
"\n"
" foo\n")
restructuredtext.parse(app, text)
objects = list(app.env.get_domain("std").get_objects())
assert ("foo", "foo", "term", "index", "term-0", -1) in objects


def test_cmdoption(app):
text = (".. program:: ls\n"
"\n"
19 changes: 18 additions & 1 deletion tests/test_util_nodes.py
@@ -17,7 +17,9 @@
from docutils.utils import new_document

from sphinx.transforms import ApplySourceWorkaround
from sphinx.util.nodes import NodeMatcher, extract_messages, clean_astext, split_explicit_title
from sphinx.util.nodes import (
NodeMatcher, extract_messages, clean_astext, make_id, split_explicit_title
)


def _transform(doctree):
@@ -27,6 +29,7 @@ def _transform(doctree):
def create_new_document():
settings = frontend.OptionParser(
components=(rst.Parser,)).get_default_values()
settings.id_prefix = 'id'
document = new_document('dummy.txt', settings)
return document

@@ -180,6 +183,20 @@ def test_clean_astext():
assert 'hello world' == clean_astext(node)


def test_make_id(app):
document = create_new_document()
assert make_id(app.env, document) == 'id0'
assert make_id(app.env, document, 'term') == 'term-0'
assert make_id(app.env, document, 'term', 'Sphinx') == 'term-sphinx'

# when same ID is already registered
document.ids['term-sphinx'] = True
assert make_id(app.env, document, 'term', 'Sphinx') == 'term-1'

document.ids['term-2'] = True
assert make_id(app.env, document, 'term') == 'term-3'


@pytest.mark.parametrize(
'title, expected',
[
