-rw-r--r--   abs/extra/epydoc/PKGBUILD                     55
-rw-r--r--   abs/extra/epydoc/handle-docutils-0.6.patch    47
-rw-r--r--   abs/extra/epydoc/python26-tokenizer.patch     65
-rw-r--r--   abs/extra/epydoc/string-exceptions.patch      18
4 files changed, 185 insertions(+), 0 deletions(-)
diff --git a/abs/extra/epydoc/PKGBUILD b/abs/extra/epydoc/PKGBUILD
new file mode 100644
index 0000000..7ccbd55
--- /dev/null
+++ b/abs/extra/epydoc/PKGBUILD
@@ -0,0 +1,55 @@
+# $Id$
+# Maintainer: Alexander F Rødseth <xyproto@archlinux.org>
+# Contributor: Chris Brannon <chris@the-brannons.com>
+# Contributor: Douglas Soares de Andrade <dsa@aur.archlinux.org>
+# Contributor: Angelo Theodorou <encelo@users.sourceforge.net>
+# Contributor: SpepS <dreamspepser at yahoo dot it>
+
+pkgname=epydoc
+pkgver=3.0.1
+pkgrel=4
+pkgdesc='Tool for generating API documentation for Python modules, based on their docstrings'
+arch=('any')
+license=('MIT')
+url='https://epydoc.sourceforge.net/'
+depends=('python2' 'docutils')
+optdepends=('tk: needed for epydocgui'
+            'texlive-bin: needed for PDF conversion'
+            'graphviz: needed for graph generation')
+source=("https://downloads.sourceforge.net/sourceforge/$pkgname/$pkgname-$pkgver.zip"
+        'handle-docutils-0.6.patch'
+        'python26-tokenizer.patch'
+        'string-exceptions.patch')
+sha256sums=('574c1dca1b0e8783be0121c32f589cf56255cdb288b4d4e52e60f0a8bcf799cb'
+            '84d6724e0fcb3a5307963cbe37875e72110bf707781c64e7ddff0dfe96eeb1ab'
+            '8bfd54be68ee8e743ab470370042e40130e9cf8c0430d493401fa44eae2d66f6'
+            '099a94ba394f0c1c4f3f721fc3c9cf982a54f182be457faa03a7bb54188c8364')
+
+prepare() {
+  cd "$pkgname-$pkgver"
+
+  # py2 fix
+  sed -i "s|env python|&2|" `grep -Erl "env python" .`
+
+  patch -p1 -i "$srcdir/handle-docutils-0.6.patch"
+  patch -p1 -i "$srcdir/python26-tokenizer.patch"
+  patch -p1 -i "$srcdir/string-exceptions.patch"
+}
+
+build() {
+  cd "$pkgname-$pkgver"
+
+  python2 setup.py build
+}
+
+package() {
+  cd "$pkgname-$pkgver"
+
+  python2 setup.py install --root="$pkgdir" --prefix=/usr --optimize=1
+
+  install -d "$pkgdir/usr/share/man/man1"
+  install -m644 man/*.1 "$pkgdir/usr/share/man/man1"
+  install -Dm644 LICENSE.txt "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+}
+
+# vim:ts=2 sw=2 et:
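A note on the sed line in prepare(): `grep -Erl "env python" .` lists every file under the source tree that mentions "env python", and the `&` in the sed replacement re-inserts the matched text, so each shebang becomes "env python2". A rough, purely illustrative Python sketch of the same rewrite (the function name and directory handling here are assumptions, not part of the PKGBUILD):

    # Illustrative only: mirror the sed/grep one-liner from prepare() in Python.
    # It rewrites every occurrence of "env python" to "env python2" in place.
    import os

    def fix_shebangs(root="."):
        for dirpath, _dirs, files in os.walk(root):
            for name in files:
                path = os.path.join(dirpath, name)
                try:
                    with open(path) as handle:
                        text = handle.read()
                except (IOError, OSError, UnicodeDecodeError):
                    continue  # unreadable or binary file: skip it
                if "env python" in text:
                    with open(path, "w") as handle:
                        handle.write(text.replace("env python", "env python2"))

    if __name__ == "__main__":
        fix_shebangs()  # the PKGBUILD runs the equivalent inside $pkgname-$pkgver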
diff --git a/abs/extra/epydoc/handle-docutils-0.6.patch b/abs/extra/epydoc/handle-docutils-0.6.patch
new file mode 100644
index 0000000..53f941c
--- /dev/null
+++ b/abs/extra/epydoc/handle-docutils-0.6.patch
@@ -0,0 +1,47 @@
+# Description: Handle problems encountered with docutils 0.6.
+# The problem here is that the child.data element does not always exist any
+# more. Apparently, the child element is sometimes a string instead. So, we
+# work around it by only executing the code in question if child.data can be
+# referenced. Thanks to Thomas Hille for research and the initial patch.
+# Bug-Debian: http://bugs.debian.org/561793
+# Author: Kenneth J. Pronovici <pronovic@debian.org>
+--- a/epydoc/markup/restructuredtext.py
++++ b/epydoc/markup/restructuredtext.py
+@@ -304,13 +304,14 @@ class _SummaryExtractor(NodeVisitor):
+         # Extract the first sentence.
+         for child in node:
+             if isinstance(child, docutils.nodes.Text):
+-                m = self._SUMMARY_RE.match(child.data)
+-                if m:
+-                    summary_pieces.append(docutils.nodes.Text(m.group(1)))
+-                    other = child.data[m.end():]
+-                    if other and not other.isspace():
+-                        self.other_docs = True
+-                    break
++                if hasattr(child, 'data'):
++                    m = self._SUMMARY_RE.match(child.data)
++                    if m:
++                        summary_pieces.append(docutils.nodes.Text(m.group(1)))
++                        other = child.data[m.end():]
++                        if other and not other.isspace():
++                            self.other_docs = True
++                        break
+             summary_pieces.append(child)
+
+         summary_doc = self.document.copy() # shallow copy
+@@ -489,10 +490,11 @@ class _SplitFieldsTranslator(NodeVisitor
+             if (len(fbody[0]) > 0 and
+                 isinstance(fbody[0][0], docutils.nodes.Text)):
+                 child = fbody[0][0]
+-                if child.data[:1] in ':-':
+-                    child.data = child.data[1:].lstrip()
+-                elif child.data[:2] in (' -', ' :'):
+-                    child.data = child.data[2:].lstrip()
++                if hasattr(child, 'data'):
++                    if child.data[:1] in ':-':
++                        child.data = child.data[1:].lstrip()
++                    elif child.data[:2] in (' -', ' :'):
++                        child.data = child.data[2:].lstrip()
+
+             # Wrap the field body, and add a new field
+             self._add_field(tagname, arg, fbody)
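As the description above explains, with docutils 0.6 a child node may be a plain string rather than a Text node exposing a .data attribute, so the patch guards every .data access with hasattr(). A minimal sketch of that defensive pattern, detached from epydoc (the TextNode class and sample values are made up for illustration):

    # Illustrative only: the hasattr() guard the patch applies.  Nodes that
    # carry a .data attribute are edited; anything else passes through untouched.
    class TextNode(object):
        def __init__(self, data):
            self.data = data

    def strip_marker(child):
        # Mirror of the second hunk: drop a leading ':' or '-' separator.
        if hasattr(child, 'data'):
            if child.data[:1] in ':-':
                child.data = child.data[1:].lstrip()
            elif child.data[:2] in (' -', ' :'):
                child.data = child.data[2:].lstrip()
        return child

    print(strip_marker(TextNode(': description')).data)  # -> 'description'
    print(strip_marker('plain string'))                   # unchanged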
diff --git a/abs/extra/epydoc/python26-tokenizer.patch b/abs/extra/epydoc/python26-tokenizer.patch
new file mode 100644
index 0000000..c4956ad
--- /dev/null
+++ b/abs/extra/epydoc/python26-tokenizer.patch
@@ -0,0 +1,65 @@
+# Description: Fix the tokenizer so comment docstrings work with Python 2.6.
+# Bug: https://sourceforge.net/tracker/index.php?func=detail&aid=2585292&group_id=32455&atid=405618
+# Bug-Debian: http://bugs.debian.org/590112
+# Origin: https://sourceforge.net/tracker/?func=detail&aid=2872545&group_id=32455&atid=405620
+# Author: Andre Malo (ndparker)
+# Reviewed-by: Kenneth J. Pronovici <pronovic@debian.org>
+--- a/epydoc/docparser.py
++++ b/epydoc/docparser.py
+@@ -72,6 +72,26 @@
+ from epydoc.compat import *
+
+ ######################################################################
++## Tokenizer change in 2.6
++######################################################################
++
++def comment_includes_nl():
++    """ Determine whether comments are parsed as one or two tokens... """
++    readline = iter(u'\n#\n\n'.splitlines(True)).next
++    tokens = [
++        token.tok_name[tup[0]] for tup in tokenize.generate_tokens(readline)
++    ]
++    if tokens == ['NL', 'COMMENT', 'NL', 'ENDMARKER']:
++        return True
++    elif tokens == ['NL', 'COMMENT', 'NL', 'NL', 'ENDMARKER']:
++        return False
++    raise AssertionError(
++        "Tokenizer returns unexpected tokens: %r" % tokens
++    )
++
++comment_includes_nl = comment_includes_nl()
++
++######################################################################
+ ## Doc Parser
+ ######################################################################
+
+@@ -520,6 +540,10 @@
+     # inside that block, not outside it.
+     start_group = None
+
++    # If the comment tokens do not include the NL, every comment token
++    # sets this to True in order to swallow the next NL token unprocessed.
++    comment_nl_waiting = False
++
+     # Check if the source file declares an encoding.
+     encoding = get_module_encoding(module_doc.filename)
+
+@@ -570,7 +594,9 @@
+ # then discard them: blank lines are not allowed between a
+ # comment block and the thing it describes.
+ elif toktype == tokenize.NL:
+- if comments and not line_toks:
++ if comment_nl_waiting:
++ comment_nl_waiting = False
++ elif comments and not line_toks:
+ log.warning('Ignoring docstring comment block followed by '
+ 'a blank line in %r on line %r' %
+ (module_doc.filename, srow-1))
+@@ -578,6 +604,7 @@
+
+         # Comment token: add to comments if appropriate.
+         elif toktype == tokenize.COMMENT:
++            comment_nl_waiting = not comment_includes_nl
+             if toktext.startswith(COMMENT_DOCSTRING_MARKER):
+                 comment_line = toktext[len(COMMENT_DOCSTRING_MARKER):].rstrip()
+                 if comment_line.startswith(" "):
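The probe this patch adds at the top of docparser.py runs tokenize.generate_tokens() over a tiny snippet to learn whether a COMMENT token still carries its trailing newline (pre-2.6 behaviour) or is followed by a separate NL token (2.6 and later); the comment_nl_waiting flag then swallows that extra NL so comment docstrings keep being grouped as before. A standalone sketch of the same probe (written so it also runs on current Pythons, which is why it avoids the bound .next method used in the patch):

    # Illustrative only: ask the tokenizer how it reports the newline after a comment.
    import token
    import tokenize

    def comment_includes_nl():
        lines = iter(u'\n#\n\n'.splitlines(True))

        def readline():
            return next(lines, '')             # '' signals end of input

        names = [token.tok_name[tok[0]]
                 for tok in tokenize.generate_tokens(readline)]
        if names == ['NL', 'COMMENT', 'NL', 'ENDMARKER']:
            return True    # pre-2.6: the newline is folded into the COMMENT token
        if names == ['NL', 'COMMENT', 'NL', 'NL', 'ENDMARKER']:
            return False   # 2.6+: a separate NL token follows every COMMENT
        raise AssertionError("unexpected token stream: %r" % names)

    print(comment_includes_nl())   # False on any Python >= 2.6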
diff --git a/abs/extra/epydoc/string-exceptions.patch b/abs/extra/epydoc/string-exceptions.patch
new file mode 100644
index 0000000..eca9793
--- /dev/null
+++ b/abs/extra/epydoc/string-exceptions.patch
@@ -0,0 +1,18 @@
+# Description: Get rid of string exceptions.
+# One of the changes brought by Python 2.6 is the removal of string
+# exceptions. A mass bug filing identified Epydoc as having potential
+# problems. I later spot-checked all of the exceptions in the code, and I
+# believe this is the only one that we have to worry about.
+# Bug-Debian: http://bugs.debian.org/585290
+# Author: Kenneth J. Pronovici <pronovic@debian.org>
+--- a/epydoc/apidoc.py
++++ b/epydoc/apidoc.py
+@@ -1352,7 +1352,7 @@ class ClassDoc(NamespaceDoc):
+             nothead=[s for s in nonemptyseqs if cand in s[1:]]
+             if nothead: cand=None #reject candidate
+             else: break
+-          if not cand: raise "Inconsistent hierarchy"
++          if not cand: raise TypeError("Inconsistent hierarchy")
+           res.append(cand)
+           for seq in nonemptyseqs: # remove cand
+             if seq[0] == cand: del seq[0]
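For context on this last patch: string exceptions (raise "some message") were removed in Python 2.6, where such a raise fails with a TypeError of its own ("exceptions must be old-style classes or derived from BaseException") instead of reporting the intended error, so the patch substitutes a real exception class. A small illustration (the function name is invented; only the raised message comes from the patch):

    # Illustrative only: what the patched line does.  On Python >= 2.6 the old
    #     raise "Inconsistent hierarchy"
    # dies complaining about the raise itself and hides the real problem;
    # raising a proper exception keeps the message and stays catchable.
    def merge_failed():
        raise TypeError("Inconsistent hierarchy")

    try:
        merge_failed()
    except TypeError as exc:
        print("caught: %s" % exc)   # -> caught: Inconsistent hierarchy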