From 9f010b1561421d8b3e128e07b50de9919955f258 Mon Sep 17 00:00:00 2001
From: Yaroslav Halchenko
Date: Sat, 14 Jun 2014 13:34:18 +0200
Subject: nitime (0.5-1) unstable; urgency=medium

  * New release
    - does not ship sphinxext/inheritance_diagram.py any longer
      (Closes: #706533)
  * debian/copyright
    - extended to cover added 3rd party snippets and updated years
  * debian/watch
    - updated to use githubredir.debian.net service
  * debian/patches
    - debian/patches/up_version_info_python2.6 for compatibility with
      python2.6 (on wheezy etc)

# imported from the archive
---
 tools/apigen.py                 | 426 ++++++++++++++++++++++++++++++++++++++++
 tools/build_modref_templates.py |  19 ++
 tools/build_release             |  31 +++
 tools/ex2rst                    | 277 ++++++++++++++++++++++++++
 tools/github_stats.py           | 149 ++++++++++++++
 tools/gitwash_dumper.py         | 151 ++++++++++++++
 tools/make_examples.py          |  98 +++++++++
 tools/release                   |  31 +++
 tools/sneeze.py                 |  50 +++++
 tools/toollib.py                |  34 ++++
 10 files changed, 1266 insertions(+)
 create mode 100644 tools/apigen.py
 create mode 100755 tools/build_modref_templates.py
 create mode 100755 tools/build_release
 create mode 100755 tools/ex2rst
 create mode 100755 tools/github_stats.py
 create mode 100755 tools/gitwash_dumper.py
 create mode 100755 tools/make_examples.py
 create mode 100755 tools/release
 create mode 100755 tools/sneeze.py
 create mode 100644 tools/toollib.py
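The up_version_info_python2.6 patch itself is not part of this diff; presumably it works around the fact that sys.version_info is a plain tuple on python2.6, where the named attributes added in 2.7 (such as sys.version_info.major) are unavailable. A purely illustrative sketch of the portable idiom:

    import sys
    # On python2.6 sys.version_info is a plain tuple, so attribute access
    # like sys.version_info.major fails there; indexing works everywhere.
    major, minor = sys.version_info[:2]
    print('%d.%d' % (major, minor))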
diff --git a/tools/apigen.py b/tools/apigen.py
new file mode 100644
index 0000000..afce9da
--- /dev/null
+++ b/tools/apigen.py
@@ -0,0 +1,426 @@
+"""Attempt to generate templates for module reference with Sphinx
+
+XXX - we exclude extension modules
+
+To include extension modules, first identify them as valid in the
+``_uri2path`` method, then handle them in the ``_parse_module`` script.
+
+We get functions and classes by parsing the text of .py files.
+Alternatively we could import the modules for discovery, and we'd have
+to do that for extension modules.  This would involve changing the
+``_parse_module`` method to work via import and introspection, and
+might involve changing ``discover_modules`` (which determines which
+files are modules, and therefore which module URIs will be passed to
+``_parse_module``).
+
+NOTE: this is a modified version of a script originally shipped with the
+PyMVPA project, which we've adapted for NIPY use.  PyMVPA is an MIT-licensed
+project."""
+
+# Stdlib imports
+import os
+import re
+
+# Functions and classes
+class ApiDocWriter(object):
+    ''' Class for automatic detection and parsing of API docs
+    to Sphinx-parsable reST format'''
+
+    # only separating first two levels
+    rst_section_levels = ['*', '=', '-', '~', '^']
+
+    def __init__(self,
+                 package_name,
+                 rst_extension='.rst',
+                 package_skip_patterns=None,
+                 module_skip_patterns=None,
+                 ):
+        ''' Initialize package for parsing
+
+        Parameters
+        ----------
+        package_name : string
+            Name of the top-level package.  *package_name* must be the
+            name of an importable package
+        rst_extension : string, optional
+            Extension for reST files, default '.rst'
+        package_skip_patterns : None or sequence of {strings, regexps}
+            Sequence of strings giving URIs of packages to be excluded
+            Operates on the package path, starting at (including) the
+            first dot in the package path, after *package_name* - so,
+            if *package_name* is ``sphinx``, then ``sphinx.util`` will
+            result in ``.util`` being passed for searching by these
+            regexps.  If None, gives the default.  Default is:
+            ['\.tests$']
+        module_skip_patterns : None or sequence
+            Sequence of strings giving URIs of modules to be excluded
+            Operates on the module name including preceding URI path,
+            back to the first dot after *package_name*.  For example
+            ``sphinx.util.console`` results in the string to search of
+            ``.util.console``
+            If None, gives the default.  Default is:
+            ['\.setup$', '\._']
+        '''
+        if package_skip_patterns is None:
+            package_skip_patterns = ['\\.tests$']
+        if module_skip_patterns is None:
+            module_skip_patterns = ['\\.setup$', '\\._']
+        self.package_name = package_name
+        self.rst_extension = rst_extension
+        self.package_skip_patterns = package_skip_patterns
+        self.module_skip_patterns = module_skip_patterns
+
+    def get_package_name(self):
+        return self._package_name
+
+    def set_package_name(self, package_name):
+        ''' Set package_name
+
+        >>> docwriter = ApiDocWriter('sphinx')
+        >>> import sphinx
+        >>> docwriter.root_path == sphinx.__path__[0]
+        True
+        >>> docwriter.package_name = 'docutils'
+        >>> import docutils
+        >>> docwriter.root_path == docutils.__path__[0]
+        True
+        '''
+        # It's also possible to imagine caching the module parsing here
+        self._package_name = package_name
+        self.root_module = __import__(package_name)
+        self.root_path = self.root_module.__path__[0]
+        self.written_modules = None
+
+    package_name = property(get_package_name, set_package_name, None,
+                            'get/set package_name')
+
+    def _get_object_name(self, line):
+        ''' Get second token in line
+        >>> docwriter = ApiDocWriter('sphinx')
+        >>> docwriter._get_object_name(" def func(): ")
+        'func'
+        >>> docwriter._get_object_name(" class Klass(object): ")
+        'Klass'
+        >>> docwriter._get_object_name(" class Klass: ")
+        'Klass'
+        '''
+        name = line.split()[1].split('(')[0].strip()
+        # in case we have classes which are not derived from object
+        # i.e. old-style classes
+        return name.rstrip(':')
+
+    def _uri2path(self, uri):
+        ''' Convert uri to absolute filepath
+
+        Parameters
+        ----------
+        uri : string
+            URI of python module to return path for
+
+        Returns
+        -------
+        path : None or string
+            Returns None if there is no valid path for this URI
+            Otherwise returns absolute file system path for URI
+
+        Examples
+        --------
+        >>> docwriter = ApiDocWriter('sphinx')
+        >>> import sphinx
+        >>> modpath = sphinx.__path__[0]
+        >>> res = docwriter._uri2path('sphinx.builder')
+        >>> res == os.path.join(modpath, 'builder.py')
+        True
+        >>> res = docwriter._uri2path('sphinx')
+        >>> res == os.path.join(modpath, '__init__.py')
+        True
+        >>> docwriter._uri2path('sphinx.does_not_exist')
+
+        '''
+        if uri == self.package_name:
+            return os.path.join(self.root_path, '__init__.py')
+        path = uri.replace('.', os.path.sep)
+        path = path.replace(self.package_name + os.path.sep, '')
+        path = os.path.join(self.root_path, path)
+        # XXX maybe check for extensions as well?
+        if os.path.exists(path + '.py'):  # file
+            path += '.py'
+        elif os.path.exists(os.path.join(path, '__init__.py')):
+            path = os.path.join(path, '__init__.py')
+        else:
+            return None
+        return path
+    def _path2uri(self, dirpath):
+        ''' Convert directory path to uri '''
+        relpath = dirpath.replace(self.root_path, self.package_name)
+        if relpath.startswith(os.path.sep):
+            relpath = relpath[1:]
+        return relpath.replace(os.path.sep, '.')
+
+    def _parse_module(self, uri):
+        ''' Parse module defined in *uri* '''
+        filename = self._uri2path(uri)
+        if filename is None:
+            # nothing that we could handle here.
+            return ([], [])
+        f = open(filename, 'rt')
+        functions, classes = self._parse_lines(f)
+        f.close()
+        return functions, classes
+
+    def _parse_lines(self, linesource):
+        ''' Parse lines of text for functions and classes '''
+        functions = []
+        classes = []
+        for line in linesource:
+            if line.startswith('def ') and line.count('('):
+                # exclude private stuff
+                name = self._get_object_name(line)
+                if not name.startswith('_'):
+                    functions.append(name)
+            elif line.startswith('class '):
+                # exclude private stuff
+                name = self._get_object_name(line)
+                if not name.startswith('_'):
+                    classes.append(name)
+            else:
+                pass
+        functions.sort()
+        classes.sort()
+        return functions, classes
+
+    def generate_api_doc(self, uri):
+        '''Make autodoc documentation template string for a module
+
+        Parameters
+        ----------
+        uri : string
+            python location of module - e.g. 'sphinx.builder'
+
+        Returns
+        -------
+        S : string
+            Contents of API doc
+        '''
+        # get the names of all classes and functions
+        functions, classes = self._parse_module(uri)
+        if not len(functions) and not len(classes):
+            print 'WARNING: Empty -', uri  # dbg
+            return ''
+
+        # Make a shorter version of the uri that omits the package name for
+        # titles
+        uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)
+
+        ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
+
+        chap_title = uri_short
+        ad += (chap_title + '\n' + self.rst_section_levels[1] * len(chap_title)
+               + '\n\n')
+
+        # Set the chapter title to read 'module' for all modules except for the
+        # main packages
+        if '.' in uri:
+            title = 'Module: :mod:`' + uri_short + '`'
+        else:
+            title = ':mod:`' + uri_short + '`'
+        ad += title + '\n' + self.rst_section_levels[2] * len(title)
+
+        if len(classes):
+            ad += '\nInheritance diagram for ``%s``:\n\n' % uri
+            ad += '.. inheritance-diagram:: %s \n' % uri
+            ad += '   :parts: 3\n'
+
+        ad += '\n.. automodule:: ' + uri + '\n'
+        ad += '\n.. currentmodule:: ' + uri + '\n'
+        multi_class = len(classes) > 1
+        multi_fx = len(functions) > 1
+        if multi_class:
+            ad += '\n' + 'Classes' + '\n' + \
+                  self.rst_section_levels[2] * 7 + '\n'
+        elif len(classes) and multi_fx:
+            ad += '\n' + 'Class' + '\n' + \
+                  self.rst_section_levels[2] * 5 + '\n'
+        for c in classes:
+            ad += '\n:class:`' + c + '`\n' \
+                  + self.rst_section_levels[multi_class + 2] * \
+                  (len(c) + 9) + '\n\n'
+            ad += '\n.. autoclass:: ' + c + '\n'
+            # must NOT exclude from index to keep cross-refs working
+            ad += '  :members:\n' \
+                  '  :undoc-members:\n' \
+                  '  :show-inheritance:\n' \
+                  '\n' \
+                  '  .. automethod:: __init__\n'
+        if multi_fx:
+            ad += '\n' + 'Functions' + '\n' + \
+                  self.rst_section_levels[2] * 9 + '\n\n'
+        elif len(functions) and multi_class:
+            ad += '\n' + 'Function' + '\n' + \
+                  self.rst_section_levels[2] * 8 + '\n\n'
+        for f in functions:
+            # must NOT exclude from index to keep cross-refs working
+            ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
+        return ad
+    def _survives_exclude(self, matchstr, match_type):
+        ''' Returns True if *matchstr* does not match patterns
+
+        ``self.package_name`` removed from front of string if present
+
+        Examples
+        --------
+        >>> dw = ApiDocWriter('sphinx')
+        >>> dw._survives_exclude('sphinx.okpkg', 'package')
+        True
+        >>> dw.package_skip_patterns.append('^\\.badpkg$')
+        >>> dw._survives_exclude('sphinx.badpkg', 'package')
+        False
+        >>> dw._survives_exclude('sphinx.badpkg', 'module')
+        True
+        >>> dw._survives_exclude('sphinx.badmod', 'module')
+        True
+        >>> dw.module_skip_patterns.append('^\\.badmod$')
+        >>> dw._survives_exclude('sphinx.badmod', 'module')
+        False
+        '''
+        if match_type == 'module':
+            patterns = self.module_skip_patterns
+        elif match_type == 'package':
+            patterns = self.package_skip_patterns
+        else:
+            raise ValueError('Cannot interpret match type "%s"'
+                             % match_type)
+        # Match to URI without package name
+        L = len(self.package_name)
+        if matchstr[:L] == self.package_name:
+            matchstr = matchstr[L:]
+        for pat in patterns:
+            try:
+                pat.search
+            except AttributeError:
+                pat = re.compile(pat)
+            if pat.search(matchstr):
+                return False
+        return True
+
+    def discover_modules(self):
+        ''' Return module sequence discovered from ``self.package_name``
+
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        mods : sequence
+            Sequence of module names within ``self.package_name``
+
+        Examples
+        --------
+        >>> dw = ApiDocWriter('sphinx')
+        >>> mods = dw.discover_modules()
+        >>> 'sphinx.util' in mods
+        True
+        >>> dw.package_skip_patterns.append('\.util$')
+        >>> 'sphinx.util' in dw.discover_modules()
+        False
+        '''
+        modules = [self.package_name]
+        # raw directory parsing
+        for dirpath, dirnames, filenames in os.walk(self.root_path):
+            # Check directory names for packages
+            root_uri = self._path2uri(os.path.join(self.root_path,
+                                                   dirpath))
+            for dirname in dirnames[:]:  # copy list - we modify inplace
+                package_uri = '.'.join((root_uri, dirname))
+                if (self._uri2path(package_uri) and
+                    self._survives_exclude(package_uri, 'package')):
+                    modules.append(package_uri)
+                else:
+                    dirnames.remove(dirname)
+            # Check filenames for modules
+            for filename in filenames:
+                module_name = filename[:-3]
+                module_uri = '.'.join((root_uri, module_name))
+                if (self._uri2path(module_uri) and
+                    self._survives_exclude(module_uri, 'module')):
+                    modules.append(module_uri)
+        return sorted(modules)
+
+    def write_modules_api(self, modules, outdir):
+        # write the list
+        written_modules = []
+        for m in modules:
+            api_str = self.generate_api_doc(m)
+            if not api_str:
+                continue
+            # write out to file
+            outfile = os.path.join(outdir,
+                                   m + self.rst_extension)
+            fileobj = open(outfile, 'wt')
+            fileobj.write(api_str)
+            fileobj.close()
+            written_modules.append(m)
+        self.written_modules = written_modules
+
+    def write_api_docs(self, outdir):
+        """Generate API reST files.
+
+        Parameters
+        ----------
+        outdir : string
+            Directory name in which to store files
+            We create automatic filenames for each module
+
+        Returns
+        -------
+        None
+
+        Notes
+        -----
+        Sets self.written_modules to list of written modules
+        """
+        if not os.path.exists(outdir):
+            os.mkdir(outdir)
+        # compose list of modules
+        modules = self.discover_modules()
+        self.write_modules_api(modules, outdir)
+
+    def write_index(self, outdir, froot='gen', relative_to=None):
+        """Make a reST API index file from written files
+
+        Parameters
+        ----------
+        outdir : string
+            Directory to which to write generated index file
+        froot : string, optional
+            root (filename without extension) of filename to write to
+            Defaults to 'gen'.  We add ``self.rst_extension``.
+        relative_to : string
+            path to which written filenames are relative.  This
+            component of the written file path will be removed from
+            outdir, in the generated index.  Default is None, meaning,
+            leave path as it is.
+        """
+        if self.written_modules is None:
+            raise ValueError('No modules written')
+        # Get full filename path
+        path = os.path.join(outdir, froot + self.rst_extension)
+        # Path written into index is relative to rootpath
+        if relative_to is not None:
+            relpath = outdir.replace(relative_to + os.path.sep, '')
+        else:
+            relpath = outdir
+        idx = open(path, 'wt')
+        w = idx.write
+        w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
+        w('.. toctree::\n\n')
+        for f in self.written_modules:
+            w('   %s\n' % os.path.join(relpath, f))
+        idx.close()
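The skip patterns in _survives_exclude() are matched only after the package name has been stripped from the front of the URI, which is why the defaults all begin with '\.'. A minimal sketch of that matching rule (the '.fixes' pattern and URI are illustrative):

    import re
    # ApiDocWriter tests 'nitime.fixes' as '.fixes' against each pattern,
    # so a pattern like r'fixes$' without the leading dot would also match
    # a hypothetical 'nitime.prefixes'.
    patterns = [r'\.tests$', r'\.fixes$']
    uri = 'nitime.fixes'
    matchstr = uri[len('nitime'):]   # -> '.fixes'
    print(any(re.compile(pat).search(matchstr) for pat in patterns))  # True -> excluded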
diff --git a/tools/build_modref_templates.py b/tools/build_modref_templates.py
new file mode 100755
index 0000000..be584b6
--- /dev/null
+++ b/tools/build_modref_templates.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""Script to auto-generate our API docs.
+"""
+# stdlib imports
+import os
+
+# local imports
+from apigen import ApiDocWriter
+
+#*****************************************************************************
+if __name__ == '__main__':
+    package = 'nitime'
+    outdir = os.path.join('api', 'generated')
+    docwriter = ApiDocWriter(package)
+    docwriter.package_skip_patterns += [r'\.fixes$',
+                                        ]
+    docwriter.write_api_docs(outdir)
+    docwriter.write_index(outdir, 'gen', relative_to='api')
+    print '%d files written' % len(docwriter.written_modules)
+""" +from toollib import * + +# Find our directory and go one above, regardless of where we were called from +my_dir = os.path.split(os.path.abspath(__file__))[0] +cd(pjoin(my_dir, '..')) + +# Load release info +execfile(pjoin('nitime','version.py')) + +# Check that everything compiles +compile_tree() + +# Cleanup +for d in ['build','dist', pjoin('doc','_build'), pjoin('docs','dist')]: + if os.path.isdir(d): + remove_tree(d) + +# Build source and binary distros +sh('./setup.py sdist --formats=gztar,zip') + +# Build eggs +sh('python ./setup_egg.py bdist_egg') + +# Create a windows distribution +# sh("python setup.py bdist_wininst") + +# Change name so retarded Vista runs the installer correctly +# sh("rename 's/linux-i686/win32-setup/' dist/*.exe") diff --git a/tools/ex2rst b/tools/ex2rst new file mode 100755 index 0000000..59207a3 --- /dev/null +++ b/tools/ex2rst @@ -0,0 +1,277 @@ +#!/usr/bin/env python +# +# Note: this file is copied (possibly with minor modifications) from the +# sources of the PyMVPA project - http://pymvpa.org. It remains licensed as +# the rest of PyMVPA (MIT license as of October 2010). +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the PyMVPA package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## + +"""Helper to automagically generate ReST versions of examples""" + +__docformat__ = 'restructuredtext' + + +import os +import sys +import re +import glob +from optparse import OptionParser + + +def auto_image(line): + """Automatically replace generic image markers with ones that have full + size (width/height) info, plus a :target: link to the original png, to be + used in the html docs. + """ + img_re = re.compile(r'(\s*)\.\. image::\s*(.*)$') + m = img_re.match(line) + if m is None: + # Not an image declaration, leave the line alone and return unmodified + return line + + # Match means it's an image spec, we rewrite it with extra tags + ini_space = m.group(1) + lines = [line, + ini_space + ' :width: 500\n', + #ini_space + ' :height: 350\n' + ] + fspec = m.group(2) + if fspec.endswith('.*'): + fspec = fspec.replace('.*', '.png') + fspec = fspec.replace('fig/', '../_images/') + lines.append(ini_space + (' :target: %s\n' % fspec) ) + lines.append('\n') + return ''.join(lines) + +def exfile2rst(filename): + """Open a Python script and convert it into an ReST string. + """ + # output string + s = '' + + # open source file + xfile = open(filename) + + # parser status vars + inheader = True + indocs = False + doc2code = False + code2doc = False + # an empty line found in the example enables the check for a potentially + # indented docstring starting on the next line (as an attempt to exclude + # function or class docstrings) + last_line_empty = False + # indentation of indented docstring, which is removed from the RsT output + # since we typically do not want an indentation there. 
diff --git a/tools/ex2rst b/tools/ex2rst
new file mode 100755
index 0000000..59207a3
--- /dev/null
+++ b/tools/ex2rst
@@ -0,0 +1,277 @@
+#!/usr/bin/env python
+#
+# Note: this file is copied (possibly with minor modifications) from the
+# sources of the PyMVPA project - http://pymvpa.org.  It remains licensed as
+# the rest of PyMVPA (MIT license as of October 2010).
+#
+### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+#
+# See COPYING file distributed along with the PyMVPA package for the
+# copyright and license terms.
+#
+### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
+
+"""Helper to automagically generate ReST versions of examples"""
+
+__docformat__ = 'restructuredtext'
+
+
+import os
+import sys
+import re
+import glob
+from optparse import OptionParser
+
+
+def auto_image(line):
+    """Automatically replace generic image markers with ones that have full
+    size (width/height) info, plus a :target: link to the original png, to be
+    used in the html docs.
+    """
+    img_re = re.compile(r'(\s*)\.\. image::\s*(.*)$')
+    m = img_re.match(line)
+    if m is None:
+        # Not an image declaration, leave the line alone and return unmodified
+        return line
+
+    # Match means it's an image spec, we rewrite it with extra tags
+    ini_space = m.group(1)
+    lines = [line,
+             ini_space + '   :width: 500\n',
+             #ini_space + '   :height: 350\n'
+             ]
+    fspec = m.group(2)
+    if fspec.endswith('.*'):
+        fspec = fspec.replace('.*', '.png')
+        fspec = fspec.replace('fig/', '../_images/')
+    lines.append(ini_space + ('   :target: %s\n' % fspec))
+    lines.append('\n')
+    return ''.join(lines)
+
+def exfile2rst(filename):
+    """Open a Python script and convert it into an ReST string.
+    """
+    # output string
+    s = ''
+
+    # open source file
+    xfile = open(filename)
+
+    # parser status vars
+    inheader = True
+    indocs = False
+    doc2code = False
+    code2doc = False
+    # an empty line found in the example enables the check for a potentially
+    # indented docstring starting on the next line (as an attempt to exclude
+    # function or class docstrings)
+    last_line_empty = False
+    # indentation of indented docstring, which is removed from the ReST output
+    # since we typically do not want an indentation there.
+    indent_level = 0
+
+    for line in xfile:
+        # skip header
+        if inheader and \
+           not (line.startswith('"""') or line.startswith("'''")):
+            continue
+        # determine end of header
+        if inheader and (line.startswith('"""') or line.startswith("'''")):
+            inheader = False
+
+        # strip comments and remove trailing whitespace
+        if not indocs and last_line_empty:
+            # first remove leading whitespace and store indent level
+            cleanline = line[:line.find('#')].lstrip()
+            indent_level = len(line) - len(cleanline) - 1
+            cleanline = cleanline.rstrip()
+        else:
+            cleanline = line[:line.find('#')].rstrip()
+
+        if not indocs and line == '\n':
+            last_line_empty = True
+        else:
+            last_line_empty = False
+
+        # if we have something that should go into the text
+        if indocs \
+           or (cleanline.startswith('"""') or cleanline.startswith("'''")):
+            proc_line = None
+            # handle doc start
+            if not indocs:
+                # guaranteed to start with """
+                if len(cleanline) > 3 \
+                   and (cleanline.endswith('"""') \
+                        or cleanline.endswith("'''")):
+                    # single line doc
+                    code2doc = True
+                    doc2code = True
+                    proc_line = cleanline[3:-3]
+                else:
+                    # must be start of multiline block
+                    indocs = True
+                    code2doc = True
+                    # rescue what is left on the line
+                    proc_line = cleanline[3:]  # strip """
+            else:
+                # we are already in the docs
+                # handle doc end
+                if cleanline.endswith('"""') or cleanline.endswith("'''"):
+                    indocs = False
+                    doc2code = True
+                    # rescue what is left on the line
+                    proc_line = cleanline[:-3]
+                    # reset the indentation
+                    indent_level = 0
+                else:
+                    # has to be documentation
+                    # if the indentation is whitespace remove it, otherwise
+                    # keep it (accounts for some variation in docstring
+                    # styles)
+                    real_indent = \
+                        indent_level - len(line[:indent_level].lstrip())
+                    proc_line = line[real_indent:]
+
+            if code2doc:
+                code2doc = False
+                s += '\n'
+
+            proc_line = auto_image(proc_line)
+
+            if proc_line:
+                s += proc_line.rstrip() + '\n'
+
+        else:
+            if doc2code:
+                doc2code = False
+                s += '\n::\n'
+
+            # has to be code
+            s += '  %s' % line
+
+    xfile.close()
+
+    return s
+
+def exfile2rstfile(filename, opts):
+    """Convert a single example script into an ReST file in opts.outdir.
+    """
+    # doc filename
+    dfilename = os.path.basename(filename[:-3]) + '.rst'
+
+    # open dest file
+    dfile = open(os.path.join(opts.outdir, os.path.basename(dfilename)), 'w')
+
+    # place header
+    dfile.write('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
+
+    # place cross-ref target
+    dfile.write('.. _example_' + dfilename[:-4] + ':\n\n')
+
+    # write converted ReST
+    dfile.write(exfile2rst(filename))
+
+    if opts.sourceref:
+        # write post example see also box
+        msg = """
+
+.. admonition:: Example source code
+
+   You can download :download:`the full source code of this example <%s>`.
+   This same script is also included in the %s source distribution under the
+   :file:`doc/examples/` directory.
+
+""" % (filename, opts.project)
+
+        dfile.write(msg)
+
+    dfile.close()
+
+
+
+def main():
+    parser = OptionParser(
+        usage="%prog [options] <files or directories> [...]",
+        version="%prog 0.1",
+        description="""\
+%prog converts Python scripts into restructured text (ReST) format suitable for
+integration into the Sphinx documentation framework.  Its key feature is that it
+extracts stand-alone (unassigned) single or multiline triple-quote docstrings
+and moves them out of the code listing so that they are rendered as regular
+ReST, while at the same time maintaining their position relative to the
+listing.
+
+The detection of such docstrings is exclusively done by parsing the raw code,
+so it is never actually imported into a running Python session.  Docstrings
+have to be written using triple quotes (both forms, " and ', are possible).
+
+It is recommended that such docstrings are preceded and followed by an empty
+line.  Indented docstrings can make use of the full linewidth from the second
+docstring line on.  If the indentation of a multiline docstring is maintained
+for all lines, the respective indentation is removed in the ReST output.
+
+The parser algorithm automatically excludes file headers and starts with the
+first (module-level) docstring instead.
+""")  #'
+
+    # define options
+    parser.add_option('--verbose', action='store_true', dest='verbose',
+                      default=False, help='print status messages')
+    parser.add_option('-x', '--exclude', action='append', dest='excluded',
+                      help="""\
+Use this option to exclude individual files from the set of files to be parsed.
+This is especially useful to exclude files when parsing complete directories.
+This option can be specified multiple times.
+""")
+    parser.add_option('-o', '--outdir', action='store', dest='outdir',
+                      type='string', default=None, help="""\
+Target directory to write the ReST output to.  This is a required option.
+""")
+    parser.add_option('--no-sourceref', action='store_false', default=True,
+                      dest='sourceref', help="""\
+If specified, the source reference section will be suppressed.
+""")
+    parser.add_option('--project', type='string', action='store', default='',
+                      dest='project', help="""\
+Name of the project that contains the examples.  This name is used in the
+'seealso' source references.  Default: ''
+""")
+
+    # parse options
+    (opts, args) = parser.parse_args()  # read sys.argv[1:] by default
+
+    # check for required options
+    if opts.outdir is None:
+        print('Required option -o, --outdir not specified.')
+        sys.exit(1)
+
+    # build up list of things to parse
+    toparse = []
+    for t in args:
+        # expand dirs
+        if os.path.isdir(t):
+            # add all python files in that dir
+            toparse += glob.glob(os.path.join(t, '*.py'))
+        else:
+            toparse.append(t)
+
+    # filter parse list
+    if opts.excluded is not None:
+        toparse = [t for t in toparse if t not in opts.excluded]
+
+    toparse_list = toparse
+    toparse = set(toparse)
+
+    if len(toparse) != len(toparse_list):
+        print('Ignoring duplicate parse targets.')
+
+    if not os.path.exists(opts.outdir):
+        os.mkdir(opts.outdir)
+
+    # finally process all examples
+    for t in toparse:
+        exfile2rstfile(t, opts)
+
+
+if __name__ == '__main__':
+    main()
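A minimal sketch (not ex2rst itself) of the transformation the script performs: module-level docstrings become ReST prose, and the surrounding code becomes a literal block opened with '::' and a two-space indent, just as exfile2rst() does above:

    # The example source lines below are illustrative.
    src = ['"""Plot a sine wave."""\n',
           'import numpy as np\n',
           'x = np.sin(np.arange(10))\n']
    out = []
    for line in src:
        stripped = line.rstrip()
        if stripped.startswith('"""') and stripped.endswith('"""') and len(stripped) > 6:
            out.append(stripped.strip('"') + '\n')  # docstring -> ReST prose
            out.append('\n::\n\n')                  # open a literal block
        else:
            out.append('  ' + line)                 # code -> indented literal block
    print(''.join(out))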
+""" +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +from __future__ import print_function + +import json +import re +import sys + +from datetime import datetime, timedelta +from urllib import urlopen + +#----------------------------------------------------------------------------- +# Globals +#----------------------------------------------------------------------------- + +ISO8601 = "%Y-%m-%dT%H:%M:%SZ" +PER_PAGE = 100 + +element_pat = re.compile(r'<(.+?)>') +rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]') + +#----------------------------------------------------------------------------- +# Functions +#----------------------------------------------------------------------------- + +def parse_link_header(headers): + link_s = headers.get('link', '') + urls = element_pat.findall(link_s) + rels = rel_pat.findall(link_s) + d = {} + for rel,url in zip(rels, urls): + d[rel] = url + return d + +def get_paged_request(url): + """get a full list, handling APIv3's paging""" + results = [] + while url: + print("fetching %s" % url, file=sys.stderr) + f = urlopen(url) + results.extend(json.load(f)) + links = parse_link_header(f.headers) + url = links.get('next') + return results + +def get_issues(project="nipy/nitime", state="closed", pulls=False): + """Get a list of the issues from the Github API.""" + which = 'pulls' if pulls else 'issues' + url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE) + return get_paged_request(url) + + +def _parse_datetime(s): + """Parse dates in the format returned by the Github API.""" + if s: + return datetime.strptime(s, ISO8601) + else: + return datetime.fromtimestamp(0) + + +def issues2dict(issues): + """Convert a list of issues to a dict, keyed by issue number.""" + idict = {} + for i in issues: + idict[i['number']] = i + return idict + + +def is_pull_request(issue): + """Return True if the given issue is a pull request.""" + return 'pull_request_url' in issue + + +def issues_closed_since(period=timedelta(days=730), project="nipy/nitime", pulls=False): + """Get all issues closed since a particular point in time. period +can either be a datetime object, or a timedelta object. In the +latter case, it is used as a time before the present.""" + + which = 'pulls' if pulls else 'issues' + + if isinstance(period, timedelta): + period = datetime.now() - period + url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE) + allclosed = get_paged_request(url) + # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period) + filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period] + return filtered + + +def sorted_by_field(issues, field='closed_at', reverse=False): + """Return a list of issues sorted by closing date date.""" + return sorted(issues, key = lambda i:i[field], reverse=reverse) + + +def report(issues, show_urls=False): + """Summary report about a list of issues, printing number and title. 
+ """ + # titles may have unicode in them, so we must encode everything below + if show_urls: + for i in issues: + role = 'ghpull' if 'merged' in i else 'ghissue' + print('* :%s:`%d`: %s' % (role, i['number'], + i['title'].encode('utf-8'))) + else: + for i in issues: + print('* %d: %s' % (i['number'], i['title'].encode('utf-8'))) + +#----------------------------------------------------------------------------- +# Main script +#----------------------------------------------------------------------------- + +if __name__ == "__main__": + # Whether to add reST urls for all issues in printout. + show_urls = True + + # By default, search one month back + if len(sys.argv) > 1: + days = int(sys.argv[1]) + else: + days = 730 + + # turn off to play interactively without redownloading, use %run -i + if 1: + issues = issues_closed_since(timedelta(days=days), pulls=False) + pulls = issues_closed_since(timedelta(days=days), pulls=True) + + # For regular reports, it's nice to show them in reverse chronological order + issues = sorted_by_field(issues, reverse=True) + pulls = sorted_by_field(pulls, reverse=True) + + n_issues, n_pulls = map(len, (issues, pulls)) + n_total = n_issues + n_pulls + + # Print summary report we can directly include into release notes. + print("GitHub stats for the last %d days." % days) + print("We closed a total of %d issues, %d pull requests and %d regular \n" + "issues; this is the full list (generated with the script \n" + "`tools/github_stats.py`):" % (n_total, n_pulls, n_issues)) + print() + print('Pull Requests (%d):\n' % n_pulls) + report(pulls, show_urls) + print() + print('Issues (%d):\n' % n_issues) + report(issues, show_urls) diff --git a/tools/gitwash_dumper.py b/tools/gitwash_dumper.py new file mode 100755 index 0000000..000245a --- /dev/null +++ b/tools/gitwash_dumper.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python +''' Checkout gitwash repo into directory and do search replace on name ''' + +import os +from os.path import join as pjoin +import shutil +import sys +import re +import glob +import fnmatch +import tempfile +from subprocess import call + + +verbose = False + + +def clone_repo(url, branch): + cwd = os.getcwd() + tmpdir = tempfile.mkdtemp() + try: + cmd = 'git clone %s %s' % (url, tmpdir) + call(cmd, shell=True) + os.chdir(tmpdir) + cmd = 'git checkout %s' % branch + call(cmd, shell=True) + except: + shutil.rmtree(tmpdir) + raise + finally: + os.chdir(cwd) + return tmpdir + + +def cp_files(in_path, globs, out_path): + try: + os.makedirs(out_path) + except OSError: + pass + out_fnames = [] + for in_glob in globs: + in_glob_path = pjoin(in_path, in_glob) + for in_fname in glob.glob(in_glob_path): + out_fname = in_fname.replace(in_path, out_path) + pth, _ = os.path.split(out_fname) + if not os.path.isdir(pth): + os.makedirs(pth) + shutil.copyfile(in_fname, out_fname) + out_fnames.append(out_fname) + return out_fnames + + +def filename_search_replace(sr_pairs, filename, backup=False): + ''' Search and replace for expressions in files + + ''' + in_txt = open(filename, 'rt').read(-1) + out_txt = in_txt[:] + for in_exp, out_exp in sr_pairs: + in_exp = re.compile(in_exp) + out_txt = in_exp.sub(out_exp, out_txt) + if in_txt == out_txt: + return False + open(filename, 'wt').write(out_txt) + if backup: + open(filename + '.bak', 'wt').write(in_txt) + return True + + +def copy_replace(replace_pairs, + out_path, + repo_url, + repo_branch = 'master', + cp_globs=('*',), + rep_globs=('*',), + renames = ()): + repo_path = clone_repo(repo_url, repo_branch) + try: + out_fnames = 
diff --git a/tools/make_examples.py b/tools/make_examples.py
new file mode 100755
index 0000000..97ce75a
--- /dev/null
+++ b/tools/make_examples.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+"""Run the py->rst conversion and run all examples.
+
+This also creates the index.rst file appropriately, makes figures, etc.
+"""
+#-----------------------------------------------------------------------------
+# Library imports
+#-----------------------------------------------------------------------------
+
+# Stdlib imports
+import os
+import sys
+
+from glob import glob
+
+# Third-party imports
+
+# We must configure the mpl backend before making any further mpl imports
+import matplotlib
+matplotlib.use('Agg')
+import matplotlib.pyplot as plt
+
+from matplotlib._pylab_helpers import Gcf
+
+# Local tools
+from toollib import *
+
+#-----------------------------------------------------------------------------
+# Globals
+#-----------------------------------------------------------------------------
+
+examples_header = """
+
+.. _examples:
+
+========
+Examples
+========
+
+.. include:: note_about_examples.txt
+
+.. toctree::
+   :maxdepth: 2
+
+
+"""
+#-----------------------------------------------------------------------------
+# Function definitions
+#-----------------------------------------------------------------------------
+
+# These global variables let show() be called by the scripts in the usual
+# manner, but when generating examples, we override it to write the figures to
+# files with a known name (derived from the script name) plus a counter
+figure_basename = None
+
+# We must change the show command to save instead
+def show():
+    allfm = Gcf.get_all_fig_managers()
+    for fcount, fm in enumerate(allfm):
+        fm.canvas.figure.savefig('%s_%02i.png' %
+                                 (figure_basename, fcount + 1))
+
+_mpl_show = plt.show
+plt.show = show
+
+#-----------------------------------------------------------------------------
+# Main script
+#-----------------------------------------------------------------------------
+
+# Work in examples directory
+cd('examples')
+if not os.getcwd().endswith('doc/examples'):
+    raise OSError('This must be run from doc/examples directory')
+
+# Run the conversion from .py to rst file
+sh('../../tools/ex2rst --project Nitime --outdir . .')
+
+# Make the index.rst file
+index = open('index.rst', 'w')
+index.write(examples_header)
+for name in [os.path.splitext(f)[0] for f in glob('*.rst')]:
+    # Don't add the index in there to avoid sphinx errors, and don't add the
+    # note_about_examples again (because it was added at the top):
+    if name not in ('index', 'note_about_examples'):
+        index.write('   %s\n' % name)
+index.close()
+
+# Execute each python script in the directory.
+if '--no-exec' in sys.argv:
+    pass
+else:
+    if not os.path.isdir('fig'):
+        os.mkdir('fig')
+
+    for script in glob('*.py'):
+        figure_basename = pjoin('fig', os.path.splitext(script)[0])
+        execfile(script)
+        plt.close('all')
diff --git a/tools/release b/tools/release
new file mode 100755
index 0000000..26761a5
--- /dev/null
+++ b/tools/release
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+"""Nitime release script.
+
+This should only be run at real release time.
+"""
+
+from toollib import *
+
+# Find our directory and go one above, regardless of where we were called from
+my_dir = os.path.split(os.path.abspath(__file__))[0]
+cd(pjoin(my_dir, '..'))
+
+# Load release info
+execfile(pjoin('nitime', 'version.py'))
+
+print
+print "Releasing Nitime version %s" % __version__
+print
+
+# Build release files
+sh('tools/build_release')
+
+# Register with the Python Package Index (PyPI)
+print "Registering with PyPI..."
+sh('./setup.py register')
+
+# Upload all files
+print "Uploading distribution files..."
+sh('./setup.py sdist upload')
+
+print "Done!"
diff --git a/tools/sneeze.py b/tools/sneeze.py
new file mode 100755
index 0000000..c9cb6b9
--- /dev/null
+++ b/tools/sneeze.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+"""Script to run nose with coverage reporting without boilerplate params.
+
+Usage:
+  sneeze test_coordinate_system.py
+
+Coverage will be reported on the module extracted from the test file
+name by removing the 'test_' prefix and '.py' suffix.  In the above
+example, we'd get the coverage on the coordinate_system module.  The
+test file is searched for an import statement containing the module
+name.
+
+The nose command would look like this:
+
+nosetests -sv --with-coverage --cover-package=nipy.core.reference.coordinate_system test_coordinate_system.py
+
+"""
+
+import re
+import os
+import sys
+import nose
+
+test_file = sys.argv[1]
+module = os.path.splitext(test_file)[0]  # remove '.py' extension
+module = module.split('test_')[1]  # remove 'test_' prefix
+regexp = "[\w\.]+%s" % module
+compexp = re.compile(regexp)
+
+cover_pkg = None
+fp = open(test_file, 'r')
+for line in fp:
+    if line.startswith('from') or line.startswith('import'):
+        pkg = re.search(regexp, line)
+        if pkg:
+            cover_pkg = pkg.group()
+            break
+fp.close()
+
+if cover_pkg:
+    cover_arg = '--cover-package=%s' % cover_pkg
+    sys.argv += ['-sv', '--with-coverage', cover_arg]
+    # Print out command for user feedback and debugging
+    cmd = 'nosetests -sv --with-coverage %s %s' % (cover_arg, test_file)
+    print cmd
+    print
+    nose.run()
+else:
+    raise ValueError('Unable to find module %s imported in test file %s'
+                     % (module, test_file))
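The --cover-package value is recovered with a single regex over the test file's import lines. A self-contained sketch of that lookup (the import line is illustrative):

    import re
    # Find a dotted path ending in the module name, as sneeze does above.
    module = 'coordinate_system'
    line = 'from nipy.core.reference.coordinate_system import CoordinateSystem\n'
    match = re.search(r'[\w\.]+%s' % module, line)
    print(match.group())  # -> nipy.core.reference.coordinate_system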
diff --git a/tools/toollib.py b/tools/toollib.py
new file mode 100644
index 0000000..fe7f437
--- /dev/null
+++ b/tools/toollib.py
@@ -0,0 +1,34 @@
+"""Various utilities common to IPython release and maintenance tools.
+"""
+# Library imports
+import os
+import sys
+import compileall
+
+from subprocess import Popen, PIPE, CalledProcessError, check_call
+
+from distutils.dir_util import remove_tree
+
+# Useful shorthands
+pjoin = os.path.join
+cd = os.chdir
+
+# Utility functions
+
+#-----------------------------------------------------------------------------
+# Functions
+#-----------------------------------------------------------------------------
+def sh(cmd):
+    """Execute command in a subshell; raise CalledProcessError on failure."""
+    return check_call(cmd, shell=True)
+
+
+def compile_tree():
+    """Compile all Python files below current directory."""
+    vstr = '.'.join(map(str, sys.version_info[:2]))
+    ca = compileall.__file__
+    stat = os.system('python %s .' % ca)
+    if stat:
+        msg = '*** ERROR: Some Python files in tree do NOT compile! ***\n'
+        msg += 'See messages above for the actual file that produced it.\n'
+        raise SystemExit(msg)
--
cgit v1.2.3